Diffstat (limited to 'src')
-rw-r--r--  src/boost/assert.hpp  38
-rw-r--r--  src/boost/format.hpp  64
-rw-r--r--  src/boost/format/exceptions.hpp  96
-rw-r--r--  src/boost/format/feed_args.hpp  254
-rw-r--r--  src/boost/format/format_class.hpp  135
-rw-r--r--  src/boost/format/format_fwd.hpp  49
-rw-r--r--  src/boost/format/format_implementation.cc  256
-rw-r--r--  src/boost/format/free_funcs.cc  71
-rw-r--r--  src/boost/format/group.hpp  680
-rw-r--r--  src/boost/format/internals.hpp  167
-rw-r--r--  src/boost/format/internals_fwd.hpp  65
-rw-r--r--  src/boost/format/local.mk  7
-rw-r--r--  src/boost/format/macros_default.hpp  48
-rw-r--r--  src/boost/format/parsing.cc  454
-rw-r--r--  src/boost/throw_exception.hpp  47
-rw-r--r--  src/build-remote/build-remote.cc  283
-rw-r--r--  src/build-remote/local.mk  11
-rw-r--r--  src/buildenv/buildenv.cc  186
-rw-r--r--  src/buildenv/local.mk  9
-rw-r--r--  src/libexpr/attr-path.cc  96
-rw-r--r--  src/libexpr/attr-path.hh  13
-rw-r--r--  src/libexpr/attr-set.cc  63
-rw-r--r--  src/libexpr/attr-set.hh  95
-rw-r--r--  src/libexpr/common-opts.cc  67
-rw-r--r--  src/libexpr/common-opts.hh  20
-rw-r--r--  src/libexpr/eval-inline.hh  81
-rw-r--r--  src/libexpr/eval.cc  1814
-rw-r--r--  src/libexpr/eval.hh  304
-rw-r--r--  src/libexpr/get-drvs.cc  332
-rw-r--r--  src/libexpr/get-drvs.hh  93
-rw-r--r--  src/libexpr/json-to-value.cc  143
-rw-r--r--  src/libexpr/json-to-value.hh  13
-rw-r--r--  src/libexpr/lexer.l  209
-rw-r--r--  src/libexpr/local.mk  33
-rw-r--r--  src/libexpr/names.cc  107
-rw-r--r--  src/libexpr/names.hh  30
-rw-r--r--  src/libexpr/nix-expr.pc.in  10
-rw-r--r--  src/libexpr/nixexpr.cc  438
-rw-r--r--  src/libexpr/nixexpr.hh  343
-rw-r--r--  src/libexpr/parser.y  687
-rw-r--r--  src/libexpr/primops.cc  2077
-rw-r--r--  src/libexpr/primops.hh  15
-rw-r--r--  src/libexpr/primops/fetchgit.cc  84
-rw-r--r--  src/libexpr/primops/fetchgit.hh  14
-rw-r--r--  src/libexpr/symbol-table.hh  80
-rw-r--r--  src/libexpr/value-to-json.cc  95
-rw-r--r--  src/libexpr/value-to-json.hh  19
-rw-r--r--  src/libexpr/value-to-xml.cc  178
-rw-r--r--  src/libexpr/value-to-xml.hh  14
-rw-r--r--  src/libexpr/value.hh  274
-rw-r--r--  src/libmain/common-args.cc  33
-rw-r--r--  src/libmain/common-args.hh  33
-rw-r--r--  src/libmain/local.mk  15
-rw-r--r--  src/libmain/nix-main.pc.in  9
-rw-r--r--  src/libmain/shared.cc  349
-rw-r--r--  src/libmain/shared.hh  107
-rw-r--r--  src/libmain/stack.cc  70
-rw-r--r--  src/libstore/binary-cache-store.cc  407
-rw-r--r--  src/libstore/binary-cache-store.hh  133
-rw-r--r--  src/libstore/build.cc  3935
-rw-r--r--  src/libstore/builtins.cc  71
-rw-r--r--  src/libstore/builtins.hh  9
-rw-r--r--  src/libstore/crypto.cc  126
-rw-r--r--  src/libstore/crypto.hh  54
-rw-r--r--  src/libstore/derivations.cc  444
-rw-r--r--  src/libstore/derivations.hh  122
-rw-r--r--  src/libstore/download.cc  708
-rw-r--r--  src/libstore/download.hh  79
-rw-r--r--  src/libstore/export-import.cc  106
-rw-r--r--  src/libstore/fs-accessor.hh  30
-rw-r--r--  src/libstore/gc.cc  851
-rw-r--r--  src/libstore/globals.cc  104
-rw-r--r--  src/libstore/globals.hh  318
-rw-r--r--  src/libstore/http-binary-cache-store.cc  115
-rw-r--r--  src/libstore/legacy-ssh-store.cc  256
-rw-r--r--  src/libstore/local-binary-cache-store.cc  107
-rw-r--r--  src/libstore/local-fs-store.cc  130
-rw-r--r--  src/libstore/local-store.cc  1345
-rw-r--r--  src/libstore/local-store.hh  289
-rw-r--r--  src/libstore/local.mk  41
-rw-r--r--  src/libstore/misc.cc  277
-rw-r--r--  src/libstore/nar-accessor.cc  142
-rw-r--r--  src/libstore/nar-accessor.hh  11
-rw-r--r--  src/libstore/nar-info-disk-cache.cc  270
-rw-r--r--  src/libstore/nar-info-disk-cache.hh  31
-rw-r--r--  src/libstore/nar-info.cc  116
-rw-r--r--  src/libstore/nar-info.hh  24
-rw-r--r--  src/libstore/nix-store.pc.in  9
-rw-r--r--  src/libstore/optimise-store.cc  275
-rw-r--r--  src/libstore/pathlocks.cc  216
-rw-r--r--  src/libstore/pathlocks.hh  46
-rw-r--r--  src/libstore/profiles.cc  236
-rw-r--r--  src/libstore/profiles.hh  65
-rw-r--r--  src/libstore/references.cc  122
-rw-r--r--  src/libstore/references.hh  11
-rw-r--r--  src/libstore/remote-fs-accessor.cc  57
-rw-r--r--  src/libstore/remote-fs-accessor.hh  29
-rw-r--r--  src/libstore/remote-store.cc  655
-rw-r--r--  src/libstore/remote-store.hh  142
-rw-r--r--  src/libstore/s3-binary-cache-store.cc  339
-rw-r--r--  src/libstore/s3-binary-cache-store.hh  33
-rw-r--r--  src/libstore/s3.hh  33
-rw-r--r--  src/libstore/sandbox-defaults.sb.in  63
-rw-r--r--  src/libstore/schema.sql  42
-rw-r--r--  src/libstore/serve-protocol.hh  23
-rw-r--r--  src/libstore/sqlite.cc  197
-rw-r--r--  src/libstore/sqlite.hh  114
-rw-r--r--  src/libstore/ssh-store.cc  104
-rw-r--r--  src/libstore/ssh.cc  102
-rw-r--r--  src/libstore/ssh.hh  49
-rw-r--r--  src/libstore/store-api.cc  836
-rw-r--r--  src/libstore/store-api.hh  729
-rw-r--r--  src/libstore/worker-protocol.hh  66
-rw-r--r--  src/libutil/affinity.cc  55
-rw-r--r--  src/libutil/affinity.hh  9
-rw-r--r--  src/libutil/archive.cc  351
-rw-r--r--  src/libutil/archive.hh  92
-rw-r--r--  src/libutil/args.cc  180
-rw-r--r--  src/libutil/args.hh  163
-rw-r--r--  src/libutil/compression.cc  315
-rw-r--r--  src/libutil/compression.hh  26
-rw-r--r--  src/libutil/config.cc  230
-rw-r--r--  src/libutil/config.hh  189
-rw-r--r--  src/libutil/finally.hh  14
-rw-r--r--  src/libutil/hash.cc  354
-rw-r--r--  src/libutil/hash.hh  133
-rw-r--r--  src/libutil/istringstream_nocopy.hh  92
-rw-r--r--  src/libutil/json.cc  169
-rw-r--r--  src/libutil/json.hh  181
-rw-r--r--  src/libutil/local.mk  13
-rw-r--r--  src/libutil/logging.cc  82
-rw-r--r--  src/libutil/logging.hh  99
-rw-r--r--  src/libutil/lru-cache.hh  92
-rw-r--r--  src/libutil/monitor-fd.hh  42
-rw-r--r--  src/libutil/pool.hh  157
-rw-r--r--  src/libutil/ref.hh  92
-rw-r--r--  src/libutil/serialise.cc  247
-rw-r--r--  src/libutil/serialise.hh  241
-rw-r--r--  src/libutil/sync.hh  87
-rw-r--r--  src/libutil/thread-pool.cc  104
-rw-r--r--  src/libutil/thread-pool.hh  119
-rw-r--r--  src/libutil/types.hh  154
-rw-r--r--  src/libutil/util.cc  1295
-rw-r--r--  src/libutil/util.hh  457
-rw-r--r--  src/libutil/xml-writer.cc  94
-rw-r--r--  src/libutil/xml-writer.hh  69
-rw-r--r--  src/nix-build/local.mk  9
-rwxr-xr-x  src/nix-build/nix-build.cc  515
-rw-r--r--  src/nix-channel/local.mk  7
-rwxr-xr-x  src/nix-channel/nix-channel.cc  267
-rw-r--r--  src/nix-collect-garbage/local.mk  7
-rw-r--r--  src/nix-collect-garbage/nix-collect-garbage.cc  92
-rw-r--r--  src/nix-copy-closure/local.mk  7
-rwxr-xr-x  src/nix-copy-closure/nix-copy-closure.cc  63
-rw-r--r--  src/nix-daemon/local.mk  13
-rw-r--r--  src/nix-daemon/nix-daemon.cc  1042
-rw-r--r--  src/nix-env/local.mk  7
-rw-r--r--  src/nix-env/nix-env.cc  1426
-rw-r--r--  src/nix-env/user-env.cc  155
-rw-r--r--  src/nix-env/user-env.hh  13
-rw-r--r--  src/nix-instantiate/local.mk  7
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc  197
-rw-r--r--  src/nix-prefetch-url/local.mk  7
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc  210
-rw-r--r--  src/nix-store/dotgraph.cc  156
-rw-r--r--  src/nix-store/dotgraph.hh  11
-rw-r--r--  src/nix-store/local.mk  9
-rw-r--r--  src/nix-store/nix-store.cc  1070
-rw-r--r--  src/nix-store/xmlgraph.cc  66
-rw-r--r--  src/nix-store/xmlgraph.hh  11
-rw-r--r--  src/nix/build.cc  32
-rw-r--r--  src/nix/cat.cc  74
-rw-r--r--  src/nix/command.cc  132
-rw-r--r--  src/nix/command.hh  149
-rw-r--r--  src/nix/copy.cc  57
-rw-r--r--  src/nix/eval.cc  43
-rw-r--r--  src/nix/hash.cc  140
-rw-r--r--  src/nix/installables.cc  255
-rw-r--r--  src/nix/legacy.cc  7
-rw-r--r--  src/nix/legacy.hh  23
-rw-r--r--  src/nix/local.mk  9
-rw-r--r--  src/nix/log.cc  47
-rw-r--r--  src/nix/ls.cc  123
-rw-r--r--  src/nix/main.cc  61
-rw-r--r--  src/nix/path-info.cc  105
-rw-r--r--  src/nix/progress-bar.cc  157
-rw-r--r--  src/nix/progress-bar.hh  15
-rw-r--r--  src/nix/run.cc  104
-rw-r--r--  src/nix/show-config.cc  38
-rw-r--r--  src/nix/sigs.cc  139
-rw-r--r--  src/nix/verify.cc  172
-rw-r--r--  src/resolve-system-dependencies/local.mk  11
-rw-r--r--  src/resolve-system-dependencies/resolve-system-dependencies.cc  194
193 files changed, 40072 insertions, 0 deletions
diff --git a/src/boost/assert.hpp b/src/boost/assert.hpp
new file mode 100644
index 000000000..754ebb954
--- /dev/null
+++ b/src/boost/assert.hpp
@@ -0,0 +1,38 @@
+//
+// boost/assert.hpp - BOOST_ASSERT(expr)
+//
+// Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+// Note: There are no include guards. This is intentional.
+//
+// See http://www.boost.org/libs/utility/assert.html for documentation.
+//
+
+#undef BOOST_ASSERT
+
+#if defined(BOOST_DISABLE_ASSERTS)
+
+# define BOOST_ASSERT(expr) ((void)0)
+
+#elif defined(BOOST_ENABLE_ASSERT_HANDLER)
+
+#include <boost/current_function.hpp>
+
+namespace boost
+{
+
+void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined
+
+} // namespace boost
+
+#define BOOST_ASSERT(expr) ((expr)? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
+
+#else
+# include <assert.h>
+# define BOOST_ASSERT(expr) assert(expr)
+#endif
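
A minimal sketch of how the BOOST_ENABLE_ASSERT_HANDLER branch above is meant to be used. This is not part of the patch; it assumes boost/current_function.hpp is available on the include path (it is not shipped in this vendored subset) and that src/ is an include directory.

    // Build with -DBOOST_ENABLE_ASSERT_HANDLER so BOOST_ASSERT routes
    // failures to the user-defined boost::assertion_failed() below.
    #include <cstdio>
    #include <cstdlib>
    #include <boost/assert.hpp>

    namespace boost {
        void assertion_failed(char const * expr, char const * function,
                              char const * file, long line)
        {
            std::fprintf(stderr, "assertion '%s' failed in %s (%s:%ld)\n",
                         expr, function, file, line);
            std::abort();
        }
    }

    int main()
    {
        int answer = 41;
        BOOST_ASSERT(answer == 42);   // calls boost::assertion_failed() instead of assert()
    }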
diff --git a/src/boost/format.hpp b/src/boost/format.hpp
new file mode 100644
index 000000000..f965f0f33
--- /dev/null
+++ b/src/boost/format.hpp
@@ -0,0 +1,64 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// format.hpp : primary header
+// ----------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_HPP
+#define BOOST_FORMAT_HPP
+
+#include <vector>
+#include <string>
+#include <sstream>
+#include <cassert>
+
+#if HAVE_LOCALE
+#include <locale>
+#else
+#define BOOST_NO_STD_LOCALE
+#define BOOST_NO_LOCALE_ISIDIGIT
+#include <cctype>
+#endif
+
+#include <boost/format/macros_default.hpp>
+
+
+// **** Forward declarations ----------------------------------
+#include <boost/format/format_fwd.hpp> // basic_format<Ch,Tr>, and other frontends
+#include <boost/format/internals_fwd.hpp> // misc forward declarations for internal use
+
+
+// **** Auxiliary structs (stream_format_state<Ch,Tr> , and format_item<Ch,Tr> )
+#include <boost/format/internals.hpp>
+
+// **** Format class interface --------------------------------
+#include <boost/format/format_class.hpp>
+
+// **** Exceptions -----------------------------------------------
+#include <boost/format/exceptions.hpp>
+
+// **** Implementation -------------------------------------------
+//#include <boost/format/format_implementation.hpp> // member functions
+
+#include <boost/format/group.hpp> // class for grouping arguments
+
+#include <boost/format/feed_args.hpp> // argument-feeding functions
+//#include <boost/format/parsing.hpp> // format-string parsing (member-)functions
+
+// **** Implementation of the free functions ----------------------
+//#include <boost/format/free_funcs.hpp>
+
+
+#endif // BOOST_FORMAT_HPP
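
For orientation, a minimal usage sketch of the class this primary header pulls in. It is not part of the patch and assumes src/ is on the include path and that the program links against the library built from the .cc files below (libnixformat per local.mk).

    #include <iostream>
    #include <boost/format.hpp>

    int main()
    {
        boost::format fmt("%1% downloaded %2% paths (%3% MiB)");
        fmt % "nix" % 12 % 4.5;          // arguments are fed one by one via operator%
        std::cout << fmt << '\n';        // stream the formatted result...
        std::string s = fmt.str();       // ...or collect it as a std::string
    }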
diff --git a/src/boost/format/exceptions.hpp b/src/boost/format/exceptions.hpp
new file mode 100644
index 000000000..a7641458c
--- /dev/null
+++ b/src/boost/format/exceptions.hpp
@@ -0,0 +1,96 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// exceptions.hpp
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_EXCEPTIONS_HPP
+#define BOOST_FORMAT_EXCEPTIONS_HPP
+
+
+#include <stdexcept>
+
+
+namespace boost {
+
+namespace io {
+
+// **** exceptions -----------------------------------------------
+
+class format_error : public std::exception
+{
+public:
+ format_error() { abort(); }
+ virtual const char *what() const throw()
+ {
+ return "boost::format_error: "
+ "format generic failure";
+ }
+};
+
+class bad_format_string : public format_error
+{
+public:
+ bad_format_string() { abort(); }
+ virtual const char *what() const throw()
+ {
+ return "boost::bad_format_string: "
+ "format-string is ill-formed";
+ }
+};
+
+class too_few_args : public format_error
+{
+public:
+ too_few_args() { abort(); }
+ virtual const char *what() const throw()
+ {
+ return "boost::too_few_args: "
+            "format-string referred to more arguments than were passed";
+ }
+};
+
+class too_many_args : public format_error
+{
+public:
+ too_many_args() { abort(); }
+ virtual const char *what() const throw()
+ {
+ return "boost::too_many_args: "
+            "format-string referred to fewer arguments than were passed";
+ }
+};
+
+
+class out_of_range : public format_error
+{
+public:
+ out_of_range() { abort(); }
+ virtual const char *what() const throw()
+ {
+ return "boost::out_of_range: "
+ "tried to refer to an argument (or item) number which is out of range, "
+ "according to the format string.";
+ }
+};
+
+
+} // namespace io
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_EXCEPTIONS_HPP
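
Note that in this vendored copy every exception constructor calls abort(), so these conditions terminate the process rather than unwind. The way to tolerate a mismatch is to clear the corresponding bit from the error mask defined in format_fwd.hpp before feeding arguments; a small sketch, assuming the headers and library from this patch are available:

    #include <boost/format.hpp>

    int main()
    {
        boost::format fmt("%1%");
        // Default policy is io::all_error_bits: a surplus argument would construct
        // io::too_many_args, whose constructor abort()s in this copy.
        fmt.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
        fmt % "kept" % "silently ignored";      // the extra argument is now dropped
        return fmt.str() == "kept" ? 0 : 1;
    }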
diff --git a/src/boost/format/feed_args.hpp b/src/boost/format/feed_args.hpp
new file mode 100644
index 000000000..cdd57fdf2
--- /dev/null
+++ b/src/boost/format/feed_args.hpp
@@ -0,0 +1,254 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// feed_args.hpp : functions for processing each argument
+// (feed, feed_manip, and distribute)
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_FEED_ARGS_HPP
+#define BOOST_FORMAT_FEED_ARGS_HPP
+
+#include "boost/format/format_class.hpp"
+#include "boost/format/group.hpp"
+
+#include "boost/throw_exception.hpp"
+
+namespace boost {
+namespace io {
+namespace detail {
+namespace {
+
+ inline
+ void empty_buf(BOOST_IO_STD ostringstream & os) {
+ static const std::string emptyStr;
+ os.str(emptyStr);
+ }
+
+ void do_pad( std::string & s,
+ std::streamsize w,
+ const char c,
+ std::ios::fmtflags f,
+ bool center)
+ __attribute__ ((unused));
+
+ void do_pad( std::string & s,
+ std::streamsize w,
+ const char c,
+ std::ios::fmtflags f,
+ bool center)
+ // applies centered / left / right padding to the string s.
+ // Effects : string s is padded.
+ {
+ std::streamsize n=w-s.size();
+ if(n<=0) {
+ return;
+ }
+ if(center)
+ {
+ s.reserve(w); // allocate once for the 2 inserts
+ const std::streamsize n1 = n /2, n0 = n - n1;
+ s.insert(s.begin(), n0, c);
+ s.append(n1, c);
+ }
+ else
+ {
+ if(f & std::ios::left) {
+ s.append(n, c);
+ }
+ else {
+ s.insert(s.begin(), n, c);
+ }
+ }
+ } // -do_pad(..)
+
+
+ template<class T> inline
+ void put_head(BOOST_IO_STD ostream& , const T& ) {
+ }
+
+ template<class T> inline
+ void put_head( BOOST_IO_STD ostream& os, const group1<T>& x ) {
+ os << group_head(x.a1_); // send the first N-1 items, not the last
+ }
+
+ template<class T> inline
+ void put_last( BOOST_IO_STD ostream& os, const T& x ) {
+ os << x ;
+ }
+
+ template<class T> inline
+ void put_last( BOOST_IO_STD ostream& os, const group1<T>& x ) {
+ os << group_last(x.a1_); // this selects the last element
+ }
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+ template<class T> inline
+ void put_head( BOOST_IO_STD ostream& , T& ) {
+ }
+
+ template<class T> inline
+ void put_last( BOOST_IO_STD ostream& os, T& x ) {
+ os << x ;
+ }
+#endif
+
+
+
+
+template<class T>
+void put( T x,
+ const format_item& specs,
+ std::string & res,
+ BOOST_IO_STD ostringstream& oss_ )
+{
+ // does the actual conversion of x, with given params, into a string
+    // using the *supplied* stringstream. (the stream state is important)
+
+ typedef std::string string_t;
+ typedef format_item format_item_t;
+
+ stream_format_state prev_state(oss_);
+
+ specs.state_.apply_on(oss_);
+
+ // in case x is a group, apply the manip part of it,
+ // in order to find width
+ put_head( oss_, x );
+ empty_buf( oss_);
+
+ const std::streamsize w=oss_.width();
+ const std::ios::fmtflags fl=oss_.flags();
+ const bool internal = (fl & std::ios::internal) != 0;
+ const bool two_stepped_padding = internal
+ && ! ( specs.pad_scheme_ & format_item_t::spacepad )
+ && specs.truncate_ < 0 ;
+
+
+ if(! two_stepped_padding)
+ {
+ if(w>0) // handle simple padding via do_pad, not natively in stream
+ oss_.width(0);
+ put_last( oss_, x);
+ res = oss_.str();
+
+ if (specs.truncate_ >= 0)
+ res.erase(specs.truncate_);
+
+ // complex pads :
+ if(specs.pad_scheme_ & format_item_t::spacepad)
+ {
+ if( res.size()==0 || ( res[0]!='+' && res[0]!='-' ))
+ {
+ res.insert(res.begin(), 1, ' '); // insert 1 space at pos 0
+ }
+ }
+ if(w > 0) // need do_pad
+ {
+ do_pad(res,w,oss_.fill(), fl, (specs.pad_scheme_ & format_item_t::centered) !=0 );
+ }
+ }
+ else // 2-stepped padding
+ {
+ put_last( oss_, x); // oss_.width() may result in padding.
+ res = oss_.str();
+
+ if (specs.truncate_ >= 0)
+ res.erase(specs.truncate_);
+
+ if( res.size() - w > 0)
+ { // length w exceeded
+            // either it was multi-output with the first output padding up the whole width,
+            // or it was one big arg and we are fine.
+ empty_buf( oss_);
+ oss_.width(0);
+ put_last(oss_, x );
+ string_t tmp = oss_.str(); // minimal-length output
+ std::streamsize d;
+ if( (d=w - tmp.size()) <=0 )
+ {
+ // minimal length is already >= w, so no padding (cool!)
+ res.swap(tmp);
+ }
+ else
+ { // hum.. we need to pad (it was necessarily multi-output)
+ typedef typename string_t::size_type size_type;
+ size_type i = 0;
+ while( i<tmp.size() && tmp[i] == res[i] ) // find where we should pad.
+ ++i;
+ tmp.insert(i, static_cast<size_type>( d ), oss_.fill());
+ res.swap( tmp );
+ }
+ }
+ else
+ { // okay, only one thing was printed and padded, so res is fine.
+ }
+ }
+
+ prev_state.apply_on(oss_);
+ empty_buf( oss_);
+ oss_.clear();
+} // end- put(..)
+
+
+} // local namespace
+
+
+
+
+
+template<class T>
+void distribute(basic_format& self, T x)
+    // call put(x, ..) on every occurrence of the current argument :
+{
+ if(self.cur_arg_ >= self.num_args_)
+ {
+ if( self.exceptions() & too_many_args_bit )
+ boost::throw_exception(too_many_args()); // too many variables have been supplied !
+ else return;
+ }
+ for(unsigned long i=0; i < self.items_.size(); ++i)
+ {
+ if(self.items_[i].argN_ == self.cur_arg_)
+ {
+ put<T> (x, self.items_[i], self.items_[i].res_, self.oss_ );
+ }
+ }
+}
+
+template<class T>
+basic_format& feed(basic_format& self, T x)
+{
+ if(self.dumped_) self.clear();
+ distribute<T> (self, x);
+ ++self.cur_arg_;
+ if(self.bound_.size() != 0)
+ {
+ while( self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_] )
+ ++self.cur_arg_;
+ }
+
+ // this arg is finished, reset the stream's format state
+ self.state0_.apply_on(self.oss_);
+ return self;
+}
+
+
+} // namespace detail
+} // namespace io
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_FEED_ARGS_HPP
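
As the comment on distribute() above says, one argument is written into every directive that refers to it. A small sketch of what that means for a caller, under the same include and link assumptions as the earlier examples:

    #include <iostream>
    #include <boost/format.hpp>

    int main()
    {
        // feed() advances cur_arg_ once per '%' application, while distribute()
        // fills in every item whose argN_ matches, so %1% may repeat freely.
        std::cout << boost::format("%1%, %1% again, and %1% once more\n") % "echo";
    }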
diff --git a/src/boost/format/format_class.hpp b/src/boost/format/format_class.hpp
new file mode 100644
index 000000000..6875623ac
--- /dev/null
+++ b/src/boost/format/format_class.hpp
@@ -0,0 +1,135 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// format_class.hpp : class interface
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_CLASS_HPP
+#define BOOST_FORMAT_CLASS_HPP
+
+#include <vector>
+#include <string>
+
+#include <boost/format/format_fwd.hpp>
+#include <boost/format/internals_fwd.hpp>
+
+#include <boost/format/internals.hpp>
+
+namespace boost {
+
+class basic_format
+{
+public:
+ typedef std::string string_t;
+ typedef BOOST_IO_STD ostringstream internal_stream_t;
+private:
+ typedef BOOST_IO_STD ostream stream_t;
+ typedef io::detail::stream_format_state stream_format_state;
+ typedef io::detail::format_item format_item_t;
+
+public:
+ basic_format(const char* str);
+ basic_format(const string_t& s);
+#ifndef BOOST_NO_STD_LOCALE
+ basic_format(const char* str, const std::locale & loc);
+ basic_format(const string_t& s, const std::locale & loc);
+#endif // no locale
+ basic_format(const basic_format& x);
+ basic_format& operator= (const basic_format& x);
+
+ basic_format& clear(); // empty the string buffers (except bound arguments, see clear_binds() )
+
+ // pass arguments through those operators :
+ template<class T> basic_format& operator%(const T& x)
+ {
+ return io::detail::feed<const T&>(*this,x);
+ }
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+ template<class T> basic_format& operator%(T& x)
+ {
+ return io::detail::feed<T&>(*this,x);
+ }
+#endif
+
+
+ // system for binding arguments :
+ template<class T>
+ basic_format& bind_arg(int argN, const T& val)
+ {
+ return io::detail::bind_arg_body(*this, argN, val);
+ }
+ basic_format& clear_bind(int argN);
+ basic_format& clear_binds();
+
+ // modify the params of a directive, by applying a manipulator :
+ template<class T>
+ basic_format& modify_item(int itemN, const T& manipulator)
+ {
+ return io::detail::modify_item_body(*this, itemN, manipulator) ;
+ }
+
+ // Choosing which errors will throw exceptions :
+ unsigned char exceptions() const;
+ unsigned char exceptions(unsigned char newexcept);
+
+ // final output
+ string_t str() const;
+ friend BOOST_IO_STD ostream&
+ operator<< ( BOOST_IO_STD ostream& , const basic_format& );
+
+
+ template<class T> friend basic_format&
+ io::detail::feed(basic_format&, T);
+
+ template<class T> friend
+ void io::detail::distribute(basic_format&, T);
+
+ template<class T> friend
+ basic_format& io::detail::modify_item_body(basic_format&, int, const T&);
+
+ template<class T> friend
+ basic_format& io::detail::bind_arg_body(basic_format&, int, const T&);
+
+// make the members private only if the friend templates are supported
+private:
+
+ // flag bits, used for style_
+ enum style_values { ordered = 1, // set only if all directives are positional directives
+ special_needs = 4 };
+
+ // parse the format string :
+ void parse(const string_t&);
+
+ int style_; // style of format-string : positional or not, etc
+    int cur_arg_; // keep track of which argument comes next
+ int num_args_; // number of expected arguments
+ mutable bool dumped_; // true only after call to str() or <<
+ std::vector<format_item_t> items_; // vector of directives (aka items)
+ string_t prefix_; // piece of string to insert before first item
+
+ std::vector<bool> bound_; // stores which arguments were bound
+ // size = num_args OR zero
+ internal_stream_t oss_; // the internal stream.
+ stream_format_state state0_; // reference state for oss_
+ unsigned char exceptions_;
+}; // class basic_format
+
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_CLASS_HPP
diff --git a/src/boost/format/format_fwd.hpp b/src/boost/format/format_fwd.hpp
new file mode 100644
index 000000000..97c55f668
--- /dev/null
+++ b/src/boost/format/format_fwd.hpp
@@ -0,0 +1,49 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// format_fwd.hpp : forward declarations, for primary header format.hpp
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_FWD_HPP
+#define BOOST_FORMAT_FWD_HPP
+
+#include <string>
+#include <iosfwd>
+
+namespace boost {
+
+class basic_format;
+
+typedef basic_format format;
+
+namespace io {
+enum format_error_bits { bad_format_string_bit = 1,
+ too_few_args_bit = 2, too_many_args_bit = 4,
+ out_of_range_bit = 8,
+ all_error_bits = 255, no_error_bits=0 };
+
+// Conversion: format to string
+std::string str(const basic_format& ) ;
+
+} // namespace io
+
+
+BOOST_IO_STD ostream&
+operator<<( BOOST_IO_STD ostream&, const basic_format&);
+
+
+} // namespace boost
+
+#endif // BOOST_FORMAT_FWD_HPP
diff --git a/src/boost/format/format_implementation.cc b/src/boost/format/format_implementation.cc
new file mode 100644
index 000000000..aa191afe1
--- /dev/null
+++ b/src/boost/format/format_implementation.cc
@@ -0,0 +1,256 @@
+// -*- C++ -*-
+// Boost general library format ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// format_implementation.hpp Implementation of the basic_format class
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_IMPLEMENTATION_HPP
+#define BOOST_FORMAT_IMPLEMENTATION_HPP
+
+#include <boost/throw_exception.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+
+namespace boost {
+
+// -------- format:: -------------------------------------------
+basic_format::basic_format(const char* str)
+ : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+ items_(), oss_(), exceptions_(io::all_error_bits)
+{
+ state0_.set_by_stream(oss_);
+ string_t emptyStr;
+ if( !str) str = emptyStr.c_str();
+ parse( str );
+}
+
+#ifndef BOOST_NO_STD_LOCALE
+basic_format::basic_format(const char* str, const std::locale & loc)
+ : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+ items_(), oss_(), exceptions_(io::all_error_bits)
+{
+ oss_.imbue( loc );
+ state0_.set_by_stream(oss_);
+ string_t emptyStr;
+ if( !str) str = emptyStr.c_str();
+ parse( str );
+}
+
+basic_format::basic_format(const string_t& s, const std::locale & loc)
+ : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+ items_(), oss_(), exceptions_(io::all_error_bits)
+{
+ oss_.imbue( loc );
+ state0_.set_by_stream(oss_);
+ parse(s);
+}
+#endif //BOOST_NO_STD_LOCALE
+
+basic_format::basic_format(const string_t& s)
+ : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+ items_(), oss_(), exceptions_(io::all_error_bits)
+{
+ state0_.set_by_stream(oss_);
+ parse(s);
+}
+
+basic_format:: basic_format(const basic_format& x)
+ : style_(x.style_), cur_arg_(x.cur_arg_), num_args_(x.num_args_), dumped_(false),
+ items_(x.items_), prefix_(x.prefix_), bound_(x.bound_),
+ oss_(), // <- we obviously can't copy x.oss_
+ state0_(x.state0_), exceptions_(x.exceptions_)
+{
+ state0_.apply_on(oss_);
+}
+
+basic_format& basic_format::operator= (const basic_format& x)
+{
+ if(this == &x)
+ return *this;
+ state0_ = x.state0_;
+ state0_.apply_on(oss_);
+
+ // plus all the other (trivial) assignments :
+ exceptions_ = x.exceptions_;
+ items_ = x.items_;
+ prefix_ = x.prefix_;
+ bound_=x.bound_;
+ style_=x.style_;
+ cur_arg_=x.cur_arg_;
+ num_args_=x.num_args_;
+ dumped_=x.dumped_;
+ return *this;
+}
+
+
+unsigned char basic_format::exceptions() const
+{
+ return exceptions_;
+}
+
+unsigned char basic_format::exceptions(unsigned char newexcept)
+{
+ unsigned char swp = exceptions_;
+ exceptions_ = newexcept;
+ return swp;
+}
+
+
+basic_format& basic_format ::clear()
+ // empty the string buffers (except bound arguments, see clear_binds() )
+ // and make the format object ready for formatting a new set of arguments
+{
+ BOOST_ASSERT( bound_.size()==0 || num_args_ == static_cast<int>(bound_.size()) );
+
+ for(unsigned long i=0; i<items_.size(); ++i){
+ items_[i].state_ = items_[i].ref_state_;
+ // clear converted strings only if the corresponding argument is not bound :
+ if( bound_.size()==0 || !bound_[ items_[i].argN_ ] ) items_[i].res_.resize(0);
+ }
+ cur_arg_=0; dumped_=false;
+ // maybe first arg is bound:
+ if(bound_.size() != 0)
+ {
+ while(cur_arg_ < num_args_ && bound_[cur_arg_] ) ++cur_arg_;
+ }
+ return *this;
+}
+
+basic_format& basic_format ::clear_binds()
+ // cancel all bindings, and clear()
+{
+ bound_.resize(0);
+ clear();
+ return *this;
+}
+
+basic_format& basic_format::clear_bind(int argN)
+ // cancel the binding of ONE argument, and clear()
+{
+ if(argN<1 || argN > num_args_ || bound_.size()==0 || !bound_[argN-1] )
+ {
+ if( exceptions() & io::out_of_range_bit )
+ boost::throw_exception(io::out_of_range()); // arg not in range.
+ else return *this;
+ }
+ bound_[argN-1]=false;
+ clear();
+ return *this;
+}
+
+
+
+std::string basic_format::str() const
+{
+ dumped_=true;
+ if(items_.size()==0)
+ return prefix_;
+ if( cur_arg_ < num_args_)
+ if( exceptions() & io::too_few_args_bit )
+ boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !
+
+ unsigned long sz = prefix_.size();
+ unsigned long i;
+ for(i=0; i < items_.size(); ++i)
+ sz += items_[i].res_.size() + items_[i].appendix_.size();
+ string_t res;
+ res.reserve(sz);
+
+ res += prefix_;
+ for(i=0; i < items_.size(); ++i)
+ {
+ const format_item_t& item = items_[i];
+ res += item.res_;
+ if( item.argN_ == format_item_t::argN_tabulation)
+ {
+ BOOST_ASSERT( item.pad_scheme_ & format_item_t::tabulation);
+ std::streamsize n = item.state_.width_ - res.size();
+ if( n > 0 )
+ res.append( n, item.state_.fill_ );
+ }
+ res += item.appendix_;
+ }
+ return res;
+}
+
+namespace io {
+namespace detail {
+
+template<class T>
+basic_format& bind_arg_body( basic_format& self,
+ int argN,
+ const T& val)
+ // bind one argument to a fixed value
+ // this is persistent over clear() calls, thus also over str() and <<
+{
+ if(self.dumped_) self.clear(); // needed, because we will modify cur_arg_..
+ if(argN<1 || argN > self.num_args_)
+ {
+ if( self.exceptions() & io::out_of_range_bit )
+ boost::throw_exception(io::out_of_range()); // arg not in range.
+ else return self;
+ }
+ if(self.bound_.size()==0)
+ self.bound_.assign(self.num_args_,false);
+ else
+ BOOST_ASSERT( self.num_args_ == static_cast<signed int>(self.bound_.size()) );
+ int o_cur_arg = self.cur_arg_;
+ self.cur_arg_ = argN-1; // arrays begin at 0
+
+ self.bound_[self.cur_arg_]=false; // if already set, we unset and re-sets..
+ self.operator%(val); // put val at the right place, because cur_arg is set
+
+
+ // Now re-position cur_arg before leaving :
+ self.cur_arg_ = o_cur_arg;
+ self.bound_[argN-1]=true;
+ if(self.cur_arg_ == argN-1 )
+ // hum, now this arg is bound, so move to next free arg
+ {
+ while(self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_]) ++self.cur_arg_;
+ }
+    // In any case, we either have all args, or are on a non-bound arg :
+ BOOST_ASSERT( self.cur_arg_ >= self.num_args_ || ! self.bound_[self.cur_arg_]);
+ return self;
+}
+
+template<class T>
+basic_format& modify_item_body( basic_format& self,
+ int itemN,
+ const T& manipulator)
+ // applies a manipulator to the format_item describing a given directive.
+ // this is a permanent change, clear or clear_binds won't cancel that.
+{
+ if(itemN<1 || itemN >= static_cast<signed int>(self.items_.size() ))
+ {
+ if( self.exceptions() & io::out_of_range_bit )
+ boost::throw_exception(io::out_of_range()); // item not in range.
+ else return self;
+ }
+ self.items_[itemN-1].ref_state_.apply_manip( manipulator );
+ self.items_[itemN-1].state_ = self.items_[itemN-1].ref_state_;
+ return self;
+}
+
+} // namespace detail
+
+} // namespace io
+
+} // namespace boost
+
+
+
+#endif // BOOST_FORMAT_IMPLEMENTATION_HPP
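
The bind_arg()/clear()/clear_binds() trio implemented above lets one argument stay fixed while the same format object is reused. A sketch of the intended call pattern, under the same include and link assumptions as before:

    #include <iostream>
    #include <boost/format.hpp>

    int main()
    {
        boost::format fmt("%1% (uid %2%)");
        fmt.bind_arg(2, 30000);                  // %2% is now fixed and survives clear()
        std::cout << (fmt % "alice") << '\n';    // only %1% still needs to be fed
        fmt.clear();                             // reset converted pieces, keep the binding
        std::cout << (fmt % "bob") << '\n';
        fmt.clear_binds();                       // drop the binding as well
    }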
diff --git a/src/boost/format/free_funcs.cc b/src/boost/format/free_funcs.cc
new file mode 100644
index 000000000..151db37a0
--- /dev/null
+++ b/src/boost/format/free_funcs.cc
@@ -0,0 +1,71 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// free_funcs.hpp : implementation of the free functions declared in namespace format
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_FUNCS_HPP
+#define BOOST_FORMAT_FUNCS_HPP
+
+#include "boost/format.hpp"
+#include "boost/throw_exception.hpp"
+
+namespace boost {
+
+namespace io {
+ inline
+ std::string str(const basic_format& f)
+    // adds up all pieces of strings and converted items, and returns the formatted string
+ {
+ return f.str();
+ }
+} // - namespace io
+
+BOOST_IO_STD ostream&
+operator<<( BOOST_IO_STD ostream& os,
+ const boost::basic_format& f)
+ // effect: "return os << str(f);" but we can try to do it faster
+{
+ typedef boost::basic_format format_t;
+ if(f.items_.size()==0)
+ os << f.prefix_;
+ else {
+ if(f.cur_arg_ < f.num_args_)
+ if( f.exceptions() & io::too_few_args_bit )
+ boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !
+ if(f.style_ & format_t::special_needs)
+ os << f.str();
+ else {
+            // else we don't have to count chars output, so we dump directly to os :
+ os << f.prefix_;
+ for(unsigned long i=0; i<f.items_.size(); ++i)
+ {
+ const format_t::format_item_t& item = f.items_[i];
+ os << item.res_;
+ os << item.appendix_;
+
+ }
+ }
+ }
+ f.dumped_=true;
+ return os;
+}
+
+
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_FUNCS_HPP
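
These free functions give the two usual ways of consuming a format object; a short sketch, with the same assumptions as the earlier examples:

    #include <iostream>
    #include <boost/format.hpp>

    int main()
    {
        // io::str() collects the result into a std::string in one expression.
        std::string path = boost::io::str(boost::format("%1%/%2%") % "store" % "example");

        // operator<< streams the already-converted pieces directly to the ostream.
        std::cout << boost::format("copied %1% paths\n") % 3;
    }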
diff --git a/src/boost/format/group.hpp b/src/boost/format/group.hpp
new file mode 100644
index 000000000..ac63f3f0b
--- /dev/null
+++ b/src/boost/format/group.hpp
@@ -0,0 +1,680 @@
+
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+
+// group.hpp : encapsulates a group of manipulators along with an argument
+//
+// group_head : cut the last element of a group out.
+// (is overloaded below on each type of group)
+
+// group_last : returns the last element of a group
+// (is overloaded below on each type of group)
+
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_GROUP_HPP
+#define BOOST_FORMAT_GROUP_HPP
+
+
+namespace boost {
+namespace io {
+
+
+namespace detail {
+
+
+// empty group, but useful nonetheless.
+struct group0
+{
+ group0() {}
+};
+
+template <class Ch, class Tr>
+inline
+BOOST_IO_STD ostream&
+operator << ( BOOST_IO_STD ostream& os,
+ const group0& )
+{
+ return os;
+}
+
+template <class T1>
+struct group1
+{
+ T1 a1_;
+ group1(T1 a1)
+ : a1_(a1)
+ {}
+};
+
+template <class Ch, class Tr, class T1>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group1<T1>& x)
+{
+ os << x.a1_;
+ return os;
+}
+
+
+
+
+template <class T1,class T2>
+struct group2
+{
+ T1 a1_;
+ T2 a2_;
+ group2(T1 a1,T2 a2)
+ : a1_(a1),a2_(a2)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group2<T1,T2>& x)
+{
+ os << x.a1_<< x.a2_;
+ return os;
+}
+
+template <class T1,class T2,class T3>
+struct group3
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ group3(T1 a1,T2 a2,T3 a3)
+ : a1_(a1),a2_(a2),a3_(a3)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group3<T1,T2,T3>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4>
+struct group4
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ group4(T1 a1,T2 a2,T3 a3,T4 a4)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group4<T1,T2,T3,T4>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5>
+struct group5
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ group5(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group5<T1,T2,T3,T4,T5>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+struct group6
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ T6 a6_;
+ group6(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group6<T1,T2,T3,T4,T5,T6>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+struct group7
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ T6 a6_;
+ T7 a7_;
+ group7(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group7<T1,T2,T3,T4,T5,T6,T7>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+struct group8
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ T6 a6_;
+ T7 a7_;
+ T8 a8_;
+ group8(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group8<T1,T2,T3,T4,T5,T6,T7,T8>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+struct group9
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ T6 a6_;
+ T7 a7_;
+ T8 a8_;
+ T9 a9_;
+ group9(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_;
+ return os;
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+struct group10
+{
+ T1 a1_;
+ T2 a2_;
+ T3 a3_;
+ T4 a4_;
+ T5 a5_;
+ T6 a6_;
+ T7 a7_;
+ T8 a8_;
+ T9 a9_;
+ T10 a10_;
+ group10(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9,T10 a10)
+ : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9),a10_(a10)
+ {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+ const group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10>& x)
+{
+ os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_<< x.a10_;
+ return os;
+}
+
+
+
+
+template <class T1,class T2>
+inline
+group1<T1>
+group_head( group2<T1,T2> const& x)
+{
+ return group1<T1> (x.a1_);
+}
+
+template <class T1,class T2>
+inline
+group1<T2>
+group_last( group2<T1,T2> const& x)
+{
+ return group1<T2> (x.a2_);
+}
+
+
+
+template <class T1,class T2,class T3>
+inline
+group2<T1,T2>
+group_head( group3<T1,T2,T3> const& x)
+{
+ return group2<T1,T2> (x.a1_,x.a2_);
+}
+
+template <class T1,class T2,class T3>
+inline
+group1<T3>
+group_last( group3<T1,T2,T3> const& x)
+{
+ return group1<T3> (x.a3_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4>
+inline
+group3<T1,T2,T3>
+group_head( group4<T1,T2,T3,T4> const& x)
+{
+ return group3<T1,T2,T3> (x.a1_,x.a2_,x.a3_);
+}
+
+template <class T1,class T2,class T3,class T4>
+inline
+group1<T4>
+group_last( group4<T1,T2,T3,T4> const& x)
+{
+ return group1<T4> (x.a4_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5>
+inline
+group4<T1,T2,T3,T4>
+group_head( group5<T1,T2,T3,T4,T5> const& x)
+{
+ return group4<T1,T2,T3,T4> (x.a1_,x.a2_,x.a3_,x.a4_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5>
+inline
+group1<T5>
+group_last( group5<T1,T2,T3,T4,T5> const& x)
+{
+ return group1<T5> (x.a5_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+group5<T1,T2,T3,T4,T5>
+group_head( group6<T1,T2,T3,T4,T5,T6> const& x)
+{
+ return group5<T1,T2,T3,T4,T5> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+group1<T6>
+group_last( group6<T1,T2,T3,T4,T5,T6> const& x)
+{
+ return group1<T6> (x.a6_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+group6<T1,T2,T3,T4,T5,T6>
+group_head( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
+{
+ return group6<T1,T2,T3,T4,T5,T6> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+group1<T7>
+group_last( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
+{
+ return group1<T7> (x.a7_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+group7<T1,T2,T3,T4,T5,T6,T7>
+group_head( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
+{
+ return group7<T1,T2,T3,T4,T5,T6,T7> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+group1<T8>
+group_last( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
+{
+ return group1<T8> (x.a8_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+group8<T1,T2,T3,T4,T5,T6,T7,T8>
+group_head( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
+{
+ return group8<T1,T2,T3,T4,T5,T6,T7,T8> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+group1<T9>
+group_last( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
+{
+ return group1<T9> (x.a9_);
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>
+group_head( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
+{
+ return group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_,x.a9_);
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+group1<T10>
+group_last( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
+{
+ return group1<T10> (x.a10_);
+}
+
+
+
+
+
+} // namespace detail
+
+
+
+// helper functions
+
+
+inline detail::group1< detail::group0 >
+group() { return detail::group1< detail::group0 > ( detail::group0() ); }
+
+template <class T1, class Var>
+inline
+detail::group1< detail::group2<T1, Var const&> >
+ group(T1 a1, Var const& var)
+{
+ return detail::group1< detail::group2<T1, Var const&> >
+ ( detail::group2<T1, Var const&>
+ (a1, var)
+ );
+}
+
+template <class T1,class T2, class Var>
+inline
+detail::group1< detail::group3<T1,T2, Var const&> >
+ group(T1 a1,T2 a2, Var const& var)
+{
+ return detail::group1< detail::group3<T1,T2, Var const&> >
+ ( detail::group3<T1,T2, Var const&>
+ (a1,a2, var)
+ );
+}
+
+template <class T1,class T2,class T3, class Var>
+inline
+detail::group1< detail::group4<T1,T2,T3, Var const&> >
+ group(T1 a1,T2 a2,T3 a3, Var const& var)
+{
+ return detail::group1< detail::group4<T1,T2,T3, Var const&> >
+ ( detail::group4<T1,T2,T3, Var const&>
+ (a1,a2,a3, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4, class Var>
+inline
+detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4, Var const& var)
+{
+ return detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
+ ( detail::group5<T1,T2,T3,T4, Var const&>
+ (a1,a2,a3,a4, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5, class Var>
+inline
+detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var const& var)
+{
+ return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
+ ( detail::group6<T1,T2,T3,T4,T5, Var const&>
+ (a1,a2,a3,a4,a5, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6, class Var>
+inline
+detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var const& var)
+{
+ return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
+ ( detail::group7<T1,T2,T3,T4,T5,T6, Var const&>
+ (a1,a2,a3,a4,a5,a6, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var>
+inline
+detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var const& var)
+{
+ return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
+ ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&>
+ (a1,a2,a3,a4,a5,a6,a7, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var>
+inline
+detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var const& var)
+{
+ return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
+ ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&>
+ (a1,a2,a3,a4,a5,a6,a7,a8, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var>
+inline
+detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var const& var)
+{
+ return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
+ ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&>
+ (a1,a2,a3,a4,a5,a6,a7,a8,a9, var)
+ );
+}
+
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+
+template <class T1, class Var>
+inline
+detail::group1< detail::group2<T1, Var&> >
+ group(T1 a1, Var& var)
+{
+ return detail::group1< detail::group2<T1, Var&> >
+ ( detail::group2<T1, Var&>
+ (a1, var)
+ );
+}
+
+template <class T1,class T2, class Var>
+inline
+detail::group1< detail::group3<T1,T2, Var&> >
+ group(T1 a1,T2 a2, Var& var)
+{
+ return detail::group1< detail::group3<T1,T2, Var&> >
+ ( detail::group3<T1,T2, Var&>
+ (a1,a2, var)
+ );
+}
+
+template <class T1,class T2,class T3, class Var>
+inline
+detail::group1< detail::group4<T1,T2,T3, Var&> >
+ group(T1 a1,T2 a2,T3 a3, Var& var)
+{
+ return detail::group1< detail::group4<T1,T2,T3, Var&> >
+ ( detail::group4<T1,T2,T3, Var&>
+ (a1,a2,a3, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4, class Var>
+inline
+detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4, Var& var)
+{
+ return detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
+ ( detail::group5<T1,T2,T3,T4, Var&>
+ (a1,a2,a3,a4, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5, class Var>
+inline
+detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var& var)
+{
+ return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
+ ( detail::group6<T1,T2,T3,T4,T5, Var&>
+ (a1,a2,a3,a4,a5, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6, class Var>
+inline
+detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var& var)
+{
+ return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
+ ( detail::group7<T1,T2,T3,T4,T5,T6, Var&>
+ (a1,a2,a3,a4,a5,a6, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var>
+inline
+detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var& var)
+{
+ return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
+ ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&>
+ (a1,a2,a3,a4,a5,a6,a7, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var>
+inline
+detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var& var)
+{
+ return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
+ ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&>
+ (a1,a2,a3,a4,a5,a6,a7,a8, var)
+ );
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var>
+inline
+detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
+ group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var& var)
+{
+ return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
+ ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&>
+ (a1,a2,a3,a4,a5,a6,a7,a8,a9, var)
+ );
+}
+
+
+#endif //end- #ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+
+
+} // namespace io
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_GROUP_HPP
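
The group() helpers bundle stream manipulators with a single argument: put_head() in feed_args.hpp applies the manipulators to the internal stream and put_last() then writes the value, so the manipulators affect only that one directive. A sketch, with the same assumptions as above:

    #include <iostream>
    #include <boost/format.hpp>

    int main()
    {
        // std::hex and std::showbase apply only to the second directive.
        std::cout << boost::format("%1% in hex is %2%\n")
                     % 255
                     % boost::io::group(std::hex, std::showbase, 255);
        // prints: 255 in hex is 0xff
    }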
diff --git a/src/boost/format/internals.hpp b/src/boost/format/internals.hpp
new file mode 100644
index 000000000..d25eb4c86
--- /dev/null
+++ b/src/boost/format/internals.hpp
@@ -0,0 +1,167 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// internals.hpp : internal structs. included by format.hpp
+// stream_format_state, and format_item
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_INTERNALS_HPP
+#define BOOST_FORMAT_INTERNALS_HPP
+
+
+#include <string>
+#include <sstream>
+
+namespace boost {
+namespace io {
+namespace detail {
+
+
+// --------------
+// set of params that define the format state of a stream
+
+struct stream_format_state
+{
+ typedef std::ios basic_ios;
+
+ std::streamsize width_;
+ std::streamsize precision_;
+ char fill_;
+ std::ios::fmtflags flags_;
+
+ stream_format_state() : width_(-1), precision_(-1), fill_(0), flags_(std::ios::dec) {}
+ stream_format_state(basic_ios& os) {set_by_stream(os); }
+
+ void apply_on(basic_ios & os) const; //- applies format_state to the stream
+ template<class T> void apply_manip(T manipulator) //- modifies state by applying manipulator.
+ { apply_manip_body<T>( *this, manipulator) ; }
+ void reset(); //- sets to default state.
+ void set_by_stream(const basic_ios& os); //- sets to os's state.
+};
+
+
+
+// --------------
+// format_item : stores all parameters that can be defined by directives in the format-string
+
+struct format_item
+{
+ enum pad_values { zeropad = 1, spacepad =2, centered=4, tabulation = 8 };
+
+ enum arg_values { argN_no_posit = -1, // non-positional directive. argN will be set later.
+ argN_tabulation = -2, // tabulation directive. (no argument read)
+ argN_ignored = -3 // ignored directive. (no argument read)
+ };
+ typedef BOOST_IO_STD ios basic_ios;
+ typedef detail::stream_format_state stream_format_state;
+ typedef std::string string_t;
+ typedef BOOST_IO_STD ostringstream internal_stream_t;
+
+
+ int argN_; //- argument number (starts at 0, eg : %1 => argN=0)
+ // negative values are used for items that don't process
+ // an argument
+ string_t res_; //- result of the formatting of this item
+ string_t appendix_; //- piece of string between this item and the next
+
+ stream_format_state ref_state_;// set by parsing the format_string, is only affected by modify_item
+ stream_format_state state_; // always same as ref_state, _unless_ modified by manipulators 'group(..)'
+
+ // non-stream format-state parameters
+ signed int truncate_; //- is >=0 for directives like %.5s (take 5 chars from the string)
+ unsigned int pad_scheme_; //- several possible padding schemes can mix. see pad_values
+
+ format_item() : argN_(argN_no_posit), truncate_(-1), pad_scheme_(0) {}
+
+ void compute_states(); // sets states according to truncate and pad_scheme.
+};
+
+
+
+// -----------------------------------------------------------
+// Definitions
+// -----------------------------------------------------------
+
+// --- stream_format_state:: -------------------------------------------
+inline
+void stream_format_state::apply_on(basic_ios & os) const
+ // set the state of this stream according to our params
+{
+ if(width_ != -1)
+ os.width(width_);
+ if(precision_ != -1)
+ os.precision(precision_);
+ if(fill_ != 0)
+ os.fill(fill_);
+ os.flags(flags_);
+}
+
+inline
+void stream_format_state::set_by_stream(const basic_ios& os)
+ // set our params according to the state of this stream
+{
+ flags_ = os.flags();
+ width_ = os.width();
+ precision_ = os.precision();
+ fill_ = os.fill();
+}
+
+template<class T> inline
+void apply_manip_body( stream_format_state& self,
+ T manipulator)
+ // modify our params according to the manipulator
+{
+ BOOST_IO_STD stringstream ss;
+ self.apply_on( ss );
+ ss << manipulator;
+ self.set_by_stream( ss );
+}
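+
+// Illustrative usage sketch (not part of the original sources; assumes
+// <iomanip> is available for std::setw):
+//   stream_format_state st;
+//   st.apply_manip(std::setw(8));   // afterwards st.width_ == 8
+//   st.apply_manip(std::hex);       // afterwards std::ios::hex is set in st.flags_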
+
+inline
+void stream_format_state::reset()
+ // set our params to standard's default state
+{
+ width_=-1; precision_=-1; fill_=0;
+ flags_ = std::ios::dec;
+}
+
+
+// --- format_items:: -------------------------------------------
+inline
+void format_item::compute_states()
+ // reflect pad_scheme_ on state_ and ref_state_
+    // because some pad_schemes have complex consequences on several state params.
+{
+ if(pad_scheme_ & zeropad)
+ {
+ if(ref_state_.flags_ & std::ios::left)
+ {
+ pad_scheme_ = pad_scheme_ & (~zeropad); // ignore zeropad in left alignment
+ }
+ else
+ {
+ ref_state_.fill_='0';
+ ref_state_.flags_ |= std::ios::internal;
+ }
+ }
+ state_ = ref_state_;
+}
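+
+// Worked example (illustrative, not from the original sources): for a
+// directive parsed as "%-08d" both 'left' and 'zeropad' end up set, and
+// compute_states() drops zeropad; for "%08d" the fill becomes '0' and
+// std::ios::internal is set, so -42 with width 8 prints as "-0000042".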
+
+
+} } } // namespaces boost :: io :: detail
+
+
+#endif // BOOST_FORMAT_INTERNALS_HPP
diff --git a/src/boost/format/internals_fwd.hpp b/src/boost/format/internals_fwd.hpp
new file mode 100644
index 000000000..a8ebf7c3a
--- /dev/null
+++ b/src/boost/format/internals_fwd.hpp
@@ -0,0 +1,65 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// internals_fwd.hpp : forward declarations, for internal headers
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_INTERNAL_FWD_HPP
+#define BOOST_FORMAT_INTERNAL_FWD_HPP
+
+#include "boost/format/format_fwd.hpp"
+
+
+namespace boost {
+namespace io {
+
+namespace detail {
+ struct stream_format_state;
+ struct format_item;
+}
+
+
+namespace detail {
+
+ // these functions were intended as methods,
+    // but MSVC has problems with template member functions :
+
+ // defined in format_implementation.hpp :
+ template<class T>
+ basic_format& modify_item_body( basic_format& self,
+ int itemN, const T& manipulator);
+
+ template<class T>
+ basic_format& bind_arg_body( basic_format& self,
+ int argN, const T& val);
+
+ template<class T>
+ void apply_manip_body( stream_format_state& self,
+ T manipulator);
+
+ // argument feeding (defined in feed_args.hpp ) :
+ template<class T>
+ void distribute(basic_format& self, T x);
+
+ template<class T>
+ basic_format& feed(basic_format& self, T x);
+
+} // namespace detail
+
+} // namespace io
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_INTERNAL_FWD_HPP
diff --git a/src/boost/format/local.mk b/src/boost/format/local.mk
new file mode 100644
index 000000000..3776eff38
--- /dev/null
+++ b/src/boost/format/local.mk
@@ -0,0 +1,7 @@
+libraries += libformat
+
+libformat_NAME = libnixformat
+
+libformat_DIR := $(d)
+
+libformat_SOURCES := $(wildcard $(d)/*.cc)
diff --git a/src/boost/format/macros_default.hpp b/src/boost/format/macros_default.hpp
new file mode 100644
index 000000000..4fd84a163
--- /dev/null
+++ b/src/boost/format/macros_default.hpp
@@ -0,0 +1,48 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// macros_default.hpp : configuration for the format library
+// provides default values for the stl workaround macros
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_MACROS_DEFAULT_HPP
+#define BOOST_FORMAT_MACROS_DEFAULT_HPP
+
+// *** This should go to "boost/config/suffix.hpp".
+
+#ifndef BOOST_IO_STD
+# define BOOST_IO_STD std::
+#endif
+
+// **** Workaround for io streams, stlport and msvc.
+#ifdef BOOST_IO_NEEDS_USING_DECLARATION
+namespace boost {
+ using std::char_traits;
+ using std::basic_ostream;
+ using std::basic_ostringstream;
+ namespace io {
+ using std::basic_ostream;
+ namespace detail {
+ using std::basic_ios;
+ using std::basic_ostream;
+ using std::basic_ostringstream;
+ }
+ }
+}
+#endif
+
+// ------------------------------------------------------------------------------
+
+#endif // BOOST_FORMAT_MACROS_DEFAULT_HPP
diff --git a/src/boost/format/parsing.cc b/src/boost/format/parsing.cc
new file mode 100644
index 000000000..34c36adeb
--- /dev/null
+++ b/src/boost/format/parsing.cc
@@ -0,0 +1,454 @@
+// -*- C++ -*-
+// Boost general library 'format' ---------------------------
+// See http://www.boost.org for updates, documentation, and revision history.
+
+// (C) Samuel Krempp 2001
+// krempp@crans.ens-cachan.fr
+// Permission to copy, use, modify, sell and
+// distribute this software is granted provided this copyright notice appears
+// in all copies. This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rudiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// parsing.hpp : implementation of the parsing member functions
+// ( parse, parse_printf_directive)
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_PARSING_HPP
+#define BOOST_FORMAT_PARSING_HPP
+
+
+#include <boost/format.hpp>
+#include <boost/throw_exception.hpp>
+#include <boost/assert.hpp>
+
+
+namespace boost {
+namespace io {
+namespace detail {
+
+ template<class Stream> inline
+ bool wrap_isdigit(char c, Stream &os)
+ {
+#ifndef BOOST_NO_LOCALE_ISIDIGIT
+ return std::isdigit(c, os.rdbuf()->getloc() );
+# else
+ using namespace std;
+ return isdigit(c);
+#endif
+ } //end- wrap_isdigit(..)
+
+ template<class Res> inline
+ Res str2int(const std::string& s,
+ std::string::size_type start,
+ BOOST_IO_STD ios &os,
+ const Res = Res(0) )
+ // Input : char string, with starting index
+ // a basic_ios& merely to call its widen/narrow member function in the desired locale.
+ // Effects : reads s[start:] and converts digits into an integral n, of type Res
+ // Returns : n
+ {
+ Res n = 0;
+ while(start<s.size() && wrap_isdigit(s[start], os) ) {
+ char cur_ch = s[start];
+ BOOST_ASSERT(cur_ch != 0 ); // since we called isdigit, this should not happen.
+ n *= 10;
+ n += cur_ch - '0'; // 22.2.1.1.2 of the C++ standard
+ ++start;
+ }
+ return n;
+ }
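+
+  // Example (illustrative; 'os' stands for any basic_ios used only for its
+  // locale): str2int("a12b", 1, os, int(0)) returns 12 -- scanning starts at
+  // index 1 and stops at the non-digit 'b'.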
+
+ void skip_asterisk(const std::string & buf,
+ std::string::size_type * pos_p,
+ BOOST_IO_STD ios &os)
+ // skip printf's "asterisk-fields" directives in the format-string buf
+ // Input : char string, with starting index *pos_p
+ // a basic_ios& merely to call its widen/narrow member function in the desired locale.
+ // Effects : advance *pos_p by skipping printf's asterisk fields.
+ // Returns : nothing
+ {
+ using namespace std;
+ BOOST_ASSERT( pos_p != 0);
+ if(*pos_p >= buf.size() ) return;
+ if(buf[ *pos_p]=='*') {
+ ++ (*pos_p);
+ while (*pos_p < buf.size() && wrap_isdigit(buf[*pos_p],os)) ++(*pos_p);
+ if(buf[*pos_p]=='$') ++(*pos_p);
+ }
+ }
+
+
+ inline void maybe_throw_exception( unsigned char exceptions)
+ // auxiliary func called by parse_printf_directive
+ // for centralising error handling
+    // it either throws if the user sets the corresponding flag, or does nothing.
+ {
+ if(exceptions & io::bad_format_string_bit)
+ boost::throw_exception(io::bad_format_string());
+ }
+
+
+
+ bool parse_printf_directive(const std::string & buf,
+ std::string::size_type * pos_p,
+ detail::format_item * fpar,
+ BOOST_IO_STD ios &os,
+ unsigned char exceptions)
+ // Input : a 'printf-directive' in the format-string, starting at buf[ *pos_p ]
+ // a basic_ios& merely to call its widen/narrow member function in the desired locale.
+    // a bitset 'exceptions' telling whether to throw exceptions on errors.
+ // Returns : true if parse somehow succeeded (possibly ignoring errors if exceptions disabled)
+    //          false if it failed so badly that the directive should be printed verbatim
+ // Effects : - *pos_p is incremented so that buf[*pos_p] is the first char after the directive
+ // - *fpar is set with the parameters read in the directive
+ {
+ typedef format_item format_item_t;
+ BOOST_ASSERT( pos_p != 0);
+ std::string::size_type &i1 = *pos_p,
+ i0;
+ fpar->argN_ = format_item_t::argN_no_posit; // if no positional-directive
+
+ bool in_brackets=false;
+ if(buf[i1]=='|')
+ {
+ in_brackets=true;
+ if( ++i1 >= buf.size() ) {
+ maybe_throw_exception(exceptions);
+ return false;
+ }
+ }
+
+ // the flag '0' would be picked as a digit for argument order, but here it's a flag :
+ if(buf[i1]=='0')
+ goto parse_flags;
+
+ // handle argument order (%2$d) or possibly width specification: %2d
+ i0 = i1; // save position before digits
+ while (i1 < buf.size() && wrap_isdigit(buf[i1], os))
+ ++i1;
+ if (i1!=i0)
+ {
+ if( i1 >= buf.size() ) {
+ maybe_throw_exception(exceptions);
+ return false;
+ }
+ int n=str2int(buf,i0, os, int(0) );
+
+ // %N% case : this is already the end of the directive
+ if( buf[i1] == '%' )
+ {
+ fpar->argN_ = n-1;
+ ++i1;
+ if( in_brackets)
+ maybe_throw_exception(exceptions);
+ // but don't return. maybe "%" was used in lieu of '$', so we go on.
+ else return true;
+ }
+
+ if ( buf[i1]=='$' )
+ {
+ fpar->argN_ = n-1;
+ ++i1;
+ }
+ else
+ {
+        // non-positional directive
+ fpar->ref_state_.width_ = n;
+ fpar->argN_ = format_item_t::argN_no_posit;
+ goto parse_precision;
+ }
+ }
+
+ parse_flags:
+ // handle flags
+ while ( i1 <buf.size()) // as long as char is one of + - = # 0 l h or ' '
+ {
+ // misc switches
+ switch (buf[i1])
+ {
+ case '\'' : break; // no effect yet. (painful to implement)
+ case 'l':
+      case 'h': // short/long modifier : for printf-compatibility (no action needed)
+ break;
+ case '-':
+ fpar->ref_state_.flags_ |= std::ios::left;
+ break;
+ case '=':
+ fpar->pad_scheme_ |= format_item_t::centered;
+ break;
+ case ' ':
+ fpar->pad_scheme_ |= format_item_t::spacepad;
+ break;
+ case '+':
+ fpar->ref_state_.flags_ |= std::ios::showpos;
+ break;
+ case '0':
+ fpar->pad_scheme_ |= format_item_t::zeropad;
+ // need to know alignment before really setting flags,
+        // so just add the 'zeropad' flag for now; it will be processed later.
+ break;
+ case '#':
+ fpar->ref_state_.flags_ |= std::ios::showpoint | std::ios::showbase;
+ break;
+ default:
+ goto parse_width;
+ }
+ ++i1;
+ } // loop on flag.
+ if( i1>=buf.size()) {
+ maybe_throw_exception(exceptions);
+ return true;
+ }
+
+ parse_width:
+ // handle width spec
+ skip_asterisk(buf, &i1, os); // skips 'asterisk fields' : *, or *N$
+ i0 = i1; // save position before digits
+ while (i1<buf.size() && wrap_isdigit(buf[i1], os))
+ i1++;
+
+ if (i1!=i0)
+ { fpar->ref_state_.width_ = str2int( buf,i0, os, std::streamsize(0) ); }
+
+ parse_precision:
+ if( i1>=buf.size()) {
+ maybe_throw_exception(exceptions);
+ return true;
+ }
+ // handle precision spec
+ if (buf[i1]=='.')
+ {
+ ++i1;
+ skip_asterisk(buf, &i1, os);
+ i0 = i1; // save position before digits
+ while (i1<buf.size() && wrap_isdigit(buf[i1], os))
+ ++i1;
+
+ if(i1==i0)
+ fpar->ref_state_.precision_ = 0;
+ else
+ fpar->ref_state_.precision_ = str2int(buf,i0, os, std::streamsize(0) );
+ }
+
+ // handle formatting-type flags :
+ while( i1<buf.size() &&
+ ( buf[i1]=='l' || buf[i1]=='L' || buf[i1]=='h') )
+ ++i1;
+ if( i1>=buf.size()) {
+ maybe_throw_exception(exceptions);
+ return true;
+ }
+
+ if( in_brackets && buf[i1]=='|' )
+ {
+ ++i1;
+ return true;
+ }
+ switch (buf[i1])
+ {
+ case 'X':
+ fpar->ref_state_.flags_ |= std::ios::uppercase;
+ case 'p': // pointer => set hex.
+ case 'x':
+ fpar->ref_state_.flags_ &= ~std::ios::basefield;
+ fpar->ref_state_.flags_ |= std::ios::hex;
+ break;
+
+ case 'o':
+ fpar->ref_state_.flags_ &= ~std::ios::basefield;
+ fpar->ref_state_.flags_ |= std::ios::oct;
+ break;
+
+ case 'E':
+ fpar->ref_state_.flags_ |= std::ios::uppercase;
+ case 'e':
+ fpar->ref_state_.flags_ &= ~std::ios::floatfield;
+ fpar->ref_state_.flags_ |= std::ios::scientific;
+
+ fpar->ref_state_.flags_ &= ~std::ios::basefield;
+ fpar->ref_state_.flags_ |= std::ios::dec;
+ break;
+
+ case 'f':
+ fpar->ref_state_.flags_ &= ~std::ios::floatfield;
+ fpar->ref_state_.flags_ |= std::ios::fixed;
+ case 'u':
+ case 'd':
+ case 'i':
+ fpar->ref_state_.flags_ &= ~std::ios::basefield;
+ fpar->ref_state_.flags_ |= std::ios::dec;
+ break;
+
+ case 'T':
+ ++i1;
+ if( i1 >= buf.size())
+ maybe_throw_exception(exceptions);
+ else
+ fpar->ref_state_.fill_ = buf[i1];
+ fpar->pad_scheme_ |= format_item_t::tabulation;
+ fpar->argN_ = format_item_t::argN_tabulation;
+ break;
+ case 't':
+ fpar->ref_state_.fill_ = ' ';
+ fpar->pad_scheme_ |= format_item_t::tabulation;
+ fpar->argN_ = format_item_t::argN_tabulation;
+ break;
+
+ case 'G':
+ fpar->ref_state_.flags_ |= std::ios::uppercase;
+ break;
+ case 'g': // 'g' conversion is default for floats.
+ fpar->ref_state_.flags_ &= ~std::ios::basefield;
+ fpar->ref_state_.flags_ |= std::ios::dec;
+
+      // CLEAR all floatfield flags, so the stream will CHOOSE
+ fpar->ref_state_.flags_ &= ~std::ios::floatfield;
+ break;
+
+ case 'C':
+ case 'c':
+ fpar->truncate_ = 1;
+ break;
+ case 'S':
+ case 's':
+ fpar->truncate_ = fpar->ref_state_.precision_;
+ fpar->ref_state_.precision_ = -1;
+ break;
+ case 'n' :
+ fpar->argN_ = format_item_t::argN_ignored;
+ break;
+ default:
+ maybe_throw_exception(exceptions);
+ }
+ ++i1;
+
+ if( in_brackets )
+ {
+ if( i1<buf.size() && buf[i1]=='|' )
+ {
+ ++i1;
+ return true;
+ }
+ else maybe_throw_exception(exceptions);
+ }
+ return true;
+ }
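+
+  // Worked example (illustrative, not part of the original sources): for the
+  // format string "%2$-5.3s", this function is entered with buf[*pos_p] == '2'
+  // and leaves with
+  //   fpar->argN_ == 1                    (positional argument 2, zero-based)
+  //   std::ios::left set in fpar->ref_state_.flags_
+  //   fpar->ref_state_.width_ == 5
+  //   fpar->truncate_ == 3                ('s' turns the precision into truncation)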
+
+} // detail namespace
+} // io namespace
+
+
+// -----------------------------------------------
+// format :: parse(..)
+
+void basic_format::parse(const string_t & buf)
+ // parse the format-string
+{
+ using namespace std;
+ const char arg_mark = '%';
+ bool ordered_args=true;
+ int max_argN=-1;
+ string_t::size_type i1=0;
+ int num_items=0;
+
+ // A: find upper_bound on num_items and allocates arrays
+ i1=0;
+ while( (i1=buf.find(arg_mark,i1)) != string::npos )
+ {
+ if( i1+1 >= buf.size() ) {
+ if(exceptions() & io::bad_format_string_bit)
+ boost::throw_exception(io::bad_format_string()); // must not end in "bla bla %"
+ else break; // stop there, ignore last '%'
+ }
+ if(buf[i1+1] == buf[i1] ) { i1+=2; continue; } // escaped "%%" / "##"
+ ++i1;
+
+      // in case of %N% directives, don't count it twice (wastes allocations..) :
+ while(i1 < buf.size() && io::detail::wrap_isdigit(buf[i1],oss_)) ++i1;
+ if( i1 < buf.size() && buf[i1] == arg_mark ) ++ i1;
+
+ ++num_items;
+ }
+ items_.assign( num_items, format_item_t() );
+
+ // B: Now the real parsing of the format string :
+ num_items=0;
+ i1 = 0;
+ string_t::size_type i0 = i1;
+ bool special_things=false;
+ int cur_it=0;
+ while( (i1=buf.find(arg_mark,i1)) != string::npos )
+ {
+ string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;
+
+ if( buf[i1+1] == buf[i1] ) // escaped mark, '%%'
+ {
+ piece += buf.substr(i0, i1-i0) + buf[i1];
+ i1+=2; i0=i1;
+ continue;
+ }
+ BOOST_ASSERT( static_cast<unsigned int>(cur_it) < items_.size() || cur_it==0);
+
+ if(i1!=i0) piece += buf.substr(i0, i1-i0);
+ ++i1;
+
+ bool parse_ok;
+ parse_ok = io::detail::parse_printf_directive(buf, &i1, &items_[cur_it], oss_, exceptions());
+ if( ! parse_ok ) continue; // the directive will be printed verbatim
+
+ i0=i1;
+ items_[cur_it].compute_states(); // process complex options, like zeropad, into stream params.
+
+ int argN=items_[cur_it].argN_;
+ if(argN == format_item_t::argN_ignored)
+ continue;
+ if(argN ==format_item_t::argN_no_posit)
+ ordered_args=false;
+ else if(argN == format_item_t::argN_tabulation) special_things=true;
+ else if(argN > max_argN) max_argN = argN;
+ ++num_items;
+ ++cur_it;
+ } // loop on %'s
+ BOOST_ASSERT(cur_it == num_items);
+
+ // store the final piece of string
+ string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;
+ piece += buf.substr(i0);
+
+ if( !ordered_args)
+ {
+        if(max_argN >= 0 ) // don't mix positional with non-positional directives
+ {
+ if(exceptions() & io::bad_format_string_bit)
+ boost::throw_exception(io::bad_format_string());
+            // else do nothing. => positional arguments are processed as non-positional
+ }
+        // set things as they would have been with positional directives :
+ int non_ordered_items = 0;
+ for(int i=0; i< num_items; ++i)
+ if(items_[i].argN_ == format_item_t::argN_no_posit)
+ {
+ items_[i].argN_ = non_ordered_items;
+ ++non_ordered_items;
+ }
+ max_argN = non_ordered_items-1;
+ }
+
+ // C: set some member data :
+ items_.resize(num_items);
+
+ if(special_things) style_ |= special_needs;
+ num_args_ = max_argN + 1;
+ if(ordered_args) style_ |= ordered;
+ else style_ &= ~ordered;
+}
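+
+// Illustrative trace (not from the original sources): parsing the format
+// string "%1% ate %2% apples" yields two items; prefix_ stays empty, the
+// first item's appendix_ is " ate ", the second's is " apples", num_args_
+// becomes 2, and the 'ordered' style bit is set.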
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_PARSING_HPP
diff --git a/src/boost/throw_exception.hpp b/src/boost/throw_exception.hpp
new file mode 100644
index 000000000..07b4ae5ce
--- /dev/null
+++ b/src/boost/throw_exception.hpp
@@ -0,0 +1,47 @@
+#ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED
+#define BOOST_THROW_EXCEPTION_HPP_INCLUDED
+
+// MS compatible compilers support #pragma once
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1020)
+# pragma once
+#endif
+
+//
+// boost/throw_exception.hpp
+//
+// Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+// http://www.boost.org/libs/utility/throw_exception.html
+//
+
+//#include <boost/config.hpp>
+
+#ifdef BOOST_NO_EXCEPTIONS
+# include <exception>
+#endif
+
+namespace boost
+{
+
+#ifdef BOOST_NO_EXCEPTIONS
+
+void throw_exception(std::exception const & e); // user defined
+
+#else
+
+template<class E> void throw_exception(E const & e)
+{
+ throw e;
+}
+
+#endif
+
+} // namespace boost
+
+#endif // #ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
new file mode 100644
index 000000000..d7aee2886
--- /dev/null
+++ b/src/build-remote/build-remote.cc
@@ -0,0 +1,283 @@
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+#include <set>
+#include <memory>
+#include <tuple>
+#include <iomanip>
+#if __APPLE__
+#include <sys/time.h>
+#endif
+
+#include "shared.hh"
+#include "pathlocks.hh"
+#include "globals.hh"
+#include "serialise.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+
+using namespace nix;
+using std::cin;
+
+static void handleAlarm(int sig) {
+}
+
+class Machine {
+ const std::set<string> supportedFeatures;
+ const std::set<string> mandatoryFeatures;
+
+public:
+ const string hostName;
+ const std::vector<string> systemTypes;
+ const string sshKey;
+ const unsigned int maxJobs;
+ const unsigned int speedFactor;
+ bool enabled;
+
+ bool allSupported(const std::set<string> & features) const {
+ return std::all_of(features.begin(), features.end(),
+ [&](const string & feature) {
+ return supportedFeatures.count(feature) ||
+ mandatoryFeatures.count(feature);
+ });
+ }
+
+ bool mandatoryMet(const std::set<string> & features) const {
+ return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
+ [&](const string & feature) {
+ return features.count(feature);
+ });
+ }
+
+ Machine(decltype(hostName) hostName,
+ decltype(systemTypes) systemTypes,
+ decltype(sshKey) sshKey,
+ decltype(maxJobs) maxJobs,
+ decltype(speedFactor) speedFactor,
+ decltype(supportedFeatures) supportedFeatures,
+ decltype(mandatoryFeatures) mandatoryFeatures) :
+ supportedFeatures(supportedFeatures),
+ mandatoryFeatures(mandatoryFeatures),
+ hostName(hostName),
+ systemTypes(systemTypes),
+ sshKey(sshKey),
+ maxJobs(maxJobs),
+ speedFactor(std::max(1U, speedFactor)),
+ enabled(true)
+ {};
+};
+
+static std::vector<Machine> readConf()
+{
+ auto conf = getEnv("NIX_REMOTE_SYSTEMS", SYSCONFDIR "/nix/machines");
+
+ auto machines = std::vector<Machine>{};
+ auto lines = std::vector<string>{};
+ try {
+ lines = tokenizeString<std::vector<string>>(readFile(conf), "\n");
+ } catch (const SysError & e) {
+ if (e.errNo != ENOENT)
+ throw;
+ }
+ for (auto line : lines) {
+ chomp(line);
+ line.erase(std::find(line.begin(), line.end(), '#'), line.end());
+ if (line.empty()) {
+ continue;
+ }
+ auto tokens = tokenizeString<std::vector<string>>(line);
+ auto sz = tokens.size();
+ if (sz < 4)
+ throw FormatError("bad machines.conf file ‘%1%’", conf);
+ machines.emplace_back(tokens[0],
+ tokenizeString<std::vector<string>>(tokens[1], ","),
+ tokens[2],
+ stoull(tokens[3]),
+ sz >= 5 ? stoull(tokens[4]) : 1LL,
+ sz >= 6 ?
+ tokenizeString<std::set<string>>(tokens[5], ",") :
+ std::set<string>{},
+ sz >= 7 ?
+ tokenizeString<std::set<string>>(tokens[6], ",") :
+ std::set<string>{});
+ }
+ return machines;
+}
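+
+/* Illustrative machines file entry (the host name, key path and feature
+   names below are made up):
+
+     builder1 x86_64-linux,i686-linux /etc/nix/id_builder 4 2 kvm benchmark
+
+   i.e. host, comma-separated system types, SSH key, max jobs, speed factor,
+   supported features and mandatory features; only the first four fields are
+   required. */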
+
+static string currentLoad;
+
+static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot)
+{
+ std::ostringstream fn_stream(currentLoad, std::ios_base::ate | std::ios_base::out);
+ fn_stream << "/";
+ for (auto t : m.systemTypes) {
+ fn_stream << t << "-";
+ }
+ fn_stream << m.hostName << "-" << slot;
+ return openLockFile(fn_stream.str(), true);
+}
+
+static char display_env[] = "DISPLAY=";
+static char ssh_env[] = "SSH_ASKPASS=";
+
+int main (int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ /* Ensure we don't get any SSH passphrase or host key popups. */
+ if (putenv(display_env) == -1 ||
+ putenv(ssh_env) == -1)
+ throw SysError("setting SSH env vars");
+
+ if (argc != 4)
+ throw UsageError("called without required arguments");
+
+ auto store = openStore();
+
+ auto localSystem = argv[1];
+ settings.maxSilentTime = stoull(string(argv[2]));
+ settings.buildTimeout = stoull(string(argv[3]));
+
+ currentLoad = getEnv("NIX_CURRENT_LOAD", "/run/nix/current-load");
+
+ std::shared_ptr<Store> sshStore;
+ AutoCloseFD bestSlotLock;
+
+ auto machines = readConf();
+ string drvPath;
+ string hostName;
+ for (string line; getline(cin, line);) {
+ auto tokens = tokenizeString<std::vector<string>>(line);
+ auto sz = tokens.size();
+ if (sz != 3 && sz != 4)
+ throw Error("invalid build hook line ‘%1%’", line);
+ auto amWilling = tokens[0] == "1";
+ auto neededSystem = tokens[1];
+ drvPath = tokens[2];
+ auto requiredFeatures = sz == 3 ?
+ std::set<string>{} :
+ tokenizeString<std::set<string>>(tokens[3], ",");
+ auto canBuildLocally = amWilling && (neededSystem == localSystem);
+
+ /* Error ignored here, will be caught later */
+ mkdir(currentLoad.c_str(), 0777);
+
+ while (true) {
+ bestSlotLock = -1;
+ AutoCloseFD lock = openLockFile(currentLoad + "/main-lock", true);
+ lockFile(lock.get(), ltWrite, true);
+
+ bool rightType = false;
+
+ Machine * bestMachine = nullptr;
+ unsigned long long bestLoad = 0;
+ for (auto & m : machines) {
+ if (m.enabled && std::find(m.systemTypes.begin(),
+ m.systemTypes.end(),
+ neededSystem) != m.systemTypes.end() &&
+ m.allSupported(requiredFeatures) &&
+ m.mandatoryMet(requiredFeatures)) {
+ rightType = true;
+ AutoCloseFD free;
+ unsigned long long load = 0;
+ for (unsigned long long slot = 0; slot < m.maxJobs; ++slot) {
+ auto slotLock = openSlotLock(m, slot);
+ if (lockFile(slotLock.get(), ltWrite, false)) {
+ if (!free) {
+ free = std::move(slotLock);
+ }
+ } else {
+ ++load;
+ }
+ }
+ if (!free) {
+ continue;
+ }
+ bool best = false;
+ if (!bestSlotLock) {
+ best = true;
+ } else if (load / m.speedFactor < bestLoad / bestMachine->speedFactor) {
+ best = true;
+ } else if (load / m.speedFactor == bestLoad / bestMachine->speedFactor) {
+ if (m.speedFactor > bestMachine->speedFactor) {
+ best = true;
+ } else if (m.speedFactor == bestMachine->speedFactor) {
+ if (load < bestLoad) {
+ best = true;
+ }
+ }
+ }
+ if (best) {
+ bestLoad = load;
+ bestSlotLock = std::move(free);
+ bestMachine = &m;
+ }
+ }
+ }
+
+ if (!bestSlotLock) {
+ if (rightType && !canBuildLocally)
+ std::cerr << "# postpone\n";
+ else
+ std::cerr << "# decline\n";
+ break;
+ }
+
+#if __APPLE__
+ futimes(bestSlotLock.get(), NULL);
+#else
+ futimens(bestSlotLock.get(), NULL);
+#endif
+
+ lock = -1;
+
+ try {
+ sshStore = openStore("ssh-ng://" + bestMachine->hostName,
+ { {"ssh-key", bestMachine->sshKey },
+ {"max-connections", "1" } });
+ hostName = bestMachine->hostName;
+ } catch (std::exception & e) {
+ printError("unable to open SSH connection to ‘%s’: %s; trying other available machines...",
+ bestMachine->hostName, e.what());
+ bestMachine->enabled = false;
+ continue;
+ }
+ goto connected;
+ }
+ }
+
+connected:
+ std::cerr << "# accept\n";
+ string line;
+ if (!getline(cin, line))
+ throw Error("hook caller didn't send inputs");
+ auto inputs = tokenizeString<PathSet>(line);
+ if (!getline(cin, line))
+ throw Error("hook caller didn't send outputs");
+ auto outputs = tokenizeString<PathSet>(line);
+ AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + hostName + ".upload-lock", true);
+ auto old = signal(SIGALRM, handleAlarm);
+ alarm(15 * 60);
+ if (!lockFile(uploadLock.get(), ltWrite, true))
+            printError("somebody is hogging the upload lock for ‘%s’, continuing...", hostName);
+ alarm(0);
+ signal(SIGALRM, old);
+ copyPaths(store, ref<Store>(sshStore), inputs);
+ uploadLock = -1;
+
+ printError("building ‘%s’ on ‘%s’", drvPath, hostName);
+ sshStore->buildDerivation(drvPath, readDerivation(drvPath));
+
+ PathSet missing;
+ for (auto & path : outputs)
+ if (!store->isValidPath(path)) missing.insert(path);
+
+ if (!missing.empty()) {
+ setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */
+ copyPaths(ref<Store>(sshStore), store, missing);
+ }
+
+ return;
+ });
+}
diff --git a/src/build-remote/local.mk b/src/build-remote/local.mk
new file mode 100644
index 000000000..62d5a010c
--- /dev/null
+++ b/src/build-remote/local.mk
@@ -0,0 +1,11 @@
+programs += build-remote
+
+build-remote_DIR := $(d)
+
+build-remote_INSTALL_DIR := $(libexecdir)/nix
+
+build-remote_LIBS = libmain libutil libformat libstore
+
+build-remote_SOURCES := $(d)/build-remote.cc
+
+build-remote_CXXFLAGS = -DSYSCONFDIR="\"$(sysconfdir)\""
diff --git a/src/buildenv/buildenv.cc b/src/buildenv/buildenv.cc
new file mode 100644
index 000000000..f997096ed
--- /dev/null
+++ b/src/buildenv/buildenv.cc
@@ -0,0 +1,186 @@
+#include "shared.hh"
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <algorithm>
+
+using namespace nix;
+
+typedef std::map<Path,int> Priorities;
+
+static bool isDirectory (const Path & path)
+{
+ struct stat st;
+ if (stat(path.c_str(), &st) == -1)
+ throw SysError(format("getting status of ‘%1%’") % path);
+ return S_ISDIR(st.st_mode);
+}
+
+static auto priorities = Priorities{};
+
+static auto symlinks = 0;
+
+/* For each activated package, create symlinks */
+static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
+{
+ auto srcFiles = readDirectory(srcDir);
+ for (const auto & ent : srcFiles) {
+ if (ent.name[0] == '.')
+ /* not matched by glob */
+ continue;
+ const auto & srcFile = srcDir + "/" + ent.name;
+ auto dstFile = dstDir + "/" + ent.name;
+
+        /* The files below are special-cased so that they don't show up
+         * in user profiles, either because they are useless, or
+         * because they would cause pointless collisions (e.g., each
+ * Python package brings its own
+ * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
+ */
+ if (hasSuffix(srcFile, "/propagated-build-inputs") ||
+ hasSuffix(srcFile, "/nix-support") ||
+ hasSuffix(srcFile, "/perllocal.pod") ||
+ hasSuffix(srcFile, "/info/dir") ||
+ hasSuffix(srcFile, "/log")) {
+ continue;
+ } else if (isDirectory(srcFile)) {
+ struct stat dstSt;
+ auto res = lstat(dstFile.c_str(), &dstSt);
+ if (res == 0) {
+ if (S_ISDIR(dstSt.st_mode)) {
+ createLinks(srcFile, dstFile, priority);
+ continue;
+ } else if (S_ISLNK(dstSt.st_mode)) {
+ auto target = readLink(dstFile);
+ if (!isDirectory(target))
+ throw Error(format("collision between ‘%1%’ and non-directory ‘%2%’")
+ % srcFile % target);
+ if (unlink(dstFile.c_str()) == -1)
+ throw SysError(format("unlinking ‘%1%’") % dstFile);
+ if (mkdir(dstFile.c_str(), 0755) == -1)
+                        throw SysError(format("creating directory ‘%1%’") % dstFile);
+ createLinks(target, dstFile, priorities[dstFile]);
+ createLinks(srcFile, dstFile, priority);
+ continue;
+ }
+ } else if (errno != ENOENT)
+ throw SysError(format("getting status of ‘%1%’") % dstFile);
+ } else {
+ struct stat dstSt;
+ auto res = lstat(dstFile.c_str(), &dstSt);
+ if (res == 0) {
+ if (S_ISLNK(dstSt.st_mode)) {
+ auto target = readLink(dstFile);
+ auto prevPriority = priorities[dstFile];
+ if (prevPriority == priority)
+ throw Error(format(
+ "collision between ‘%1%’ and ‘%2%’; "
+ "use ‘nix-env --set-flag priority NUMBER PKGNAME’ "
+ "to change the priority of one of the conflicting packages"
+ ) % srcFile % target);
+ if (prevPriority < priority)
+ continue;
+ if (unlink(dstFile.c_str()) == -1)
+ throw SysError(format("unlinking ‘%1%’") % dstFile);
+ }
+ } else if (errno != ENOENT)
+ throw SysError(format("getting status of ‘%1%’") % dstFile);
+ }
+ createSymlink(srcFile, dstFile);
+ priorities[dstFile] = priority;
+ symlinks++;
+ }
+}
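+
+/* Illustrative example (not part of the original sources): if two active
+   packages both provide bin/hello, the one with the lower priority number
+   wins and the other's link is skipped; if their priorities are equal, the
+   collision error above is raised and the user is told to adjust priorities
+   with ‘nix-env --set-flag priority’. */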
+
+typedef std::set<Path> FileProp;
+
+static auto done = FileProp{};
+static auto postponed = FileProp{};
+
+static auto out = string{};
+
+static void addPkg(const Path & pkgDir, int priority)
+{
+ if (done.find(pkgDir) != done.end())
+ return;
+ done.insert(pkgDir);
+ createLinks(pkgDir, out, priority);
+ auto propagatedFN = pkgDir + "/nix-support/propagated-user-env-packages";
+ auto propagated = string{};
+ {
+ AutoCloseFD fd = open(propagatedFN.c_str(), O_RDONLY | O_CLOEXEC);
+ if (!fd) {
+ if (errno == ENOENT)
+ return;
+ throw SysError(format("opening ‘%1%’") % propagatedFN);
+ }
+ propagated = readLine(fd.get());
+ }
+ for (const auto & p : tokenizeString<std::vector<string>>(propagated, " "))
+ if (done.find(p) == done.end())
+ postponed.insert(p);
+}
+
+struct Package {
+ Path path;
+ bool active;
+ int priority;
+ Package(Path path, bool active, int priority) : path{std::move(path)}, active{active}, priority{priority} {}
+};
+
+typedef std::vector<Package> Packages;
+
+int main(int argc, char ** argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+ out = getEnv("out");
+ if (mkdir(out.c_str(), 0755) == -1)
+ throw SysError(format("creating %1%") % out);
+
+ /* Convert the stuff we get from the environment back into a coherent
+ * data type.
+ */
+ auto pkgs = Packages{};
+ auto derivations = tokenizeString<Strings>(getEnv("derivations"));
+ while (!derivations.empty()) {
+ /* !!! We're trusting the caller to structure derivations env var correctly */
+ auto active = derivations.front(); derivations.pop_front();
+ auto priority = stoi(derivations.front()); derivations.pop_front();
+ auto outputs = stoi(derivations.front()); derivations.pop_front();
+ for (auto n = 0; n < outputs; n++) {
+ auto path = derivations.front(); derivations.pop_front();
+ pkgs.emplace_back(path, active != "false", priority);
+ }
+ }
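+
+        /* Example of the expected layout (hypothetical store path): the value
+           derivations="true 5 1 /nix/store/aaaa-hello-2.10" describes one
+           active package with priority 5 and a single output path. */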
+
+ /* Symlink to the packages that have been installed explicitly by the
+ * user. Process in priority order to reduce unnecessary
+ * symlink/unlink steps.
+ */
+ std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) {
+ return a.priority < b.priority || (a.priority == b.priority && a.path < b.path);
+ });
+ for (const auto & pkg : pkgs)
+ if (pkg.active)
+ addPkg(pkg.path, pkg.priority);
+
+ /* Symlink to the packages that have been "propagated" by packages
+ * installed by the user (i.e., package X declares that it wants Y
+ * installed as well). We do these later because they have a lower
+ * priority in case of collisions.
+ */
+ auto priorityCounter = 1000;
+ while (!postponed.empty()) {
+ auto pkgDirs = postponed;
+ postponed = FileProp{};
+ for (const auto & pkgDir : pkgDirs)
+ addPkg(pkgDir, priorityCounter++);
+ }
+
+ std::cerr << "created " << symlinks << " symlinks in user environment\n";
+
+ createSymlink(getEnv("manifest"), out + "/manifest.nix");
+ });
+}
+
diff --git a/src/buildenv/local.mk b/src/buildenv/local.mk
new file mode 100644
index 000000000..17ec13b23
--- /dev/null
+++ b/src/buildenv/local.mk
@@ -0,0 +1,9 @@
+programs += buildenv
+
+buildenv_DIR := $(d)
+
+buildenv_INSTALL_DIR := $(libexecdir)/nix
+
+buildenv_LIBS = libmain libstore libutil libformat
+
+buildenv_SOURCES := $(d)/buildenv.cc
diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc
new file mode 100644
index 000000000..55379f94b
--- /dev/null
+++ b/src/libexpr/attr-path.cc
@@ -0,0 +1,96 @@
+#include "attr-path.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+static Strings parseAttrPath(const string & s)
+{
+ Strings res;
+ string cur;
+ string::const_iterator i = s.begin();
+ while (i != s.end()) {
+ if (*i == '.') {
+ res.push_back(cur);
+ cur.clear();
+ } else if (*i == '"') {
+ ++i;
+ while (1) {
+ if (i == s.end())
+ throw Error(format("missing closing quote in selection path ‘%1%’") % s);
+ if (*i == '"') break;
+ cur.push_back(*i++);
+ }
+ } else
+ cur.push_back(*i);
+ ++i;
+ }
+ if (!cur.empty()) res.push_back(cur);
+ return res;
+}
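+
+/* Example (illustrative): the attribute path ‘foo."bar.baz".2’ is split into
+   the tokens "foo", "bar.baz" and "2"; double quotes protect the embedded
+   dot, and the numeric token is later treated as a list index by
+   findAlongAttrPath. */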
+
+
+Value * findAlongAttrPath(EvalState & state, const string & attrPath,
+ Bindings & autoArgs, Value & vIn)
+{
+ Strings tokens = parseAttrPath(attrPath);
+
+ Error attrError =
+ Error(format("attribute selection path ‘%1%’ does not match expression") % attrPath);
+
+ Value * v = &vIn;
+
+ for (auto & attr : tokens) {
+
+        /* Is attr an index (integer) or a normal attribute name? */
+ enum { apAttr, apIndex } apType = apAttr;
+ unsigned int attrIndex;
+ if (string2Int(attr, attrIndex)) apType = apIndex;
+
+ /* Evaluate the expression. */
+ Value * vNew = state.allocValue();
+ state.autoCallFunction(autoArgs, *v, *vNew);
+ v = vNew;
+ state.forceValue(*v);
+
+        /* It should evaluate to either a set or a list,
+ according to what is specified in the attrPath. */
+
+ if (apType == apAttr) {
+
+ if (v->type != tAttrs)
+ throw TypeError(
+ format("the expression selected by the selection path ‘%1%’ should be a set but is %2%")
+ % attrPath % showType(*v));
+
+ if (attr.empty())
+ throw Error(format("empty attribute name in selection path ‘%1%’") % attrPath);
+
+ Bindings::iterator a = v->attrs->find(state.symbols.create(attr));
+ if (a == v->attrs->end())
+ throw Error(format("attribute ‘%1%’ in selection path ‘%2%’ not found") % attr % attrPath);
+ v = &*a->value;
+ }
+
+ else if (apType == apIndex) {
+
+ if (!v->isList())
+ throw TypeError(
+ format("the expression selected by the selection path ‘%1%’ should be a list but is %2%")
+ % attrPath % showType(*v));
+
+ if (attrIndex >= v->listSize())
+ throw Error(format("list index %1% in selection path ‘%2%’ is out of range") % attrIndex % attrPath);
+
+ v = v->listElems()[attrIndex];
+ }
+
+ }
+
+ return v;
+}
+
+
+}
diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh
new file mode 100644
index 000000000..46a341950
--- /dev/null
+++ b/src/libexpr/attr-path.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+Value * findAlongAttrPath(EvalState & state, const string & attrPath,
+ Bindings & autoArgs, Value & vIn);
+
+}
diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc
new file mode 100644
index 000000000..910428c02
--- /dev/null
+++ b/src/libexpr/attr-set.cc
@@ -0,0 +1,63 @@
+#include "attr-set.hh"
+#include "eval.hh"
+
+#include <algorithm>
+
+
+namespace nix {
+
+
+static void * allocBytes(size_t n)
+{
+ void * p;
+#if HAVE_BOEHMGC
+ p = GC_malloc(n);
+#else
+ p = malloc(n);
+#endif
+ if (!p) throw std::bad_alloc();
+ return p;
+}
+
+
+/* Allocate a new array of attributes for an attribute set with a specific
+ capacity. The space is implicitly reserved after the Bindings
+ structure. */
+Bindings * EvalState::allocBindings(Bindings::size_t capacity)
+{
+ return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings(capacity);
+}
+
+
+void EvalState::mkAttrs(Value & v, unsigned int capacity)
+{
+ if (capacity == 0) {
+ v = vEmptySet;
+ return;
+ }
+ clearValue(v);
+ v.type = tAttrs;
+ v.attrs = allocBindings(capacity);
+ nrAttrsets++;
+ nrAttrsInAttrsets += capacity;
+}
+
+
+/* Create a new attribute named 'name' on an existing attribute set stored
+ in 'vAttrs' and return the newly allocated Value which is associated with
+ this attribute. */
+Value * EvalState::allocAttr(Value & vAttrs, const Symbol & name)
+{
+ Value * v = allocValue();
+ vAttrs.attrs->push_back(Attr(name, v));
+ return v;
+}
+
+
+void Bindings::sort()
+{
+ std::sort(begin(), end());
+}
+
+
+}
diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh
new file mode 100644
index 000000000..e1fc2bf6d
--- /dev/null
+++ b/src/libexpr/attr-set.hh
@@ -0,0 +1,95 @@
+#pragma once
+
+#include "nixexpr.hh"
+#include "symbol-table.hh"
+
+#include <algorithm>
+
+namespace nix {
+
+
+class EvalState;
+struct Value;
+
+/* Map one attribute name to its value. */
+struct Attr
+{
+ Symbol name;
+ Value * value;
+ Pos * pos;
+ Attr(Symbol name, Value * value, Pos * pos = &noPos)
+ : name(name), value(value), pos(pos) { };
+ Attr() : pos(&noPos) { };
+ bool operator < (const Attr & a) const
+ {
+ return name < a.name;
+ }
+};
+
+/* Bindings contains all the attributes of an attribute set. It is defined
+ by its size and its capacity, the capacity being the number of Attr
+ elements allocated after this structure, while the size corresponds to
+ the number of elements already inserted in this structure. */
+class Bindings
+{
+public:
+ typedef uint32_t size_t;
+
+private:
+ size_t size_, capacity_;
+ Attr attrs[0];
+
+ Bindings(size_t capacity) : size_(0), capacity_(capacity) { }
+ Bindings(const Bindings & bindings) = delete;
+
+public:
+ size_t size() const { return size_; }
+
+ bool empty() const { return !size_; }
+
+ typedef Attr * iterator;
+
+ void push_back(const Attr & attr)
+ {
+ assert(size_ < capacity_);
+ attrs[size_++] = attr;
+ }
+
+ iterator find(const Symbol & name)
+ {
+ Attr key(name, 0);
+ iterator i = std::lower_bound(begin(), end(), key);
+ if (i != end() && i->name == name) return i;
+ return end();
+ }
+
+ iterator begin() { return &attrs[0]; }
+ iterator end() { return &attrs[size_]; }
+
+ Attr & operator[](size_t pos)
+ {
+ return attrs[pos];
+ }
+
+ void sort();
+
+ size_t capacity() { return capacity_; }
+
+ /* Returns the attributes in lexicographically sorted order. */
+ std::vector<const Attr *> lexicographicOrder() const
+ {
+ std::vector<const Attr *> res;
+ res.reserve(size_);
+ for (size_t n = 0; n < size_; n++)
+ res.emplace_back(&attrs[n]);
+ std::sort(res.begin(), res.end(), [](const Attr * a, const Attr * b) {
+ return (string) a->name < (string) b->name;
+ });
+ return res;
+ }
+
+ friend class EvalState;
+};
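+
+/* Usage sketch (illustrative; assumes an EvalState ‘state’ and two already
+   allocated values ‘vX’ and ‘vY’):
+
+     Bindings * b = state.allocBindings(2);
+     b->push_back(Attr(state.symbols.create("y"), vY));
+     b->push_back(Attr(state.symbols.create("x"), vX));
+     b->sort();   // required before find(), which does a binary search
+     Bindings::iterator i = b->find(state.symbols.create("x"));
+*/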
+
+
+}
diff --git a/src/libexpr/common-opts.cc b/src/libexpr/common-opts.cc
new file mode 100644
index 000000000..06d6ed87d
--- /dev/null
+++ b/src/libexpr/common-opts.cc
@@ -0,0 +1,67 @@
+#include "common-opts.hh"
+#include "shared.hh"
+#include "download.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+bool parseAutoArgs(Strings::iterator & i,
+ const Strings::iterator & argsEnd, std::map<string, string> & res)
+{
+ string arg = *i;
+ if (arg != "--arg" && arg != "--argstr") return false;
+
+ UsageError error(format("‘%1%’ requires two arguments") % arg);
+
+ if (++i == argsEnd) throw error;
+ string name = *i;
+ if (++i == argsEnd) throw error;
+ string value = *i;
+
+ res[name] = (arg == "--arg" ? 'E' : 'S') + value;
+
+ return true;
+}
+
+
+Bindings * evalAutoArgs(EvalState & state, std::map<string, string> & in)
+{
+ Bindings * res = state.allocBindings(in.size());
+ for (auto & i : in) {
+ Value * v = state.allocValue();
+ if (i.second[0] == 'E')
+ state.mkThunk_(*v, state.parseExprFromString(string(i.second, 1), absPath(".")));
+ else
+ mkString(*v, string(i.second, 1));
+ res->push_back(Attr(state.symbols.create(i.first), v));
+ }
+ res->sort();
+ return res;
+}
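+
+/* Sketch of the encoding used above (illustrative): ‘--arg lib "{ x = 1; }"’
+   is stored by parseAutoArgs as "E{ x = 1; }" and parsed here as a Nix
+   expression, while ‘--argstr name foo’ is stored as "Sfoo" and becomes the
+   literal string "foo". */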
+
+
+bool parseSearchPathArg(Strings::iterator & i,
+ const Strings::iterator & argsEnd, Strings & searchPath)
+{
+ if (*i != "-I") return false;
+ if (++i == argsEnd) throw UsageError("‘-I’ requires an argument");
+ searchPath.push_back(*i);
+ return true;
+}
+
+
+Path lookupFileArg(EvalState & state, string s)
+{
+ if (isUri(s))
+ return getDownloader()->downloadCached(state.store, s, true);
+ else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+ Path p = s.substr(1, s.size() - 2);
+ return state.findFile(p);
+ } else
+ return absPath(s);
+}
+
+
+}
diff --git a/src/libexpr/common-opts.hh b/src/libexpr/common-opts.hh
new file mode 100644
index 000000000..cb2732d6f
--- /dev/null
+++ b/src/libexpr/common-opts.hh
@@ -0,0 +1,20 @@
+#pragma once
+
+#include "eval.hh"
+
+namespace nix {
+
+class Store;
+
+/* Some common option parsing between nix-env and nix-instantiate. */
+bool parseAutoArgs(Strings::iterator & i,
+ const Strings::iterator & argsEnd, std::map<string, string> & res);
+
+Bindings * evalAutoArgs(EvalState & state, std::map<string, string> & in);
+
+bool parseSearchPathArg(Strings::iterator & i,
+ const Strings::iterator & argsEnd, Strings & searchPath);
+
+Path lookupFileArg(EvalState & state, string s);
+
+}
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
new file mode 100644
index 000000000..0748fbd3f
--- /dev/null
+++ b/src/libexpr/eval-inline.hh
@@ -0,0 +1,81 @@
+#pragma once
+
+#include "eval.hh"
+
+#define LocalNoInline(f) static f __attribute__((noinline)); f
+#define LocalNoInlineNoReturn(f) static f __attribute__((noinline, noreturn)); f
+
+namespace nix {
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const Pos & pos))
+{
+ throw EvalError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v))
+{
+ throw TypeError(format(s) % showType(v));
+}
+
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v, const Pos & pos))
+{
+ throw TypeError(format(s) % showType(v) % pos);
+}
+
+
+void EvalState::forceValue(Value & v, const Pos & pos)
+{
+ if (v.type == tThunk) {
+ Env * env = v.thunk.env;
+ Expr * expr = v.thunk.expr;
+ try {
+ v.type = tBlackhole;
+ //checkInterrupt();
+ expr->eval(*this, *env, v);
+ } catch (Error & e) {
+ v.type = tThunk;
+ v.thunk.env = env;
+ v.thunk.expr = expr;
+ throw;
+ }
+ }
+ else if (v.type == tApp)
+ callFunction(*v.app.left, *v.app.right, v, noPos);
+ else if (v.type == tBlackhole)
+ throwEvalError("infinite recursion encountered, at %1%", pos);
+}
+
+
+inline void EvalState::forceAttrs(Value & v)
+{
+ forceValue(v);
+ if (v.type != tAttrs)
+ throwTypeError("value is %1% while a set was expected", v);
+}
+
+
+inline void EvalState::forceAttrs(Value & v, const Pos & pos)
+{
+ forceValue(v);
+ if (v.type != tAttrs)
+ throwTypeError("value is %1% while a set was expected, at %2%", v, pos);
+}
+
+
+inline void EvalState::forceList(Value & v)
+{
+ forceValue(v);
+ if (!v.isList())
+ throwTypeError("value is %1% while a list was expected", v);
+}
+
+
+inline void EvalState::forceList(Value & v, const Pos & pos)
+{
+ forceValue(v);
+ if (!v.isList())
+ throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
+}
+
+}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
new file mode 100644
index 000000000..5e1ae63c4
--- /dev/null
+++ b/src/libexpr/eval.cc
@@ -0,0 +1,1814 @@
+#include "eval.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+#include "globals.hh"
+#include "eval-inline.hh"
+#include "download.hh"
+
+#include <algorithm>
+#include <cstring>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#if HAVE_BOEHMGC
+
+#include <gc/gc.h>
+#include <gc/gc_cpp.h>
+
+#define NEW new (UseGC)
+
+#else
+
+#define NEW new
+
+#endif
+
+
+namespace nix {
+
+
+static char * dupString(const char * s)
+{
+ char * t;
+#if HAVE_BOEHMGC
+ t = GC_strdup(s);
+#else
+ t = strdup(s);
+#endif
+ if (!t) throw std::bad_alloc();
+ return t;
+}
+
+
+static void * allocBytes(size_t n)
+{
+ void * p;
+#if HAVE_BOEHMGC
+ p = GC_malloc(n);
+#else
+ p = malloc(n);
+#endif
+ if (!p) throw std::bad_alloc();
+ return p;
+}
+
+
+static void printValue(std::ostream & str, std::set<const Value *> & active, const Value & v)
+{
+ checkInterrupt();
+
+ if (active.find(&v) != active.end()) {
+ str << "<CYCLE>";
+ return;
+ }
+ active.insert(&v);
+
+ switch (v.type) {
+ case tInt:
+ str << v.integer;
+ break;
+ case tBool:
+ str << (v.boolean ? "true" : "false");
+ break;
+ case tString:
+ str << "\"";
+ for (const char * i = v.string.s; *i; i++)
+ if (*i == '\"' || *i == '\\') str << "\\" << *i;
+ else if (*i == '\n') str << "\\n";
+ else if (*i == '\r') str << "\\r";
+ else if (*i == '\t') str << "\\t";
+ else str << *i;
+ str << "\"";
+ break;
+ case tPath:
+ str << v.path; // !!! escaping?
+ break;
+ case tNull:
+ str << "null";
+ break;
+ case tAttrs: {
+ str << "{ ";
+ for (auto & i : v.attrs->lexicographicOrder()) {
+ str << i->name << " = ";
+ printValue(str, active, *i->value);
+ str << "; ";
+ }
+ str << "}";
+ break;
+ }
+ case tList1:
+ case tList2:
+ case tListN:
+ str << "[ ";
+ for (unsigned int n = 0; n < v.listSize(); ++n) {
+ printValue(str, active, *v.listElems()[n]);
+ str << " ";
+ }
+ str << "]";
+ break;
+ case tThunk:
+ case tApp:
+ str << "<CODE>";
+ break;
+ case tLambda:
+ str << "<LAMBDA>";
+ break;
+ case tPrimOp:
+ str << "<PRIMOP>";
+ break;
+ case tPrimOpApp:
+ str << "<PRIMOP-APP>";
+ break;
+ case tExternal:
+ str << *v.external;
+ break;
+ case tFloat:
+ str << v.fpoint;
+ break;
+ default:
+ throw Error("invalid value");
+ }
+
+ active.erase(&v);
+}
+
+
+std::ostream & operator << (std::ostream & str, const Value & v)
+{
+ std::set<const Value *> active;
+ printValue(str, active, v);
+ return str;
+}
+
+
+string showType(const Value & v)
+{
+ switch (v.type) {
+ case tInt: return "an integer";
+ case tBool: return "a boolean";
+ case tString: return "a string";
+ case tPath: return "a path";
+ case tNull: return "null";
+ case tAttrs: return "a set";
+ case tList1: case tList2: case tListN: return "a list";
+ case tThunk: return "a thunk";
+ case tApp: return "a function application";
+ case tLambda: return "a function";
+ case tBlackhole: return "a black hole";
+ case tPrimOp: return "a built-in function";
+ case tPrimOpApp: return "a partially applied built-in function";
+ case tExternal: return v.external->showType();
+ case tFloat: return "a float";
+ }
+ abort();
+}
+
+
+#if HAVE_BOEHMGC
+/* Called when the Boehm GC runs out of memory. */
+static void * oomHandler(size_t requested)
+{
+ /* Convert this to a proper C++ exception. */
+ throw std::bad_alloc();
+}
+#endif
+
+
+static Symbol getName(const AttrName & name, EvalState & state, Env & env)
+{
+ if (name.symbol.set()) {
+ return name.symbol;
+ } else {
+ Value nameValue;
+ name.expr->eval(state, env, nameValue);
+ state.forceStringNoCtx(nameValue);
+ return state.symbols.create(nameValue.string.s);
+ }
+}
+
+
+static bool gcInitialised = false;
+
+void initGC()
+{
+ if (gcInitialised) return;
+
+#if HAVE_BOEHMGC
+ /* Initialise the Boehm garbage collector. */
+ GC_set_all_interior_pointers(0);
+
+ GC_INIT();
+
+ GC_set_oom_fn(oomHandler);
+
+ /* Set the initial heap size to something fairly big (25% of
+ physical RAM, up to a maximum of 384 MiB) so that in most cases
+ we don't need to garbage collect at all. (Collection has a
+ fairly significant overhead.) The heap size can be overridden
+ through libgc's GC_INITIAL_HEAP_SIZE environment variable. We
+ should probably also provide a nix.conf setting for this. Note
+ that GC_expand_hp() causes a lot of virtual, but not physical
+ (resident) memory to be allocated. This might be a problem on
+ systems that don't overcommit. */
+ if (!getenv("GC_INITIAL_HEAP_SIZE")) {
+ size_t size = 32 * 1024 * 1024;
+#if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES)
+ size_t maxSize = 384 * 1024 * 1024;
+ long pageSize = sysconf(_SC_PAGESIZE);
+ long pages = sysconf(_SC_PHYS_PAGES);
+ if (pageSize != -1)
+ size = (pageSize * pages) / 4; // 25% of RAM
+ if (size > maxSize) size = maxSize;
+#endif
+ debug(format("setting initial heap size to %1% bytes") % size);
+ GC_expand_hp(size);
+ }
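+
+    /* Illustrative arithmetic: with 4 KiB pages and 2 GiB of physical RAM the
+       computed size is 512 MiB, which is then capped to the 384 MiB maximum. */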
+
+#endif
+
+ gcInitialised = true;
+}
+
+
+/* Very hacky way to parse $NIX_PATH, which is colon-separated, but
+ can contain URLs (e.g. "nixpkgs=https://bla...:foo=https://"). */
+static Strings parseNixPath(const string & s)
+{
+ Strings res;
+
+ auto p = s.begin();
+
+ while (p != s.end()) {
+ auto start = p;
+ auto start2 = p;
+
+ while (p != s.end() && *p != ':') {
+ if (*p == '=') start2 = p + 1;
+ ++p;
+ }
+
+ if (p == s.end()) {
+ if (p != start) res.push_back(std::string(start, p));
+ break;
+ }
+
+ if (*p == ':') {
+ if (isUri(std::string(start2, s.end()))) {
+ ++p;
+ while (p != s.end() && *p != ':') ++p;
+ }
+ res.push_back(std::string(start, p));
+ if (p == s.end()) break;
+ }
+
+ ++p;
+ }
+
+ return res;
+}
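+
+/* Example (illustrative URL and directory): the value
+   "nixpkgs=https://example.org/nixpkgs.tar.gz:/home/alice/nix" splits into
+   "nixpkgs=https://example.org/nixpkgs.tar.gz" and "/home/alice/nix"; the
+   colon inside the URL is not treated as a separator because the text after
+   '=' is recognised as a URI. */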
+
+
+EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
+ : sWith(symbols.create("<with>"))
+ , sOutPath(symbols.create("outPath"))
+ , sDrvPath(symbols.create("drvPath"))
+ , sType(symbols.create("type"))
+ , sMeta(symbols.create("meta"))
+ , sName(symbols.create("name"))
+ , sValue(symbols.create("value"))
+ , sSystem(symbols.create("system"))
+ , sOverrides(symbols.create("__overrides"))
+ , sOutputs(symbols.create("outputs"))
+ , sOutputName(symbols.create("outputName"))
+ , sIgnoreNulls(symbols.create("__ignoreNulls"))
+ , sFile(symbols.create("file"))
+ , sLine(symbols.create("line"))
+ , sColumn(symbols.create("column"))
+ , sFunctor(symbols.create("__functor"))
+ , sToString(symbols.create("__toString"))
+ , sRight(symbols.create("right"))
+ , sWrong(symbols.create("wrong"))
+ , sStructuredAttrs(symbols.create("__structuredAttrs"))
+ , sBuilder(symbols.create("builder"))
+ , store(store)
+ , baseEnv(allocEnv(128))
+ , staticBaseEnv(false, 0)
+{
+ countCalls = getEnv("NIX_COUNT_CALLS", "0") != "0";
+
+ restricted = settings.restrictEval;
+
+ assert(gcInitialised);
+
+ /* Initialise the Nix expression search path. */
+ Strings paths = parseNixPath(getEnv("NIX_PATH", ""));
+ for (auto & i : _searchPath) addToSearchPath(i);
+ for (auto & i : paths) addToSearchPath(i);
+ addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs");
+
+ clearValue(vEmptySet);
+ vEmptySet.type = tAttrs;
+ vEmptySet.attrs = allocBindings(0);
+
+ createBaseEnv();
+}
+
+
+EvalState::~EvalState()
+{
+ fileEvalCache.clear();
+}
+
+
+Path EvalState::checkSourcePath(const Path & path_)
+{
+ if (!restricted) return path_;
+
+ /* Resolve symlinks. */
+ debug(format("checking access to ‘%s’") % path_);
+ Path path = canonPath(path_, true);
+
+ for (auto & i : searchPath) {
+ auto r = resolveSearchPathElem(i);
+ if (!r.first) continue;
+ if (path == r.second || isInDir(path, r.second))
+ return path;
+ }
+
+ /* To support import-from-derivation, allow access to anything in
+ the store. FIXME: only allow access to paths that have been
+ constructed by this evaluation. */
+ if (store->isInStore(path)) return path;
+
+#if 0
+ /* Hack to support the chroot dependencies of corepkgs (see
+ corepkgs/config.nix.in). */
+ if (path == settings.nixPrefix && isStorePath(settings.nixPrefix))
+ return path;
+#endif
+
+ throw RestrictedPathError(format("access to path ‘%1%’ is forbidden in restricted mode") % path_);
+}
+
+
+void EvalState::addConstant(const string & name, Value & v)
+{
+ Value * v2 = allocValue();
+ *v2 = v;
+ staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
+ baseEnv.values[baseEnvDispl++] = v2;
+ string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
+ baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
+}
+
+
+void EvalState::addPrimOp(const string & name,
+ unsigned int arity, PrimOpFun primOp)
+{
+ Value * v = allocValue();
+ string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
+ Symbol sym = symbols.create(name2);
+ v->type = tPrimOp;
+ v->primOp = NEW PrimOp(primOp, arity, sym);
+ staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
+ baseEnv.values[baseEnvDispl++] = v;
+ baseEnv.values[0]->attrs->push_back(Attr(sym, v));
+}
+
+
+Value & EvalState::getBuiltin(const string & name)
+{
+ return *baseEnv.values[0]->attrs->find(symbols.create(name))->value;
+}
+
+
+/* Every "format" object (even temporary) takes up a few hundred bytes
+ of stack space, which is a real killer in the recursive
+ evaluator. So here are some helper functions for throwing
+ exceptions. */
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2))
+{
+ throw EvalError(format(s) % s2);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const Pos & pos))
+{
+ throw EvalError(format(s) % s2 % pos);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const string & s3))
+{
+ throw EvalError(format(s) % s2 % s3);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const string & s3, const Pos & pos))
+{
+ throw EvalError(format(s) % s2 % s3 % pos);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const Symbol & sym, const Pos & p1, const Pos & p2))
+{
+ throw EvalError(format(s) % sym % p1 % p2);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Pos & pos))
+{
+ throw TypeError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const string & s1))
+{
+ throw TypeError(format(s) % s1);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const ExprLambda & fun, const Symbol & s2, const Pos & pos))
+{
+ throw TypeError(format(s) % fun.showNamePos() % s2 % pos);
+}
+
+LocalNoInlineNoReturn(void throwAssertionError(const char * s, const Pos & pos))
+{
+ throw AssertionError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwUndefinedVarError(const char * s, const string & s1, const Pos & pos))
+{
+ throw UndefinedVarError(format(s) % s1 % pos);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const string & s2))
+{
+ e.addPrefix(format(s) % s2);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const ExprLambda & fun, const Pos & pos))
+{
+ e.addPrefix(format(s) % fun.showNamePos() % pos);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const string & s2, const Pos & pos))
+{
+ e.addPrefix(format(s) % s2 % pos);
+}
+
+
+void mkString(Value & v, const char * s)
+{
+ mkStringNoCopy(v, dupString(s));
+}
+
+
+Value & mkString(Value & v, const string & s, const PathSet & context)
+{
+ mkString(v, s.c_str());
+ if (!context.empty()) {
+ unsigned int n = 0;
+ v.string.context = (const char * *)
+ allocBytes((context.size() + 1) * sizeof(char *));
+ for (auto & i : context)
+ v.string.context[n++] = dupString(i.c_str());
+ v.string.context[n] = 0;
+ }
+ return v;
+}
+
+
+void mkPath(Value & v, const char * s)
+{
+ mkPathNoCopy(v, dupString(s));
+}
+
+
+inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
+{
+ for (unsigned int l = var.level; l; --l, env = env->up) ;
+
+ if (!var.fromWith) return env->values[var.displ];
+
+ while (1) {
+ if (!env->haveWithAttrs) {
+ if (noEval) return 0;
+ Value * v = allocValue();
+ evalAttrs(*env->up, (Expr *) env->values[0], *v);
+ env->values[0] = v;
+ env->haveWithAttrs = true;
+ }
+ Bindings::iterator j = env->values[0]->attrs->find(var.name);
+ if (j != env->values[0]->attrs->end()) {
+ if (countCalls && j->pos) attrSelects[*j->pos]++;
+ return j->value;
+ }
+ if (!env->prevWith)
+ throwUndefinedVarError("undefined variable ‘%1%’ at %2%", var.name, var.pos);
+ for (unsigned int l = env->prevWith; l; --l, env = env->up) ;
+ }
+}
+
+
+Value * EvalState::allocValue()
+{
+ nrValues++;
+ return (Value *) allocBytes(sizeof(Value));
+}
+
+
+Env & EvalState::allocEnv(unsigned int size)
+{
+ assert(size <= std::numeric_limits<decltype(Env::size)>::max());
+
+ nrEnvs++;
+ nrValuesInEnvs += size;
+ Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
+ env->size = size;
+
+    /* Clear the values, because maybeThunk() and the `with' handling in lookupVar() expect this. */
+ for (unsigned i = 0; i < size; ++i)
+ env->values[i] = 0;
+
+ return *env;
+}
+
+
+void EvalState::mkList(Value & v, unsigned int size)
+{
+ clearValue(v);
+ if (size == 1)
+ v.type = tList1;
+ else if (size == 2)
+ v.type = tList2;
+ else {
+ v.type = tListN;
+ v.bigList.size = size;
+ v.bigList.elems = size ? (Value * *) allocBytes(size * sizeof(Value *)) : 0;
+ }
+ nrListElems += size;
+}
+
+
+unsigned long nrThunks = 0;
+
+static inline void mkThunk(Value & v, Env & env, Expr * expr)
+{
+ v.type = tThunk;
+ v.thunk.env = &env;
+ v.thunk.expr = expr;
+ nrThunks++;
+}
+
+
+void EvalState::mkThunk_(Value & v, Expr * expr)
+{
+ mkThunk(v, baseEnv, expr);
+}
+
+
+void EvalState::mkPos(Value & v, Pos * pos)
+{
+ if (pos) {
+ mkAttrs(v, 3);
+ mkString(*allocAttr(v, sFile), pos->file);
+ mkInt(*allocAttr(v, sLine), pos->line);
+ mkInt(*allocAttr(v, sColumn), pos->column);
+ v.attrs->sort();
+ } else
+ mkNull(v);
+}
+
+
+/* Create a thunk for the delayed computation of the given expression
+ in the given environment. But if the expression is a variable,
+ then look it up right away. This significantly reduces the number
+ of thunks allocated. */
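+/* Illustrative example: in a list like `[ x x x ]' each element is a
+   variable, so maybeThunk() can return the value already bound to `x'
+   instead of allocating three fresh thunks. */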
+Value * Expr::maybeThunk(EvalState & state, Env & env)
+{
+ Value * v = state.allocValue();
+ mkThunk(*v, env, this);
+ return v;
+}
+
+
+unsigned long nrAvoided = 0;
+
+Value * ExprVar::maybeThunk(EvalState & state, Env & env)
+{
+ Value * v = state.lookupVar(&env, *this, true);
+ /* The value might not be initialised in the environment yet.
+ In that case, ignore it. */
+ if (v) { nrAvoided++; return v; }
+ return Expr::maybeThunk(state, env);
+}
+
+
+Value * ExprString::maybeThunk(EvalState & state, Env & env)
+{
+ nrAvoided++;
+ return &v;
+}
+
+Value * ExprInt::maybeThunk(EvalState & state, Env & env)
+{
+ nrAvoided++;
+ return &v;
+}
+
+Value * ExprFloat::maybeThunk(EvalState & state, Env & env)
+{
+ nrAvoided++;
+ return &v;
+}
+
+Value * ExprPath::maybeThunk(EvalState & state, Env & env)
+{
+ nrAvoided++;
+ return &v;
+}
+
+
+void EvalState::evalFile(const Path & path, Value & v)
+{
+ FileEvalCache::iterator i;
+ if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) {
+ v = i->second;
+ return;
+ }
+
+ Path path2 = resolveExprPath(path);
+ if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) {
+ v = i->second;
+ return;
+ }
+
+ Activity act(*logger, lvlTalkative, format("evaluating file ‘%1%’") % path2);
+ Expr * e = parseExprFromFile(checkSourcePath(path2));
+ try {
+ eval(e, v);
+ } catch (Error & e) {
+ addErrorPrefix(e, "while evaluating the file ‘%1%’:\n", path2);
+ throw;
+ }
+
+ fileEvalCache[path2] = v;
+ if (path != path2) fileEvalCache[path] = v;
+}
+
+
+void EvalState::resetFileCache()
+{
+ fileEvalCache.clear();
+}
+
+
+void EvalState::eval(Expr * e, Value & v)
+{
+ e->eval(*this, baseEnv, v);
+}
+
+
+inline bool EvalState::evalBool(Env & env, Expr * e)
+{
+ Value v;
+ e->eval(*this, env, v);
+ if (v.type != tBool)
+ throwTypeError("value is %1% while a Boolean was expected", v);
+ return v.boolean;
+}
+
+
+inline bool EvalState::evalBool(Env & env, Expr * e, const Pos & pos)
+{
+ Value v;
+ e->eval(*this, env, v);
+ if (v.type != tBool)
+ throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos);
+ return v.boolean;
+}
+
+
+inline void EvalState::evalAttrs(Env & env, Expr * e, Value & v)
+{
+ e->eval(*this, env, v);
+ if (v.type != tAttrs)
+ throwTypeError("value is %1% while a set was expected", v);
+}
+
+
+void Expr::eval(EvalState & state, Env & env, Value & v)
+{
+ abort();
+}
+
+
+void ExprInt::eval(EvalState & state, Env & env, Value & v)
+{
+ v = this->v;
+}
+
+
+void ExprFloat::eval(EvalState & state, Env & env, Value & v)
+{
+ v = this->v;
+}
+
+void ExprString::eval(EvalState & state, Env & env, Value & v)
+{
+ v = this->v;
+}
+
+
+void ExprPath::eval(EvalState & state, Env & env, Value & v)
+{
+ v = this->v;
+}
+
+
+void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
+{
+ state.mkAttrs(v, attrs.size() + dynamicAttrs.size());
+ Env *dynamicEnv = &env;
+
+ if (recursive) {
+ /* Create a new environment that contains the attributes in
+ this `rec'. */
+ Env & env2(state.allocEnv(attrs.size()));
+ env2.up = &env;
+ dynamicEnv = &env2;
+
+ AttrDefs::iterator overrides = attrs.find(state.sOverrides);
+ bool hasOverrides = overrides != attrs.end();
+
+ /* The recursive attributes are evaluated in the new
+ environment, while the inherited attributes are evaluated
+ in the original environment. */
+ unsigned int displ = 0;
+ for (auto & i : attrs) {
+ Value * vAttr;
+ if (hasOverrides && !i.second.inherited) {
+ vAttr = state.allocValue();
+ mkThunk(*vAttr, env2, i.second.e);
+ } else
+ vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
+ env2.values[displ++] = vAttr;
+ v.attrs->push_back(Attr(i.first, vAttr, &i.second.pos));
+ }
+
+ /* If the rec contains an attribute called `__overrides', then
+ evaluate it, and add the attributes in that set to the rec.
+ This allows overriding of recursive attributes, which is
+ otherwise not possible. (You can use the // operator to
+ replace an attribute, but other attributes in the rec will
+ still reference the original value, because that value has
+ been substituted into the bodies of the other attributes.
+ Hence we need __overrides.) */
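+        /* Illustrative example:
+               rec { x = 1; y = x; __overrides = { x = 2; }; }
+           yields a set in which both `x' and `y' evaluate to 2, whereas
+           `// { x = 2; }' would only replace `x'. */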
+ if (hasOverrides) {
+ Value * vOverrides = (*v.attrs)[overrides->second.displ].value;
+ state.forceAttrs(*vOverrides);
+ Bindings * newBnds = state.allocBindings(v.attrs->size() + vOverrides->attrs->size());
+ for (auto & i : *v.attrs)
+ newBnds->push_back(i);
+ for (auto & i : *vOverrides->attrs) {
+ AttrDefs::iterator j = attrs.find(i.name);
+ if (j != attrs.end()) {
+ (*newBnds)[j->second.displ] = i;
+ env2.values[j->second.displ] = i.value;
+ } else
+ newBnds->push_back(i);
+ }
+ newBnds->sort();
+ v.attrs = newBnds;
+ }
+ }
+
+ else
+ for (auto & i : attrs)
+ v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos));
+
+ /* Dynamic attrs apply *after* rec and __overrides. */
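+    /* Illustrative: `{ ${"a" + "b"} = 1; ${null} = 2; }' evaluates to
+       `{ ab = 1; }'; a null attribute name simply skips the binding. */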
+ for (auto & i : dynamicAttrs) {
+ Value nameVal;
+ i.nameExpr->eval(state, *dynamicEnv, nameVal);
+ state.forceValue(nameVal, i.pos);
+ if (nameVal.type == tNull)
+ continue;
+ state.forceStringNoCtx(nameVal);
+ Symbol nameSym = state.symbols.create(nameVal.string.s);
+ Bindings::iterator j = v.attrs->find(nameSym);
+ if (j != v.attrs->end())
+ throwEvalError("dynamic attribute ‘%1%’ at %2% already defined at %3%", nameSym, i.pos, *j->pos);
+
+ i.valueExpr->setName(nameSym);
+ /* Keep sorted order so find can catch duplicates */
+ v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
+ v.attrs->sort(); // FIXME: inefficient
+ }
+}
+
+
+void ExprLet::eval(EvalState & state, Env & env, Value & v)
+{
+ /* Create a new environment that contains the attributes in this
+ `let'. */
+ Env & env2(state.allocEnv(attrs->attrs.size()));
+ env2.up = &env;
+
+ /* The recursive attributes are evaluated in the new environment,
+ while the inherited attributes are evaluated in the original
+ environment. */
+ unsigned int displ = 0;
+ for (auto & i : attrs->attrs)
+ env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
+
+ body->eval(state, env2, v);
+}
+
+
+void ExprList::eval(EvalState & state, Env & env, Value & v)
+{
+ state.mkList(v, elems.size());
+ for (unsigned int n = 0; n < elems.size(); ++n)
+ v.listElems()[n] = elems[n]->maybeThunk(state, env);
+}
+
+
+void ExprVar::eval(EvalState & state, Env & env, Value & v)
+{
+ Value * v2 = state.lookupVar(&env, *this, false);
+ state.forceValue(*v2, pos);
+ v = *v2;
+}
+
+
+static string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPath)
+{
+ std::ostringstream out;
+ bool first = true;
+ for (auto & i : attrPath) {
+ if (!first) out << '.'; else first = false;
+ try {
+ out << getName(i, state, env);
+ } catch (Error & e) {
+ assert(!i.symbol.set());
+ out << "\"${" << *i.expr << "}\"";
+ }
+ }
+ return out.str();
+}
+
+
+unsigned long nrLookups = 0;
+
+void ExprSelect::eval(EvalState & state, Env & env, Value & v)
+{
+ Value vTmp;
+ Pos * pos2 = 0;
+ Value * vAttrs = &vTmp;
+
+ e->eval(state, env, vTmp);
+
+ try {
+
+ for (auto & i : attrPath) {
+ nrLookups++;
+ Bindings::iterator j;
+ Symbol name = getName(i, state, env);
+ if (def) {
+ state.forceValue(*vAttrs, pos);
+ if (vAttrs->type != tAttrs ||
+ (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end())
+ {
+ def->eval(state, env, v);
+ return;
+ }
+ } else {
+ state.forceAttrs(*vAttrs, pos);
+ if ((j = vAttrs->attrs->find(name)) == vAttrs->attrs->end())
+ throwEvalError("attribute ‘%1%’ missing, at %2%", name, pos);
+ }
+ vAttrs = j->value;
+ pos2 = j->pos;
+ if (state.countCalls && pos2) state.attrSelects[*pos2]++;
+ }
+
+        state.forceValue(*vAttrs, pos2 != NULL ? *pos2 : this->pos);
+
+ } catch (Error & e) {
+ if (pos2 && pos2->file != state.sDerivationNix)
+ addErrorPrefix(e, "while evaluating the attribute ‘%1%’ at %2%:\n",
+ showAttrPath(state, env, attrPath), *pos2);
+ throw;
+ }
+
+ v = *vAttrs;
+}
+
+
+void ExprOpHasAttr::eval(EvalState & state, Env & env, Value & v)
+{
+ Value vTmp;
+ Value * vAttrs = &vTmp;
+
+ e->eval(state, env, vTmp);
+
+ for (auto & i : attrPath) {
+ state.forceValue(*vAttrs);
+ Bindings::iterator j;
+ Symbol name = getName(i, state, env);
+ if (vAttrs->type != tAttrs ||
+ (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end())
+ {
+ mkBool(v, false);
+ return;
+ } else {
+ vAttrs = j->value;
+ }
+ }
+
+ mkBool(v, true);
+}
+
+
+void ExprLambda::eval(EvalState & state, Env & env, Value & v)
+{
+ v.type = tLambda;
+ v.lambda.env = &env;
+ v.lambda.fun = this;
+}
+
+
+void ExprApp::eval(EvalState & state, Env & env, Value & v)
+{
+ /* FIXME: vFun prevents GCC from doing tail call optimisation. */
+ Value vFun;
+ e1->eval(state, env, vFun);
+ state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
+}
+
+
+void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
+{
+ /* Figure out the number of arguments still needed. */
+ unsigned int argsDone = 0;
+ Value * primOp = &fun;
+ while (primOp->type == tPrimOpApp) {
+ argsDone++;
+ primOp = primOp->primOpApp.left;
+ }
+ assert(primOp->type == tPrimOp);
+ unsigned int arity = primOp->primOp->arity;
+ unsigned int argsLeft = arity - argsDone;
+
+ if (argsLeft == 1) {
+ /* We have all the arguments, so call the primop. */
+
+ /* Put all the arguments in an array. */
+ Value * vArgs[arity];
+ unsigned int n = arity - 1;
+ vArgs[n--] = &arg;
+ for (Value * arg = &fun; arg->type == tPrimOpApp; arg = arg->primOpApp.left)
+ vArgs[n--] = arg->primOpApp.right;
+
+ /* And call the primop. */
+ nrPrimOpCalls++;
+ if (countCalls) primOpCalls[primOp->primOp->name]++;
+ primOp->primOp->fun(*this, pos, vArgs, v);
+ } else {
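+        /* Not all arguments are available yet, so build a partial
+           application (tPrimOpApp) that records `fun' and `arg' for a
+           later call. */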
+ Value * fun2 = allocValue();
+ *fun2 = fun;
+ v.type = tPrimOpApp;
+ v.primOpApp.left = fun2;
+ v.primOpApp.right = &arg;
+ }
+}
+
+
+void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
+{
+ if (fun.type == tPrimOp || fun.type == tPrimOpApp) {
+ callPrimOp(fun, arg, v, pos);
+ return;
+ }
+
+ if (fun.type == tAttrs) {
+ auto found = fun.attrs->find(sFunctor);
+ if (found != fun.attrs->end()) {
+ /* fun may be allocated on the stack of the calling function,
+ * but for functors we may keep a reference, so heap-allocate
+ * a copy and use that instead.
+ */
+ auto & fun2 = *allocValue();
+ fun2 = fun;
+ /* !!! Should we use the attr pos here? */
+ forceValue(*found->value, pos);
+ Value v2;
+ callFunction(*found->value, fun2, v2, pos);
+ forceValue(v2, pos);
+ return callFunction(v2, arg, v, pos);
+ }
+ }
+
+ if (fun.type != tLambda)
+ throwTypeError("attempt to call something which is not a function but %1%, at %2%", fun, pos);
+
+ ExprLambda & lambda(*fun.lambda.fun);
+
+ unsigned int size =
+ (lambda.arg.empty() ? 0 : 1) +
+ (lambda.matchAttrs ? lambda.formals->formals.size() : 0);
+ Env & env2(allocEnv(size));
+ env2.up = fun.lambda.env;
+
+ unsigned int displ = 0;
+
+ if (!lambda.matchAttrs)
+ env2.values[displ++] = &arg;
+
+ else {
+ forceAttrs(arg, pos);
+
+ if (!lambda.arg.empty())
+ env2.values[displ++] = &arg;
+
+ /* For each formal argument, get the actual argument. If
+ there is no matching actual argument but the formal
+ argument has a default, use the default. */
+ unsigned int attrsUsed = 0;
+ for (auto & i : lambda.formals->formals) {
+ Bindings::iterator j = arg.attrs->find(i.name);
+ if (j == arg.attrs->end()) {
+ if (!i.def) throwTypeError("%1% called without required argument ‘%2%’, at %3%",
+ lambda, i.name, pos);
+ env2.values[displ++] = i.def->maybeThunk(*this, env2);
+ } else {
+ attrsUsed++;
+ env2.values[displ++] = j->value;
+ }
+ }
+
+ /* Check that each actual argument is listed as a formal
+ argument (unless the attribute match specifies a `...'). */
+ if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) {
+ /* Nope, so show the first unexpected argument to the
+ user. */
+ for (auto & i : *arg.attrs)
+ if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
+ throwTypeError("%1% called with unexpected argument ‘%2%’, at %3%", lambda, i.name, pos);
+ abort(); // can't happen
+ }
+ }
+
+ nrFunctionCalls++;
+ if (countCalls) incrFunctionCall(&lambda);
+
+ /* Evaluate the body. This is conditional on showTrace, because
+ catching exceptions makes this function not tail-recursive. */
+ if (settings.showTrace)
+ try {
+ lambda.body->eval(*this, env2, v);
+ } catch (Error & e) {
+ addErrorPrefix(e, "while evaluating %1%, called from %2%:\n", lambda, pos);
+ throw;
+ }
+ else
+ fun.lambda.fun->body->eval(*this, env2, v);
+}
+
+
+// Lifted out of callFunction() because it creates a temporary that
+// prevents tail-call optimisation.
+void EvalState::incrFunctionCall(ExprLambda * fun)
+{
+ functionCalls[fun]++;
+}
+
+
+void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
+{
+ forceValue(fun);
+
+ if (fun.type == tAttrs) {
+ auto found = fun.attrs->find(sFunctor);
+ if (found != fun.attrs->end()) {
+ forceValue(*found->value);
+ Value * v = allocValue();
+ callFunction(*found->value, fun, *v, noPos);
+ forceValue(*v);
+ return autoCallFunction(args, *v, res);
+ }
+ }
+
+ if (fun.type != tLambda || !fun.lambda.fun->matchAttrs) {
+ res = fun;
+ return;
+ }
+
+ Value * actualArgs = allocValue();
+ mkAttrs(*actualArgs, fun.lambda.fun->formals->formals.size());
+
+ for (auto & i : fun.lambda.fun->formals->formals) {
+ Bindings::iterator j = args.find(i.name);
+ if (j != args.end())
+ actualArgs->attrs->push_back(*j);
+ else if (!i.def)
+ throwTypeError("cannot auto-call a function that has an argument without a default value (‘%1%’)", i.name);
+ }
+
+ actualArgs->attrs->sort();
+
+ callFunction(fun, *actualArgs, res, noPos);
+}
+
+
+void ExprWith::eval(EvalState & state, Env & env, Value & v)
+{
+ Env & env2(state.allocEnv(1));
+ env2.up = &env;
+ env2.prevWith = prevWith;
+ env2.haveWithAttrs = false;
+ env2.values[0] = (Value *) attrs;
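+    /* Note that the attrs expression of the `with' is stored unevaluated
+       (hence the cast); lookupVar() evaluates it lazily on first use and
+       then sets haveWithAttrs. */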
+
+ body->eval(state, env2, v);
+}
+
+
+void ExprIf::eval(EvalState & state, Env & env, Value & v)
+{
+ (state.evalBool(env, cond) ? then : else_)->eval(state, env, v);
+}
+
+
+void ExprAssert::eval(EvalState & state, Env & env, Value & v)
+{
+ if (!state.evalBool(env, cond, pos))
+ throwAssertionError("assertion failed at %1%", pos);
+ body->eval(state, env, v);
+}
+
+
+void ExprOpNot::eval(EvalState & state, Env & env, Value & v)
+{
+ mkBool(v, !state.evalBool(env, e));
+}
+
+
+void ExprOpEq::eval(EvalState & state, Env & env, Value & v)
+{
+ Value v1; e1->eval(state, env, v1);
+ Value v2; e2->eval(state, env, v2);
+ mkBool(v, state.eqValues(v1, v2));
+}
+
+
+void ExprOpNEq::eval(EvalState & state, Env & env, Value & v)
+{
+ Value v1; e1->eval(state, env, v1);
+ Value v2; e2->eval(state, env, v2);
+ mkBool(v, !state.eqValues(v1, v2));
+}
+
+
+void ExprOpAnd::eval(EvalState & state, Env & env, Value & v)
+{
+ mkBool(v, state.evalBool(env, e1, pos) && state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpOr::eval(EvalState & state, Env & env, Value & v)
+{
+ mkBool(v, state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpImpl::eval(EvalState & state, Env & env, Value & v)
+{
+ mkBool(v, !state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v)
+{
+ Value v1, v2;
+ state.evalAttrs(env, e1, v1);
+ state.evalAttrs(env, e2, v2);
+
+ state.nrOpUpdates++;
+
+ if (v1.attrs->size() == 0) { v = v2; return; }
+ if (v2.attrs->size() == 0) { v = v1; return; }
+
+ state.mkAttrs(v, v1.attrs->size() + v2.attrs->size());
+
+ /* Merge the sets, preferring values from the second set. Make
+ sure to keep the resulting vector in sorted order. */
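+    /* Illustrative: `{ a = 1; b = 2; } // { b = 3; c = 4; }' yields
+       `{ a = 1; b = 3; c = 4; }'. */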
+ Bindings::iterator i = v1.attrs->begin();
+ Bindings::iterator j = v2.attrs->begin();
+
+ while (i != v1.attrs->end() && j != v2.attrs->end()) {
+ if (i->name == j->name) {
+ v.attrs->push_back(*j);
+ ++i; ++j;
+ }
+ else if (i->name < j->name)
+ v.attrs->push_back(*i++);
+ else
+ v.attrs->push_back(*j++);
+ }
+
+ while (i != v1.attrs->end()) v.attrs->push_back(*i++);
+ while (j != v2.attrs->end()) v.attrs->push_back(*j++);
+
+ state.nrOpUpdateValuesCopied += v.attrs->size();
+}
+
+
+void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v)
+{
+ Value v1; e1->eval(state, env, v1);
+ Value v2; e2->eval(state, env, v2);
+ Value * lists[2] = { &v1, &v2 };
+ state.concatLists(v, 2, lists, pos);
+}
+
+
+void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos)
+{
+ nrListConcats++;
+
+ Value * nonEmpty = 0;
+ unsigned int len = 0;
+ for (unsigned int n = 0; n < nrLists; ++n) {
+ forceList(*lists[n], pos);
+ unsigned int l = lists[n]->listSize();
+ len += l;
+ if (l) nonEmpty = lists[n];
+ }
+
+ if (nonEmpty && len == nonEmpty->listSize()) {
+ v = *nonEmpty;
+ return;
+ }
+
+ mkList(v, len);
+ auto out = v.listElems();
+ for (unsigned int n = 0, pos = 0; n < nrLists; ++n) {
+ unsigned int l = lists[n]->listSize();
+ memcpy(out + pos, lists[n]->listElems(), l * sizeof(Value *));
+ pos += l;
+ }
+}
+
+
+void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
+{
+ PathSet context;
+ std::ostringstream s;
+ NixInt n = 0;
+ NixFloat nf = 0;
+
+ bool first = !forceString;
+ ValueType firstType = tString;
+
+ for (auto & i : *es) {
+ Value vTmp;
+ i->eval(state, env, vTmp);
+
+ /* If the first element is a path, then the result will also
+ be a path, we don't copy anything (yet - that's done later,
+ since paths are copied when they are used in a derivation),
+ and none of the strings are allowed to have contexts. */
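+        /* Illustrative: `./foo + "/bar"' yields a path, `1 + 1.5' yields
+           the float 2.5, and `"a" + "b"' yields a string. */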
+ if (first) {
+ firstType = vTmp.type;
+ first = false;
+ }
+
+ if (firstType == tInt) {
+ if (vTmp.type == tInt) {
+ n += vTmp.integer;
+ } else if (vTmp.type == tFloat) {
+                // Upgrade the type from int to float.
+ firstType = tFloat;
+ nf = n;
+ nf += vTmp.fpoint;
+ } else
+ throwEvalError("cannot add %1% to an integer, at %2%", showType(vTmp), pos);
+ } else if (firstType == tFloat) {
+ if (vTmp.type == tInt) {
+ nf += vTmp.integer;
+ } else if (vTmp.type == tFloat) {
+ nf += vTmp.fpoint;
+ } else
+ throwEvalError("cannot add %1% to a float, at %2%", showType(vTmp), pos);
+ } else
+ s << state.coerceToString(pos, vTmp, context, false, firstType == tString);
+ }
+
+ if (firstType == tInt)
+ mkInt(v, n);
+ else if (firstType == tFloat)
+ mkFloat(v, nf);
+ else if (firstType == tPath) {
+ if (!context.empty())
+ throwEvalError("a string that refers to a store path cannot be appended to a path, at %1%", pos);
+ auto path = canonPath(s.str());
+ mkPath(v, path.c_str());
+ } else
+ mkString(v, s.str(), context);
+}
+
+
+void ExprPos::eval(EvalState & state, Env & env, Value & v)
+{
+ state.mkPos(v, &pos);
+}
+
+
+void EvalState::forceValueDeep(Value & v)
+{
+ std::set<const Value *> seen;
+
+ std::function<void(Value & v)> recurse;
+
+ recurse = [&](Value & v) {
+ if (seen.find(&v) != seen.end()) return;
+ seen.insert(&v);
+
+ forceValue(v);
+
+ if (v.type == tAttrs) {
+ for (auto & i : *v.attrs)
+ try {
+ recurse(*i.value);
+ } catch (Error & e) {
+ addErrorPrefix(e, "while evaluating the attribute ‘%1%’ at %2%:\n", i.name, *i.pos);
+ throw;
+ }
+ }
+
+ else if (v.isList()) {
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ recurse(*v.listElems()[n]);
+ }
+ };
+
+ recurse(v);
+}
+
+
+NixInt EvalState::forceInt(Value & v, const Pos & pos)
+{
+ forceValue(v, pos);
+ if (v.type != tInt)
+ throwTypeError("value is %1% while an integer was expected, at %2%", v, pos);
+ return v.integer;
+}
+
+
+NixFloat EvalState::forceFloat(Value & v, const Pos & pos)
+{
+ forceValue(v, pos);
+ if (v.type == tInt)
+ return v.integer;
+ else if (v.type != tFloat)
+ throwTypeError("value is %1% while a float was expected, at %2%", v, pos);
+ return v.fpoint;
+}
+
+
+bool EvalState::forceBool(Value & v, const Pos & pos)
+{
+ forceValue(v);
+ if (v.type != tBool)
+ throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos);
+ return v.boolean;
+}
+
+
+bool EvalState::isFunctor(Value & fun)
+{
+ return fun.type == tAttrs && fun.attrs->find(sFunctor) != fun.attrs->end();
+}
+
+
+void EvalState::forceFunction(Value & v, const Pos & pos)
+{
+ forceValue(v);
+ if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp && !isFunctor(v))
+ throwTypeError("value is %1% while a function was expected, at %2%", v, pos);
+}
+
+
+string EvalState::forceString(Value & v, const Pos & pos)
+{
+ forceValue(v, pos);
+ if (v.type != tString) {
+ if (pos)
+ throwTypeError("value is %1% while a string was expected, at %2%", v, pos);
+ else
+ throwTypeError("value is %1% while a string was expected", v);
+ }
+ return string(v.string.s);
+}
+
+
+void copyContext(const Value & v, PathSet & context)
+{
+ if (v.string.context)
+ for (const char * * p = v.string.context; *p; ++p)
+ context.insert(*p);
+}
+
+
+string EvalState::forceString(Value & v, PathSet & context, const Pos & pos)
+{
+ string s = forceString(v, pos);
+ copyContext(v, context);
+ return s;
+}
+
+
+string EvalState::forceStringNoCtx(Value & v, const Pos & pos)
+{
+ string s = forceString(v, pos);
+ if (v.string.context) {
+ if (pos)
+ throwEvalError("the string ‘%1%’ is not allowed to refer to a store path (such as ‘%2%’), at %3%",
+ v.string.s, v.string.context[0], pos);
+ else
+ throwEvalError("the string ‘%1%’ is not allowed to refer to a store path (such as ‘%2%’)",
+ v.string.s, v.string.context[0]);
+ }
+ return s;
+}
+
+
+bool EvalState::isDerivation(Value & v)
+{
+ if (v.type != tAttrs) return false;
+ Bindings::iterator i = v.attrs->find(sType);
+ if (i == v.attrs->end()) return false;
+ forceValue(*i->value);
+ if (i->value->type != tString) return false;
+ return strcmp(i->value->string.s, "derivation") == 0;
+}
+
+
+string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
+ bool coerceMore, bool copyToStore)
+{
+ forceValue(v);
+
+ string s;
+
+ if (v.type == tString) {
+ copyContext(v, context);
+ return v.string.s;
+ }
+
+ if (v.type == tPath) {
+ Path path(canonPath(v.path));
+ return copyToStore ? copyPathToStore(context, path) : path;
+ }
+
+ if (v.type == tAttrs) {
+ auto i = v.attrs->find(sToString);
+ if (i != v.attrs->end()) {
+ forceValue(*i->value, pos);
+ Value v1;
+ callFunction(*i->value, v, v1, pos);
+ return coerceToString(pos, v1, context, coerceMore, copyToStore);
+ }
+ i = v.attrs->find(sOutPath);
+ if (i == v.attrs->end()) throwTypeError("cannot coerce a set to a string, at %1%", pos);
+ return coerceToString(pos, *i->value, context, coerceMore, copyToStore);
+ }
+
+ if (v.type == tExternal)
+ return v.external->coerceToString(pos, context, coerceMore, copyToStore);
+
+ if (coerceMore) {
+
+ /* Note that `false' is represented as an empty string for
+ shell scripting convenience, just like `null'. */
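+        /* Illustrative: with coerceMore, `toString false' is "",
+           `toString true' is "1" and `toString [ 1 2 ]' is "1 2". */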
+ if (v.type == tBool && v.boolean) return "1";
+ if (v.type == tBool && !v.boolean) return "";
+ if (v.type == tInt) return std::to_string(v.integer);
+ if (v.type == tFloat) return std::to_string(v.fpoint);
+ if (v.type == tNull) return "";
+
+ if (v.isList()) {
+ string result;
+ for (unsigned int n = 0; n < v.listSize(); ++n) {
+ result += coerceToString(pos, *v.listElems()[n],
+ context, coerceMore, copyToStore);
+ if (n < v.listSize() - 1
+ /* !!! not quite correct */
+ && (!v.listElems()[n]->isList() || v.listElems()[n]->listSize() != 0))
+ result += " ";
+ }
+ return result;
+ }
+ }
+
+ throwTypeError("cannot coerce %1% to a string, at %2%", v, pos);
+}
+
+
+string EvalState::copyPathToStore(PathSet & context, const Path & path)
+{
+ if (nix::isDerivation(path))
+ throwEvalError("file names are not allowed to end in ‘%1%’", drvExtension);
+
+ Path dstPath;
+ if (srcToStore[path] != "")
+ dstPath = srcToStore[path];
+ else {
+ dstPath = settings.readOnlyMode
+ ? store->computeStorePathForPath(checkSourcePath(path)).first
+ : store->addToStore(baseNameOf(path), checkSourcePath(path), true, htSHA256, defaultPathFilter, repair);
+ srcToStore[path] = dstPath;
+ printMsg(lvlChatty, format("copied source ‘%1%’ -> ‘%2%’")
+ % path % dstPath);
+ }
+
+ context.insert(dstPath);
+ return dstPath;
+}
+
+
+Path EvalState::coerceToPath(const Pos & pos, Value & v, PathSet & context)
+{
+ string path = coerceToString(pos, v, context, false, false);
+ if (path == "" || path[0] != '/')
+ throwEvalError("string ‘%1%’ doesn't represent an absolute path, at %2%", path, pos);
+ return path;
+}
+
+
+bool EvalState::eqValues(Value & v1, Value & v2)
+{
+ forceValue(v1);
+ forceValue(v2);
+
+ /* !!! Hack to support some old broken code that relies on pointer
+ equality tests between sets. (Specifically, builderDefs calls
+ uniqList on a list of sets.) Will remove this eventually. */
+ if (&v1 == &v2) return true;
+
+ // Special case type-compatibility between float and int
+ if (v1.type == tInt && v2.type == tFloat)
+ return v1.integer == v2.fpoint;
+ if (v1.type == tFloat && v2.type == tInt)
+ return v1.fpoint == v2.integer;
+
+ // All other types are not compatible with each other.
+ if (v1.type != v2.type) return false;
+
+ switch (v1.type) {
+
+ case tInt:
+ return v1.integer == v2.integer;
+
+ case tBool:
+ return v1.boolean == v2.boolean;
+
+ case tString:
+ return strcmp(v1.string.s, v2.string.s) == 0;
+
+ case tPath:
+ return strcmp(v1.path, v2.path) == 0;
+
+ case tNull:
+ return true;
+
+ case tList1:
+ case tList2:
+ case tListN:
+ if (v1.listSize() != v2.listSize()) return false;
+ for (unsigned int n = 0; n < v1.listSize(); ++n)
+ if (!eqValues(*v1.listElems()[n], *v2.listElems()[n])) return false;
+ return true;
+
+ case tAttrs: {
+ /* If both sets denote a derivation (type = "derivation"),
+ then compare their outPaths. */
+ if (isDerivation(v1) && isDerivation(v2)) {
+ Bindings::iterator i = v1.attrs->find(sOutPath);
+ Bindings::iterator j = v2.attrs->find(sOutPath);
+ if (i != v1.attrs->end() && j != v2.attrs->end())
+ return eqValues(*i->value, *j->value);
+ }
+
+ if (v1.attrs->size() != v2.attrs->size()) return false;
+
+ /* Otherwise, compare the attributes one by one. */
+ Bindings::iterator i, j;
+ for (i = v1.attrs->begin(), j = v2.attrs->begin(); i != v1.attrs->end(); ++i, ++j)
+ if (i->name != j->name || !eqValues(*i->value, *j->value))
+ return false;
+
+ return true;
+ }
+
+ /* Functions are incomparable. */
+ case tLambda:
+ case tPrimOp:
+ case tPrimOpApp:
+ return false;
+
+ case tExternal:
+ return *v1.external == *v2.external;
+
+ case tFloat:
+ return v1.fpoint == v2.fpoint;
+
+ default:
+ throwEvalError("cannot compare %1% with %2%", showType(v1), showType(v2));
+ }
+}
+
+
+void EvalState::printStats()
+{
+ bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0";
+ Verbosity v = showStats ? lvlInfo : lvlDebug;
+ printMsg(v, "evaluation statistics:");
+
+ struct rusage buf;
+ getrusage(RUSAGE_SELF, &buf);
+ float cpuTime = buf.ru_utime.tv_sec + ((float) buf.ru_utime.tv_usec / 1000000);
+
+ uint64_t bEnvs = nrEnvs * sizeof(Env) + nrValuesInEnvs * sizeof(Value *);
+ uint64_t bLists = nrListElems * sizeof(Value *);
+ uint64_t bValues = nrValues * sizeof(Value);
+ uint64_t bAttrsets = nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr);
+
+ printMsg(v, format(" time elapsed: %1%") % cpuTime);
+ printMsg(v, format(" size of a value: %1%") % sizeof(Value));
+ printMsg(v, format(" size of an attr: %1%") % sizeof(Attr));
+ printMsg(v, format(" environments allocated: %1% (%2% bytes)") % nrEnvs % bEnvs);
+ printMsg(v, format(" list elements: %1% (%2% bytes)") % nrListElems % bLists);
+ printMsg(v, format(" list concatenations: %1%") % nrListConcats);
+ printMsg(v, format(" values allocated: %1% (%2% bytes)") % nrValues % bValues);
+ printMsg(v, format(" sets allocated: %1% (%2% bytes)") % nrAttrsets % bAttrsets);
+ printMsg(v, format(" right-biased unions: %1%") % nrOpUpdates);
+ printMsg(v, format(" values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied);
+ printMsg(v, format(" symbols in symbol table: %1%") % symbols.size());
+ printMsg(v, format(" size of symbol table: %1%") % symbols.totalSize());
+ printMsg(v, format(" number of thunks: %1%") % nrThunks);
+ printMsg(v, format(" number of thunks avoided: %1%") % nrAvoided);
+ printMsg(v, format(" number of attr lookups: %1%") % nrLookups);
+ printMsg(v, format(" number of primop calls: %1%") % nrPrimOpCalls);
+ printMsg(v, format(" number of function calls: %1%") % nrFunctionCalls);
+ printMsg(v, format(" total allocations: %1% bytes") % (bEnvs + bLists + bValues + bAttrsets));
+
+#if HAVE_BOEHMGC
+ GC_word heapSize, totalBytes;
+ GC_get_heap_usage_safe(&heapSize, 0, 0, 0, &totalBytes);
+ printMsg(v, format(" current Boehm heap size: %1% bytes") % heapSize);
+ printMsg(v, format(" total Boehm heap allocations: %1% bytes") % totalBytes);
+#endif
+
+ if (countCalls) {
+ v = lvlInfo;
+
+ printMsg(v, format("calls to %1% primops:") % primOpCalls.size());
+ typedef std::multimap<unsigned int, Symbol> PrimOpCalls_;
+ PrimOpCalls_ primOpCalls_;
+ for (auto & i : primOpCalls)
+ primOpCalls_.insert(std::pair<unsigned int, Symbol>(i.second, i.first));
+ for (auto i = primOpCalls_.rbegin(); i != primOpCalls_.rend(); ++i)
+ printMsg(v, format("%1$10d %2%") % i->first % i->second);
+
+ printMsg(v, format("calls to %1% functions:") % functionCalls.size());
+ typedef std::multimap<unsigned int, ExprLambda *> FunctionCalls_;
+ FunctionCalls_ functionCalls_;
+ for (auto & i : functionCalls)
+ functionCalls_.insert(std::pair<unsigned int, ExprLambda *>(i.second, i.first));
+ for (auto i = functionCalls_.rbegin(); i != functionCalls_.rend(); ++i)
+ printMsg(v, format("%1$10d %2%") % i->first % i->second->showNamePos());
+
+ printMsg(v, format("evaluations of %1% attributes:") % attrSelects.size());
+ typedef std::multimap<unsigned int, Pos> AttrSelects_;
+ AttrSelects_ attrSelects_;
+ for (auto & i : attrSelects)
+ attrSelects_.insert(std::pair<unsigned int, Pos>(i.second, i.first));
+ for (auto i = attrSelects_.rbegin(); i != attrSelects_.rend(); ++i)
+ printMsg(v, format("%1$10d %2%") % i->first % i->second);
+
+ }
+}
+
+
+size_t valueSize(Value & v)
+{
+ std::set<const void *> seen;
+
+ auto doString = [&](const char * s) -> size_t {
+ if (seen.find(s) != seen.end()) return 0;
+ seen.insert(s);
+ return strlen(s) + 1;
+ };
+
+ std::function<size_t(Value & v)> doValue;
+ std::function<size_t(Env & v)> doEnv;
+
+ doValue = [&](Value & v) -> size_t {
+ if (seen.find(&v) != seen.end()) return 0;
+ seen.insert(&v);
+
+ size_t sz = sizeof(Value);
+
+ switch (v.type) {
+ case tString:
+ sz += doString(v.string.s);
+ if (v.string.context)
+ for (const char * * p = v.string.context; *p; ++p)
+ sz += doString(*p);
+ break;
+ case tPath:
+ sz += doString(v.path);
+ break;
+ case tAttrs:
+ if (seen.find(v.attrs) == seen.end()) {
+ seen.insert(v.attrs);
+ sz += sizeof(Bindings) + sizeof(Attr) * v.attrs->capacity();
+ for (auto & i : *v.attrs)
+ sz += doValue(*i.value);
+ }
+ break;
+ case tList1:
+ case tList2:
+ case tListN:
+ if (seen.find(v.listElems()) == seen.end()) {
+ seen.insert(v.listElems());
+ sz += v.listSize() * sizeof(Value *);
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ sz += doValue(*v.listElems()[n]);
+ }
+ break;
+ case tThunk:
+ sz += doEnv(*v.thunk.env);
+ break;
+ case tApp:
+ sz += doValue(*v.app.left);
+ sz += doValue(*v.app.right);
+ break;
+ case tLambda:
+ sz += doEnv(*v.lambda.env);
+ break;
+ case tPrimOpApp:
+ sz += doValue(*v.primOpApp.left);
+ sz += doValue(*v.primOpApp.right);
+ break;
+ case tExternal:
+ if (seen.find(v.external) != seen.end()) break;
+ seen.insert(v.external);
+ sz += v.external->valueSize(seen);
+ break;
+ default:
+ ;
+ }
+
+ return sz;
+ };
+
+ doEnv = [&](Env & env) -> size_t {
+ if (seen.find(&env) != seen.end()) return 0;
+ seen.insert(&env);
+
+ size_t sz = sizeof(Env) + sizeof(Value *) * env.size;
+
+ for (unsigned int i = 0; i < env.size; ++i)
+ if (env.values[i])
+ sz += doValue(*env.values[i]);
+
+ if (env.up) sz += doEnv(*env.up);
+
+ return sz;
+ };
+
+ return doValue(v);
+}
+
+
+string ExternalValueBase::coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const
+{
+ throw TypeError(format("cannot coerce %1% to a string, at %2%") %
+ showType() % pos);
+}
+
+
+bool ExternalValueBase::operator==(const ExternalValueBase & b) const
+{
+ return false;
+}
+
+
+std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {
+ return v.print(str);
+}
+
+
+}
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
new file mode 100644
index 000000000..46d5a1cc8
--- /dev/null
+++ b/src/libexpr/eval.hh
@@ -0,0 +1,304 @@
+#pragma once
+
+#include "attr-set.hh"
+#include "value.hh"
+#include "nixexpr.hh"
+#include "symbol-table.hh"
+#include "hash.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+class Store;
+class EvalState;
+
+
+typedef void (* PrimOpFun) (EvalState & state, const Pos & pos, Value * * args, Value & v);
+
+
+struct PrimOp
+{
+ PrimOpFun fun;
+ size_t arity;
+ Symbol name;
+ PrimOp(PrimOpFun fun, size_t arity, Symbol name)
+ : fun(fun), arity(arity), name(name) { }
+};
+
+
+struct Env
+{
+ Env * up;
+ unsigned short size; // used by ‘valueSize’
+ unsigned short prevWith:15; // nr of levels up to next `with' environment
+ unsigned short haveWithAttrs:1;
+ Value * values[0];
+};
+
+
+Value & mkString(Value & v, const string & s, const PathSet & context = PathSet());
+
+void copyContext(const Value & v, PathSet & context);
+
+
+/* Cache for calls to addToStore(); maps source paths to the store
+ paths. */
+typedef std::map<Path, Path> SrcToStore;
+
+
+std::ostream & operator << (std::ostream & str, const Value & v);
+
+
+typedef std::pair<std::string, std::string> SearchPathElem;
+typedef std::list<SearchPathElem> SearchPath;
+
+
+/* Initialise the Boehm GC, if applicable. */
+void initGC();
+
+
+class EvalState
+{
+public:
+ SymbolTable symbols;
+
+ const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue,
+ sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls,
+ sFile, sLine, sColumn, sFunctor, sToString,
+ sRight, sWrong, sStructuredAttrs, sBuilder;
+ Symbol sDerivationNix;
+
+ /* If set, force copying files to the Nix store even if they
+ already exist there. */
+ bool repair = false;
+
+ /* If set, don't allow access to files outside of the Nix search
+ path or to environment variables. */
+ bool restricted;
+
+ Value vEmptySet;
+
+ const ref<Store> store;
+
+private:
+ SrcToStore srcToStore;
+
+ /* A cache from path names to values. */
+#if HAVE_BOEHMGC
+ typedef std::map<Path, Value, std::less<Path>, traceable_allocator<std::pair<const Path, Value> > > FileEvalCache;
+#else
+ typedef std::map<Path, Value> FileEvalCache;
+#endif
+ FileEvalCache fileEvalCache;
+
+ SearchPath searchPath;
+
+ std::map<std::string, std::pair<bool, std::string>> searchPathResolved;
+
+public:
+
+ EvalState(const Strings & _searchPath, ref<Store> store);
+ ~EvalState();
+
+ void addToSearchPath(const string & s);
+
+ SearchPath getSearchPath() { return searchPath; }
+
+ Path checkSourcePath(const Path & path);
+
+ /* Parse a Nix expression from the specified file. */
+ Expr * parseExprFromFile(const Path & path);
+ Expr * parseExprFromFile(const Path & path, StaticEnv & staticEnv);
+
+ /* Parse a Nix expression from the specified string. */
+ Expr * parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv);
+ Expr * parseExprFromString(const string & s, const Path & basePath);
+
+ /* Evaluate an expression read from the given file to normal
+ form. */
+ void evalFile(const Path & path, Value & v);
+
+ void resetFileCache();
+
+ /* Look up a file in the search path. */
+ Path findFile(const string & path);
+ Path findFile(SearchPath & searchPath, const string & path, const Pos & pos = noPos);
+
+ /* If the specified search path element is a URI, download it. */
+ std::pair<bool, std::string> resolveSearchPathElem(const SearchPathElem & elem);
+
+ /* Evaluate an expression to normal form, storing the result in
+ value `v'. */
+ void eval(Expr * e, Value & v);
+
+    /* Evaluate the expression, then verify that it has the expected
+       type. */
+ inline bool evalBool(Env & env, Expr * e);
+ inline bool evalBool(Env & env, Expr * e, const Pos & pos);
+ inline void evalAttrs(Env & env, Expr * e, Value & v);
+
+ /* If `v' is a thunk, enter it and overwrite `v' with the result
+ of the evaluation of the thunk. If `v' is a delayed function
+ application, call the function and overwrite `v' with the
+ result. Otherwise, this is a no-op. */
+ inline void forceValue(Value & v, const Pos & pos = noPos);
+
+ /* Force a value, then recursively force list elements and
+ attributes. */
+ void forceValueDeep(Value & v);
+
+ /* Force `v', and then verify that it has the expected type. */
+ NixInt forceInt(Value & v, const Pos & pos);
+ NixFloat forceFloat(Value & v, const Pos & pos);
+ bool forceBool(Value & v, const Pos & pos);
+ inline void forceAttrs(Value & v);
+ inline void forceAttrs(Value & v, const Pos & pos);
+ inline void forceList(Value & v);
+ inline void forceList(Value & v, const Pos & pos);
+ void forceFunction(Value & v, const Pos & pos); // either lambda or primop
+ string forceString(Value & v, const Pos & pos = noPos);
+ string forceString(Value & v, PathSet & context, const Pos & pos = noPos);
+ string forceStringNoCtx(Value & v, const Pos & pos = noPos);
+
+ /* Return true iff the value `v' denotes a derivation (i.e. a
+ set with attribute `type = "derivation"'). */
+ bool isDerivation(Value & v);
+
+ /* String coercion. Converts strings, paths and derivations to a
+ string. If `coerceMore' is set, also converts nulls, integers,
+ booleans and lists to a string. If `copyToStore' is set,
+ referenced paths are copied to the Nix store as a side effect. */
+ string coerceToString(const Pos & pos, Value & v, PathSet & context,
+ bool coerceMore = false, bool copyToStore = true);
+
+ string copyPathToStore(PathSet & context, const Path & path);
+
+ /* Path coercion. Converts strings, paths and derivations to a
+ path. The result is guaranteed to be a canonicalised, absolute
+ path. Nothing is copied to the store. */
+ Path coerceToPath(const Pos & pos, Value & v, PathSet & context);
+
+public:
+
+ /* The base environment, containing the builtin functions and
+ values. */
+ Env & baseEnv;
+
+ /* The same, but used during parsing to resolve variables. */
+ StaticEnv staticBaseEnv; // !!! should be private
+
+private:
+
+ unsigned int baseEnvDispl = 0;
+
+ void createBaseEnv();
+
+ void addConstant(const string & name, Value & v);
+
+ void addPrimOp(const string & name,
+ unsigned int arity, PrimOpFun primOp);
+
+public:
+
+ Value & getBuiltin(const string & name);
+
+private:
+
+ inline Value * lookupVar(Env * env, const ExprVar & var, bool noEval);
+
+ friend struct ExprVar;
+ friend struct ExprAttrs;
+ friend struct ExprLet;
+
+ Expr * parse(const char * text, const Path & path,
+ const Path & basePath, StaticEnv & staticEnv);
+
+public:
+
+ /* Do a deep equality test between two values. That is, list
+ elements and attributes are compared recursively. */
+ bool eqValues(Value & v1, Value & v2);
+
+ bool isFunctor(Value & fun);
+
+ void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
+ void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
+
+ /* Automatically call a function for which each argument has a
+ default value or has a binding in the `args' map. */
+ void autoCallFunction(Bindings & args, Value & fun, Value & res);
+
+ /* Allocation primitives. */
+ Value * allocValue();
+ Env & allocEnv(unsigned int size);
+
+ Value * allocAttr(Value & vAttrs, const Symbol & name);
+
+ Bindings * allocBindings(Bindings::size_t capacity);
+
+ void mkList(Value & v, unsigned int length);
+ void mkAttrs(Value & v, unsigned int capacity);
+ void mkThunk_(Value & v, Expr * expr);
+ void mkPos(Value & v, Pos * pos);
+
+ void concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos);
+
+ /* Print statistics. */
+ void printStats();
+
+ void realiseContext(const PathSet & context);
+
+private:
+
+ unsigned long nrEnvs = 0;
+ unsigned long nrValuesInEnvs = 0;
+ unsigned long nrValues = 0;
+ unsigned long nrListElems = 0;
+ unsigned long nrAttrsets = 0;
+ unsigned long nrAttrsInAttrsets = 0;
+ unsigned long nrOpUpdates = 0;
+ unsigned long nrOpUpdateValuesCopied = 0;
+ unsigned long nrListConcats = 0;
+ unsigned long nrPrimOpCalls = 0;
+ unsigned long nrFunctionCalls = 0;
+
+ bool countCalls;
+
+ typedef std::map<Symbol, unsigned int> PrimOpCalls;
+ PrimOpCalls primOpCalls;
+
+ typedef std::map<ExprLambda *, unsigned int> FunctionCalls;
+ FunctionCalls functionCalls;
+
+ void incrFunctionCall(ExprLambda * fun);
+
+ typedef std::map<Pos, unsigned int> AttrSelects;
+ AttrSelects attrSelects;
+
+ friend struct ExprOpUpdate;
+ friend struct ExprOpConcatLists;
+ friend struct ExprSelect;
+ friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
+};
+
+
+/* Return a string representing the type of the value `v'. */
+string showType(const Value & v);
+
+
+/* If `path' refers to a directory, then append "/default.nix". */
+Path resolveExprPath(Path path);
+
+struct InvalidPathError : EvalError
+{
+ Path path;
+ InvalidPathError(const Path & path);
+#ifdef EXCEPTION_NEEDS_THROW_SPEC
+ ~InvalidPathError() throw () { };
+#endif
+};
+
+}
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
new file mode 100644
index 000000000..ae9fb0e5e
--- /dev/null
+++ b/src/libexpr/get-drvs.cc
@@ -0,0 +1,332 @@
+#include "get-drvs.hh"
+#include "util.hh"
+#include "eval-inline.hh"
+
+#include <cstring>
+#include <regex>
+
+
+namespace nix {
+
+
+string DrvInfo::queryDrvPath()
+{
+ if (drvPath == "" && attrs) {
+ Bindings::iterator i = attrs->find(state->sDrvPath);
+ PathSet context;
+ drvPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
+ }
+ return drvPath;
+}
+
+
+string DrvInfo::queryOutPath()
+{
+ if (outPath == "" && attrs) {
+ Bindings::iterator i = attrs->find(state->sOutPath);
+ PathSet context;
+ outPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
+ }
+ return outPath;
+}
+
+
+DrvInfo::Outputs DrvInfo::queryOutputs(bool onlyOutputsToInstall)
+{
+ if (outputs.empty()) {
+ /* Get the ‘outputs’ list. */
+ Bindings::iterator i;
+ if (attrs && (i = attrs->find(state->sOutputs)) != attrs->end()) {
+ state->forceList(*i->value, *i->pos);
+
+ /* For each output... */
+ for (unsigned int j = 0; j < i->value->listSize(); ++j) {
+ /* Evaluate the corresponding set. */
+ string name = state->forceStringNoCtx(*i->value->listElems()[j], *i->pos);
+ Bindings::iterator out = attrs->find(state->symbols.create(name));
+ if (out == attrs->end()) continue; // FIXME: throw error?
+ state->forceAttrs(*out->value);
+
+ /* And evaluate its ‘outPath’ attribute. */
+ Bindings::iterator outPath = out->value->attrs->find(state->sOutPath);
+ if (outPath == out->value->attrs->end()) continue; // FIXME: throw error?
+ PathSet context;
+ outputs[name] = state->coerceToPath(*outPath->pos, *outPath->value, context);
+ }
+ } else
+ outputs["out"] = queryOutPath();
+ }
+ if (!onlyOutputsToInstall || !attrs)
+ return outputs;
+
+ /* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
+ const Value * outTI = queryMeta("outputsToInstall");
+ if (!outTI) return outputs;
+ const auto errMsg = Error("this derivation has bad ‘meta.outputsToInstall’");
+ /* ^ this shows during `nix-env -i` right under the bad derivation */
+ if (!outTI->isList()) throw errMsg;
+ Outputs result;
+ for (auto i = outTI->listElems(); i != outTI->listElems() + outTI->listSize(); ++i) {
+ if ((*i)->type != tString) throw errMsg;
+ auto out = outputs.find((*i)->string.s);
+ if (out == outputs.end()) throw errMsg;
+ result.insert(*out);
+ }
+ return result;
+}
+
+
+string DrvInfo::queryOutputName()
+{
+ if (outputName == "" && attrs) {
+ Bindings::iterator i = attrs->find(state->sOutputName);
+ outputName = i != attrs->end() ? state->forceStringNoCtx(*i->value) : "";
+ }
+ return outputName;
+}
+
+
+Bindings * DrvInfo::getMeta()
+{
+ if (meta) return meta;
+ if (!attrs) return 0;
+ Bindings::iterator a = attrs->find(state->sMeta);
+ if (a == attrs->end()) return 0;
+ state->forceAttrs(*a->value, *a->pos);
+ meta = a->value->attrs;
+ return meta;
+}
+
+
+StringSet DrvInfo::queryMetaNames()
+{
+ StringSet res;
+ if (!getMeta()) return res;
+ for (auto & i : *meta)
+ res.insert(i.name);
+ return res;
+}
+
+
+bool DrvInfo::checkMeta(Value & v)
+{
+ state->forceValue(v);
+ if (v.isList()) {
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ if (!checkMeta(*v.listElems()[n])) return false;
+ return true;
+ }
+ else if (v.type == tAttrs) {
+ Bindings::iterator i = v.attrs->find(state->sOutPath);
+ if (i != v.attrs->end()) return false;
+ for (auto & i : *v.attrs)
+ if (!checkMeta(*i.value)) return false;
+ return true;
+ }
+ else return v.type == tInt || v.type == tBool || v.type == tString ||
+ v.type == tFloat;
+}
+
+
+Value * DrvInfo::queryMeta(const string & name)
+{
+ if (!getMeta()) return 0;
+ Bindings::iterator a = meta->find(state->symbols.create(name));
+ if (a == meta->end() || !checkMeta(*a->value)) return 0;
+ return a->value;
+}
+
+
+string DrvInfo::queryMetaString(const string & name)
+{
+ Value * v = queryMeta(name);
+ if (!v || v->type != tString) return "";
+ return v->string.s;
+}
+
+
+NixInt DrvInfo::queryMetaInt(const string & name, NixInt def)
+{
+ Value * v = queryMeta(name);
+ if (!v) return def;
+ if (v->type == tInt) return v->integer;
+ if (v->type == tString) {
+        /* Backwards compatibility with the time before integer meta
+           fields were supported. */
+ NixInt n;
+ if (string2Int(v->string.s, n)) return n;
+ }
+ return def;
+}
+
+NixFloat DrvInfo::queryMetaFloat(const string & name, NixFloat def)
+{
+ Value * v = queryMeta(name);
+ if (!v) return def;
+ if (v->type == tFloat) return v->fpoint;
+ if (v->type == tString) {
+        /* Backwards compatibility with the time before float meta
+           fields were supported. */
+ NixFloat n;
+ if (string2Float(v->string.s, n)) return n;
+ }
+ return def;
+}
+
+
+bool DrvInfo::queryMetaBool(const string & name, bool def)
+{
+ Value * v = queryMeta(name);
+ if (!v) return def;
+ if (v->type == tBool) return v->boolean;
+ if (v->type == tString) {
+        /* Backwards compatibility with the time before Boolean meta
+           fields were supported. */
+ if (strcmp(v->string.s, "true") == 0) return true;
+ if (strcmp(v->string.s, "false") == 0) return false;
+ }
+ return def;
+}
+
+
+void DrvInfo::setMeta(const string & name, Value * v)
+{
+ getMeta();
+ Bindings * old = meta;
+ meta = state->allocBindings(1 + (old ? old->size() : 0));
+ Symbol sym = state->symbols.create(name);
+ if (old)
+ for (auto i : *old)
+ if (i.name != sym)
+ meta->push_back(i);
+ if (v) meta->push_back(Attr(sym, v));
+ meta->sort();
+}
+
+
+/* Cache for already considered attrsets. */
+typedef set<Bindings *> Done;
+
+
+/* Evaluate value `v'. If it evaluates to a set of type `derivation',
+ then put information about it in `drvs' (unless it's already in `done').
+ The result boolean indicates whether it makes sense
+ for the caller to recursively search for derivations in `v'. */
+static bool getDerivation(EvalState & state, Value & v,
+ const string & attrPath, DrvInfos & drvs, Done & done,
+ bool ignoreAssertionFailures)
+{
+ try {
+ state.forceValue(v);
+ if (!state.isDerivation(v)) return true;
+
+        /* Remove spurious duplicates (e.g., a set like `rec { x =
+           derivation {...}; y = x; }'). */
+ if (done.find(v.attrs) != done.end()) return false;
+ done.insert(v.attrs);
+
+ Bindings::iterator i = v.attrs->find(state.sName);
+ /* !!! We really would like to have a decent back trace here. */
+ if (i == v.attrs->end()) throw TypeError("derivation name missing");
+
+ Bindings::iterator i2 = v.attrs->find(state.sSystem);
+
+ DrvInfo drv(state, state.forceStringNoCtx(*i->value), attrPath,
+ i2 == v.attrs->end() ? "unknown" : state.forceStringNoCtx(*i2->value, *i2->pos),
+ v.attrs);
+
+ drvs.push_back(drv);
+ return false;
+
+ } catch (AssertionError & e) {
+ if (ignoreAssertionFailures) return false;
+ throw;
+ }
+}
+
+
+bool getDerivation(EvalState & state, Value & v, DrvInfo & drv,
+ bool ignoreAssertionFailures)
+{
+ Done done;
+ DrvInfos drvs;
+ getDerivation(state, v, "", drvs, done, ignoreAssertionFailures);
+ if (drvs.size() != 1) return false;
+ drv = drvs.front();
+ return true;
+}
+
+
+static string addToPath(const string & s1, const string & s2)
+{
+ return s1.empty() ? s2 : s1 + "." + s2;
+}
+
+
+static std::regex attrRegex("[A-Za-z_][A-Za-z0-9-_+]*");
+
+
+static void getDerivations(EvalState & state, Value & vIn,
+ const string & pathPrefix, Bindings & autoArgs,
+ DrvInfos & drvs, Done & done,
+ bool ignoreAssertionFailures)
+{
+ Value v;
+ state.autoCallFunction(autoArgs, vIn, v);
+
+ /* Process the expression. */
+ if (!getDerivation(state, v, pathPrefix, drvs, done, ignoreAssertionFailures)) ;
+
+ else if (v.type == tAttrs) {
+
+ /* !!! undocumented hackery to support combining channels in
+ nix-env.cc. */
+ bool combineChannels = v.attrs->find(state.symbols.create("_combineChannels")) != v.attrs->end();
+
+ /* Consider the attributes in sorted order to get more
+ deterministic behaviour in nix-env operations (e.g. when
+           there are name clashes between derivations, the derivation
+ bound to the attribute with the "lower" name should take
+ precedence). */
+ for (auto & i : v.attrs->lexicographicOrder()) {
+ Activity act(*logger, lvlDebug, format("evaluating attribute ‘%1%’") % i->name);
+ if (!std::regex_match(std::string(i->name), attrRegex))
+ continue;
+ string pathPrefix2 = addToPath(pathPrefix, i->name);
+ if (combineChannels)
+ getDerivations(state, *i->value, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+ else if (getDerivation(state, *i->value, pathPrefix2, drvs, done, ignoreAssertionFailures)) {
+ /* If the value of this attribute is itself a set,
+ should we recurse into it? => Only if it has a
+ `recurseForDerivations = true' attribute. */
+ if (i->value->type == tAttrs) {
+ Bindings::iterator j = i->value->attrs->find(state.symbols.create("recurseForDerivations"));
+ if (j != i->value->attrs->end() && state.forceBool(*j->value, *j->pos))
+ getDerivations(state, *i->value, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+ }
+ }
+ }
+ }
+
+ else if (v.isList()) {
+ for (unsigned int n = 0; n < v.listSize(); ++n) {
+ Activity act(*logger, lvlDebug, "evaluating list element");
+ string pathPrefix2 = addToPath(pathPrefix, (format("%1%") % n).str());
+ if (getDerivation(state, *v.listElems()[n], pathPrefix2, drvs, done, ignoreAssertionFailures))
+ getDerivations(state, *v.listElems()[n], pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+ }
+ }
+
+ else throw TypeError("expression does not evaluate to a derivation (or a set or list of those)");
+}
+
+
+void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
+ Bindings & autoArgs, DrvInfos & drvs, bool ignoreAssertionFailures)
+{
+ Done done;
+ getDerivations(state, v, pathPrefix, autoArgs, drvs, done, ignoreAssertionFailures);
+}
+
+
+}
diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh
new file mode 100644
index 000000000..37fcbe829
--- /dev/null
+++ b/src/libexpr/get-drvs.hh
@@ -0,0 +1,93 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+
+namespace nix {
+
+
+struct DrvInfo
+{
+public:
+ typedef std::map<string, Path> Outputs;
+
+private:
+ EvalState * state;
+
+ string drvPath;
+ string outPath;
+ string outputName;
+ Outputs outputs;
+
+ bool failed; // set if we get an AssertionError
+
+ Bindings * attrs, * meta;
+
+ Bindings * getMeta();
+
+ bool checkMeta(Value & v);
+
+public:
+ string name;
+ string attrPath; /* path towards the derivation */
+ string system;
+
+ DrvInfo(EvalState & state) : state(&state), failed(false), attrs(0), meta(0) { };
+ DrvInfo(EvalState & state, const string & name, const string & attrPath, const string & system, Bindings * attrs)
+ : state(&state), failed(false), attrs(attrs), meta(0), name(name), attrPath(attrPath), system(system) { };
+
+ string queryDrvPath();
+ string queryOutPath();
+ string queryOutputName();
+    /** Return the list of outputs. The "outputs to install" are determined by `meta.outputsToInstall`. */
+ Outputs queryOutputs(bool onlyOutputsToInstall = false);
+
+ StringSet queryMetaNames();
+ Value * queryMeta(const string & name);
+ string queryMetaString(const string & name);
+ NixInt queryMetaInt(const string & name, NixInt def);
+ NixFloat queryMetaFloat(const string & name, NixFloat def);
+ bool queryMetaBool(const string & name, bool def);
+ void setMeta(const string & name, Value * v);
+
+ /*
+ MetaInfo queryMetaInfo(EvalState & state) const;
+ MetaValue queryMetaInfo(EvalState & state, const string & name) const;
+ */
+
+ void setDrvPath(const string & s)
+ {
+ drvPath = s;
+ }
+
+ void setOutPath(const string & s)
+ {
+ outPath = s;
+ }
+
+ void setFailed() { failed = true; };
+ bool hasFailed() { return failed; };
+};
+
+
+#if HAVE_BOEHMGC
+typedef list<DrvInfo, traceable_allocator<DrvInfo> > DrvInfos;
+#else
+typedef list<DrvInfo> DrvInfos;
+#endif
+
+
+/* If value `v' denotes a derivation, store information about the
+ derivation in `drv' and return true. Otherwise, return false. */
+bool getDerivation(EvalState & state, Value & v, DrvInfo & drv,
+ bool ignoreAssertionFailures);
+
+void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
+ Bindings & autoArgs, DrvInfos & drvs,
+ bool ignoreAssertionFailures);
+
+
+}
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
new file mode 100644
index 000000000..c189cdef3
--- /dev/null
+++ b/src/libexpr/json-to-value.cc
@@ -0,0 +1,143 @@
+#include "json-to-value.hh"
+
+#include <cstring>
+
+namespace nix {
+
+
+static void skipWhitespace(const char * & s)
+{
+ while (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r') s++;
+}
+
+
+static string parseJSONString(const char * & s)
+{
+ string res;
+ if (*s++ != '"') throw JSONParseError("expected JSON string");
+ while (*s != '"') {
+ if (!*s) throw JSONParseError("got end-of-string in JSON string");
+ if (*s == '\\') {
+ s++;
+ if (*s == '"') res += '"';
+ else if (*s == '\\') res += '\\';
+            else if (*s == '/') res += '/';
+ else if (*s == 'b') res += '\b';
+ else if (*s == 'f') res += '\f';
+ else if (*s == 'n') res += '\n';
+ else if (*s == 'r') res += '\r';
+ else if (*s == 't') res += '\t';
+ else if (*s == 'u') throw JSONParseError("\\u characters in JSON strings are currently not supported");
+ else throw JSONParseError("invalid escaped character in JSON string");
+ s++;
+ } else
+ res += *s++;
+ }
+ s++;
+ return res;
+}
+
+
+static void parseJSON(EvalState & state, const char * & s, Value & v)
+{
+ skipWhitespace(s);
+
+ if (!*s) throw JSONParseError("expected JSON value");
+
+ if (*s == '[') {
+ s++;
+ ValueVector values;
+ values.reserve(128);
+ skipWhitespace(s);
+ while (1) {
+ if (values.empty() && *s == ']') break;
+ Value * v2 = state.allocValue();
+ parseJSON(state, s, *v2);
+ values.push_back(v2);
+ skipWhitespace(s);
+ if (*s == ']') break;
+ if (*s != ',') throw JSONParseError("expected ‘,’ or ‘]’ after JSON array element");
+ s++;
+ }
+ s++;
+ state.mkList(v, values.size());
+ for (size_t n = 0; n < values.size(); ++n)
+ v.listElems()[n] = values[n];
+ }
+
+ else if (*s == '{') {
+ s++;
+ ValueMap attrs;
+ while (1) {
+ skipWhitespace(s);
+ if (attrs.empty() && *s == '}') break;
+ string name = parseJSONString(s);
+ skipWhitespace(s);
+ if (*s != ':') throw JSONParseError("expected ‘:’ in JSON object");
+ s++;
+ Value * v2 = state.allocValue();
+ parseJSON(state, s, *v2);
+ attrs[state.symbols.create(name)] = v2;
+ skipWhitespace(s);
+ if (*s == '}') break;
+ if (*s != ',') throw JSONParseError("expected ‘,’ or ‘}’ after JSON member");
+ s++;
+ }
+ state.mkAttrs(v, attrs.size());
+ for (auto & i : attrs)
+ v.attrs->push_back(Attr(i.first, i.second));
+ v.attrs->sort();
+ s++;
+ }
+
+ else if (*s == '"') {
+ mkString(v, parseJSONString(s));
+ }
+
+ else if (isdigit(*s) || *s == '-' || *s == '.' ) {
+ // Buffer into a string first, then use built-in C++ conversions
+ std::string tmp_number;
+ ValueType number_type = tInt;
+
+ while (isdigit(*s) || *s == '-' || *s == '.' || *s == 'e' || *s == 'E') {
+ if (*s == '.' || *s == 'e' || *s == 'E')
+ number_type = tFloat;
+ tmp_number += *s++;
+ }
+
+ if (number_type == tFloat)
+ mkFloat(v, stod(tmp_number));
+ else
+ mkInt(v, stoi(tmp_number));
+ }
+
+ else if (strncmp(s, "true", 4) == 0) {
+ s += 4;
+ mkBool(v, true);
+ }
+
+ else if (strncmp(s, "false", 5) == 0) {
+ s += 5;
+ mkBool(v, false);
+ }
+
+ else if (strncmp(s, "null", 4) == 0) {
+ s += 4;
+ mkNull(v);
+ }
+
+ else throw JSONParseError("unrecognised JSON value");
+}
+
+
+void parseJSON(EvalState & state, const string & s_, Value & v)
+{
+ const char * s = s_.c_str();
+ parseJSON(state, s, v);
+ skipWhitespace(s);
+ if (*s) throw JSONParseError(format("expected end-of-string while parsing JSON value: %1%") % s);
+}
+
+
+}
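A small sketch of how the parser above can be driven, assuming the Value and EvalState types added elsewhere in this diff; the test document and attribute name are arbitrary:

    #include <cassert>
    #include "json-to-value.hh"

    // Sketch: a JSON object becomes a tAttrs value; numbers containing
    // '.', 'e' or 'E' become tFloat, all others tInt, mirroring the
    // branches in parseJSON() above.
    static void jsonExample(nix::EvalState & state)
    {
        nix::Value v;
        nix::parseJSON(state, "{\"xs\": [1, 2.5, true, null, \"s\"]}", v);
        assert(v.type == nix::tAttrs);   // the top-level object
        assert(v.attrs->size() == 1);    // one attribute, "xs"
    }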
diff --git a/src/libexpr/json-to-value.hh b/src/libexpr/json-to-value.hh
new file mode 100644
index 000000000..33f35b16c
--- /dev/null
+++ b/src/libexpr/json-to-value.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+
+namespace nix {
+
+MakeError(JSONParseError, EvalError)
+
+void parseJSON(EvalState & state, const string & s, Value & v);
+
+}
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
new file mode 100644
index 000000000..5b1ff0350
--- /dev/null
+++ b/src/libexpr/lexer.l
@@ -0,0 +1,209 @@
+%option reentrant bison-bridge bison-locations
+%option noyywrap
+%option never-interactive
+%option stack
+%option nodefault
+%option nounput noyy_top_state
+
+
+%x STRING
+%x IND_STRING
+%x INSIDE_DOLLAR_CURLY
+
+
+%{
+#include "nixexpr.hh"
+#include "parser-tab.hh"
+
+using namespace nix;
+
+namespace nix {
+
+
+static void initLoc(YYLTYPE * loc)
+{
+ loc->first_line = loc->last_line = 1;
+ loc->first_column = loc->last_column = 1;
+}
+
+
+static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
+{
+ loc->first_line = loc->last_line;
+ loc->first_column = loc->last_column;
+
+ while (len--) {
+ switch (*s++) {
+ case '\r':
+ if (*s == '\n') /* cr/lf */
+ s++;
+ /* fall through */
+ case '\n':
+ ++loc->last_line;
+ loc->last_column = 1;
+ break;
+ default:
+ ++loc->last_column;
+ }
+ }
+}
+
+
+static Expr * unescapeStr(SymbolTable & symbols, const char * s)
+{
+ string t;
+ char c;
+ while ((c = *s++)) {
+ if (c == '\\') {
+ assert(*s);
+ c = *s++;
+ if (c == 'n') t += '\n';
+ else if (c == 'r') t += '\r';
+ else if (c == 't') t += '\t';
+ else t += c;
+ }
+ else if (c == '\r') {
+ /* Normalise CR and CR/LF into LF. */
+ t += '\n';
+ if (*s == '\n') s++; /* cr/lf */
+ }
+ else t += c;
+ }
+ return new ExprString(symbols.create(t));
+}
+
+
+}
+
+#define YY_USER_INIT initLoc(yylloc)
+#define YY_USER_ACTION adjustLoc(yylloc, yytext, yyleng);
+
+#define PUSH_STATE(state) yy_push_state(state, yyscanner)
+#define POP_STATE() yy_pop_state(yyscanner)
+
+%}
+
+
+ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
+INT [0-9]+
+FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?
+PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
+HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
+SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\>
+URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+
+
+
+%%
+
+<INITIAL,INSIDE_DOLLAR_CURLY>{
+
+
+if { return IF; }
+then { return THEN; }
+else { return ELSE; }
+assert { return ASSERT; }
+with { return WITH; }
+let { return LET; }
+in { return IN; }
+rec { return REC; }
+inherit { return INHERIT; }
+or { return OR_KW; }
+\.\.\. { return ELLIPSIS; }
+
+\=\= { return EQ; }
+\!\= { return NEQ; }
+\<\= { return LEQ; }
+\>\= { return GEQ; }
+\&\& { return AND; }
+\|\| { return OR; }
+\-\> { return IMPL; }
+\/\/ { return UPDATE; }
+\+\+ { return CONCAT; }
+
+{ID} { yylval->id = strdup(yytext); return ID; }
+{INT} { errno = 0;
+ yylval->n = strtol(yytext, 0, 10);
+ if (errno != 0)
+ throw ParseError(format("invalid integer ‘%1%’") % yytext);
+ return INT;
+ }
+{FLOAT} { errno = 0;
+ yylval->nf = strtod(yytext, 0);
+ if (errno != 0)
+ throw ParseError(format("invalid float ‘%1%’") % yytext);
+ return FLOAT;
+ }
+
+\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; }
+}
+
+\} { return '}'; }
+<INSIDE_DOLLAR_CURLY>\} { POP_STATE(); return '}'; }
+\{ { return '{'; }
+<INSIDE_DOLLAR_CURLY>\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return '{'; }
+
+<INITIAL,INSIDE_DOLLAR_CURLY>\" { PUSH_STATE(STRING); return '"'; }
+<STRING>([^\$\"\\]|\$[^\{\"\\]|\\.|\$\\.)*\$/\" |
+<STRING>([^\$\"\\]|\$[^\{\"\\]|\\.|\$\\.)+ {
+ /* It is impossible to match strings ending with '$' with one
+ regex because trailing contexts are only valid at the end
+ of a rule. (A sane but undocumented limitation.) */
+ yylval->e = unescapeStr(data->symbols, yytext);
+ return STR;
+ }
+<STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; }
+<STRING>\" { POP_STATE(); return '"'; }
+<STRING>. return yytext[0]; /* just in case: shouldn't be reached */
+
+<INITIAL,INSIDE_DOLLAR_CURLY>\'\'(\ *\n)? { PUSH_STATE(IND_STRING); return IND_STRING_OPEN; }
+<IND_STRING>([^\$\']|\$[^\{\']|\'[^\'\$])+ {
+ yylval->e = new ExprIndStr(yytext);
+ return IND_STR;
+ }
+<IND_STRING>\'\'\$ {
+ yylval->e = new ExprIndStr("$");
+ return IND_STR;
+ }
+<IND_STRING>\'\'\' {
+ yylval->e = new ExprIndStr("''");
+ return IND_STR;
+ }
+<IND_STRING>\'\'\\. {
+ yylval->e = unescapeStr(data->symbols, yytext + 2);
+ return IND_STR;
+ }
+<IND_STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; }
+<IND_STRING>\'\' { POP_STATE(); return IND_STRING_CLOSE; }
+<IND_STRING>\' {
+ yylval->e = new ExprIndStr("'");
+ return IND_STR;
+ }
+<IND_STRING>. return yytext[0]; /* just in case: shouldn't be reached */
+
+<INITIAL,INSIDE_DOLLAR_CURLY>{
+
+{PATH} { if (yytext[yyleng-1] == '/')
+                  throw ParseError(format("path ‘%1%’ has a trailing slash") % yytext);
+ yylval->path = strdup(yytext);
+ return PATH;
+ }
+{HPATH} { if (yytext[yyleng-1] == '/')
+                  throw ParseError(format("path ‘%1%’ has a trailing slash") % yytext);
+ yylval->path = strdup(yytext);
+ return HPATH;
+ }
+{SPATH} { yylval->path = strdup(yytext); return SPATH; }
+{URI} { yylval->uri = strdup(yytext); return URI; }
+
+[ \t\r\n]+ /* eat up whitespace */
+\#[^\r\n]* /* single-line comments */
+\/\*([^*]|\*+[^*/])*\*+\/ /* long comments */
+
+. return yytext[0];
+
+}
+
+<<EOF>> { data->atEnd = true; return 0; }
+
+%%
+
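unescapeStr above expands the usual backslash escapes and normalises CR and CR/LF line endings to LF before the string reaches the parser. A self-contained sketch of that normalisation on a plain std::string, independent of the Nix symbol table:

    #include <cassert>
    #include <string>

    // Sketch of the technique used by unescapeStr(): expand \n, \r, \t,
    // pass other escaped characters through verbatim, and turn CR / CR-LF
    // into a single LF.
    static std::string unescapeSketch(const std::string & in)
    {
        std::string out;
        for (size_t i = 0; i < in.size(); ++i) {
            char c = in[i];
            if (c == '\\' && i + 1 < in.size()) {
                c = in[++i];
                if (c == 'n') out += '\n';
                else if (c == 'r') out += '\r';
                else if (c == 't') out += '\t';
                else out += c;
            } else if (c == '\r') {
                out += '\n';
                if (i + 1 < in.size() && in[i + 1] == '\n') ++i;  /* cr/lf */
            } else
                out += c;
        }
        return out;
    }

    int main()
    {
        assert(unescapeSketch("a\\nb\r\nc") == "a\nb\nc");
        return 0;
    }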
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
new file mode 100644
index 000000000..daa3258f0
--- /dev/null
+++ b/src/libexpr/local.mk
@@ -0,0 +1,33 @@
+libraries += libexpr
+
+libexpr_NAME = libnixexpr
+
+libexpr_DIR := $(d)
+
+libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc
+
+libexpr_LIBS = libutil libstore libformat
+
+libexpr_LDFLAGS =
+ifneq ($(OS), FreeBSD)
+ libexpr_LDFLAGS += -ldl
+endif
+
+# The dependency on libgc must be propagated (i.e. meaning that
+# programs/libraries that use libexpr must explicitly pass -lgc),
+# because inline functions in libexpr's header files call libgc.
+libexpr_LDFLAGS_PROPAGATED = $(BDW_GC_LIBS)
+
+libexpr_ORDER_AFTER := $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
+
+$(d)/parser-tab.cc $(d)/parser-tab.hh: $(d)/parser.y
+ $(trace-gen) bison -v -o $(libexpr_DIR)/parser-tab.cc $< -d
+
+$(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l
+ $(trace-gen) flex --outfile $(libexpr_DIR)/lexer-tab.cc --header-file=$(libexpr_DIR)/lexer-tab.hh $<
+
+clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
+
+dist-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
+
+$(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644))
diff --git a/src/libexpr/names.cc b/src/libexpr/names.cc
new file mode 100644
index 000000000..6d78d2116
--- /dev/null
+++ b/src/libexpr/names.cc
@@ -0,0 +1,107 @@
+#include "names.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+DrvName::DrvName()
+{
+ name = "";
+}
+
+
+/* Parse a derivation name. The `name' part of a derivation name is
+ everything up to but not including the first dash *not* followed by
+ a letter. The `version' part is the rest (excluding the separating
+ dash). E.g., `apache-httpd-2.0.48' is parsed to (`apache-httpd',
+ '2.0.48'). */
+DrvName::DrvName(const string & s) : hits(0)
+{
+ name = fullName = s;
+ for (unsigned int i = 0; i < s.size(); ++i) {
+ /* !!! isalpha/isdigit are affected by the locale. */
+ if (s[i] == '-' && i + 1 < s.size() && !isalpha(s[i + 1])) {
+ name = string(s, 0, i);
+ version = string(s, i + 1);
+ break;
+ }
+ }
+}
+
+
+bool DrvName::matches(DrvName & n)
+{
+ if (name != "*") {
+ if (!regex) regex = std::unique_ptr<std::regex>(new std::regex(name, std::regex::extended));
+ if (!std::regex_match(n.name, *regex)) return false;
+ }
+ if (version != "" && version != n.version) return false;
+ return true;
+}
+
+
+static string nextComponent(string::const_iterator & p,
+ const string::const_iterator end)
+{
+ /* Skip any dots and dashes (component separators). */
+ while (p != end && (*p == '.' || *p == '-')) ++p;
+
+ if (p == end) return "";
+
+ /* If the first character is a digit, consume the longest sequence
+ of digits. Otherwise, consume the longest sequence of
+ non-digit, non-separator characters. */
+ string s;
+ if (isdigit(*p))
+ while (p != end && isdigit(*p)) s += *p++;
+ else
+ while (p != end && (!isdigit(*p) && *p != '.' && *p != '-'))
+ s += *p++;
+
+ return s;
+}
+
+
+static bool componentsLT(const string & c1, const string & c2)
+{
+ int n1, n2;
+ bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2);
+
+ if (c1Num && c2Num) return n1 < n2;
+ else if (c1 == "" && c2Num) return true;
+ else if (c1 == "pre" && c2 != "pre") return true;
+ else if (c2 == "pre") return false;
+ /* Assume that `2.3a' < `2.3.1'. */
+ else if (c2Num) return true;
+ else if (c1Num) return false;
+ else return c1 < c2;
+}
+
+
+int compareVersions(const string & v1, const string & v2)
+{
+ string::const_iterator p1 = v1.begin();
+ string::const_iterator p2 = v2.begin();
+
+ while (p1 != v1.end() || p2 != v2.end()) {
+ string c1 = nextComponent(p1, v1.end());
+ string c2 = nextComponent(p2, v2.end());
+ if (componentsLT(c1, c2)) return -1;
+ else if (componentsLT(c2, c1)) return 1;
+ }
+
+ return 0;
+}
+
+
+DrvNames drvNamesFromArgs(const Strings & opArgs)
+{
+ DrvNames result;
+ for (auto & i : opArgs)
+ result.push_back(DrvName(i));
+ return result;
+}
+
+
+}
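A few concrete data points for the ordering defined by nextComponent and componentsLT above; the calls assume only the compareVersions declaration from names.hh added below:

    #include <cassert>
    #include "names.hh"

    // Sketch: numeric components compare numerically, "pre" sorts before a
    // release, and a trailing letter sorts before a numeric component
    // (so `2.3a' < `2.3.1', as noted in componentsLT above).
    static void versionExamples()
    {
        using nix::compareVersions;
        assert(compareVersions("2.0.48", "2.0.5") > 0);   // 48 > 5
        assert(compareVersions("2.3a", "2.3.1") < 0);
        assert(compareVersions("1.0pre5", "1.0") < 0);    // "pre" < release
        assert(compareVersions("1.0", "1.0") == 0);
    }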
diff --git a/src/libexpr/names.hh b/src/libexpr/names.hh
new file mode 100644
index 000000000..9667fc96f
--- /dev/null
+++ b/src/libexpr/names.hh
@@ -0,0 +1,30 @@
+#pragma once
+
+#include <memory>
+
+#include "types.hh"
+#include <regex>
+
+namespace nix {
+
+struct DrvName
+{
+ string fullName;
+ string name;
+ string version;
+ unsigned int hits;
+
+ DrvName();
+ DrvName(const string & s);
+ bool matches(DrvName & n);
+
+private:
+ std::unique_ptr<std::regex> regex;
+};
+
+typedef list<DrvName> DrvNames;
+
+int compareVersions(const string & v1, const string & v2);
+DrvNames drvNamesFromArgs(const Strings & opArgs);
+
+}
diff --git a/src/libexpr/nix-expr.pc.in b/src/libexpr/nix-expr.pc.in
new file mode 100644
index 000000000..21b6c38dd
--- /dev/null
+++ b/src/libexpr/nix-expr.pc.in
@@ -0,0 +1,10 @@
+prefix=@prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: Nix
+Description: Nix Package Manager
+Version: @PACKAGE_VERSION@
+Requires: nix-store bdw-gc
+Libs: -L${libdir} -lnixexpr
+Cflags: -I${includedir}/nix
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
new file mode 100644
index 000000000..b2c9f0528
--- /dev/null
+++ b/src/libexpr/nixexpr.cc
@@ -0,0 +1,438 @@
+#include "nixexpr.hh"
+#include "derivations.hh"
+#include "util.hh"
+
+#include <cstdlib>
+
+
+namespace nix {
+
+
+/* Displaying abstract syntax trees. */
+
+std::ostream & operator << (std::ostream & str, Expr & e)
+{
+ e.show(str);
+ return str;
+}
+
+static void showString(std::ostream & str, const string & s)
+{
+ str << '"';
+ for (auto c : (string) s)
+ if (c == '"' || c == '\\' || c == '$') str << "\\" << c;
+ else if (c == '\n') str << "\\n";
+ else if (c == '\r') str << "\\r";
+ else if (c == '\t') str << "\\t";
+ else str << c;
+ str << '"';
+}
+
+static void showId(std::ostream & str, const string & s)
+{
+ if (s.empty())
+ str << "\"\"";
+ else if (s == "if") // FIXME: handle other keywords
+ str << '"' << s << '"';
+ else {
+ char c = s[0];
+ if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) {
+ showString(str, s);
+ return;
+ }
+ for (auto c : s)
+ if (!((c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == '\'' || c == '-')) {
+ showString(str, s);
+ return;
+ }
+ str << s;
+ }
+}
+
+std::ostream & operator << (std::ostream & str, const Symbol & sym)
+{
+ showId(str, *sym.s);
+ return str;
+}
+
+void Expr::show(std::ostream & str)
+{
+ abort();
+}
+
+void ExprInt::show(std::ostream & str)
+{
+ str << n;
+}
+
+void ExprFloat::show(std::ostream & str)
+{
+ str << nf;
+}
+
+void ExprString::show(std::ostream & str)
+{
+ showString(str, s);
+}
+
+void ExprPath::show(std::ostream & str)
+{
+ str << s;
+}
+
+void ExprVar::show(std::ostream & str)
+{
+ str << name;
+}
+
+void ExprSelect::show(std::ostream & str)
+{
+ str << "(" << *e << ")." << showAttrPath(attrPath);
+ if (def) str << " or (" << *def << ")";
+}
+
+void ExprOpHasAttr::show(std::ostream & str)
+{
+ str << "((" << *e << ") ? " << showAttrPath(attrPath) << ")";
+}
+
+void ExprAttrs::show(std::ostream & str)
+{
+ if (recursive) str << "rec ";
+ str << "{ ";
+ for (auto & i : attrs)
+ if (i.second.inherited)
+ str << "inherit " << i.first << " " << "; ";
+ else
+ str << i.first << " = " << *i.second.e << "; ";
+ for (auto & i : dynamicAttrs)
+ str << "\"${" << *i.nameExpr << "}\" = " << *i.valueExpr << "; ";
+ str << "}";
+}
+
+void ExprList::show(std::ostream & str)
+{
+ str << "[ ";
+ for (auto & i : elems)
+ str << "(" << *i << ") ";
+ str << "]";
+}
+
+void ExprLambda::show(std::ostream & str)
+{
+ str << "(";
+ if (matchAttrs) {
+ str << "{ ";
+ bool first = true;
+ for (auto & i : formals->formals) {
+ if (first) first = false; else str << ", ";
+ str << i.name;
+ if (i.def) str << " ? " << *i.def;
+ }
+ if (formals->ellipsis) {
+ if (!first) str << ", ";
+ str << "...";
+ }
+ str << " }";
+ if (!arg.empty()) str << " @ ";
+ }
+ if (!arg.empty()) str << arg;
+ str << ": " << *body << ")";
+}
+
+void ExprLet::show(std::ostream & str)
+{
+ str << "(let ";
+ for (auto & i : attrs->attrs)
+ if (i.second.inherited) {
+ str << "inherit " << i.first << "; ";
+ }
+ else
+ str << i.first << " = " << *i.second.e << "; ";
+ str << "in " << *body << ")";
+}
+
+void ExprWith::show(std::ostream & str)
+{
+ str << "(with " << *attrs << "; " << *body << ")";
+}
+
+void ExprIf::show(std::ostream & str)
+{
+ str << "(if " << *cond << " then " << *then << " else " << *else_ << ")";
+}
+
+void ExprAssert::show(std::ostream & str)
+{
+ str << "assert " << *cond << "; " << *body;
+}
+
+void ExprOpNot::show(std::ostream & str)
+{
+ str << "(! " << *e << ")";
+}
+
+void ExprConcatStrings::show(std::ostream & str)
+{
+ bool first = true;
+ str << "(";
+ for (auto & i : *es) {
+ if (first) first = false; else str << " + ";
+ str << *i;
+ }
+ str << ")";
+}
+
+void ExprPos::show(std::ostream & str)
+{
+ str << "__curPos";
+}
+
+
+std::ostream & operator << (std::ostream & str, const Pos & pos)
+{
+ if (!pos)
+ str << "undefined position";
+ else
+ str << (format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%") % (string) pos.file % pos.line % pos.column).str();
+ return str;
+}
+
+
+string showAttrPath(const AttrPath & attrPath)
+{
+ std::ostringstream out;
+ bool first = true;
+ for (auto & i : attrPath) {
+ if (!first) out << '.'; else first = false;
+ if (i.symbol.set())
+ out << i.symbol;
+ else
+ out << "\"${" << *i.expr << "}\"";
+ }
+ return out.str();
+}
+
+
+Pos noPos;
+
+
+/* Computing levels/displacements for variables. */
+
+void Expr::bindVars(const StaticEnv & env)
+{
+ abort();
+}
+
+void ExprInt::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprFloat::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprString::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprPath::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprVar::bindVars(const StaticEnv & env)
+{
+ /* Check whether the variable appears in the environment. If so,
+ set its level and displacement. */
+ const StaticEnv * curEnv;
+ unsigned int level;
+ int withLevel = -1;
+ for (curEnv = &env, level = 0; curEnv; curEnv = curEnv->up, level++) {
+ if (curEnv->isWith) {
+ if (withLevel == -1) withLevel = level;
+ } else {
+ StaticEnv::Vars::const_iterator i = curEnv->vars.find(name);
+ if (i != curEnv->vars.end()) {
+ fromWith = false;
+ this->level = level;
+ displ = i->second;
+ return;
+ }
+ }
+ }
+
+ /* Otherwise, the variable must be obtained from the nearest
+ enclosing `with'. If there is no `with', then we can issue an
+ "undefined variable" error now. */
+ if (withLevel == -1) throw UndefinedVarError(format("undefined variable ‘%1%’ at %2%") % name % pos);
+
+ fromWith = true;
+ this->level = withLevel;
+}
+
+void ExprSelect::bindVars(const StaticEnv & env)
+{
+ e->bindVars(env);
+ if (def) def->bindVars(env);
+ for (auto & i : attrPath)
+ if (!i.symbol.set())
+ i.expr->bindVars(env);
+}
+
+void ExprOpHasAttr::bindVars(const StaticEnv & env)
+{
+ e->bindVars(env);
+ for (auto & i : attrPath)
+ if (!i.symbol.set())
+ i.expr->bindVars(env);
+}
+
+void ExprAttrs::bindVars(const StaticEnv & env)
+{
+ const StaticEnv * dynamicEnv = &env;
+ StaticEnv newEnv(false, &env);
+
+ if (recursive) {
+ dynamicEnv = &newEnv;
+
+ unsigned int displ = 0;
+ for (auto & i : attrs)
+ newEnv.vars[i.first] = i.second.displ = displ++;
+
+ for (auto & i : attrs)
+ i.second.e->bindVars(i.second.inherited ? env : newEnv);
+ }
+
+ else
+ for (auto & i : attrs)
+ i.second.e->bindVars(env);
+
+ for (auto & i : dynamicAttrs) {
+ i.nameExpr->bindVars(*dynamicEnv);
+ i.valueExpr->bindVars(*dynamicEnv);
+ }
+}
+
+void ExprList::bindVars(const StaticEnv & env)
+{
+ for (auto & i : elems)
+ i->bindVars(env);
+}
+
+void ExprLambda::bindVars(const StaticEnv & env)
+{
+ StaticEnv newEnv(false, &env);
+
+ unsigned int displ = 0;
+
+ if (!arg.empty()) newEnv.vars[arg] = displ++;
+
+ if (matchAttrs) {
+ for (auto & i : formals->formals)
+ newEnv.vars[i.name] = displ++;
+
+ for (auto & i : formals->formals)
+ if (i.def) i.def->bindVars(newEnv);
+ }
+
+ body->bindVars(newEnv);
+}
+
+void ExprLet::bindVars(const StaticEnv & env)
+{
+ StaticEnv newEnv(false, &env);
+
+ unsigned int displ = 0;
+ for (auto & i : attrs->attrs)
+ newEnv.vars[i.first] = i.second.displ = displ++;
+
+ for (auto & i : attrs->attrs)
+ i.second.e->bindVars(i.second.inherited ? env : newEnv);
+
+ body->bindVars(newEnv);
+}
+
+void ExprWith::bindVars(const StaticEnv & env)
+{
+ /* Does this `with' have an enclosing `with'? If so, record its
+ level so that `lookupVar' can look up variables in the previous
+ `with' if this one doesn't contain the desired attribute. */
+ const StaticEnv * curEnv;
+ unsigned int level;
+ prevWith = 0;
+ for (curEnv = &env, level = 1; curEnv; curEnv = curEnv->up, level++)
+ if (curEnv->isWith) {
+ prevWith = level;
+ break;
+ }
+
+ attrs->bindVars(env);
+ StaticEnv newEnv(true, &env);
+ body->bindVars(newEnv);
+}
+
+void ExprIf::bindVars(const StaticEnv & env)
+{
+ cond->bindVars(env);
+ then->bindVars(env);
+ else_->bindVars(env);
+}
+
+void ExprAssert::bindVars(const StaticEnv & env)
+{
+ cond->bindVars(env);
+ body->bindVars(env);
+}
+
+void ExprOpNot::bindVars(const StaticEnv & env)
+{
+ e->bindVars(env);
+}
+
+void ExprConcatStrings::bindVars(const StaticEnv & env)
+{
+ for (auto & i : *es)
+ i->bindVars(env);
+}
+
+void ExprPos::bindVars(const StaticEnv & env)
+{
+}
+
+
+/* Storing function names. */
+
+void Expr::setName(Symbol & name)
+{
+}
+
+
+void ExprLambda::setName(Symbol & name)
+{
+ this->name = name;
+ body->setName(name);
+}
+
+
+string ExprLambda::showNamePos() const
+{
+ return (format("%1% at %2%") % (name.set() ? "‘" + (string) name + "’" : "anonymous function") % pos).str();
+}
+
+
+
+/* Symbol table. */
+
+size_t SymbolTable::totalSize() const
+{
+ size_t n = 0;
+ for (auto & i : symbols)
+ n += i.size();
+ return n;
+}
+
+
+}
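ExprVar::bindVars above resolves every variable to a (level, displacement) pair by walking the chain of static environments outwards; names not found anywhere fall back to the nearest enclosing `with', or trigger an undefined-variable error. A simplified, self-contained sketch of that lookup, using a toy environment type rather than nix::StaticEnv:

    #include <cassert>
    #include <map>
    #include <string>

    struct MiniStaticEnv
    {
        const MiniStaticEnv * up;
        std::map<std::string, unsigned int> vars;   // name -> displacement
    };

    // Walk outwards, counting levels, until the name is found.
    static bool lookup(const MiniStaticEnv & env, const std::string & name,
        unsigned int & level, unsigned int & displ)
    {
        level = 0;
        for (const MiniStaticEnv * cur = &env; cur; cur = cur->up, ++level) {
            auto i = cur->vars.find(name);
            if (i != cur->vars.end()) { displ = i->second; return true; }
        }
        return false;   // would have to come from a `with', or is undefined
    }

    int main()
    {
        MiniStaticEnv outer{nullptr, {{"x", 0}, {"y", 1}}};
        MiniStaticEnv inner{&outer, {{"z", 0}}};
        unsigned int level, displ;
        assert(lookup(inner, "y", level, displ) && level == 1 && displ == 1);
        assert(lookup(inner, "z", level, displ) && level == 0 && displ == 0);
        return 0;
    }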
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
new file mode 100644
index 000000000..d2ca09b3a
--- /dev/null
+++ b/src/libexpr/nixexpr.hh
@@ -0,0 +1,343 @@
+#pragma once
+
+#include "value.hh"
+#include "symbol-table.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+MakeError(EvalError, Error)
+MakeError(ParseError, Error)
+MakeError(IncompleteParseError, ParseError)
+MakeError(AssertionError, EvalError)
+MakeError(ThrownError, AssertionError)
+MakeError(Abort, EvalError)
+MakeError(TypeError, EvalError)
+MakeError(UndefinedVarError, Error)
+MakeError(RestrictedPathError, Error)
+
+
+/* Position objects. */
+
+struct Pos
+{
+ Symbol file;
+ unsigned int line, column;
+ Pos() : line(0), column(0) { };
+ Pos(const Symbol & file, unsigned int line, unsigned int column)
+ : file(file), line(line), column(column) { };
+ operator bool() const
+ {
+ return line != 0;
+ }
+ bool operator < (const Pos & p2) const
+ {
+ if (!line) return p2.line;
+ if (!p2.line) return false;
+ int d = ((string) file).compare((string) p2.file);
+ if (d < 0) return true;
+ if (d > 0) return false;
+ if (line < p2.line) return true;
+ if (line > p2.line) return false;
+ return column < p2.column;
+ }
+};
+
+extern Pos noPos;
+
+std::ostream & operator << (std::ostream & str, const Pos & pos);
+
+
+struct Env;
+struct Value;
+class EvalState;
+struct StaticEnv;
+
+
+/* An attribute path is a sequence of attribute names. */
+struct AttrName
+{
+ Symbol symbol;
+ Expr * expr;
+ AttrName(const Symbol & s) : symbol(s) {};
+ AttrName(Expr * e) : expr(e) {};
+};
+
+typedef std::vector<AttrName> AttrPath;
+
+string showAttrPath(const AttrPath & attrPath);
+
+
+/* Abstract syntax of Nix expressions. */
+
+struct Expr
+{
+ virtual ~Expr() { };
+ virtual void show(std::ostream & str);
+ virtual void bindVars(const StaticEnv & env);
+ virtual void eval(EvalState & state, Env & env, Value & v);
+ virtual Value * maybeThunk(EvalState & state, Env & env);
+ virtual void setName(Symbol & name);
+};
+
+std::ostream & operator << (std::ostream & str, Expr & e);
+
+#define COMMON_METHODS \
+ void show(std::ostream & str); \
+ void eval(EvalState & state, Env & env, Value & v); \
+ void bindVars(const StaticEnv & env);
+
+struct ExprInt : Expr
+{
+ NixInt n;
+ Value v;
+ ExprInt(NixInt n) : n(n) { mkInt(v, n); };
+ COMMON_METHODS
+ Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprFloat : Expr
+{
+ NixFloat nf;
+ Value v;
+ ExprFloat(NixFloat nf) : nf(nf) { mkFloat(v, nf); };
+ COMMON_METHODS
+ Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprString : Expr
+{
+ Symbol s;
+ Value v;
+ ExprString(const Symbol & s) : s(s) { mkString(v, s); };
+ COMMON_METHODS
+ Value * maybeThunk(EvalState & state, Env & env);
+};
+
+/* Temporary class used during parsing of indented strings. */
+struct ExprIndStr : Expr
+{
+ string s;
+ ExprIndStr(const string & s) : s(s) { };
+};
+
+struct ExprPath : Expr
+{
+ string s;
+ Value v;
+ ExprPath(const string & s) : s(s) { mkPathNoCopy(v, this->s.c_str()); };
+ COMMON_METHODS
+ Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprVar : Expr
+{
+ Pos pos;
+ Symbol name;
+
+ /* Whether the variable comes from an environment (e.g. a rec, let
+ or function argument) or from a "with". */
+ bool fromWith;
+
+ /* In the former case, the value is obtained by going `level'
+ levels up from the current environment and getting the
+ `displ'th value in that environment. In the latter case, the
+ value is obtained by getting the attribute named `name' from
+ the set stored in the environment that is `level' levels up
+ from the current one.*/
+ unsigned int level;
+ unsigned int displ;
+
+ ExprVar(const Symbol & name) : name(name) { };
+ ExprVar(const Pos & pos, const Symbol & name) : pos(pos), name(name) { };
+ COMMON_METHODS
+ Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprSelect : Expr
+{
+ Pos pos;
+ Expr * e, * def;
+ AttrPath attrPath;
+ ExprSelect(const Pos & pos, Expr * e, const AttrPath & attrPath, Expr * def) : pos(pos), e(e), def(def), attrPath(attrPath) { };
+ ExprSelect(const Pos & pos, Expr * e, const Symbol & name) : pos(pos), e(e), def(0) { attrPath.push_back(AttrName(name)); };
+ COMMON_METHODS
+};
+
+struct ExprOpHasAttr : Expr
+{
+ Expr * e;
+ AttrPath attrPath;
+ ExprOpHasAttr(Expr * e, const AttrPath & attrPath) : e(e), attrPath(attrPath) { };
+ COMMON_METHODS
+};
+
+struct ExprAttrs : Expr
+{
+ bool recursive;
+ struct AttrDef {
+ bool inherited;
+ Expr * e;
+ Pos pos;
+ unsigned int displ; // displacement
+ AttrDef(Expr * e, const Pos & pos, bool inherited=false)
+ : inherited(inherited), e(e), pos(pos) { };
+ AttrDef() { };
+ };
+ typedef std::map<Symbol, AttrDef> AttrDefs;
+ AttrDefs attrs;
+ struct DynamicAttrDef {
+ Expr * nameExpr, * valueExpr;
+ Pos pos;
+ DynamicAttrDef(Expr * nameExpr, Expr * valueExpr, const Pos & pos)
+ : nameExpr(nameExpr), valueExpr(valueExpr), pos(pos) { };
+ };
+ typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
+ DynamicAttrDefs dynamicAttrs;
+ ExprAttrs() : recursive(false) { };
+ COMMON_METHODS
+};
+
+struct ExprList : Expr
+{
+ std::vector<Expr *> elems;
+ ExprList() { };
+ COMMON_METHODS
+};
+
+struct Formal
+{
+ Symbol name;
+ Expr * def;
+ Formal(const Symbol & name, Expr * def) : name(name), def(def) { };
+};
+
+struct Formals
+{
+ typedef std::list<Formal> Formals_;
+ Formals_ formals;
+ std::set<Symbol> argNames; // used during parsing
+ bool ellipsis;
+};
+
+struct ExprLambda : Expr
+{
+ Pos pos;
+ Symbol name;
+ Symbol arg;
+ bool matchAttrs;
+ Formals * formals;
+ Expr * body;
+ ExprLambda(const Pos & pos, const Symbol & arg, bool matchAttrs, Formals * formals, Expr * body)
+ : pos(pos), arg(arg), matchAttrs(matchAttrs), formals(formals), body(body)
+ {
+ if (!arg.empty() && formals && formals->argNames.find(arg) != formals->argNames.end())
+ throw ParseError(format("duplicate formal function argument ‘%1%’ at %2%")
+ % arg % pos);
+ };
+ void setName(Symbol & name);
+ string showNamePos() const;
+ COMMON_METHODS
+};
+
+struct ExprLet : Expr
+{
+ ExprAttrs * attrs;
+ Expr * body;
+ ExprLet(ExprAttrs * attrs, Expr * body) : attrs(attrs), body(body) { };
+ COMMON_METHODS
+};
+
+struct ExprWith : Expr
+{
+ Pos pos;
+ Expr * attrs, * body;
+ unsigned int prevWith;
+ ExprWith(const Pos & pos, Expr * attrs, Expr * body) : pos(pos), attrs(attrs), body(body) { };
+ COMMON_METHODS
+};
+
+struct ExprIf : Expr
+{
+ Expr * cond, * then, * else_;
+ ExprIf(Expr * cond, Expr * then, Expr * else_) : cond(cond), then(then), else_(else_) { };
+ COMMON_METHODS
+};
+
+struct ExprAssert : Expr
+{
+ Pos pos;
+ Expr * cond, * body;
+ ExprAssert(const Pos & pos, Expr * cond, Expr * body) : pos(pos), cond(cond), body(body) { };
+ COMMON_METHODS
+};
+
+struct ExprOpNot : Expr
+{
+ Expr * e;
+ ExprOpNot(Expr * e) : e(e) { };
+ COMMON_METHODS
+};
+
+#define MakeBinOp(name, s) \
+ struct Expr##name : Expr \
+ { \
+ Pos pos; \
+ Expr * e1, * e2; \
+ Expr##name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \
+ Expr##name(const Pos & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \
+ void show(std::ostream & str) \
+ { \
+ str << "(" << *e1 << " " s " " << *e2 << ")"; \
+ } \
+ void bindVars(const StaticEnv & env) \
+ { \
+ e1->bindVars(env); e2->bindVars(env); \
+ } \
+ void eval(EvalState & state, Env & env, Value & v); \
+ };
+
+MakeBinOp(App, "")
+MakeBinOp(OpEq, "==")
+MakeBinOp(OpNEq, "!=")
+MakeBinOp(OpAnd, "&&")
+MakeBinOp(OpOr, "||")
+MakeBinOp(OpImpl, "->")
+MakeBinOp(OpUpdate, "//")
+MakeBinOp(OpConcatLists, "++")
+
+struct ExprConcatStrings : Expr
+{
+ Pos pos;
+ bool forceString;
+ vector<Expr *> * es;
+ ExprConcatStrings(const Pos & pos, bool forceString, vector<Expr *> * es)
+ : pos(pos), forceString(forceString), es(es) { };
+ COMMON_METHODS
+};
+
+struct ExprPos : Expr
+{
+ Pos pos;
+ ExprPos(const Pos & pos) : pos(pos) { };
+ COMMON_METHODS
+};
+
+
+/* Static environments are used to map variable names onto (level,
+ displacement) pairs used to obtain the value of the variable at
+ runtime. */
+struct StaticEnv
+{
+ bool isWith;
+ const StaticEnv * up;
+ typedef std::map<Symbol, unsigned int> Vars;
+ Vars vars;
+ StaticEnv(bool isWith, const StaticEnv * up) : isWith(isWith), up(up) { };
+};
+
+
+}
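For reference, each MakeBinOp line above generates one small Expr subclass; expanding MakeBinOp(OpEq, "==") by hand gives roughly the following (adjacent string literals merged, semantics unchanged):

    struct ExprOpEq : Expr
    {
        Pos pos;
        Expr * e1, * e2;
        ExprOpEq(Expr * e1, Expr * e2) : e1(e1), e2(e2) { };
        ExprOpEq(const Pos & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { };
        void show(std::ostream & str)
        {
            str << "(" << *e1 << " == " << *e2 << ")";
        }
        void bindVars(const StaticEnv & env)
        {
            e1->bindVars(env); e2->bindVars(env);
        }
        void eval(EvalState & state, Env & env, Value & v);
    };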
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
new file mode 100644
index 000000000..d07eeddda
--- /dev/null
+++ b/src/libexpr/parser.y
@@ -0,0 +1,687 @@
+%glr-parser
+%pure-parser
+%locations
+%error-verbose
+%defines
+/* %no-lines */
+%parse-param { void * scanner }
+%parse-param { nix::ParseData * data }
+%lex-param { void * scanner }
+%lex-param { nix::ParseData * data }
+%expect 1
+%expect-rr 1
+
+%code requires {
+
+#ifndef BISON_HEADER
+#define BISON_HEADER
+
+#include "util.hh"
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+namespace nix {
+
+ struct ParseData
+ {
+ EvalState & state;
+ SymbolTable & symbols;
+ Expr * result;
+ Path basePath;
+ Symbol path;
+ string error;
+ bool atEnd;
+ Symbol sLetBody;
+ ParseData(EvalState & state)
+ : state(state)
+ , symbols(state.symbols)
+ , atEnd(false)
+ , sLetBody(symbols.create("<let-body>"))
+ { };
+ };
+
+}
+
+#define YY_DECL int yylex \
+ (YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParseData * data)
+
+#endif
+
+}
+
+%{
+
+#include "parser-tab.hh"
+#include "lexer-tab.hh"
+
+YY_DECL;
+
+using namespace nix;
+
+
+namespace nix {
+
+
+static void dupAttr(const AttrPath & attrPath, const Pos & pos, const Pos & prevPos)
+{
+ throw ParseError(format("attribute ‘%1%’ at %2% already defined at %3%")
+ % showAttrPath(attrPath) % pos % prevPos);
+}
+
+
+static void dupAttr(Symbol attr, const Pos & pos, const Pos & prevPos)
+{
+ throw ParseError(format("attribute ‘%1%’ at %2% already defined at %3%")
+ % attr % pos % prevPos);
+}
+
+
+static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
+ Expr * e, const Pos & pos)
+{
+ AttrPath::iterator i;
+ // All attrpaths have at least one attr
+ assert(!attrPath.empty());
+ for (i = attrPath.begin(); i + 1 < attrPath.end(); i++) {
+ if (i->symbol.set()) {
+ ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
+ if (j != attrs->attrs.end()) {
+ if (!j->second.inherited) {
+ ExprAttrs * attrs2 = dynamic_cast<ExprAttrs *>(j->second.e);
+ if (!attrs2) dupAttr(attrPath, pos, j->second.pos);
+ attrs = attrs2;
+ } else
+ dupAttr(attrPath, pos, j->second.pos);
+ } else {
+ ExprAttrs * nested = new ExprAttrs;
+ attrs->attrs[i->symbol] = ExprAttrs::AttrDef(nested, pos);
+ attrs = nested;
+ }
+ } else {
+ ExprAttrs *nested = new ExprAttrs;
+ attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, nested, pos));
+ attrs = nested;
+ }
+ }
+ if (i->symbol.set()) {
+ ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
+ if (j != attrs->attrs.end()) {
+ dupAttr(attrPath, pos, j->second.pos);
+ } else {
+ attrs->attrs[i->symbol] = ExprAttrs::AttrDef(e, pos);
+ e->setName(i->symbol);
+ }
+ } else {
+ attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, e, pos));
+ }
+}
+
+
+static void addFormal(const Pos & pos, Formals * formals, const Formal & formal)
+{
+ if (formals->argNames.find(formal.name) != formals->argNames.end())
+ throw ParseError(format("duplicate formal function argument ‘%1%’ at %2%")
+ % formal.name % pos);
+ formals->formals.push_front(formal);
+ formals->argNames.insert(formal.name);
+}
+
+
+static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Expr *> & es)
+{
+ if (es.empty()) return new ExprString(symbols.create(""));
+
+ /* Figure out the minimum indentation. Note that by design
+ whitespace-only final lines are not taken into account. (So
+ the " " in "\n ''" is ignored, but the " " in "\n foo''" is.) */
+ bool atStartOfLine = true; /* = seen only whitespace in the current line */
+ unsigned int minIndent = 1000000;
+ unsigned int curIndent = 0;
+ for (auto & i : es) {
+ ExprIndStr * e = dynamic_cast<ExprIndStr *>(i);
+ if (!e) {
+ /* Anti-quotations end the current start-of-line whitespace. */
+ if (atStartOfLine) {
+ atStartOfLine = false;
+ if (curIndent < minIndent) minIndent = curIndent;
+ }
+ continue;
+ }
+ for (unsigned int j = 0; j < e->s.size(); ++j) {
+ if (atStartOfLine) {
+ if (e->s[j] == ' ')
+ curIndent++;
+ else if (e->s[j] == '\n') {
+ /* Empty line, doesn't influence minimum
+ indentation. */
+ curIndent = 0;
+ } else {
+ atStartOfLine = false;
+ if (curIndent < minIndent) minIndent = curIndent;
+ }
+ } else if (e->s[j] == '\n') {
+ atStartOfLine = true;
+ curIndent = 0;
+ }
+ }
+ }
+
+ /* Strip spaces from each line. */
+ vector<Expr *> * es2 = new vector<Expr *>;
+ atStartOfLine = true;
+ unsigned int curDropped = 0;
+ unsigned int n = es.size();
+ for (vector<Expr *>::iterator i = es.begin(); i != es.end(); ++i, --n) {
+ ExprIndStr * e = dynamic_cast<ExprIndStr *>(*i);
+ if (!e) {
+ atStartOfLine = false;
+ curDropped = 0;
+ es2->push_back(*i);
+ continue;
+ }
+
+ string s2;
+ for (unsigned int j = 0; j < e->s.size(); ++j) {
+ if (atStartOfLine) {
+ if (e->s[j] == ' ') {
+ if (curDropped++ >= minIndent)
+ s2 += e->s[j];
+ }
+ else if (e->s[j] == '\n') {
+ curDropped = 0;
+ s2 += e->s[j];
+ } else {
+ atStartOfLine = false;
+ curDropped = 0;
+ s2 += e->s[j];
+ }
+ } else {
+ s2 += e->s[j];
+ if (e->s[j] == '\n') atStartOfLine = true;
+ }
+ }
+
+ /* Remove the last line if it is empty and consists only of
+ spaces. */
+ if (n == 1) {
+ string::size_type p = s2.find_last_of('\n');
+ if (p != string::npos && s2.find_first_not_of(' ', p + 1) == string::npos)
+ s2 = string(s2, 0, p + 1);
+ }
+
+ es2->push_back(new ExprString(symbols.create(s2)));
+ }
+
+ /* If this is a single string, then don't do a concatenation. */
+ return es2->size() == 1 && dynamic_cast<ExprString *>((*es2)[0]) ? (*es2)[0] : new ExprConcatStrings(pos, true, es2);
+}
+
+
+static inline Pos makeCurPos(const YYLTYPE & loc, ParseData * data)
+{
+ return Pos(data->path, loc.first_line, loc.first_column);
+}
+
+#define CUR_POS makeCurPos(*yylocp, data)
+
+
+}
+
+
+void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * error)
+{
+ data->error = (format("%1%, at %2%")
+ % error % makeCurPos(*loc, data)).str();
+}
+
+
+%}
+
+%union {
+ // !!! We're probably leaking stuff here.
+ nix::Expr * e;
+ nix::ExprList * list;
+ nix::ExprAttrs * attrs;
+ nix::Formals * formals;
+ nix::Formal * formal;
+ nix::NixInt n;
+ nix::NixFloat nf;
+ const char * id; // !!! -> Symbol
+ char * path;
+ char * uri;
+ std::vector<nix::AttrName> * attrNames;
+ std::vector<nix::Expr *> * string_parts;
+}
+
+%type <e> start expr expr_function expr_if expr_op
+%type <e> expr_app expr_select expr_simple
+%type <list> expr_list
+%type <attrs> binds
+%type <formals> formals
+%type <formal> formal
+%type <attrNames> attrs attrpath
+%type <string_parts> string_parts_interpolated ind_string_parts
+%type <e> string_parts string_attr
+%type <id> attr
+%token <id> ID ATTRPATH
+%token <e> STR IND_STR
+%token <n> INT
+%token <nf> FLOAT
+%token <path> PATH HPATH SPATH
+%token <uri> URI
+%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW
+%token DOLLAR_CURLY /* == ${ */
+%token IND_STRING_OPEN IND_STRING_CLOSE
+%token ELLIPSIS
+
+%nonassoc IMPL
+%left OR
+%left AND
+%nonassoc EQ NEQ
+%left '<' '>' LEQ GEQ
+%right UPDATE
+%left NOT
+%left '+' '-'
+%left '*' '/'
+%right CONCAT
+%nonassoc '?'
+%nonassoc NEGATE
+
+%%
+
+start: expr { data->result = $1; };
+
+expr: expr_function;
+
+expr_function
+ : ID ':' expr_function
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), false, 0, $3); }
+ | '{' formals '}' ':' expr_function
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create(""), true, $2, $5); }
+ | '{' formals '}' '@' ID ':' expr_function
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($5), true, $2, $7); }
+ | ID '@' '{' formals '}' ':' expr_function
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), true, $4, $7); }
+ | ASSERT expr ';' expr_function
+ { $$ = new ExprAssert(CUR_POS, $2, $4); }
+ | WITH expr ';' expr_function
+ { $$ = new ExprWith(CUR_POS, $2, $4); }
+ | LET binds IN expr_function
+ { if (!$2->dynamicAttrs.empty())
+ throw ParseError(format("dynamic attributes not allowed in let at %1%")
+ % CUR_POS);
+ $$ = new ExprLet($2, $4);
+ }
+ | expr_if
+ ;
+
+expr_if
+ : IF expr THEN expr ELSE expr { $$ = new ExprIf($2, $4, $6); }
+ | expr_op
+ ;
+
+expr_op
+ : '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
+ | '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), new ExprInt(0)), $2); }
+ | expr_op EQ expr_op { $$ = new ExprOpEq($1, $3); }
+ | expr_op NEQ expr_op { $$ = new ExprOpNEq($1, $3); }
+ | expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3); }
+ | expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1)); }
+ | expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1); }
+ | expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3)); }
+ | expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); }
+ | expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); }
+ | expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); }
+ | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(CUR_POS, $1, $3); }
+ | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
+ | expr_op '+' expr_op
+ { $$ = new ExprConcatStrings(CUR_POS, false, new vector<Expr *>({$1, $3})); }
+ | expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), $1), $3); }
+ | expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__mul")), $1), $3); }
+ | expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__div")), $1), $3); }
+ | expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); }
+ | expr_app
+ ;
+
+expr_app
+ : expr_app expr_select
+ { $$ = new ExprApp(CUR_POS, $1, $2); }
+ | expr_select { $$ = $1; }
+ ;
+
+expr_select
+ : expr_simple '.' attrpath
+ { $$ = new ExprSelect(CUR_POS, $1, *$3, 0); }
+ | expr_simple '.' attrpath OR_KW expr_select
+ { $$ = new ExprSelect(CUR_POS, $1, *$3, $5); }
+ | /* Backwards compatibility: because Nixpkgs has a rarely used
+ function named ‘or’, allow stuff like ‘map or [...]’. */
+ expr_simple OR_KW
+ { $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.create("or"))); }
+ | expr_simple { $$ = $1; }
+ ;
+
+expr_simple
+ : ID {
+ if (strcmp($1, "__curPos") == 0)
+ $$ = new ExprPos(CUR_POS);
+ else
+ $$ = new ExprVar(CUR_POS, data->symbols.create($1));
+ }
+ | INT { $$ = new ExprInt($1); }
+ | FLOAT { $$ = new ExprFloat($1); }
+ | '"' string_parts '"' { $$ = $2; }
+ | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
+ $$ = stripIndentation(CUR_POS, data->symbols, *$2);
+ }
+ | PATH { $$ = new ExprPath(absPath($1, data->basePath)); }
+ | HPATH { $$ = new ExprPath(getEnv("HOME", "") + string{$1 + 1}); }
+ | SPATH {
+ string path($1 + 1, strlen($1) - 2);
+ $$ = new ExprApp(CUR_POS,
+ new ExprApp(new ExprVar(data->symbols.create("__findFile")),
+ new ExprVar(data->symbols.create("__nixPath"))),
+ new ExprString(data->symbols.create(path)));
+ }
+ | URI { $$ = new ExprString(data->symbols.create($1)); }
+ | '(' expr ')' { $$ = $2; }
+ /* Let expressions `let {..., body = ...}' are just desugared
+ into `(rec {..., body = ...}).body'. */
+ | LET '{' binds '}'
+ { $3->recursive = true; $$ = new ExprSelect(noPos, $3, data->symbols.create("body")); }
+ | REC '{' binds '}'
+ { $3->recursive = true; $$ = $3; }
+ | '{' binds '}'
+ { $$ = $2; }
+ | '[' expr_list ']' { $$ = $2; }
+ ;
+
+string_parts
+ : STR
+ | string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); }
+ | { $$ = new ExprString(data->symbols.create("")); }
+ ;
+
+string_parts_interpolated
+ : string_parts_interpolated STR { $$ = $1; $1->push_back($2); }
+ | string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
+ | DOLLAR_CURLY expr '}' { $$ = new vector<Expr *>; $$->push_back($2); }
+ | STR DOLLAR_CURLY expr '}' {
+ $$ = new vector<Expr *>;
+ $$->push_back($1);
+ $$->push_back($3);
+ }
+ ;
+
+ind_string_parts
+ : ind_string_parts IND_STR { $$ = $1; $1->push_back($2); }
+ | ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
+ | { $$ = new vector<Expr *>; }
+ ;
+
+binds
+ : binds attrpath '=' expr ';' { $$ = $1; addAttr($$, *$2, $4, makeCurPos(@2, data)); }
+ | binds INHERIT attrs ';'
+ { $$ = $1;
+ for (auto & i : *$3) {
+ if ($$->attrs.find(i.symbol) != $$->attrs.end())
+ dupAttr(i.symbol, makeCurPos(@3, data), $$->attrs[i.symbol].pos);
+ Pos pos = makeCurPos(@3, data);
+ $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true);
+ }
+ }
+ | binds INHERIT '(' expr ')' attrs ';'
+ { $$ = $1;
+ /* !!! Should ensure sharing of the expression in $4. */
+ for (auto & i : *$6) {
+ if ($$->attrs.find(i.symbol) != $$->attrs.end())
+ dupAttr(i.symbol, makeCurPos(@6, data), $$->attrs[i.symbol].pos);
+ $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
+ }
+ }
+ | { $$ = new ExprAttrs; }
+ ;
+
+attrs
+ : attrs attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($2))); }
+ | attrs string_attr
+ { $$ = $1;
+ ExprString * str = dynamic_cast<ExprString *>($2);
+ if (str) {
+ $$->push_back(AttrName(str->s));
+ delete str;
+ } else
+ throw ParseError(format("dynamic attributes not allowed in inherit at %1%")
+ % makeCurPos(@2, data));
+ }
+ | { $$ = new AttrPath; }
+ ;
+
+attrpath
+ : attrpath '.' attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($3))); }
+ | attrpath '.' string_attr
+ { $$ = $1;
+ ExprString * str = dynamic_cast<ExprString *>($3);
+ if (str) {
+ $$->push_back(AttrName(str->s));
+ delete str;
+ } else
+ $$->push_back(AttrName($3));
+ }
+ | attr { $$ = new vector<AttrName>; $$->push_back(AttrName(data->symbols.create($1))); }
+ | string_attr
+ { $$ = new vector<AttrName>;
+ ExprString *str = dynamic_cast<ExprString *>($1);
+ if (str) {
+ $$->push_back(AttrName(str->s));
+ delete str;
+ } else
+ $$->push_back(AttrName($1));
+ }
+ ;
+
+attr
+ : ID { $$ = $1; }
+ | OR_KW { $$ = "or"; }
+ ;
+
+string_attr
+ : '"' string_parts '"' { $$ = $2; }
+ | DOLLAR_CURLY expr '}' { $$ = $2; }
+ ;
+
+expr_list
+ : expr_list expr_select { $$ = $1; $1->elems.push_back($2); /* !!! dangerous */ }
+ | { $$ = new ExprList; }
+ ;
+
+formals
+ : formal ',' formals
+ { $$ = $3; addFormal(CUR_POS, $$, *$1); }
+ | formal
+ { $$ = new Formals; addFormal(CUR_POS, $$, *$1); $$->ellipsis = false; }
+ |
+ { $$ = new Formals; $$->ellipsis = false; }
+ | ELLIPSIS
+ { $$ = new Formals; $$->ellipsis = true; }
+ ;
+
+formal
+ : ID { $$ = new Formal(data->symbols.create($1), 0); }
+ | ID '?' expr { $$ = new Formal(data->symbols.create($1), $3); }
+ ;
+
+%%
+
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "eval.hh"
+#include "download.hh"
+#include "store-api.hh"
+#include "primops/fetchgit.hh"
+
+
+namespace nix {
+
+
+Expr * EvalState::parse(const char * text,
+ const Path & path, const Path & basePath, StaticEnv & staticEnv)
+{
+ yyscan_t scanner;
+ ParseData data(*this);
+ data.basePath = basePath;
+ data.path = data.symbols.create(path);
+
+ yylex_init(&scanner);
+ yy_scan_string(text, scanner);
+ int res = yyparse(scanner, &data);
+ yylex_destroy(scanner);
+
+ if (res) {
+ if (data.atEnd)
+ throw IncompleteParseError(data.error);
+ else
+ throw ParseError(data.error);
+ }
+
+ data.result->bindVars(staticEnv);
+
+ return data.result;
+}
+
+
+Path resolveExprPath(Path path)
+{
+ assert(path[0] == '/');
+
+ /* If `path' is a symlink, follow it. This is so that relative
+ path references work. */
+ struct stat st;
+ while (true) {
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting status of ‘%1%’") % path);
+ if (!S_ISLNK(st.st_mode)) break;
+ path = absPath(readLink(path), dirOf(path));
+ }
+
+ /* If `path' refers to a directory, append `/default.nix'. */
+ if (S_ISDIR(st.st_mode))
+ path = canonPath(path + "/default.nix");
+
+ return path;
+}
+
+
+Expr * EvalState::parseExprFromFile(const Path & path)
+{
+ return parseExprFromFile(path, staticBaseEnv);
+}
+
+
+Expr * EvalState::parseExprFromFile(const Path & path, StaticEnv & staticEnv)
+{
+ return parse(readFile(path).c_str(), path, dirOf(path), staticEnv);
+}
+
+
+Expr * EvalState::parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv)
+{
+ return parse(s.c_str(), "(string)", basePath, staticEnv);
+}
+
+
+Expr * EvalState::parseExprFromString(const string & s, const Path & basePath)
+{
+ return parseExprFromString(s, basePath, staticBaseEnv);
+}
+
+
+void EvalState::addToSearchPath(const string & s)
+{
+ size_t pos = s.find('=');
+ string prefix;
+ Path path;
+ if (pos == string::npos) {
+ path = s;
+ } else {
+ prefix = string(s, 0, pos);
+ path = string(s, pos + 1);
+ }
+
+ searchPath.emplace_back(prefix, path);
+}
+
+
+Path EvalState::findFile(const string & path)
+{
+ return findFile(searchPath, path);
+}
+
+
+Path EvalState::findFile(SearchPath & searchPath, const string & path, const Pos & pos)
+{
+ for (auto & i : searchPath) {
+ std::string suffix;
+ if (i.first.empty())
+ suffix = "/" + path;
+ else {
+ auto s = i.first.size();
+ if (path.compare(0, s, i.first) != 0 ||
+ (path.size() > s && path[s] != '/'))
+ continue;
+ suffix = path.size() == s ? "" : "/" + string(path, s);
+ }
+ auto r = resolveSearchPathElem(i);
+ if (!r.first) continue;
+ Path res = r.second + suffix;
+ if (pathExists(res)) return canonPath(res);
+ }
+ format f = format(
+ "file ‘%1%’ was not found in the Nix search path (add it using $NIX_PATH or -I)"
+ + string(pos ? ", at %2%" : ""));
+ f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
+ throw ThrownError(f % path % pos);
+}
+
+
+std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathElem & elem)
+{
+ auto i = searchPathResolved.find(elem.second);
+ if (i != searchPathResolved.end()) return i->second;
+
+ std::pair<bool, std::string> res;
+
+ if (isUri(elem.second)) {
+ try {
+ if (hasPrefix(elem.second, "git://") || hasSuffix(elem.second, ".git"))
+ // FIXME: support specifying revision/branch
+ res = { true, exportGit(store, elem.second, "master") };
+ else
+ res = { true, getDownloader()->downloadCached(store, elem.second, true) };
+ } catch (DownloadError & e) {
+ printError(format("warning: Nix search path entry ‘%1%’ cannot be downloaded, ignoring") % elem.second);
+ res = { false, "" };
+ }
+ } else {
+ auto path = absPath(elem.second);
+ if (pathExists(path))
+ res = { true, path };
+ else {
+ printError(format("warning: Nix search path entry ‘%1%’ does not exist, ignoring") % elem.second);
+ res = { false, "" };
+ }
+ }
+
+ debug(format("resolved search path element ‘%s’ to ‘%s’") % elem.second % res.second);
+
+ searchPathResolved[elem.second] = res;
+ return res;
+}
+
+
+}
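stripIndentation above works in two passes: first compute the minimum indentation over all non-empty lines, then drop that many leading spaces from every line (with extra handling for antiquotations and a whitespace-only final line). A self-contained sketch of the core two-pass idea on a single string, omitting those refinements:

    #include <cassert>
    #include <string>

    static std::string stripIndentSketch(const std::string & s)
    {
        /* Pass 1: minimum indentation of non-empty lines. */
        std::string::size_type minIndent = std::string::npos, curIndent = 0;
        bool atStartOfLine = true;
        for (char c : s) {
            if (atStartOfLine) {
                if (c == ' ') curIndent++;
                else if (c == '\n') curIndent = 0;   /* empty line: ignore */
                else {
                    atStartOfLine = false;
                    if (curIndent < minIndent) minIndent = curIndent;
                }
            } else if (c == '\n') { atStartOfLine = true; curIndent = 0; }
        }
        if (minIndent == std::string::npos) minIndent = 0;

        /* Pass 2: strip that many leading spaces from each line. */
        std::string res;
        std::string::size_type dropped = 0;
        atStartOfLine = true;
        for (char c : s) {
            if (atStartOfLine && c == ' ' && dropped < minIndent) { dropped++; continue; }
            atStartOfLine = (c == '\n');
            if (atStartOfLine) dropped = 0;
            res += c;
        }
        return res;
    }

    int main()
    {
        assert(stripIndentSketch("  foo\n    bar\n") == "foo\n  bar\n");
        return 0;
    }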
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
new file mode 100644
index 000000000..615cc8138
--- /dev/null
+++ b/src/libexpr/primops.cc
@@ -0,0 +1,2077 @@
+#include "archive.hh"
+#include "derivations.hh"
+#include "download.hh"
+#include "eval-inline.hh"
+#include "eval.hh"
+#include "globals.hh"
+#include "json-to-value.hh"
+#include "names.hh"
+#include "store-api.hh"
+#include "util.hh"
+#include "json.hh"
+#include "value-to-json.hh"
+#include "value-to-xml.hh"
+#include "primops.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cstring>
+#include <regex>
+#include <dlfcn.h>
+
+
+namespace nix {
+
+
+/*************************************************************
+ * Miscellaneous
+ *************************************************************/
+
+
+/* Decode a context string ‘!<name>!<path>’ into a pair <path,
+ name>. */
+std::pair<string, string> decodeContext(const string & s)
+{
+ if (s.at(0) == '!') {
+ size_t index = s.find("!", 1);
+ return std::pair<string, string>(string(s, index + 1), string(s, 1, index - 1));
+ } else
+ return std::pair<string, string>(s.at(0) == '/' ? s: string(s, 1), "");
+}
+
+
+InvalidPathError::InvalidPathError(const Path & path) :
+ EvalError(format("path ‘%1%’ is not valid") % path), path(path) {}
+
+void EvalState::realiseContext(const PathSet & context)
+{
+ PathSet drvs;
+ for (auto & i : context) {
+ std::pair<string, string> decoded = decodeContext(i);
+ Path ctx = decoded.first;
+ assert(store->isStorePath(ctx));
+ if (!store->isValidPath(ctx))
+ throw InvalidPathError(ctx);
+ if (!decoded.second.empty() && nix::isDerivation(ctx))
+ drvs.insert(decoded.first + "!" + decoded.second);
+ }
+ if (!drvs.empty()) {
+ if (!settings.enableImportFromDerivation)
+ throw EvalError(format("attempted to realize ‘%1%’ during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin()));
+ /* For performance, prefetch all substitute info. */
+ PathSet willBuild, willSubstitute, unknown;
+ unsigned long long downloadSize, narSize;
+ store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize);
+ store->buildPaths(drvs);
+ }
+}
+
+
+/* Load and evaluate an expression from the path specified by the
+   argument. */
+static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[1], context);
+
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot import ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % path % e.path % pos);
+ }
+
+ path = state.checkSourcePath(path);
+
+ if (state.store->isStorePath(path) && state.store->isValidPath(path) && isDerivation(path)) {
+ Derivation drv = readDerivation(path);
+ Value & w = *state.allocValue();
+ state.mkAttrs(w, 3 + drv.outputs.size());
+ Value * v2 = state.allocAttr(w, state.sDrvPath);
+ mkString(*v2, path, {"=" + path});
+ v2 = state.allocAttr(w, state.sName);
+ mkString(*v2, drv.env["name"]);
+ Value * outputsVal =
+ state.allocAttr(w, state.symbols.create("outputs"));
+ state.mkList(*outputsVal, drv.outputs.size());
+ unsigned int outputs_index = 0;
+
+ for (const auto & o : drv.outputs) {
+ v2 = state.allocAttr(w, state.symbols.create(o.first));
+ mkString(*v2, o.second.path, {"!" + o.first + "!" + path});
+ outputsVal->listElems()[outputs_index] = state.allocValue();
+ mkString(*(outputsVal->listElems()[outputs_index++]), o.first);
+ }
+ w.attrs->sort();
+ Value fun;
+ state.evalFile(settings.nixDataDir + "/nix/corepkgs/imported-drv-to-derivation.nix", fun);
+ state.forceFunction(fun, pos);
+ mkApp(v, fun, w);
+ state.forceAttrs(v, pos);
+ } else {
+ state.forceAttrs(*args[0]);
+ if (args[0]->attrs->empty())
+ state.evalFile(path, v);
+ else {
+ Env * env = &state.allocEnv(args[0]->attrs->size());
+ env->up = &state.baseEnv;
+
+ StaticEnv staticEnv(false, &state.staticBaseEnv);
+
+ unsigned int displ = 0;
+ for (auto & attr : *args[0]->attrs) {
+ staticEnv.vars[attr.name] = displ;
+ env->values[displ++] = attr.value;
+ }
+
+ Activity act(*logger, lvlTalkative, format("evaluating file ‘%1%’") % path);
+ Expr * e = state.parseExprFromFile(resolveExprPath(path), staticEnv);
+
+ e->eval(state, *env, v);
+ }
+ }
+}
+
+
+/* Want reasonable symbol names, so extern C */
+/* !!! Should we pass the Pos or the file name too? */
+extern "C" typedef void (*ValueInitializer)(EvalState & state, Value & v);
+
+/* Load a ValueInitializer from a DSO and return whatever it initializes */
+static void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[0], context);
+
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot import ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % path % e.path % pos);
+ }
+
+ path = state.checkSourcePath(path);
+
+ string sym = state.forceStringNoCtx(*args[1], pos);
+
+ void *handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_LOCAL);
+ if (!handle)
+ throw EvalError(format("could not open ‘%1%’: %2%") % path % dlerror());
+
+ dlerror();
+ ValueInitializer func = (ValueInitializer) dlsym(handle, sym.c_str());
+ if(!func) {
+ char *message = dlerror();
+ if (message)
+ throw EvalError(format("could not load symbol ‘%1%’ from ‘%2%’: %3%") % sym % path % message);
+ else
+ throw EvalError(format("symbol ‘%1%’ from ‘%2%’ resolved to NULL when a function pointer was expected")
+ % sym % path);
+ }
+
+ (func)(state, v);
+
+ /* We don't dlclose because v may be a primop referencing a function in the shared object file */
+}
+
+
+/* Execute a program and parse its output */
+static void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ auto elems = args[0]->listElems();
+ auto count = args[0]->listSize();
+ if (count == 0) {
+ throw EvalError(format("at least one argument to 'exec' required, at %1%") % pos);
+ }
+ PathSet context;
+ auto program = state.coerceToString(pos, *elems[0], context, false, false);
+ Strings commandArgs;
+ for (unsigned int i = 1; i < args[0]->listSize(); ++i) {
+ commandArgs.emplace_back(state.coerceToString(pos, *elems[i], context, false, false));
+ }
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot execute ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % program % e.path % pos);
+ }
+
+ auto output = runProgram(program, true, commandArgs);
+ Expr * parsed;
+ try {
+ parsed = state.parseExprFromString(output, pos.file);
+ } catch (Error & e) {
+ e.addPrefix(format("While parsing the output from ‘%1%’, at %2%\n") % program % pos);
+ throw;
+ }
+ try {
+ state.eval(parsed, v);
+ } catch (Error & e) {
+ e.addPrefix(format("While evaluating the output from ‘%1%’, at %2%\n") % program % pos);
+ throw;
+ }
+}
+
+
+/* Return a string representing the type of the expression. */
+static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ string t;
+ switch (args[0]->type) {
+ case tInt: t = "int"; break;
+ case tBool: t = "bool"; break;
+ case tString: t = "string"; break;
+ case tPath: t = "path"; break;
+ case tNull: t = "null"; break;
+ case tAttrs: t = "set"; break;
+ case tList1: case tList2: case tListN: t = "list"; break;
+ case tLambda:
+ case tPrimOp:
+ case tPrimOpApp:
+ t = "lambda";
+ break;
+ case tExternal:
+ t = args[0]->external->typeOf();
+ break;
+ case tFloat: t = "float"; break;
+ default: abort();
+ }
+ mkString(v, state.symbols.create(t));
+}
+
+
+/* Determine whether the argument is the null value. */
+static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tNull);
+}
+
+
+/* Determine whether the argument is a function. */
+static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tLambda);
+}
+
+
+/* Determine whether the argument is an integer. */
+static void prim_isInt(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tInt);
+}
+
+/* Determine whether the argument is a float. */
+static void prim_isFloat(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tFloat);
+}
+
+/* Determine whether the argument is a string. */
+static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tString);
+}
+
+
+/* Determine whether the argument is a Boolean. */
+static void prim_isBool(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tBool);
+}
+
+
+struct CompareValues
+{
+ bool operator () (const Value * v1, const Value * v2) const
+ {
+ if (v1->type == tFloat && v2->type == tInt)
+ return v1->fpoint < v2->integer;
+ if (v1->type == tInt && v2->type == tFloat)
+ return v1->integer < v2->fpoint;
+ if (v1->type != v2->type)
+ throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % showType(*v2));
+ switch (v1->type) {
+ case tInt:
+ return v1->integer < v2->integer;
+ case tFloat:
+ return v1->fpoint < v2->fpoint;
+ case tString:
+ return strcmp(v1->string.s, v2->string.s) < 0;
+ case tPath:
+ return strcmp(v1->path, v2->path) < 0;
+ default:
+ throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % showType(*v2));
+ }
+ }
+};
+
+
+#if HAVE_BOEHMGC
+typedef list<Value *, gc_allocator<Value *> > ValueList;
+#else
+typedef list<Value *> ValueList;
+#endif
+
+
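+/* Illustrative example of the semantics implemented below:
+
+     genericClosure {
+       startSet = [ { key = 0; } ];
+       operator = item: if item.key < 2 then [ { key = item.key + 1; } ] else [ ];
+     }
+     => [ { key = 0; } { key = 1; } { key = 2; } ]
+*/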
+static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ Activity act(*logger, lvlDebug, "finding dependencies");
+
+ state.forceAttrs(*args[0], pos);
+
+ /* Get the start set. */
+ Bindings::iterator startSet =
+ args[0]->attrs->find(state.symbols.create("startSet"));
+ if (startSet == args[0]->attrs->end())
+ throw EvalError(format("attribute ‘startSet’ required, at %1%") % pos);
+ state.forceList(*startSet->value, pos);
+
+ ValueList workSet;
+ for (unsigned int n = 0; n < startSet->value->listSize(); ++n)
+ workSet.push_back(startSet->value->listElems()[n]);
+
+ /* Get the operator. */
+ Bindings::iterator op =
+ args[0]->attrs->find(state.symbols.create("operator"));
+ if (op == args[0]->attrs->end())
+ throw EvalError(format("attribute ‘operator’ required, at %1%") % pos);
+ state.forceValue(*op->value);
+
+ /* Construct the closure by applying the operator to each element of
+ `workSet', adding the result to `workSet', continuing until
+ no new elements are found. */
+ ValueList res;
+ // `doneKeys' doesn't need to be a GC root, because its values are
+ // reachable from res.
+ set<Value *, CompareValues> doneKeys;
+ while (!workSet.empty()) {
+ Value * e = *(workSet.begin());
+ workSet.pop_front();
+
+ state.forceAttrs(*e, pos);
+
+ Bindings::iterator key =
+ e->attrs->find(state.symbols.create("key"));
+ if (key == e->attrs->end())
+ throw EvalError(format("attribute ‘key’ required, at %1%") % pos);
+ state.forceValue(*key->value);
+
+ if (doneKeys.find(key->value) != doneKeys.end()) continue;
+ doneKeys.insert(key->value);
+ res.push_back(e);
+
+ /* Call the `operator' function with `e' as argument. */
+ Value call;
+ mkApp(call, *op->value, *e);
+ state.forceList(call, pos);
+
+ /* Add the values returned by the operator to the work set. */
+ for (unsigned int n = 0; n < call.listSize(); ++n) {
+ state.forceValue(*call.listElems()[n]);
+ workSet.push_back(call.listElems()[n]);
+ }
+ }
+
+ /* Create the result list. */
+ state.mkList(v, res.size());
+ unsigned int n = 0;
+ for (auto & i : res)
+ v.listElems()[n++] = i;
+}
+
+
+static void prim_abort(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context);
+ throw Abort(format("evaluation aborted with the following error message: ‘%1%’") % s);
+}
+
+
+static void prim_throw(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context);
+ throw ThrownError(s);
+}
+
+
+static void prim_addErrorContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ try {
+ state.forceValue(*args[1]);
+ v = *args[1];
+ } catch (Error & e) {
+ PathSet context;
+ e.addPrefix(format("%1%\n") % state.coerceToString(pos, *args[0], context));
+ throw;
+ }
+}
+
+
+/* Try evaluating the argument. Success => {success=true; value=something;},
+   else => {success=false; value=false;} */
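+/* Example:
+     tryEval (assert false; "nope")  =>  { success = false; value = false; }
+     tryEval "ok"                    =>  { success = true; value = "ok"; }
+*/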
+static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.mkAttrs(v, 2);
+ try {
+ state.forceValue(*args[0]);
+ v.attrs->push_back(Attr(state.sValue, args[0]));
+ mkBool(*state.allocAttr(v, state.symbols.create("success")), true);
+ } catch (AssertionError & e) {
+ mkBool(*state.allocAttr(v, state.sValue), false);
+ mkBool(*state.allocAttr(v, state.symbols.create("success")), false);
+ }
+ v.attrs->sort();
+}
+
+
+/* Return an environment variable. Use with care. */
+static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string name = state.forceStringNoCtx(*args[0], pos);
+ mkString(v, state.restricted ? "" : getEnv(name));
+}
+
+
+/* Evaluate the first argument, then return the second argument. */
+static void prim_seq(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ state.forceValue(*args[1]);
+ v = *args[1];
+}
+
+
+/* Evaluate the first argument deeply (i.e. recursing into lists and
+ attrsets), then return the second argument. */
+static void prim_deepSeq(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValueDeep(*args[0]);
+ state.forceValue(*args[1]);
+ v = *args[1];
+}
+
+
+/* Evaluate the first expression and print it on standard error. Then
+ return the second expression. Useful for debugging. */
+static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ if (args[0]->type == tString)
+ printError(format("trace: %1%") % args[0]->string.s);
+ else
+ printError(format("trace: %1%") % *args[0]);
+ state.forceValue(*args[1]);
+ v = *args[1];
+}
+
+
+void prim_valueSize(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ /* We're not forcing the argument on purpose. */
+ mkInt(v, valueSize(*args[0]));
+}
+
+
+/*************************************************************
+ * Derivations
+ *************************************************************/
+
+
+/* Construct (as an unobservable side effect) a Nix derivation
+ expression that performs the derivation described by the argument
+ set. Returns the original set extended with the following
+ attributes: `outPath' containing the primary output path of the
+ derivation; `drvPath' containing the path of the Nix expression;
+ and `type' set to `derivation' to indicate that this is a
+ derivation. */
+static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ Activity act(*logger, lvlVomit, "evaluating derivation");
+
+ state.forceAttrs(*args[0], pos);
+
+ /* Figure out the name first (for stack backtraces). */
+ Bindings::iterator attr = args[0]->attrs->find(state.sName);
+ if (attr == args[0]->attrs->end())
+ throw EvalError(format("required attribute ‘name’ missing, at %1%") % pos);
+ string drvName;
+ Pos & posDrvName(*attr->pos);
+ try {
+ drvName = state.forceStringNoCtx(*attr->value, pos);
+ } catch (Error & e) {
+ e.addPrefix(format("while evaluating the derivation attribute ‘name’ at %1%:\n") % posDrvName);
+ throw;
+ }
+
+ /* Check whether attributes should be passed as a JSON file. */
+ std::ostringstream jsonBuf;
+ std::unique_ptr<JSONObject> jsonObject;
+ attr = args[0]->attrs->find(state.sStructuredAttrs);
+ if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos))
+ jsonObject = std::make_unique<JSONObject>(jsonBuf);
+
+ /* Check whether null attributes should be ignored. */
+ bool ignoreNulls = false;
+ attr = args[0]->attrs->find(state.sIgnoreNulls);
+ if (attr != args[0]->attrs->end())
+ ignoreNulls = state.forceBool(*attr->value, pos);
+
+ /* Build the derivation expression by processing the attributes. */
+ Derivation drv;
+
+ PathSet context;
+
+ string outputHash, outputHashAlgo;
+ bool outputHashRecursive = false;
+
+ StringSet outputs;
+ outputs.insert("out");
+
+ for (auto & i : args[0]->attrs->lexicographicOrder()) {
+ if (i->name == state.sIgnoreNulls) continue;
+ string key = i->name;
+ Activity act(*logger, lvlVomit, format("processing attribute ‘%1%’") % key);
+
+ auto handleHashMode = [&](const std::string & s) {
+ if (s == "recursive") outputHashRecursive = true;
+ else if (s == "flat") outputHashRecursive = false;
+ else throw EvalError("invalid value ‘%s’ for ‘outputHashMode’ attribute, at %s", s, posDrvName);
+ };
+
+ auto handleOutputs = [&](const Strings & ss) {
+ outputs.clear();
+ for (auto & j : ss) {
+ if (outputs.find(j) != outputs.end())
+ throw EvalError(format("duplicate derivation output ‘%1%’, at %2%") % j % posDrvName);
+ /* !!! Check whether j is a valid attribute
+ name. */
+ /* Derivations cannot be named ‘drv’, because
+ then we'd have an attribute ‘drvPath’ in
+ the resulting set. */
+ if (j == "drv")
+ throw EvalError(format("invalid derivation output name ‘drv’, at %1%") % posDrvName);
+ outputs.insert(j);
+ }
+ if (outputs.empty())
+ throw EvalError(format("derivation cannot have an empty set of outputs, at %1%") % posDrvName);
+ };
+
+ try {
+
+ if (ignoreNulls) {
+ state.forceValue(*i->value);
+ if (i->value->type == tNull) continue;
+ }
+
+ /* The `args' attribute is special: it supplies the
+ command-line arguments to the builder. */
+ if (key == "args") {
+ state.forceList(*i->value, pos);
+ for (unsigned int n = 0; n < i->value->listSize(); ++n) {
+ string s = state.coerceToString(posDrvName, *i->value->listElems()[n], context, true);
+ drv.args.push_back(s);
+ }
+ }
+
+ /* All other attributes are passed to the builder through
+ the environment. */
+ else {
+
+ if (jsonObject) {
+
+ if (i->name == state.sStructuredAttrs) continue;
+
+ auto placeholder(jsonObject->placeholder(key));
+ printValueAsJSON(state, true, *i->value, placeholder, context);
+
+ if (i->name == state.sBuilder)
+ drv.builder = state.forceString(*i->value, context, posDrvName);
+ else if (i->name == state.sSystem)
+ drv.platform = state.forceStringNoCtx(*i->value, posDrvName);
+ else if (i->name == state.sName)
+ drvName = state.forceStringNoCtx(*i->value, posDrvName);
+ else if (key == "outputHash")
+ outputHash = state.forceStringNoCtx(*i->value, posDrvName);
+ else if (key == "outputHashAlgo")
+ outputHashAlgo = state.forceStringNoCtx(*i->value, posDrvName);
+ else if (key == "outputHashMode")
+ handleHashMode(state.forceStringNoCtx(*i->value, posDrvName));
+ else if (key == "outputs") {
+ /* Require ‘outputs’ to be a list of strings. */
+ state.forceList(*i->value, posDrvName);
+ Strings ss;
+ for (unsigned int n = 0; n < i->value->listSize(); ++n)
+ ss.emplace_back(state.forceStringNoCtx(*i->value->listElems()[n], posDrvName));
+ handleOutputs(ss);
+ }
+
+ } else {
+ auto s = state.coerceToString(posDrvName, *i->value, context, true);
+ drv.env.emplace(key, s);
+ if (i->name == state.sBuilder) drv.builder = s;
+ else if (i->name == state.sSystem) drv.platform = s;
+ else if (i->name == state.sName) {
+ drvName = s;
+ printMsg(lvlVomit, format("derivation name is ‘%1%’") % drvName);
+ }
+ else if (key == "outputHash") outputHash = s;
+ else if (key == "outputHashAlgo") outputHashAlgo = s;
+ else if (key == "outputHashMode") handleHashMode(s);
+ else if (key == "outputs")
+ handleOutputs(tokenizeString<Strings>(s));
+ }
+
+ }
+
+ } catch (Error & e) {
+ e.addPrefix(format("while evaluating the attribute ‘%1%’ of the derivation ‘%2%’ at %3%:\n")
+ % key % drvName % posDrvName);
+ throw;
+ }
+ }
+
+ if (jsonObject) {
+ jsonObject.reset();
+ drv.env.emplace("__json", jsonBuf.str());
+ }
+
+ /* Everything in the context of the strings in the derivation
+ attributes should be added as dependencies of the resulting
+ derivation. */
+ for (auto & path : context) {
+
+ /* Paths marked with `=' denote that the path of a derivation
+ is explicitly passed to the builder. Since that allows the
+ builder to gain access to every path in the dependency
+ graph of the derivation (including all outputs), all paths
+ in the graph must be added to this derivation's list of
+ inputs to ensure that they are available when the builder
+ runs. */
+ if (path.at(0) == '=') {
+ /* !!! This doesn't work if readOnlyMode is set. */
+ PathSet refs;
+ state.store->computeFSClosure(string(path, 1), refs);
+ for (auto & j : refs) {
+ drv.inputSrcs.insert(j);
+ if (isDerivation(j))
+ drv.inputDrvs[j] = state.store->queryDerivationOutputNames(j);
+ }
+ }
+
+ /* See prim_unsafeDiscardOutputDependency. */
+ else if (path.at(0) == '~')
+ drv.inputSrcs.insert(string(path, 1));
+
+ /* Handle derivation outputs of the form ‘!<name>!<path>’. */
+ else if (path.at(0) == '!') {
+ std::pair<string, string> ctx = decodeContext(path);
+ drv.inputDrvs[ctx.first].insert(ctx.second);
+ }
+
+ /* Handle derivation contexts returned by
+ ‘builtins.storePath’. */
+ else if (isDerivation(path))
+ drv.inputDrvs[path] = state.store->queryDerivationOutputNames(path);
+
+ /* Otherwise it's a source file. */
+ else
+ drv.inputSrcs.insert(path);
+ }
+
+ /* Do we have all required attributes? */
+ if (drv.builder == "")
+ throw EvalError(format("required attribute ‘builder’ missing, at %1%") % posDrvName);
+ if (drv.platform == "")
+ throw EvalError(format("required attribute ‘system’ missing, at %1%") % posDrvName);
+
+ /* Check whether the derivation name is valid. */
+ checkStoreName(drvName);
+ if (isDerivation(drvName))
+ throw EvalError(format("derivation names are not allowed to end in ‘%1%’, at %2%")
+ % drvExtension % posDrvName);
+
+ if (outputHash != "") {
+ /* Handle fixed-output derivations. */
+ if (outputs.size() != 1 || *(outputs.begin()) != "out")
+ throw Error(format("multiple outputs are not supported in fixed-output derivations, at %1%") % posDrvName);
+
+ HashType ht = parseHashType(outputHashAlgo);
+ if (ht == htUnknown)
+ throw EvalError(format("unknown hash algorithm ‘%1%’, at %2%") % outputHashAlgo % posDrvName);
+ Hash h = parseHash16or32(ht, outputHash);
+ outputHash = printHash(h);
+ if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;
+
+ Path outPath = state.store->makeFixedOutputPath(outputHashRecursive, h, drvName);
+ drv.env["out"] = outPath;
+ drv.outputs["out"] = DerivationOutput(outPath, outputHashAlgo, outputHash);
+ }
+
+ else {
+ /* Construct the "masked" store derivation, which is the final
+ one except that in the list of outputs, the output paths
+ are empty, and the corresponding environment variables have
+ an empty value. This ensures that changes in the set of
+ output names do get reflected in the hash. */
+ for (auto & i : outputs) {
+ drv.env[i] = "";
+ drv.outputs[i] = DerivationOutput("", "", "");
+ }
+
+ /* Use the masked derivation expression to compute the output
+ path. */
+ Hash h = hashDerivationModulo(*state.store, drv);
+
+ for (auto & i : drv.outputs)
+ if (i.second.path == "") {
+ Path outPath = state.store->makeOutputPath(i.first, h, drvName);
+ drv.env[i.first] = outPath;
+ i.second.path = outPath;
+ }
+ }
+
+ /* Write the resulting term into the Nix store directory. */
+ Path drvPath = writeDerivation(state.store, drv, drvName, state.repair);
+
+ printMsg(lvlChatty, format("instantiated ‘%1%’ -> ‘%2%’")
+ % drvName % drvPath);
+
+ /* Optimisation, but required in read-only mode, because in that
+ case we don't actually write store derivations, so we can't
+ read them later. */
+ drvHashes[drvPath] = hashDerivationModulo(*state.store, drv);
+
+ state.mkAttrs(v, 1 + drv.outputs.size());
+ mkString(*state.allocAttr(v, state.sDrvPath), drvPath, {"=" + drvPath});
+ for (auto & i : drv.outputs) {
+ mkString(*state.allocAttr(v, state.symbols.create(i.first)),
+ i.second.path, {"!" + i.first + "!" + drvPath});
+ }
+ v.attrs->sort();
+}
+
+
+/* Return a placeholder string for the specified output that will be
+ substituted by the corresponding output path at build time. For
+ example, ‘placeholder "out"’ returns the string
+ /1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9. At build
+ time, any occurrence of this string in a derivation attribute will
+ be replaced with the concrete path in the Nix store of the output
+ ‘out’. */
+static void prim_placeholder(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ mkString(v, hashPlaceholder(state.forceStringNoCtx(*args[0], pos)));
+}
+
+
+/*************************************************************
+ * Paths
+ *************************************************************/
+
+
+/* Convert the argument to a path. !!! obsolete? */
+static void prim_toPath(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[0], context);
+ mkString(v, canonPath(path), context);
+}
+
+
+/* Allow a valid store path to be used in an expression. This is
+ useful in some generated expressions such as in nix-push, which
+ generates a call to a function with an already existing store path
+ as argument. You don't want to use `toPath' here because it copies
+ the path to the Nix store, which yields a copy like
+ /nix/store/newhash-oldhash-oldname. In the past, `toPath' had
+ special case behaviour for store paths, but that created weird
+ corner cases. */
+static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context));
+ /* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink
+ directly in the store. The latter condition is necessary so
+ e.g. nix-push does the right thing. */
+ if (!state.store->isStorePath(path)) path = canonPath(path, true);
+ if (!state.store->isInStore(path))
+ throw EvalError(format("path ‘%1%’ is not in the Nix store, at %2%") % path % pos);
+ Path path2 = state.store->toStorePath(path);
+ if (!settings.readOnlyMode)
+ state.store->ensurePath(path2);
+ context.insert(path2);
+ mkString(v, path, context);
+}
+
+
+static void prim_pathExists(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[0], context);
+ if (!context.empty())
+ throw EvalError(format("string ‘%1%’ cannot refer to other paths, at %2%") % path % pos);
+ try {
+ mkBool(v, pathExists(state.checkSourcePath(path)));
+ } catch (SysError & e) {
+ /* Don't give away info from errors while canonicalising
+ ‘path’ in restricted mode. */
+ mkBool(v, false);
+ } catch (RestrictedPathError & e) {
+ mkBool(v, false);
+ }
+}
+
+
+/* Return the base name of the given string, i.e., everything
+ following the last slash. */
+static void prim_baseNameOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ mkString(v, baseNameOf(state.coerceToString(pos, *args[0], context, false, false)), context);
+}
+
+
+/* Return the directory of the given path, i.e., everything before the
+ last slash. Return either a path or a string depending on the type
+ of the argument. */
+static void prim_dirOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path dir = dirOf(state.coerceToPath(pos, *args[0], context));
+ if (args[0]->type == tPath) mkPath(v, dir.c_str()); else mkString(v, dir, context);
+}
+
+
+/* Return the contents of a file as a string. */
+static void prim_readFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[0], context);
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot read ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % path % e.path % pos);
+ }
+ string s = readFile(state.checkSourcePath(path));
+ if (s.find((char) 0) != string::npos)
+ throw Error(format("the contents of the file ‘%1%’ cannot be represented as a Nix string") % path);
+ mkString(v, s.c_str());
+}
+
+
+/* Find a file in the Nix search path. Used to implement <x> paths,
+ which are desugared to ‘findFile __nixPath "x"’. */
+static void prim_findFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+
+ SearchPath searchPath;
+
+ for (unsigned int n = 0; n < args[0]->listSize(); ++n) {
+ Value & v2(*args[0]->listElems()[n]);
+ state.forceAttrs(v2, pos);
+
+ string prefix;
+ Bindings::iterator i = v2.attrs->find(state.symbols.create("prefix"));
+ if (i != v2.attrs->end())
+ prefix = state.forceStringNoCtx(*i->value, pos);
+
+ i = v2.attrs->find(state.symbols.create("path"));
+ if (i == v2.attrs->end())
+ throw EvalError(format("attribute ‘path’ missing, at %1%") % pos);
+
+ PathSet context;
+ string path = state.coerceToString(pos, *i->value, context, false, false);
+
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot find ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % path % e.path % pos);
+ }
+
+ searchPath.emplace_back(prefix, path);
+ }
+
+ string path = state.forceStringNoCtx(*args[1], pos);
+
+ mkPath(v, state.checkSourcePath(state.findFile(searchPath, path, pos)).c_str());
+}
+
+/* Read a directory (without . or ..) */
+static void prim_readDir(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet ctx;
+ Path path = state.coerceToPath(pos, *args[0], ctx);
+ try {
+ state.realiseContext(ctx);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot read ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % path % e.path % pos);
+ }
+
+ DirEntries entries = readDirectory(state.checkSourcePath(path));
+ state.mkAttrs(v, entries.size());
+
+ for (auto & ent : entries) {
+ Value * ent_val = state.allocAttr(v, state.symbols.create(ent.name));
+ if (ent.type == DT_UNKNOWN)
+ ent.type = getFileType(path + "/" + ent.name);
+ mkStringNoCopy(*ent_val,
+ ent.type == DT_REG ? "regular" :
+ ent.type == DT_DIR ? "directory" :
+ ent.type == DT_LNK ? "symlink" :
+ "unknown");
+ }
+
+ v.attrs->sort();
+}
+
+
+/*************************************************************
+ * Creating files
+ *************************************************************/
+
+
+/* Convert the argument (which can be any Nix expression) to an XML
+ representation returned in a string. Not all Nix expressions can
+ be sensibly or completely represented (e.g., functions). */
+static void prim_toXML(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ std::ostringstream out;
+ PathSet context;
+ printValueAsXML(state, true, false, *args[0], out, context);
+ mkString(v, out.str(), context);
+}
+
+
+/* Convert the argument (which can be any Nix expression) to a JSON
+ string. Not all Nix expressions can be sensibly or completely
+ represented (e.g., functions). */
+static void prim_toJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ std::ostringstream out;
+ PathSet context;
+ printValueAsJSON(state, true, *args[0], out, context);
+ mkString(v, out.str(), context);
+}
+
+
+/* Parse a JSON string to a value. */
+static void prim_fromJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string s = state.forceStringNoCtx(*args[0], pos);
+ parseJSON(state, s, v);
+}
+
+
+/* Store a string in the Nix store as a source file that can be used
+ as an input by derivations. */
+static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string name = state.forceStringNoCtx(*args[0], pos);
+ string contents = state.forceString(*args[1], context, pos);
+
+ PathSet refs;
+
+ for (auto path : context) {
+ if (path.at(0) == '=') path = string(path, 1);
+ if (isDerivation(path)) {
+ /* See prim_unsafeDiscardOutputDependency. */
+ if (path.at(0) != '~')
+ throw EvalError(format("in ‘toFile’: the file ‘%1%’ cannot refer to derivation outputs, at %2%") % name % pos);
+ path = string(path, 1);
+ }
+ refs.insert(path);
+ }
+
+ Path storePath = settings.readOnlyMode
+ ? state.store->computeStorePathForText(name, contents, refs)
+ : state.store->addTextToStore(name, contents, refs, state.repair);
+
+ /* Note: we don't need to add `context' to the context of the
+ result, since `storePath' itself has references to the paths
+ used in args[1]. */
+
+ mkString(v, storePath, {storePath});
+}
+
+
+struct FilterFromExpr : PathFilter
+{
+ EvalState & state;
+ Value & filter;
+ Pos pos;
+
+ FilterFromExpr(EvalState & state, Value & filter, const Pos & pos)
+ : state(state), filter(filter), pos(pos)
+ {
+ }
+
+ bool operator () (const Path & path)
+ {
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ /* Call the filter function. The first argument is the path,
+ the second is a string indicating the type of the file. */
+ Value arg1;
+ mkString(arg1, path);
+
+ Value fun2;
+ state.callFunction(filter, arg1, fun2, noPos);
+
+ Value arg2;
+ mkString(arg2,
+ S_ISREG(st.st_mode) ? "regular" :
+ S_ISDIR(st.st_mode) ? "directory" :
+ S_ISLNK(st.st_mode) ? "symlink" :
+ "unknown" /* not supported, will fail! */);
+
+ Value res;
+ state.callFunction(fun2, arg2, res, noPos);
+
+ return state.forceBool(res, pos);
+ }
+};
+
+
+static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[1], context);
+ if (!context.empty())
+ throw EvalError(format("string ‘%1%’ cannot refer to other paths, at %2%") % path % pos);
+
+ state.forceValue(*args[0]);
+ if (args[0]->type != tLambda)
+ throw TypeError(format("first argument in call to ‘filterSource’ is not a function but %1%, at %2%") % showType(*args[0]) % pos);
+
+ FilterFromExpr filter(state, *args[0], pos);
+
+ path = state.checkSourcePath(path);
+
+ Path dstPath = settings.readOnlyMode
+ ? state.store->computeStorePathForPath(path, true, htSHA256, filter).first
+ : state.store->addToStore(baseNameOf(path), path, true, htSHA256, filter, state.repair);
+
+ mkString(v, dstPath, {dstPath});
+}
+
+
+/*************************************************************
+ * Sets
+ *************************************************************/
+
+
+/* Return the names of the attributes in a set as a sorted list of
+ strings. */
+static void prim_attrNames(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceAttrs(*args[0], pos);
+
+ state.mkList(v, args[0]->attrs->size());
+
+ size_t n = 0;
+ for (auto & i : args[0]->attrs->lexicographicOrder())
+ mkString(*(v.listElems()[n++] = state.allocValue()), i->name);
+}
+
+
+/* Return the values of the attributes in a set as a list, in the same
+ order as attrNames. */
+static void prim_attrValues(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceAttrs(*args[0], pos);
+
+ state.mkList(v, args[0]->attrs->size());
+
+ unsigned int n = 0;
+ for (auto & i : *args[0]->attrs)
+ v.listElems()[n++] = (Value *) &i;
+
+ std::sort(v.listElems(), v.listElems() + n,
+ [](Value * v1, Value * v2) { return (string) ((Attr *) v1)->name < (string) ((Attr *) v2)->name; });
+
+ for (unsigned int i = 0; i < n; ++i)
+ v.listElems()[i] = ((Attr *) v.listElems()[i])->value;
+}
+
+
+/* Dynamic version of the `.' operator. */
+void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string attr = state.forceStringNoCtx(*args[0], pos);
+ state.forceAttrs(*args[1], pos);
+ // !!! Should we create a symbol here or just do a lookup?
+ Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
+ if (i == args[1]->attrs->end())
+ throw EvalError(format("attribute ‘%1%’ missing, at %2%") % attr % pos);
+ // !!! add to stack trace?
+ if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
+ state.forceValue(*i->value);
+ v = *i->value;
+}
+
+
+/* Return position information of the specified attribute. */
+void prim_unsafeGetAttrPos(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string attr = state.forceStringNoCtx(*args[0], pos);
+ state.forceAttrs(*args[1], pos);
+ Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
+ if (i == args[1]->attrs->end())
+ mkNull(v);
+ else
+ state.mkPos(v, i->pos);
+}
+
+
+/* Dynamic version of the `?' operator. */
+static void prim_hasAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string attr = state.forceStringNoCtx(*args[0], pos);
+ state.forceAttrs(*args[1], pos);
+ mkBool(v, args[1]->attrs->find(state.symbols.create(attr)) != args[1]->attrs->end());
+}
+
+
+/* Determine whether the argument is a set. */
+static void prim_isAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->type == tAttrs);
+}
+
+
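+/* Remove the attributes named in the second argument (a list of
+   strings) from the set given as the first argument. Example:
+     removeAttrs { a = 1; b = 2; c = 3; } [ "a" "c" ]
+     => { b = 2; }
+*/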
+static void prim_removeAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceAttrs(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ /* Get the attribute names to be removed. */
+ std::set<Symbol> names;
+ for (unsigned int i = 0; i < args[1]->listSize(); ++i) {
+ state.forceStringNoCtx(*args[1]->listElems()[i], pos);
+ names.insert(state.symbols.create(args[1]->listElems()[i]->string.s));
+ }
+
+ /* Copy all attributes not in that set. Note that we don't need
+ to sort v.attrs because it's a subset of an already sorted
+ vector. */
+ state.mkAttrs(v, args[0]->attrs->size());
+ for (auto & i : *args[0]->attrs) {
+ if (names.find(i.name) == names.end())
+ v.attrs->push_back(i);
+ }
+}
+
+
+/* Builds a set from a list specifying (name, value) pairs. To be
+ precise, a list [{name = "name1"; value = value1;} ... {name =
+ "nameN"; value = valueN;}] is transformed to {name1 = value1;
+ ... nameN = valueN;}. In case of duplicate occurrences of the same
+ name, the first takes precedence. */
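+/* Example:
+     listToAttrs [ { name = "a"; value = 1; } { name = "b"; value = 2; } ]
+     => { a = 1; b = 2; }
+*/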
+static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+
+ state.mkAttrs(v, args[0]->listSize());
+
+ std::set<Symbol> seen;
+
+ for (unsigned int i = 0; i < args[0]->listSize(); ++i) {
+ Value & v2(*args[0]->listElems()[i]);
+ state.forceAttrs(v2, pos);
+
+ Bindings::iterator j = v2.attrs->find(state.sName);
+ if (j == v2.attrs->end())
+ throw TypeError(format("‘name’ attribute missing in a call to ‘listToAttrs’, at %1%") % pos);
+ string name = state.forceStringNoCtx(*j->value, pos);
+
+ Symbol sym = state.symbols.create(name);
+ if (seen.find(sym) == seen.end()) {
+ Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue));
+ if (j2 == v2.attrs->end())
+ throw TypeError(format("‘value’ attribute missing in a call to ‘listToAttrs’, at %1%") % pos);
+
+ v.attrs->push_back(Attr(sym, j2->value, j2->pos));
+ seen.insert(sym);
+ }
+ }
+
+ v.attrs->sort();
+}
+
+
+/* Return the right-biased intersection of two sets as1 and as2,
+ i.e. a set that contains every attribute from as2 that is also a
+ member of as1. */
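+/* Example:
+     intersectAttrs { a = 1; b = 2; } { b = 3; c = 4; }
+     => { b = 3; }
+*/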
+static void prim_intersectAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceAttrs(*args[0], pos);
+ state.forceAttrs(*args[1], pos);
+
+ state.mkAttrs(v, std::min(args[0]->attrs->size(), args[1]->attrs->size()));
+
+ for (auto & i : *args[0]->attrs) {
+ Bindings::iterator j = args[1]->attrs->find(i.name);
+ if (j != args[1]->attrs->end())
+ v.attrs->push_back(*j);
+ }
+}
+
+
+/* Collect each attribute named `attr' from a list of attribute sets.
+ Sets that don't contain the named attribute are ignored.
+
+ Example:
+ catAttrs "a" [{a = 1;} {b = 0;} {a = 2;}]
+ => [1 2]
+*/
+static void prim_catAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ Symbol attrName = state.symbols.create(state.forceStringNoCtx(*args[0], pos));
+ state.forceList(*args[1], pos);
+
+ Value * res[args[1]->listSize()];
+ unsigned int found = 0;
+
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+ Value & v2(*args[1]->listElems()[n]);
+ state.forceAttrs(v2, pos);
+ Bindings::iterator i = v2.attrs->find(attrName);
+ if (i != v2.attrs->end())
+ res[found++] = i->value;
+ }
+
+ state.mkList(v, found);
+ for (unsigned int n = 0; n < found; ++n)
+ v.listElems()[n] = res[n];
+}
+
+
+/* Return a set containing the names of the formal arguments expected
+ by the function `f'. The value of each attribute is a Boolean
+ denoting whether the corresponding argument has a default value. For instance,
+
+ functionArgs ({ x, y ? 123}: ...)
+ => { x = false; y = true; }
+
+ "Formal argument" here refers to the attributes pattern-matched by
+ the function. Plain lambdas are not included, e.g.
+
+ functionArgs (x: ...)
+ => { }
+*/
+static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ if (args[0]->type != tLambda)
+ throw TypeError(format("‘functionArgs’ requires a function, at %1%") % pos);
+
+ if (!args[0]->lambda.fun->matchAttrs) {
+ state.mkAttrs(v, 0);
+ return;
+ }
+
+ state.mkAttrs(v, args[0]->lambda.fun->formals->formals.size());
+ for (auto & i : args[0]->lambda.fun->formals->formals)
+ // !!! should optimise booleans (allocate only once)
+ mkBool(*state.allocAttr(v, i.name), i.def);
+ v.attrs->sort();
+}
+
+
+/*************************************************************
+ * Lists
+ *************************************************************/
+
+
+/* Determine whether the argument is a list. */
+static void prim_isList(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ mkBool(v, args[0]->isList());
+}
+
+
+static void elemAt(EvalState & state, const Pos & pos, Value & list, int n, Value & v)
+{
+ state.forceList(list, pos);
+ if (n < 0 || (unsigned int) n >= list.listSize())
+ throw Error(format("list index %1% is out of bounds, at %2%") % n % pos);
+ state.forceValue(*list.listElems()[n]);
+ v = *list.listElems()[n];
+}
+
+
+/* Return the element at index n (zero-based) of a list. */
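+/* Example:
+     elemAt [ "a" "b" "c" ] 1
+     => "b"
+*/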
+static void prim_elemAt(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ elemAt(state, pos, *args[0], state.forceInt(*args[1], pos), v);
+}
+
+
+/* Return the first element of a list. */
+static void prim_head(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ elemAt(state, pos, *args[0], 0, v);
+}
+
+
+/* Return a list consisting of everything but the first element of
+ a list. Warning: this function takes O(n) time, so you probably
+ don't want to use it! */
+static void prim_tail(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ if (args[0]->listSize() == 0)
+ throw Error(format("‘tail’ called on an empty list, at %1%") % pos);
+ state.mkList(v, args[0]->listSize() - 1);
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ v.listElems()[n] = args[0]->listElems()[n + 1];
+}
+
+
+/* Apply a function to every element of a list. */
+static void prim_map(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ state.mkList(v, args[1]->listSize());
+
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ mkApp(*(v.listElems()[n] = state.allocValue()),
+ *args[0], *args[1]->listElems()[n]);
+}
+
+
+/* Filter a list using a predicate; that is, return a list containing
+ every element from the list for which the predicate function
+ returns true. */
+static void prim_filter(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ // FIXME: putting this on the stack is risky.
+ Value * vs[args[1]->listSize()];
+ unsigned int k = 0;
+
+ bool same = true;
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+ Value res;
+ state.callFunction(*args[0], *args[1]->listElems()[n], res, noPos);
+ if (state.forceBool(res, pos))
+ vs[k++] = args[1]->listElems()[n];
+ else
+ same = false;
+ }
+
+ if (same)
+ v = *args[1];
+ else {
+ state.mkList(v, k);
+ for (unsigned int n = 0; n < k; ++n) v.listElems()[n] = vs[n];
+ }
+}
+
+
+/* Return true if a list contains a given element. */
+static void prim_elem(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ bool res = false;
+ state.forceList(*args[1], pos);
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n)
+ if (state.eqValues(*args[0], *args[1]->listElems()[n])) {
+ res = true;
+ break;
+ }
+ mkBool(v, res);
+}
+
+
+/* Concatenate a list of lists. */
+static void prim_concatLists(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ state.concatLists(v, args[0]->listSize(), args[0]->listElems(), pos);
+}
+
+
+/* Return the length of a list. This is an O(1) time operation. */
+static void prim_length(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ mkInt(v, args[0]->listSize());
+}
+
+
+/* Reduce a list by applying a binary operator, from left to
+ right. The operator is applied strictly. */
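+/* Example:
+     foldl' (acc: x: acc + x) 0 [ 1 2 3 ]
+     => 6
+*/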
+static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[2], pos);
+
+ Value * vCur = args[1];
+
+ if (args[2]->listSize())
+ for (unsigned int n = 0; n < args[2]->listSize(); ++n) {
+ Value vTmp;
+ state.callFunction(*args[0], *vCur, vTmp, pos);
+ vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue();
+ state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos);
+ }
+ else
+ v = *vCur;
+
+ state.forceValue(v);
+}
+
+
+static void anyOrAll(bool any, EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ Value vTmp;
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+ state.callFunction(*args[0], *args[1]->listElems()[n], vTmp, pos);
+ bool res = state.forceBool(vTmp, pos);
+ if (res == any) {
+ mkBool(v, any);
+ return;
+ }
+ }
+
+ mkBool(v, !any);
+}
+
+
+static void prim_any(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ anyOrAll(true, state, pos, args, v);
+}
+
+
+static void prim_all(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ anyOrAll(false, state, pos, args, v);
+}
+
+
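+/* Generate a list of the given length by applying the function to the
+   indices 0 .. length-1. Example:
+     genList (n: n * n) 4
+     => [ 0 1 4 9 ]
+*/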
+static void prim_genList(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ auto len = state.forceInt(*args[1], pos);
+
+ if (len < 0)
+ throw EvalError(format("cannot create list of size %1%, at %2%") % len % pos);
+
+ state.mkList(v, len);
+
+ for (unsigned int n = 0; n < (unsigned int) len; ++n) {
+ Value * arg = state.allocValue();
+ mkInt(*arg, n);
+ mkApp(*(v.listElems()[n] = state.allocValue()), *args[0], *arg);
+ }
+}
+
+
+static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v);
+
+
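+/* Sort a list using the given comparator, which must return true when
+   its first argument should come before its second. Example:
+     sort builtins.lessThan [ 3 1 2 ]
+     => [ 1 2 3 ]
+*/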
+static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ auto len = args[1]->listSize();
+ state.mkList(v, len);
+ for (unsigned int n = 0; n < len; ++n) {
+ state.forceValue(*args[1]->listElems()[n]);
+ v.listElems()[n] = args[1]->listElems()[n];
+ }
+
+ auto comparator = [&](Value * a, Value * b) {
+ /* Optimization: if the comparator is lessThan, bypass
+ callFunction. */
+ if (args[0]->type == tPrimOp && args[0]->primOp->fun == prim_lessThan)
+ return CompareValues()(a, b);
+
+ Value vTmp1, vTmp2;
+ state.callFunction(*args[0], *a, vTmp1, pos);
+ state.callFunction(vTmp1, *b, vTmp2, pos);
+ return state.forceBool(vTmp2, pos);
+ };
+
+ /* FIXME: std::sort can segfault if the comparator is not a strict
+ weak ordering. What to do? std::stable_sort() seems more
+ resilient, but no guarantees... */
+ std::stable_sort(v.listElems(), v.listElems() + len, comparator);
+}
+
+
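+/* Split a list into the elements for which the predicate returns true
+   (‘right’) and those for which it returns false (‘wrong’). Example:
+     partition (x: x > 2) [ 1 2 3 4 ]
+     => { right = [ 3 4 ]; wrong = [ 1 2 ]; }
+*/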
+static void prim_partition(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceFunction(*args[0], pos);
+ state.forceList(*args[1], pos);
+
+ auto len = args[1]->listSize();
+
+ ValueVector right, wrong;
+
+ for (unsigned int n = 0; n < len; ++n) {
+ auto vElem = args[1]->listElems()[n];
+ state.forceValue(*vElem);
+ Value res;
+ state.callFunction(*args[0], *vElem, res, pos);
+ if (state.forceBool(res, pos))
+ right.push_back(vElem);
+ else
+ wrong.push_back(vElem);
+ }
+
+ state.mkAttrs(v, 2);
+
+ Value * vRight = state.allocAttr(v, state.sRight);
+ state.mkList(*vRight, right.size());
+ memcpy(vRight->listElems(), right.data(), sizeof(Value *) * right.size());
+
+ Value * vWrong = state.allocAttr(v, state.sWrong);
+ state.mkList(*vWrong, wrong.size());
+ memcpy(vWrong->listElems(), wrong.data(), sizeof(Value *) * wrong.size());
+
+ v.attrs->sort();
+}
+
+
+/*************************************************************
+ * Integer arithmetic
+ *************************************************************/
+
+
+static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ if (args[0]->type == tFloat || args[1]->type == tFloat)
+ mkFloat(v, state.forceFloat(*args[0], pos) + state.forceFloat(*args[1], pos));
+ else
+ mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos));
+}
+
+
+static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ if (args[0]->type == tFloat || args[1]->type == tFloat)
+ mkFloat(v, state.forceFloat(*args[0], pos) - state.forceFloat(*args[1], pos));
+ else
+ mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos));
+}
+
+
+static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ if (args[0]->type == tFloat || args[1]->type == tFloat)
+ mkFloat(v, state.forceFloat(*args[0], pos) * state.forceFloat(*args[1], pos));
+ else
+ mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos));
+}
+
+
+static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ NixFloat f2 = state.forceFloat(*args[1], pos);
+ if (f2 == 0) throw EvalError(format("division by zero, at %1%") % pos);
+
+ if (args[0]->type == tFloat || args[1]->type == tFloat) {
+ mkFloat(v, state.forceFloat(*args[0], pos) / state.forceFloat(*args[1], pos));
+ } else {
+ NixInt i1 = state.forceInt(*args[0], pos);
+ NixInt i2 = state.forceInt(*args[1], pos);
+ /* Avoid division overflow as it might raise SIGFPE. */
+ if (i1 == std::numeric_limits<NixInt>::min() && i2 == -1)
+ throw EvalError(format("overflow in integer division, at %1%") % pos);
+ mkInt(v, i1 / i2);
+ }
+}
+
+
+static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceValue(*args[0]);
+ state.forceValue(*args[1]);
+ CompareValues comp;
+ mkBool(v, comp(args[0], args[1]));
+}
+
+
+/*************************************************************
+ * String manipulation
+ *************************************************************/
+
+
+/* Convert the argument to a string. Paths are *not* copied to the
+ store, so `toString /foo/bar' yields `"/foo/bar"', not
+ `"/nix/store/whatever..."'. */
+static void prim_toString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context, true, false);
+ mkString(v, s, context);
+}
+
+
+/* `substring start len str' returns the substring of `str' starting
+ at character position `min(start, stringLength str)' inclusive and
+ ending at `min(start + len, stringLength str)'. `start' must be
+ non-negative. */
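+/* Example:
+     substring 1 3 "nixos"
+     => "ixo"
+     substring 3 9 "nixos"
+     => "os"
+*/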
+static void prim_substring(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ int start = state.forceInt(*args[0], pos);
+ int len = state.forceInt(*args[1], pos);
+ PathSet context;
+ string s = state.coerceToString(pos, *args[2], context);
+
+ if (start < 0) throw EvalError(format("negative start position in ‘substring’, at %1%") % pos);
+
+ mkString(v, (unsigned int) start >= s.size() ? "" : string(s, start, len), context);
+}
+
+
+static void prim_stringLength(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context);
+ mkInt(v, s.size());
+}
+
+
+static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context);
+ mkString(v, s, PathSet());
+}
+
+
+/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
+ builder without causing the derivation to be built (for instance,
+ in the derivation that builds NARs in nix-push, when doing
+ source-only deployment). This primop marks the string context so
+ that builtins.derivation adds the path to drv.inputSrcs rather than
+ drv.inputDrvs. */
+static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+ string s = state.coerceToString(pos, *args[0], context);
+
+ PathSet context2;
+ for (auto & p : context)
+ context2.insert(p.at(0) == '=' ? "~" + string(p, 1) : p);
+
+ mkString(v, s, context2);
+}
+
+
+/* Return the cryptographic hash of a string in base-16. */
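+/* Illustrative example (using the well-known MD5 digest of the empty
+   string):
+     hashString "md5" ""
+     => "d41d8cd98f00b204e9800998ecf8427e"
+*/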
+static void prim_hashString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string type = state.forceStringNoCtx(*args[0], pos);
+ HashType ht = parseHashType(type);
+ if (ht == htUnknown)
+ throw Error(format("unknown hash type ‘%1%’, at %2%") % type % pos);
+
+ PathSet context; // discarded
+ string s = state.forceString(*args[1], context, pos);
+
+ mkString(v, printHash(hashString(ht, s)), context);
+}
+
+
+/* Match a regular expression against a string and return either
+ ‘null’ or a list containing substring matches. */
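+/* Examples (the regex must match the whole string):
+     match "ab" "abc"        =>  null
+     match "abc" "abc"       =>  [ ]
+     match "a(b)(c)" "abc"   =>  [ "b" "c" ]
+*/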
+static void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ std::regex regex(state.forceStringNoCtx(*args[0], pos), std::regex::extended);
+
+ PathSet context;
+ const std::string str = state.forceString(*args[1], context, pos);
+
+ std::smatch match;
+ if (!std::regex_match(str, match, regex)) {
+ mkNull(v);
+ return;
+ }
+
+ // the first match is the whole string
+ const size_t len = match.size() - 1;
+ state.mkList(v, len);
+ for (size_t i = 0; i < len; ++i) {
+ if (!match[i+1].matched)
+ mkNull(*(v.listElems()[i] = state.allocValue()));
+ else
+ mkString(*(v.listElems()[i] = state.allocValue()), match[i + 1].str().c_str());
+ }
+}
+
+
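+/* Concatenate a list of strings, placing the separator between each
+   pair of elements. Example:
+     concatStringsSep ", " [ "a" "b" "c" ]
+     => "a, b, c"
+*/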
+static void prim_concatStringSep(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ PathSet context;
+
+ auto sep = state.forceString(*args[0], context, pos);
+ state.forceList(*args[1], pos);
+
+ string res;
+ res.reserve((args[1]->listSize() + 32) * sep.size());
+ bool first = true;
+
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+ if (first) first = false; else res += sep;
+ res += state.coerceToString(pos, *args[1]->listElems()[n], context);
+ }
+
+ mkString(v, res, context);
+}
+
+
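+/* Replace every occurrence of the strings in the first list with the
+   corresponding string in the second list. Example:
+     replaceStrings [ "oo" "a" ] [ "a" "i" ] "foobar"
+     => "fabir"
+*/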
+static void prim_replaceStrings(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ state.forceList(*args[1], pos);
+ if (args[0]->listSize() != args[1]->listSize())
+ throw EvalError(format("‘from’ and ‘to’ arguments to ‘replaceStrings’ have different lengths, at %1%") % pos);
+
+ vector<string> from;
+ from.reserve(args[0]->listSize());
+ for (unsigned int n = 0; n < args[0]->listSize(); ++n)
+ from.push_back(state.forceString(*args[0]->listElems()[n], pos));
+
+ vector<std::pair<string, PathSet>> to;
+ to.reserve(args[1]->listSize());
+ for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+ PathSet ctx;
+ auto s = state.forceString(*args[1]->listElems()[n], ctx, pos);
+ to.push_back(std::make_pair(std::move(s), std::move(ctx)));
+ }
+
+ PathSet context;
+ auto s = state.forceString(*args[2], context, pos);
+
+ string res;
+ for (size_t p = 0; p < s.size(); ) {
+ bool found = false;
+ auto i = from.begin();
+ auto j = to.begin();
+ for (; i != from.end(); ++i, ++j)
+ if (s.compare(p, i->size(), *i) == 0) {
+ found = true;
+ p += i->size();
+ res += j->first;
+ for (auto& path : j->second)
+ context.insert(path);
+ j->second.clear();
+ break;
+ }
+ if (!found) res += s[p++];
+ }
+
+ mkString(v, res, context);
+}
+
+
+/*************************************************************
+ * Versions
+ *************************************************************/
+
+
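+/* Split a package name into its name and version components, where the
+   version starts at the first dash not followed by a letter. Example:
+     parseDrvName "hello-2.10"
+     => { name = "hello"; version = "2.10"; }
+*/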
+static void prim_parseDrvName(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string name = state.forceStringNoCtx(*args[0], pos);
+ DrvName parsed(name);
+ state.mkAttrs(v, 2);
+ mkString(*state.allocAttr(v, state.sName), parsed.name);
+ mkString(*state.allocAttr(v, state.symbols.create("version")), parsed.version);
+ v.attrs->sort();
+}
+
+
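+/* Compare two version strings, returning -1, 0 or 1 depending on
+   whether the first is older than, equal to, or newer than the second.
+   Example:
+     compareVersions "1.0" "2.3"
+     => -1
+*/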
+static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ string version1 = state.forceStringNoCtx(*args[0], pos);
+ string version2 = state.forceStringNoCtx(*args[1], pos);
+ mkInt(v, compareVersions(version1, version2));
+}
+
+
+/*************************************************************
+ * Networking
+ *************************************************************/
+
+
+void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
+ const string & who, bool unpack)
+{
+ string url;
+ Hash expectedHash;
+ string name;
+
+ state.forceValue(*args[0]);
+
+ if (args[0]->type == tAttrs) {
+
+ state.forceAttrs(*args[0], pos);
+
+ for (auto & attr : *args[0]->attrs) {
+ string n(attr.name);
+ if (n == "url")
+ url = state.forceStringNoCtx(*attr.value, *attr.pos);
+ else if (n == "sha256")
+ expectedHash = parseHash16or32(htSHA256, state.forceStringNoCtx(*attr.value, *attr.pos));
+ else if (n == "name")
+ name = state.forceStringNoCtx(*attr.value, *attr.pos);
+ else
+ throw EvalError(format("unsupported argument ‘%1%’ to ‘%2%’, at %3%") % attr.name % who % attr.pos);
+ }
+
+ if (url.empty())
+ throw EvalError(format("‘url’ argument required, at %1%") % pos);
+
+ } else
+ url = state.forceStringNoCtx(*args[0], pos);
+
+ if (state.restricted && !expectedHash)
+ throw Error(format("‘%1%’ is not allowed in restricted mode") % who);
+
+ Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash);
+ mkString(v, res, PathSet({res}));
+}
+
+
+static void prim_fetchurl(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ fetch(state, pos, args, v, "fetchurl", false);
+}
+
+
+static void prim_fetchTarball(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ fetch(state, pos, args, v, "fetchTarball", true);
+}
+
+
+/*************************************************************
+ * Primop registration
+ *************************************************************/
+
+
+RegisterPrimOp::PrimOps * RegisterPrimOp::primOps;
+
+
+RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
+{
+ if (!primOps) primOps = new PrimOps;
+ primOps->emplace_back(name, arity, fun);
+}
+
+
+void EvalState::createBaseEnv()
+{
+ baseEnv.up = 0;
+
+ /* Add global constants such as `true' to the base environment. */
+ Value v;
+
+ /* `builtins' must be first! */
+ mkAttrs(v, 128);
+ addConstant("builtins", v);
+
+ mkBool(v, true);
+ addConstant("true", v);
+
+ mkBool(v, false);
+ addConstant("false", v);
+
+ mkNull(v);
+ addConstant("null", v);
+
+ mkInt(v, time(0));
+ addConstant("__currentTime", v);
+
+ mkString(v, settings.thisSystem);
+ addConstant("__currentSystem", v);
+
+ mkString(v, nixVersion);
+ addConstant("__nixVersion", v);
+
+ mkString(v, store->storeDir);
+ addConstant("__storeDir", v);
+
+ /* Language version. This should be increased every time a new
+ language feature gets added. It's not necessary to increase it
+ when primops get added, because you can just use `builtins ?
+ primOp' to check. */
+ mkInt(v, 4);
+ addConstant("__langVersion", v);
+
+ // Miscellaneous
+ addPrimOp("scopedImport", 2, prim_scopedImport);
+ Value * v2 = allocValue();
+ mkAttrs(*v2, 0);
+ mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2);
+ forceValue(v);
+ addConstant("import", v);
+ if (settings.enableNativeCode) {
+ addPrimOp("__importNative", 2, prim_importNative);
+ addPrimOp("__exec", 1, prim_exec);
+ }
+ addPrimOp("__typeOf", 1, prim_typeOf);
+ addPrimOp("isNull", 1, prim_isNull);
+ addPrimOp("__isFunction", 1, prim_isFunction);
+ addPrimOp("__isString", 1, prim_isString);
+ addPrimOp("__isInt", 1, prim_isInt);
+ addPrimOp("__isFloat", 1, prim_isFloat);
+ addPrimOp("__isBool", 1, prim_isBool);
+ addPrimOp("__genericClosure", 1, prim_genericClosure);
+ addPrimOp("abort", 1, prim_abort);
+ addPrimOp("throw", 1, prim_throw);
+ addPrimOp("__addErrorContext", 2, prim_addErrorContext);
+ addPrimOp("__tryEval", 1, prim_tryEval);
+ addPrimOp("__getEnv", 1, prim_getEnv);
+
+ // Strictness
+ addPrimOp("__seq", 2, prim_seq);
+ addPrimOp("__deepSeq", 2, prim_deepSeq);
+
+ // Debugging
+ addPrimOp("__trace", 2, prim_trace);
+ addPrimOp("__valueSize", 1, prim_valueSize);
+
+ // Paths
+ addPrimOp("__toPath", 1, prim_toPath);
+ addPrimOp("__storePath", 1, prim_storePath);
+ addPrimOp("__pathExists", 1, prim_pathExists);
+ addPrimOp("baseNameOf", 1, prim_baseNameOf);
+ addPrimOp("dirOf", 1, prim_dirOf);
+ addPrimOp("__readFile", 1, prim_readFile);
+ addPrimOp("__readDir", 1, prim_readDir);
+ addPrimOp("__findFile", 2, prim_findFile);
+
+ // Creating files
+ addPrimOp("__toXML", 1, prim_toXML);
+ addPrimOp("__toJSON", 1, prim_toJSON);
+ addPrimOp("__fromJSON", 1, prim_fromJSON);
+ addPrimOp("__toFile", 2, prim_toFile);
+ addPrimOp("__filterSource", 2, prim_filterSource);
+
+ // Sets
+ addPrimOp("__attrNames", 1, prim_attrNames);
+ addPrimOp("__attrValues", 1, prim_attrValues);
+ addPrimOp("__getAttr", 2, prim_getAttr);
+ addPrimOp("__unsafeGetAttrPos", 2, prim_unsafeGetAttrPos);
+ addPrimOp("__hasAttr", 2, prim_hasAttr);
+ addPrimOp("__isAttrs", 1, prim_isAttrs);
+ addPrimOp("removeAttrs", 2, prim_removeAttrs);
+ addPrimOp("__listToAttrs", 1, prim_listToAttrs);
+ addPrimOp("__intersectAttrs", 2, prim_intersectAttrs);
+ addPrimOp("__catAttrs", 2, prim_catAttrs);
+ addPrimOp("__functionArgs", 1, prim_functionArgs);
+
+ // Lists
+ addPrimOp("__isList", 1, prim_isList);
+ addPrimOp("__elemAt", 2, prim_elemAt);
+ addPrimOp("__head", 1, prim_head);
+ addPrimOp("__tail", 1, prim_tail);
+ addPrimOp("map", 2, prim_map);
+ addPrimOp("__filter", 2, prim_filter);
+ addPrimOp("__elem", 2, prim_elem);
+ addPrimOp("__concatLists", 1, prim_concatLists);
+ addPrimOp("__length", 1, prim_length);
+ addPrimOp("__foldl'", 3, prim_foldlStrict);
+ addPrimOp("__any", 2, prim_any);
+ addPrimOp("__all", 2, prim_all);
+ addPrimOp("__genList", 2, prim_genList);
+ addPrimOp("__sort", 2, prim_sort);
+ addPrimOp("__partition", 2, prim_partition);
+
+ // Integer arithmetic
+ addPrimOp("__add", 2, prim_add);
+ addPrimOp("__sub", 2, prim_sub);
+ addPrimOp("__mul", 2, prim_mul);
+ addPrimOp("__div", 2, prim_div);
+ addPrimOp("__lessThan", 2, prim_lessThan);
+
+ // String manipulation
+ addPrimOp("toString", 1, prim_toString);
+ addPrimOp("__substring", 3, prim_substring);
+ addPrimOp("__stringLength", 1, prim_stringLength);
+ addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
+ addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
+ addPrimOp("__hashString", 2, prim_hashString);
+ addPrimOp("__match", 2, prim_match);
+ addPrimOp("__concatStringsSep", 2, prim_concatStringSep);
+ addPrimOp("__replaceStrings", 3, prim_replaceStrings);
+
+ // Versions
+ addPrimOp("__parseDrvName", 1, prim_parseDrvName);
+ addPrimOp("__compareVersions", 2, prim_compareVersions);
+
+ // Derivations
+ addPrimOp("derivationStrict", 1, prim_derivationStrict);
+ addPrimOp("placeholder", 1, prim_placeholder);
+
+ // Networking
+ addPrimOp("__fetchurl", 1, prim_fetchurl);
+ addPrimOp("fetchTarball", 1, prim_fetchTarball);
+
+ /* Add a wrapper around the derivation primop that computes the
+ `drvPath' and `outPath' attributes lazily. */
+ string path = settings.nixDataDir + "/nix/corepkgs/derivation.nix";
+ sDerivationNix = symbols.create(path);
+ evalFile(path, v);
+ addConstant("derivation", v);
+
+ /* Add a value containing the current Nix expression search path. */
+ mkList(v, searchPath.size());
+ int n = 0;
+ for (auto & i : searchPath) {
+ v2 = v.listElems()[n++] = allocValue();
+ mkAttrs(*v2, 2);
+ mkString(*allocAttr(*v2, symbols.create("path")), i.second);
+ mkString(*allocAttr(*v2, symbols.create("prefix")), i.first);
+ v2->attrs->sort();
+ }
+ addConstant("__nixPath", v);
+
+ if (RegisterPrimOp::primOps)
+ for (auto & primOp : *RegisterPrimOp::primOps)
+ addPrimOp(std::get<0>(primOp), std::get<1>(primOp), std::get<2>(primOp));
+
+ /* Now that we've added all primops, sort the `builtins' set,
+ because attribute lookups expect it to be sorted. */
+ baseEnv.values[0]->attrs->sort();
+}
+
+
+}
diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh
new file mode 100644
index 000000000..39d23b04a
--- /dev/null
+++ b/src/libexpr/primops.hh
@@ -0,0 +1,15 @@
+#include "eval.hh"
+
+#include <tuple>
+#include <vector>
+
+namespace nix {
+
+struct RegisterPrimOp
+{
+ typedef std::vector<std::tuple<std::string, size_t, PrimOpFun>> PrimOps;
+ static PrimOps * primOps;
+ RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun);
+};
+
+}
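
For orientation, a minimal sketch of how this registration hook is intended to be used from a separate translation unit: define a primop function and hand it to a static RegisterPrimOp instance, which createBaseEnv() picks up via the loop over RegisterPrimOp::primOps shown above. The names __hello and prim_hello are hypothetical; fetchgit.cc below is the real in-tree user of this mechanism.

#include "primops.hh"
#include "eval-inline.hh"

namespace nix {

/* Hypothetical primop: ignores its single argument and returns a
   constant string. */
static void prim_hello(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    mkString(v, "hello");
}

/* Static registration; createBaseEnv() adds this to the builtins set. */
static RegisterPrimOp r("__hello", 1, prim_hello);

}
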
diff --git a/src/libexpr/primops/fetchgit.cc b/src/libexpr/primops/fetchgit.cc
new file mode 100644
index 000000000..09e2c077b
--- /dev/null
+++ b/src/libexpr/primops/fetchgit.cc
@@ -0,0 +1,84 @@
+#include "primops.hh"
+#include "eval-inline.hh"
+#include "download.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+Path exportGit(ref<Store> store, const std::string & uri, const std::string & rev)
+{
+ if (!isUri(uri))
+ throw EvalError(format("‘%s’ is not a valid URI") % uri);
+
+ Path cacheDir = getCacheDir() + "/nix/git";
+
+ if (!pathExists(cacheDir)) {
+ createDirs(cacheDir);
+ runProgram("git", true, { "init", "--bare", cacheDir });
+ }
+
+ Activity act(*logger, lvlInfo, format("fetching Git repository ‘%s’") % uri);
+
+ std::string localRef = "pid-" + std::to_string(getpid());
+ Path localRefFile = cacheDir + "/refs/heads/" + localRef;
+
+ runProgram("git", true, { "-C", cacheDir, "fetch", uri, rev + ":" + localRef });
+
+ std::string commitHash = chomp(readFile(localRefFile));
+
+ unlink(localRefFile.c_str());
+
+ debug(format("got revision ‘%s’") % commitHash);
+
+ // FIXME: should pipe this, or find some better way to extract a
+ // revision.
+ auto tar = runProgram("git", true, { "-C", cacheDir, "archive", commitHash });
+
+ Path tmpDir = createTempDir();
+ AutoDelete delTmpDir(tmpDir, true);
+
+ runProgram("tar", true, { "x", "-C", tmpDir }, tar);
+
+ return store->addToStore("git-export", tmpDir);
+}
+
+static void prim_fetchgit(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ // FIXME: cut&paste from fetch().
+ if (state.restricted) throw Error("‘fetchgit’ is not allowed in restricted mode");
+
+ std::string url;
+ std::string rev = "master";
+
+ state.forceValue(*args[0]);
+
+ if (args[0]->type == tAttrs) {
+
+ state.forceAttrs(*args[0], pos);
+
+ for (auto & attr : *args[0]->attrs) {
+ string name(attr.name);
+ if (name == "url") {
+ PathSet context;
+ url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
+ if (hasPrefix(url, "/")) url = "file://" + url;
+ } else if (name == "rev")
+ rev = state.forceStringNoCtx(*attr.value, *attr.pos);
+ else
+ throw EvalError("unsupported argument ‘%s’ to ‘fetchgit’, at %s", attr.name, *attr.pos);
+ }
+
+ if (url.empty())
+ throw EvalError(format("‘url’ argument required, at %1%") % pos);
+
+ } else
+ url = state.forceStringNoCtx(*args[0], pos);
+
+ Path storePath = exportGit(state.store, url, rev);
+
+ mkString(v, storePath, PathSet({storePath}));
+}
+
+static RegisterPrimOp r("__fetchgit", 1, prim_fetchgit);
+
+}
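
As a usage illustration (not part of the change), exportGit can also be called directly from C++ once a store handle is available. This sketch assumes an openStore() helper from store-api.hh and a reachable repository URL:

#include "store-api.hh"
#include "primops/fetchgit.hh"

void fetchExample()
{
    auto store = nix::openStore();   // assumed helper from store-api.hh
    nix::Path path = nix::exportGit(store, "https://example.org/repo.git", "master");
    /* ‘path’ now names a store path containing the checked-out tree. */
}
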
diff --git a/src/libexpr/primops/fetchgit.hh b/src/libexpr/primops/fetchgit.hh
new file mode 100644
index 000000000..6ffb21a96
--- /dev/null
+++ b/src/libexpr/primops/fetchgit.hh
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <string>
+
+#include "ref.hh"
+
+namespace nix {
+
+class Store;
+
+Path exportGit(ref<Store> store,
+ const std::string & uri, const std::string & rev);
+
+}
diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh
new file mode 100644
index 000000000..c2ee49dd3
--- /dev/null
+++ b/src/libexpr/symbol-table.hh
@@ -0,0 +1,80 @@
+#pragma once
+
+#include <map>
+#include <unordered_set>
+
+#include "types.hh"
+
+namespace nix {
+
+/* Symbol table used by the parser and evaluator to represent and look
+ up identifiers and attributes efficiently. SymbolTable::create()
+ converts a string into a symbol. Symbols have the property that
+ they can be compared efficiently (using a pointer equality test),
+ because the symbol table stores only one copy of each string. */
+
+class Symbol
+{
+private:
+ const string * s; // pointer into SymbolTable
+ Symbol(const string * s) : s(s) { };
+ friend class SymbolTable;
+
+public:
+ Symbol() : s(0) { };
+
+ bool operator == (const Symbol & s2) const
+ {
+ return s == s2.s;
+ }
+
+ bool operator != (const Symbol & s2) const
+ {
+ return s != s2.s;
+ }
+
+ bool operator < (const Symbol & s2) const
+ {
+ return s < s2.s;
+ }
+
+ operator const string & () const
+ {
+ return *s;
+ }
+
+ bool set() const
+ {
+ return s;
+ }
+
+ bool empty() const
+ {
+ return s->empty();
+ }
+
+ friend std::ostream & operator << (std::ostream & str, const Symbol & sym);
+};
+
+class SymbolTable
+{
+private:
+ typedef std::unordered_set<string> Symbols;
+ Symbols symbols;
+
+public:
+ Symbol create(const string & s)
+ {
+ std::pair<Symbols::iterator, bool> res = symbols.insert(s);
+ return Symbol(&*res.first);
+ }
+
+ unsigned int size() const
+ {
+ return symbols.size();
+ }
+
+ size_t totalSize() const;
+};
+
+}
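
A brief sketch of the interning behaviour described in the comment above: creating the same string twice yields the same Symbol, so equality is a pointer comparison rather than a string compare.

#include "symbol-table.hh"
#include <cassert>

void symbolExample()
{
    nix::SymbolTable symbols;
    nix::Symbol a = symbols.create("out");
    nix::Symbol b = symbols.create("out");
    assert(a == b);                 // same interned string, compared by pointer
    const nix::string & s = a;      // implicit access to the stored string
    assert(s == "out");
}
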
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
new file mode 100644
index 000000000..72e413e44
--- /dev/null
+++ b/src/libexpr/value-to-json.cc
@@ -0,0 +1,95 @@
+#include "value-to-json.hh"
+#include "json.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+#include <cstdlib>
+#include <iomanip>
+
+
+namespace nix {
+
+void printValueAsJSON(EvalState & state, bool strict,
+ Value & v, JSONPlaceholder & out, PathSet & context)
+{
+ checkInterrupt();
+
+ if (strict) state.forceValue(v);
+
+ switch (v.type) {
+
+ case tInt:
+ out.write(v.integer);
+ break;
+
+ case tBool:
+ out.write(v.boolean);
+ break;
+
+ case tString:
+ copyContext(v, context);
+ out.write(v.string.s);
+ break;
+
+ case tPath:
+ out.write(state.copyPathToStore(context, v.path));
+ break;
+
+ case tNull:
+ out.write(nullptr);
+ break;
+
+ case tAttrs: {
+ Bindings::iterator i = v.attrs->find(state.sOutPath);
+ if (i == v.attrs->end()) {
+ auto obj(out.object());
+ StringSet names;
+ for (auto & j : *v.attrs)
+ names.insert(j.name);
+ for (auto & j : names) {
+ Attr & a(*v.attrs->find(state.symbols.create(j)));
+ auto placeholder(obj.placeholder(j));
+ printValueAsJSON(state, strict, *a.value, placeholder, context);
+ }
+ } else
+ printValueAsJSON(state, strict, *i->value, out, context);
+ break;
+ }
+
+ case tList1: case tList2: case tListN: {
+ auto list(out.list());
+ for (unsigned int n = 0; n < v.listSize(); ++n) {
+ auto placeholder(list.placeholder());
+ printValueAsJSON(state, strict, *v.listElems()[n], placeholder, context);
+ }
+ break;
+ }
+
+ case tExternal:
+ v.external->printValueAsJSON(state, strict, out, context);
+ break;
+
+ case tFloat:
+ out.write(v.fpoint);
+ break;
+
+ default:
+ throw TypeError(format("cannot convert %1% to JSON") % showType(v));
+ }
+}
+
+void printValueAsJSON(EvalState & state, bool strict,
+ Value & v, std::ostream & str, PathSet & context)
+{
+ JSONPlaceholder out(str);
+ printValueAsJSON(state, strict, v, out, context);
+}
+
+void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
+ JSONPlaceholder & out, PathSet & context) const
+{
+ throw TypeError(format("cannot convert %1% to JSON") % showType());
+}
+
+
+}
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
new file mode 100644
index 000000000..67fed6487
--- /dev/null
+++ b/src/libexpr/value-to-json.hh
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+class JSONPlaceholder;
+
+void printValueAsJSON(EvalState & state, bool strict,
+ Value & v, JSONPlaceholder & out, PathSet & context);
+
+void printValueAsJSON(EvalState & state, bool strict,
+ Value & v, std::ostream & str, PathSet & context);
+
+}
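
A typical call site, sketched under the assumption that an EvalState and an already-evaluated Value are in hand (e.g. from evalFile):

#include "value-to-json.hh"
#include <iostream>

void dumpJSON(nix::EvalState & state, nix::Value & v)
{
    nix::PathSet context;           // accumulates any store-path context
    nix::printValueAsJSON(state, /* strict = */ true, v, std::cout, context);
}
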
diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc
new file mode 100644
index 000000000..00b1918a8
--- /dev/null
+++ b/src/libexpr/value-to-xml.cc
@@ -0,0 +1,178 @@
+#include "value-to-xml.hh"
+#include "xml-writer.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+#include <cstdlib>
+
+
+namespace nix {
+
+
+static XMLAttrs singletonAttrs(const string & name, const string & value)
+{
+ XMLAttrs attrs;
+ attrs[name] = value;
+ return attrs;
+}
+
+
+static void printValueAsXML(EvalState & state, bool strict, bool location,
+ Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen);
+
+
+static void posToXML(XMLAttrs & xmlAttrs, const Pos & pos)
+{
+ xmlAttrs["path"] = pos.file;
+ xmlAttrs["line"] = (format("%1%") % pos.line).str();
+ xmlAttrs["column"] = (format("%1%") % pos.column).str();
+}
+
+
+static void showAttrs(EvalState & state, bool strict, bool location,
+ Bindings & attrs, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+{
+ StringSet names;
+
+ for (auto & i : attrs)
+ names.insert(i.name);
+
+ for (auto & i : names) {
+ Attr & a(*attrs.find(state.symbols.create(i)));
+
+ XMLAttrs xmlAttrs;
+ xmlAttrs["name"] = i;
+ if (location && a.pos != &noPos) posToXML(xmlAttrs, *a.pos);
+
+ XMLOpenElement _(doc, "attr", xmlAttrs);
+ printValueAsXML(state, strict, location,
+ *a.value, doc, context, drvsSeen);
+ }
+}
+
+
+static void printValueAsXML(EvalState & state, bool strict, bool location,
+ Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+{
+ checkInterrupt();
+
+ if (strict) state.forceValue(v);
+
+ switch (v.type) {
+
+ case tInt:
+ doc.writeEmptyElement("int", singletonAttrs("value", (format("%1%") % v.integer).str()));
+ break;
+
+ case tBool:
+ doc.writeEmptyElement("bool", singletonAttrs("value", v.boolean ? "true" : "false"));
+ break;
+
+ case tString:
+ /* !!! show the context? */
+ copyContext(v, context);
+ doc.writeEmptyElement("string", singletonAttrs("value", v.string.s));
+ break;
+
+ case tPath:
+ doc.writeEmptyElement("path", singletonAttrs("value", v.path));
+ break;
+
+ case tNull:
+ doc.writeEmptyElement("null");
+ break;
+
+ case tAttrs:
+ if (state.isDerivation(v)) {
+ XMLAttrs xmlAttrs;
+
+ Bindings::iterator a = v.attrs->find(state.symbols.create("derivation"));
+
+ Path drvPath;
+ a = v.attrs->find(state.sDrvPath);
+ if (a != v.attrs->end()) {
+ if (strict) state.forceValue(*a->value);
+ if (a->value->type == tString)
+ xmlAttrs["drvPath"] = drvPath = a->value->string.s;
+ }
+
+ a = v.attrs->find(state.sOutPath);
+ if (a != v.attrs->end()) {
+ if (strict) state.forceValue(*a->value);
+ if (a->value->type == tString)
+ xmlAttrs["outPath"] = a->value->string.s;
+ }
+
+ XMLOpenElement _(doc, "derivation", xmlAttrs);
+
+ if (drvPath != "" && drvsSeen.find(drvPath) == drvsSeen.end()) {
+ drvsSeen.insert(drvPath);
+ showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
+ } else
+ doc.writeEmptyElement("repeated");
+ }
+
+ else {
+ XMLOpenElement _(doc, "attrs");
+ showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
+ }
+
+ break;
+
+ case tList1: case tList2: case tListN: {
+ XMLOpenElement _(doc, "list");
+ for (unsigned int n = 0; n < v.listSize(); ++n)
+ printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen);
+ break;
+ }
+
+ case tLambda: {
+ XMLAttrs xmlAttrs;
+ if (location) posToXML(xmlAttrs, v.lambda.fun->pos);
+ XMLOpenElement _(doc, "function", xmlAttrs);
+
+ if (v.lambda.fun->matchAttrs) {
+ XMLAttrs attrs;
+ if (!v.lambda.fun->arg.empty()) attrs["name"] = v.lambda.fun->arg;
+ if (v.lambda.fun->formals->ellipsis) attrs["ellipsis"] = "1";
+ XMLOpenElement _(doc, "attrspat", attrs);
+ for (auto & i : v.lambda.fun->formals->formals)
+ doc.writeEmptyElement("attr", singletonAttrs("name", i.name));
+ } else
+ doc.writeEmptyElement("varpat", singletonAttrs("name", v.lambda.fun->arg));
+
+ break;
+ }
+
+ case tExternal:
+ v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen);
+ break;
+
+ case tFloat:
+ doc.writeEmptyElement("float", singletonAttrs("value", (format("%1%") % v.fpoint).str()));
+ break;
+
+ default:
+ doc.writeEmptyElement("unevaluated");
+ }
+}
+
+
+void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
+ bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const
+{
+ doc.writeEmptyElement("unevaluated");
+}
+
+
+void printValueAsXML(EvalState & state, bool strict, bool location,
+ Value & v, std::ostream & out, PathSet & context)
+{
+ XMLWriter doc(true, out);
+ XMLOpenElement root(doc, "expr");
+ PathSet drvsSeen;
+ printValueAsXML(state, strict, location, v, doc, context, drvsSeen);
+}
+
+
+}
diff --git a/src/libexpr/value-to-xml.hh b/src/libexpr/value-to-xml.hh
new file mode 100644
index 000000000..97657327e
--- /dev/null
+++ b/src/libexpr/value-to-xml.hh
@@ -0,0 +1,14 @@
+#pragma once
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+void printValueAsXML(EvalState & state, bool strict, bool location,
+ Value & v, std::ostream & out, PathSet & context);
+
+}
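
The XML variant is used the same way; the extra ‘location’ flag controls whether source positions are emitted as path/line/column attributes. A sketch, under the same assumptions as the JSON example above:

#include "value-to-xml.hh"
#include <iostream>

void dumpXML(nix::EvalState & state, nix::Value & v)
{
    nix::PathSet context;
    nix::printValueAsXML(state, /* strict = */ true, /* location = */ true,
        v, std::cout, context);
}
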
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
new file mode 100644
index 000000000..9df516f06
--- /dev/null
+++ b/src/libexpr/value.hh
@@ -0,0 +1,274 @@
+#pragma once
+
+#include "symbol-table.hh"
+
+#if HAVE_BOEHMGC
+#include <gc/gc_allocator.h>
+#endif
+
+namespace nix {
+
+
+typedef enum {
+ tInt = 1,
+ tBool,
+ tString,
+ tPath,
+ tNull,
+ tAttrs,
+ tList1,
+ tList2,
+ tListN,
+ tThunk,
+ tApp,
+ tLambda,
+ tBlackhole,
+ tPrimOp,
+ tPrimOpApp,
+ tExternal,
+ tFloat
+} ValueType;
+
+
+class Bindings;
+struct Env;
+struct Expr;
+struct ExprLambda;
+struct PrimOp;
+struct PrimOp;
+class Symbol;
+struct Pos;
+class EvalState;
+class XMLWriter;
+class JSONPlaceholder;
+
+
+typedef long NixInt;
+typedef double NixFloat;
+
+/* External values must descend from ExternalValueBase, so that
+ * type-agnostic nix functions (e.g. showType) can be implemented
+ */
+class ExternalValueBase
+{
+ friend std::ostream & operator << (std::ostream & str, const ExternalValueBase & v);
+ protected:
+ /* Print out the value */
+ virtual std::ostream & print(std::ostream & str) const = 0;
+
+ public:
+ /* Return a simple string describing the type */
+ virtual string showType() const = 0;
+
+ /* Return a string to be used in builtins.typeOf */
+ virtual string typeOf() const = 0;
+
+ /* How much space does this value take up */
+ virtual size_t valueSize(std::set<const void *> & seen) const = 0;
+
+    /* Coerce the value to a string. Defaults to uncoercible, i.e. throws an
+ * error
+ */
+ virtual string coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const;
+
+ /* Compare to another value of the same type. Defaults to uncomparable,
+ * i.e. always false.
+ */
+ virtual bool operator==(const ExternalValueBase & b) const;
+
+    /* Print the value as JSON. Defaults to unconvertible, i.e. throws an error */
+ virtual void printValueAsJSON(EvalState & state, bool strict,
+ JSONPlaceholder & out, PathSet & context) const;
+
+ /* Print the value as XML. Defaults to unevaluated */
+ virtual void printValueAsXML(EvalState & state, bool strict, bool location,
+ XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const;
+
+ virtual ~ExternalValueBase()
+ {
+ };
+};
+
+std::ostream & operator << (std::ostream & str, const ExternalValueBase & v);
+
+
+struct Value
+{
+ ValueType type;
+ union
+ {
+ NixInt integer;
+ bool boolean;
+
+ /* Strings in the evaluator carry a so-called `context' which
+ is a list of strings representing store paths. This is to
+ allow users to write things like
+
+ "--with-freetype2-library=" + freetype + "/lib"
+
+ where `freetype' is a derivation (or a source to be copied
+ to the store). If we just concatenated the strings without
+ keeping track of the referenced store paths, then if the
+ string is used as a derivation attribute, the derivation
+ will not have the correct dependencies in its inputDrvs and
+ inputSrcs.
+
+ The semantics of the context is as follows: when a string
+ with context C is used as a derivation attribute, then the
+ derivations in C will be added to the inputDrvs of the
+ derivation, and the other store paths in C will be added to
+ the inputSrcs of the derivations.
+
+ For canonicity, the store paths should be in sorted order. */
+ struct {
+ const char * s;
+ const char * * context; // must be in sorted order
+ } string;
+
+ const char * path;
+ Bindings * attrs;
+ struct {
+ unsigned int size;
+ Value * * elems;
+ } bigList;
+ Value * smallList[2];
+ struct {
+ Env * env;
+ Expr * expr;
+ } thunk;
+ struct {
+ Value * left, * right;
+ } app;
+ struct {
+ Env * env;
+ ExprLambda * fun;
+ } lambda;
+ PrimOp * primOp;
+ struct {
+ Value * left, * right;
+ } primOpApp;
+ ExternalValueBase * external;
+ NixFloat fpoint;
+ };
+
+ bool isList() const
+ {
+ return type == tList1 || type == tList2 || type == tListN;
+ }
+
+ Value * * listElems()
+ {
+ return type == tList1 || type == tList2 ? smallList : bigList.elems;
+ }
+
+ const Value * const * listElems() const
+ {
+ return type == tList1 || type == tList2 ? smallList : bigList.elems;
+ }
+
+ unsigned int listSize() const
+ {
+ return type == tList1 ? 1 : type == tList2 ? 2 : bigList.size;
+ }
+};
+
+
+/* After overwriting an app node, be sure to clear pointers in the
+ Value to ensure that the target isn't kept alive unnecessarily. */
+static inline void clearValue(Value & v)
+{
+ v.app.left = v.app.right = 0;
+}
+
+
+static inline void mkInt(Value & v, NixInt n)
+{
+ clearValue(v);
+ v.type = tInt;
+ v.integer = n;
+}
+
+
+static inline void mkFloat(Value & v, NixFloat n)
+{
+ clearValue(v);
+ v.type = tFloat;
+ v.fpoint = n;
+}
+
+
+static inline void mkBool(Value & v, bool b)
+{
+ clearValue(v);
+ v.type = tBool;
+ v.boolean = b;
+}
+
+
+static inline void mkNull(Value & v)
+{
+ clearValue(v);
+ v.type = tNull;
+}
+
+
+static inline void mkApp(Value & v, Value & left, Value & right)
+{
+ v.type = tApp;
+ v.app.left = &left;
+ v.app.right = &right;
+}
+
+
+static inline void mkPrimOpApp(Value & v, Value & left, Value & right)
+{
+ v.type = tPrimOpApp;
+ v.app.left = &left;
+ v.app.right = &right;
+}
+
+
+static inline void mkStringNoCopy(Value & v, const char * s)
+{
+ v.type = tString;
+ v.string.s = s;
+ v.string.context = 0;
+}
+
+
+static inline void mkString(Value & v, const Symbol & s)
+{
+ mkStringNoCopy(v, ((const string &) s).c_str());
+}
+
+
+void mkString(Value & v, const char * s);
+
+
+static inline void mkPathNoCopy(Value & v, const char * s)
+{
+ clearValue(v);
+ v.type = tPath;
+ v.path = s;
+}
+
+
+void mkPath(Value & v, const char * s);
+
+
+/* Compute the size in bytes of the given value, including all values
+ and environments reachable from it. Static expressions (Exprs) are
+ not included. */
+size_t valueSize(Value & v);
+
+
+#if HAVE_BOEHMGC
+typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
+typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;
+#else
+typedef std::vector<Value *> ValueVector;
+typedef std::map<Symbol, Value *> ValueMap;
+#endif
+
+
+}
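
A short sketch of the mk* helpers defined above, building a few scalar values in place (heap-allocated values, strings with context and lists go through the EvalState allocators instead):

#include "value.hh"
#include <cassert>

void valueExample()
{
    nix::Value i, b, f;
    nix::mkInt(i, 42);
    nix::mkBool(b, true);
    nix::mkFloat(f, 2.5);
    assert(i.type == nix::tInt && i.integer == 42);
    assert(b.type == nix::tBool && b.boolean);
}
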
diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc
new file mode 100644
index 000000000..9a7a89313
--- /dev/null
+++ b/src/libmain/common-args.cc
@@ -0,0 +1,33 @@
+#include "common-args.hh"
+#include "globals.hh"
+
+namespace nix {
+
+MixCommonArgs::MixCommonArgs(const string & programName)
+ : programName(programName)
+{
+ mkFlag('v', "verbose", "increase verbosity level", []() {
+ verbosity = (Verbosity) (verbosity + 1);
+ });
+
+ mkFlag(0, "quiet", "decrease verbosity level", []() {
+ verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError;
+ });
+
+ mkFlag(0, "debug", "enable debug output", []() {
+ verbosity = lvlDebug;
+ });
+
+ mkFlag(0, "option", {"name", "value"}, "set a Nix configuration option (overriding nix.conf)", 2,
+ [](Strings ss) {
+ auto name = ss.front(); ss.pop_front();
+ auto value = ss.front();
+ try {
+ settings.set(name, value);
+ } catch (UsageError & e) {
+ warn(e.what());
+ }
+ });
+}
+
+}
diff --git a/src/libmain/common-args.hh b/src/libmain/common-args.hh
new file mode 100644
index 000000000..a4de3dccf
--- /dev/null
+++ b/src/libmain/common-args.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "args.hh"
+
+namespace nix {
+
+struct MixCommonArgs : virtual Args
+{
+ string programName;
+ MixCommonArgs(const string & programName);
+};
+
+struct MixDryRun : virtual Args
+{
+ bool dryRun = false;
+
+ MixDryRun()
+ {
+ mkFlag(0, "dry-run", "show what this command would do without doing it", &dryRun);
+ }
+};
+
+struct MixJSON : virtual Args
+{
+ bool json = false;
+
+ MixJSON()
+ {
+ mkFlag(0, "json", "produce JSON output", &json);
+ }
+};
+
+}
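
These mix-ins are meant to be combined by inheritance: a hypothetical command that accepts the common flags plus --dry-run and --json would look roughly like the sketch below (flag registration happens in the mix-in constructors; this assumes the Args virtual base is default-constructible).

#include "common-args.hh"

struct MyToolArgs : nix::MixCommonArgs, nix::MixDryRun, nix::MixJSON
{
    MyToolArgs() : MixCommonArgs("my-tool") { }
};
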
diff --git a/src/libmain/local.mk b/src/libmain/local.mk
new file mode 100644
index 000000000..f1fd3eb72
--- /dev/null
+++ b/src/libmain/local.mk
@@ -0,0 +1,15 @@
+libraries += libmain
+
+libmain_NAME = libnixmain
+
+libmain_DIR := $(d)
+
+libmain_SOURCES := $(wildcard $(d)/*.cc)
+
+libmain_LDFLAGS = $(OPENSSL_LIBS)
+
+libmain_LIBS = libstore libutil libformat
+
+libmain_ALLOW_UNDEFINED = 1
+
+$(eval $(call install-file-in, $(d)/nix-main.pc, $(prefix)/lib/pkgconfig, 0644))
diff --git a/src/libmain/nix-main.pc.in b/src/libmain/nix-main.pc.in
new file mode 100644
index 000000000..de1bdf706
--- /dev/null
+++ b/src/libmain/nix-main.pc.in
@@ -0,0 +1,9 @@
+prefix=@prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: Nix
+Description: Nix Package Manager
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lnixmain
+Cflags: -I${includedir}/nix
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
new file mode 100644
index 000000000..d6c1c0c9c
--- /dev/null
+++ b/src/libmain/shared.cc
@@ -0,0 +1,349 @@
+#include "common-args.hh"
+#include "globals.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "util.hh"
+
+#include <algorithm>
+#include <cctype>
+#include <exception>
+#include <iostream>
+#include <mutex>
+
+#include <cstdlib>
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <openssl/crypto.h>
+
+
+namespace nix {
+
+
+static bool gcWarning = true;
+
+void printGCWarning()
+{
+ if (!gcWarning) return;
+ static bool haveWarned = false;
+ warnOnce(haveWarned,
+ "you did not specify ‘--add-root’; "
+ "the result might be removed by the garbage collector");
+}
+
+
+void printMissing(ref<Store> store, const PathSet & paths)
+{
+ unsigned long long downloadSize, narSize;
+ PathSet willBuild, willSubstitute, unknown;
+ store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
+ printMissing(store, willBuild, willSubstitute, unknown, downloadSize, narSize);
+}
+
+
+void printMissing(ref<Store> store, const PathSet & willBuild,
+ const PathSet & willSubstitute, const PathSet & unknown,
+ unsigned long long downloadSize, unsigned long long narSize)
+{
+ if (!willBuild.empty()) {
+ printInfo(format("these derivations will be built:"));
+ Paths sorted = store->topoSortPaths(willBuild);
+ reverse(sorted.begin(), sorted.end());
+ for (auto & i : sorted)
+ printInfo(format(" %1%") % i);
+ }
+
+ if (!willSubstitute.empty()) {
+ printInfo(format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
+ % (downloadSize / (1024.0 * 1024.0))
+ % (narSize / (1024.0 * 1024.0)));
+ for (auto & i : willSubstitute)
+ printInfo(format(" %1%") % i);
+ }
+
+ if (!unknown.empty()) {
+ printInfo(format("don't know how to build these paths%1%:")
+ % (settings.readOnlyMode ? " (may be caused by read-only store access)" : ""));
+ for (auto & i : unknown)
+ printInfo(format(" %1%") % i);
+ }
+}
+
+
+string getArg(const string & opt,
+ Strings::iterator & i, const Strings::iterator & end)
+{
+ ++i;
+ if (i == end) throw UsageError(format("‘%1%’ requires an argument") % opt);
+ return *i;
+}
+
+
+/* OpenSSL is not thread-safe by default - it will randomly crash
+ unless the user supplies a mutex locking function. So let's do
+ that. */
+static std::vector<std::mutex> opensslLocks;
+
+static void opensslLockCallback(int mode, int type, const char * file, int line)
+{
+ if (mode & CRYPTO_LOCK)
+ opensslLocks[type].lock();
+ else
+ opensslLocks[type].unlock();
+}
+
+
+static void sigHandler(int signo) { }
+
+
+void initNix()
+{
+ /* Turn on buffering for cerr. */
+#if HAVE_PUBSETBUF
+ static char buf[1024];
+ std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
+#endif
+
+ /* Initialise OpenSSL locking. */
+ opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
+ CRYPTO_set_locking_callback(opensslLockCallback);
+
+ settings.loadConfFile();
+
+ startSignalHandlerThread();
+
+ /* Reset SIGCHLD to its default. */
+ struct sigaction act;
+ sigemptyset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ act.sa_flags = 0;
+ if (sigaction(SIGCHLD, &act, 0))
+ throw SysError("resetting SIGCHLD");
+
+ /* Install a dummy SIGUSR1 handler for use with pthread_kill(). */
+ act.sa_handler = sigHandler;
+ if (sigaction(SIGUSR1, &act, 0)) throw SysError("handling SIGUSR1");
+
+ /* Register a SIGSEGV handler to detect stack overflows. */
+ detectStackOverflow();
+
+ /* There is no privacy in the Nix system ;-) At least not for
+ now. In particular, store objects should be readable by
+ everybody. */
+ umask(0022);
+
+ /* Initialise the PRNG. */
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ srandom(tv.tv_usec);
+}
+
+
+struct LegacyArgs : public MixCommonArgs
+{
+ std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg;
+
+ LegacyArgs(const std::string & programName,
+ std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
+ : MixCommonArgs(programName), parseArg(parseArg)
+ {
+ mkFlag('Q', "no-build-output", "do not show build output",
+ &settings.verboseBuild, false);
+
+ mkFlag('K', "keep-failed", "keep temporary directories of failed builds",
+ &(bool&) settings.keepFailed);
+
+ mkFlag('k', "keep-going", "keep going after a build fails",
+ &(bool&) settings.keepGoing);
+
+ mkFlag(0, "fallback", "build from source if substitution fails", []() {
+ settings.tryFallback = true;
+ });
+
+ mkFlag1('j', "max-jobs", "jobs", "maximum number of parallel builds", [=](std::string s) {
+ settings.set("build-max-jobs", s);
+ });
+
+ auto intSettingAlias = [&](char shortName, const std::string & longName,
+ const std::string & description, const std::string & dest) {
+ mkFlag<unsigned int>(shortName, longName, description, [=](unsigned int n) {
+ settings.set(dest, std::to_string(n));
+ });
+ };
+
+ intSettingAlias(0, "cores", "maximum number of CPU cores to use inside a build", "build-cores");
+ intSettingAlias(0, "max-silent-time", "number of seconds of silence before a build is killed", "build-max-silent-time");
+ intSettingAlias(0, "timeout", "number of seconds before a build is killed", "build-timeout");
+
+ mkFlag(0, "readonly-mode", "do not write to the Nix store",
+ &settings.readOnlyMode);
+
+ mkFlag(0, "no-build-hook", "disable use of the build hook mechanism",
+ &(bool&) settings.useBuildHook, false);
+
+ mkFlag(0, "show-trace", "show Nix expression stack trace in evaluation errors",
+ &settings.showTrace);
+
+ mkFlag(0, "no-gc-warning", "disable warning about not using ‘--add-root’",
+ &gcWarning, false);
+ }
+
+ bool processFlag(Strings::iterator & pos, Strings::iterator end) override
+ {
+ if (MixCommonArgs::processFlag(pos, end)) return true;
+ bool res = parseArg(pos, end);
+ if (res) ++pos;
+ return res;
+ }
+
+ bool processArgs(const Strings & args, bool finish) override
+ {
+ if (args.empty()) return true;
+ assert(args.size() == 1);
+ Strings ss(args);
+ auto pos = ss.begin();
+ if (!parseArg(pos, ss.end()))
+ throw UsageError(format("unexpected argument ‘%1%’") % args.front());
+ return true;
+ }
+};
+
+
+void parseCmdLine(int argc, char * * argv,
+ std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
+{
+ LegacyArgs(baseNameOf(argv[0]), parseArg).parseCmdline(argvToStrings(argc, argv));
+}
+
+
+void printVersion(const string & programName)
+{
+ std::cout << format("%1% (Nix) %2%") % programName % nixVersion << std::endl;
+ if (verbosity > lvlInfo) {
+ Strings cfg;
+#if HAVE_BOEHMGC
+ cfg.push_back("gc");
+#endif
+#if HAVE_SODIUM
+ cfg.push_back("signed-caches");
+#endif
+ std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n";
+ std::cout << "Configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
+ std::cout << "Store directory: " << settings.nixStore << "\n";
+ std::cout << "State directory: " << settings.nixStateDir << "\n";
+ }
+ throw Exit();
+}
+
+
+void showManPage(const string & name)
+{
+ restoreSignals();
+ execlp("man", "man", name.c_str(), NULL);
+ throw SysError(format("command ‘man %1%’ failed") % name.c_str());
+}
+
+
+int handleExceptions(const string & programName, std::function<void()> fun)
+{
+ ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this
+
+ string error = ANSI_RED "error:" ANSI_NORMAL " ";
+ try {
+ try {
+ fun();
+ } catch (...) {
+ /* Subtle: we have to make sure that any `interrupted'
+ condition is discharged before we reach printMsg()
+ below, since otherwise it will throw an (uncaught)
+ exception. */
+ setInterruptThrown();
+ throw;
+ }
+ } catch (Exit & e) {
+ return e.status;
+ } catch (UsageError & e) {
+ printError(
+ format(error + "%1%\nTry ‘%2% --help’ for more information.")
+ % e.what() % programName);
+ return 1;
+ } catch (BaseError & e) {
+ printError(format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
+ if (e.prefix() != "" && !settings.showTrace)
+ printError("(use ‘--show-trace’ to show detailed location information)");
+ return e.status;
+ } catch (std::bad_alloc & e) {
+ printError(error + "out of memory");
+ return 1;
+ } catch (std::exception & e) {
+ printError(error + e.what());
+ return 1;
+ }
+
+ return 0;
+}
+
+
+RunPager::RunPager()
+{
+ if (!isatty(STDOUT_FILENO)) return;
+ char * pager = getenv("NIX_PAGER");
+ if (!pager) pager = getenv("PAGER");
+ if (pager && ((string) pager == "" || (string) pager == "cat")) return;
+
+ Pipe toPager;
+ toPager.create();
+
+ pid = startProcess([&]() {
+ if (dup2(toPager.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("dupping stdin");
+ if (!getenv("LESS"))
+ setenv("LESS", "FRSXMK", 1);
+ restoreSignals();
+ if (pager)
+ execl("/bin/sh", "sh", "-c", pager, NULL);
+ execlp("pager", "pager", NULL);
+ execlp("less", "less", NULL);
+ execlp("more", "more", NULL);
+ throw SysError(format("executing ‘%1%’") % pager);
+ });
+
+ pid.setKillSignal(SIGINT);
+
+ if (dup2(toPager.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("dupping stdout");
+}
+
+
+RunPager::~RunPager()
+{
+ try {
+ if (pid != -1) {
+ std::cout.flush();
+ close(STDOUT_FILENO);
+ pid.wait();
+ }
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+string showBytes(unsigned long long bytes)
+{
+ return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
+}
+
+
+PrintFreed::~PrintFreed()
+{
+ if (show)
+ std::cout << format("%1% store paths deleted, %2% freed\n")
+ % results.paths.size()
+ % showBytes(results.bytesFreed);
+}
+
+
+}
diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh
new file mode 100644
index 000000000..6d94a22f7
--- /dev/null
+++ b/src/libmain/shared.hh
@@ -0,0 +1,107 @@
+#pragma once
+
+#include "util.hh"
+#include "args.hh"
+
+#include <signal.h>
+
+#include <locale>
+
+
+namespace nix {
+
+class Exit : public std::exception
+{
+public:
+ int status;
+ Exit() : status(0) { }
+ Exit(int status) : status(status) { }
+};
+
+int handleExceptions(const string & programName, std::function<void()> fun);
+
+void initNix();
+
+void parseCmdLine(int argc, char * * argv,
+ std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg);
+
+void printVersion(const string & programName);
+
+/* Ugh. No better place to put this. */
+void printGCWarning();
+
+class Store;
+
+void printMissing(ref<Store> store, const PathSet & paths);
+
+void printMissing(ref<Store> store, const PathSet & willBuild,
+ const PathSet & willSubstitute, const PathSet & unknown,
+ unsigned long long downloadSize, unsigned long long narSize);
+
+string getArg(const string & opt,
+ Strings::iterator & i, const Strings::iterator & end);
+
+template<class N> N getIntArg(const string & opt,
+ Strings::iterator & i, const Strings::iterator & end, bool allowUnit)
+{
+ ++i;
+ if (i == end) throw UsageError(format("‘%1%’ requires an argument") % opt);
+ string s = *i;
+ N multiplier = 1;
+ if (allowUnit && !s.empty()) {
+ char u = std::toupper(*s.rbegin());
+ if (std::isalpha(u)) {
+ if (u == 'K') multiplier = 1ULL << 10;
+ else if (u == 'M') multiplier = 1ULL << 20;
+ else if (u == 'G') multiplier = 1ULL << 30;
+ else if (u == 'T') multiplier = 1ULL << 40;
+ else throw UsageError(format("invalid unit specifier ‘%1%’") % u);
+ s.resize(s.size() - 1);
+ }
+ }
+ N n;
+ if (!string2Int(s, n))
+ throw UsageError(format("‘%1%’ requires an integer argument") % opt);
+ return n * multiplier;
+}
+
+
+/* Show the manual page for the specified program. */
+void showManPage(const string & name);
+
+/* The constructor of this class starts a pager if stdout is a
+ terminal and $PAGER is set. Stdout is redirected to the pager. */
+class RunPager
+{
+public:
+ RunPager();
+ ~RunPager();
+
+private:
+ Pid pid;
+};
+
+extern volatile ::sig_atomic_t blockInt;
+
+
+/* GC helpers. */
+
+string showBytes(unsigned long long bytes);
+
+struct GCResults;
+
+struct PrintFreed
+{
+ bool show;
+ const GCResults & results;
+ PrintFreed(bool show, const GCResults & results)
+ : show(show), results(results) { }
+ ~PrintFreed();
+};
+
+
+/* Install a SIGSEGV handler to detect stack overflows. */
+void detectStackOverflow();
+
+
+}
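
Putting the pieces of this header together, the entry point of a typical nix-* tool follows the shape below. This is a hedged sketch; the real tools add their own flag handling inside the parseCmdLine callback and their own work inside the handleExceptions closure.

#include "shared.hh"

int main(int argc, char * * argv)
{
    return nix::handleExceptions(argv[0], [&]() {
        nix::initNix();
        nix::parseCmdLine(argc, argv,
            [&](nix::Strings::iterator & arg, const nix::Strings::iterator & end) {
                return false;   // no tool-specific flags in this sketch
            });
        /* ... actual tool logic goes here ... */
    });
}
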
diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc
new file mode 100644
index 000000000..57b6a197c
--- /dev/null
+++ b/src/libmain/stack.cc
@@ -0,0 +1,70 @@
+#include "types.hh"
+
+#include <cstring>
+#include <cstddef>
+#include <cstdlib>
+
+#include <unistd.h>
+#include <signal.h>
+
+namespace nix {
+
+
+static void sigsegvHandler(int signo, siginfo_t * info, void * ctx)
+{
+ /* Detect stack overflows by comparing the faulting address with
+ the stack pointer. Unfortunately, getting the stack pointer is
+ not portable. */
+ bool haveSP = true;
+ char * sp = 0;
+#if defined(__x86_64__) && defined(REG_RSP)
+ sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_RSP];
+#elif defined(REG_ESP)
+ sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_ESP];
+#else
+ haveSP = false;
+#endif
+
+ if (haveSP) {
+ ptrdiff_t diff = (char *) info->si_addr - sp;
+ if (diff < 0) diff = -diff;
+ if (diff < 4096) {
+ char msg[] = "error: stack overflow (possible infinite recursion)\n";
+ [[gnu::unused]] int res = write(2, msg, strlen(msg));
+ _exit(1); // maybe abort instead?
+ }
+ }
+
+ /* Restore default behaviour (i.e. segfault and dump core). */
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ act.sa_flags = 0;
+ if (sigaction(SIGSEGV, &act, 0)) abort();
+}
+
+
+void detectStackOverflow()
+{
+#if defined(SA_SIGINFO) && defined (SA_ONSTACK)
+ /* Install a SIGSEGV handler to detect stack overflows. This
+ requires an alternative stack, otherwise the signal cannot be
+ delivered when we're out of stack space. */
+ stack_t stack;
+ stack.ss_size = 4096 * 4 + MINSIGSTKSZ;
+ stack.ss_sp = new char[stack.ss_size];
+ if (!stack.ss_sp) throw Error("cannot allocate alternative stack");
+ stack.ss_flags = 0;
+ if (sigaltstack(&stack, 0) == -1) throw SysError("cannot set alternative stack");
+
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = sigsegvHandler;
+ act.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ if (sigaction(SIGSEGV, &act, 0))
+        throw SysError("installing SIGSEGV handler");
+#endif
+}
+
+
+}
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
new file mode 100644
index 000000000..b536c6c00
--- /dev/null
+++ b/src/libstore/binary-cache-store.cc
@@ -0,0 +1,407 @@
+#include "archive.hh"
+#include "binary-cache-store.hh"
+#include "compression.hh"
+#include "derivations.hh"
+#include "fs-accessor.hh"
+#include "globals.hh"
+#include "nar-info.hh"
+#include "sync.hh"
+#include "remote-fs-accessor.hh"
+#include "nar-info-disk-cache.hh"
+#include "nar-accessor.hh"
+#include "json.hh"
+
+#include <chrono>
+
+#include <future>
+
+namespace nix {
+
+/* Given requests for a path /nix/store/<x>/<y>, this accessor will
+ first download the NAR for /nix/store/<x> from the binary cache,
+ build a NAR accessor for that NAR, and use that to access <y>. */
+struct BinaryCacheStoreAccessor : public FSAccessor
+{
+ ref<BinaryCacheStore> store;
+
+ std::map<Path, ref<FSAccessor>> nars;
+
+ BinaryCacheStoreAccessor(ref<BinaryCacheStore> store)
+ : store(store)
+ {
+ }
+
+ std::pair<ref<FSAccessor>, Path> fetch(const Path & path_)
+ {
+ auto path = canonPath(path_);
+
+ auto storePath = store->toStorePath(path);
+ std::string restPath = std::string(path, storePath.size());
+
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+
+ auto i = nars.find(storePath);
+ if (i != nars.end()) return {i->second, restPath};
+
+ StringSink sink;
+ store->narFromPath(storePath, sink);
+
+ auto accessor = makeNarAccessor(sink.s);
+ nars.emplace(storePath, accessor);
+ return {accessor, restPath};
+ }
+
+ Stat stat(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->stat(res.second);
+ }
+
+ StringSet readDirectory(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readDirectory(res.second);
+ }
+
+ std::string readFile(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readFile(res.second);
+ }
+
+ std::string readLink(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readLink(res.second);
+ }
+};
+
+BinaryCacheStore::BinaryCacheStore(const Params & params)
+ : Store(params)
+{
+ if (secretKeyFile != "")
+ secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
+
+ StringSink sink;
+ sink << narVersionMagic1;
+ narMagic = *sink.s;
+}
+
+void BinaryCacheStore::init()
+{
+ std::string cacheInfoFile = "nix-cache-info";
+
+ auto cacheInfo = getFile(cacheInfoFile);
+ if (!cacheInfo) {
+ upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
+ } else {
+ for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
+ size_t colon = line.find(':');
+ if (colon == std::string::npos) continue;
+ auto name = line.substr(0, colon);
+ auto value = trim(line.substr(colon + 1, std::string::npos));
+ if (name == "StoreDir") {
+ if (value != storeDir)
+ throw Error(format("binary cache ‘%s’ is for Nix stores with prefix ‘%s’, not ‘%s’")
+ % getUri() % value % storeDir);
+ } else if (name == "WantMassQuery") {
+ wantMassQuery_ = value == "1";
+ } else if (name == "Priority") {
+ string2Int(value, priority);
+ }
+ }
+ }
+}
+
+void BinaryCacheStore::notImpl()
+{
+ throw Error("operation not implemented for binary cache stores");
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+{
+ std::promise<std::shared_ptr<std::string>> promise;
+ getFile(path,
+ [&](std::shared_ptr<std::string> result) {
+ promise.set_value(result);
+ },
+ [&](std::exception_ptr exc) {
+ promise.set_exception(exc);
+ });
+ return promise.get_future().get();
+}
+
+Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
+{
+ assertStorePath(storePath);
+ return storePathToHash(storePath) + ".narinfo";
+}
+
+void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
+{
+ if (!repair && isValidPath(info.path)) return;
+
+ /* Verify that all references are valid. This may do some .narinfo
+ reads, but typically they'll already be cached. */
+ for (auto & ref : info.references)
+ try {
+ if (ref != info.path)
+ queryPathInfo(ref);
+ } catch (InvalidPath &) {
+ throw Error(format("cannot add ‘%s’ to the binary cache because the reference ‘%s’ is not valid")
+ % info.path % ref);
+ }
+
+ auto narInfoFile = narInfoFileFor(info.path);
+
+ assert(nar->compare(0, narMagic.size(), narMagic) == 0);
+
+ auto narInfo = make_ref<NarInfo>(info);
+
+ narInfo->narSize = nar->size();
+ narInfo->narHash = hashString(htSHA256, *nar);
+
+ if (info.narHash && info.narHash != narInfo->narHash)
+ throw Error(format("refusing to copy corrupted path ‘%1%’ to binary cache") % info.path);
+
+ auto accessor_ = std::dynamic_pointer_cast<BinaryCacheStoreAccessor>(accessor);
+
+ /* Optionally write a JSON file containing a listing of the
+ contents of the NAR. */
+ if (writeNARListing) {
+ std::ostringstream jsonOut;
+
+ {
+ JSONObject jsonRoot(jsonOut);
+ jsonRoot.attr("version", 1);
+
+ auto narAccessor = makeNarAccessor(nar);
+
+ if (accessor_)
+ accessor_->nars.emplace(info.path, narAccessor);
+
+ std::function<void(const Path &, JSONPlaceholder &)> recurse;
+
+ recurse = [&](const Path & path, JSONPlaceholder & res) {
+ auto st = narAccessor->stat(path);
+
+ auto obj = res.object();
+
+ switch (st.type) {
+ case FSAccessor::Type::tRegular:
+ obj.attr("type", "regular");
+ obj.attr("size", st.fileSize);
+ if (st.isExecutable)
+ obj.attr("executable", true);
+ break;
+ case FSAccessor::Type::tDirectory:
+ obj.attr("type", "directory");
+ {
+ auto res2 = obj.object("entries");
+ for (auto & name : narAccessor->readDirectory(path)) {
+ auto res3 = res2.placeholder(name);
+ recurse(path + "/" + name, res3);
+ }
+ }
+ break;
+ case FSAccessor::Type::tSymlink:
+ obj.attr("type", "symlink");
+ obj.attr("target", narAccessor->readLink(path));
+ break;
+ default:
+ abort();
+ }
+ };
+
+ {
+ auto res = jsonRoot.placeholder("root");
+ recurse("", res);
+ }
+ }
+
+ upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
+ }
+
+ else {
+ if (accessor_)
+ accessor_->nars.emplace(info.path, makeNarAccessor(nar));
+ }
+
+ /* Compress the NAR. */
+ narInfo->compression = compression;
+ auto now1 = std::chrono::steady_clock::now();
+ auto narCompressed = compress(compression, *nar);
+ auto now2 = std::chrono::steady_clock::now();
+ narInfo->fileHash = hashString(htSHA256, *narCompressed);
+ narInfo->fileSize = narCompressed->size();
+
+ auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+ printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
+ % narInfo->path % narInfo->narSize
+ % ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
+ % duration);
+
+ /* Atomically write the NAR file. */
+ narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar"
+ + (compression == "xz" ? ".xz" :
+ compression == "bzip2" ? ".bz2" :
+ compression == "br" ? ".br" :
+ "");
+ if (repair || !fileExists(narInfo->url)) {
+ stats.narWrite++;
+ upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
+ } else
+ stats.narWriteAverted++;
+
+ stats.narWriteBytes += nar->size();
+ stats.narWriteCompressedBytes += narCompressed->size();
+ stats.narWriteCompressionTimeMs += duration;
+
+ /* Atomically write the NAR info file.*/
+ if (secretKey) narInfo->sign(*secretKey);
+
+ upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");
+
+ auto hashPart = storePathToHash(narInfo->path);
+
+ {
+ auto state_(state.lock());
+ state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
+ }
+
+ if (diskCache)
+ diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
+
+ stats.narInfoWrite++;
+}
+
+bool BinaryCacheStore::isValidPathUncached(const Path & storePath)
+{
+ // FIXME: this only checks whether a .narinfo with a matching hash
+ // part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even
+ // though they shouldn't. Not easily fixed.
+ return fileExists(narInfoFileFor(storePath));
+}
+
+void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
+{
+ auto info = queryPathInfo(storePath).cast<const NarInfo>();
+
+ auto nar = getFile(info->url);
+
+ if (!nar) throw Error(format("file ‘%s’ missing from binary cache") % info->url);
+
+ stats.narRead++;
+ stats.narReadCompressedBytes += nar->size();
+
+ /* Decompress the NAR. FIXME: would be nice to have the remote
+ side do this. */
+ try {
+ nar = decompress(info->compression, *nar);
+ } catch (UnknownCompressionMethod &) {
+ throw Error(format("binary cache path ‘%s’ uses unknown compression method ‘%s’")
+ % storePath % info->compression);
+ }
+
+ stats.narReadBytes += nar->size();
+
+ printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar->size());
+
+ assert(nar->size() % 8 == 0);
+
+ sink((unsigned char *) nar->c_str(), nar->size());
+}
+
+void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ auto narInfoFile = narInfoFileFor(storePath);
+
+ getFile(narInfoFile,
+ [=](std::shared_ptr<std::string> data) {
+ if (!data) return success(0);
+
+ stats.narInfoRead++;
+
+ callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>)
+ std::make_shared<NarInfo>(*this, *data, narInfoFile));
+ },
+ failure);
+}
+
+Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ // FIXME: some cut&paste from LocalStore::addToStore().
+
+ /* Read the whole path into memory. This is not a very scalable
+ method for very large paths, but `copyPath' is mainly used for
+ small files. */
+ StringSink sink;
+ Hash h;
+ if (recursive) {
+ dumpPath(srcPath, sink, filter);
+ h = hashString(hashAlgo, *sink.s);
+ } else {
+ auto s = readFile(srcPath);
+ dumpString(s, sink);
+ h = hashString(hashAlgo, s);
+ }
+
+ ValidPathInfo info;
+ info.path = makeFixedOutputPath(recursive, h, name);
+
+ addToStore(info, sink.s, repair, false, 0);
+
+ return info.path;
+}
+
+Path BinaryCacheStore::addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair)
+{
+ ValidPathInfo info;
+ info.path = computeStorePathForText(name, s, references);
+ info.references = references;
+
+ if (repair || !isValidPath(info.path)) {
+ StringSink sink;
+ dumpString(s, sink);
+ addToStore(info, sink.s, repair, false, 0);
+ }
+
+ return info.path;
+}
+
+ref<FSAccessor> BinaryCacheStore::getFSAccessor()
+{
+ return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
+{
+ Path drvPath;
+
+ if (isDerivation(path))
+ drvPath = path;
+ else {
+ try {
+ auto info = queryPathInfo(path);
+ // FIXME: add a "Log" field to .narinfo
+ if (info->deriver == "") return nullptr;
+ drvPath = info->deriver;
+ } catch (InvalidPath &) {
+ return nullptr;
+ }
+ }
+
+ auto logPath = "log/" + baseNameOf(drvPath);
+
+ debug("fetching build log from binary cache ‘%s/%s’", getUri(), logPath);
+
+ return getFile(logPath);
+}
+
+}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
new file mode 100644
index 000000000..5c2d0acfd
--- /dev/null
+++ b/src/libstore/binary-cache-store.hh
@@ -0,0 +1,133 @@
+#pragma once
+
+#include "crypto.hh"
+#include "store-api.hh"
+
+#include "pool.hh"
+
+#include <atomic>
+
+namespace nix {
+
+struct NarInfo;
+
+class BinaryCacheStore : public Store
+{
+public:
+
+ const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
+ const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
+ const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
+
+private:
+
+ std::unique_ptr<SecretKey> secretKey;
+
+protected:
+
+ BinaryCacheStore(const Params & params);
+
+ [[noreturn]] void notImpl();
+
+public:
+
+ virtual bool fileExists(const std::string & path) = 0;
+
+ virtual void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) = 0;
+
+ /* Return the contents of the specified file, or null if it
+ doesn't exist. */
+ virtual void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) = 0;
+
+ std::shared_ptr<std::string> getFile(const std::string & path);
+
+protected:
+
+ bool wantMassQuery_ = false;
+ int priority = 50;
+
+public:
+
+ virtual void init();
+
+private:
+
+ std::string narMagic;
+
+ std::string narInfoFileFor(const Path & storePath);
+
+public:
+
+ bool isValidPathUncached(const Path & path) override;
+
+ PathSet queryAllValidPaths() override
+ { notImpl(); }
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override;
+
+ void queryReferrers(const Path & path,
+ PathSet & referrers) override
+ { notImpl(); }
+
+ PathSet queryDerivationOutputs(const Path & path) override
+ { notImpl(); }
+
+ StringSet queryDerivationOutputNames(const Path & path) override
+ { notImpl(); }
+
+ Path queryPathFromHashPart(const string & hashPart) override
+ { notImpl(); }
+
+ bool wantMassQuery() override { return wantMassQuery_; }
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override;
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo,
+ PathFilter & filter, bool repair) override;
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair) override;
+
+ void narFromPath(const Path & path, Sink & sink) override;
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override
+ { notImpl(); }
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override
+ { notImpl(); }
+
+ void ensurePath(const Path & path) override
+ { notImpl(); }
+
+ void addTempRoot(const Path & path) override
+ { notImpl(); }
+
+ void addIndirectRoot(const Path & path) override
+ { notImpl(); }
+
+ Roots findRoots() override
+ { notImpl(); }
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override
+ { notImpl(); }
+
+ ref<FSAccessor> getFSAccessor() override;
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override
+ { notImpl(); }
+
+ std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+
+};
+
+}
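
To make the division of labour concrete, here is a hypothetical minimal backend that keeps its files in an in-memory map. Concrete backends only supply the three file primitives plus getUri() (assumed here to be a pure virtual on Store); NAR packing, compression, signing and .narinfo handling are inherited from BinaryCacheStore. A sketch against this revision of the interface, not code from the change:

#include "binary-cache-store.hh"
#include <map>

struct MemoryBinaryCacheStore : nix::BinaryCacheStore
{
    std::map<std::string, std::string> files;

    MemoryBinaryCacheStore(const Params & params)
        : BinaryCacheStore(params) { }

    std::string getUri() override
    { return "memory://"; }

    bool fileExists(const std::string & path) override
    { return files.count(path) != 0; }

    void upsertFile(const std::string & path,
        const std::string & data, const std::string & mimeType) override
    { files[path] = data; }

    void getFile(const std::string & path,
        std::function<void(std::shared_ptr<std::string>)> success,
        std::function<void(std::exception_ptr)> failure) override
    {
        auto i = files.find(path);
        success(i == files.end() ? nullptr : std::make_shared<std::string>(i->second));
    }
};
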
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
new file mode 100644
index 000000000..9bf1ab5aa
--- /dev/null
+++ b/src/libstore/build.cc
@@ -0,0 +1,3935 @@
+#include "references.hh"
+#include "pathlocks.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "builtins.hh"
+#include "finally.hh"
+#include "compression.hh"
+#include "json.hh"
+
+#include <algorithm>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <thread>
+#include <future>
+#include <chrono>
+
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/utsname.h>
+#include <sys/select.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <cstring>
+
+#include <pwd.h>
+#include <grp.h>
+
+/* Includes required for chroot support. */
+#if __linux__
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/ip.h>
+#include <sys/personality.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/syscall.h>
+#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old))
+#endif
+
+#if HAVE_STATVFS
+#include <sys/statvfs.h>
+#endif
+
+
+namespace nix {
+
+using std::map;
+
+
+static string pathNullDevice = "/dev/null";
+
+
+/* Forward definition. */
+class Worker;
+struct HookInstance;
+
+
+/* A pointer to a goal. */
+class Goal;
+class DerivationGoal;
+typedef std::shared_ptr<Goal> GoalPtr;
+typedef std::weak_ptr<Goal> WeakGoalPtr;
+
+struct CompareGoalPtrs {
+ bool operator() (const GoalPtr & a, const GoalPtr & b);
+};
+
+/* Set of goals. */
+typedef set<GoalPtr, CompareGoalPtrs> Goals;
+typedef list<WeakGoalPtr> WeakGoals;
+
+/* A map of paths to goals (and the other way around). */
+typedef map<Path, WeakGoalPtr> WeakGoalMap;
+
+
+
+class Goal : public std::enable_shared_from_this<Goal>
+{
+public:
+ typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
+
+protected:
+
+ /* Backlink to the worker. */
+ Worker & worker;
+
+ /* Goals that this goal is waiting for. */
+ Goals waitees;
+
+ /* Goals waiting for this one to finish. Must use weak pointers
+ here to prevent cycles. */
+ WeakGoals waiters;
+
+ /* Number of goals we are/were waiting for that have failed. */
+ unsigned int nrFailed;
+
+ /* Number of substitution goals we are/were waiting for that
+ failed because there are no substituters. */
+ unsigned int nrNoSubstituters;
+
+ /* Number of substitution goals we are/were waiting for that
+       failed because they had unsubstitutable references. */
+ unsigned int nrIncompleteClosure;
+
+ /* Name of this goal for debugging purposes. */
+ string name;
+
+ /* Whether the goal is finished. */
+ ExitCode exitCode;
+
+ Goal(Worker & worker) : worker(worker)
+ {
+ nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+ exitCode = ecBusy;
+ }
+
+ virtual ~Goal()
+ {
+ trace("goal destroyed");
+ }
+
+public:
+ virtual void work() = 0;
+
+ void addWaitee(GoalPtr waitee);
+
+ virtual void waiteeDone(GoalPtr waitee, ExitCode result);
+
+ virtual void handleChildOutput(int fd, const string & data)
+ {
+ abort();
+ }
+
+ virtual void handleEOF(int fd)
+ {
+ abort();
+ }
+
+ void trace(const format & f);
+
+ string getName()
+ {
+ return name;
+ }
+
+ ExitCode getExitCode()
+ {
+ return exitCode;
+ }
+
+ /* Callback in case of a timeout. It should wake up its waiters,
+ get rid of any running child processes that are being monitored
+ by the worker (important!), etc. */
+ virtual void timedOut() = 0;
+
+ virtual string key() = 0;
+
+protected:
+ void amDone(ExitCode result);
+};
+
+
+bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) {
+ string s1 = a->key();
+ string s2 = b->key();
+ return s1 < s2;
+}
+
+
+typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
+
+
+/* A mapping used to remember for each child process to what goal it
+ belongs, and file descriptors for receiving log data and output
+ path creation commands. */
+struct Child
+{
+ WeakGoalPtr goal;
+ Goal * goal2; // ugly hackery
+ set<int> fds;
+ bool respectTimeouts;
+ bool inBuildSlot;
+ steady_time_point lastOutput; /* time we last got output on stdout/stderr */
+ steady_time_point timeStarted;
+};
+
+
+/* The worker class. */
+class Worker
+{
+private:
+
+ /* Note: the worker should only have strong pointers to the
+ top-level goals. */
+
+ /* The top-level goals of the worker. */
+ Goals topGoals;
+
+ /* Goals that are ready to do some work. */
+ WeakGoals awake;
+
+ /* Goals waiting for a build slot. */
+ WeakGoals wantingToBuild;
+
+ /* Child processes currently running. */
+ std::list<Child> children;
+
+ /* Number of build slots occupied. This includes local builds and
+ substitutions but not remote builds via the build hook. */
+ unsigned int nrLocalBuilds;
+
+ /* Maps used to prevent multiple instantiations of a goal for the
+ same derivation / path. */
+ WeakGoalMap derivationGoals;
+ WeakGoalMap substitutionGoals;
+
+ /* Goals waiting for busy paths to be unlocked. */
+ WeakGoals waitingForAnyGoal;
+
+ /* Goals sleeping for a few seconds (polling a lock). */
+ WeakGoals waitingForAWhile;
+
+    /* Last time the goals in `waitingForAWhile' were woken up. */
+ steady_time_point lastWokenUp;
+
+ /* Cache for pathContentsGood(). */
+ std::map<Path, bool> pathContentsGoodCache;
+
+public:
+
+ /* Set if at least one derivation had a BuildError (i.e. permanent
+ failure). */
+ bool permanentFailure;
+
+ /* Set if at least one derivation had a timeout. */
+ bool timedOut;
+
+ LocalStore & store;
+
+ std::unique_ptr<HookInstance> hook;
+
+ Worker(LocalStore & store);
+ ~Worker();
+
+ /* Make a goal (with caching). */
+ GoalPtr makeDerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
+ std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(const Path & drvPath,
+ const BasicDerivation & drv, BuildMode buildMode = bmNormal);
+ GoalPtr makeSubstitutionGoal(const Path & storePath, bool repair = false);
+
+ /* Remove a dead goal. */
+ void removeGoal(GoalPtr goal);
+
+ /* Wake up a goal (i.e., there is something for it to do). */
+ void wakeUp(GoalPtr goal);
+
+ /* Return the number of local build and substitution processes
+ currently running (but not remote builds via the build
+ hook). */
+ unsigned int getNrLocalBuilds();
+
+ /* Registers a running child process. `inBuildSlot' means that
+ the process counts towards the jobs limit. */
+ void childStarted(GoalPtr goal, const set<int> & fds,
+ bool inBuildSlot, bool respectTimeouts);
+
+ /* Unregisters a running child process. `wakeSleepers' should be
+ false if there is no sense in waking up goals that are sleeping
+ because they can't run yet (e.g., there is no free build slot,
+ or the hook would still say `postpone'). */
+ void childTerminated(Goal * goal, bool wakeSleepers = true);
+
+ /* Put `goal' to sleep until a build slot becomes available (which
+ might be right away). */
+ void waitForBuildSlot(GoalPtr goal);
+
+ /* Wait for any goal to finish. Pretty indiscriminate way to
+ wait for some resource that some other goal is holding. */
+ void waitForAnyGoal(GoalPtr goal);
+
+ /* Wait for a few seconds and then retry this goal. Used when
+ waiting for a lock held by another process. This kind of
+ polling is inefficient, but POSIX doesn't really provide a way
+ to wait for multiple locks in the main select() loop. */
+ void waitForAWhile(GoalPtr goal);
+
+ /* Loop until the specified top-level goals have finished. */
+ void run(const Goals & topGoals);
+
+ /* Wait for input to become available. */
+ void waitForInput();
+
+ unsigned int exitStatus();
+
+ /* Check whether the given valid path exists and has the right
+ contents. */
+ bool pathContentsGood(const Path & path);
+
+ void markContentsGood(const Path & path);
+};
+
+
+//////////////////////////////////////////////////////////////////////
+
+
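+/* Append a goal to a list of weak goal pointers, unless it is already
+ present. */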
+void addToWeakGoals(WeakGoals & goals, GoalPtr p)
+{
+ // FIXME: necessary?
+ // FIXME: O(n)
+ for (auto & i : goals)
+ if (i.lock() == p) return;
+ goals.push_back(p);
+}
+
+
+void Goal::addWaitee(GoalPtr waitee)
+{
+ waitees.insert(waitee);
+ addToWeakGoals(waitee->waiters, shared_from_this());
+}
+
+
+void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
+{
+ assert(waitees.find(waitee) != waitees.end());
+ waitees.erase(waitee);
+
+ trace(format("waitee ‘%1%’ done; %2% left") %
+ waitee->name % waitees.size());
+
+ if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
+
+ if (result == ecNoSubstituters) ++nrNoSubstituters;
+
+ if (result == ecIncompleteClosure) ++nrIncompleteClosure;
+
+ if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
+
+ /* If we failed and keepGoing is not set, we remove all
+ remaining waitees. */
+ for (auto & goal : waitees) {
+ WeakGoals waiters2;
+ for (auto & j : goal->waiters)
+ if (j.lock() != shared_from_this()) waiters2.push_back(j);
+ goal->waiters = waiters2;
+ }
+ waitees.clear();
+
+ worker.wakeUp(shared_from_this());
+ }
+}
+
+
+void Goal::amDone(ExitCode result)
+{
+ trace("done");
+ assert(exitCode == ecBusy);
+ assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
+ exitCode = result;
+ for (auto & i : waiters) {
+ GoalPtr goal = i.lock();
+ if (goal) goal->waiteeDone(shared_from_this(), result);
+ }
+ waiters.clear();
+ worker.removeGoal(shared_from_this());
+}
+
+
+void Goal::trace(const format & f)
+{
+ debug(format("%1%: %2%") % name % f);
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+/* Common initialisation performed in child processes. */
+static void commonChildInit(Pipe & logPipe)
+{
+ restoreSignals();
+
+ /* Put the child in a separate session (and thus a separate
+ process group) so that it has no controlling terminal (meaning
+ that e.g. ssh cannot open /dev/tty) and it doesn't receive
+ terminal signals. */
+ if (setsid() == -1)
+ throw SysError(format("creating a new session"));
+
+ /* Dup the write side of the logger pipe into stderr. */
+ if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1)
+ throw SysError("cannot pipe standard error into log file");
+
+ /* Dup stderr to stdout. */
+ if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
+ throw SysError("cannot dup stderr into stdout");
+
+ /* Reroute stdin to /dev/null. */
+ int fdDevNull = open(pathNullDevice.c_str(), O_RDWR);
+ if (fdDevNull == -1)
+ throw SysError(format("cannot open ‘%1%’") % pathNullDevice);
+ if (dup2(fdDevNull, STDIN_FILENO) == -1)
+ throw SysError("cannot dup null device into stdin");
+ close(fdDevNull);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
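+/* Acquires a member of the group specified by ‘build-users-group’ for
+ the exclusive use of one build, so that concurrent builds cannot
+ interfere with each other. */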
+class UserLock
+{
+private:
+ /* POSIX locks suck. If we have a lock on a file, and we open and
+ close that file again (without closing the original file
+ descriptor), we lose the lock. So we have to be *very* careful
+ not to open a lock file on which we are holding a lock. */
+ static Sync<PathSet> lockedPaths_;
+
+ Path fnUserLock;
+ AutoCloseFD fdUserLock;
+
+ string user;
+ uid_t uid;
+ gid_t gid;
+ std::vector<gid_t> supplementaryGIDs;
+
+public:
+ UserLock();
+ ~UserLock();
+
+ void kill();
+
+ string getUser() { return user; }
+ uid_t getUID() { assert(uid); return uid; }
+ gid_t getGID() { assert(gid); return gid; }
+ std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
+
+ bool enabled() { return uid != 0; }
+
+};
+
+
+Sync<PathSet> UserLock::lockedPaths_;
+
+
+UserLock::UserLock()
+{
+ assert(settings.buildUsersGroup != "");
+
+ /* Get the members of the build-users-group. */
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error(format("the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+ % settings.buildUsersGroup);
+ gid = gr->gr_gid;
+
+ /* Copy the result of getgrnam. */
+ Strings users;
+ for (char * * p = gr->gr_mem; *p; ++p) {
+ debug(format("found build user ‘%1%’") % *p);
+ users.push_back(*p);
+ }
+
+ if (users.empty())
+ throw Error(format("the build users group ‘%1%’ has no members")
+ % settings.buildUsersGroup);
+
+ /* Find a user account that isn't currently in use for another
+ build. */
+ for (auto & i : users) {
+ debug(format("trying user ‘%1%’") % i);
+
+ struct passwd * pw = getpwnam(i.c_str());
+ if (!pw)
+ throw Error(format("the user ‘%1%’ in the group ‘%2%’ does not exist")
+ % i % settings.buildUsersGroup);
+
+ createDirs(settings.nixStateDir + "/userpool");
+
+ fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
+
+ {
+ auto lockedPaths(lockedPaths_.lock());
+ if (lockedPaths->count(fnUserLock))
+ /* We already have a lock on this one. */
+ continue;
+ lockedPaths->insert(fnUserLock);
+ }
+
+ try {
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError(format("opening user lock ‘%1%’") % fnUserLock);
+
+ if (lockFile(fd.get(), ltWrite, false)) {
+ fdUserLock = std::move(fd);
+ user = i;
+ uid = pw->pw_uid;
+
+ /* Sanity check... */
+ if (uid == getuid() || uid == geteuid())
+ throw Error(format("the Nix user should not be a member of ‘%1%’")
+ % settings.buildUsersGroup);
+
+#if __linux__
+ /* Get the list of supplementary groups of this build user. This
+ is usually either empty or contains a group such as "kvm". */
+ supplementaryGIDs.resize(10);
+ int ngroups = supplementaryGIDs.size();
+ int err = getgrouplist(pw->pw_name, pw->pw_gid,
+ supplementaryGIDs.data(), &ngroups);
+ if (err == -1)
+ throw Error(format("failed to get list of supplementary groups for ‘%1%’") % pw->pw_name);
+
+ supplementaryGIDs.resize(ngroups);
+#endif
+
+ return;
+ }
+
+ } catch (...) {
+ lockedPaths_.lock()->erase(fnUserLock);
+ }
+ }
+
+ throw Error(format("all build users are currently in use; "
+ "consider creating additional users and adding them to the ‘%1%’ group")
+ % settings.buildUsersGroup);
+}
+
+
+UserLock::~UserLock()
+{
+ auto lockedPaths(lockedPaths_.lock());
+ assert(lockedPaths->count(fnUserLock));
+ lockedPaths->erase(fnUserLock);
+}
+
+
+void UserLock::kill()
+{
+ killUser(uid);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
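+/* A running instance of the external build hook (the program named by
+ $NIX_BUILD_HOOK), together with the pipes used to talk to it. */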
+struct HookInstance
+{
+ /* Pipes for talking to the build hook. */
+ Pipe toHook;
+
+ /* Pipe for the hook's standard output/error. */
+ Pipe fromHook;
+
+ /* Pipe for the builder's standard output/error. */
+ Pipe builderOut;
+
+ /* The process ID of the hook. */
+ Pid pid;
+
+ HookInstance();
+
+ ~HookInstance();
+};
+
+
+HookInstance::HookInstance()
+{
+ debug("starting build hook");
+
+ Path buildHook = getEnv("NIX_BUILD_HOOK");
+ if (string(buildHook, 0, 1) != "/") buildHook = settings.nixLibexecDir + "/nix/" + buildHook;
+ buildHook = canonPath(buildHook);
+
+ /* Create a pipe to get the output of the child. */
+ fromHook.create();
+
+ /* Create the communication pipes. */
+ toHook.create();
+
+ /* Create a pipe to get the output of the builder. */
+ builderOut.create();
+
+ /* Fork the hook. */
+ pid = startProcess([&]() {
+
+ commonChildInit(fromHook);
+
+ if (chdir("/") == -1) throw SysError("changing into /");
+
+ /* Dup the communication pipes. */
+ if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("dupping to-hook read side");
+
+ /* Use fd 4 for the builder's stdout/stderr. */
+ if (dup2(builderOut.writeSide.get(), 4) == -1)
+ throw SysError("dupping builder's stdout/stderr");
+
+ Strings args = {
+ baseNameOf(buildHook),
+ settings.thisSystem,
+ (format("%1%") % settings.maxSilentTime).str(),
+ (format("%1%") % settings.buildTimeout).str()
+ };
+
+ execv(buildHook.c_str(), stringsToCharPtrs(args).data());
+
+ throw SysError(format("executing ‘%1%’") % buildHook);
+ });
+
+ pid.setSeparatePG(true);
+ fromHook.writeSide = -1;
+ toHook.readSide = -1;
+}
+
+
+HookInstance::~HookInstance()
+{
+ try {
+ toHook.writeSide = -1;
+ if (pid != -1) pid.kill();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+typedef map<std::string, std::string> StringRewrites;
+
+
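+/* Apply each rewrite in ‘rewrites’ as a literal substring replacement
+ over ‘s’. Used below for the hash rewriting of output paths. */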
+std::string rewriteStrings(std::string s, const StringRewrites & rewrites)
+{
+ for (auto & i : rewrites) {
+ size_t j = 0;
+ while ((j = s.find(i.first, j)) != string::npos)
+ s.replace(j, i.first.size(), i.second);
+ }
+ return s;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
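+/* The possible replies of the build hook: accept the job, decline it,
+ or postpone it until a later attempt. */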
+typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
+
+class SubstitutionGoal;
+
+class DerivationGoal : public Goal
+{
+private:
+ /* Whether to use an on-disk .drv file. */
+ bool useDerivation;
+
+ /* The path of the derivation. */
+ Path drvPath;
+
+ /* The specific outputs that we need to build. Empty means all of
+ them. */
+ StringSet wantedOutputs;
+
+ /* Whether additional wanted outputs have been added. */
+ bool needRestart = false;
+
+ /* Whether to retry substituting the outputs after building the
+ inputs. */
+ bool retrySubstitution = false;
+
+ /* The derivation stored at drvPath. */
+ std::unique_ptr<BasicDerivation> drv;
+
+ /* The remainder is state held during the build. */
+
+ /* Locks on the output paths. */
+ PathLocks outputLocks;
+
+ /* All input paths (that is, the union of FS closures of the
+ immediate input paths). */
+ PathSet inputPaths;
+
+ /* Referenceable paths (i.e., input and output paths). */
+ PathSet allPaths;
+
+ /* Outputs that are already valid. If we're repairing, these are
+ the outputs that are valid *and* not corrupt. */
+ PathSet validPaths;
+
+ /* Outputs that are corrupt or not valid. */
+ PathSet missingPaths;
+
+ /* User selected for running the builder. */
+ std::unique_ptr<UserLock> buildUser;
+
+ /* The process ID of the builder. */
+ Pid pid;
+
+ /* The temporary directory. */
+ Path tmpDir;
+
+ /* The path of the temporary directory in the sandbox. */
+ Path tmpDirInSandbox;
+
+ /* File descriptor for the log file. */
+ AutoCloseFD fdLogFile;
+ std::shared_ptr<BufferedSink> logFileSink, logSink;
+
+ /* Number of bytes received from the builder's stdout/stderr. */
+ unsigned long logSize;
+
+ /* The most recent log lines. */
+ std::list<std::string> logTail;
+
+ std::string currentLogLine;
+ size_t currentLogLinePos = 0; // to handle carriage return
+
+ /* Pipe for the builder's standard output/error. */
+ Pipe builderOut;
+
+ /* Pipe for synchronising updates to the builder user namespace. */
+ Pipe userNamespaceSync;
+
+ /* The build hook. */
+ std::unique_ptr<HookInstance> hook;
+
+ /* Whether we're currently doing a chroot build. */
+ bool useChroot = false;
+
+ Path chrootRootDir;
+
+ /* RAII object to delete the chroot directory. */
+ std::shared_ptr<AutoDelete> autoDelChroot;
+
+ /* Whether this is a fixed-output derivation. */
+ bool fixedOutput;
+
+ /* Whether to run the build in a private network namespace. */
+ bool privateNetwork = false;
+
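+ /* The goal is driven as a state machine: ‘state’ points at the
+ member function implementing the current state, and work()
+ simply invokes it. */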
+ typedef void (DerivationGoal::*GoalState)();
+ GoalState state;
+
+ /* Stuff we need to pass to initChild(). */
+ struct ChrootPath {
+ Path source;
+ bool optional;
+ ChrootPath(Path source = "", bool optional = false)
+ : source(source), optional(optional)
+ { }
+ };
+ typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
+ DirsInChroot dirsInChroot;
+
+ typedef map<string, string> Environment;
+ Environment env;
+
+#if __APPLE__
+ typedef string SandboxProfile;
+ SandboxProfile additionalSandboxProfile;
+ AutoDelete autoDelSandbox;
+#endif
+
+ /* Hash rewriting. */
+ StringRewrites inputRewrites, outputRewrites;
+ typedef map<Path, Path> RedirectedOutputs;
+ RedirectedOutputs redirectedOutputs;
+
+ BuildMode buildMode;
+
+ /* If we're repairing without a chroot, there may be outputs that
+ are valid but corrupt. So we redirect these outputs to
+ temporary paths. */
+ PathSet redirectedBadOutputs;
+
+ BuildResult result;
+
+ /* The current round, if we're building multiple times. */
+ unsigned int curRound = 1;
+
+ unsigned int nrRounds;
+
+ /* Path registration info from the previous round, if we're
+ building multiple times. Since this contains the hash, it
+ allows us to compare whether two rounds produced the same
+ result. */
+ ValidPathInfos prevInfos;
+
+ const uid_t sandboxUid = 1000;
+ const gid_t sandboxGid = 100;
+
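+ /* The value used for $HOME in the build environment; deliberately
+ a non-existent path (see initEnv()). */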
+ const static Path homeDir;
+
+public:
+ DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
+ Worker & worker, BuildMode buildMode = bmNormal);
+ DerivationGoal(const Path & drvPath, const BasicDerivation & drv,
+ Worker & worker, BuildMode buildMode = bmNormal);
+ ~DerivationGoal();
+
+ void timedOut() override;
+
+ string key() override
+ {
+ /* Ensure that derivations get built in order of their name,
+ i.e. a derivation named "aardvark" always comes before
+ "baboon". And substitution goals always happen before
+ derivation goals (due to "b$"). */
+ return "b$" + storePathToName(drvPath) + "$" + drvPath;
+ }
+
+ void work() override;
+
+ Path getDrvPath()
+ {
+ return drvPath;
+ }
+
+ /* Add wanted outputs to an already existing derivation goal. */
+ void addWantedOutputs(const StringSet & outputs);
+
+ BuildResult getResult() { return result; }
+
+private:
+ /* The states. */
+ void getDerivation();
+ void loadDerivation();
+ void haveDerivation();
+ void outputsSubstituted();
+ void closureRepaired();
+ void inputsRealised();
+ void tryToBuild();
+ void buildDone();
+
+ /* Is the build hook willing to perform the build? */
+ HookReply tryBuildHook();
+
+ /* Start building a derivation. */
+ void startBuilder();
+
+ /* Fill in the environment for the builder. */
+ void initEnv();
+
+ /* Write a JSON file containing the derivation attributes. */
+ void writeStructuredAttrs();
+
+ /* Make a file owned by the builder. */
+ void chownToBuilder(const Path & path);
+
+ /* Handle the exportReferencesGraph attribute. */
+ void doExportReferencesGraph();
+
+ /* Run the builder's process. */
+ void runChild();
+
+ friend int childEntry(void *);
+
+ /* Check that the derivation outputs all exist and register them
+ as valid. */
+ void registerOutputs();
+
+ /* Open a log file and a pipe to it. */
+ Path openLogFile();
+
+ /* Close the log file. */
+ void closeLogFile();
+
+ /* Delete the temporary directory, if we have one. */
+ void deleteTmpDir(bool force);
+
+ /* Callback used by the worker to write to the log. */
+ void handleChildOutput(int fd, const string & data) override;
+ void handleEOF(int fd) override;
+ void flushLine();
+
+ /* Return the set of (in)valid paths. */
+ PathSet checkPathValidity(bool returnValid, bool checkHash);
+
+ /* Abort the goal if `path' failed to build. */
+ bool pathFailed(const Path & path);
+
+ /* Forcibly kill the child process, if any. */
+ void killChild();
+
+ Path addHashRewrite(const Path & path);
+
+ void repairClosure();
+
+ void done(BuildResult::Status status, const string & msg = "");
+};
+
+
+const Path DerivationGoal::homeDir = "/homeless-shelter";
+
+
+DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
+ Worker & worker, BuildMode buildMode)
+ : Goal(worker)
+ , useDerivation(true)
+ , drvPath(drvPath)
+ , wantedOutputs(wantedOutputs)
+ , buildMode(buildMode)
+{
+ state = &DerivationGoal::getDerivation;
+ name = (format("building of ‘%1%’") % drvPath).str();
+ trace("created");
+}
+
+
+DerivationGoal::DerivationGoal(const Path & drvPath, const BasicDerivation & drv,
+ Worker & worker, BuildMode buildMode)
+ : Goal(worker)
+ , useDerivation(false)
+ , drvPath(drvPath)
+ , buildMode(buildMode)
+{
+ this->drv = std::unique_ptr<BasicDerivation>(new BasicDerivation(drv));
+ state = &DerivationGoal::haveDerivation;
+ name = (format("building of %1%") % showPaths(drv.outputPaths())).str();
+ trace("created");
+
+ /* Prevent the .chroot directory from being
+ garbage-collected. (See isActiveTempFile() in gc.cc.) */
+ worker.store.addTempRoot(drvPath);
+}
+
+
+DerivationGoal::~DerivationGoal()
+{
+ /* Careful: we should never ever throw an exception from a
+ destructor. */
+ try { killChild(); } catch (...) { ignoreException(); }
+ try { deleteTmpDir(false); } catch (...) { ignoreException(); }
+ try { closeLogFile(); } catch (...) { ignoreException(); }
+}
+
+
+void DerivationGoal::killChild()
+{
+ if (pid != -1) {
+ worker.childTerminated(this);
+
+ if (buildUser) {
+ /* If we're using a build user, then there is a tricky
+ race condition: if we kill the build user before the
+ child has done its setuid() to the build user uid, then
+ it won't be killed, and we'll potentially lock up in
+ pid.wait(). So also send a conventional kill to the
+ child. */
+ ::kill(-pid, SIGKILL); /* ignore the result */
+ buildUser->kill();
+ pid.wait();
+ } else
+ pid.kill();
+
+ assert(pid == -1);
+ }
+
+ hook.reset();
+}
+
+
+void DerivationGoal::timedOut()
+{
+ killChild();
+ done(BuildResult::TimedOut);
+}
+
+
+void DerivationGoal::work()
+{
+ (this->*state)();
+}
+
+
+void DerivationGoal::addWantedOutputs(const StringSet & outputs)
+{
+ /* If we already want all outputs, there is nothing to do. */
+ if (wantedOutputs.empty()) return;
+
+ if (outputs.empty()) {
+ wantedOutputs.clear();
+ needRestart = true;
+ } else
+ for (auto & i : outputs)
+ if (wantedOutputs.find(i) == wantedOutputs.end()) {
+ wantedOutputs.insert(i);
+ needRestart = true;
+ }
+}
+
+
+void DerivationGoal::getDerivation()
+{
+ trace("init");
+
+ /* The first thing to do is to make sure that the derivation
+ exists. If it doesn't, it may be created through a
+ substitute. */
+ if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) {
+ loadDerivation();
+ return;
+ }
+
+ addWaitee(worker.makeSubstitutionGoal(drvPath));
+
+ state = &DerivationGoal::loadDerivation;
+}
+
+
+void DerivationGoal::loadDerivation()
+{
+ trace("loading derivation");
+
+ if (nrFailed != 0) {
+ printError(format("cannot build missing derivation ‘%1%’") % drvPath);
+ done(BuildResult::MiscFailure);
+ return;
+ }
+
+ /* `drvPath' should already be a root, but let's be on the safe
+ side: if the user forgot to make it a root, we wouldn't want
+ things being garbage collected while we're busy. */
+ worker.store.addTempRoot(drvPath);
+
+ assert(worker.store.isValidPath(drvPath));
+
+ /* Get the derivation. */
+ drv = std::unique_ptr<BasicDerivation>(new Derivation(worker.store.derivationFromPath(drvPath)));
+
+ haveDerivation();
+}
+
+
+void DerivationGoal::haveDerivation()
+{
+ trace("have derivation");
+
+ for (auto & i : drv->outputs)
+ worker.store.addTempRoot(i.second.path);
+
+ /* Check which output paths are not already valid. */
+ PathSet invalidOutputs = checkPathValidity(false, buildMode == bmRepair);
+
+ /* If they are all valid, then we're done. */
+ if (invalidOutputs.size() == 0 && buildMode == bmNormal) {
+ done(BuildResult::AlreadyValid);
+ return;
+ }
+
+ /* Reject doing a hash build of anything other than a fixed-output
+ derivation. */
+ if (buildMode == bmHash) {
+ if (drv->outputs.size() != 1 ||
+ drv->outputs.find("out") == drv->outputs.end() ||
+ drv->outputs["out"].hashAlgo == "")
+ throw Error(format("cannot do a hash build of non-fixed-output derivation ‘%1%’") % drvPath);
+ }
+
+ /* We are first going to try to create the invalid output paths
+ through substitutes. If that doesn't work, we'll build
+ them. */
+ if (settings.useSubstitutes && drv->substitutesAllowed())
+ for (auto & i : invalidOutputs)
+ addWaitee(worker.makeSubstitutionGoal(i, buildMode == bmRepair));
+
+ if (waitees.empty()) /* to prevent hang (no wake-up event) */
+ outputsSubstituted();
+ else
+ state = &DerivationGoal::outputsSubstituted;
+}
+
+
+void DerivationGoal::outputsSubstituted()
+{
+ trace("all outputs substituted (maybe)");
+
+ if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback) {
+ done(BuildResult::TransientFailure, (format("some substitutes for the outputs of derivation ‘%1%’ failed (usually happens due to networking issues); try ‘--fallback’ to build the derivation from source") % drvPath).str());
+ return;
+ }
+
+ /* If the substitutes form an incomplete closure, then we should
+ build the dependencies of this derivation, but after that, we
+ can still use the substitutes for this derivation itself. */
+ if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true;
+
+ nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+
+ if (needRestart) {
+ needRestart = false;
+ haveDerivation();
+ return;
+ }
+
+ unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size();
+ if (buildMode == bmNormal && nrInvalid == 0) {
+ done(BuildResult::Substituted);
+ return;
+ }
+ if (buildMode == bmRepair && nrInvalid == 0) {
+ repairClosure();
+ return;
+ }
+ if (buildMode == bmCheck && nrInvalid > 0)
+ throw Error(format("some outputs of ‘%1%’ are not valid, so checking is not possible") % drvPath);
+
+ /* Otherwise, at least one of the output paths could not be
+ produced using a substitute. So we have to build instead. */
+
+ /* Make sure checkPathValidity() from now on checks all
+ outputs. */
+ wantedOutputs = PathSet();
+
+ /* The inputs must be built before we can build this goal. */
+ if (useDerivation)
+ for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs)
+ addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal));
+
+ for (auto & i : drv->inputSrcs) {
+ if (worker.store.isValidPath(i)) continue;
+ if (!settings.useSubstitutes)
+ throw Error(format("dependency of ‘%1%’ of ‘%2%’ does not exist, and substitution is disabled")
+ % i % drvPath);
+ addWaitee(worker.makeSubstitutionGoal(i));
+ }
+
+ if (waitees.empty()) /* to prevent hang (no wake-up event) */
+ inputsRealised();
+ else
+ state = &DerivationGoal::inputsRealised;
+}
+
+
+void DerivationGoal::repairClosure()
+{
+ /* If we're repairing, we now know that our own outputs are valid.
+ Now check whether the other paths in the outputs closure are
+ good. If not, then start derivation goals for the derivations
+ that produced those outputs. */
+
+ /* Get the output closure. */
+ PathSet outputClosure;
+ for (auto & i : drv->outputs) {
+ if (!wantOutput(i.first, wantedOutputs)) continue;
+ worker.store.computeFSClosure(i.second.path, outputClosure);
+ }
+
+ /* Filter out our own outputs (which we have already checked). */
+ for (auto & i : drv->outputs)
+ outputClosure.erase(i.second.path);
+
+ /* Get all dependencies of this derivation so that we know which
+ derivation is responsible for which path in the output
+ closure. */
+ PathSet inputClosure;
+ if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure);
+ std::map<Path, Path> outputsToDrv;
+ for (auto & i : inputClosure)
+ if (isDerivation(i)) {
+ Derivation drv = worker.store.derivationFromPath(i);
+ for (auto & j : drv.outputs)
+ outputsToDrv[j.second.path] = i;
+ }
+
+ /* Check each path (slow!). */
+ PathSet broken;
+ for (auto & i : outputClosure) {
+ if (worker.pathContentsGood(i)) continue;
+ printError(format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % i % drvPath);
+ Path drvPath2 = outputsToDrv[i];
+ if (drvPath2 == "")
+ addWaitee(worker.makeSubstitutionGoal(i, true));
+ else
+ addWaitee(worker.makeDerivationGoal(drvPath2, PathSet(), bmRepair));
+ }
+
+ if (waitees.empty()) {
+ done(BuildResult::AlreadyValid);
+ return;
+ }
+
+ state = &DerivationGoal::closureRepaired;
+}
+
+
+void DerivationGoal::closureRepaired()
+{
+ trace("closure repaired");
+ if (nrFailed > 0)
+ throw Error(format("some paths in the output closure of derivation ‘%1%’ could not be repaired") % drvPath);
+ done(BuildResult::AlreadyValid);
+}
+
+
+void DerivationGoal::inputsRealised()
+{
+ trace("all inputs realised");
+
+ if (nrFailed != 0) {
+ if (!useDerivation)
+ throw Error(format("some dependencies of ‘%1%’ are missing") % drvPath);
+ printError(
+ format("cannot build derivation ‘%1%’: %2% dependencies couldn't be built")
+ % drvPath % nrFailed);
+ done(BuildResult::DependencyFailed);
+ return;
+ }
+
+ if (retrySubstitution) {
+ haveDerivation();
+ return;
+ }
+
+ /* Gather information necessary for computing the closure and/or
+ running the build hook. */
+
+ /* The outputs are referenceable paths. */
+ for (auto & i : drv->outputs) {
+ debug(format("building path ‘%1%’") % i.second.path);
+ allPaths.insert(i.second.path);
+ }
+
+ /* Determine the full set of input paths. */
+
+ /* First, the input derivations. */
+ if (useDerivation)
+ for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
+ /* Add the relevant output closures of the input derivation
+ `i' as input paths. Only add the closures of output paths
+ that are specified as inputs. */
+ assert(worker.store.isValidPath(i.first));
+ Derivation inDrv = worker.store.derivationFromPath(i.first);
+ for (auto & j : i.second)
+ if (inDrv.outputs.find(j) != inDrv.outputs.end())
+ worker.store.computeFSClosure(inDrv.outputs[j].path, inputPaths);
+ else
+ throw Error(
+ format("derivation ‘%1%’ requires non-existent output ‘%2%’ from input derivation ‘%3%’")
+ % drvPath % j % i.first);
+ }
+
+ /* Second, the input sources. */
+ worker.store.computeFSClosure(drv->inputSrcs, inputPaths);
+
+ debug(format("added input paths %1%") % showPaths(inputPaths));
+
+ allPaths.insert(inputPaths.begin(), inputPaths.end());
+
+ /* Is this a fixed-output derivation? */
+ fixedOutput = true;
+ for (auto & i : drv->outputs)
+ if (i.second.hash == "") fixedOutput = false;
+
+ /* Don't repeat fixed-output derivations since they're already
+ verified by their output hash. */
+ nrRounds = fixedOutput ? 1 : settings.buildRepeat + 1;
+
+ /* Okay, try to build. Note that here we don't wait for a build
+ slot to become available, since we don't need one if there is a
+ build hook. */
+ state = &DerivationGoal::tryToBuild;
+ worker.wakeUp(shared_from_this());
+
+ result = BuildResult();
+}
+
+
+void DerivationGoal::tryToBuild()
+{
+ trace("trying to build");
+
+ /* Check for the possibility that some other goal in this process
+ has locked the output since we checked in haveDerivation().
+ (It can't happen between here and the lockPaths() call below
+ because we're not allowing multi-threading.) If so, put this
+ goal to sleep until another goal finishes, then try again. */
+ for (auto & i : drv->outputs)
+ if (pathIsLockedByMe(worker.store.toRealPath(i.second.path))) {
+ debug(format("putting derivation ‘%1%’ to sleep because ‘%2%’ is locked by another goal")
+ % drvPath % i.second.path);
+ worker.waitForAnyGoal(shared_from_this());
+ return;
+ }
+
+ /* Obtain locks on all output paths. The locks are automatically
+ released when we exit this function or Nix crashes. If we
+ can't acquire the lock, then continue; hopefully some other
+ goal can start a build, and if not, the main loop will sleep a
+ few seconds and then retry this goal. */
+ PathSet lockFiles;
+ for (auto & outPath : drv->outputPaths())
+ lockFiles.insert(worker.store.toRealPath(outPath));
+
+ if (!outputLocks.lockPaths(lockFiles, "", false)) {
+ worker.waitForAWhile(shared_from_this());
+ return;
+ }
+
+ /* Now check again whether the outputs are valid. This is because
+ another process may have started building in parallel. After
+ it has finished and released the locks, we can (and should)
+ reuse its results. (Strictly speaking the first check can be
+ omitted, but that would be less efficient.) Note that since we
+ now hold the locks on the output paths, no other process can
+ build this derivation, so no further checks are necessary. */
+ validPaths = checkPathValidity(true, buildMode == bmRepair);
+ if (buildMode != bmCheck && validPaths.size() == drv->outputs.size()) {
+ debug(format("skipping build of derivation ‘%1%’, someone beat us to it") % drvPath);
+ outputLocks.setDeletion(true);
+ done(BuildResult::AlreadyValid);
+ return;
+ }
+
+ missingPaths = drv->outputPaths();
+ if (buildMode != bmCheck)
+ for (auto & i : validPaths) missingPaths.erase(i);
+
+ /* If any of the outputs already exist but are not valid, delete
+ them. */
+ for (auto & i : drv->outputs) {
+ Path path = i.second.path;
+ if (worker.store.isValidPath(path)) continue;
+ debug(format("removing invalid path ‘%1%’") % path);
+ deletePath(worker.store.toRealPath(path));
+ }
+
+ /* Don't do a remote build if the derivation has the attribute
+ `preferLocalBuild' set. Also, check and repair modes are only
+ supported for local builds. */
+ bool buildLocally = buildMode != bmNormal || drv->willBuildLocally();
+
+ /* Is the build hook willing to accept this job? */
+ if (!buildLocally) {
+ switch (tryBuildHook()) {
+ case rpAccept:
+ /* Yes, it has started doing so. Wait until we get
+ EOF from the hook. */
+ result.startTime = time(0); // inexact
+ state = &DerivationGoal::buildDone;
+ return;
+ case rpPostpone:
+ /* Not now; wait until at least one child finishes or
+ the wake-up timeout expires. */
+ worker.waitForAWhile(shared_from_this());
+ outputLocks.unlock();
+ return;
+ case rpDecline:
+ /* We should do it ourselves. */
+ break;
+ }
+ }
+
+ /* Make sure that we are allowed to start a build. If this
+ derivation prefers to be done locally, do it even if
+ maxBuildJobs is 0. */
+ unsigned int curBuilds = worker.getNrLocalBuilds();
+ if (curBuilds >= settings.maxBuildJobs && !(buildLocally && curBuilds == 0)) {
+ worker.waitForBuildSlot(shared_from_this());
+ outputLocks.unlock();
+ return;
+ }
+
+ try {
+
+ /* Okay, we have to build. */
+ startBuilder();
+
+ } catch (BuildError & e) {
+ printError(e.msg());
+ outputLocks.unlock();
+ buildUser.reset();
+ worker.permanentFailure = true;
+ done(BuildResult::InputRejected, e.msg());
+ return;
+ }
+
+ /* This state will be reached when we get EOF on the child's
+ log pipe. */
+ state = &DerivationGoal::buildDone;
+}
+
+
+void replaceValidPath(const Path & storePath, const Path tmpPath)
+{
+ /* We can't atomically replace storePath (the original) with
+ tmpPath (the replacement), so we have to move it out of the
+ way first. We'd better not be interrupted here, because if
+ we're repairing (say) Glibc, we end up with a broken system. */
+ Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % rand()).str();
+ if (pathExists(storePath))
+ rename(storePath.c_str(), oldPath.c_str());
+ if (rename(tmpPath.c_str(), storePath.c_str()) == -1)
+ throw SysError(format("moving ‘%1%’ to ‘%2%’") % tmpPath % storePath);
+ deletePath(oldPath);
+}
+
+
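+/* Error used to signal that repeated rounds of the same build did not
+ produce the same result (see the handling of
+ BuildResult::NotDeterministic in buildDone()). */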
+MakeError(NotDeterministic, BuildError)
+
+
+void DerivationGoal::buildDone()
+{
+ trace("build done");
+
+ /* Release the build user at the end of this function. We don't do
+ it right away because we don't want another build grabbing this
+ uid and then messing around with our output. */
+ Finally releaseBuildUser([&]() { buildUser.reset(); });
+
+ /* Since we got an EOF on the logger pipe, the builder is presumed
+ to have terminated. In fact, the builder could also have
+ simply closed its end of the pipe, so just to be sure,
+ kill it. */
+ int status = hook ? hook->pid.kill() : pid.kill();
+
+ debug(format("builder process for ‘%1%’ finished") % drvPath);
+
+ result.timesBuilt++;
+ result.stopTime = time(0);
+
+ /* So the child is gone now. */
+ worker.childTerminated(this);
+
+ /* Close the read side of the logger pipe. */
+ if (hook) {
+ hook->builderOut.readSide = -1;
+ hook->fromHook.readSide = -1;
+ }
+ else builderOut.readSide = -1;
+
+ /* Close the log file. */
+ closeLogFile();
+
+ /* When running under a build user, make sure that all processes
+ running under that uid are gone. This is to prevent a
+ malicious user from leaving behind a process that keeps files
+ open and modifies them after they have been chown'ed to
+ root. */
+ if (buildUser) buildUser->kill();
+
+ bool diskFull = false;
+
+ try {
+
+ /* Check the exit status. */
+ if (!statusOk(status)) {
+
+ /* Heuristically check whether the build failure may have
+ been caused by a disk full condition. We have no way
+ of knowing whether the build actually got an ENOSPC.
+ So instead, check if the disk is (nearly) full now. If
+ so, we don't mark this build as a permanent failure. */
+#if HAVE_STATVFS
+ unsigned long long required = 8ULL * 1024 * 1024; // FIXME: make configurable
+ struct statvfs st;
+ if (statvfs(worker.store.realStoreDir.c_str(), &st) == 0 &&
+ (unsigned long long) st.f_bavail * st.f_bsize < required)
+ diskFull = true;
+ if (statvfs(tmpDir.c_str(), &st) == 0 &&
+ (unsigned long long) st.f_bavail * st.f_bsize < required)
+ diskFull = true;
+#endif
+
+ deleteTmpDir(false);
+
+ /* Move paths out of the chroot for easier debugging of
+ build failures. */
+ if (useChroot && buildMode == bmNormal)
+ for (auto & i : missingPaths)
+ if (pathExists(chrootRootDir + i))
+ rename((chrootRootDir + i).c_str(), i.c_str());
+
+ std::string msg = (format("builder for ‘%1%’ %2%")
+ % drvPath % statusToString(status)).str();
+
+ if (!settings.verboseBuild && !logTail.empty()) {
+ msg += (format("; last %d log lines:") % logTail.size()).str();
+ for (auto & line : logTail)
+ msg += "\n " + line;
+ }
+
+ if (diskFull)
+ msg += "\nnote: build failure may have been caused by lack of free disk space";
+
+ throw BuildError(msg);
+ }
+
+ /* Compute the FS closure of the outputs and register them as
+ being valid. */
+ registerOutputs();
+
+ if (buildMode == bmCheck) {
+ done(BuildResult::Built);
+ return;
+ }
+
+ /* Delete unused redirected outputs (when doing hash rewriting). */
+ for (auto & i : redirectedOutputs)
+ deletePath(i.second);
+
+ /* Delete the chroot (if we were using one). */
+ autoDelChroot.reset(); /* this runs the destructor */
+
+ deleteTmpDir(true);
+
+ /* Repeat the build if necessary. */
+ if (curRound++ < nrRounds) {
+ outputLocks.unlock();
+ state = &DerivationGoal::tryToBuild;
+ worker.wakeUp(shared_from_this());
+ return;
+ }
+
+ /* It is now safe to delete the lock files, since all future
+ lockers will see that the output paths are valid; they will
+ not create new lock files with the same names as the old
+ (unlinked) lock files. */
+ outputLocks.setDeletion(true);
+ outputLocks.unlock();
+
+ } catch (BuildError & e) {
+ if (!hook)
+ printError(e.msg());
+ outputLocks.unlock();
+
+ BuildResult::Status st = BuildResult::MiscFailure;
+
+ if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101)
+ st = BuildResult::TimedOut;
+
+ else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) {
+ }
+
+ else {
+ st =
+ dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic :
+ statusOk(status) ? BuildResult::OutputRejected :
+ fixedOutput || diskFull ? BuildResult::TransientFailure :
+ BuildResult::PermanentFailure;
+ }
+
+ done(st, e.msg());
+ return;
+ }
+
+ done(BuildResult::Built);
+}
+
+
+HookReply DerivationGoal::tryBuildHook()
+{
+ if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "" || !useDerivation) return rpDecline;
+
+ if (!worker.hook)
+ worker.hook = std::make_unique<HookInstance>();
+
+ try {
+
+ /* Tell the hook about system features (beyond the system type)
+ required from the build machine. (The hook could parse the
+ drv file itself, but this is easier.) */
+ Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
+ for (auto & i : features) checkStoreName(i); /* !!! abuse */
+
+ /* Send the request to the hook. */
+ writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
+ % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
+ % drv->platform % drvPath % concatStringsSep(",", features)).str());
+
+ /* Read the first line of input, which should be a word indicating
+ whether the hook wishes to perform the build. */
+ string reply;
+ while (true) {
+ string s = readLine(worker.hook->fromHook.readSide.get());
+ if (string(s, 0, 2) == "# ") {
+ reply = string(s, 2);
+ break;
+ }
+ s += "\n";
+ writeToStderr(s);
+ }
+
+ debug(format("hook reply is ‘%1%’") % reply);
+
+ if (reply == "decline" || reply == "postpone")
+ return reply == "decline" ? rpDecline : rpPostpone;
+ else if (reply != "accept")
+ throw Error(format("bad hook reply ‘%1%’") % reply);
+
+ } catch (SysError & e) {
+ if (e.errNo == EPIPE) {
+ printError("build hook died unexpectedly: %s",
+ chomp(drainFD(worker.hook->fromHook.readSide.get())));
+ worker.hook = 0;
+ return rpDecline;
+ } else
+ throw;
+ }
+
+ printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths));
+
+ hook = std::move(worker.hook);
+
+ /* Tell the hook all the inputs that have to be copied to the
+ remote system. This unfortunately has to contain the entire
+ derivation closure to ensure that the validity invariant holds
+ on the remote system. (I.e., it's unfortunate that we have to
+ list it since the remote system *probably* already has it.) */
+ PathSet allInputs;
+ allInputs.insert(inputPaths.begin(), inputPaths.end());
+ worker.store.computeFSClosure(drvPath, allInputs);
+
+ string s;
+ for (auto & i : allInputs) { s += i; s += ' '; }
+ writeLine(hook->toHook.writeSide.get(), s);
+
+ /* Tell the hooks the missing outputs that have to be copied back
+ from the remote system. */
+ s = "";
+ for (auto & i : missingPaths) { s += i; s += ' '; }
+ writeLine(hook->toHook.writeSide.get(), s);
+
+ hook->toHook.writeSide = -1;
+
+ /* Create the log file and pipe. */
+ Path logFile = openLogFile();
+
+ set<int> fds;
+ fds.insert(hook->fromHook.readSide.get());
+ fds.insert(hook->builderOut.readSide.get());
+ worker.childStarted(shared_from_this(), fds, false, false);
+
+ return rpAccept;
+}
+
+
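+/* chmod() wrapper that turns failures into exceptions. */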
+void chmod_(const Path & path, mode_t mode)
+{
+ if (chmod(path.c_str(), mode) == -1)
+ throw SysError(format("setting permissions on ‘%1%’") % path);
+}
+
+
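+/* Entry point handed to clone() in startBuilder(); it casts the
+ argument back to the owning DerivationGoal and runs the builder. */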
+int childEntry(void * arg)
+{
+ ((DerivationGoal *) arg)->runChild();
+ return 1;
+}
+
+
+void DerivationGoal::startBuilder()
+{
+ auto f = format(
+ buildMode == bmRepair ? "repairing path(s) %1%" :
+ buildMode == bmCheck ? "checking path(s) %1%" :
+ nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" :
+ "building path(s) %1%");
+ f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
+ printInfo(f % showPaths(missingPaths) % curRound % nrRounds);
+
+ /* Right platform? */
+ if (!drv->canBuildLocally()) {
+ throw Error(
+ format("a ‘%1%’ is required to build ‘%3%’, but I am a ‘%2%’")
+ % drv->platform % settings.thisSystem % drvPath);
+ }
+
+#if __APPLE__
+ additionalSandboxProfile = get(drv->env, "__sandboxProfile");
+#endif
+
+ /* Are we doing a chroot build? */
+ {
+ if (settings.sandboxMode == smEnabled) {
+ if (get(drv->env, "__noChroot") == "1")
+ throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, "
+ "but that's not allowed when ‘build-use-sandbox’ is ‘true’") % drvPath);
+#if __APPLE__
+ if (additionalSandboxProfile != "")
+ throw Error(format("derivation ‘%1%’ specifies a sandbox profile, "
+ "but this is only allowed when ‘build-use-sandbox’ is ‘relaxed’") % drvPath);
+#endif
+ useChroot = true;
+ }
+ else if (settings.sandboxMode == smDisabled)
+ useChroot = false;
+ else if (settings.sandboxMode == smRelaxed)
+ useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1";
+ }
+
+ if (worker.store.storeDir != worker.store.realStoreDir)
+ useChroot = true;
+
+ /* If `build-users-group' is not empty, then we have to build as
+ one of the members of that group. */
+ if (settings.buildUsersGroup != "" && getuid() == 0) {
+ buildUser = std::make_unique<UserLock>();
+
+ /* Make sure that no other processes are executing under this
+ uid. */
+ buildUser->kill();
+ }
+
+ /* Create a temporary directory where the build will take
+ place. */
+ auto drvName = storePathToName(drvPath);
+ tmpDir = createTempDir("", "nix-build-" + drvName, false, false, 0700);
+
+ /* In a sandbox, for determinism, always use the same temporary
+ directory. */
+ tmpDirInSandbox = useChroot ? canonPath("/tmp", true) + "/nix-build-" + drvName + "-0" : tmpDir;
+ chownToBuilder(tmpDir);
+
+ /* Substitute output placeholders with the actual output paths. */
+ for (auto & output : drv->outputs)
+ inputRewrites[hashPlaceholder(output.first)] = output.second.path;
+
+ /* Construct the environment passed to the builder. */
+ initEnv();
+
+ writeStructuredAttrs();
+
+ /* Handle exportReferencesGraph(), if set. */
+ doExportReferencesGraph();
+
+ if (useChroot) {
+
+ /* Allow a user-configurable set of directories from the
+ host file system. */
+ PathSet dirs = settings.sandboxPaths;
+ PathSet dirs2 = settings.extraSandboxPaths;
+ dirs.insert(dirs2.begin(), dirs2.end());
+
+ dirsInChroot.clear();
+
+ for (auto i : dirs) {
+ if (i.empty()) continue;
+ bool optional = false;
+ if (i[i.size() - 1] == '?') {
+ optional = true;
+ i.pop_back();
+ }
+ size_t p = i.find('=');
+ if (p == string::npos)
+ dirsInChroot[i] = {i, optional};
+ else
+ dirsInChroot[string(i, 0, p)] = {string(i, p + 1), optional};
+ }
+ dirsInChroot[tmpDirInSandbox] = tmpDir;
+
+ /* Add the closure of store paths to the chroot. */
+ PathSet closure;
+ for (auto & i : dirsInChroot)
+ try {
+ if (worker.store.isInStore(i.second.source))
+ worker.store.computeFSClosure(worker.store.toStorePath(i.second.source), closure);
+ } catch (Error & e) {
+ throw Error(format("while processing ‘build-sandbox-paths’: %s") % e.what());
+ }
+ for (auto & i : closure)
+ dirsInChroot[i] = i;
+
+ PathSet allowedPaths = settings.allowedImpureHostPrefixes;
+
+ /* This works like the above, except on a per-derivation level */
+ Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps"));
+
+ for (auto & i : impurePaths) {
+ bool found = false;
+ /* Note: we're not resolving symlinks here to prevent
+ giving a non-root user info about inaccessible
+ files. */
+ Path canonI = canonPath(i);
+ /* If only we had a trie to do this more efficiently :) Luckily, these sets are generally pretty small. */
+ for (auto & a : allowedPaths) {
+ Path canonA = canonPath(a);
+ if (canonI == canonA || isInDir(canonI, canonA)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ throw Error(format("derivation ‘%1%’ requested impure path ‘%2%’, but it was not in allowed-impure-host-deps") % drvPath % i);
+
+ dirsInChroot[i] = i;
+ }
+
+#if __linux__
+ /* Create a temporary directory in which we set up the chroot
+ environment using bind-mounts. We put it in the Nix store
+ to ensure that we can create hard-links to non-directory
+ inputs in the fake Nix store in the chroot (see below). */
+ chrootRootDir = worker.store.toRealPath(drvPath) + ".chroot";
+ deletePath(chrootRootDir);
+
+ /* Clean up the chroot directory automatically. */
+ autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir);
+
+ printMsg(lvlChatty, format("setting up chroot environment in ‘%1%’") % chrootRootDir);
+
+ if (mkdir(chrootRootDir.c_str(), 0750) == -1)
+ throw SysError(format("cannot create ‘%1%’") % chrootRootDir);
+
+ if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
+ throw SysError(format("cannot change ownership of ‘%1%’") % chrootRootDir);
+
+ /* Create a writable /tmp in the chroot. Many builders need
+ this. (Of course they should really respect $TMPDIR
+ instead.) */
+ Path chrootTmpDir = chrootRootDir + "/tmp";
+ createDirs(chrootTmpDir);
+ chmod_(chrootTmpDir, 01777);
+
+ /* Create a /etc/passwd with entries for the build user and the
+ nobody account. The latter is kind of a hack to support
+ Samba-in-QEMU. */
+ createDirs(chrootRootDir + "/etc");
+
+ writeFile(chrootRootDir + "/etc/passwd",
+ (format(
+ "root:x:0:0:Nix build user:/:/noshell\n"
+ "nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
+ "nobody:x:65534:65534:Nobody:/:/noshell\n") % sandboxUid % sandboxGid).str());
+
+ /* Declare the build user's group so that programs get a consistent
+ view of the system (e.g., "id -gn"). */
+ writeFile(chrootRootDir + "/etc/group",
+ (format(
+ "root:x:0:\n"
+ "nixbld:!:%1%:\n"
+ "nogroup:x:65534:\n") % sandboxGid).str());
+
+ /* Create /etc/hosts with localhost entry. */
+ if (!fixedOutput)
+ writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+
+ /* Make the closure of the inputs available in the chroot,
+ rather than the whole Nix store. This prevents any access
+ to undeclared dependencies. Directories are bind-mounted,
+ while other inputs are hard-linked (since only directories
+ can be bind-mounted). !!! As an extra security
+ precaution, make the fake Nix store only writable by the
+ build user. */
+ Path chrootStoreDir = chrootRootDir + worker.store.storeDir;
+ createDirs(chrootStoreDir);
+ chmod_(chrootStoreDir, 01775);
+
+ if (buildUser && chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1)
+ throw SysError(format("cannot change ownership of ‘%1%’") % chrootStoreDir);
+
+ for (auto & i : inputPaths) {
+ Path r = worker.store.toRealPath(i);
+ struct stat st;
+ if (lstat(r.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % i);
+ if (S_ISDIR(st.st_mode))
+ dirsInChroot[i] = r;
+ else {
+ Path p = chrootRootDir + i;
+ if (link(r.c_str(), p.c_str()) == -1) {
+ /* Hard-linking fails if we exceed the maximum
+ link count on a file (e.g. 32000 of ext3),
+ which is quite possible after a `nix-store
+ --optimise'. */
+ if (errno != EMLINK)
+ throw SysError(format("linking ‘%1%’ to ‘%2%’") % p % i);
+ StringSink sink;
+ dumpPath(r, sink);
+ StringSource source(*sink.s);
+ restorePath(p, source);
+ }
+ }
+ }
+
+ /* If we're repairing, checking or rebuilding part of a
+ multiple-outputs derivation, it's possible that we're
+ rebuilding a path that is in settings.dirsInChroot
+ (typically the dependencies of /bin/sh). Throw them
+ out. */
+ for (auto & i : drv->outputs)
+ dirsInChroot.erase(i.second.path);
+
+#elif __APPLE__
+ /* We don't really have any parent prep work to do (yet?);
+ all the work happens in the child instead. */
+#else
+ throw Error("sandboxing builds is not supported on this platform");
+#endif
+ }
+
+ else {
+
+ if (pathExists(homeDir))
+ throw Error(format("directory ‘%1%’ exists; please remove it") % homeDir);
+
+ /* We're not doing a chroot build, but we have some valid
+ output paths. Since we can't just overwrite or delete
+ them, we have to do hash rewriting: i.e. in the
+ environment/arguments passed to the build, we replace the
+ hashes of the valid outputs with unique dummy strings;
+ after the build, we discard the redirected outputs
+ corresponding to the valid outputs, and rewrite the
+ contents of the new outputs to replace the dummy strings
+ with the actual hashes. */
+ if (validPaths.size() > 0)
+ for (auto & i : validPaths)
+ addHashRewrite(i);
+
+ /* If we're repairing, then we don't want to delete the
+ corrupt outputs in advance. So rewrite them as well. */
+ if (buildMode == bmRepair)
+ for (auto & i : missingPaths)
+ if (worker.store.isValidPath(i) && pathExists(i)) {
+ addHashRewrite(i);
+ redirectedBadOutputs.insert(i);
+ }
+ }
+
+ if (settings.preBuildHook != "") {
+ printMsg(lvlChatty, format("executing pre-build hook ‘%1%’")
+ % settings.preBuildHook);
+ auto args = useChroot ? Strings({drvPath, chrootRootDir}) :
+ Strings({ drvPath });
+ enum BuildHookState {
+ stBegin,
+ stExtraChrootDirs
+ };
+ auto state = stBegin;
+ auto lines = runProgram(settings.preBuildHook, false, args);
+ auto lastPos = std::string::size_type{0};
+ for (auto nlPos = lines.find('\n'); nlPos != string::npos;
+ nlPos = lines.find('\n', lastPos)) {
+ auto line = std::string{lines, lastPos, nlPos - lastPos};
+ lastPos = nlPos + 1;
+ if (state == stBegin) {
+ if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") {
+ state = stExtraChrootDirs;
+ } else {
+ throw Error(format("unknown pre-build hook command ‘%1%’")
+ % line);
+ }
+ } else if (state == stExtraChrootDirs) {
+ if (line == "") {
+ state = stBegin;
+ } else {
+ auto p = line.find('=');
+ if (p == string::npos)
+ dirsInChroot[line] = line;
+ else
+ dirsInChroot[string(line, 0, p)] = string(line, p + 1);
+ }
+ }
+ }
+ }
+
+ /* Run the builder. */
+ printMsg(lvlChatty, format("executing builder ‘%1%’") % drv->builder);
+
+ /* Create the log file. */
+ Path logFile = openLogFile();
+
+ /* Create a pipe to get the output of the builder. */
+ builderOut.create();
+
+ result.startTime = time(0);
+
+ /* Fork a child to build the package. */
+ ProcessOptions options;
+
+#if __linux__
+ if (useChroot) {
+ /* Set up private namespaces for the build:
+
+ - The PID namespace causes the build to start as PID 1.
+ Processes outside of the chroot are not visible to those
+ on the inside, but processes inside the chroot are
+ visible from the outside (though with different PIDs).
+
+ - The private mount namespace ensures that all the bind
+ mounts we do will only show up in this process and its
+ children, and will disappear automatically when we're
+ done.
+
+ - The private network namespace ensures that the builder
+ cannot talk to the outside world (or vice versa). It
+ only has a private loopback interface. (Fixed-output
+ derivations are not run in a private network namespace
+ to allow functions like fetchurl to work.)
+
+ - The IPC namespace prevents the builder from communicating
+ with outside processes using SysV IPC mechanisms (shared
+ memory, message queues, semaphores). It also ensures
+ that all IPC objects are destroyed when the builder
+ exits.
+
+ - The UTS namespace ensures that builders see a hostname of
+ localhost rather than the actual hostname.
+
+ We use a helper process to do the clone() to work around
+ clone() being broken in multi-threaded programs due to
+ at-fork handlers not being run. Note that we use
+ CLONE_PARENT to ensure that the real builder is parented to
+ us.
+ */
+
+ if (!fixedOutput)
+ privateNetwork = true;
+
+ userNamespaceSync.create();
+
+ options.allowVfork = false;
+
+ Pid helper = startProcess([&]() {
+
+ /* Drop additional groups here because we can't do it
+ after we've created the new user namespace. FIXME:
+ this means that if we're not root in the parent
+ namespace, we can't drop additional groups; they will
+ be mapped to nogroup in the child namespace. There does
+ not seem to be a workaround for this. (But who can tell
+ from reading user_namespaces(7)?)
+ See also https://lwn.net/Articles/621612/. */
+ if (getuid() == 0 && setgroups(0, 0) == -1)
+ throw SysError("setgroups failed");
+
+ size_t stackSize = 1 * 1024 * 1024;
+ char * stack = (char *) mmap(0, stackSize,
+ PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (stack == MAP_FAILED) throw SysError("allocating stack");
+
+ int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
+ if (privateNetwork)
+ flags |= CLONE_NEWNET;
+
+ pid_t child = clone(childEntry, stack + stackSize, flags, this);
+ if (child == -1 && errno == EINVAL)
+ /* Fallback for Linux < 2.13 where CLONE_NEWPID and
+ CLONE_PARENT are not allowed together. */
+ child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this);
+ if (child == -1) throw SysError("cloning builder process");
+
+ writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
+ _exit(0);
+ }, options);
+
+ if (helper.wait() != 0)
+ throw Error("unable to start build process");
+
+ userNamespaceSync.readSide = -1;
+
+ pid_t tmp;
+ if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
+ pid = tmp;
+
+ /* Set the UID/GID mapping of the builder's user namespace
+ such that the sandbox user maps to the build user, or to
+ the calling user (if build users are disabled). */
+ uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
+ uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
+
+ writeFile("/proc/" + std::to_string(pid) + "/uid_map",
+ (format("%d %d 1") % sandboxUid % hostUid).str());
+
+ writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
+
+ writeFile("/proc/" + std::to_string(pid) + "/gid_map",
+ (format("%d %d 1") % sandboxGid % hostGid).str());
+
+ /* Signal the builder that we've updated its user
+ namespace. */
+ writeFull(userNamespaceSync.writeSide.get(), "1");
+ userNamespaceSync.writeSide = -1;
+
+ } else
+#endif
+ {
+ options.allowVfork = !buildUser && !drv->isBuiltin();
+ pid = startProcess([&]() {
+ runChild();
+ }, options);
+ }
+
+ /* parent */
+ pid.setSeparatePG(true);
+ builderOut.writeSide = -1;
+ worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, true);
+
+ /* Check if setting up the build environment failed. */
+ while (true) {
+ string msg = readLine(builderOut.readSide.get());
+ if (string(msg, 0, 1) == "\1") {
+ if (msg.size() == 1) break;
+ throw Error(string(msg, 1));
+ }
+ debug(msg);
+ }
+}
+
+
+void DerivationGoal::initEnv()
+{
+ env.clear();
+
+ /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
+ PATH is not set. We don't want this, so we fill it in with some dummy
+ value. */
+ env["PATH"] = "/path-not-set";
+
+ /* Set HOME to a non-existing path to prevent certain programs from using
+ /etc/passwd (or NIS, or whatever) to locate the home directory (for
+ example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd
+ if HOME is not set, but they will just assume that the settings file
+ they are looking for does not exist if HOME is set but points to some
+ non-existing path. */
+ env["HOME"] = homeDir;
+
+ /* Tell the builder where the Nix store is. Usually they
+ shouldn't care, but this is useful for purity checking (e.g.,
+ the compiler or linker might only want to accept paths to files
+ in the store or in the build directory). */
+ env["NIX_STORE"] = worker.store.storeDir;
+
+ /* The maximum number of cores to utilize for parallel building. */
+ env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+
+ /* In non-structured mode, add all bindings specified in the
+ derivation via the environment, except those listed in the
+ passAsFile attribute. Those are passed as file names pointing
+ to temporary files containing the contents. Note that
+ passAsFile is ignored in structured mode because it's not
+ needed (attributes are not passed through the environment, so
+ there is no size constraint). */
+ if (!drv->env.count("__json")) {
+
+ StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile"));
+ int fileNr = 0;
+ for (auto & i : drv->env) {
+ if (passAsFile.find(i.first) == passAsFile.end()) {
+ env[i.first] = i.second;
+ } else {
+ string fn = ".attr-" + std::to_string(fileNr++);
+ Path p = tmpDir + "/" + fn;
+ writeFile(p, i.second);
+ chownToBuilder(p);
+ env[i.first + "Path"] = tmpDirInSandbox + "/" + fn;
+ }
+ }
+
+ }
+
+ /* For convenience, set an environment pointing to the top build
+ directory. */
+ env["NIX_BUILD_TOP"] = tmpDirInSandbox;
+
+ /* Also set TMPDIR and variants to point to this directory. */
+ env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox;
+
+ /* Explicitly set PWD to prevent problems with chroot builds. In
+ particular, dietlibc cannot figure out the cwd because the
+ inode of the current directory doesn't appear in .. (because
+ getdents returns the inode of the mount point). */
+ env["PWD"] = tmpDirInSandbox;
+
+ /* Compatibility hack with Nix <= 0.7: if this is a fixed-output
+ derivation, tell the builder, so that for instance `fetchurl'
+ can skip checking the output. On older Nixes, this environment
+ variable won't be set, so `fetchurl' will do the check. */
+ if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1";
+
+ /* *Only* if this is a fixed-output derivation, propagate the
+ values of the environment variables specified in the
+ `impureEnvVars' attribute to the builder. This allows for
+ instance environment variables for proxy configuration such as
+ `http_proxy' to be easily passed to downloaders like
+ `fetchurl'. Passing such environment variables from the caller
+ to the builder is generally impure, but the output of
+ fixed-output derivations is by definition pure (since we
+ already know the cryptographic hash of the output). */
+ if (fixedOutput) {
+ Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars"));
+ for (auto & i : varNames) env[i] = getEnv(i);
+ }
+}
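
The passAsFile handling above splits the derivation's environment into variables passed directly and variables written to files in the build directory, with a "<name>Path" variable pointing at each file. A rough sketch of that split using plain standard-library types; writeTextFile and buildEnvironment are made-up names for illustration.

// Sketch: the passAsFile split, independent of the store machinery.
#include <fstream>
#include <map>
#include <set>
#include <string>

static void writeTextFile(const std::string & path, const std::string & s)
{
    std::ofstream(path) << s;
}

std::map<std::string, std::string> buildEnvironment(
    const std::map<std::string, std::string> & drvEnv,
    const std::set<std::string> & passAsFile,
    const std::string & tmpDir)
{
    std::map<std::string, std::string> env;
    int fileNr = 0;
    for (auto & kv : drvEnv) {
        if (!passAsFile.count(kv.first))
            env[kv.first] = kv.second;                       // ordinary variable
        else {
            // Large values become files; the builder sees "<name>Path" instead.
            std::string fn = tmpDir + "/.attr-" + std::to_string(fileNr++);
            writeTextFile(fn, kv.second);
            env[kv.first + "Path"] = fn;
        }
    }
    return env;
}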
+
+
+void DerivationGoal::writeStructuredAttrs()
+{
+ auto json = drv->env.find("__json");
+ if (json == drv->env.end()) return;
+
+ writeFile(tmpDir + "/.attrs.json", rewriteStrings(json->second, inputRewrites));
+}
+
+
+void DerivationGoal::chownToBuilder(const Path & path)
+{
+ if (!buildUser) return;
+ if (chown(path.c_str(), buildUser->getUID(), buildUser->getGID()) == -1)
+ throw SysError(format("cannot change ownership of ‘%1%’") % path);
+}
+
+
+void DerivationGoal::doExportReferencesGraph()
+{
+ /* The `exportReferencesGraph' feature allows the references graph
+ to be passed to a builder. This attribute should be a list of
+ pairs [name1 path1 name2 path2 ...]. The references graph of
+ each `pathN' will be stored in a text file `nameN' in the
+ temporary build directory. The text files have the format used
+ by `nix-store --register-validity'. However, the deriver
+ fields are left empty. */
+ string s = get(drv->env, "exportReferencesGraph");
+ Strings ss = tokenizeString<Strings>(s);
+ if (ss.size() % 2 != 0)
+ throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s);
+ for (Strings::iterator i = ss.begin(); i != ss.end(); ) {
+ string fileName = *i++;
+ checkStoreName(fileName); /* !!! abuse of this function */
+
+ /* Check that the store path is valid. */
+ Path storePath = *i++;
+ if (!worker.store.isInStore(storePath))
+ throw BuildError(format("‘exportReferencesGraph’ contains a non-store path ‘%1%’")
+ % storePath);
+ storePath = worker.store.toStorePath(storePath);
+ if (!worker.store.isValidPath(storePath))
+ throw BuildError(format("‘exportReferencesGraph’ contains an invalid path ‘%1%’")
+ % storePath);
+
+ /* If there are derivations in the graph, then include their
+ outputs as well. This is useful if you want to do things
+ like passing all build-time dependencies of some path to a
+ derivation that builds a NixOS DVD image. */
+ PathSet paths, paths2;
+ worker.store.computeFSClosure(storePath, paths);
+ paths2 = paths;
+
+ for (auto & j : paths2) {
+ if (isDerivation(j)) {
+ Derivation drv = worker.store.derivationFromPath(j);
+ for (auto & k : drv.outputs)
+ worker.store.computeFSClosure(k.second.path, paths);
+ }
+ }
+
+ if (!drv->env.count("__json")) {
+
+ /* Write closure info to <fileName>. */
+ writeFile(tmpDir + "/" + fileName,
+ worker.store.makeValidityRegistration(paths, false, false));
+
+ } else {
+
+ /* Write a more comprehensive JSON serialisation to
+ <fileName>. */
+ std::ostringstream str;
+ {
+ JSONPlaceholder jsonRoot(str, true);
+ worker.store.pathInfoToJSON(jsonRoot, paths, false, true);
+ }
+ writeFile(tmpDir + "/" + fileName, str.str());
+
+ }
+ }
+}
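
The exportReferencesGraph attribute is a flat, whitespace-separated list of name/path pairs. A small sketch of just the pairing logic (the function name is illustrative; validation against the store is left out):

// Sketch: pair up the "name1 path1 name2 path2 ..." tokens.
#include <cstddef>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, std::string>> parseExportRefs(const std::string & attr)
{
    std::vector<std::string> tokens;
    std::istringstream in(attr);
    for (std::string t; in >> t; ) tokens.push_back(t);

    if (tokens.size() % 2 != 0)
        throw std::runtime_error("odd number of tokens in exportReferencesGraph");

    std::vector<std::pair<std::string, std::string>> result;
    for (std::size_t i = 0; i + 1 < tokens.size(); i += 2)
        result.emplace_back(tokens[i], tokens[i + 1]);       // (fileName, storePath)
    return result;
}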
+
+
+void DerivationGoal::runChild()
+{
+ /* Warning: in the child we should absolutely not make any SQLite
+ calls! */
+
+ try { /* child */
+
+ commonChildInit(builderOut);
+
+ bool setUser = true;
+
+ /* Make the contents of netrc available to builtin:fetchurl
+ (which may run under a different uid and/or in a sandbox). */
+ std::string netrcData;
+ try {
+ if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
+ netrcData = readFile(settings.netrcFile);
+ } catch (SysError &) { }
+
+#if __linux__
+ if (useChroot) {
+
+ userNamespaceSync.writeSide = -1;
+
+ if (drainFD(userNamespaceSync.readSide.get()) != "1")
+ throw Error("user namespace initialisation failed");
+
+ userNamespaceSync.readSide = -1;
+
+ if (privateNetwork) {
+
+ /* Initialise the loopback interface. */
+ AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP));
+ if (!fd) throw SysError("cannot open IP socket");
+
+ struct ifreq ifr;
+ strcpy(ifr.ifr_name, "lo");
+ ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING;
+ if (ioctl(fd.get(), SIOCSIFFLAGS, &ifr) == -1)
+ throw SysError("cannot set loopback interface flags");
+ }
+
+ /* Set the hostname etc. to fixed values. */
+ char hostname[] = "localhost";
+ if (sethostname(hostname, sizeof(hostname)) == -1)
+ throw SysError("cannot set host name");
+ char domainname[] = "(none)"; // kernel default
+ if (setdomainname(domainname, sizeof(domainname)) == -1)
+ throw SysError("cannot set domain name");
+
+ /* Make all filesystems private. This is necessary
+ because subtrees may have been mounted as "shared"
+ (MS_SHARED). (Systemd does this, for instance.) Even
+ though we have a private mount namespace, mounting
+ filesystems on top of a shared subtree still propagates
+ outside of the namespace. Making a subtree private is
+ local to the namespace, though, so setting MS_PRIVATE
+ does not affect the outside world. */
+ if (mount(0, "/", 0, MS_REC|MS_PRIVATE, 0) == -1) {
+ throw SysError("unable to make ‘/’ private mount");
+ }
+
+ /* Bind-mount chroot directory to itself, to treat it as a
+ different filesystem from /, as needed for pivot_root. */
+ if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), 0, MS_BIND, 0) == -1)
+ throw SysError(format("unable to bind mount ‘%1%’") % chrootRootDir);
+
+ /* Set up a nearly empty /dev, unless the user asked to
+ bind-mount the host /dev. */
+ Strings ss;
+ if (dirsInChroot.find("/dev") == dirsInChroot.end()) {
+ createDirs(chrootRootDir + "/dev/shm");
+ createDirs(chrootRootDir + "/dev/pts");
+ ss.push_back("/dev/full");
+#ifdef __linux__
+ if (pathExists("/dev/kvm"))
+ ss.push_back("/dev/kvm");
+#endif
+ ss.push_back("/dev/null");
+ ss.push_back("/dev/random");
+ ss.push_back("/dev/tty");
+ ss.push_back("/dev/urandom");
+ ss.push_back("/dev/zero");
+ createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd");
+ createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin");
+ createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout");
+ createSymlink("/proc/self/fd/2", chrootRootDir + "/dev/stderr");
+ }
+
+ /* Fixed-output derivations typically need to access the
+ network, so give them access to /etc/resolv.conf and so
+ on. */
+ if (fixedOutput) {
+ ss.push_back("/etc/resolv.conf");
+ ss.push_back("/etc/nsswitch.conf");
+ ss.push_back("/etc/services");
+ ss.push_back("/etc/hosts");
+ if (pathExists("/var/run/nscd/socket"))
+ ss.push_back("/var/run/nscd/socket");
+ }
+
+ for (auto & i : ss) dirsInChroot.emplace(i, i);
+
+ /* Bind-mount all the directories from the "host"
+ filesystem that we want in the chroot
+ environment. */
+ for (auto & i : dirsInChroot) {
+ struct stat st;
+ Path source = i.second.source;
+ Path target = chrootRootDir + i.first;
+ if (source == "/proc") continue; // backwards compatibility
+ debug(format("bind mounting ‘%1%’ to ‘%2%’") % source % target);
+ if (stat(source.c_str(), &st) == -1) {
+ if (i.second.optional && errno == ENOENT)
+ continue;
+ else
+ throw SysError(format("getting attributes of path ‘%1%’") % source);
+ }
+ if (S_ISDIR(st.st_mode))
+ createDirs(target);
+ else {
+ createDirs(dirOf(target));
+ writeFile(target, "");
+ }
+ if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) == -1)
+ throw SysError(format("bind mount from ‘%1%’ to ‘%2%’ failed") % source % target);
+ }
+
+ /* Bind a new instance of procfs on /proc. */
+ createDirs(chrootRootDir + "/proc");
+ if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
+ throw SysError("mounting /proc");
+
+ /* Mount a new tmpfs on /dev/shm to ensure that whatever
+ the builder puts in /dev/shm is cleaned up automatically. */
+ if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
+ fmt("size=%s", settings.sandboxShmSize).c_str()) == -1)
+ throw SysError("mounting /dev/shm");
+
+ /* Mount a new devpts on /dev/pts. Note that this
+ requires the kernel to be compiled with
+ CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
+ if /dev/pts/ptmx exists). */
+ if (pathExists("/dev/pts/ptmx") &&
+ !pathExists(chrootRootDir + "/dev/ptmx")
+ && !dirsInChroot.count("/dev/pts"))
+ {
+ if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == -1)
+ throw SysError("mounting /dev/pts");
+ createSymlink("/dev/pts/ptmx", chrootRootDir + "/dev/ptmx");
+
+ /* Make sure /dev/pts/ptmx is world-writable. With some
+ Linux versions, it is created with permissions 0. */
+ chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
+ }
+
+ /* Do the chroot(). */
+ if (chdir(chrootRootDir.c_str()) == -1)
+ throw SysError(format("cannot change directory to ‘%1%’") % chrootRootDir);
+
+ if (mkdir("real-root", 0) == -1)
+ throw SysError("cannot create real-root directory");
+
+ if (pivot_root(".", "real-root") == -1)
+ throw SysError(format("cannot pivot old root directory onto ‘%1%’") % (chrootRootDir + "/real-root"));
+
+ if (chroot(".") == -1)
+ throw SysError(format("cannot change root directory to ‘%1%’") % chrootRootDir);
+
+ if (umount2("real-root", MNT_DETACH) == -1)
+ throw SysError("cannot unmount real root filesystem");
+
+ if (rmdir("real-root") == -1)
+ throw SysError("cannot remove real-root directory");
+
+ /* Switch to the sandbox uid/gid in the user namespace,
+ which corresponds to the build user or calling user in
+ the parent namespace. */
+ if (setgid(sandboxGid) == -1)
+ throw SysError("setgid failed");
+ if (setuid(sandboxUid) == -1)
+ throw SysError("setuid failed");
+
+ setUser = false;
+ }
+#endif
+
+ if (chdir(tmpDirInSandbox.c_str()) == -1)
+ throw SysError(format("changing into ‘%1%’") % tmpDir);
+
+ /* Close all other file descriptors. */
+ closeMostFDs(set<int>());
+
+#if __linux__
+ /* Change the personality to 32-bit if we're doing an
+ i686-linux build on an x86_64-linux machine. */
+ struct utsname utsbuf;
+ uname(&utsbuf);
+ if (drv->platform == "i686-linux" &&
+ (settings.thisSystem == "x86_64-linux" ||
+ (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) {
+ if (personality(PER_LINUX32) == -1)
+ throw SysError("cannot set i686-linux personality");
+ }
+
+ /* Impersonate a Linux 2.6 machine to get some determinism in
+ builds that depend on the kernel version. */
+ if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
+ int cur = personality(0xffffffff);
+ if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
+ }
+
+ /* Disable address space randomization for improved
+ determinism. */
+ int cur = personality(0xffffffff);
+ if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
+#endif
+
+ /* Disable core dumps by default. */
+ struct rlimit limit = { 0, RLIM_INFINITY };
+ setrlimit(RLIMIT_CORE, &limit);
+
+ // FIXME: set other limits to deterministic values?
+
+ /* Fill in the environment. */
+ Strings envStrs;
+ for (auto & i : env)
+ envStrs.push_back(rewriteStrings(i.first + "=" + i.second, inputRewrites));
+
+ /* If we are running in `build-users' mode, then switch to the
+ user we allocated above. Make sure that we drop all root
+ privileges. Note that above we have closed all file
+ descriptors except std*, so that's safe. Also note that
+ setuid() when run as root sets the real, effective and
+ saved UIDs. */
+ if (setUser && buildUser) {
+ /* Preserve supplementary groups of the build user, to allow
+ admins to specify groups such as "kvm". */
+ if (!buildUser->getSupplementaryGIDs().empty() &&
+ setgroups(buildUser->getSupplementaryGIDs().size(),
+ buildUser->getSupplementaryGIDs().data()) == -1)
+ throw SysError("cannot set supplementary groups of build user");
+
+ if (setgid(buildUser->getGID()) == -1 ||
+ getgid() != buildUser->getGID() ||
+ getegid() != buildUser->getGID())
+ throw SysError("setgid failed");
+
+ if (setuid(buildUser->getUID()) == -1 ||
+ getuid() != buildUser->getUID() ||
+ geteuid() != buildUser->getUID())
+ throw SysError("setuid failed");
+ }
+
+ /* Fill in the arguments. */
+ Strings args;
+
+ const char *builder = "invalid";
+
+ string sandboxProfile;
+ if (drv->isBuiltin()) {
+ ;
+#if __APPLE__
+ } else if (useChroot) {
+ /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */
+ PathSet ancestry;
+
+ /* We build the ancestry before adding all inputPaths to the store because we know they'll
+ all have the same parents (the store), and there might be lots of inputs. This isn't
+ particularly efficient... I doubt it'll be a bottleneck in practice */
+ for (auto & i : dirsInChroot) {
+ Path cur = i.first;
+ while (cur.compare("/") != 0) {
+ cur = dirOf(cur);
+ ancestry.insert(cur);
+ }
+ }
+
+ /* And we want the store in there regardless of how empty dirsInChroot is. This time we include
+ the innermost path component, since it's typically /nix/store and we care about that. */
+ Path cur = worker.store.storeDir;
+ while (cur.compare("/") != 0) {
+ ancestry.insert(cur);
+ cur = dirOf(cur);
+ }
+
+ /* Add all our input paths to the chroot */
+ for (auto & i : inputPaths)
+ dirsInChroot[i] = i;
+
+ /* This has to appear before import statements */
+ sandboxProfile += "(version 1)\n";
+
+ /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */
+ if (settings.darwinLogSandboxViolations) {
+ sandboxProfile += "(deny default)\n";
+ } else {
+ sandboxProfile += "(deny default (with no-log))\n";
+ }
+
+ /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
+ to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
+ Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true);
+
+ /* They don't like trailing slashes on subpath directives */
+ if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
+
+ /* Our rwx outputs */
+ sandboxProfile += "(allow file-read* file-write* process-exec\n";
+ for (auto & i : missingPaths) {
+ sandboxProfile += (format("\t(subpath \"%1%\")\n") % i.c_str()).str();
+ }
+ sandboxProfile += ")\n";
+
+ /* Our inputs (transitive dependencies and any impurities computed above)
+
+ without file-write* allowed, access() incorrectly returns EPERM
+ */
+ sandboxProfile += "(allow file-read* file-write* process-exec\n";
+ for (auto & i : dirsInChroot) {
+ if (i.first != i.second.source)
+ throw Error(format(
+ "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin")
+ % i.first % i.second.source);
+
+ string path = i.first;
+ struct stat st;
+ if (lstat(path.c_str(), &st)) {
+ if (i.second.optional && errno == ENOENT)
+ continue;
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+ }
+ if (S_ISDIR(st.st_mode))
+ sandboxProfile += (format("\t(subpath \"%1%\")\n") % path).str();
+ else
+ sandboxProfile += (format("\t(literal \"%1%\")\n") % path).str();
+ }
+ sandboxProfile += ")\n";
+
+ /* Allow file-read* on full directory hierarchy to self. Allows realpath() */
+ sandboxProfile += "(allow file-read*\n";
+ for (auto & i : ancestry) {
+ sandboxProfile += (format("\t(literal \"%1%\")\n") % i.c_str()).str();
+ }
+ sandboxProfile += ")\n";
+
+ sandboxProfile += additionalSandboxProfile;
+
+ debug("Generated sandbox profile:");
+ debug(sandboxProfile);
+
+ Path sandboxFile = drvPath + ".sb";
+ deletePath(sandboxFile);
+ autoDelSandbox.reset(sandboxFile, false);
+
+ writeFile(sandboxFile, sandboxProfile);
+
+ builder = "/usr/bin/sandbox-exec";
+ args.push_back("sandbox-exec");
+ args.push_back("-f");
+ args.push_back(sandboxFile);
+ args.push_back("-D");
+ args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir);
+ args.push_back(drv->builder);
+#endif
+ } else {
+ builder = drv->builder.c_str();
+ string builderBasename = baseNameOf(drv->builder);
+ args.push_back(builderBasename);
+ }
+
+ for (auto & i : drv->args)
+ args.push_back(rewriteStrings(i, inputRewrites));
+
+ /* Indicate that we managed to set up the build environment. */
+ writeFull(STDERR_FILENO, string("\1\n"));
+
+ /* Execute the program. This should not return. */
+ if (drv->isBuiltin()) {
+ try {
+ if (drv->builder == "builtin:fetchurl")
+ builtinFetchurl(*drv, netrcData);
+ else
+ throw Error(format("unsupported builtin function ‘%1%’") % string(drv->builder, 8));
+ _exit(0);
+ } catch (std::exception & e) {
+ writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n");
+ _exit(1);
+ }
+ }
+
+ execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+
+ throw SysError(format("executing ‘%1%’") % drv->builder);
+
+ } catch (std::exception & e) {
+ writeFull(STDERR_FILENO, "\1while setting up the build environment: " + string(e.what()) + "\n");
+ _exit(1);
+ }
+}
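
The child/parent handshake used here is a one-character protocol: the child prints ordinary log lines until setup is complete, then a line starting with '\1' whose remainder, if any, is an error message. A sketch of the parent's side of that protocol; readLine here is a trivial stand-in, not the helper used above.

// Sketch of the '\1' setup handshake, reader side.
#include <stdexcept>
#include <string>
#include <unistd.h>

static std::string readLine(int fd)
{
    std::string line;
    char c;
    while (read(fd, &c, 1) == 1 && c != '\n') line += c;
    return line;
}

void waitForSetup(int fd)
{
    while (true) {
        std::string msg = readLine(fd);
        if (!msg.empty() && msg[0] == '\1') {
            if (msg.size() == 1) return;                 // bare "\1": setup succeeded
            throw std::runtime_error(msg.substr(1));     // "\1<error>": setup failed
        }
        // Anything else is ordinary pre-setup log output.
    }
}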
+
+
+/* Parse a list of reference specifiers. Each element must either be
+ a store path, or the symbolic name of the output of the derivation
+ (such as `out'). */
+PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr)
+{
+ PathSet result;
+ Paths paths = tokenizeString<Paths>(attr);
+ for (auto & i : paths) {
+ if (store.isStorePath(i))
+ result.insert(i);
+ else if (drv.outputs.find(i) != drv.outputs.end())
+ result.insert(drv.outputs.find(i)->second.path);
+ else throw BuildError(
+ format("derivation contains an illegal reference specifier ‘%1%’") % i);
+ }
+ return result;
+}
+
+
+void DerivationGoal::registerOutputs()
+{
+ /* When using a build hook, the build hook can register the output
+ as valid (by doing `nix-store --import'). If so we don't have
+ to do anything here. */
+ if (hook) {
+ bool allValid = true;
+ for (auto & i : drv->outputs)
+ if (!worker.store.isValidPath(i.second.path)) allValid = false;
+ if (allValid) return;
+ }
+
+ ValidPathInfos infos;
+
+ /* Set of inodes seen during calls to canonicalisePathMetaData()
+ for this build's outputs. This needs to be shared between
+ outputs to allow hard links between outputs. */
+ InodesSeen inodesSeen;
+
+ Path checkSuffix = ".check";
+ bool runDiffHook = settings.runDiffHook;
+ bool keepPreviousRound = settings.keepFailed || runDiffHook;
+
+ /* Check whether the output paths were created, and grep each
+ output path to determine what other paths it references. Also make all
+ output paths read-only. */
+ for (auto & i : drv->outputs) {
+ Path path = i.second.path;
+ if (missingPaths.find(path) == missingPaths.end()) continue;
+
+ ValidPathInfo info;
+
+ Path actualPath = path;
+ if (useChroot) {
+ actualPath = chrootRootDir + path;
+ if (pathExists(actualPath)) {
+ /* Move output paths from the chroot to the Nix store. */
+ if (buildMode == bmRepair)
+ replaceValidPath(path, actualPath);
+ else
+ if (buildMode != bmCheck && rename(actualPath.c_str(), worker.store.toRealPath(path).c_str()) == -1)
+ throw SysError(format("moving build output ‘%1%’ from the sandbox to the Nix store") % path);
+ }
+ if (buildMode != bmCheck) actualPath = worker.store.toRealPath(path);
+ } else {
+ Path redirected = redirectedOutputs[path];
+ if (buildMode == bmRepair
+ && redirectedBadOutputs.find(path) != redirectedBadOutputs.end()
+ && pathExists(redirected))
+ replaceValidPath(path, redirected);
+ if (buildMode == bmCheck && redirected != "")
+ actualPath = redirected;
+ }
+
+ struct stat st;
+ if (lstat(actualPath.c_str(), &st) == -1) {
+ if (errno == ENOENT)
+ throw BuildError(
+ format("builder for ‘%1%’ failed to produce output path ‘%2%’")
+ % drvPath % path);
+ throw SysError(format("getting attributes of path ‘%1%’") % actualPath);
+ }
+
+#ifndef __CYGWIN__
+ /* Check that the output is not group or world writable, as
+ that means that someone else can have interfered with the
+ build. Also, the output should be owned by the build
+ user. */
+ if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) ||
+ (buildUser && st.st_uid != buildUser->getUID()))
+ throw BuildError(format("suspicious ownership or permission on ‘%1%’; rejecting this build output") % path);
+#endif
+
+ /* Apply hash rewriting if necessary. */
+ bool rewritten = false;
+ if (!outputRewrites.empty()) {
+ printError(format("warning: rewriting hashes in ‘%1%’; cross fingers") % path);
+
+ /* Canonicalise first. This ensures that the path we're
+ rewriting doesn't contain a hard link to /etc/shadow or
+ something like that. */
+ canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
+
+ /* FIXME: this is in-memory. */
+ StringSink sink;
+ dumpPath(actualPath, sink);
+ deletePath(actualPath);
+ sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
+ StringSource source(*sink.s);
+ restorePath(actualPath, source);
+
+ rewritten = true;
+ }
+
+ /* Check that fixed-output derivations produced the right
+ outputs (i.e., the content hash should match the specified
+ hash). */
+ if (i.second.hash != "") {
+
+ bool recursive; Hash h;
+ i.second.parseHashInfo(recursive, h);
+
+ if (!recursive) {
+ /* The output path should be a regular file without
+ execute permission. */
+ if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
+ throw BuildError(
+ format("output path ‘%1%’ should be a non-executable regular file") % path);
+ }
+
+ /* Check the hash. In hash mode, move the path produced by
+ the derivation to its content-addressed location. */
+ Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
+ if (buildMode == bmHash) {
+ Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]);
+ printError(format("build produced path ‘%1%’ with %2% hash ‘%3%’")
+ % dest % printHashType(h.type) % printHash16or32(h2));
+ if (worker.store.isValidPath(dest))
+ return;
+ Path actualDest = worker.store.toRealPath(dest);
+ if (actualPath != actualDest) {
+ PathLocks outputLocks({actualDest});
+ deletePath(actualDest);
+ if (rename(actualPath.c_str(), actualDest.c_str()) == -1)
+ throw SysError(format("moving ‘%1%’ to ‘%2%’") % actualPath % dest);
+ }
+ path = dest;
+ actualPath = actualDest;
+ } else {
+ if (h != h2)
+ throw BuildError(
+ format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected")
+ % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h));
+ }
+
+ info.ca = makeFixedOutputCA(recursive, h2);
+ }
+
+ /* Get rid of all weird permissions. This also checks that
+ all files are owned by the build user, if applicable. */
+ canonicalisePathMetaData(actualPath,
+ buildUser && !rewritten ? buildUser->getUID() : -1, inodesSeen);
+
+ /* For this output path, find the references to other paths
+ contained in it. Compute the SHA-256 NAR hash at the same
+ time. The hash is stored in the database so that we can
+ verify later on whether nobody has messed with the store. */
+ Activity act(*logger, lvlTalkative, format("scanning for references inside ‘%1%’") % path);
+ HashResult hash;
+ PathSet references = scanForReferences(actualPath, allPaths, hash);
+
+ if (buildMode == bmCheck) {
+ if (!worker.store.isValidPath(path)) continue;
+ auto info = *worker.store.queryPathInfo(path);
+ if (hash.first != info.narHash) {
+ if (settings.keepFailed) {
+ Path dst = worker.store.toRealPath(path + checkSuffix);
+ deletePath(dst);
+ if (rename(actualPath.c_str(), dst.c_str()))
+ throw SysError(format("renaming ‘%1%’ to ‘%2%’") % actualPath % dst);
+ throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs from ‘%3%’")
+ % drvPath % path % dst);
+ } else
+ throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs")
+ % drvPath % path);
+ }
+
+ /* Since we verified the build, it's now ultimately
+ trusted. */
+ if (!info.ultimate) {
+ info.ultimate = true;
+ worker.store.signPathInfo(info);
+ worker.store.registerValidPaths({info});
+ }
+
+ continue;
+ }
+
+ /* For debugging, print out the referenced and unreferenced
+ paths. */
+ for (auto & i : inputPaths) {
+ PathSet::iterator j = references.find(i);
+ if (j == references.end())
+ debug(format("unreferenced input: ‘%1%’") % i);
+ else
+ debug(format("referenced input: ‘%1%’") % i);
+ }
+
+ /* Enforce `allowedReferences' and friends. */
+ auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) {
+ if (drv->env.find(attrName) == drv->env.end()) return;
+
+ PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName));
+
+ PathSet used;
+ if (recursive) {
+ /* Our requisites are the union of the closures of our references. */
+ for (auto & i : references)
+ /* Don't call computeFSClosure on ourselves. */
+ if (path != i)
+ worker.store.computeFSClosure(i, used);
+ } else
+ used = references;
+
+ PathSet badPaths;
+
+ for (auto & i : used)
+ if (allowed) {
+ if (spec.find(i) == spec.end())
+ badPaths.insert(i);
+ } else {
+ if (spec.find(i) != spec.end())
+ badPaths.insert(i);
+ }
+
+ if (!badPaths.empty()) {
+ string badPathsStr;
+ for (auto & i : badPaths) {
+ badPathsStr += "\n\t";
+ badPathsStr += i;
+ }
+ throw BuildError(format("output ‘%1%’ is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr);
+ }
+ };
+
+ checkRefs("allowedReferences", true, false);
+ checkRefs("allowedRequisites", true, true);
+ checkRefs("disallowedReferences", false, false);
+ checkRefs("disallowedRequisites", false, true);
+
+ if (curRound == nrRounds) {
+ worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences()
+ worker.markContentsGood(path);
+ }
+
+ info.path = path;
+ info.narHash = hash.first;
+ info.narSize = hash.second;
+ info.references = references;
+ info.deriver = drvPath;
+ info.ultimate = true;
+ worker.store.signPathInfo(info);
+
+ infos.push_back(info);
+ }
+
+ if (buildMode == bmCheck) return;
+
+ /* Compare the result with the previous round, and report which
+ path is different, if any. */
+ if (curRound > 1 && prevInfos != infos) {
+ assert(prevInfos.size() == infos.size());
+ for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
+ if (!(*i == *j)) {
+ result.isNonDeterministic = true;
+ Path prev = i->path + checkSuffix;
+ bool prevExists = keepPreviousRound && pathExists(prev);
+ auto msg = prevExists
+ ? fmt("output ‘%1%’ of ‘%2%’ differs from ‘%3%’ from previous round", i->path, drvPath, prev)
+ : fmt("output ‘%1%’ of ‘%2%’ differs from previous round", i->path, drvPath);
+
+ auto diffHook = settings.diffHook;
+ if (prevExists && diffHook != "" && runDiffHook) {
+ try {
+ auto diff = runProgram(diffHook, true, {prev, i->path});
+ if (diff != "")
+ printError(chomp(diff));
+ } catch (Error & error) {
+ printError("diff hook execution failed: %s", error.what());
+ }
+ }
+
+ if (settings.enforceDeterminism)
+ throw NotDeterministic(msg);
+
+ printError(msg);
+ curRound = nrRounds; // we know enough, bail out early
+ }
+ }
+
+ /* If this is the first round of several, then move the output out
+ of the way. */
+ if (nrRounds > 1 && curRound == 1 && keepPreviousRound) {
+ for (auto & i : drv->outputs) {
+ Path dst = i.second.path + checkSuffix;
+ deletePath(dst);
+ if (rename(i.second.path.c_str(), dst.c_str()))
+ throw SysError(format("renaming ‘%1%’ to ‘%2%’") % i.second.path % dst);
+ }
+ }
+
+ if (curRound < nrRounds) {
+ prevInfos = infos;
+ return;
+ }
+
+ /* Remove the .check directories if we're done. FIXME: keep them
+ if the result was not deterministic? */
+ if (curRound == nrRounds) {
+ for (auto & i : drv->outputs) {
+ Path prev = i.second.path + checkSuffix;
+ deletePath(prev);
+ }
+ }
+
+ /* Register each output path as valid, and register the sets of
+ paths referenced by each of them. If there are cycles in the
+ outputs, this will fail. */
+ worker.store.registerValidPaths(infos);
+}
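
The checkRefs lambda above treats the specifier set as a whitelist for allowedReferences/allowedRequisites and as a blacklist for the disallowed variants. The core test, extracted into a stand-alone sketch:

// Sketch: the whitelist/blacklist test at the heart of checkRefs.
#include <set>
#include <string>

std::set<std::string> findBadPaths(const std::set<std::string> & used,
    const std::set<std::string> & spec, bool allowed)
{
    std::set<std::string> bad;
    for (auto & p : used)
        // allowed == true: anything outside the spec is bad;
        // allowed == false: anything inside the spec is bad.
        if (allowed ? spec.count(p) == 0 : spec.count(p) != 0)
            bad.insert(p);
    return bad;
}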
+
+
+Path DerivationGoal::openLogFile()
+{
+ logSize = 0;
+
+ if (!settings.keepLog) return "";
+
+ string baseName = baseNameOf(drvPath);
+
+ /* Create a log file. */
+ Path dir = fmt("%s/%s/%s/", worker.store.logDir, worker.store.drvsLogDir, string(baseName, 0, 2));
+ createDirs(dir);
+
+ Path logFileName = fmt("%s/%s%s", dir, string(baseName, 2),
+ settings.compressLog ? ".bz2" : "");
+
+ fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666);
+ if (!fdLogFile) throw SysError(format("creating log file ‘%1%’") % logFileName);
+
+ logFileSink = std::make_shared<FdSink>(fdLogFile.get());
+
+ if (settings.compressLog)
+ logSink = std::shared_ptr<CompressionSink>(makeCompressionSink("bzip2", *logFileSink));
+ else
+ logSink = logFileSink;
+
+ return logFileName;
+}
+
+
+void DerivationGoal::closeLogFile()
+{
+ auto logSink2 = std::dynamic_pointer_cast<CompressionSink>(logSink);
+ if (logSink2) logSink2->finish();
+ if (logFileSink) logFileSink->flush();
+ logSink = logFileSink = 0;
+ fdLogFile = -1;
+}
+
+
+void DerivationGoal::deleteTmpDir(bool force)
+{
+ if (tmpDir != "") {
+ /* Don't keep temporary directories for builtins because they
+ might have privileged stuff (like a copy of netrc). */
+ if (settings.keepFailed && !force && !drv->isBuiltin()) {
+ printError(
+ format("note: keeping build directory ‘%2%’")
+ % drvPath % tmpDir);
+ chmod(tmpDir.c_str(), 0755);
+ }
+ else
+ deletePath(tmpDir);
+ tmpDir = "";
+ }
+}
+
+
+void DerivationGoal::handleChildOutput(int fd, const string & data)
+{
+ if ((hook && fd == hook->builderOut.readSide.get()) ||
+ (!hook && fd == builderOut.readSide.get()))
+ {
+ logSize += data.size();
+ if (settings.maxLogSize && logSize > settings.maxLogSize) {
+ printError(
+ format("%1% killed after writing more than %2% bytes of log output")
+ % getName() % settings.maxLogSize);
+ killChild();
+ done(BuildResult::LogLimitExceeded);
+ return;
+ }
+
+ for (auto c : data)
+ if (c == '\r')
+ currentLogLinePos = 0;
+ else if (c == '\n')
+ flushLine();
+ else {
+ if (currentLogLinePos >= currentLogLine.size())
+ currentLogLine.resize(currentLogLinePos + 1);
+ currentLogLine[currentLogLinePos++] = c;
+ }
+
+ if (logSink) (*logSink)(data);
+ }
+
+ if (hook && fd == hook->fromHook.readSide.get())
+ printError(data); // FIXME?
+}
+
+
+void DerivationGoal::handleEOF(int fd)
+{
+ if (!currentLogLine.empty()) flushLine();
+ worker.wakeUp(shared_from_this());
+}
+
+
+void DerivationGoal::flushLine()
+{
+ if (settings.verboseBuild &&
+ (settings.printRepeatedBuilds || curRound == 1))
+ printError(filterANSIEscapes(currentLogLine, true));
+ else {
+ logTail.push_back(currentLogLine);
+ if (logTail.size() > settings.logLines) logTail.pop_front();
+ }
+ currentLogLine = "";
+ currentLogLinePos = 0;
+}
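
handleChildOutput() and flushLine() together implement a small line buffer in which '\r' rewinds to the start of the current line (so progress indicators overwrite themselves) and '\n' emits it. A self-contained sketch of that behaviour; the struct is illustrative, not the class used here.

// Sketch of the log line buffering: '\r' rewinds, '\n' emits.
#include <functional>
#include <string>

struct LineBuffer
{
    std::string line;
    std::string::size_type pos = 0;
    std::function<void(const std::string &)> emit;

    void feed(char c)
    {
        if (c == '\r') pos = 0;                          // progress bars rewind
        else if (c == '\n') { emit(line); line.clear(); pos = 0; }
        else {
            if (pos >= line.size()) line.resize(pos + 1);
            line[pos++] = c;                             // overwrite in place
        }
    }
};

// Usage:
//   LineBuffer buf;
//   buf.emit = [](const std::string & l) { /* log the finished line */ };
//   for (char c : chunk) buf.feed(c);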
+
+
+PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
+{
+ PathSet result;
+ for (auto & i : drv->outputs) {
+ if (!wantOutput(i.first, wantedOutputs)) continue;
+ bool good =
+ worker.store.isValidPath(i.second.path) &&
+ (!checkHash || worker.pathContentsGood(i.second.path));
+ if (good == returnValid) result.insert(i.second.path);
+ }
+ return result;
+}
+
+
+Path DerivationGoal::addHashRewrite(const Path & path)
+{
+ string h1 = string(path, worker.store.storeDir.size() + 1, 32);
+ string h2 = string(printHash32(hashString(htSHA256, "rewrite:" + drvPath + ":" + path)), 0, 32);
+ Path p = worker.store.storeDir + "/" + h2 + string(path, worker.store.storeDir.size() + 33);
+ deletePath(p);
+ assert(path.size() == p.size());
+ inputRewrites[h1] = h2;
+ outputRewrites[h2] = h1;
+ redirectedOutputs[path] = p;
+ return p;
+}
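
addHashRewrite() relies on the temporary and final hash parts having the same length, so a plain textual substitution over the serialised output keeps all offsets valid. A sketch of that substitution:

// Sketch of the textual hash substitution. Because addHashRewrite() asserts
// that the rewritten path has the same length as the original, replacing one
// 32-character hash with another never shifts offsets inside the NAR.
#include <string>

std::string rewriteAll(std::string s, const std::string & from, const std::string & to)
{
    for (std::string::size_type pos = 0;
         (pos = s.find(from, pos)) != std::string::npos;
         pos += to.size())
        s.replace(pos, from.size(), to);
    return s;
}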
+
+
+void DerivationGoal::done(BuildResult::Status status, const string & msg)
+{
+ result.status = status;
+ result.errorMsg = msg;
+ amDone(result.success() ? ecSuccess : ecFailed);
+ if (result.status == BuildResult::TimedOut)
+ worker.timedOut = true;
+ if (result.status == BuildResult::PermanentFailure)
+ worker.permanentFailure = true;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+class SubstitutionGoal : public Goal
+{
+ friend class Worker;
+
+private:
+ /* The store path that should be realised through a substitute. */
+ Path storePath;
+
+ /* The remaining substituters. */
+ std::list<ref<Store>> subs;
+
+ /* The current substituter. */
+ std::shared_ptr<Store> sub;
+
+ /* Whether any substituter can realise this path. */
+ bool hasSubstitute;
+
+ /* Path info returned by the substituter's query info operation. */
+ std::shared_ptr<const ValidPathInfo> info;
+
+ /* Pipe for the substituter's standard output. */
+ Pipe outPipe;
+
+ /* The substituter thread. */
+ std::thread thr;
+
+ std::promise<void> promise;
+
+ /* Whether to try to repair a valid path. */
+ bool repair;
+
+ /* Location where we're downloading the substitute. Differs from
+ storePath when doing a repair. */
+ Path destPath;
+
+ typedef void (SubstitutionGoal::*GoalState)();
+ GoalState state;
+
+public:
+ SubstitutionGoal(const Path & storePath, Worker & worker, bool repair = false);
+ ~SubstitutionGoal();
+
+ void timedOut() { abort(); };
+
+ string key()
+ {
+ /* "a$" ensures substitution goals happen before derivation
+ goals. */
+ return "a$" + storePathToName(storePath) + "$" + storePath;
+ }
+
+ void work();
+
+ /* The states. */
+ void init();
+ void tryNext();
+ void gotInfo();
+ void referencesValid();
+ void tryToRun();
+ void finished();
+
+ /* Callback used by the worker to write to the log. */
+ void handleChildOutput(int fd, const string & data);
+ void handleEOF(int fd);
+
+ Path getStorePath() { return storePath; }
+};
+
+
+SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, bool repair)
+ : Goal(worker)
+ , hasSubstitute(false)
+ , repair(repair)
+{
+ this->storePath = storePath;
+ state = &SubstitutionGoal::init;
+ name = (format("substitution of ‘%1%’") % storePath).str();
+ trace("created");
+}
+
+
+SubstitutionGoal::~SubstitutionGoal()
+{
+ try {
+ if (thr.joinable()) {
+ // FIXME: signal worker thread to quit.
+ thr.join();
+ worker.childTerminated(this);
+ }
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+void SubstitutionGoal::work()
+{
+ (this->*state)();
+}
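
The goals use a pointer-to-member state machine: `state` names the next method to run, and work() simply dispatches through it. A toy version of the pattern:

// Toy sketch of the pointer-to-member state machine used by the goals.
#include <iostream>

struct Machine
{
    typedef void (Machine::*State)();
    State state = &Machine::start;

    void work() { (this->*state)(); }       // dispatch to the current state

    void start()  { std::cout << "starting\n";  state = &Machine::finish; }
    void finish() { std::cout << "finishing\n"; }
};

int main()
{
    Machine m;
    m.work();   // runs start(), which arms finish()
    m.work();   // runs finish()
}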
+
+
+void SubstitutionGoal::init()
+{
+ trace("init");
+
+ worker.store.addTempRoot(storePath);
+
+ /* If the path already exists we're done. */
+ if (!repair && worker.store.isValidPath(storePath)) {
+ amDone(ecSuccess);
+ return;
+ }
+
+ if (settings.readOnlyMode)
+ throw Error(format("cannot substitute path ‘%1%’ - no write access to the Nix store") % storePath);
+
+ subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
+
+ tryNext();
+}
+
+
+void SubstitutionGoal::tryNext()
+{
+ trace("trying next substituter");
+
+ if (subs.size() == 0) {
+ /* None left. Terminate this goal and let someone else deal
+ with it. */
+ debug(format("path ‘%1%’ is required, but there is no substituter that can build it") % storePath);
+
+ /* Hack: don't indicate failure if there were no substituters.
+ In that case the calling derivation should just do a
+ build. */
+ amDone(hasSubstitute ? ecFailed : ecNoSubstituters);
+ return;
+ }
+
+ sub = subs.front();
+ subs.pop_front();
+
+ if (sub->storeDir != worker.store.storeDir) {
+ tryNext();
+ return;
+ }
+
+ try {
+ // FIXME: make async
+ info = sub->queryPathInfo(storePath);
+ } catch (InvalidPath &) {
+ tryNext();
+ return;
+ }
+
+ hasSubstitute = true;
+
+ /* Bail out early if this substituter lacks a valid
+ signature. LocalStore::addToStore() also checks for this, but
+ only after we've downloaded the path. */
+ if (worker.store.requireSigs && !info->checkSignatures(worker.store, worker.store.publicKeys)) {
+ printInfo(format("warning: substituter ‘%s’ does not have a valid signature for path ‘%s’")
+ % sub->getUri() % storePath);
+ tryNext();
+ return;
+ }
+
+ /* To maintain the closure invariant, we first have to realise the
+ paths referenced by this one. */
+ for (auto & i : info->references)
+ if (i != storePath) /* ignore self-references */
+ addWaitee(worker.makeSubstitutionGoal(i));
+
+ if (waitees.empty()) /* to prevent hang (no wake-up event) */
+ referencesValid();
+ else
+ state = &SubstitutionGoal::referencesValid;
+}
+
+
+void SubstitutionGoal::referencesValid()
+{
+ trace("all references realised");
+
+ if (nrFailed > 0) {
+ debug(format("some references of path ‘%1%’ could not be realised") % storePath);
+ amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
+ return;
+ }
+
+ for (auto & i : info->references)
+ if (i != storePath) /* ignore self-references */
+ assert(worker.store.isValidPath(i));
+
+ state = &SubstitutionGoal::tryToRun;
+ worker.wakeUp(shared_from_this());
+}
+
+
+void SubstitutionGoal::tryToRun()
+{
+ trace("trying to run");
+
+ /* Make sure that we are allowed to start a build. Note that even
+ if maxBuildJobs == 0 (no local builds allowed), we still allow
+ a substituter to run. This is because substitutions cannot be
+ distributed to another machine via the build hook. */
+ if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) {
+ worker.waitForBuildSlot(shared_from_this());
+ return;
+ }
+
+ printInfo(format("fetching path ‘%1%’...") % storePath);
+
+ outPipe.create();
+
+ promise = std::promise<void>();
+
+ thr = std::thread([this]() {
+ try {
+ /* Wake up the worker loop when we're done. */
+ Finally updateStats([this]() { outPipe.writeSide = -1; });
+
+ copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()),
+ storePath, repair);
+
+ promise.set_value();
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ });
+
+ worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
+
+ state = &SubstitutionGoal::finished;
+}
+
+
+void SubstitutionGoal::finished()
+{
+ trace("substitute finished");
+
+ thr.join();
+ worker.childTerminated(this);
+
+ try {
+ promise.get_future().get();
+ } catch (Error & e) {
+ printInfo(e.msg());
+
+ /* Try the next substitute. */
+ state = &SubstitutionGoal::tryNext;
+ worker.wakeUp(shared_from_this());
+ return;
+ }
+
+ worker.markContentsGood(storePath);
+
+ printMsg(lvlChatty,
+ format("substitution of path ‘%1%’ succeeded") % storePath);
+
+ amDone(ecSuccess);
+}
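
tryToRun() and finished() combine a worker thread, a std::promise for the result, and a pipe whose write side is closed when the thread ends, so the select()-based worker loop wakes up on EOF. A compact sketch of that pattern with illustrative names:

// Sketch of the thread + promise + pipe pattern: the event loop selects on
// readFd and gets EOF when the job is done; finish() rethrows any error.
#include <future>
#include <stdexcept>
#include <thread>
#include <unistd.h>

struct AsyncJob
{
    int readFd = -1, writeFd = -1;
    std::promise<void> promise;
    std::thread thr;

    void start()
    {
        int fds[2];
        if (pipe(fds) != 0) throw std::runtime_error("pipe failed");
        readFd = fds[0]; writeFd = fds[1];
        thr = std::thread([this]() {
            try {
                // ... do the actual copy/download here ...
                promise.set_value();
            } catch (...) {
                promise.set_exception(std::current_exception());
            }
            close(writeFd);          // EOF on readFd wakes the event loop
        });
    }

    void finish()
    {
        thr.join();
        close(readFd);
        promise.get_future().get();  // rethrows whatever the thread caught
    }
};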
+
+
+void SubstitutionGoal::handleChildOutput(int fd, const string & data)
+{
+}
+
+
+void SubstitutionGoal::handleEOF(int fd)
+{
+ if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+static bool working = false;
+
+
+Worker::Worker(LocalStore & store)
+ : store(store)
+{
+ /* Debugging: prevent recursive workers. */
+ if (working) abort();
+ working = true;
+ nrLocalBuilds = 0;
+ lastWokenUp = steady_time_point::min();
+ permanentFailure = false;
+ timedOut = false;
+}
+
+
+Worker::~Worker()
+{
+ working = false;
+
+ /* Explicitly get rid of all strong pointers now. After this all
+ goals that refer to this worker should be gone. (Otherwise we
+ are in trouble, since goals may call childTerminated() etc. in
+ their destructors). */
+ topGoals.clear();
+}
+
+
+GoalPtr Worker::makeDerivationGoal(const Path & path,
+ const StringSet & wantedOutputs, BuildMode buildMode)
+{
+ GoalPtr goal = derivationGoals[path].lock();
+ if (!goal) {
+ goal = std::make_shared<DerivationGoal>(path, wantedOutputs, *this, buildMode);
+ derivationGoals[path] = goal;
+ wakeUp(goal);
+ } else
+ (dynamic_cast<DerivationGoal *>(goal.get()))->addWantedOutputs(wantedOutputs);
+ return goal;
+}
+
+
+std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const Path & drvPath,
+ const BasicDerivation & drv, BuildMode buildMode)
+{
+ auto goal = std::make_shared<DerivationGoal>(drvPath, drv, *this, buildMode);
+ wakeUp(goal);
+ return goal;
+}
+
+
+GoalPtr Worker::makeSubstitutionGoal(const Path & path, bool repair)
+{
+ GoalPtr goal = substitutionGoals[path].lock();
+ if (!goal) {
+ goal = std::make_shared<SubstitutionGoal>(path, *this, repair);
+ substitutionGoals[path] = goal;
+ wakeUp(goal);
+ }
+ return goal;
+}
+
+
+static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap)
+{
+ /* !!! inefficient */
+ for (WeakGoalMap::iterator i = goalMap.begin();
+ i != goalMap.end(); )
+ if (i->second.lock() == goal) {
+ WeakGoalMap::iterator j = i; ++j;
+ goalMap.erase(i);
+ i = j;
+ }
+ else ++i;
+}
+
+
+void Worker::removeGoal(GoalPtr goal)
+{
+ nix::removeGoal(goal, derivationGoals);
+ nix::removeGoal(goal, substitutionGoals);
+ if (topGoals.find(goal) != topGoals.end()) {
+ topGoals.erase(goal);
+ /* If a top-level goal failed, then kill all other goals
+ (unless keepGoing was set). */
+ if (goal->getExitCode() == Goal::ecFailed && !settings.keepGoing)
+ topGoals.clear();
+ }
+
+ /* Wake up goals waiting for any goal to finish. */
+ for (auto & i : waitingForAnyGoal) {
+ GoalPtr goal = i.lock();
+ if (goal) wakeUp(goal);
+ }
+
+ waitingForAnyGoal.clear();
+}
+
+
+void Worker::wakeUp(GoalPtr goal)
+{
+ goal->trace("woken up");
+ addToWeakGoals(awake, goal);
+}
+
+
+unsigned Worker::getNrLocalBuilds()
+{
+ return nrLocalBuilds;
+}
+
+
+void Worker::childStarted(GoalPtr goal, const set<int> & fds,
+ bool inBuildSlot, bool respectTimeouts)
+{
+ Child child;
+ child.goal = goal;
+ child.goal2 = goal.get();
+ child.fds = fds;
+ child.timeStarted = child.lastOutput = steady_time_point::clock::now();
+ child.inBuildSlot = inBuildSlot;
+ child.respectTimeouts = respectTimeouts;
+ children.emplace_back(child);
+ if (inBuildSlot) nrLocalBuilds++;
+}
+
+
+void Worker::childTerminated(Goal * goal, bool wakeSleepers)
+{
+ auto i = std::find_if(children.begin(), children.end(),
+ [&](const Child & child) { return child.goal2 == goal; });
+ if (i == children.end()) return;
+
+ if (i->inBuildSlot) {
+ assert(nrLocalBuilds > 0);
+ nrLocalBuilds--;
+ }
+
+ children.erase(i);
+
+ if (wakeSleepers) {
+
+ /* Wake up goals waiting for a build slot. */
+ for (auto & j : wantingToBuild) {
+ GoalPtr goal = j.lock();
+ if (goal) wakeUp(goal);
+ }
+
+ wantingToBuild.clear();
+ }
+}
+
+
+void Worker::waitForBuildSlot(GoalPtr goal)
+{
+ debug("wait for build slot");
+ if (getNrLocalBuilds() < settings.maxBuildJobs)
+ wakeUp(goal); /* we can do it right away */
+ else
+ addToWeakGoals(wantingToBuild, goal);
+}
+
+
+void Worker::waitForAnyGoal(GoalPtr goal)
+{
+ debug("wait for any goal");
+ addToWeakGoals(waitingForAnyGoal, goal);
+}
+
+
+void Worker::waitForAWhile(GoalPtr goal)
+{
+ debug("wait for a while");
+ addToWeakGoals(waitingForAWhile, goal);
+}
+
+
+void Worker::run(const Goals & _topGoals)
+{
+ for (auto & i : _topGoals) topGoals.insert(i);
+
+ Activity act(*logger, lvlDebug, "entered goal loop");
+
+ while (1) {
+
+ checkInterrupt();
+
+ /* Call every awake goal (in the ordering established by
+ CompareGoalPtrs). */
+ while (!awake.empty() && !topGoals.empty()) {
+ Goals awake2;
+ for (auto & i : awake) {
+ GoalPtr goal = i.lock();
+ if (goal) awake2.insert(goal);
+ }
+ awake.clear();
+ for (auto & goal : awake2) {
+ checkInterrupt();
+ goal->work();
+ if (topGoals.empty()) break; // stuff may have been cancelled
+ }
+ }
+
+ if (topGoals.empty()) break;
+
+ /* Wait for input. */
+ if (!children.empty() || !waitingForAWhile.empty())
+ waitForInput();
+ else {
+ if (awake.empty() && 0 == settings.maxBuildJobs) throw Error(
+ "unable to start any build; either increase ‘--max-jobs’ "
+ "or enable distributed builds");
+ assert(!awake.empty());
+ }
+ }
+
+ /* If --keep-going is not set, it's possible that the main goal
+ exited while some of its subgoals were still active. But if
+ --keep-going *is* set, then they must all be finished now. */
+ assert(!settings.keepGoing || awake.empty());
+ assert(!settings.keepGoing || wantingToBuild.empty());
+ assert(!settings.keepGoing || children.empty());
+}
+
+
+void Worker::waitForInput()
+{
+ printMsg(lvlVomit, "waiting for children");
+
+ /* Process output from the file descriptors attached to the
+ children, namely log output and output path creation commands.
+ We also use this to detect child termination: if we get EOF on
+ the logger pipe of a build, we assume that the builder has
+ terminated. */
+
+ bool useTimeout = false;
+ struct timeval timeout;
+ timeout.tv_usec = 0;
+ auto before = steady_time_point::clock::now();
+
+ /* If we're monitoring for silence on stdout/stderr, or if there
+ is a build timeout, then wait for input until the first
+ deadline for any child. */
+ auto nearest = steady_time_point::max(); // nearest deadline
+ for (auto & i : children) {
+ if (!i.respectTimeouts) continue;
+ if (0 != settings.maxSilentTime)
+ nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
+ if (0 != settings.buildTimeout)
+ nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
+ }
+ if (nearest != steady_time_point::max()) {
+ timeout.tv_sec = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
+ useTimeout = true;
+ }
+
+ /* If we are polling goals that are waiting for a lock, then wake
+ up after a few seconds at most. */
+ if (!waitingForAWhile.empty()) {
+ useTimeout = true;
+ if (lastWokenUp == steady_time_point::min())
+ printError("waiting for locks or build slots...");
+ if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
+ timeout.tv_sec = std::max(1L,
+ (long) std::chrono::duration_cast<std::chrono::seconds>(
+ lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
+ } else lastWokenUp = steady_time_point::min();
+
+ if (useTimeout)
+ vomit("sleeping %d seconds", timeout.tv_sec);
+
+ /* Use select() to wait for the input side of any logger pipe to
+ become `available'. Note that `available' (i.e., non-blocking)
+ includes EOF. */
+ fd_set fds;
+ FD_ZERO(&fds);
+ int fdMax = 0;
+ for (auto & i : children) {
+ for (auto & j : i.fds) {
+ FD_SET(j, &fds);
+ if (j >= fdMax) fdMax = j + 1;
+ }
+ }
+
+ if (select(fdMax, &fds, 0, 0, useTimeout ? &timeout : 0) == -1) {
+ if (errno == EINTR) return;
+ throw SysError("waiting for input");
+ }
+
+ auto after = steady_time_point::clock::now();
+
+ /* Process all available file descriptors. FIXME: this is
+ O(children * fds). */
+ decltype(children)::iterator i;
+ for (auto j = children.begin(); j != children.end(); j = i) {
+ i = std::next(j);
+
+ checkInterrupt();
+
+ GoalPtr goal = j->goal.lock();
+ assert(goal);
+
+ set<int> fds2(j->fds);
+ for (auto & k : fds2) {
+ if (FD_ISSET(k, &fds)) {
+ unsigned char buffer[4096];
+ ssize_t rd = read(k, buffer, sizeof(buffer));
+ if (rd == -1) {
+ if (errno != EINTR)
+ throw SysError(format("reading from %1%")
+ % goal->getName());
+ } else if (rd == 0) {
+ debug(format("%1%: got EOF") % goal->getName());
+ goal->handleEOF(k);
+ j->fds.erase(k);
+ } else {
+ printMsg(lvlVomit, format("%1%: read %2% bytes")
+ % goal->getName() % rd);
+ string data((char *) buffer, rd);
+ j->lastOutput = after;
+ goal->handleChildOutput(k, data);
+ }
+ }
+ }
+
+ if (goal->getExitCode() == Goal::ecBusy &&
+ 0 != settings.maxSilentTime &&
+ j->respectTimeouts &&
+ after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
+ {
+ printError(
+ format("%1% timed out after %2% seconds of silence")
+ % goal->getName() % settings.maxSilentTime);
+ goal->timedOut();
+ }
+
+ else if (goal->getExitCode() == Goal::ecBusy &&
+ 0 != settings.buildTimeout &&
+ j->respectTimeouts &&
+ after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
+ {
+ printError(
+ format("%1% timed out after %2% seconds")
+ % goal->getName() % settings.buildTimeout);
+ goal->timedOut();
+ }
+ }
+
+ if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
+ lastWokenUp = after;
+ for (auto & i : waitingForAWhile) {
+ GoalPtr goal = i.lock();
+ if (goal) wakeUp(goal);
+ }
+ waitingForAWhile.clear();
+ }
+}
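
The timeout computation above picks the earliest deadline over all children (max-silent-time and build-timeout) and clamps the resulting select() timeout to at least one second. A sketch of just that calculation, assuming the per-child deadlines have already been collected:

// Sketch of the select() timeout calculation.
#include <algorithm>
#include <chrono>
#include <vector>

using Clock = std::chrono::steady_clock;

long selectTimeoutSeconds(const std::vector<Clock::time_point> & deadlines,
    Clock::time_point now)
{
    if (deadlines.empty()) return -1;    // caller should then select() without a timeout
    auto nearest = *std::min_element(deadlines.begin(), deadlines.end());
    auto secs = std::chrono::duration_cast<std::chrono::seconds>(nearest - now).count();
    return std::max(1L, (long) secs);    // never busy-loop; wait at least a second
}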
+
+
+unsigned int Worker::exitStatus()
+{
+ return timedOut ? 101 : (permanentFailure ? 100 : 1);
+}
+
+
+bool Worker::pathContentsGood(const Path & path)
+{
+ std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
+ if (i != pathContentsGoodCache.end()) return i->second;
+ printInfo(format("checking path ‘%1%’...") % path);
+ auto info = store.queryPathInfo(path);
+ bool res;
+ if (!pathExists(path))
+ res = false;
+ else {
+ HashResult current = hashPath(info->narHash.type, path);
+ Hash nullHash(htSHA256);
+ res = info->narHash == nullHash || info->narHash == current.first;
+ }
+ pathContentsGoodCache[path] = res;
+ if (!res) printError(format("path ‘%1%’ is corrupted or missing!") % path);
+ return res;
+}
+
+
+void Worker::markContentsGood(const Path & path)
+{
+ pathContentsGoodCache[path] = true;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+ Worker worker(*this);
+
+ Goals goals;
+ for (auto & i : drvPaths) {
+ DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i);
+ if (isDerivation(i2.first))
+ goals.insert(worker.makeDerivationGoal(i2.first, i2.second, buildMode));
+ else
+ goals.insert(worker.makeSubstitutionGoal(i, buildMode == bmRepair));
+ }
+
+ worker.run(goals);
+
+ PathSet failed;
+ for (auto & i : goals) {
+ if (i->getExitCode() != Goal::ecSuccess) {
+ DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i.get());
+ if (i2) failed.insert(i2->getDrvPath());
+ else failed.insert(dynamic_cast<SubstitutionGoal *>(i.get())->getStorePath());
+ }
+ }
+
+ if (!failed.empty())
+ throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
+}
+
+
+BuildResult LocalStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode)
+{
+ Worker worker(*this);
+ auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode);
+
+ BuildResult result;
+
+ try {
+ worker.run(Goals{goal});
+ result = goal->getResult();
+ } catch (Error & e) {
+ result.status = BuildResult::MiscFailure;
+ result.errorMsg = e.msg();
+ }
+
+ return result;
+}
+
+
+void LocalStore::ensurePath(const Path & path)
+{
+ /* If the path is already valid, we're done. */
+ if (isValidPath(path)) return;
+
+ Worker worker(*this);
+ GoalPtr goal = worker.makeSubstitutionGoal(path);
+ Goals goals = {goal};
+
+ worker.run(goals);
+
+ if (goal->getExitCode() != Goal::ecSuccess)
+ throw Error(worker.exitStatus(), "path ‘%s’ does not exist and cannot be created", path);
+}
+
+
+void LocalStore::repairPath(const Path & path)
+{
+ Worker worker(*this);
+ GoalPtr goal = worker.makeSubstitutionGoal(path, true);
+ Goals goals = {goal};
+
+ worker.run(goals);
+
+ if (goal->getExitCode() != Goal::ecSuccess) {
+ /* Since substituting the path didn't work, if we have a valid
+ deriver, then rebuild the deriver. */
+ auto deriver = queryPathInfo(path)->deriver;
+ if (deriver != "" && isValidPath(deriver)) {
+ goals.clear();
+ goals.insert(worker.makeDerivationGoal(deriver, StringSet(), bmRepair));
+ worker.run(goals);
+ } else
+ throw Error(worker.exitStatus(), "cannot repair path ‘%s’", path);
+ }
+}
+
+
+}
diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc
new file mode 100644
index 000000000..c5dbd57f8
--- /dev/null
+++ b/src/libstore/builtins.cc
@@ -0,0 +1,71 @@
+#include "builtins.hh"
+#include "download.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "compression.hh"
+
+namespace nix {
+
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
+{
+ /* Make the host's netrc data available. Too bad curl requires
+ this to be stored in a file. It would be nice if we could just
+ pass a pointer to the data. */
+ if (netrcData != "") {
+ settings.netrcFile = "netrc";
+ writeFile(settings.netrcFile, netrcData, 0600);
+ }
+
+ auto getAttr = [&](const string & name) {
+ auto i = drv.env.find(name);
+ if (i == drv.env.end()) throw Error(format("attribute ‘%s’ missing") % name);
+ return i->second;
+ };
+
+ auto fetch = [&](const string & url) {
+ /* No need to do TLS verification, because we check the hash of
+ the result anyway. */
+ DownloadRequest request(url);
+ request.verifyTLS = false;
+
+ /* Show a progress indicator, even though stderr is not a tty. */
+ request.showProgress = DownloadRequest::yes;
+
+ /* Note: have to use a fresh downloader here because we're in
+ a forked process. */
+ auto data = makeDownloader()->download(request);
+ assert(data.data);
+
+ return data.data;
+ };
+
+ std::shared_ptr<std::string> data;
+
+ try {
+ if (getAttr("outputHashMode") == "flat")
+ data = fetch("http://tarballs.nixos.org/" + getAttr("outputHashAlgo") + "/" + getAttr("outputHash"));
+ } catch (Error & e) {
+ debug(e.what());
+ }
+
+ if (!data) data = fetch(getAttr("url"));
+
+ Path storePath = getAttr("out");
+
+ auto unpack = drv.env.find("unpack");
+ if (unpack != drv.env.end() && unpack->second == "1") {
+ if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6))
+ data = decompress("xz", *data);
+ StringSource source(*data);
+ restorePath(storePath, source);
+ } else
+ writeFile(storePath, *data);
+
+ auto executable = drv.env.find("executable");
+ if (executable != drv.env.end() && executable->second == "1") {
+ if (chmod(storePath.c_str(), 0755) == -1)
+ throw SysError(format("making ‘%1%’ executable") % storePath);
+ }
+}
+
+}
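
The unpack path above decides whether to run xz decompression by looking at the first six bytes of the download. A sketch of that check on its own:

// Sketch of the xz detection: the stream is decompressed only if it starts
// with the 6-byte magic 0xFD '7' 'z' 'X' 'Z' 0x00.
#include <string>

bool looksLikeXz(const std::string & data)
{
    static const std::string magic("\xfd" "7zXZ\0", 6);
    return data.size() >= magic.size()
        && data.compare(0, magic.size(), magic) == 0;
}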
diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh
new file mode 100644
index 000000000..0cc6ba31f
--- /dev/null
+++ b/src/libstore/builtins.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "derivations.hh"
+
+namespace nix {
+
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
+
+}
diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc
new file mode 100644
index 000000000..f56a6adab
--- /dev/null
+++ b/src/libstore/crypto.cc
@@ -0,0 +1,126 @@
+#include "crypto.hh"
+#include "util.hh"
+#include "globals.hh"
+
+#if HAVE_SODIUM
+#include <sodium.h>
+#endif
+
+namespace nix {
+
+static std::pair<std::string, std::string> split(const string & s)
+{
+ size_t colon = s.find(':');
+ if (colon == std::string::npos || colon == 0)
+ return {"", ""};
+ return {std::string(s, 0, colon), std::string(s, colon + 1)};
+}
+
+Key::Key(const string & s)
+{
+ auto ss = split(s);
+
+ name = ss.first;
+ key = ss.second;
+
+ if (name == "" || key == "")
+ throw Error("secret key is corrupt");
+
+ key = base64Decode(key);
+}
+
+SecretKey::SecretKey(const string & s)
+ : Key(s)
+{
+#if HAVE_SODIUM
+ if (key.size() != crypto_sign_SECRETKEYBYTES)
+ throw Error("secret key is not valid");
+#endif
+}
+
+#if !HAVE_SODIUM
+[[noreturn]] static void noSodium()
+{
+ throw Error("Nix was not compiled with libsodium, required for signed binary cache support");
+}
+#endif
+
+std::string SecretKey::signDetached(const std::string & data) const
+{
+#if HAVE_SODIUM
+ unsigned char sig[crypto_sign_BYTES];
+ unsigned long long sigLen;
+ crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(),
+ (unsigned char *) key.data());
+ return name + ":" + base64Encode(std::string((char *) sig, sigLen));
+#else
+ noSodium();
+#endif
+}
+
+PublicKey SecretKey::toPublicKey() const
+{
+#if HAVE_SODIUM
+ unsigned char pk[crypto_sign_PUBLICKEYBYTES];
+ crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data());
+ return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES));
+#else
+ noSodium();
+#endif
+}
+
+PublicKey::PublicKey(const string & s)
+ : Key(s)
+{
+#if HAVE_SODIUM
+ if (key.size() != crypto_sign_PUBLICKEYBYTES)
+ throw Error("public key is not valid");
+#endif
+}
+
+bool verifyDetached(const std::string & data, const std::string & sig,
+ const PublicKeys & publicKeys)
+{
+#if HAVE_SODIUM
+ auto ss = split(sig);
+
+ auto key = publicKeys.find(ss.first);
+ if (key == publicKeys.end()) return false;
+
+ auto sig2 = base64Decode(ss.second);
+ if (sig2.size() != crypto_sign_BYTES)
+ throw Error("signature is not valid");
+
+ return crypto_sign_verify_detached((unsigned char *) sig2.data(),
+ (unsigned char *) data.data(), data.size(),
+ (unsigned char *) key->second.key.data()) == 0;
+#else
+ noSodium();
+#endif
+}
+
+PublicKeys getDefaultPublicKeys()
+{
+ PublicKeys publicKeys;
+
+ // FIXME: filter duplicates
+
+ for (auto s : settings.binaryCachePublicKeys.get()) {
+ PublicKey key(s);
+ publicKeys.emplace(key.name, key);
+ }
+
+ for (auto secretKeyFile : settings.secretKeyFiles.get()) {
+ try {
+ SecretKey secretKey(readFile(secretKeyFile));
+ publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
+ } catch (SysError & e) {
+ /* Ignore unreadable key files. That's normal in a
+ multi-user installation. */
+ }
+ }
+
+ return publicKeys;
+}
+
+}
diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh
new file mode 100644
index 000000000..9110af3aa
--- /dev/null
+++ b/src/libstore/crypto.hh
@@ -0,0 +1,54 @@
+#pragma once
+
+#include "types.hh"
+
+#include <map>
+
+namespace nix {
+
+struct Key
+{
+ std::string name;
+ std::string key;
+
+ /* Construct Key from a string in the format
+ ‘<name>:<key-in-base64>’. */
+ Key(const std::string & s);
+
+protected:
+ Key(const std::string & name, const std::string & key)
+ : name(name), key(key) { }
+};
+
+struct PublicKey;
+
+struct SecretKey : Key
+{
+ SecretKey(const std::string & s);
+
+ /* Return a detached signature of the given string. */
+ std::string signDetached(const std::string & s) const;
+
+ PublicKey toPublicKey() const;
+};
+
+struct PublicKey : Key
+{
+ PublicKey(const std::string & data);
+
+private:
+ PublicKey(const std::string & name, const std::string & key)
+ : Key(name, key) { }
+ friend struct SecretKey;
+};
+
+typedef std::map<std::string, PublicKey> PublicKeys;
+
+/* Return true iff ‘sig’ is a correct signature over ‘data’ using one
+ of the given public keys. */
+bool verifyDetached(const std::string & data, const std::string & sig,
+ const PublicKeys & publicKeys);
+
+PublicKeys getDefaultPublicKeys();
+
+}
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
new file mode 100644
index 000000000..0c6ceb9f6
--- /dev/null
+++ b/src/libstore/derivations.cc
@@ -0,0 +1,444 @@
+#include "derivations.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "util.hh"
+#include "worker-protocol.hh"
+#include "fs-accessor.hh"
+#include "istringstream_nocopy.hh"
+
+namespace nix {
+
+
+void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
+{
+ recursive = false;
+ string algo = hashAlgo;
+
+ if (string(algo, 0, 2) == "r:") {
+ recursive = true;
+ algo = string(algo, 2);
+ }
+
+ HashType hashType = parseHashType(algo);
+ if (hashType == htUnknown)
+ throw Error(format("unknown hash algorithm ‘%1%’") % algo);
+
+ hash = parseHash(hashType, this->hash);
+}
+
+
+Path BasicDerivation::findOutput(const string & id) const
+{
+ auto i = outputs.find(id);
+ if (i == outputs.end())
+ throw Error(format("derivation has no output ‘%1%’") % id);
+ return i->second.path;
+}
+
+
+bool BasicDerivation::willBuildLocally() const
+{
+ return get(env, "preferLocalBuild") == "1" && canBuildLocally();
+}
+
+
+bool BasicDerivation::substitutesAllowed() const
+{
+ return get(env, "allowSubstitutes", "1") == "1";
+}
+
+
+bool BasicDerivation::isBuiltin() const
+{
+ return string(builder, 0, 8) == "builtin:";
+}
+
+
+bool BasicDerivation::canBuildLocally() const
+{
+ return platform == settings.thisSystem
+ || isBuiltin()
+#if __linux__
+ || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
+ || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux")
+ || (platform == "armv5tel-linux" && (settings.thisSystem == "armv7l-linux" || settings.thisSystem == "armv6l-linux"))
+#elif __FreeBSD__
+ || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd")
+ || (platform == "i686-linux" && settings.thisSystem == "i686-freebsd")
+#endif
+ ;
+}
+
+
+Path writeDerivation(ref<Store> store,
+ const Derivation & drv, const string & name, bool repair)
+{
+ PathSet references;
+ references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
+ for (auto & i : drv.inputDrvs)
+ references.insert(i.first);
+ /* Note that the outputs of a derivation are *not* references:
+ they may be missing (of course) and should not necessarily be
+ kept alive during a garbage collection. */
+ string suffix = name + drvExtension;
+ string contents = drv.unparse();
+ return settings.readOnlyMode
+ ? store->computeStorePathForText(suffix, contents, references)
+ : store->addTextToStore(suffix, contents, references, repair);
+}
+
+
+/* Read string `s' from stream `str'. */
+static void expect(std::istream & str, const string & s)
+{
+ char s2[s.size()];
+ str.read(s2, s.size());
+ if (string(s2, s.size()) != s)
+ throw FormatError(format("expected string ‘%1%’") % s);
+}
+
+
+/* Read a C-style string from stream `str'. */
+static string parseString(std::istream & str)
+{
+ string res;
+ expect(str, "\"");
+ int c;
+ while ((c = str.get()) != '"')
+ if (c == '\\') {
+ c = str.get();
+ if (c == 'n') res += '\n';
+ else if (c == 'r') res += '\r';
+ else if (c == 't') res += '\t';
+ else res += c;
+ }
+ else res += c;
+ return res;
+}
+
+
+static Path parsePath(std::istream & str)
+{
+ string s = parseString(str);
+ if (s.size() == 0 || s[0] != '/')
+ throw FormatError(format("bad path ‘%1%’ in derivation") % s);
+ return s;
+}
+
+
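+/* Detect the end of a bracketed list: consumes a ‘,’ separator or the
+ closing ‘]’, returning true only in the latter case. */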
+static bool endOfList(std::istream & str)
+{
+ if (str.peek() == ',') {
+ str.get();
+ return false;
+ }
+ if (str.peek() == ']') {
+ str.get();
+ return true;
+ }
+ return false;
+}
+
+
+static StringSet parseStrings(std::istream & str, bool arePaths)
+{
+ StringSet res;
+ while (!endOfList(str))
+ res.insert(arePaths ? parsePath(str) : parseString(str));
+ return res;
+}
+
+
+static Derivation parseDerivation(const string & s)
+{
+ Derivation drv;
+ istringstream_nocopy str(s);
+ expect(str, "Derive([");
+
+ /* Parse the list of outputs. */
+ while (!endOfList(str)) {
+ DerivationOutput out;
+ expect(str, "("); string id = parseString(str);
+ expect(str, ","); out.path = parsePath(str);
+ expect(str, ","); out.hashAlgo = parseString(str);
+ expect(str, ","); out.hash = parseString(str);
+ expect(str, ")");
+ drv.outputs[id] = out;
+ }
+
+ /* Parse the list of input derivations. */
+ expect(str, ",[");
+ while (!endOfList(str)) {
+ expect(str, "(");
+ Path drvPath = parsePath(str);
+ expect(str, ",[");
+ drv.inputDrvs[drvPath] = parseStrings(str, false);
+ expect(str, ")");
+ }
+
+ expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
+ expect(str, ","); drv.platform = parseString(str);
+ expect(str, ","); drv.builder = parseString(str);
+
+ /* Parse the builder arguments. */
+ expect(str, ",[");
+ while (!endOfList(str))
+ drv.args.push_back(parseString(str));
+
+ /* Parse the environment variables. */
+ expect(str, ",[");
+ while (!endOfList(str)) {
+ expect(str, "("); string name = parseString(str);
+ expect(str, ","); string value = parseString(str);
+ expect(str, ")");
+ drv.env[name] = value;
+ }
+
+ expect(str, ")");
+ return drv;
+}
+
+
+Derivation readDerivation(const Path & drvPath)
+{
+ try {
+ return parseDerivation(readFile(drvPath));
+ } catch (FormatError & e) {
+ throw Error(format("error parsing derivation ‘%1%’: %2%") % drvPath % e.msg());
+ }
+}
+
+
+Derivation Store::derivationFromPath(const Path & drvPath)
+{
+ assertStorePath(drvPath);
+ ensurePath(drvPath);
+ auto accessor = getFSAccessor();
+ try {
+ return parseDerivation(accessor->readFile(drvPath));
+ } catch (FormatError & e) {
+ throw Error(format("error parsing derivation ‘%1%’: %2%") % drvPath % e.msg());
+ }
+}
+
+
+static void printString(string & res, const string & s)
+{
+ res += '"';
+ for (const char * i = s.c_str(); *i; i++)
+ if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
+ else if (*i == '\n') res += "\\n";
+ else if (*i == '\r') res += "\\r";
+ else if (*i == '\t') res += "\\t";
+ else res += *i;
+ res += '"';
+}
+
+
+template<class ForwardIterator>
+static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
+{
+ res += '[';
+ bool first = true;
+ for ( ; i != j; ++i) {
+ if (first) first = false; else res += ',';
+ printString(res, *i);
+ }
+ res += ']';
+}
+
+
+string Derivation::unparse() const
+{
+ string s;
+ s.reserve(65536);
+ s += "Derive([";
+
+ bool first = true;
+ for (auto & i : outputs) {
+ if (first) first = false; else s += ',';
+ s += '('; printString(s, i.first);
+ s += ','; printString(s, i.second.path);
+ s += ','; printString(s, i.second.hashAlgo);
+ s += ','; printString(s, i.second.hash);
+ s += ')';
+ }
+
+ s += "],[";
+ first = true;
+ for (auto & i : inputDrvs) {
+ if (first) first = false; else s += ',';
+ s += '('; printString(s, i.first);
+ s += ','; printStrings(s, i.second.begin(), i.second.end());
+ s += ')';
+ }
+
+ s += "],";
+ printStrings(s, inputSrcs.begin(), inputSrcs.end());
+
+ s += ','; printString(s, platform);
+ s += ','; printString(s, builder);
+ s += ','; printStrings(s, args.begin(), args.end());
+
+ s += ",[";
+ first = true;
+ for (auto & i : env) {
+ if (first) first = false; else s += ',';
+ s += '('; printString(s, i.first);
+ s += ','; printString(s, i.second);
+ s += ')';
+ }
+
+ s += "])";
+
+ return s;
+}
+
+
+bool isDerivation(const string & fileName)
+{
+ return hasSuffix(fileName, drvExtension);
+}
+
+
+bool BasicDerivation::isFixedOutput() const
+{
+ return outputs.size() == 1 &&
+ outputs.begin()->first == "out" &&
+ outputs.begin()->second.hash != "";
+}
+
+
+DrvHashes drvHashes;
+
+
+/* Returns the hash of a derivation modulo fixed-output
+ subderivations. A fixed-output derivation is a derivation with one
+ output (`out') for which an expected hash and hash algorithm are
+ specified (using the `outputHash' and `outputHashAlgo'
+ attributes). We don't want changes to such derivations to
+ propagate upwards through the dependency graph, changing output
+ paths everywhere.
+
+ For instance, if we change the url in a call to the `fetchurl'
+ function, we do not want to rebuild everything depending on it
+ (after all, (the hash of) the file being downloaded is unchanged).
+ So the *output paths* should not change. On the other hand, the
+ *derivation paths* should change to reflect the new dependency
+ graph.
+
+ That's what this function does: it returns a hash which is just the
+ hash of the derivation ATerm, except that any input derivation
+ paths have been replaced by the result of a recursive call to this
+ function, and that for fixed-output derivations we return a hash of
+ its output path. */
+Hash hashDerivationModulo(Store & store, Derivation drv)
+{
+ /* Return a fixed hash for fixed-output derivations. */
+ if (drv.isFixedOutput()) {
+ DerivationOutputs::const_iterator i = drv.outputs.begin();
+ return hashString(htSHA256, "fixed:out:"
+ + i->second.hashAlgo + ":"
+ + i->second.hash + ":"
+ + i->second.path);
+ }
+
+ /* For other derivations, replace the input paths with recursive
+ calls to this function. */
+ DerivationInputs inputs2;
+ for (auto & i : drv.inputDrvs) {
+ Hash h = drvHashes[i.first];
+ if (!h) {
+ assert(store.isValidPath(i.first));
+ Derivation drv2 = readDerivation(i.first);
+ h = hashDerivationModulo(store, drv2);
+ drvHashes[i.first] = h;
+ }
+ inputs2[printHash(h)] = i.second;
+ }
+ drv.inputDrvs = inputs2;
+
+ return hashString(htSHA256, drv.unparse());
+}
+
+
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
+{
+ size_t n = s.find("!");
+ return n == s.npos
+ ? DrvPathWithOutputs(s, std::set<string>())
+ : DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
+}
+
+
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
+{
+ return outputs.empty()
+ ? drvPath
+ : drvPath + "!" + concatStringsSep(",", outputs);
+}
+
+
+bool wantOutput(const string & output, const std::set<string> & wanted)
+{
+ return wanted.empty() || wanted.find(output) != wanted.end();
+}
+
+
+PathSet BasicDerivation::outputPaths() const
+{
+ PathSet paths;
+ for (auto & i : outputs)
+ paths.insert(i.second.path);
+ return paths;
+}
+
+
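+/* Read a BasicDerivation from a Source in its serialised form: the
+ outputs, the input sources, the platform, builder and arguments, and
+ finally the environment. */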
+Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
+{
+ drv.outputs.clear();
+ auto nr = readNum<size_t>(in);
+ for (size_t n = 0; n < nr; n++) {
+ auto name = readString(in);
+ DerivationOutput o;
+ in >> o.path >> o.hashAlgo >> o.hash;
+ store.assertStorePath(o.path);
+ drv.outputs[name] = o;
+ }
+
+ drv.inputSrcs = readStorePaths<PathSet>(store, in);
+ in >> drv.platform >> drv.builder;
+ drv.args = readStrings<Strings>(in);
+
+ nr = readNum<size_t>(in);
+ for (size_t n = 0; n < nr; n++) {
+ auto key = readString(in);
+ auto value = readString(in);
+ drv.env[key] = value;
+ }
+
+ return in;
+}
+
+
+Sink & operator << (Sink & out, const BasicDerivation & drv)
+{
+ out << drv.outputs.size();
+ for (auto & i : drv.outputs)
+ out << i.first << i.second.path << i.second.hashAlgo << i.second.hash;
+ out << drv.inputSrcs << drv.platform << drv.builder << drv.args;
+ out << drv.env.size();
+ for (auto & i : drv.env)
+ out << i.first << i.second;
+ return out;
+}
+
+
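+/* Return an output placeholder: ‘/’ followed by the base-32 SHA-256
+ hash of "nix-output:<outputName>". */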
+std::string hashPlaceholder(const std::string & outputName)
+{
+ // FIXME: memoize?
+ return "/" + printHash32(hashString(htSHA256, "nix-output:" + outputName));
+}
+
+
+}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
new file mode 100644
index 000000000..9717a81e4
--- /dev/null
+++ b/src/libstore/derivations.hh
@@ -0,0 +1,122 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+/* Extension of derivations in the Nix store. */
+const string drvExtension = ".drv";
+
+
+/* Abstract syntax of derivations. */
+
+struct DerivationOutput
+{
+ Path path;
+ string hashAlgo; /* hash algorithm used for the expected hash computation */
+ string hash; /* expected hash, may be empty */
+ DerivationOutput()
+ {
+ }
+ DerivationOutput(Path path, string hashAlgo, string hash)
+ {
+ this->path = path;
+ this->hashAlgo = hashAlgo;
+ this->hash = hash;
+ }
+ void parseHashInfo(bool & recursive, Hash & hash) const;
+};
+
+typedef std::map<string, DerivationOutput> DerivationOutputs;
+
+/* For inputs that are sub-derivations, we specify exactly which
+ output IDs we are interested in. */
+typedef std::map<Path, StringSet> DerivationInputs;
+
+typedef std::map<string, string> StringPairs;
+
+struct BasicDerivation
+{
+ DerivationOutputs outputs; /* keyed on symbolic IDs */
+ PathSet inputSrcs; /* inputs that are sources */
+ string platform;
+ Path builder;
+ Strings args;
+ StringPairs env;
+
+ virtual ~BasicDerivation() { };
+
+ /* Return the path corresponding to the output identifier `id' in
+ the given derivation. */
+ Path findOutput(const string & id) const;
+
+ bool willBuildLocally() const;
+
+ bool substitutesAllowed() const;
+
+ bool isBuiltin() const;
+
+ bool canBuildLocally() const;
+
+ /* Return true iff this is a fixed-output derivation. */
+ bool isFixedOutput() const;
+
+ /* Return the output paths of a derivation. */
+ PathSet outputPaths() const;
+
+};
+
+struct Derivation : BasicDerivation
+{
+ DerivationInputs inputDrvs; /* inputs that are sub-derivations */
+
+ /* Print a derivation. */
+ std::string unparse() const;
+};
+
+
+class Store;
+
+
+/* Write a derivation to the Nix store, and return its path. */
+Path writeDerivation(ref<Store> store,
+ const Derivation & drv, const string & name, bool repair = false);
+
+/* Read a derivation from a file. */
+Derivation readDerivation(const Path & drvPath);
+
+/* Check whether a file name ends with the extension for
+ derivations. */
+bool isDerivation(const string & fileName);
+
+Hash hashDerivationModulo(Store & store, Derivation drv);
+
+/* Memoisation of hashDerivationModulo(). */
+typedef std::map<Path, Hash> DrvHashes;
+
+extern DrvHashes drvHashes; // FIXME: global, not thread-safe
+
+/* Split a string specifying a derivation and a set of outputs
+ (/nix/store/hash-foo!out1,out2,...) into the derivation path and
+ the outputs. */
+typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);
+
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);
+
+bool wantOutput(const string & output, const std::set<string> & wanted);
+
+struct Source;
+struct Sink;
+
+Source & readDerivation(Source & in, Store & store, BasicDerivation & drv);
+Sink & operator << (Sink & out, const BasicDerivation & drv);
+
+std::string hashPlaceholder(const std::string & outputName);
+
+}
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
new file mode 100644
index 000000000..4d502219e
--- /dev/null
+++ b/src/libstore/download.cc
@@ -0,0 +1,708 @@
+#include "download.hh"
+#include "util.hh"
+#include "globals.hh"
+#include "hash.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "s3.hh"
+#include "compression.hh"
+
+#ifdef ENABLE_S3
+#include <aws/core/client/ClientConfiguration.h>
+#endif
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <curl/curl.h>
+
+#include <queue>
+#include <iostream>
+#include <thread>
+#include <cmath>
+#include <random>
+
+namespace nix {
+
+double getTime()
+{
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ return tv.tv_sec + (tv.tv_usec / 1000000.0);
+}
+
+std::string resolveUri(const std::string & uri)
+{
+ if (uri.compare(0, 8, "channel:") == 0)
+ return "https://nixos.org/channels/" + std::string(uri, 8) + "/nixexprs.tar.xz";
+ else
+ return uri;
+}
+
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
+{
+ if (encoding == "")
+ return data;
+ else if (encoding == "br")
+ return decompress(encoding, *data);
+ else
+ throw Error("unsupported Content-Encoding ‘%s’", encoding);
+}
+
+struct CurlDownloader : public Downloader
+{
+ CURLM * curlm = 0;
+
+ std::random_device rd;
+ std::mt19937 mt19937;
+
+ bool enableHttp2;
+
+ struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
+ {
+ CurlDownloader & downloader;
+ DownloadRequest request;
+ DownloadResult result;
+ bool done = false; // whether either the success or failure function has been called
+ std::function<void(const DownloadResult &)> success;
+ std::function<void(std::exception_ptr exc)> failure;
+ CURL * req = 0;
+ bool active = false; // whether the handle has been added to the multi object
+ std::string status;
+
+ bool showProgress = false;
+ double prevProgressTime{0}, startTime{0};
+ unsigned int moveBack{1};
+
+ unsigned int attempt = 0;
+
+ /* Don't start this download until the specified time point
+ has been reached. */
+ std::chrono::steady_clock::time_point embargo;
+
+ struct curl_slist * requestHeaders = 0;
+
+ std::string encoding;
+
+ DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
+ : downloader(downloader), request(request)
+ {
+ showProgress =
+ request.showProgress == DownloadRequest::yes ||
+ (request.showProgress == DownloadRequest::automatic && isatty(STDERR_FILENO));
+
+ if (!request.expectedETag.empty())
+ requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
+ }
+
+ ~DownloadItem()
+ {
+ if (req) {
+ if (active)
+ curl_multi_remove_handle(downloader.curlm, req);
+ curl_easy_cleanup(req);
+ }
+ if (requestHeaders) curl_slist_free_all(requestHeaders);
+ try {
+ if (!done)
+ fail(DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri));
+ } catch (...) {
+ ignoreException();
+ }
+ }
+
+ template<class T>
+ void fail(const T & e)
+ {
+ assert(!done);
+ done = true;
+ callFailure(failure, std::make_exception_ptr(e));
+ }
+
+ size_t writeCallback(void * contents, size_t size, size_t nmemb)
+ {
+ size_t realSize = size * nmemb;
+ result.data->append((char *) contents, realSize);
+ return realSize;
+ }
+
+ static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+ {
+ return ((DownloadItem *) userp)->writeCallback(contents, size, nmemb);
+ }
+
+ size_t headerCallback(void * contents, size_t size, size_t nmemb)
+ {
+ size_t realSize = size * nmemb;
+ std::string line((char *) contents, realSize);
+ printMsg(lvlVomit, format("got header for ‘%s’: %s") % request.uri % trim(line));
+ if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
+ result.etag = "";
+ auto ss = tokenizeString<vector<string>>(line, " ");
+ status = ss.size() >= 2 ? ss[1] : "";
+ result.data = std::make_shared<std::string>();
+ encoding = "";
+ } else {
+ auto i = line.find(':');
+ if (i != string::npos) {
+ string name = toLower(trim(string(line, 0, i)));
+ if (name == "etag") {
+ result.etag = trim(string(line, i + 1));
+ /* Hack to work around a GitHub bug: it sends
+ ETags, but ignores If-None-Match. So if we get
+ the expected ETag on a 200 response, then shut
+ down the connection because we already have the
+ data. */
+ if (result.etag == request.expectedETag && status == "200") {
+ debug(format("shutting down on 200 HTTP response with expected ETag"));
+ return 0;
+ }
+ } else if (name == "content-encoding")
+ encoding = trim(string(line, i + 1));
+ }
+ }
+ return realSize;
+ }
+
+ static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+ {
+ return ((DownloadItem *) userp)->headerCallback(contents, size, nmemb);
+ }
+
+ int progressCallback(double dltotal, double dlnow)
+ {
+ if (showProgress) {
+ double now = getTime();
+ if (prevProgressTime <= now - 1) {
+ string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]")
+ % (dlnow / 1024.0)
+ % (dltotal / 1024.0)
+ % (now == startTime ? 0 : dlnow / 1024.0 / (now - startTime))).str();
+ std::cerr << "\e[" << moveBack << "D" << s;
+ moveBack = s.size();
+ std::cerr.flush();
+ prevProgressTime = now;
+ }
+ }
+ return _isInterrupted;
+ }
+
+ static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
+ {
+ return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow);
+ }
+
+ static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
+ {
+ if (type == CURLINFO_TEXT)
+ vomit("curl: %s", chomp(std::string(data, size)));
+ return 0;
+ }
+
+ void init()
+ {
+ // FIXME: handle parallel downloads.
+ if (showProgress) {
+ std::cerr << (format("downloading ‘%1%’... ") % request.uri);
+ std::cerr.flush();
+ startTime = getTime();
+ }
+
+ if (!req) req = curl_easy_init();
+
+ curl_easy_reset(req);
+
+ if (verbosity >= lvlVomit) {
+ curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
+ curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, DownloadItem::debugCallback);
+ }
+
+ curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
+ curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
+ curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Nix/" + nixVersion).c_str());
+ #if LIBCURL_VERSION_NUM >= 0x072b00
+ curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
+ #endif
+ #if LIBCURL_VERSION_NUM >= 0x072f00
+ if (downloader.enableHttp2)
+ curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
+ #endif
+ curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
+ curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, DownloadItem::headerCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
+
+ curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
+ curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);
+
+ curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
+
+ if (request.head)
+ curl_easy_setopt(req, CURLOPT_NOBODY, 1);
+
+ if (request.verifyTLS)
+ curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
+ else {
+ curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
+ curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
+ }
+
+ curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get());
+
+ /* If no file exists at the specified path, curl continues to work
+ as if netrc support was disabled. */
+ curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str());
+ curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
+
+ result.data = std::make_shared<std::string>();
+ }
+
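+ /* Called from the worker thread when curl has finished this
+ transfer; translates the curl/HTTP status into success, failure
+ or a retry after an embargo period. */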
+ void finish(CURLcode code)
+ {
+ if (showProgress)
+ //std::cerr << "\e[" << moveBack << "D\e[K\n";
+ std::cerr << "\n";
+
+ long httpStatus = 0;
+ curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
+
+ char * effectiveUrlCStr;
+ curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUrlCStr);
+ if (effectiveUrlCStr)
+ result.effectiveUrl = effectiveUrlCStr;
+
+ debug(format("finished download of ‘%s’; curl status = %d, HTTP status = %d, body = %d bytes")
+ % request.uri % code % httpStatus % (result.data ? result.data->size() : 0));
+
+ if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
+ code = CURLE_OK;
+ httpStatus = 304;
+ }
+
+ if (code == CURLE_OK &&
+ (httpStatus == 200 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
+ {
+ result.cached = httpStatus == 304;
+ done = true;
+
+ try {
+ result.data = decodeContent(encoding, ref<std::string>(result.data));
+ callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+ } catch (...) {
+ done = true;
+ callFailure(failure, std::current_exception());
+ }
+ } else {
+ Error err =
+ (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound :
+ httpStatus == 403 ? Forbidden :
+ (httpStatus == 408 || httpStatus == 500 || httpStatus == 503
+ || httpStatus == 504 || httpStatus == 522 || httpStatus == 524
+ || code == CURLE_COULDNT_RESOLVE_HOST
+ || code == CURLE_RECV_ERROR
+
+ // this seems to occur occasionally for retriable reasons, and shows up in an error like this:
+ // curl: (23) Failed writing body (315 != 16366)
+ || code == CURLE_WRITE_ERROR
+
+ // this is a generic SSL failure that in some cases (e.g., certificate error) is permanent but also appears in transient cases, so we consider it retryable
+ || code == CURLE_SSL_CONNECT_ERROR
+#if LIBCURL_VERSION_NUM >= 0x073200
+ || code == CURLE_HTTP2
+ || code == CURLE_HTTP2_STREAM
+#endif
+ ) ? Transient :
+ Misc;
+
+ attempt++;
+
+ auto exc =
+ code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
+ ? DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri)
+ : httpStatus != 0
+ ? DownloadError(err, format("unable to download ‘%s’: HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code))
+ : DownloadError(err, format("unable to download ‘%s’: %s (%d)") % request.uri % curl_easy_strerror(code) % code);
+
+ /* If this is a transient error, then maybe retry the
+ download after a while. */
+ if (err == Transient && attempt < request.tries) {
+ int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
+ printError(format("warning: %s; retrying in %d ms") % exc.what() % ms);
+ embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
+ downloader.enqueueItem(shared_from_this());
+ }
+ else
+ fail(exc);
+ }
+ }
+ };
+
+ struct State
+ {
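+ /* Order items by embargo time so that the download that may start
+ earliest is at the top of the priority queue. */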
+ struct EmbargoComparator {
+ bool operator() (const std::shared_ptr<DownloadItem> & i1, const std::shared_ptr<DownloadItem> & i2) {
+ return i1->embargo > i2->embargo;
+ }
+ };
+ bool quit = false;
+ std::priority_queue<std::shared_ptr<DownloadItem>, std::vector<std::shared_ptr<DownloadItem>>, EmbargoComparator> incoming;
+ };
+
+ Sync<State> state_;
+
+ /* We can't use a std::condition_variable to wake up the curl
+ thread, because it only monitors file descriptors. So use a
+ pipe instead. */
+ Pipe wakeupPipe;
+
+ std::thread workerThread;
+
+ CurlDownloader()
+ : mt19937(rd())
+ {
+ static std::once_flag globalInit;
+ std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);
+
+ curlm = curl_multi_init();
+
+ #if LIBCURL_VERSION_NUM >= 0x072b00 // correct?
+ curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
+ #endif
+ curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
+ settings.binaryCachesParallelConnections.get());
+
+ enableHttp2 = settings.enableHttp2;
+
+ wakeupPipe.create();
+ fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
+
+ workerThread = std::thread([&]() { workerThreadEntry(); });
+ }
+
+ ~CurlDownloader()
+ {
+ stopWorkerThread();
+
+ workerThread.join();
+
+ if (curlm) curl_multi_cleanup(curlm);
+ }
+
+ void stopWorkerThread()
+ {
+ /* Signal the worker thread to exit. */
+ {
+ auto state(state_.lock());
+ state->quit = true;
+ }
+ writeFull(wakeupPipe.writeSide.get(), " ", false);
+ }
+
+ void workerThreadMain()
+ {
+ /* Cause this thread to be notified on SIGINT. */
+ auto callback = createInterruptCallback([&]() {
+ stopWorkerThread();
+ });
+
+ std::map<CURL *, std::shared_ptr<DownloadItem>> items;
+
+ bool quit = false;
+
+ std::chrono::steady_clock::time_point nextWakeup;
+
+ while (!quit) {
+ checkInterrupt();
+
+ /* Let curl do its thing. */
+ int running;
+ CURLMcode mc = curl_multi_perform(curlm, &running);
+ if (mc != CURLM_OK)
+ throw nix::Error(format("unexpected error from curl_multi_perform(): %s") % curl_multi_strerror(mc));
+
+ /* Set the promises of any finished requests. */
+ CURLMsg * msg;
+ int left;
+ while ((msg = curl_multi_info_read(curlm, &left))) {
+ if (msg->msg == CURLMSG_DONE) {
+ auto i = items.find(msg->easy_handle);
+ assert(i != items.end());
+ i->second->finish(msg->data.result);
+ curl_multi_remove_handle(curlm, i->second->req);
+ i->second->active = false;
+ items.erase(i);
+ }
+ }
+
+ /* Wait for activity, including wakeup events. */
+ int numfds = 0;
+ struct curl_waitfd extraFDs[1];
+ extraFDs[0].fd = wakeupPipe.readSide.get();
+ extraFDs[0].events = CURL_WAIT_POLLIN;
+ extraFDs[0].revents = 0;
+ auto sleepTimeMs =
+ nextWakeup != std::chrono::steady_clock::time_point()
+ ? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
+ : 1000000000;
+ vomit("download thread waiting for %d ms", sleepTimeMs);
+ mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
+ if (mc != CURLM_OK)
+ throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % curl_multi_strerror(mc));
+
+ nextWakeup = std::chrono::steady_clock::time_point();
+
+ /* Add new curl requests from the incoming requests queue,
+ except for requests that are embargoed (waiting for a
+ retry timeout to expire). */
+ if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
+ char buf[1024];
+ auto res = read(extraFDs[0].fd, buf, sizeof(buf));
+ if (res == -1 && errno != EINTR)
+ throw SysError("reading curl wakeup socket");
+ }
+
+ std::vector<std::shared_ptr<DownloadItem>> incoming;
+ auto now = std::chrono::steady_clock::now();
+
+ {
+ auto state(state_.lock());
+ while (!state->incoming.empty()) {
+ auto item = state->incoming.top();
+ if (item->embargo <= now) {
+ incoming.push_back(item);
+ state->incoming.pop();
+ } else {
+ if (nextWakeup == std::chrono::steady_clock::time_point()
+ || item->embargo < nextWakeup)
+ nextWakeup = item->embargo;
+ break;
+ }
+ }
+ quit = state->quit;
+ }
+
+ for (auto & item : incoming) {
+ debug(format("starting download of %s") % item->request.uri);
+ item->init();
+ curl_multi_add_handle(curlm, item->req);
+ item->active = true;
+ items[item->req] = item;
+ }
+ }
+
+ debug("download thread shutting down");
+ }
+
+ void workerThreadEntry()
+ {
+ try {
+ workerThreadMain();
+ } catch (nix::Interrupted & e) {
+ } catch (std::exception & e) {
+ printError(format("unexpected error in download thread: %s") % e.what());
+ }
+
+ {
+ auto state(state_.lock());
+ while (!state->incoming.empty()) state->incoming.pop();
+ state->quit = true;
+ }
+ }
+
+ void enqueueItem(std::shared_ptr<DownloadItem> item)
+ {
+ {
+ auto state(state_.lock());
+ if (state->quit)
+ throw nix::Error("cannot enqueue download request because the download thread is shutting down");
+ state->incoming.push(item);
+ }
+ writeFull(wakeupPipe.writeSide.get(), " ");
+ }
+
+ void enqueueDownload(const DownloadRequest & request,
+ std::function<void(const DownloadResult &)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ /* Ugly hack to support s3:// URIs. */
+ if (hasPrefix(request.uri, "s3://")) {
+ // FIXME: do this on a worker thread
+ sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
+#ifdef ENABLE_S3
+ S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable
+ auto slash = request.uri.find('/', 5);
+ if (slash == std::string::npos)
+ throw nix::Error("bad S3 URI ‘%s’", request.uri);
+ std::string bucketName(request.uri, 5, slash - 5);
+ std::string key(request.uri, slash + 1);
+ // FIXME: implement ETag
+ auto s3Res = s3Helper.getObject(bucketName, key);
+ DownloadResult res;
+ if (!s3Res.data)
+ throw DownloadError(NotFound, fmt("S3 object ‘%s’ does not exist", request.uri));
+ res.data = s3Res.data;
+ return res;
+#else
+ throw nix::Error("cannot download ‘%s’ because Nix is not built with S3 support", request.uri);
+#endif
+ });
+ return;
+ }
+
+ auto item = std::make_shared<DownloadItem>(*this, request);
+ item->success = success;
+ item->failure = failure;
+ enqueueItem(item);
+ }
+};
+
+ref<Downloader> getDownloader()
+{
+ static std::shared_ptr<Downloader> downloader;
+ static std::once_flag downloaderCreated;
+ std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); });
+ return ref<Downloader>(downloader);
+}
+
+ref<Downloader> makeDownloader()
+{
+ return make_ref<CurlDownloader>();
+}
+
+std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & request)
+{
+ auto promise = std::make_shared<std::promise<DownloadResult>>();
+ enqueueDownload(request,
+ [promise](const DownloadResult & result) { promise->set_value(result); },
+ [promise](std::exception_ptr exc) { promise->set_exception(exc); });
+ return promise->get_future();
+}
+
+DownloadResult Downloader::download(const DownloadRequest & request)
+{
+ return enqueueDownload(request).get();
+}
+
+Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl)
+{
+ auto url = resolveUri(url_);
+
+ if (name == "") {
+ auto p = url.rfind('/');
+ if (p != string::npos) name = string(url, p + 1);
+ }
+
+ Path expectedStorePath;
+ if (expectedHash) {
+ expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name);
+ if (store->isValidPath(expectedStorePath))
+ return expectedStorePath;
+ }
+
+ Path cacheDir = getCacheDir() + "/nix/tarballs";
+ createDirs(cacheDir);
+
+ string urlHash = printHash32(hashString(htSHA256, url));
+
+ Path dataFile = cacheDir + "/" + urlHash + ".info";
+ Path fileLink = cacheDir + "/" + urlHash + "-file";
+
+ Path storePath;
+
+ string expectedETag;
+
+ int ttl = settings.tarballTtl;
+ bool skip = false;
+
+ if (pathExists(fileLink) && pathExists(dataFile)) {
+ storePath = readLink(fileLink);
+ store->addTempRoot(storePath);
+ if (store->isValidPath(storePath)) {
+ auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n");
+ if (ss.size() >= 3 && ss[0] == url) {
+ time_t lastChecked;
+ if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0)) {
+ skip = true;
+ if (effectiveUrl)
+ *effectiveUrl = url_;
+ } else if (!ss[1].empty()) {
+ debug(format("verifying previous ETag ‘%1%’") % ss[1]);
+ expectedETag = ss[1];
+ }
+ }
+ } else
+ storePath = "";
+ }
+
+ if (!skip) {
+
+ try {
+ DownloadRequest request(url);
+ request.expectedETag = expectedETag;
+ auto res = download(request);
+ if (effectiveUrl)
+ *effectiveUrl = res.effectiveUrl;
+
+ if (!res.cached) {
+ ValidPathInfo info;
+ StringSink sink;
+ dumpString(*res.data, sink);
+ Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data);
+ info.path = store->makeFixedOutputPath(false, hash, name);
+ info.narHash = hashString(htSHA256, *sink.s);
+ info.ca = makeFixedOutputCA(false, hash);
+ store->addToStore(info, sink.s, false, true);
+ storePath = info.path;
+ }
+
+ assert(!storePath.empty());
+ replaceSymlink(storePath, fileLink);
+
+ writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
+ } catch (DownloadError & e) {
+ if (storePath.empty()) throw;
+ printError(format("warning: %1%; using cached result") % e.msg());
+ }
+ }
+
+ if (unpack) {
+ Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked";
+ Path unpackedStorePath;
+ if (pathExists(unpackedLink)) {
+ unpackedStorePath = readLink(unpackedLink);
+ store->addTempRoot(unpackedStorePath);
+ if (!store->isValidPath(unpackedStorePath))
+ unpackedStorePath = "";
+ }
+ if (unpackedStorePath.empty()) {
+ printInfo(format("unpacking ‘%1%’...") % url);
+ Path tmpDir = createTempDir();
+ AutoDelete autoDelete(tmpDir, true);
+ // FIXME: this requires GNU tar for decompression.
+ runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"});
+ unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false);
+ }
+ replaceSymlink(unpackedStorePath, unpackedLink);
+ storePath = unpackedStorePath;
+ }
+
+ if (expectedStorePath != "" && storePath != expectedStorePath)
+ throw nix::Error(format("hash mismatch in file downloaded from ‘%s’") % url);
+
+ return storePath;
+}
+
+
+bool isUri(const string & s)
+{
+ if (s.compare(0, 8, "channel:") == 0) return true;
+ size_t pos = s.find("://");
+ if (pos == string::npos) return false;
+ string scheme(s, 0, pos);
+ return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3";
+}
+
+
+}
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
new file mode 100644
index 000000000..62f3860b9
--- /dev/null
+++ b/src/libstore/download.hh
@@ -0,0 +1,79 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+#include <string>
+#include <future>
+
+namespace nix {
+
+struct DownloadRequest
+{
+ std::string uri;
+ std::string expectedETag;
+ bool verifyTLS = true;
+ enum { yes, no, automatic } showProgress = yes;
+ bool head = false;
+ size_t tries = 5;
+ unsigned int baseRetryTimeMs = 250;
+
+ DownloadRequest(const std::string & uri) : uri(uri) { }
+};
+
+struct DownloadResult
+{
+ bool cached = false;
+ std::string etag;
+ std::string effectiveUrl;
+ std::shared_ptr<std::string> data;
+};
+
+class Store;
+
+struct Downloader
+{
+ /* Enqueue a download request, returning a future to the result of
+ the download. The future may throw a DownloadError
+ exception. */
+ virtual void enqueueDownload(const DownloadRequest & request,
+ std::function<void(const DownloadResult &)> success,
+ std::function<void(std::exception_ptr exc)> failure) = 0;
+
+ std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
+
+ /* Synchronously download a file. */
+ DownloadResult download(const DownloadRequest & request);
+
+ /* Check whether the specified file is already in ~/.cache/nix/tarballs
+ and is less than ‘tarball-ttl’ seconds old. If not, use the
+ recorded ETag to check whether the server has a more recent
+ version, and if so, download it to the Nix store. */
+ Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "",
+ const Hash & expectedHash = Hash(), string * effectiveUri = nullptr);
+
+ enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
+};
+
+/* Return a shared Downloader object. Using this object is preferred
+ because it enables connection reuse and HTTP/2 multiplexing. */
+ref<Downloader> getDownloader();
+
+/* Return a new Downloader object. */
+ref<Downloader> makeDownloader();
+
+class DownloadError : public Error
+{
+public:
+ Downloader::Error error;
+ DownloadError(Downloader::Error error, const FormatOrString & fs)
+ : Error(fs), error(error)
+ { }
+};
+
+bool isUri(const string & s);
+
+/* Decode data according to the Content-Encoding header. */
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
+
+}
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
new file mode 100644
index 000000000..2b8ab063e
--- /dev/null
+++ b/src/libstore/export-import.cc
@@ -0,0 +1,106 @@
+#include "store-api.hh"
+#include "archive.hh"
+#include "worker-protocol.hh"
+
+#include <algorithm>
+
+namespace nix {
+
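+/* A sink that passes data on to another sink while computing a
+ SHA-256 hash of everything written. */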
+struct HashAndWriteSink : Sink
+{
+ Sink & writeSink;
+ HashSink hashSink;
+ HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
+ {
+ }
+ virtual void operator () (const unsigned char * data, size_t len)
+ {
+ writeSink(data, len);
+ hashSink(data, len);
+ }
+ Hash currentHash()
+ {
+ return hashSink.currentHash().first;
+ }
+};
+
+void Store::exportPaths(const Paths & paths, Sink & sink)
+{
+ Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end()));
+ std::reverse(sorted.begin(), sorted.end());
+
+ std::string doneLabel("paths exported");
+ logger->incExpected(doneLabel, sorted.size());
+
+ for (auto & path : sorted) {
+ Activity act(*logger, lvlInfo, format("exporting path ‘%s’") % path);
+ sink << 1;
+ exportPath(path, sink);
+ logger->incProgress(doneLabel);
+ }
+
+ sink << 0;
+}
+
+void Store::exportPath(const Path & path, Sink & sink)
+{
+ auto info = queryPathInfo(path);
+
+ HashAndWriteSink hashAndWriteSink(sink);
+
+ narFromPath(path, hashAndWriteSink);
+
+ /* Refuse to export paths that have changed. This prevents
+ filesystem corruption from spreading to other machines.
+ Don't complain if the stored hash is zero (unknown). */
+ Hash hash = hashAndWriteSink.currentHash();
+ if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
+ throw Error(format("hash of path ‘%1%’ has changed from ‘%2%’ to ‘%3%’!") % path
+ % printHash(info->narHash) % printHash(hash));
+
+ hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
+}
+
+Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, bool dontCheckSigs)
+{
+ Paths res;
+ while (true) {
+ auto n = readNum<uint64_t>(source);
+ if (n == 0) break;
+ if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’");
+
+ /* Extract the NAR from the source. */
+ TeeSink tee(source);
+ parseDump(tee, tee.source);
+
+ uint32_t magic = readInt(source);
+ if (magic != exportMagic)
+ throw Error("Nix archive cannot be imported; wrong format");
+
+ ValidPathInfo info;
+
+ info.path = readStorePath(*this, source);
+
+ Activity act(*logger, lvlInfo, format("importing path ‘%s’") % info.path);
+
+ info.references = readStorePaths<PathSet>(*this, source);
+
+ info.deriver = readString(source);
+ if (info.deriver != "") assertStorePath(info.deriver);
+
+ info.narHash = hashString(htSHA256, *tee.source.data);
+ info.narSize = tee.source.data->size();
+
+ // Ignore optional legacy signature.
+ if (readInt(source) == 1)
+ readString(source);
+
+ addToStore(info, tee.source.data, false, dontCheckSigs, accessor);
+
+ res.push_back(info.path);
+ }
+
+ return res;
+}
+
+}
diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh
new file mode 100644
index 000000000..a67e0775b
--- /dev/null
+++ b/src/libstore/fs-accessor.hh
@@ -0,0 +1,30 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+/* An abstract class for accessing a filesystem-like structure, such
+ as a (possibly remote) Nix store or the contents of a NAR file. */
+class FSAccessor
+{
+public:
+ enum Type { tMissing, tRegular, tSymlink, tDirectory };
+
+ struct Stat
+ {
+ Type type;
+ uint64_t fileSize; // regular files only
+ bool isExecutable; // regular files only
+ };
+
+ virtual Stat stat(const Path & path) = 0;
+
+ virtual StringSet readDirectory(const Path & path) = 0;
+
+ virtual std::string readFile(const Path & path) = 0;
+
+ virtual std::string readLink(const Path & path) = 0;
+};
+
+}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
new file mode 100644
index 000000000..3e7e42cbc
--- /dev/null
+++ b/src/libstore/gc.cc
@@ -0,0 +1,851 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "local-store.hh"
+
+#include <functional>
+#include <queue>
+#include <algorithm>
+#include <regex>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <climits>
+
+namespace nix {
+
+
+static string gcLockName = "gc.lock";
+static string tempRootsDir = "temproots";
+static string gcRootsDir = "gcroots";
+
+
+/* Acquire the global GC lock. This is used to prevent new Nix
+ processes from starting after the temporary root files have been
+ read. To be precise: when they try to create a new temporary root
+ file, they will block until the garbage collector has finished /
+ yielded the GC lock. */
+int LocalStore::openGCLock(LockType lockType)
+{
+ Path fnGCLock = (format("%1%/%2%")
+ % stateDir % gcLockName).str();
+
+ debug(format("acquiring global GC lock ‘%1%’") % fnGCLock);
+
+ AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fdGCLock)
+ throw SysError(format("opening global GC lock ‘%1%’") % fnGCLock);
+
+ if (!lockFile(fdGCLock.get(), lockType, false)) {
+ printError(format("waiting for the big garbage collector lock..."));
+ lockFile(fdGCLock.get(), lockType, true);
+ }
+
+ /* !!! Restrict read permission on the GC lock. Otherwise any
+ process that can open the file for reading can DoS the
+ collector. */
+
+ return fdGCLock.release();
+}
+
+
+static void makeSymlink(const Path & link, const Path & target)
+{
+ /* Create directories up to `link'. */
+ createDirs(dirOf(link));
+
+ /* Create the new symlink. */
+ Path tempLink = (format("%1%.tmp-%2%-%3%")
+ % link % getpid() % rand()).str();
+ createSymlink(target, tempLink);
+
+ /* Atomically replace the old one. */
+ if (rename(tempLink.c_str(), link.c_str()) == -1)
+ throw SysError(format("cannot rename ‘%1%’ to ‘%2%’")
+ % tempLink % link);
+}
+
+
+void LocalStore::syncWithGC()
+{
+ AutoCloseFD fdGCLock = openGCLock(ltRead);
+}
+
+
+void LocalStore::addIndirectRoot(const Path & path)
+{
+ string hash = printHash32(hashString(htSHA1, path));
+ Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
+ % stateDir % gcRootsDir % hash).str());
+ makeSymlink(realRoot, path);
+}
+
+
+Path LocalFSStore::addPermRoot(const Path & _storePath,
+ const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
+{
+ Path storePath(canonPath(_storePath));
+ Path gcRoot(canonPath(_gcRoot));
+ assertStorePath(storePath);
+
+ if (isInStore(gcRoot))
+ throw Error(format(
+ "creating a garbage collector root (%1%) in the Nix store is forbidden "
+ "(are you running nix-build inside the store?)") % gcRoot);
+
+ if (indirect) {
+ /* Don't clobber the link if it already exists and doesn't
+ point to the Nix store. */
+ if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
+ throw Error(format("cannot create symlink ‘%1%’; already exists") % gcRoot);
+ makeSymlink(gcRoot, storePath);
+ addIndirectRoot(gcRoot);
+ }
+
+ else {
+ if (!allowOutsideRootsDir) {
+ Path rootsDir = canonPath((format("%1%/%2%") % stateDir % gcRootsDir).str());
+
+ if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
+ throw Error(format(
+ "path ‘%1%’ is not a valid garbage collector root; "
+ "it's not in the directory ‘%2%’")
+ % gcRoot % rootsDir);
+ }
+
+ if (baseNameOf(gcRoot) == baseNameOf(storePath))
+ writeFile(gcRoot, "");
+ else
+ makeSymlink(gcRoot, storePath);
+ }
+
+ /* Check that the root can be found by the garbage collector.
+ !!! This can be very slow on machines that have many roots.
+ Instead of reading all the roots, it would be more efficient to
+ check if the root is in a directory in or linked from the
+ gcroots directory. */
+ if (settings.checkRootReachability) {
+ Roots roots = findRoots();
+ if (roots.find(gcRoot) == roots.end())
+ printError(
+ format(
+ "warning: ‘%1%’ is not in a directory where the garbage collector looks for roots; "
+ "therefore, ‘%2%’ might be removed by the garbage collector")
+ % gcRoot % storePath);
+ }
+
+ /* Grab the global GC root, causing us to block while a GC is in
+ progress. This prevents the set of permanent roots from
+ increasing while a GC is in progress. */
+ syncWithGC();
+
+ return gcRoot;
+}
+
+
+void LocalStore::addTempRoot(const Path & path)
+{
+ auto state(_state.lock());
+
+ /* Create the temporary roots file for this process. */
+ if (!state->fdTempRoots) {
+
+ while (1) {
+ Path dir = (format("%1%/%2%") % stateDir % tempRootsDir).str();
+ createDirs(dir);
+
+ state->fnTempRoots = (format("%1%/%2%") % dir % getpid()).str();
+
+ AutoCloseFD fdGCLock = openGCLock(ltRead);
+
+ if (pathExists(state->fnTempRoots))
+ /* It *must* be stale, since there can be no two
+ processes with the same pid. */
+ unlink(state->fnTempRoots.c_str());
+
+ state->fdTempRoots = openLockFile(state->fnTempRoots, true);
+
+ fdGCLock = -1;
+
+ debug(format("acquiring read lock on ‘%1%’") % state->fnTempRoots);
+ lockFile(state->fdTempRoots.get(), ltRead, true);
+
+ /* Check whether the garbage collector didn't get in our
+ way. */
+ struct stat st;
+ if (fstat(state->fdTempRoots.get(), &st) == -1)
+ throw SysError(format("statting ‘%1%’") % state->fnTempRoots);
+ if (st.st_size == 0) break;
+
+ /* The garbage collector deleted this file before we could
+ get a lock. (It won't delete the file after we get a
+ lock.) Try again. */
+ }
+
+ }
+
+ /* Upgrade the lock to a write lock. This will cause us to block
+ if the garbage collector is holding our lock. */
+ debug(format("acquiring write lock on ‘%1%’") % state->fnTempRoots);
+ lockFile(state->fdTempRoots.get(), ltWrite, true);
+
+ string s = path + '\0';
+ writeFull(state->fdTempRoots.get(), s);
+
+ /* Downgrade to a read lock. */
+ debug(format("downgrading to read lock on ‘%1%’") % state->fnTempRoots);
+ lockFile(state->fdTempRoots.get(), ltRead, true);
+}
+
+
+void LocalStore::readTempRoots(PathSet & tempRoots, FDs & fds)
+{
+ /* Read the `temproots' directory for per-process temporary root
+ files. */
+ DirEntries tempRootFiles = readDirectory(
+ (format("%1%/%2%") % stateDir % tempRootsDir).str());
+
+ for (auto & i : tempRootFiles) {
+ Path path = (format("%1%/%2%/%3%") % stateDir % tempRootsDir % i.name).str();
+
+ debug(format("reading temporary root file ‘%1%’") % path);
+ FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)));
+ if (!*fd) {
+ /* It's okay if the file has disappeared. */
+ if (errno == ENOENT) continue;
+ throw SysError(format("opening temporary roots file ‘%1%’") % path);
+ }
+
+ /* This should work, but doesn't, for some reason. */
+ //FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
+ //if (*fd == -1) continue;
+
+ /* Try to acquire a write lock without blocking. This can
+ only succeed if the owning process has died. In that case
+ we don't care about its temporary roots. */
+ if (lockFile(fd->get(), ltWrite, false)) {
+ printError(format("removing stale temporary roots file ‘%1%’") % path);
+ unlink(path.c_str());
+ writeFull(fd->get(), "d");
+ continue;
+ }
+
+ /* Acquire a read lock. This will prevent the owning process
+ from upgrading to a write lock, therefore it will block in
+ addTempRoot(). */
+ debug(format("waiting for read lock on ‘%1%’") % path);
+ lockFile(fd->get(), ltRead, true);
+
+ /* Read the entire file. */
+ string contents = readFile(fd->get());
+
+ /* Extract the roots. */
+ string::size_type pos = 0, end;
+
+ while ((end = contents.find((char) 0, pos)) != string::npos) {
+ Path root(contents, pos, end - pos);
+ debug(format("got temporary root ‘%1%’") % root);
+ assertStorePath(root);
+ tempRoots.insert(root);
+ pos = end + 1;
+ }
+
+ fds.push_back(fd); /* keep open */
+ }
+}
+
+
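+/* Recursively look for GC roots under ‘path’: symlinks into the
+ store, indirect roots (symlinks to symlinks), and regular files
+ whose names correspond to store paths. */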
+void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
+{
+ auto foundRoot = [&](const Path & path, const Path & target) {
+ Path storePath = toStorePath(target);
+ if (isStorePath(storePath) && isValidPath(storePath))
+ roots[path] = storePath;
+ else
+ printInfo(format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath);
+ };
+
+ try {
+
+ if (type == DT_UNKNOWN)
+ type = getFileType(path);
+
+ if (type == DT_DIR) {
+ for (auto & i : readDirectory(path))
+ findRoots(path + "/" + i.name, i.type, roots);
+ }
+
+ else if (type == DT_LNK) {
+ Path target = readLink(path);
+ if (isInStore(target))
+ foundRoot(path, target);
+
+ /* Handle indirect roots. */
+ else {
+ target = absPath(target, dirOf(path));
+ if (!pathExists(target)) {
+ if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
+ printInfo(format("removing stale link from ‘%1%’ to ‘%2%’") % path % target);
+ unlink(path.c_str());
+ }
+ } else {
+ struct stat st2 = lstat(target);
+ if (!S_ISLNK(st2.st_mode)) return;
+ Path target2 = readLink(target);
+ if (isInStore(target2)) foundRoot(target, target2);
+ }
+ }
+ }
+
+ else if (type == DT_REG) {
+ Path storePath = storeDir + "/" + baseNameOf(path);
+ if (isStorePath(storePath) && isValidPath(storePath))
+ roots[path] = storePath;
+ }
+
+ }
+
+ catch (SysError & e) {
+ /* We only ignore permanent failures. */
+ if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
+ printInfo(format("cannot read potential root ‘%1%’") % path);
+ else
+ throw;
+ }
+}
+
+
+Roots LocalStore::findRoots()
+{
+ Roots roots;
+
+ /* Process direct roots in {gcroots,manifests,profiles}. */
+ findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
+ if (pathExists(stateDir + "/manifests"))
+ findRoots(stateDir + "/manifests", DT_UNKNOWN, roots);
+ findRoots(stateDir + "/profiles", DT_UNKNOWN, roots);
+
+ return roots;
+}
+
+
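+/* Read the symlink ‘file’ (e.g. /proc/<pid>/exe) and, if it resolves
+ to an absolute path, add that path to ‘paths’. */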
+static void readProcLink(const string & file, StringSet & paths)
+{
+ /* 64 is the starting buffer size gnu readlink uses... */
+ auto bufsiz = ssize_t{64};
+try_again:
+ char buf[bufsiz];
+ auto res = readlink(file.c_str(), buf, bufsiz);
+ if (res == -1) {
+ if (errno == ENOENT || errno == EACCES)
+ return;
+ throw SysError("reading symlink");
+ }
+ if (res == bufsiz) {
+ if (SSIZE_MAX / 2 < bufsiz)
+ throw Error("stupidly long symlink");
+ bufsiz *= 2;
+ goto try_again;
+ }
+ if (res > 0 && buf[0] == '/')
+ paths.emplace(static_cast<char *>(buf), res);
+ return;
+}
+
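+/* Escape regex metacharacters in ‘raw’ so that it can be matched
+ literally inside a regular expression. */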
+static string quoteRegexChars(const string & raw)
+{
+ static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
+ return std::regex_replace(raw, specialRegex, R"(\$&)");
+}
+
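+/* Add the contents of the file ‘path’ as a potential root, ignoring
+ it if it doesn't exist or isn't readable. */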
+static void readFileRoots(const char * path, StringSet & paths)
+{
+ try {
+ paths.emplace(readFile(path));
+ } catch (SysError & e) {
+ if (e.errNo != ENOENT && e.errNo != EACCES)
+ throw;
+ }
+}
+
+void LocalStore::findRuntimeRoots(PathSet & roots)
+{
+ StringSet paths;
+ auto procDir = AutoCloseDir{opendir("/proc")};
+ if (procDir) {
+ struct dirent * ent;
+ auto digitsRegex = std::regex(R"(^\d+$)");
+ auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
+ auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
+ while (errno = 0, ent = readdir(procDir.get())) {
+ checkInterrupt();
+ if (std::regex_match(ent->d_name, digitsRegex)) {
+ readProcLink((format("/proc/%1%/exe") % ent->d_name).str(), paths);
+ readProcLink((format("/proc/%1%/cwd") % ent->d_name).str(), paths);
+
+ auto fdStr = (format("/proc/%1%/fd") % ent->d_name).str();
+ auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
+ if (!fdDir) {
+ if (errno == ENOENT || errno == EACCES)
+ continue;
+ throw SysError(format("opening %1%") % fdStr);
+ }
+ struct dirent * fd_ent;
+ while (errno = 0, fd_ent = readdir(fdDir.get())) {
+ if (fd_ent->d_name[0] != '.') {
+ readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths);
+ }
+ }
+ if (errno)
+ throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
+ fdDir.reset();
+
+ auto mapLines =
+ tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
+ for (const auto& line : mapLines) {
+ auto match = std::smatch{};
+ if (std::regex_match(line, match, mapRegex))
+ paths.emplace(match[1]);
+ }
+
+ try {
+ auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true);
+ auto env_end = std::sregex_iterator{};
+ for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
+ paths.emplace(i->str());
+ } catch (SysError & e) {
+ if (errno == ENOENT || errno == EACCES)
+ continue;
+ throw;
+ }
+ }
+ }
+ if (errno)
+ throw SysError("iterating /proc");
+ }
+
+#if !defined(__linux__)
+ try {
+ std::regex lsofRegex(R"(^n(/.*)$)");
+ auto lsofLines =
+ tokenizeString<std::vector<string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
+ for (const auto & line : lsofLines) {
+ std::smatch match;
+ if (std::regex_match(line, match, lsofRegex))
+ paths.emplace(match[1]);
+ }
+ } catch (ExecError & e) {
+ /* lsof is not installed, or lsof failed; ignore. */
+ }
+#endif
+
+#if defined(__linux__)
+ readFileRoots("/proc/sys/kernel/modprobe", paths);
+ readFileRoots("/proc/sys/kernel/fbsplash", paths);
+ readFileRoots("/proc/sys/kernel/poweroff_cmd", paths);
+#endif
+
+ for (auto & i : paths)
+ if (isInStore(i)) {
+ Path path = toStorePath(i);
+ if (roots.find(path) == roots.end() && isStorePath(path) && isValidPath(path)) {
+ debug(format("got additional root ‘%1%’") % path);
+ roots.insert(path);
+ }
+ }
+}
+
+
+struct GCLimitReached { };
+
+
+struct LocalStore::GCState
+{
+ GCOptions options;
+ GCResults & results;
+ PathSet roots;
+ PathSet tempRoots;
+ PathSet dead;
+ PathSet alive;
+ bool gcKeepOutputs;
+ bool gcKeepDerivations;
+ unsigned long long bytesInvalidated;
+ bool moveToTrash = true;
+ bool shouldDelete;
+ GCState(GCResults & results_) : results(results_), bytesInvalidated(0) { }
+};
+
+
+bool LocalStore::isActiveTempFile(const GCState & state,
+ const Path & path, const string & suffix)
+{
+ return hasSuffix(path, suffix)
+ && state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
+}
+
+
+void LocalStore::deleteGarbage(GCState & state, const Path & path)
+{
+ unsigned long long bytesFreed;
+ deletePath(path, bytesFreed);
+ state.results.bytesFreed += bytesFreed;
+}
+
+
+void LocalStore::deletePathRecursive(GCState & state, const Path & path)
+{
+ checkInterrupt();
+
+ unsigned long long size = 0;
+
+ if (isStorePath(path) && isValidPath(path)) {
+ PathSet referrers;
+ queryReferrers(path, referrers);
+ for (auto & i : referrers)
+ if (i != path) deletePathRecursive(state, i);
+ size = queryPathInfo(path)->narSize;
+ invalidatePathChecked(path);
+ }
+
+ Path realPath = realStoreDir + "/" + baseNameOf(path);
+
+ struct stat st;
+ if (lstat(realPath.c_str(), &st)) {
+ if (errno == ENOENT) return;
+ throw SysError(format("getting status of %1%") % realPath);
+ }
+
+ printInfo(format("deleting ‘%1%’") % path);
+
+ state.results.paths.insert(path);
+
+ /* If the path is not a regular file or symlink, move it to the
+ trash directory. The move is to ensure that later (when we're
+ not holding the global GC lock) we can delete the path without
+ being afraid that the path has become alive again. Otherwise
+ delete it right away. */
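+ /* Example: with the default layout, /nix/store/<hash>-foo is first
+ renamed to /nix/store/trash/<hash>-foo (trashDir lives inside
+ realStoreDir, so the rename stays on one filesystem), and the
+ trash directory is emptied later in collectGarbage() once the GC
+ lock has been released. */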
+ if (state.moveToTrash && S_ISDIR(st.st_mode)) {
+ // Estimate the amount freed using the narSize field. FIXME:
+ // if the path was not valid, need to determine the actual
+ // size.
+ try {
+ if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1)
+ throw SysError(format("making ‘%1%’ writable") % realPath);
+ Path tmp = trashDir + "/" + baseNameOf(path);
+ if (rename(realPath.c_str(), tmp.c_str()))
+ throw SysError(format("unable to rename ‘%1%’ to ‘%2%’") % realPath % tmp);
+ state.bytesInvalidated += size;
+ } catch (SysError & e) {
+ if (e.errNo == ENOSPC) {
+ printInfo(format("note: can't create move ‘%1%’: %2%") % realPath % e.msg());
+ deleteGarbage(state, realPath);
+ }
+ }
+ } else
+ deleteGarbage(state, realPath);
+
+ if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
+ printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
+ throw GCLimitReached();
+ }
+}
+
+
+bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & path)
+{
+ if (visited.find(path) != visited.end()) return false;
+
+ if (state.alive.find(path) != state.alive.end()) {
+ return true;
+ }
+
+ if (state.dead.find(path) != state.dead.end()) {
+ return false;
+ }
+
+ if (state.roots.find(path) != state.roots.end()) {
+ debug(format("cannot delete ‘%1%’ because it's a root") % path);
+ state.alive.insert(path);
+ return true;
+ }
+
+ visited.insert(path);
+
+ if (!isStorePath(path) || !isValidPath(path)) return false;
+
+ PathSet incoming;
+
+ /* Don't delete this path if any of its referrers are alive. */
+ queryReferrers(path, incoming);
+
+ /* If gc-keep-derivations is set and this is a derivation, then
+ don't delete the derivation if any of the outputs are alive. */
+ if (state.gcKeepDerivations && isDerivation(path)) {
+ PathSet outputs = queryDerivationOutputs(path);
+ for (auto & i : outputs)
+ if (isValidPath(i) && queryPathInfo(i)->deriver == path)
+ incoming.insert(i);
+ }
+
+ /* If gc-keep-outputs is set, then don't delete this path if there
+ are derivers of this path that are not garbage. */
+ if (state.gcKeepOutputs) {
+ PathSet derivers = queryValidDerivers(path);
+ for (auto & i : derivers)
+ incoming.insert(i);
+ }
+
+ for (auto & i : incoming)
+ if (i != path)
+ if (canReachRoot(state, visited, i)) {
+ state.alive.insert(path);
+ return true;
+ }
+
+ return false;
+}
+
+
+void LocalStore::tryToDelete(GCState & state, const Path & path)
+{
+ checkInterrupt();
+
+ auto realPath = realStoreDir + "/" + baseNameOf(path);
+ if (realPath == linksDir || realPath == trashDir) return;
+
+ Activity act(*logger, lvlDebug, format("considering whether to delete ‘%1%’") % path);
+
+ if (!isStorePath(path) || !isValidPath(path)) {
+ /* A lock file belonging to a path that we're building right
+ now isn't garbage. */
+ if (isActiveTempFile(state, path, ".lock")) return;
+
+ /* Don't delete .chroot directories for derivations that are
+ currently being built. */
+ if (isActiveTempFile(state, path, ".chroot")) return;
+
+ /* Don't delete .check directories for derivations that are
+ currently being built, because we may need to run
+ diff-hook. */
+ if (isActiveTempFile(state, path, ".check")) return;
+ }
+
+ PathSet visited;
+
+ if (canReachRoot(state, visited, path)) {
+ debug(format("cannot delete ‘%1%’ because it's still reachable") % path);
+ } else {
+ /* No path we visited was a root, so everything is garbage.
+ But we only delete ‘path’ and its referrers here so that
+ ‘nix-store --delete’ doesn't have the unexpected effect of
+ recursing into derivations and outputs. */
+ state.dead.insert(visited.begin(), visited.end());
+ if (state.shouldDelete)
+ deletePathRecursive(state, path);
+ }
+}
+
+
+/* Unlink all files in /nix/store/.links that have a link count of 1,
+ which indicates that there are no other links and so they can be
+ safely deleted. FIXME: race condition with optimisePath(): we
+ might see a link count of 1 just before optimisePath() increases
+ the link count. */
+void LocalStore::removeUnusedLinks(const GCState & state)
+{
+ AutoCloseDir dir(opendir(linksDir.c_str()));
+ if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+ long long actualSize = 0, unsharedSize = 0;
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) {
+ checkInterrupt();
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ Path path = linksDir + "/" + name;
+
+ struct stat st;
+ if (lstat(path.c_str(), &st) == -1)
+ throw SysError(format("statting ‘%1%’") % path);
+
+ if (st.st_nlink != 1) {
+ unsigned long long size = st.st_blocks * 512ULL;
+ actualSize += size;
+ unsharedSize += (st.st_nlink - 1) * size;
+ continue;
+ }
+
+ printMsg(lvlTalkative, format("deleting unused link ‘%1%’") % path);
+
+ if (unlink(path.c_str()) == -1)
+ throw SysError(format("deleting ‘%1%’") % path);
+
+ state.results.bytesFreed += st.st_blocks * 512ULL;
+ }
+
+ struct stat st;
+ if (stat(linksDir.c_str(), &st) == -1)
+ throw SysError(format("statting ‘%1%’") % linksDir);
+ long long overhead = st.st_blocks * 512ULL;
+
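+ /* Example: a link with st_nlink == 3 and 1 MiB of blocks adds 1 MiB
+ to actualSize and 2 MiB to unsharedSize, i.e. it currently saves
+ 1 MiB; the space occupied by the .links directory itself (overhead)
+ is subtracted from the total reported below. */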
+ printInfo(format("note: currently hard linking saves %.2f MiB")
+ % ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
+}
+
+
+void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+ GCState state(results);
+ state.options = options;
+ state.gcKeepOutputs = settings.gcKeepOutputs;
+ state.gcKeepDerivations = settings.gcKeepDerivations;
+
+ /* Using `--ignore-liveness' with `--delete' can have unintended
+ consequences if `gc-keep-outputs' or `gc-keep-derivations' are
+ true (the garbage collector will recurse into deleting the
+ outputs or derivers, respectively). So disable them. */
+ if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+ state.gcKeepOutputs = false;
+ state.gcKeepDerivations = false;
+ }
+
+ state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+
+ if (state.shouldDelete)
+ deletePath(reservedPath);
+
+ /* Acquire the global GC root. This prevents
+ a) New roots from being added.
+ b) Processes from creating new temporary root files. */
+ AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+ /* Find the roots. Since we've grabbed the GC lock, the set of
+ permanent roots cannot increase now. */
+ printError(format("finding garbage collector roots..."));
+ Roots rootMap = options.ignoreLiveness ? Roots() : findRoots();
+
+ for (auto & i : rootMap) state.roots.insert(i.second);
+
+ /* Add additional roots returned by the program specified by the
+ NIX_ROOT_FINDER environment variable. This is typically used
+ to add running programs to the set of roots (to prevent them
+ from being garbage collected). */
+ if (!options.ignoreLiveness)
+ findRuntimeRoots(state.roots);
+
+ /* Read the temporary roots. This acquires read locks on all
+ per-process temporary root files. So after this point no paths
+ can be added to the set of temporary roots. */
+ FDs fds;
+ readTempRoots(state.tempRoots, fds);
+ state.roots.insert(state.tempRoots.begin(), state.tempRoots.end());
+
+ /* After this point the set of roots or temporary roots cannot
+ increase, since we hold locks on everything. So everything
+ that is not reachable from `roots' is garbage. */
+
+ if (state.shouldDelete) {
+ if (pathExists(trashDir)) deleteGarbage(state, trashDir);
+ try {
+ createDirs(trashDir);
+ } catch (SysError & e) {
+ if (e.errNo == ENOSPC) {
+ printInfo(format("note: can't create trash directory: %1%") % e.msg());
+ state.moveToTrash = false;
+ }
+ }
+ }
+
+ /* Now either delete all garbage paths, or just the specified
+ paths (for gcDeleteSpecific). */
+
+ if (options.action == GCOptions::gcDeleteSpecific) {
+
+ for (auto & i : options.pathsToDelete) {
+ assertStorePath(i);
+ tryToDelete(state, i);
+ if (state.dead.find(i) == state.dead.end())
+ throw Error(format("cannot delete path ‘%1%’ since it is still alive") % i);
+ }
+
+ } else if (options.maxFreed > 0) {
+
+ if (state.shouldDelete)
+ printError(format("deleting garbage..."));
+ else
+ printError(format("determining live/dead paths..."));
+
+ try {
+
+ AutoCloseDir dir(opendir(realStoreDir.c_str()));
+ if (!dir) throw SysError(format("opening directory ‘%1%’") % realStoreDir);
+
+ /* Read the store and immediately delete all paths that
+ aren't valid. When using --max-freed etc., deleting
+ invalid paths is preferred over deleting unreachable
+ paths, since unreachable paths could become reachable
+ again. We don't use readDirectory() here so that GCing
+ can start faster. */
+ Paths entries;
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) {
+ checkInterrupt();
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ Path path = storeDir + "/" + name;
+ if (isStorePath(path) && isValidPath(path))
+ entries.push_back(path);
+ else
+ tryToDelete(state, path);
+ }
+
+ dir.reset();
+
+ /* Now delete the unreachable valid paths. Randomise the
+ order in which we delete entries to make the collector
+ less biased towards deleting paths that come
+ alphabetically first (e.g. /nix/store/000...). This
+ matters when using --max-freed etc. */
+ vector<Path> entries_(entries.begin(), entries.end());
+ random_shuffle(entries_.begin(), entries_.end());
+
+ for (auto & i : entries_)
+ tryToDelete(state, i);
+
+ } catch (GCLimitReached & e) {
+ }
+ }
+
+ if (state.options.action == GCOptions::gcReturnLive) {
+ state.results.paths = state.alive;
+ return;
+ }
+
+ if (state.options.action == GCOptions::gcReturnDead) {
+ state.results.paths = state.dead;
+ return;
+ }
+
+ /* Allow other processes to add to the store from here on. */
+ fdGCLock = -1;
+ fds.clear();
+
+ /* Delete the trash directory. */
+ printInfo(format("deleting ‘%1%’") % trashDir);
+ deleteGarbage(state, trashDir);
+
+ /* Clean up the links directory. */
+ if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
+ printError(format("deleting unused links..."));
+ removeUnusedLinks(state);
+ }
+
+ /* While we're at it, vacuum the database. */
+ //if (options.action == GCOptions::gcDeleteDead) vacuumDB();
+}
+
+
+}
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
new file mode 100644
index 000000000..953bf6aaa
--- /dev/null
+++ b/src/libstore/globals.cc
@@ -0,0 +1,104 @@
+#include "globals.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "args.hh"
+
+#include <algorithm>
+#include <map>
+#include <thread>
+
+
+namespace nix {
+
+
+/* The default location of the daemon socket, relative to nixStateDir.
+ The socket is in a directory to allow you to control access to the
+ Nix daemon by setting the mode/ownership of the directory
+ appropriately. (This wouldn't work on the socket itself since it
+ must be deleted and recreated on startup.) */
+#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
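+/* E.g. with the usual nixStateDir of /nix/var/nix, the socket ends up
+ at /nix/var/nix/daemon-socket/socket. */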
+
+/* chroot-like behavior from Apple's sandbox */
+#if __APPLE__
+ #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
+#else
+ #define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
+#endif
+
+Settings settings;
+
+Settings::Settings()
+ : Config({})
+ , nixPrefix(NIX_PREFIX)
+ , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
+ , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
+ , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
+ , nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)))
+ , nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)))
+ , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
+ , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
+ , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
+{
+ buildUsersGroup = getuid() == 0 ? "nixbld" : "";
+ lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
+ caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt"));
+
+#if __linux__
+ sandboxPaths = tokenizeString<StringSet>("/bin/sh=" BASH_PATH);
+#endif
+
+ allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
+}
+
+void Settings::loadConfFile()
+{
+ applyConfigFile(nixConfDir + "/nix.conf");
+
+ /* We only want to send overrides to the daemon, i.e. stuff from
+ ~/.nix/nix.conf or the command line. */
+ resetOverriden();
+
+ applyConfigFile(getConfigDir() + "/nix/nix.conf");
+}
+
+void Settings::set(const string & name, const string & value)
+{
+ Config::set(name, value);
+}
+
+unsigned int Settings::getDefaultCores()
+{
+ return std::max(1U, std::thread::hardware_concurrency());
+}
+
+const string nixVersion = PACKAGE_VERSION;
+
+template<> void BaseSetting<SandboxMode>::set(const std::string & str)
+{
+ if (str == "true") value = smEnabled;
+ else if (str == "relaxed") value = smRelaxed;
+ else if (str == "false") value = smDisabled;
+ else throw UsageError("option '%s' has invalid value '%s'", name, str);
+}
+
+template<> std::string BaseSetting<SandboxMode>::to_string()
+{
+ if (value == smEnabled) return "true";
+ else if (value == smRelaxed) return "relaxed";
+ else if (value == smDisabled) return "false";
+ else abort();
+}
+
+template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
+{
+ AbstractSetting::toJSON(out);
+}
+
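+/* E.g. "build-max-jobs = auto" in nix.conf uses one job per hardware
+ thread, while "build-max-jobs = 4" allows at most four parallel
+ builds. */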
+void MaxBuildJobsSetting::set(const std::string & str)
+{
+ if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
+ else if (!string2Int(str, value))
+ throw UsageError("configuration setting ‘%s’ should be ‘auto’ or an integer", name);
+}
+
+}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
new file mode 100644
index 000000000..b4f44de2e
--- /dev/null
+++ b/src/libstore/globals.hh
@@ -0,0 +1,318 @@
+#pragma once
+
+#include "types.hh"
+#include "config.hh"
+
+#include <map>
+#include <sys/types.h>
+
+
+namespace nix {
+
+typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
+
+extern bool useCaseHack; // FIXME
+
+struct CaseHackSetting : public BaseSetting<bool>
+{
+ CaseHackSetting(Config * options,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<bool>(useCaseHack, name, description, aliases)
+ {
+ options->addSetting(this);
+ }
+
+ void set(const std::string & str) override
+ {
+ BaseSetting<bool>::set(str);
+ nix::useCaseHack = true;
+ }
+};
+
+struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
+{
+ MaxBuildJobsSetting(Config * options,
+ unsigned int def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<unsigned int>(def, name, description, aliases)
+ {
+ options->addSetting(this);
+ }
+
+ void set(const std::string & str) override;
+};
+
+class Settings : public Config {
+
+ unsigned int getDefaultCores();
+
+public:
+
+ Settings();
+
+ void loadConfFile();
+
+ void set(const string & name, const string & value);
+
+ Path nixPrefix;
+
+ /* The directory where we store sources and derived files. */
+ Path nixStore;
+
+ Path nixDataDir; /* !!! fix */
+
+ /* The directory where we log various operations. */
+ Path nixLogDir;
+
+ /* The directory where state is stored. */
+ Path nixStateDir;
+
+ /* The directory where configuration files are stored. */
+ Path nixConfDir;
+
+ /* The directory where internal helper programs are stored. */
+ Path nixLibexecDir;
+
+ /* The directory where the main programs are stored. */
+ Path nixBinDir;
+
+ /* File name of the socket the daemon listens to. */
+ Path nixDaemonSocketFile;
+
+ Setting<bool> keepFailed{this, false, "keep-failed",
+ "Whether to keep temporary directories of failed builds."};
+
+ Setting<bool> keepGoing{this, false, "keep-going",
+ "Whether to keep building derivations when another build fails."};
+
+ Setting<bool> tryFallback{this, false, "build-fallback",
+ "Whether to fall back to building when substitution fails."};
+
+ /* Whether to show build log output in real time. */
+ bool verboseBuild = true;
+
+ /* If verboseBuild is false, the number of lines of the tail of
+ the log to show if a build fails. */
+ size_t logLines = 10;
+
+ MaxBuildJobsSetting maxBuildJobs{this, 1, "build-max-jobs",
+ "Maximum number of parallel build jobs. \"auto\" means use number of cores."};
+
+ Setting<unsigned int> buildCores{this, getDefaultCores(), "build-cores",
+ "Number of CPU cores to utilize in parallel within a build, "
+ "i.e. by passing this number to Make via '-j'. 0 means that the "
+ "number of actual CPU cores on the local host ought to be "
+ "auto-detected."};
+
+ /* Read-only mode. Don't copy stuff to the store, don't change
+ the database. */
+ bool readOnlyMode = false;
+
+ Setting<std::string> thisSystem{this, SYSTEM, "system",
+ "The canonical Nix system name."};
+
+ Setting<time_t> maxSilentTime{this, 0, "build-max-silent-time",
+ "The maximum time in seconds that a builer can go without "
+ "producing any output on stdout/stderr before it is killed. "
+ "0 means infinity."};
+
+ Setting<time_t> buildTimeout{this, 0, "build-timeout",
+ "The maximum duration in seconds that a builder can run. "
+ "0 means infinity."};
+
+ Setting<bool> useBuildHook{this, true, "remote-builds",
+ "Whether to use build hooks (for distributed builds)."};
+
+ Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
+ "Amount of reserved disk space for the garbage collector."};
+
+ Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
+ "Whether SQLite should use fsync()."};
+
+ Setting<bool> useSQLiteWAL{this, true, "use-sqlite-wal",
+ "Whether SQLite should use WAL mode."};
+
+ Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
+ "Whether to call sync() before registering a path as valid."};
+
+ Setting<bool> useSubstitutes{this, true, "build-use-substitutes",
+ "Whether to use substitutes."};
+
+ Setting<std::string> buildUsersGroup{this, "", "build-users-group",
+ "The Unix group that contains the build users."};
+
+ Setting<bool> impersonateLinux26{this, false, "build-impersonate-linux-26",
+ "Whether to impersonate a Linux 2.6 machine on newer kernels."};
+
+ Setting<bool> keepLog{this, true, "build-keep-log",
+ "Whether to store build logs."};
+
+ Setting<bool> compressLog{this, true, "build-compress-log",
+ "Whether to compress logs."};
+
+ Setting<unsigned long> maxLogSize{this, 0, "build-max-log-size",
+ "Maximum number of bytes a builder can write to stdout/stderr "
+ "before being killed (0 means no limit)."};
+
+ /* When build-repeat > 0 and verboseBuild == true, whether to
+ print repeated builds (i.e. builds other than the first one) to
+ stderr. Hack to prevent Hydra logs from being polluted. */
+ bool printRepeatedBuilds = true;
+
+ Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
+ "How often (in seconds) to poll for locks."};
+
+ Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
+ "Whether to check if new GC roots can in fact be found by the "
+ "garbage collector."};
+
+ Setting<bool> gcKeepOutputs{this, false, "gc-keep-outputs",
+ "Whether the garbage collector should keep outputs of live derivations."};
+
+ Setting<bool> gcKeepDerivations{this, true, "gc-keep-derivations",
+ "Whether the garbage collector should keep derivers of live paths."};
+
+ Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
+ "Whether to automatically replace files with identical contents with hard links."};
+
+ Setting<bool> envKeepDerivations{this, false, "env-keep-derivations",
+ "Whether to add derivations as a dependency of user environments "
+ "(to prevent them from being GCed)."};
+
+ /* Whether to lock the Nix client and worker to the same CPU. */
+ bool lockCPU;
+
+ /* Whether to show a stack trace if Nix evaluation fails. */
+ bool showTrace = false;
+
+ Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
+ "Whether builtin functions that allow executing native code should be enabled."};
+
+ Setting<SandboxMode> sandboxMode{this, smDisabled, "build-use-sandbox",
+ "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
+ {"build-use-chroot"}};
+
+ Setting<PathSet> sandboxPaths{this, {}, "build-sandbox-paths",
+ "The paths to make available inside the build sandbox.",
+ {"build-chroot-dirs"}};
+
+ Setting<PathSet> extraSandboxPaths{this, {}, "build-extra-sandbox-paths",
+ "Additional paths to make available inside the build sandbox.",
+ {"build-extra-chroot-dirs"}};
+
+ Setting<bool> restrictEval{this, false, "restrict-eval",
+ "Whether to restrict file system access to paths in $NIX_PATH, "
+ "and to disallow fetching files from the network."};
+
+ Setting<size_t> buildRepeat{this, 0, "build-repeat",
+ "The number of times to repeat a build in order to verify determinism."};
+
+#if __linux__
+ Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
+ "The size of /dev/shm in the build sandbox."};
+#endif
+
+ Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
+ "Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};
+
+#if __APPLE__
+ Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations",
+ "Whether to log Darwin sandbox access violations to the system log."};
+#endif
+
+ Setting<bool> runDiffHook{this, false, "run-diff-hook",
+ "Whether to run the program specified by the diff-hook setting "
+ "repeated builds produce a different result. Typically used to "
+ "plug in diffoscope."};
+
+ PathSetting diffHook{this, true, "", "diff-hook",
+ "A program that prints out the differences between the two paths "
+ "specified on its command line."};
+
+ Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
+ "Whether to fail if repeated builds produce different output."};
+
+ Setting<Strings> binaryCachePublicKeys{this,
+ {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
+ "binary-cache-public-keys",
+ "Trusted public keys for secure substitution."};
+
+ Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
+ "Secret keys with which to sign local builds."};
+
+ Setting<size_t> binaryCachesParallelConnections{this, 25, "http-connections",
+ "Number of parallel HTTP connections.",
+ {"binary-caches-parallel-connections"}};
+
+ Setting<bool> enableHttp2{this, true, "enable-http2",
+ "Whether to enable HTTP/2 support."};
+
+ Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
+ "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."};
+
+ Setting<std::string> signedBinaryCaches{this, "*", "signed-binary-caches",
+ "Obsolete."};
+
+ Setting<Strings> substituters{this,
+ nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
+ "substituters",
+ "The URIs of substituters (such as https://cache.nixos.org/).",
+ {"binary-caches"}};
+
+ // FIXME: provide a way to add to option values.
+ Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
+ "Additional URIs of substituters.",
+ {"extra-binary-caches"}};
+
+ Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
+ "Disabled substituters that may be enabled via the substituters option by untrusted users.",
+ {"trusted-binary-caches"}};
+
+ Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
+ "Which users or groups are trusted to ask the daemon to do unsafe things."};
+
+ /* Who we trust to use the daemon in safe ways. */
+ Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
+ "Which users or groups are allowed to connect to the daemon."};
+
+ Setting<bool> printMissing{this, true, "print-missing",
+ "Whether to print what paths need to be built or downloaded."};
+
+ Setting<std::string> preBuildHook{this,
+#if __APPLE__
+ nixLibexecDir + "/nix/resolve-system-dependencies",
+#else
+ "",
+#endif
+ "pre-build-hook",
+ "A program to run just before a build to set derivation-specific build settings."};
+
+ Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
+ "Path to the netrc file used to obtain usernames/passwords for downloads."};
+
+ /* Path to the SSL CA file used */
+ Path caFile;
+
+ Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
+ "Whether the evaluator allows importing the result of a derivation."};
+
+ CaseHackSetting useCaseHack{this, "use-case-hack",
+ "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
+
+ Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
+ "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
+};
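+
+/* For illustration, a minimal nix.conf (read by loadConfFile() from
+ nixConfDir) might set a few of the options declared above:
+
+ build-max-jobs = auto
+ build-cores = 0
+ build-use-sandbox = relaxed
+ gc-keep-outputs = true
+ substituters = https://cache.nixos.org/
+ */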
+
+
+// FIXME: don't use a global variable.
+extern Settings settings;
+
+
+extern const string nixVersion;
+
+
+}
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
new file mode 100644
index 000000000..37a7d6ace
--- /dev/null
+++ b/src/libstore/http-binary-cache-store.cc
@@ -0,0 +1,115 @@
+#include "binary-cache-store.hh"
+#include "download.hh"
+#include "globals.hh"
+#include "nar-info-disk-cache.hh"
+
+namespace nix {
+
+MakeError(UploadToHTTP, Error);
+
+class HttpBinaryCacheStore : public BinaryCacheStore
+{
+private:
+
+ Path cacheUri;
+
+public:
+
+ HttpBinaryCacheStore(
+ const Params & params, const Path & _cacheUri)
+ : BinaryCacheStore(params)
+ , cacheUri(_cacheUri)
+ {
+ if (cacheUri.back() == '/')
+ cacheUri.pop_back();
+
+ diskCache = getNarInfoDiskCache();
+ }
+
+ std::string getUri() override
+ {
+ return cacheUri;
+ }
+
+ void init() override
+ {
+ // FIXME: do this lazily?
+ if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) {
+ try {
+ BinaryCacheStore::init();
+ } catch (UploadToHTTP &) {
+ throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri);
+ }
+ diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority);
+ }
+ }
+
+protected:
+
+ bool fileExists(const std::string & path) override
+ {
+ try {
+ DownloadRequest request(cacheUri + "/" + path);
+ request.showProgress = DownloadRequest::no;
+ request.head = true;
+ request.tries = 5;
+ getDownloader()->download(request);
+ return true;
+ } catch (DownloadError & e) {
+ /* S3 buckets return 403 if a file doesn't exist and the
+ bucket is unlistable, so treat 403 as 404. */
+ if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+ return false;
+ throw;
+ }
+ }
+
+ void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) override
+ {
+ throw UploadToHTTP("uploading to an HTTP binary cache is not supported");
+ }
+
+ void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ DownloadRequest request(cacheUri + "/" + path);
+ request.showProgress = DownloadRequest::no;
+ request.tries = 8;
+
+ getDownloader()->enqueueDownload(request,
+ [success](const DownloadResult & result) {
+ success(result.data);
+ },
+ [success, failure](std::exception_ptr exc) {
+ try {
+ std::rethrow_exception(exc);
+ } catch (DownloadError & e) {
+ if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+ return success(0);
+ failure(exc);
+ } catch (...) {
+ failure(exc);
+ }
+ });
+ }
+
+};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, 7) != "http://" &&
+ std::string(uri, 0, 8) != "https://" &&
+ (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://")
+ ) return 0;
+ auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
+ store->init();
+ return store;
+});
+
+}
+
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
new file mode 100644
index 000000000..befc560bf
--- /dev/null
+++ b/src/libstore/legacy-ssh-store.cc
@@ -0,0 +1,256 @@
+#include "archive.hh"
+#include "pool.hh"
+#include "remote-store.hh"
+#include "serve-protocol.hh"
+#include "store-api.hh"
+#include "worker-protocol.hh"
+#include "ssh.hh"
+
+namespace nix {
+
+static std::string uriScheme = "ssh://";
+
+struct LegacySSHStore : public Store
+{
+ const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
+ const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
+ const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
+
+ struct Connection
+ {
+ std::unique_ptr<SSHMaster::Connection> sshConn;
+ FdSink to;
+ FdSource from;
+ };
+
+ std::string host;
+
+ ref<Pool<Connection>> connections;
+
+ SSHMaster master;
+
+ LegacySSHStore(const string & host, const Params & params)
+ : Store(params)
+ , host(host)
+ , connections(make_ref<Pool<Connection>>(
+ std::max(1, (int) maxConnections),
+ [this]() { return openConnection(); },
+ [](const ref<Connection> & r) { return true; }
+ ))
+ , master(
+ host,
+ sshKey,
+ // Use SSH master only if using more than 1 connection.
+ connections->capacity() > 1,
+ compress)
+ {
+ }
+
+ ref<Connection> openConnection()
+ {
+ auto conn = make_ref<Connection>();
+ conn->sshConn = master.startCommand("nix-store --serve --write");
+ conn->to = FdSink(conn->sshConn->in.get());
+ conn->from = FdSource(conn->sshConn->out.get());
+
+ int remoteVersion;
+
+ try {
+ conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
+ conn->to.flush();
+
+ unsigned int magic = readInt(conn->from);
+ if (magic != SERVE_MAGIC_2)
+ throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%s’", host);
+ remoteVersion = readInt(conn->from);
+ if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
+ throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%s’", host);
+
+ } catch (EndOfFile & e) {
+ throw Error("cannot connect to ‘%1%’", host);
+ }
+
+ return conn;
+ };
+
+ string getUri() override
+ {
+ return uriScheme + host;
+ }
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> {
+ auto conn(connections->get());
+
+ debug("querying remote host ‘%s’ for info on ‘%s’", host, path);
+
+ conn->to << cmdQueryPathInfos << PathSet{path};
+ conn->to.flush();
+
+ auto info = std::make_shared<ValidPathInfo>();
+ conn->from >> info->path;
+ if (info->path.empty()) return nullptr;
+ assert(path == info->path);
+
+ PathSet references;
+ conn->from >> info->deriver;
+ info->references = readStorePaths<PathSet>(*this, conn->from);
+ readLongLong(conn->from); // download size
+ info->narSize = readLongLong(conn->from);
+
+ auto s = readString(conn->from);
+ assert(s == "");
+
+ return info;
+ });
+ }
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override
+ {
+ debug("adding path ‘%s’ to remote host ‘%s’", info.path, host);
+
+ auto conn(connections->get());
+
+ conn->to
+ << cmdImportPaths
+ << 1;
+ conn->to(*nar);
+ conn->to
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0
+ << 0;
+ conn->to.flush();
+
+ if (readInt(conn->from) != 1)
+ throw Error("failed to add path ‘%s’ to remote host ‘%s’, info.path, host");
+
+ }
+
+ void narFromPath(const Path & path, Sink & sink) override
+ {
+ auto conn(connections->get());
+
+ conn->to << cmdDumpStorePath << path;
+ conn->to.flush();
+
+ /* FIXME: inefficient. */
+ ParseSink parseSink; /* null sink; just parse the NAR */
+ TeeSource savedNAR(conn->from);
+ parseDump(parseSink, savedNAR);
+ sink(*savedNAR.data);
+ }
+
+ /* Unsupported methods. */
+ [[noreturn]] void unsupported()
+ {
+ throw Error("operation not supported on SSH stores");
+ }
+
+ PathSet queryAllValidPaths() override { unsupported(); }
+
+ void queryReferrers(const Path & path, PathSet & referrers) override
+ { unsupported(); }
+
+ PathSet queryDerivationOutputs(const Path & path) override
+ { unsupported(); }
+
+ StringSet queryDerivationOutputNames(const Path & path) override
+ { unsupported(); }
+
+ Path queryPathFromHashPart(const string & hashPart) override
+ { unsupported(); }
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo,
+ PathFilter & filter, bool repair) override
+ { unsupported(); }
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair) override
+ { unsupported(); }
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override
+ { unsupported(); }
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override
+ { unsupported(); }
+
+ void ensurePath(const Path & path) override
+ { unsupported(); }
+
+ void addTempRoot(const Path & path) override
+ { unsupported(); }
+
+ void addIndirectRoot(const Path & path) override
+ { unsupported(); }
+
+ Roots findRoots() override
+ { unsupported(); }
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override
+ { unsupported(); }
+
+ ref<FSAccessor> getFSAccessor() override
+ { unsupported(); }
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override
+ { unsupported(); }
+
+ bool isTrusted() override
+ { return true; }
+
+ void computeFSClosure(const PathSet & paths,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false) override
+ {
+ if (flipDirection || includeDerivers) {
+ Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
+ return;
+ }
+
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryClosure
+ << includeOutputs
+ << paths;
+ conn->to.flush();
+
+ auto res = readStorePaths<PathSet>(*this, conn->from);
+
+ out.insert(res.begin(), res.end());
+ }
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override
+ {
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryValidPaths
+ << false // lock
+ << maybeSubstitute
+ << paths;
+ conn->to.flush();
+
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+ return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
+});
+
+}
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
new file mode 100644
index 000000000..aff22f9fc
--- /dev/null
+++ b/src/libstore/local-binary-cache-store.cc
@@ -0,0 +1,107 @@
+#include "binary-cache-store.hh"
+#include "globals.hh"
+#include "nar-info-disk-cache.hh"
+
+namespace nix {
+
+class LocalBinaryCacheStore : public BinaryCacheStore
+{
+private:
+
+ Path binaryCacheDir;
+
+public:
+
+ LocalBinaryCacheStore(
+ const Params & params, const Path & binaryCacheDir)
+ : BinaryCacheStore(params)
+ , binaryCacheDir(binaryCacheDir)
+ {
+ }
+
+ void init() override;
+
+ std::string getUri() override
+ {
+ return "file://" + binaryCacheDir;
+ }
+
+protected:
+
+ bool fileExists(const std::string & path) override;
+
+ void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) override;
+
+ void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
+ try {
+ return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
+ } catch (SysError & e) {
+ if (e.errNo == ENOENT) return std::shared_ptr<std::string>();
+ throw;
+ }
+ });
+ }
+
+ PathSet queryAllValidPaths() override
+ {
+ PathSet paths;
+
+ for (auto & entry : readDirectory(binaryCacheDir)) {
+ if (entry.name.size() != 40 ||
+ !hasSuffix(entry.name, ".narinfo"))
+ continue;
+ paths.insert(storeDir + "/" + entry.name.substr(0, entry.name.size() - 8));
+ }
+
+ return paths;
+ }
+
+};
+
+void LocalBinaryCacheStore::init()
+{
+ createDirs(binaryCacheDir + "/nar");
+ BinaryCacheStore::init();
+}
+
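+/* Write ‘s’ to a temporary file next to ‘path’ and rename(2) it into
+ place; since the rename happens within one directory it is atomic,
+ so readers never observe a partially written file. */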
+static void atomicWrite(const Path & path, const std::string & s)
+{
+ Path tmp = path + ".tmp." + std::to_string(getpid());
+ AutoDelete del(tmp, false);
+ writeFile(tmp, s);
+ if (rename(tmp.c_str(), path.c_str()))
+ throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % path);
+ del.cancel();
+}
+
+bool LocalBinaryCacheStore::fileExists(const std::string & path)
+{
+ return pathExists(binaryCacheDir + "/" + path);
+}
+
+void LocalBinaryCacheStore::upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType)
+{
+ atomicWrite(binaryCacheDir + "/" + path, data);
+}
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
+ std::string(uri, 0, 7) != "file://")
+ return 0;
+ auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
+ store->init();
+ return store;
+});
+
+}
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
new file mode 100644
index 000000000..bf247903c
--- /dev/null
+++ b/src/libstore/local-fs-store.cc
@@ -0,0 +1,130 @@
+#include "archive.hh"
+#include "fs-accessor.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "compression.hh"
+#include "derivations.hh"
+
+namespace nix {
+
+LocalFSStore::LocalFSStore(const Params & params)
+ : Store(params)
+{
+}
+
+struct LocalStoreAccessor : public FSAccessor
+{
+ ref<LocalFSStore> store;
+
+ LocalStoreAccessor(ref<LocalFSStore> store) : store(store) { }
+
+ Path toRealPath(const Path & path)
+ {
+ Path storePath = store->toStorePath(path);
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+ return store->getRealStoreDir() + std::string(path, store->storeDir.size());
+ }
+
+ FSAccessor::Stat stat(const Path & path) override
+ {
+ auto realPath = toRealPath(path);
+
+ struct stat st;
+ if (lstat(path.c_str(), &st)) {
+ if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false};
+ throw SysError(format("getting status of ‘%1%’") % path);
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
+ throw Error(format("file ‘%1%’ has unsupported type") % path);
+
+ return {
+ S_ISREG(st.st_mode) ? Type::tRegular :
+ S_ISLNK(st.st_mode) ? Type::tSymlink :
+ Type::tDirectory,
+ S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0,
+ S_ISREG(st.st_mode) && st.st_mode & S_IXUSR};
+ }
+
+ StringSet readDirectory(const Path & path) override
+ {
+ auto realPath = toRealPath(path);
+
+ auto entries = nix::readDirectory(realPath);
+
+ StringSet res;
+ for (auto & entry : entries)
+ res.insert(entry.name);
+
+ return res;
+ }
+
+ std::string readFile(const Path & path) override
+ {
+ return nix::readFile(toRealPath(path));
+ }
+
+ std::string readLink(const Path & path) override
+ {
+ return nix::readLink(toRealPath(path));
+ }
+};
+
+ref<FSAccessor> LocalFSStore::getFSAccessor()
+{
+ return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
+}
+
+void LocalFSStore::narFromPath(const Path & path, Sink & sink)
+{
+ if (!isValidPath(path))
+ throw Error(format("path ‘%s’ is not valid") % path);
+ dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
+}
+
+const string LocalFSStore::drvsLogDir = "drvs";
+
+
+
+std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
+{
+ auto path(path_);
+
+ assertStorePath(path);
+
+ if (!isDerivation(path)) {
+ try {
+ path = queryPathInfo(path)->deriver;
+ } catch (InvalidPath &) {
+ return nullptr;
+ }
+ if (path == "") return nullptr;
+ }
+
+ string baseName = baseNameOf(path);
+
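+ /* Logs are looked up in two layouts: sharded on the first two
+ characters of the base name, e.g. <logDir>/drvs/ab/cdef...-hello.drv,
+ and the flat layout <logDir>/drvs/abcdef...-hello.drv, each
+ optionally compressed with a ".bz2" suffix. */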
+ for (int j = 0; j < 2; j++) {
+
+ Path logPath =
+ j == 0
+ ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
+ : fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
+ Path logBz2Path = logPath + ".bz2";
+
+ if (pathExists(logPath))
+ return std::make_shared<std::string>(readFile(logPath));
+
+ else if (pathExists(logBz2Path)) {
+ try {
+ return decompress("bzip2", readFile(logBz2Path));
+ } catch (Error &) { }
+ }
+
+ }
+
+ return nullptr;
+}
+
+}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
new file mode 100644
index 000000000..5a98454ab
--- /dev/null
+++ b/src/libstore/local-store.cc
@@ -0,0 +1,1345 @@
+#include "local-store.hh"
+#include "globals.hh"
+#include "archive.hh"
+#include "pathlocks.hh"
+#include "worker-protocol.hh"
+#include "derivations.hh"
+#include "nar-info.hh"
+
+#include <iostream>
+#include <algorithm>
+#include <cstring>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/select.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <utime.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+#include <grp.h>
+
+#if __linux__
+#include <sched.h>
+#include <sys/statvfs.h>
+#include <sys/mount.h>
+#include <sys/ioctl.h>
+#endif
+
+#include <sqlite3.h>
+
+
+namespace nix {
+
+
+LocalStore::LocalStore(const Params & params)
+ : Store(params)
+ , LocalFSStore(params)
+ , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+ "physical path to the Nix store"}
+ , realStoreDir(realStoreDir_)
+ , dbDir(stateDir + "/db")
+ , linksDir(realStoreDir + "/.links")
+ , reservedPath(dbDir + "/reserved")
+ , schemaPath(dbDir + "/schema")
+ , trashDir(realStoreDir + "/trash")
+ , publicKeys(getDefaultPublicKeys())
+{
+ auto state(_state.lock());
+
+ /* Create missing state directories if they don't already exist. */
+ createDirs(realStoreDir);
+ makeStoreWritable();
+ createDirs(linksDir);
+ Path profilesDir = stateDir + "/profiles";
+ createDirs(profilesDir);
+ createDirs(stateDir + "/temproots");
+ createDirs(dbDir);
+ Path gcRootsDir = stateDir + "/gcroots";
+ if (!pathExists(gcRootsDir)) {
+ createDirs(gcRootsDir);
+ createSymlink(profilesDir, gcRootsDir + "/profiles");
+ }
+
+ /* Optionally, create directories and set permissions for a
+ multi-user install. */
+ if (getuid() == 0 && settings.buildUsersGroup != "") {
+
+ Path perUserDir = profilesDir + "/per-user";
+ createDirs(perUserDir);
+ if (chmod(perUserDir.c_str(), 01777) == -1)
+ throw SysError(format("could not set permissions on ‘%1%’ to 1777") % perUserDir);
+
+ mode_t perm = 01775;
+
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ printError(format("warning: the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+ % settings.buildUsersGroup);
+ else {
+ struct stat st;
+ if (stat(realStoreDir.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % realStoreDir);
+
+ if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
+ if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
+ throw SysError(format("changing ownership of path ‘%1%’") % realStoreDir);
+ if (chmod(realStoreDir.c_str(), perm) == -1)
+ throw SysError(format("changing permissions on path ‘%1%’") % realStoreDir);
+ }
+ }
+ }
+
+ /* Ensure that the store and its parents are not symlinks. */
+ if (getEnv("NIX_IGNORE_SYMLINK_STORE") != "1") {
+ Path path = realStoreDir;
+ struct stat st;
+ while (path != "/") {
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting status of ‘%1%’") % path);
+ if (S_ISLNK(st.st_mode))
+ throw Error(format(
+ "the path ‘%1%’ is a symlink; "
+ "this is not allowed for the Nix store and its parent directories")
+ % path);
+ path = dirOf(path);
+ }
+ }
+
+ /* We can't open a SQLite database if the disk is full. Since
+ this prevents the garbage collector from running when it's most
+ needed, we reserve some dummy space that we can free just
+ before doing a garbage collection. */
+ try {
+ struct stat st;
+ if (stat(reservedPath.c_str(), &st) == -1 ||
+ st.st_size != settings.reservedSize)
+ {
+ AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
+ int res = -1;
+#if HAVE_POSIX_FALLOCATE
+ res = posix_fallocate(fd.get(), 0, settings.reservedSize);
+#endif
+ if (res == -1) {
+ writeFull(fd.get(), string(settings.reservedSize, 'X'));
+ [[gnu::unused]] auto res2 = ftruncate(fd.get(), settings.reservedSize);
+ }
+ }
+ } catch (SysError & e) { /* don't care about errors */
+ }
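+ /* With the default gc-reserved-space of 8 MiB this keeps an 8 MiB
+ file at <dbDir>/reserved, which collectGarbage() deletes up front
+ when it is actually going to delete paths. */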
+
+ /* Acquire the big fat lock in shared mode to make sure that no
+ schema upgrade is in progress. */
+ Path globalLockPath = dbDir + "/big-lock";
+ globalLock = openLockFile(globalLockPath.c_str(), true);
+
+ if (!lockFile(globalLock.get(), ltRead, false)) {
+ printError("waiting for the big Nix store lock...");
+ lockFile(globalLock.get(), ltRead, true);
+ }
+
+ /* Check the current database schema and if necessary do an
+ upgrade. */
+ int curSchema = getSchema();
+ if (curSchema > nixSchemaVersion)
+ throw Error(format("current Nix store schema is version %1%, but I only support %2%")
+ % curSchema % nixSchemaVersion);
+
+ else if (curSchema == 0) { /* new store */
+ curSchema = nixSchemaVersion;
+ openDB(*state, true);
+ writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
+ }
+
+ else if (curSchema < nixSchemaVersion) {
+ if (curSchema < 5)
+ throw Error(
+ "Your Nix store has a database in Berkeley DB format,\n"
+ "which is no longer supported. To convert to the new format,\n"
+ "please upgrade Nix to version 0.12 first.");
+
+ if (curSchema < 6)
+ throw Error(
+ "Your Nix store has a database in flat file format,\n"
+ "which is no longer supported. To convert to the new format,\n"
+ "please upgrade Nix to version 1.11 first.");
+
+ if (!lockFile(globalLock.get(), ltWrite, false)) {
+ printError("waiting for exclusive access to the Nix store...");
+ lockFile(globalLock.get(), ltWrite, true);
+ }
+
+ /* Get the schema version again, because another process may
+ have performed the upgrade already. */
+ curSchema = getSchema();
+
+ if (curSchema < 7) { upgradeStore7(); }
+
+ openDB(*state, false);
+
+ if (curSchema < 8) {
+ SQLiteTxn txn(state->db);
+ state->db.exec("alter table ValidPaths add column ultimate integer");
+ state->db.exec("alter table ValidPaths add column sigs text");
+ txn.commit();
+ }
+
+ if (curSchema < 9) {
+ SQLiteTxn txn(state->db);
+ state->db.exec("drop table FailedPaths");
+ txn.commit();
+ }
+
+ if (curSchema < 10) {
+ SQLiteTxn txn(state->db);
+ state->db.exec("alter table ValidPaths add column ca text");
+ txn.commit();
+ }
+
+ writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
+
+ lockFile(globalLock.get(), ltRead, true);
+ }
+
+ else openDB(*state, false);
+
+ /* Prepare SQL statements. */
+ state->stmtRegisterValidPath.create(state->db,
+ "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);");
+ state->stmtUpdatePathInfo.create(state->db,
+ "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;");
+ state->stmtAddReference.create(state->db,
+ "insert or replace into Refs (referrer, reference) values (?, ?);");
+ state->stmtQueryPathInfo.create(state->db,
+ "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;");
+ state->stmtQueryReferences.create(state->db,
+ "select path from Refs join ValidPaths on reference = id where referrer = ?;");
+ state->stmtQueryReferrers.create(state->db,
+ "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
+ state->stmtInvalidatePath.create(state->db,
+ "delete from ValidPaths where path = ?;");
+ state->stmtAddDerivationOutput.create(state->db,
+ "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
+ state->stmtQueryValidDerivers.create(state->db,
+ "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
+ state->stmtQueryDerivationOutputs.create(state->db,
+ "select id, path from DerivationOutputs where drv = ?;");
+ // Use "path >= ?" with limit 1 rather than "path like '?%'" to
+ // ensure efficient lookup.
+ state->stmtQueryPathFromHashPart.create(state->db,
+ "select path from ValidPaths where path >= ? limit 1;");
+ state->stmtQueryValidPaths.create(state->db, "select path from ValidPaths");
+}
+
+
+LocalStore::~LocalStore()
+{
+ auto state(_state.lock());
+
+ try {
+ if (state->fdTempRoots) {
+ state->fdTempRoots = -1;
+ unlink(state->fnTempRoots.c_str());
+ }
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+std::string LocalStore::getUri()
+{
+ return "local";
+}
+
+
+int LocalStore::getSchema()
+{
+ int curSchema = 0;
+ if (pathExists(schemaPath)) {
+ string s = readFile(schemaPath);
+ if (!string2Int(s, curSchema))
+ throw Error(format("‘%1%’ is corrupt") % schemaPath);
+ }
+ return curSchema;
+}
+
+
+void LocalStore::openDB(State & state, bool create)
+{
+ if (access(dbDir.c_str(), R_OK | W_OK))
+ throw SysError(format("Nix database directory ‘%1%’ is not writable") % dbDir);
+
+ /* Open the Nix database. */
+ string dbPath = dbDir + "/db.sqlite";
+ auto & db(state.db);
+ if (sqlite3_open_v2(dbPath.c_str(), &db.db,
+ SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
+ throw Error(format("cannot open Nix database ‘%1%’") % dbPath);
+
+ if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
+ throwSQLiteError(db, "setting timeout");
+
+ db.exec("pragma foreign_keys = 1");
+
+ /* !!! check whether sqlite has been built with foreign key
+ support */
+
+ /* Whether SQLite should fsync(). "Normal" synchronous mode
+ should be safe enough. If the user asks for it, don't sync at
+ all. This can cause database corruption if the system
+ crashes. */
+ string syncMode = settings.fsyncMetadata ? "normal" : "off";
+ db.exec("pragma synchronous = " + syncMode);
+
+ /* Set the SQLite journal mode. WAL mode is fastest, so it's the
+ default. */
+ string mode = settings.useSQLiteWAL ? "wal" : "truncate";
+ string prevMode;
+ {
+ SQLiteStmt stmt;
+ stmt.create(db, "pragma main.journal_mode;");
+ if (sqlite3_step(stmt) != SQLITE_ROW)
+ throwSQLiteError(db, "querying journal mode");
+ prevMode = string((const char *) sqlite3_column_text(stmt, 0));
+ }
+ if (prevMode != mode &&
+ sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, "setting journal mode");
+
+ /* Increase the auto-checkpoint interval to 40000 pages. This
+ seems enough to ensure that instantiating the NixOS system
+ derivation is done in a single fsync(). */
+ if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, "setting autocheckpoint interval");
+
+ /* Initialise the database schema, if necessary. */
+ if (create) {
+ const char * schema =
+#include "schema.sql.hh"
+ ;
+ db.exec(schema);
+ }
+}
+
+
+/* To improve purity, users may want to make the Nix store a read-only
+ bind mount. So make the Nix store writable for this process. */
+void LocalStore::makeStoreWritable()
+{
+#if __linux__
+ if (getuid() != 0) return;
+ /* Check if /nix/store is on a read-only mount. */
+ struct statvfs stat;
+ if (statvfs(realStoreDir.c_str(), &stat) != 0)
+ throw SysError("getting info about the Nix store mount point");
+
+ if (stat.f_flag & ST_RDONLY) {
+ if (unshare(CLONE_NEWNS) == -1)
+ throw SysError("setting up a private mount namespace");
+
+ if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
+ throw SysError(format("remounting %1% writable") % realStoreDir);
+ }
+#endif
+}
+
+
+const time_t mtimeStore = 1; /* 1 second into the epoch */
+
+
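+/* Normalise permissions and timestamp: e.g. a regular file with mode
+ 0755 becomes 0555 and one with mode 0644 becomes 0444 (only the
+ user-execute bit is preserved, as 0111 for everyone), and the mtime
+ is forced to mtimeStore, i.e. 1 second after the epoch. */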
+static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
+{
+ if (!S_ISLNK(st.st_mode)) {
+
+ /* Mask out all type related bits. */
+ mode_t mode = st.st_mode & ~S_IFMT;
+
+ if (mode != 0444 && mode != 0555) {
+ mode = (st.st_mode & S_IFMT)
+ | 0444
+ | (st.st_mode & S_IXUSR ? 0111 : 0);
+ if (chmod(path.c_str(), mode) == -1)
+ throw SysError(format("changing mode of ‘%1%’ to %2$o") % path % mode);
+ }
+
+ }
+
+ if (st.st_mtime != mtimeStore) {
+ struct timeval times[2];
+ times[0].tv_sec = st.st_atime;
+ times[0].tv_usec = 0;
+ times[1].tv_sec = mtimeStore;
+ times[1].tv_usec = 0;
+#if HAVE_LUTIMES
+ if (lutimes(path.c_str(), times) == -1)
+ if (errno != ENOSYS ||
+ (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
+#else
+ if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
+#endif
+ throw SysError(format("changing modification time of ‘%1%’") % path);
+ }
+}
+
+
+void canonicaliseTimestampAndPermissions(const Path & path)
+{
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+ canonicaliseTimestampAndPermissions(path, st);
+}
+
+
+static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+{
+ checkInterrupt();
+
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ /* Really make sure that the path is of a supported type. */
+ if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
+ throw Error(format("file ‘%1%’ has an unsupported type") % path);
+
+ /* Fail if the file is not owned by the build user. This prevents
+ us from messing up the ownership/permissions of files
+ hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
+ However, ignore files that we chown'ed ourselves previously to
+ ensure that we don't fail on hard links within the same build
+ (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
+ if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
+ assert(!S_ISDIR(st.st_mode));
+ if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
+ throw BuildError(format("invalid ownership on file ‘%1%’") % path);
+ mode_t mode = st.st_mode & ~S_IFMT;
+ assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
+ return;
+ }
+
+ inodesSeen.insert(Inode(st.st_dev, st.st_ino));
+
+ canonicaliseTimestampAndPermissions(path, st);
+
+ /* Change ownership to the current uid. If it's a symlink, use
+ lchown if available, otherwise don't bother. Wrong ownership
+ of a symlink doesn't matter, since the owning user can't change
+ the symlink and can't delete it because the directory is not
+ writable. The only exception is top-level paths in the Nix
+ store (since that directory is group-writable for the Nix build
+ users group); we check for this case below. */
+ if (st.st_uid != geteuid()) {
+#if HAVE_LCHOWN
+ if (lchown(path.c_str(), geteuid(), getegid()) == -1)
+#else
+ if (!S_ISLNK(st.st_mode) &&
+ chown(path.c_str(), geteuid(), getegid()) == -1)
+#endif
+ throw SysError(format("changing owner of ‘%1%’ to %2%")
+ % path % geteuid());
+ }
+
+ if (S_ISDIR(st.st_mode)) {
+ DirEntries entries = readDirectory(path);
+ for (auto & i : entries)
+ canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
+ }
+}
+
+
+void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+{
+ canonicalisePathMetaData_(path, fromUid, inodesSeen);
+
+ /* On platforms that don't have lchown(), the top-level path can't
+ be a symlink, since we can't change its ownership. */
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ if (st.st_uid != geteuid()) {
+ assert(S_ISLNK(st.st_mode));
+ throw Error(format("wrong ownership of top-level store path ‘%1%’") % path);
+ }
+}
+
+
+void canonicalisePathMetaData(const Path & path, uid_t fromUid)
+{
+ InodesSeen inodesSeen;
+ canonicalisePathMetaData(path, fromUid, inodesSeen);
+}
+
+
+void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
+{
+ string drvName = storePathToName(drvPath);
+ assert(isDerivation(drvName));
+ drvName = string(drvName, 0, drvName.size() - drvExtension.size());
+
+ if (drv.isFixedOutput()) {
+ DerivationOutputs::const_iterator out = drv.outputs.find("out");
+ if (out == drv.outputs.end())
+ throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath);
+
+ bool recursive; Hash h;
+ out->second.parseHashInfo(recursive, h);
+ Path outPath = makeFixedOutputPath(recursive, h, drvName);
+
+ StringPairs::const_iterator j = drv.env.find("out");
+ if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
+ throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
+ % drvPath % out->second.path % outPath);
+ }
+
+ else {
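+ /* For non-fixed-output derivations, the expected output paths are
+ computed from the hash of the derivation with its output paths
+ (and the corresponding environment variables) cleared, so
+ replicate that computation here. */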
+ Derivation drvCopy(drv);
+ for (auto & i : drvCopy.outputs) {
+ i.second.path = "";
+ drvCopy.env[i.first] = "";
+ }
+
+ Hash h = hashDerivationModulo(*this, drvCopy);
+
+ for (auto & i : drv.outputs) {
+ Path outPath = makeOutputPath(i.first, h, drvName);
+ StringPairs::const_iterator j = drv.env.find(i.first);
+ if (i.second.path != outPath || j == drv.env.end() || j->second != outPath)
+ throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
+ % drvPath % i.second.path % outPath);
+ }
+ }
+}
+
+
+uint64_t LocalStore::addValidPath(State & state,
+ const ValidPathInfo & info, bool checkOutputs)
+{
+ assert(info.ca == "" || info.isContentAddressed(*this));
+
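+ /* Two-argument bindings below store SQL NULL when the second
+ argument is false. */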
+ state.stmtRegisterValidPath.use()
+ (info.path)
+ ("sha256:" + printHash(info.narHash))
+ (info.registrationTime == 0 ? time(0) : info.registrationTime)
+ (info.deriver, info.deriver != "")
+ (info.narSize, info.narSize != 0)
+ (info.ultimate ? 1 : 0, info.ultimate)
+ (concatStringsSep(" ", info.sigs), !info.sigs.empty())
+ (info.ca, !info.ca.empty())
+ .exec();
+ uint64_t id = sqlite3_last_insert_rowid(state.db);
+
+ /* If this is a derivation, then store the derivation outputs in
+ the database. This is useful for the garbage collector: it can
+ efficiently query whether a path is an output of some
+ derivation. */
+ if (isDerivation(info.path)) {
+ Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(info.path));
+
+ /* Verify that the output paths in the derivation are correct
+ (i.e., follow the scheme for computing output paths from
+ derivations). Note that if this throws an error, then the
+ DB transaction is rolled back, so the path validity
+ registration above is undone. */
+ if (checkOutputs) checkDerivationOutputs(info.path, drv);
+
+ for (auto & i : drv.outputs) {
+ state.stmtAddDerivationOutput.use()
+ (id)
+ (i.first)
+ (i.second.path)
+ .exec();
+ }
+ }
+
+ {
+ auto state_(Store::state.lock());
+ state_->pathInfoCache.upsert(storePathToHash(info.path), std::make_shared<ValidPathInfo>(info));
+ }
+
+ return id;
+}
+
+
+Hash parseHashField(const Path & path, const string & s)
+{
+ string::size_type colon = s.find(':');
+ if (colon == string::npos)
+ throw Error(format("corrupt hash ‘%1%’ in valid-path entry for ‘%2%’")
+ % s % path);
+ HashType ht = parseHashType(string(s, 0, colon));
+ if (ht == htUnknown)
+ throw Error(format("unknown hash type ‘%1%’ in valid-path entry for ‘%2%’")
+ % string(s, 0, colon) % path);
+ return parseHash(ht, string(s, colon + 1));
+}
+
+
+void LocalStore::queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
+
+ auto info = std::make_shared<ValidPathInfo>();
+ info->path = path;
+
+ assertStorePath(path);
+
+ return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
+ auto state(_state.lock());
+
+ /* Get the path info. */
+ auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));
+
+ if (!useQueryPathInfo.next())
+ return std::shared_ptr<ValidPathInfo>();
+
+ info->id = useQueryPathInfo.getInt(0);
+
+ info->narHash = parseHashField(path, useQueryPathInfo.getStr(1));
+
+ info->registrationTime = useQueryPathInfo.getInt(2);
+
+ auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
+ if (s) info->deriver = s;
+
+ /* Note that narSize = NULL yields 0. */
+ info->narSize = useQueryPathInfo.getInt(4);
+
+ info->ultimate = useQueryPathInfo.getInt(5) == 1;
+
+ s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
+ if (s) info->sigs = tokenizeString<StringSet>(s, " ");
+
+ s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 7);
+ if (s) info->ca = s;
+
+ /* Get the references. */
+ auto useQueryReferences(state->stmtQueryReferences.use()(info->id));
+
+ while (useQueryReferences.next())
+ info->references.insert(useQueryReferences.getStr(0));
+
+ return info;
+ });
+ });
+}
+
+
+/* Update path info in the database. */
+void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
+{
+ state.stmtUpdatePathInfo.use()
+ (info.narSize, info.narSize != 0)
+ ("sha256:" + printHash(info.narHash))
+ (info.ultimate ? 1 : 0, info.ultimate)
+ (concatStringsSep(" ", info.sigs), !info.sigs.empty())
+ (info.ca, !info.ca.empty())
+ (info.path)
+ .exec();
+}
+
+
+uint64_t LocalStore::queryValidPathId(State & state, const Path & path)
+{
+ auto use(state.stmtQueryPathInfo.use()(path));
+ if (!use.next())
+ throw Error(format("path ‘%1%’ is not valid") % path);
+ return use.getInt(0);
+}
+
+
+bool LocalStore::isValidPath_(State & state, const Path & path)
+{
+ return state.stmtQueryPathInfo.use()(path).next();
+}
+
+
+bool LocalStore::isValidPathUncached(const Path & path)
+{
+ return retrySQLite<bool>([&]() {
+ auto state(_state.lock());
+ return isValidPath_(*state, path);
+ });
+}
+
+
+PathSet LocalStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+ PathSet res;
+ for (auto & i : paths)
+ if (isValidPath(i)) res.insert(i);
+ return res;
+}
+
+
+PathSet LocalStore::queryAllValidPaths()
+{
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+ auto use(state->stmtQueryValidPaths.use());
+ PathSet res;
+ while (use.next()) res.insert(use.getStr(0));
+ return res;
+ });
+}
+
+
+void LocalStore::queryReferrers(State & state, const Path & path, PathSet & referrers)
+{
+ auto useQueryReferrers(state.stmtQueryReferrers.use()(path));
+
+ while (useQueryReferrers.next())
+ referrers.insert(useQueryReferrers.getStr(0));
+}
+
+
+void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
+{
+ assertStorePath(path);
+ return retrySQLite<void>([&]() {
+ auto state(_state.lock());
+ queryReferrers(*state, path, referrers);
+ });
+}
+
+
+PathSet LocalStore::queryValidDerivers(const Path & path)
+{
+ assertStorePath(path);
+
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(path));
+
+ PathSet derivers;
+ while (useQueryValidDerivers.next())
+ derivers.insert(useQueryValidDerivers.getStr(1));
+
+ return derivers;
+ });
+}
+
+
+PathSet LocalStore::queryDerivationOutputs(const Path & path)
+{
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
+ (queryValidPathId(*state, path)));
+
+ PathSet outputs;
+ while (useQueryDerivationOutputs.next())
+ outputs.insert(useQueryDerivationOutputs.getStr(1));
+
+ return outputs;
+ });
+}
+
+
+StringSet LocalStore::queryDerivationOutputNames(const Path & path)
+{
+ return retrySQLite<StringSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
+ (queryValidPathId(*state, path)));
+
+ StringSet outputNames;
+ while (useQueryDerivationOutputs.next())
+ outputNames.insert(useQueryDerivationOutputs.getStr(0));
+
+ return outputNames;
+ });
+}
+
+
+Path LocalStore::queryPathFromHashPart(const string & hashPart)
+{
+ if (hashPart.size() != storePathHashLen) throw Error("invalid hash part");
+
+ Path prefix = storeDir + "/" + hashPart;
+
+ return retrySQLite<Path>([&]() -> std::string {
+ auto state(_state.lock());
+
+ auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix));
+
+ if (!useQueryPathFromHashPart.next()) return "";
+
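+ /* Check that the path returned by the query really starts with
+ storeDir + "/" + hashPart. */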
+ const char * s = (const char *) sqlite3_column_text(state->stmtQueryPathFromHashPart, 0);
+ return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
+ });
+}
+
+
+PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
+{
+ if (!settings.useSubstitutes) return PathSet();
+
+ auto remaining = paths;
+ PathSet res;
+
+ for (auto & sub : getDefaultSubstituters()) {
+ if (remaining.empty()) break;
+ if (sub->storeDir != storeDir) continue;
+ if (!sub->wantMassQuery()) continue;
+
+ auto valid = sub->queryValidPaths(remaining);
+
+ PathSet remaining2;
+ for (auto & path : remaining)
+ if (valid.count(path))
+ res.insert(path);
+ else
+ remaining2.insert(path);
+
+ std::swap(remaining, remaining2);
+ }
+
+ return res;
+}
+
+
+void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos)
+{
+ if (!settings.useSubstitutes) return;
+ for (auto & sub : getDefaultSubstituters()) {
+ if (sub->storeDir != storeDir) continue;
+ for (auto & path : paths) {
+ if (infos.count(path)) continue;
+ debug(format("checking substituter ‘%s’ for path ‘%s’")
+ % sub->getUri() % path);
+ try {
+ auto info = sub->queryPathInfo(path);
+ auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
+ std::shared_ptr<const ValidPathInfo>(info));
+ infos[path] = SubstitutablePathInfo{
+ info->deriver,
+ info->references,
+ narInfo ? narInfo->fileSize : 0,
+ info->narSize};
+ } catch (InvalidPath) {
+ }
+ }
+ }
+}
+
+
+void LocalStore::registerValidPath(const ValidPathInfo & info)
+{
+ ValidPathInfos infos;
+ infos.push_back(info);
+ registerValidPaths(infos);
+}
+
+
+void LocalStore::registerValidPaths(const ValidPathInfos & infos)
+{
+ /* SQLite will fsync by default, but the new valid paths may not
+ be fsync-ed. So optionally sync before registering their
+ validity, at the expense of making path registration slower. */
+ if (settings.syncBeforeRegistering) sync();
+
+ return retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ SQLiteTxn txn(state->db);
+ PathSet paths;
+
+ for (auto & i : infos) {
+ assert(i.narHash.type == htSHA256);
+ if (isValidPath_(*state, i.path))
+ updatePathInfo(*state, i);
+ else
+ addValidPath(*state, i, false);
+ paths.insert(i.path);
+ }
+
+ for (auto & i : infos) {
+ auto referrer = queryValidPathId(*state, i.path);
+ for (auto & j : i.references)
+ state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)).exec();
+ }
+
+ /* Check that the derivation outputs are correct. We can't do
+ this in addValidPath() above, because the references might
+ not be valid yet. */
+ for (auto & i : infos)
+ if (isDerivation(i.path)) {
+ // FIXME: inefficient; we already loaded the
+ // derivation in addValidPath().
+ Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(i.path));
+ checkDerivationOutputs(i.path, drv);
+ }
+
+ /* Do a topological sort of the paths. This will throw an
+ error if a cycle is detected and roll back the
+ transaction. Cycles can only occur when a derivation
+ has multiple outputs. */
+ topoSortPaths(paths);
+
+ txn.commit();
+ });
+}
+
+
+/* Invalidate a path. The caller is responsible for checking that
+ there are no referrers. */
+void LocalStore::invalidatePath(State & state, const Path & path)
+{
+ debug(format("invalidating path ‘%1%’") % path);
+
+ state.stmtInvalidatePath.use()(path).exec();
+
+ /* Note that the foreign key constraints on the Refs table take
+ care of deleting the reference entries for `path'. */
+
+ {
+ auto state_(Store::state.lock());
+ state_->pathInfoCache.erase(storePathToHash(path));
+ }
+}
+
+
+void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
+{
+ Hash h = hashString(htSHA256, *nar);
+ if (h != info.narHash)
+ throw Error(format("hash mismatch importing path ‘%s’; expected hash ‘%s’, got ‘%s’") %
+ info.path % info.narHash.to_string() % h.to_string());
+
+ if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
+ throw Error("cannot add path ‘%s’ because it lacks a valid signature", info.path);
+
+ addTempRoot(info.path);
+
+ if (repair || !isValidPath(info.path)) {
+
+ PathLocks outputLock;
+
+ Path realPath = realStoreDir + "/" + baseNameOf(info.path);
+
+ /* Lock the output path. But don't lock if we're being called
+ from a build hook (whose parent process already acquired a
+ lock on this path). */
+ Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
+ if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end())
+ outputLock.lockPaths({realPath});
+
+ if (repair || !isValidPath(info.path)) {
+
+ deletePath(realPath);
+
+ StringSource source(*nar);
+ restorePath(realPath, source);
+
+ canonicalisePathMetaData(realPath, -1);
+
+ optimisePath(realPath); // FIXME: combine with hashPath()
+
+ registerValidPath(info);
+ }
+
+ outputLock.setDeletion(true);
+ }
+}
+
+
+Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
+ bool recursive, HashType hashAlgo, bool repair)
+{
+ Hash h = hashString(hashAlgo, dump);
+
+ Path dstPath = makeFixedOutputPath(recursive, h, name);
+
+ addTempRoot(dstPath);
+
+ if (repair || !isValidPath(dstPath)) {
+
+ /* The first check above is an optimisation to prevent
+ unnecessary lock acquisition. */
+
+ Path realPath = realStoreDir + "/" + baseNameOf(dstPath);
+
+ PathLocks outputLock({realPath});
+
+ if (repair || !isValidPath(dstPath)) {
+
+ deletePath(realPath);
+
+ if (recursive) {
+ StringSource source(dump);
+ restorePath(realPath, source);
+ } else
+ writeFile(realPath, dump);
+
+ canonicalisePathMetaData(realPath, -1);
+
+ /* Register the SHA-256 hash of the NAR serialisation of
+ the path in the database. We may just have computed it
+ above (if called with recursive == true and hashAlgo ==
+ sha256); otherwise, compute it here. */
+ HashResult hash;
+ if (recursive) {
+ hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
+ hash.second = dump.size();
+ } else
+ hash = hashPath(htSHA256, realPath);
+
+ optimisePath(realPath); // FIXME: combine with hashPath()
+
+ ValidPathInfo info;
+ info.path = dstPath;
+ info.narHash = hash.first;
+ info.narSize = hash.second;
+ info.ultimate = true;
+ info.ca = makeFixedOutputCA(recursive, h);
+ registerValidPath(info);
+ }
+
+ outputLock.setDeletion(true);
+ }
+
+ return dstPath;
+}
+
+
+Path LocalStore::addToStore(const string & name, const Path & _srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ Path srcPath(absPath(_srcPath));
+
+ /* Read the whole path into memory. This is not a very scalable
+ method for very large paths, but addToStore() is mainly used for
+ small files. */
+ StringSink sink;
+ if (recursive)
+ dumpPath(srcPath, sink, filter);
+ else
+ sink.s = make_ref<std::string>(readFile(srcPath));
+
+ return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair);
+}
+
+
+Path LocalStore::addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair)
+{
+ auto hash = hashString(htSHA256, s);
+ auto dstPath = makeTextPath(name, hash, references);
+
+ addTempRoot(dstPath);
+
+ if (repair || !isValidPath(dstPath)) {
+
+ Path realPath = realStoreDir + "/" + baseNameOf(dstPath);
+
+ PathLocks outputLock({realPath});
+
+ if (repair || !isValidPath(dstPath)) {
+
+ deletePath(realPath);
+
+ writeFile(realPath, s);
+
+ canonicalisePathMetaData(realPath, -1);
+
+ StringSink sink;
+ dumpString(s, sink);
+ auto narHash = hashString(htSHA256, *sink.s);
+
+ optimisePath(realPath);
+
+ ValidPathInfo info;
+ info.path = dstPath;
+ info.narHash = narHash;
+ info.narSize = sink.s->size();
+ info.references = references;
+ info.ultimate = true;
+ info.ca = "text:" + hash.to_string();
+ registerValidPath(info);
+ }
+
+ outputLock.setDeletion(true);
+ }
+
+ return dstPath;
+}
+
+
+/* Create a temporary directory in the store that won't be
+ garbage-collected. */
+Path LocalStore::createTempDirInStore()
+{
+ Path tmpDir;
+ do {
+ /* There is a slight possibility that `tmpDir' gets deleted by
+ the GC between createTempDir() and addTempRoot(), so repeat
+ until `tmpDir' exists. */
+ tmpDir = createTempDir(realStoreDir);
+ addTempRoot(tmpDir);
+ } while (!pathExists(tmpDir));
+ return tmpDir;
+}
+
+
+void LocalStore::invalidatePathChecked(const Path & path)
+{
+ assertStorePath(path);
+
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ SQLiteTxn txn(state->db);
+
+ if (isValidPath_(*state, path)) {
+ PathSet referrers; queryReferrers(*state, path, referrers);
+ referrers.erase(path); /* ignore self-references */
+ if (!referrers.empty())
+ throw PathInUse(format("cannot delete path ‘%1%’ because it is in use by %2%")
+ % path % showPaths(referrers));
+ invalidatePath(*state, path);
+ }
+
+ txn.commit();
+ });
+}
+
+
+bool LocalStore::verifyStore(bool checkContents, bool repair)
+{
+ printError(format("reading the Nix store..."));
+
+ bool errors = false;
+
+ /* Acquire the global GC lock to prevent a garbage collection. */
+ AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+ PathSet store;
+ for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);
+
+ /* Check whether all valid paths actually exist. */
+ printInfo("checking path existence...");
+
+ PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
+
+ for (auto & i : validPaths2)
+ verifyPath(i, store, done, validPaths, repair, errors);
+
+ /* Release the GC lock so that checking content hashes (which can
+ take ages) doesn't block the GC or builds. */
+ fdGCLock = -1;
+
+ /* Optionally, check the content hashes (slow). */
+ if (checkContents) {
+ printInfo("checking hashes...");
+
+ Hash nullHash(htSHA256);
+
+ for (auto & i : validPaths) {
+ try {
+ auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(i)));
+
+ /* Check the content hash (optionally - slow). */
+ printMsg(lvlTalkative, format("checking contents of ‘%1%’") % i);
+ HashResult current = hashPath(info->narHash.type, i);
+
+ if (info->narHash != nullHash && info->narHash != current.first) {
+ printError(format("path ‘%1%’ was modified! "
+ "expected hash ‘%2%’, got ‘%3%’")
+ % i % printHash(info->narHash) % printHash(current.first));
+ if (repair) repairPath(i); else errors = true;
+ } else {
+
+ bool update = false;
+
+ /* Fill in missing hashes. */
+ if (info->narHash == nullHash) {
+ printError(format("fixing missing hash on ‘%1%’") % i);
+ info->narHash = current.first;
+ update = true;
+ }
+
+ /* Fill in missing narSize fields (from old stores). */
+ if (info->narSize == 0) {
+ printError(format("updating size field on ‘%1%’ to %2%") % i % current.second);
+ info->narSize = current.second;
+ update = true;
+ }
+
+ if (update) {
+ auto state(_state.lock());
+ updatePathInfo(*state, *info);
+ }
+
+ }
+
+ } catch (Error & e) {
+ /* It's possible that the path got GC'ed, so ignore
+ errors on invalid paths. */
+ if (isValidPath(i))
+ printError(format("error: %1%") % e.msg());
+ else
+ printError(format("warning: %1%") % e.msg());
+ errors = true;
+ }
+ }
+ }
+
+ return errors;
+}
+
+
+void LocalStore::verifyPath(const Path & path, const PathSet & store,
+ PathSet & done, PathSet & validPaths, bool repair, bool & errors)
+{
+ checkInterrupt();
+
+ if (done.find(path) != done.end()) return;
+ done.insert(path);
+
+ if (!isStorePath(path)) {
+ printError(format("path ‘%1%’ is not in the Nix store") % path);
+ auto state(_state.lock());
+ invalidatePath(*state, path);
+ return;
+ }
+
+ if (store.find(baseNameOf(path)) == store.end()) {
+ /* Check any referrers first. If we can invalidate them
+ first, then we can invalidate this path as well. */
+ bool canInvalidate = true;
+ PathSet referrers; queryReferrers(path, referrers);
+ for (auto & i : referrers)
+ if (i != path) {
+ verifyPath(i, store, done, validPaths, repair, errors);
+ if (validPaths.find(i) != validPaths.end())
+ canInvalidate = false;
+ }
+
+ if (canInvalidate) {
+ printError(format("path ‘%1%’ disappeared, removing from database...") % path);
+ auto state(_state.lock());
+ invalidatePath(*state, path);
+ } else {
+ printError(format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
+ if (repair)
+ try {
+ repairPath(path);
+ } catch (Error & e) {
+ printError(format("warning: %1%") % e.msg());
+ errors = true;
+ }
+ else errors = true;
+ }
+
+ return;
+ }
+
+ validPaths.insert(path);
+}
+
+
+#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
+
+static void makeMutable(const Path & path)
+{
+ checkInterrupt();
+
+ struct stat st = lstat(path);
+
+ if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;
+
+ if (S_ISDIR(st.st_mode)) {
+ for (auto & i : readDirectory(path))
+ makeMutable(path + "/" + i.name);
+ }
+
+ /* The O_NOFOLLOW is important to prevent us from changing the
+ mutable bit on the target of a symlink (which would be a
+ security hole). */
+ AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
+ if (fd == -1) {
+ if (errno == ELOOP) return; // it's a symlink
+ throw SysError(format("opening file ‘%1%’") % path);
+ }
+
+ unsigned int flags = 0, old;
+
+ /* Silently ignore errors getting/setting the immutable flag so
+ that we work correctly on filesystems that don't support it. */
+ if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
+ old = flags;
+ flags &= ~FS_IMMUTABLE_FL;
+ if (old == flags) return;
+ if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
+}
+
+/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
+void LocalStore::upgradeStore7()
+{
+ if (getuid() != 0) return;
+ printError("removing immutable bits from the Nix store (this may take a while)...");
+ makeMutable(realStoreDir);
+}
+
+#else
+
+void LocalStore::upgradeStore7()
+{
+}
+
+#endif
+
+
+void LocalStore::vacuumDB()
+{
+ auto state(_state.lock());
+ state->db.exec("vacuum");
+}
+
+
+void LocalStore::addSignatures(const Path & storePath, const StringSet & sigs)
+{
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ SQLiteTxn txn(state->db);
+
+ auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(storePath)));
+
+ info->sigs.insert(sigs.begin(), sigs.end());
+
+ updatePathInfo(*state, *info);
+
+ txn.commit();
+ });
+}
+
+
+void LocalStore::signPathInfo(ValidPathInfo & info)
+{
+ // FIXME: keep secret keys in memory.
+
+ auto secretKeyFiles = settings.secretKeyFiles;
+
+ for (auto & secretKeyFile : secretKeyFiles.get()) {
+ SecretKey secretKey(readFile(secretKeyFile));
+ info.sign(secretKey);
+ }
+}
+
+
+}
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
new file mode 100644
index 000000000..f2c40e964
--- /dev/null
+++ b/src/libstore/local-store.hh
@@ -0,0 +1,289 @@
+#pragma once
+
+#include "sqlite.hh"
+
+#include "pathlocks.hh"
+#include "store-api.hh"
+#include "sync.hh"
+#include "util.hh"
+
+#include <string>
+#include <unordered_set>
+
+
+namespace nix {
+
+
+/* Nix store and database schema version. Version 1 (or 0) was Nix <=
+ 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
+ Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
+ Nix 1.0. Version 7 is Nix 1.3. Version 10 is Nix 1.12. */
+const int nixSchemaVersion = 10;
+
+
+struct Derivation;
+
+
+struct OptimiseStats
+{
+ unsigned long filesLinked = 0;
+ unsigned long long bytesFreed = 0;
+ unsigned long long blocksFreed = 0;
+};
+
+
+class LocalStore : public LocalFSStore
+{
+private:
+
+ /* Lock file used for upgrading. */
+ AutoCloseFD globalLock;
+
+ struct State
+ {
+ /* The SQLite database object. */
+ SQLite db;
+
+ /* Some precompiled SQLite statements. */
+ SQLiteStmt stmtRegisterValidPath;
+ SQLiteStmt stmtUpdatePathInfo;
+ SQLiteStmt stmtAddReference;
+ SQLiteStmt stmtQueryPathInfo;
+ SQLiteStmt stmtQueryReferences;
+ SQLiteStmt stmtQueryReferrers;
+ SQLiteStmt stmtInvalidatePath;
+ SQLiteStmt stmtAddDerivationOutput;
+ SQLiteStmt stmtQueryValidDerivers;
+ SQLiteStmt stmtQueryDerivationOutputs;
+ SQLiteStmt stmtQueryPathFromHashPart;
+ SQLiteStmt stmtQueryValidPaths;
+
+ /* The file to which we write our temporary roots. */
+ Path fnTempRoots;
+ AutoCloseFD fdTempRoots;
+ };
+
+ Sync<State, std::recursive_mutex> _state;
+
+public:
+
+ PathSetting realStoreDir_;
+
+ const Path realStoreDir;
+ const Path dbDir;
+ const Path linksDir;
+ const Path reservedPath;
+ const Path schemaPath;
+ const Path trashDir;
+
+private:
+
+ Setting<bool> requireSigs{(Store*) this,
+ settings.signedBinaryCaches != "", // FIXME
+ "require-sigs", "whether store paths should have a trusted signature on import"};
+
+ PublicKeys publicKeys;
+
+public:
+
+ /* Initialise the local store, upgrading the schema if
+ necessary. */
+ LocalStore(const Params & params);
+
+ ~LocalStore();
+
+ /* Implementations of abstract store API methods. */
+
+ std::string getUri() override;
+
+ bool isValidPathUncached(const Path & path) override;
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
+
+ PathSet queryAllValidPaths() override;
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override;
+
+ void queryReferrers(const Path & path, PathSet & referrers) override;
+
+ PathSet queryValidDerivers(const Path & path) override;
+
+ PathSet queryDerivationOutputs(const Path & path) override;
+
+ StringSet queryDerivationOutputNames(const Path & path) override;
+
+ Path queryPathFromHashPart(const string & hashPart) override;
+
+ PathSet querySubstitutablePaths(const PathSet & paths) override;
+
+ void querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos) override;
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override;
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo,
+ PathFilter & filter, bool repair) override;
+
+ /* Like addToStore(), but the contents of the path are contained
+ in `dump', which is either a NAR serialisation (if recursive ==
+ true) or simply the contents of a regular file (if recursive ==
+ false). */
+ Path addToStoreFromDump(const string & dump, const string & name,
+ bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false);
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair) override;
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override;
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override;
+
+ void ensurePath(const Path & path) override;
+
+ void addTempRoot(const Path & path) override;
+
+ void addIndirectRoot(const Path & path) override;
+
+ void syncWithGC() override;
+
+private:
+
+ typedef std::shared_ptr<AutoCloseFD> FDPtr;
+ typedef list<FDPtr> FDs;
+
+ void readTempRoots(PathSet & tempRoots, FDs & fds);
+
+public:
+
+ Roots findRoots() override;
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override;
+
+ /* Optimise the disk space usage of the Nix store by hard-linking
+ files with the same contents. */
+ void optimiseStore(OptimiseStats & stats);
+
+ void optimiseStore() override;
+
+ /* Optimise a single store path. */
+ void optimisePath(const Path & path);
+
+ bool verifyStore(bool checkContents, bool repair) override;
+
+ /* Register the validity of a path, i.e., that `path' exists, that
+ the paths referenced by it exist, and in the case of an output
+ path of a derivation, that it has been produced by a successful
+ execution of the derivation (or something equivalent). Also
+ register the hash of the file system contents of the path. The
+ hash must be a SHA-256 hash. */
+ void registerValidPath(const ValidPathInfo & info);
+
+ void registerValidPaths(const ValidPathInfos & infos);
+
+ void vacuumDB();
+
+ /* Repair the contents of the given path by redownloading it using
+ a substituter (if available). */
+ void repairPath(const Path & path);
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override;
+
+private:
+
+ int getSchema();
+
+ void openDB(State & state, bool create);
+
+ void makeStoreWritable();
+
+ uint64_t queryValidPathId(State & state, const Path & path);
+
+ uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);
+
+ void invalidatePath(State & state, const Path & path);
+
+ /* Delete a path from the Nix store. */
+ void invalidatePathChecked(const Path & path);
+
+ void verifyPath(const Path & path, const PathSet & store,
+ PathSet & done, PathSet & validPaths, bool repair, bool & errors);
+
+ void updatePathInfo(State & state, const ValidPathInfo & info);
+
+ void upgradeStore6();
+ void upgradeStore7();
+ PathSet queryValidPathsOld();
+ ValidPathInfo queryPathInfoOld(const Path & path);
+
+ struct GCState;
+
+ void deleteGarbage(GCState & state, const Path & path);
+
+ void tryToDelete(GCState & state, const Path & path);
+
+ bool canReachRoot(GCState & state, PathSet & visited, const Path & path);
+
+ void deletePathRecursive(GCState & state, const Path & path);
+
+ bool isActiveTempFile(const GCState & state,
+ const Path & path, const string & suffix);
+
+ int openGCLock(LockType lockType);
+
+ void findRoots(const Path & path, unsigned char type, Roots & roots);
+
+ void findRuntimeRoots(PathSet & roots);
+
+ void removeUnusedLinks(const GCState & state);
+
+ Path createTempDirInStore();
+
+ void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
+
+ typedef std::unordered_set<ino_t> InodeHash;
+
+ InodeHash loadInodeHash();
+ Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
+ void optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash);
+
+ // Internal versions that are not wrapped in retrySQLite().
+ bool isValidPath_(State & state, const Path & path);
+ void queryReferrers(State & state, const Path & path, PathSet & referrers);
+
+ /* Add signatures to a ValidPathInfo using the secret keys
+ specified by the ‘secret-key-files’ option. */
+ void signPathInfo(ValidPathInfo & info);
+
+ Path getRealStoreDir() override { return realStoreDir; }
+
+ friend class DerivationGoal;
+ friend class SubstitutionGoal;
+};
+
+
+typedef std::pair<dev_t, ino_t> Inode;
+typedef set<Inode> InodesSeen;
+
+
+/* "Fix", or canonicalise, the meta-data of the files in a store path
+ after it has been built. In particular:
+ - the last modification date on each file is set to 1 (i.e.,
+ 00:00:01 1/1/1970 UTC)
+ - the permissions are set to 444 or 555 (i.e., read-only with or
+ without execute permission; setuid bits etc. are cleared)
+ - the owner and group are set to the Nix user and group, if we're
+ running as root. */
+void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
+void canonicalisePathMetaData(const Path & path, uid_t fromUid);
+
+void canonicaliseTimestampAndPermissions(const Path & path);
+
+MakeError(PathInUse, Error);
+
+}
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
new file mode 100644
index 000000000..4da20330c
--- /dev/null
+++ b/src/libstore/local.mk
@@ -0,0 +1,41 @@
+libraries += libstore
+
+libstore_NAME = libnixstore
+
+libstore_DIR := $(d)
+
+libstore_SOURCES := $(wildcard $(d)/*.cc)
+
+libstore_LIBS = libutil libformat
+
+libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
+
+ifeq ($(ENABLE_S3), 1)
+ libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core
+endif
+
+ifeq ($(OS), SunOS)
+ libstore_LDFLAGS += -lsocket
+endif
+
+libstore_CXXFLAGS = \
+ -DNIX_PREFIX=\"$(prefix)\" \
+ -DNIX_STORE_DIR=\"$(storedir)\" \
+ -DNIX_DATA_DIR=\"$(datadir)\" \
+ -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
+ -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
+ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
+ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
+ -DNIX_BIN_DIR=\"$(bindir)\" \
+ -DBASH_PATH="\"$(bash)\"" \
+ -DLSOF=\"$(lsof)\"
+
+$(d)/local-store.cc: $(d)/schema.sql.hh
+
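+# Turn schema.sql into a C string literal header: escape double quotes
+# and wrap each line in quotes with a trailing \n.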
+%.sql.hh: %.sql
+ $(trace-gen) sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $< > $@ || (rm $@ && exit 1)
+
+clean-files += $(d)/schema.sql.hh
+
+$(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/sandbox-defaults.sb, $(datadir)/nix, 0644))
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
new file mode 100644
index 000000000..9a88cdc31
--- /dev/null
+++ b/src/libstore/misc.cc
@@ -0,0 +1,277 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "store-api.hh"
+#include "thread-pool.hh"
+
+
+namespace nix {
+
+
+void Store::computeFSClosure(const PathSet & startPaths,
+ PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+ struct State
+ {
+ size_t pending;
+ PathSet & paths;
+ std::exception_ptr exc;
+ };
+
+ Sync<State> state_(State{0, paths_, 0});
+
+ std::function<void(const Path &)> enqueue;
+
+ std::condition_variable done;
+
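+ /* Enqueue a path: mark it as pending, query its info
+ asynchronously, and recurse into its references (or referrers,
+ derivers and outputs, depending on the flags). 'done' is
+ signalled once the last pending query has finished. */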
+ enqueue = [&](const Path & path) -> void {
+ {
+ auto state(state_.lock());
+ if (state->exc) return;
+ if (state->paths.count(path)) return;
+ state->paths.insert(path);
+ state->pending++;
+ }
+
+ queryPathInfo(path,
+ [&, path](ref<ValidPathInfo> info) {
+ // FIXME: calls to isValidPath() should be async
+
+ if (flipDirection) {
+
+ PathSet referrers;
+ queryReferrers(path, referrers);
+ for (auto & ref : referrers)
+ if (ref != path)
+ enqueue(ref);
+
+ if (includeOutputs)
+ for (auto & i : queryValidDerivers(path))
+ enqueue(i);
+
+ if (includeDerivers && isDerivation(path))
+ for (auto & i : queryDerivationOutputs(path))
+ if (isValidPath(i) && queryPathInfo(i)->deriver == path)
+ enqueue(i);
+
+ } else {
+
+ for (auto & ref : info->references)
+ if (ref != path)
+ enqueue(ref);
+
+ if (includeOutputs && isDerivation(path))
+ for (auto & i : queryDerivationOutputs(path))
+ if (isValidPath(i)) enqueue(i);
+
+ if (includeDerivers && isValidPath(info->deriver))
+ enqueue(info->deriver);
+
+ }
+
+ {
+ auto state(state_.lock());
+ assert(state->pending);
+ if (!--state->pending) done.notify_one();
+ }
+
+ },
+
+ [&, path](std::exception_ptr exc) {
+ auto state(state_.lock());
+ if (!state->exc) state->exc = exc;
+ assert(state->pending);
+ if (!--state->pending) done.notify_one();
+ });
+ };
+
+ for (auto & startPath : startPaths)
+ enqueue(startPath);
+
+ {
+ auto state(state_.lock());
+ while (state->pending) state.wait(done);
+ if (state->exc) std::rethrow_exception(state->exc);
+ }
+}
+
+
+void Store::computeFSClosure(const Path & startPath,
+ PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+ computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, includeDerivers);
+}
+
+
+void Store::queryMissing(const PathSet & targets,
+ PathSet & willBuild_, PathSet & willSubstitute_, PathSet & unknown_,
+ unsigned long long & downloadSize_, unsigned long long & narSize_)
+{
+ downloadSize_ = narSize_ = 0;
+
+ ThreadPool pool;
+
+ struct State
+ {
+ PathSet done;
+ PathSet & unknown, & willSubstitute, & willBuild;
+ unsigned long long & downloadSize;
+ unsigned long long & narSize;
+ };
+
+ struct DrvState
+ {
+ size_t left;
+ bool done = false;
+ PathSet outPaths;
+ DrvState(size_t left) : left(left) { }
+ };
+
+ Sync<State> state_(State{PathSet(), unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});
+
+ std::function<void(Path)> doPath;
+
+ auto mustBuildDrv = [&](const Path & drvPath, const Derivation & drv) {
+ {
+ auto state(state_.lock());
+ state->willBuild.insert(drvPath);
+ }
+
+ for (auto & i : drv.inputDrvs)
+ pool.enqueue(std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second)));
+ };
+
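+ /* Check a single invalid output of a derivation: if it is not
+ substitutable, the derivation has to be built; once all invalid
+ outputs turn out to be substitutable, queue them for
+ substitution. */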
+ auto checkOutput = [&](
+ const Path & drvPath, ref<Derivation> drv, const Path & outPath, ref<Sync<DrvState>> drvState_)
+ {
+ if (drvState_->lock()->done) return;
+
+ SubstitutablePathInfos infos;
+ querySubstitutablePathInfos({outPath}, infos);
+
+ if (infos.empty()) {
+ drvState_->lock()->done = true;
+ mustBuildDrv(drvPath, *drv);
+ } else {
+ {
+ auto drvState(drvState_->lock());
+ if (drvState->done) return;
+ assert(drvState->left);
+ drvState->left--;
+ drvState->outPaths.insert(outPath);
+ if (!drvState->left) {
+ for (auto & path : drvState->outPaths)
+ pool.enqueue(std::bind(doPath, path));
+ }
+ }
+ }
+ };
+
+ doPath = [&](const Path & path) {
+
+ {
+ auto state(state_.lock());
+ if (state->done.count(path)) return;
+ state->done.insert(path);
+ }
+
+ DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path);
+
+ if (isDerivation(i2.first)) {
+ if (!isValidPath(i2.first)) {
+ // FIXME: we could try to substitute the derivation.
+ auto state(state_.lock());
+ state->unknown.insert(path);
+ return;
+ }
+
+ Derivation drv = derivationFromPath(i2.first);
+
+ PathSet invalid;
+ for (auto & j : drv.outputs)
+ if (wantOutput(j.first, i2.second)
+ && !isValidPath(j.second.path))
+ invalid.insert(j.second.path);
+ if (invalid.empty()) return;
+
+ if (settings.useSubstitutes && drv.substitutesAllowed()) {
+ auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
+ for (auto & output : invalid)
+ pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
+ } else
+ mustBuildDrv(i2.first, drv);
+
+ } else {
+
+ if (isValidPath(path)) return;
+
+ SubstitutablePathInfos infos;
+ querySubstitutablePathInfos({path}, infos);
+
+ if (infos.empty()) {
+ auto state(state_.lock());
+ state->unknown.insert(path);
+ return;
+ }
+
+ auto info = infos.find(path);
+ assert(info != infos.end());
+
+ {
+ auto state(state_.lock());
+ state->willSubstitute.insert(path);
+ state->downloadSize += info->second.downloadSize;
+ state->narSize += info->second.narSize;
+ }
+
+ for (auto & ref : info->second.references)
+ pool.enqueue(std::bind(doPath, ref));
+ }
+ };
+
+ for (auto & path : targets)
+ pool.enqueue(std::bind(doPath, path));
+
+ pool.process();
+}
+
+
+Paths Store::topoSortPaths(const PathSet & paths)
+{
+ Paths sorted;
+ PathSet visited, parents;
+
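+ /* Depth-first traversal; 'parents' holds the current DFS stack
+ and is used to detect cycles in the references. */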
+ std::function<void(const Path & path, const Path * parent)> dfsVisit;
+
+ dfsVisit = [&](const Path & path, const Path * parent) {
+ if (parents.find(path) != parents.end())
+ throw BuildError(format("cycle detected in the references of ‘%1%’ from ‘%2%’") % path % *parent);
+
+ if (visited.find(path) != visited.end()) return;
+ visited.insert(path);
+ parents.insert(path);
+
+ PathSet references;
+ try {
+ references = queryPathInfo(path)->references;
+ } catch (InvalidPath &) {
+ }
+
+ for (auto & i : references)
+ /* Don't traverse into paths that don't exist. That can
+ happen due to substitutes for non-existent paths. */
+ if (i != path && paths.find(i) != paths.end())
+ dfsVisit(i, &path);
+
+ sorted.push_front(path);
+ parents.erase(path);
+ };
+
+ for (auto & i : paths)
+ dfsVisit(i, nullptr);
+
+ return sorted;
+}
+
+
+}
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
new file mode 100644
index 000000000..4cb5de744
--- /dev/null
+++ b/src/libstore/nar-accessor.cc
@@ -0,0 +1,142 @@
+#include "nar-accessor.hh"
+#include "archive.hh"
+
+#include <map>
+
+namespace nix {
+
+struct NarMember
+{
+ FSAccessor::Type type;
+
+ bool isExecutable;
+
+ /* If this is a regular file, position of the contents of this
+ file in the NAR. */
+ size_t start, size;
+
+ std::string target;
+};
+
+struct NarIndexer : ParseSink, StringSource
+{
+ // FIXME: should store this as a tree. Now we're vulnerable to
+ // O(nm) memory consumption (e.g. for x_0/.../x_n/{y_0..y_m}).
+ typedef std::map<Path, NarMember> Members;
+ Members members;
+
+ Path currentPath;
+ std::string currentStart;
+ bool isExec = false;
+
+ NarIndexer(const std::string & nar) : StringSource(nar)
+ {
+ }
+
+ void createDirectory(const Path & path) override
+ {
+ members.emplace(path,
+ NarMember{FSAccessor::Type::tDirectory, false, 0, 0});
+ }
+
+ void createRegularFile(const Path & path) override
+ {
+ currentPath = path;
+ }
+
+ void isExecutable() override
+ {
+ isExec = true;
+ }
+
+ void preallocateContents(unsigned long long size) override
+ {
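+ /* Remember the first 16 bytes at the current offset so that
+ receiveContents() can verify that 'pos' points at the file
+ contents. */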
+ currentStart = string(s, pos, 16);
+ assert(size <= std::numeric_limits<size_t>::max());
+ members.emplace(currentPath,
+ NarMember{FSAccessor::Type::tRegular, isExec, pos, (size_t) size});
+ }
+
+ void receiveContents(unsigned char * data, unsigned int len) override
+ {
+ // Sanity check
+ if (!currentStart.empty()) {
+ assert(len < 16 || currentStart == string((char *) data, 16));
+ currentStart.clear();
+ }
+ }
+
+ void createSymlink(const Path & path, const string & target) override
+ {
+ members.emplace(path,
+ NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
+ }
+
+ Members::iterator find(const Path & path)
+ {
+ auto i = members.find(path);
+ if (i == members.end())
+ throw Error(format("NAR file does not contain path ‘%1%’") % path);
+ return i;
+ }
+};
+
+struct NarAccessor : public FSAccessor
+{
+ ref<const std::string> nar;
+ NarIndexer indexer;
+
+ NarAccessor(ref<const std::string> nar) : nar(nar), indexer(*nar)
+ {
+ parseDump(indexer, indexer);
+ }
+
+ Stat stat(const Path & path) override
+ {
+ auto i = indexer.members.find(path);
+ if (i == indexer.members.end())
+ return {FSAccessor::Type::tMissing, 0, false};
+ return {i->second.type, i->second.size, i->second.isExecutable};
+ }
+
+ StringSet readDirectory(const Path & path) override
+ {
+ auto i = indexer.find(path);
+
+ if (i->second.type != FSAccessor::Type::tDirectory)
+ throw Error(format("path ‘%1%’ inside NAR file is not a directory") % path);
+
+ ++i;
+ StringSet res;
+ while (i != indexer.members.end() && isInDir(i->first, path)) {
+ // FIXME: really bad performance.
+ if (i->first.find('/', path.size() + 1) == std::string::npos)
+ res.insert(std::string(i->first, path.size() + 1));
+ ++i;
+ }
+ return res;
+ }
+
+ std::string readFile(const Path & path) override
+ {
+ auto i = indexer.find(path);
+ if (i->second.type != FSAccessor::Type::tRegular)
+ throw Error(format("path ‘%1%’ inside NAR file is not a regular file") % path);
+ return std::string(*nar, i->second.start, i->second.size);
+ }
+
+ std::string readLink(const Path & path) override
+ {
+ auto i = indexer.find(path);
+ if (i->second.type != FSAccessor::Type::tSymlink)
+ throw Error(format("path ‘%1%’ inside NAR file is not a symlink") % path);
+ return i->second.target;
+ }
+};
+
+ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
+{
+ return make_ref<NarAccessor>(nar);
+}
+
+}
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
new file mode 100644
index 000000000..83c570be4
--- /dev/null
+++ b/src/libstore/nar-accessor.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "fs-accessor.hh"
+
+namespace nix {
+
+/* Return an object that provides access to the contents of a NAR
+ file. */
+ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);
+
+}
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
new file mode 100644
index 000000000..180a936ed
--- /dev/null
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -0,0 +1,270 @@
+#include "nar-info-disk-cache.hh"
+#include "sync.hh"
+#include "sqlite.hh"
+
+#include <sqlite3.h>
+
+namespace nix {
+
+static const char * schema = R"sql(
+
+create table if not exists BinaryCaches (
+ id integer primary key autoincrement not null,
+ url text unique not null,
+ timestamp integer not null,
+ storeDir text not null,
+ wantMassQuery integer not null,
+ priority integer not null
+);
+
+create table if not exists NARs (
+ cache integer not null,
+ hashPart text not null,
+ namePart text,
+ url text,
+ compression text,
+ fileHash text,
+ fileSize integer,
+ narHash text,
+ narSize integer,
+ refs text,
+ deriver text,
+ sigs text,
+ timestamp integer not null,
+ present integer not null,
+ primary key (cache, hashPart),
+ foreign key (cache) references BinaryCaches(id) on delete cascade
+);
+
+create table if not exists LastPurge (
+ dummy text primary key,
+ value integer
+);
+
+)sql";
+
+class NarInfoDiskCacheImpl : public NarInfoDiskCache
+{
+public:
+
+ /* How long negative and positive lookups are valid. */
+ const int ttlNegative = 3600;
+ const int ttlPositive = 30 * 24 * 3600;
+
+ /* How often to purge expired entries from the cache. */
+ const int purgeInterval = 24 * 3600;
+
+ struct Cache
+ {
+ int id;
+ Path storeDir;
+ bool wantMassQuery;
+ int priority;
+ };
+
+ struct State
+ {
+ SQLite db;
+ SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
+ std::map<std::string, Cache> caches;
+ };
+
+ Sync<State> _state;
+
+ NarInfoDiskCacheImpl()
+ {
+ auto state(_state.lock());
+
+ Path dbPath = getCacheDir() + "/nix/binary-cache-v5.sqlite";
+ createDirs(dirOf(dbPath));
+
+ state->db = SQLite(dbPath);
+
+ if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
+ throwSQLiteError(state->db, "setting timeout");
+
+ // We can always reproduce the cache.
+ state->db.exec("pragma synchronous = off");
+ state->db.exec("pragma main.journal_mode = truncate");
+
+ state->db.exec(schema);
+
+ state->insertCache.create(state->db,
+ "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+
+ state->queryCache.create(state->db,
+ "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
+
+ state->insertNAR.create(state->db,
+ "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
+ "narSize, refs, deriver, sigs, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
+
+ state->insertMissingNAR.create(state->db,
+ "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)");
+
+ state->queryNAR.create(state->db,
+ "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
+
+ /* Periodically purge expired entries from the database. */
+ retrySQLite<void>([&]() {
+ auto now = time(0);
+
+ SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
+ auto queryLastPurge_(queryLastPurge.use());
+
+ if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
+ SQLiteStmt(state->db,
+ "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
+ .use()
+ (now - ttlNegative)
+ (now - ttlPositive)
+ .exec();
+
+ debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
+
+ SQLiteStmt(state->db,
+ "insert or replace into LastPurge(dummy, value) values ('', ?)")
+ .use()(now).exec();
+ }
+ });
+ }
+
+ Cache & getCache(State & state, const std::string & uri)
+ {
+ auto i = state.caches.find(uri);
+ if (i == state.caches.end()) abort();
+ return i->second;
+ }
+
+ void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+ {
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ // FIXME: race
+
+ state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
+ assert(sqlite3_changes(state->db) == 1);
+ state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+ });
+ }
+
+ bool cacheExists(const std::string & uri,
+ bool & wantMassQuery, int & priority) override
+ {
+ return retrySQLite<bool>([&]() {
+ auto state(_state.lock());
+
+ auto i = state->caches.find(uri);
+ if (i == state->caches.end()) {
+ auto queryCache(state->queryCache.use()(uri));
+ if (!queryCache.next()) return false;
+ state->caches.emplace(uri,
+ Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+ }
+
+ auto & cache(getCache(*state, uri));
+
+ wantMassQuery = cache.wantMassQuery;
+ priority = cache.priority;
+
+ return true;
+ });
+ }
+
+ std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
+ const std::string & uri, const std::string & hashPart) override
+ {
+ return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
+ [&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+
+ auto now = time(0);
+
+ auto queryNAR(state->queryNAR.use()
+ (cache.id)
+ (hashPart)
+ (now - ttlNegative)
+ (now - ttlPositive));
+
+ if (!queryNAR.next())
+ return {oUnknown, 0};
+
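+ /* Column indices follow the NARs table definition above;
+ column 13 is the 'present' flag. */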
+ if (!queryNAR.getInt(13))
+ return {oInvalid, 0};
+
+ auto narInfo = make_ref<NarInfo>();
+
+ auto namePart = queryNAR.getStr(2);
+ narInfo->path = cache.storeDir + "/" +
+ hashPart + (namePart.empty() ? "" : "-" + namePart);
+ narInfo->url = queryNAR.getStr(3);
+ narInfo->compression = queryNAR.getStr(4);
+ if (!queryNAR.isNull(5))
+ narInfo->fileHash = parseHash(queryNAR.getStr(5));
+ narInfo->fileSize = queryNAR.getInt(6);
+ narInfo->narHash = parseHash(queryNAR.getStr(7));
+ narInfo->narSize = queryNAR.getInt(8);
+ for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
+ narInfo->references.insert(cache.storeDir + "/" + r);
+ if (!queryNAR.isNull(10))
+ narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
+ for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
+ narInfo->sigs.insert(sig);
+
+ return {oValid, narInfo};
+ });
+ }
+
+ void upsertNarInfo(
+ const std::string & uri, const std::string & hashPart,
+ std::shared_ptr<ValidPathInfo> info) override
+ {
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+
+ if (info) {
+
+ auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
+
+ assert(hashPart == storePathToHash(info->path));
+
+ state->insertNAR.use()
+ (cache.id)
+ (hashPart)
+ (storePathToName(info->path))
+ (narInfo ? narInfo->url : "", narInfo != 0)
+ (narInfo ? narInfo->compression : "", narInfo != 0)
+ (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
+ (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
+ (info->narHash.to_string())
+ (info->narSize)
+ (concatStringsSep(" ", info->shortRefs()))
+ (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
+ (concatStringsSep(" ", info->sigs))
+ (time(0)).exec();
+
+ } else {
+ state->insertMissingNAR.use()
+ (cache.id)
+ (hashPart)
+ (time(0)).exec();
+ }
+ });
+ }
+};
+
+ref<NarInfoDiskCache> getNarInfoDiskCache()
+{
+ static Sync<std::shared_ptr<NarInfoDiskCache>> cache;
+
+ auto cache_(cache.lock());
+ if (!*cache_) *cache_ = std::make_shared<NarInfoDiskCacheImpl>();
+ return ref<NarInfoDiskCache>(*cache_);
+}
+
+}
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
new file mode 100644
index 000000000..88d909732
--- /dev/null
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "ref.hh"
+#include "nar-info.hh"
+
+namespace nix {
+
+class NarInfoDiskCache
+{
+public:
+ typedef enum { oValid, oInvalid, oUnknown } Outcome;
+
+ virtual void createCache(const std::string & uri, const Path & storeDir,
+ bool wantMassQuery, int priority) = 0;
+
+ virtual bool cacheExists(const std::string & uri,
+ bool & wantMassQuery, int & priority) = 0;
+
+ virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
+ const std::string & uri, const std::string & hashPart) = 0;
+
+ virtual void upsertNarInfo(
+ const std::string & uri, const std::string & hashPart,
+ std::shared_ptr<ValidPathInfo> info) = 0;
+};
+
+/* Return a singleton cache object that can be used concurrently by
+ multiple threads. */
+ref<NarInfoDiskCache> getNarInfoDiskCache();
+
+}
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
new file mode 100644
index 000000000..d1042c6de
--- /dev/null
+++ b/src/libstore/nar-info.cc
@@ -0,0 +1,116 @@
+#include "globals.hh"
+#include "nar-info.hh"
+
+namespace nix {
+
+NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
+{
+ auto corrupt = [&]() {
+ throw Error(format("NAR info file ‘%1%’ is corrupt") % whence);
+ };
+
+ auto parseHashField = [&](const string & s) {
+ try {
+ return parseHash(s);
+ } catch (BadHash &) {
+ corrupt();
+ return Hash(); // never reached
+ }
+ };
+
+ size_t pos = 0;
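+ /* Parse the NAR info file, a sequence of 'Name: value' lines. */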
+ while (pos < s.size()) {
+
+ size_t colon = s.find(':', pos);
+ if (colon == std::string::npos) corrupt();
+
+ std::string name(s, pos, colon - pos);
+
+ size_t eol = s.find('\n', colon + 2);
+ if (eol == std::string::npos) corrupt();
+
+ std::string value(s, colon + 2, eol - colon - 2);
+
+ if (name == "StorePath") {
+ if (!store.isStorePath(value)) corrupt();
+ path = value;
+ }
+ else if (name == "URL")
+ url = value;
+ else if (name == "Compression")
+ compression = value;
+ else if (name == "FileHash")
+ fileHash = parseHashField(value);
+ else if (name == "FileSize") {
+ if (!string2Int(value, fileSize)) corrupt();
+ }
+ else if (name == "NarHash")
+ narHash = parseHashField(value);
+ else if (name == "NarSize") {
+ if (!string2Int(value, narSize)) corrupt();
+ }
+ else if (name == "References") {
+ auto refs = tokenizeString<Strings>(value, " ");
+ if (!references.empty()) corrupt();
+ for (auto & r : refs) {
+ auto r2 = store.storeDir + "/" + r;
+ if (!store.isStorePath(r2)) corrupt();
+ references.insert(r2);
+ }
+ }
+ else if (name == "Deriver") {
+ if (value != "unknown-deriver") {
+ auto p = store.storeDir + "/" + value;
+ if (!store.isStorePath(p)) corrupt();
+ deriver = p;
+ }
+ }
+ else if (name == "System")
+ system = value;
+ else if (name == "Sig")
+ sigs.insert(value);
+ else if (name == "CA") {
+ if (!ca.empty()) corrupt();
+ ca = value;
+ }
+
+ pos = eol + 1;
+ }
+
+ if (compression == "") compression = "bzip2";
+
+ if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
+}
+
+std::string NarInfo::to_string() const
+{
+ std::string res;
+ res += "StorePath: " + path + "\n";
+ res += "URL: " + url + "\n";
+ assert(compression != "");
+ res += "Compression: " + compression + "\n";
+ assert(fileHash.type == htSHA256);
+ res += "FileHash: sha256:" + printHash32(fileHash) + "\n";
+ res += "FileSize: " + std::to_string(fileSize) + "\n";
+ assert(narHash.type == htSHA256);
+ res += "NarHash: sha256:" + printHash32(narHash) + "\n";
+ res += "NarSize: " + std::to_string(narSize) + "\n";
+
+ res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
+
+ if (!deriver.empty())
+ res += "Deriver: " + baseNameOf(deriver) + "\n";
+
+ if (!system.empty())
+ res += "System: " + system + "\n";
+
+ for (auto sig : sigs)
+ res += "Sig: " + sig + "\n";
+
+ if (!ca.empty())
+ res += "CA: " + ca + "\n";
+
+ return res;
+}
+
+}
diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh
new file mode 100644
index 000000000..4995061fb
--- /dev/null
+++ b/src/libstore/nar-info.hh
@@ -0,0 +1,24 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+struct NarInfo : ValidPathInfo
+{
+ std::string url;
+ std::string compression;
+ Hash fileHash;
+ uint64_t fileSize = 0;
+ std::string system;
+
+ NarInfo() { }
+ NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
+ NarInfo(const Store & store, const std::string & s, const std::string & whence);
+
+ std::string to_string() const;
+};
+
+}
diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in
new file mode 100644
index 000000000..3f1a2d83d
--- /dev/null
+++ b/src/libstore/nix-store.pc.in
@@ -0,0 +1,9 @@
+prefix=@prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: Nix
+Description: Nix Package Manager
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lnixstore -lnixutil -lnixformat
+Cflags: -I${includedir}/nix
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
new file mode 100644
index 000000000..cf234e35d
--- /dev/null
+++ b/src/libstore/optimise-store.cc
@@ -0,0 +1,275 @@
+#include "util.hh"
+#include "local-store.hh"
+#include "globals.hh"
+
+#include <cstdlib>
+#include <cstring>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+static void makeWritable(const Path & path)
+{
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+ if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
+ throw SysError(format("changing writability of ‘%1%’") % path);
+}
+
+
+struct MakeReadOnly
+{
+ Path path;
+ MakeReadOnly(const Path & path) : path(path) { }
+ ~MakeReadOnly()
+ {
+ try {
+ /* This will make the path read-only. */
+ if (path != "") canonicaliseTimestampAndPermissions(path);
+ } catch (...) {
+ ignoreException();
+ }
+ }
+};
+
+
+LocalStore::InodeHash LocalStore::loadInodeHash()
+{
+ debug("loading hash inodes in memory");
+ InodeHash inodeHash;
+
+ AutoCloseDir dir(opendir(linksDir.c_str()));
+ if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) { /* sic */
+ checkInterrupt();
+ // We don't care if we hit non-hash files, anything goes
+ inodeHash.insert(dirent->d_ino);
+ }
+ if (errno) throw SysError(format("reading directory ‘%1%’") % linksDir);
+
+ printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
+
+ return inodeHash;
+}
+
+
+Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
+{
+ Strings names;
+
+ AutoCloseDir dir(opendir(path.c_str()));
+ if (!dir) throw SysError(format("opening directory ‘%1%’") % path);
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) { /* sic */
+ checkInterrupt();
+
+ if (inodeHash.count(dirent->d_ino)) {
+ debug(format("‘%1%’ is already linked") % dirent->d_name);
+ continue;
+ }
+
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ names.push_back(name);
+ }
+ if (errno) throw SysError(format("reading directory ‘%1%’") % path);
+
+ return names;
+}
+
+
+void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash)
+{
+ checkInterrupt();
+
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ if (S_ISDIR(st.st_mode)) {
+ Strings names = readDirectoryIgnoringInodes(path, inodeHash);
+ for (auto & i : names)
+ optimisePath_(stats, path + "/" + i, inodeHash);
+ return;
+ }
+
+ /* We can hard link regular files and maybe symlinks. */
+ if (!S_ISREG(st.st_mode)
+#if CAN_LINK_SYMLINK
+ && !S_ISLNK(st.st_mode)
+#endif
+ ) return;
+
+ /* Sometimes SNAFUs can cause files in the Nix store to be
+ modified, in particular when running programs as root under
+ NixOS (example: $fontconfig/var/cache being modified). Skip
+ those files. FIXME: check the modification time. */
+ if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
+ printError(format("skipping suspicious writable file ‘%1%’") % path);
+ return;
+ }
+
+ /* This can still happen on top-level files. */
+ if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
+ debug(format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
+ return;
+ }
+
+ /* Hash the file. Note that hashPath() returns the hash over the
+ NAR serialisation, which includes the execute bit on the file.
+ Thus, executable and non-executable files with the same
+ contents *won't* be linked (which is good because otherwise the
+ permissions would be screwed up).
+
+ Also note that if `path' is a symlink, then we're hashing the
+ contents of the symlink (i.e. the result of readlink()), not
+ the contents of the target (which may not even exist). */
+ Hash hash = hashPath(htSHA256, path).first;
+ debug(format("‘%1%’ has hash ‘%2%’") % path % printHash(hash));
+
+ /* Check if this is a known hash. */
+ Path linkPath = linksDir + "/" + printHash32(hash);
+
+ retry:
+ if (!pathExists(linkPath)) {
+ /* Nope, create a hard link in the links directory. */
+ if (link(path.c_str(), linkPath.c_str()) == 0) {
+ inodeHash.insert(st.st_ino);
+ return;
+ }
+
+ switch (errno) {
+ case EEXIST:
+ /* Fall through if another process created ‘linkPath’ before
+ we did. */
+ break;
+
+ case ENOSPC:
+ /* On ext4, that probably means the directory index is
+ full. When that happens, it's fine to ignore it: we
+ just effectively disable deduplication of this
+ file. */
+ printInfo("cannot link ‘%s’ to ‘%s’: %s", linkPath, path, strerror(errno));
+ return;
+
+ default:
+ throw SysError("cannot link ‘%1%’ to ‘%2%’", linkPath, path);
+ }
+ }
+
+ /* Yes! We've seen a file with the same contents. Replace the
+ current file with a hard link to that file. */
+ struct stat stLink;
+ if (lstat(linkPath.c_str(), &stLink))
+ throw SysError(format("getting attributes of path ‘%1%’") % linkPath);
+
+ if (st.st_ino == stLink.st_ino) {
+ debug(format("‘%1%’ is already linked to ‘%2%’") % path % linkPath);
+ return;
+ }
+
+ if (st.st_size != stLink.st_size) {
+ printError(format("removing corrupted link ‘%1%’") % linkPath);
+ unlink(linkPath.c_str());
+ goto retry;
+ }
+
+ printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath);
+
+ /* Make the containing directory writable, but only if it's not
+ the store itself (we don't want or need to mess with its
+ permissions). */
+ bool mustToggle = dirOf(path) != realStoreDir;
+ if (mustToggle) makeWritable(dirOf(path));
+
+ /* When we're done, make the directory read-only again and reset
+ its timestamp back to 0. */
+ MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
+
+ Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
+ % realStoreDir % getpid() % rand()).str();
+
+ if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
+ if (errno == EMLINK) {
+ /* Too many links to the same file (>= 32000 on most file
+ systems). This is likely to happen with empty files.
+ Just shrug and ignore. */
+ if (st.st_size)
+ printInfo(format("‘%1%’ has maximum number of links") % linkPath);
+ return;
+ }
+ throw SysError("cannot link ‘%1%’ to ‘%2%’", tempLink, linkPath);
+ }
+
+ /* Atomically replace the old file with the new hard link. */
+ if (rename(tempLink.c_str(), path.c_str()) == -1) {
+ if (unlink(tempLink.c_str()) == -1)
+ printError(format("unable to unlink ‘%1%’") % tempLink);
+ if (errno == EMLINK) {
+ /* Some filesystems generate too many links on the rename,
+ rather than on the original link. (Probably it
+ temporarily increases the st_nlink field before
+ decreasing it again.) */
+ if (st.st_size)
+ printInfo(format("‘%1%’ has maximum number of links") % linkPath);
+ return;
+ }
+ throw SysError(format("cannot rename ‘%1%’ to ‘%2%’") % tempLink % path);
+ }
+
+ stats.filesLinked++;
+ stats.bytesFreed += st.st_size;
+ stats.blocksFreed += st.st_blocks;
+}
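+
+/* Sketch of the deduplication performed above, reduced to the underlying
+   POSIX calls (error handling, permission toggling and retries omitted;
+   ‘linkPath’ is the content-addressed entry under linksDir):
+
+       link(path, linkPath);      // first file seen with this hash
+       link(linkPath, tempLink);  // later duplicates get a temporary link...
+       rename(tempLink, path);    // ...that atomically replaces the copy
+*/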
+
+
+void LocalStore::optimiseStore(OptimiseStats & stats)
+{
+ PathSet paths = queryAllValidPaths();
+ InodeHash inodeHash = loadInodeHash();
+
+ for (auto & i : paths) {
+ addTempRoot(i);
+ if (!isValidPath(i)) continue; /* path was GC'ed, probably */
+ Activity act(*logger, lvlChatty, format("hashing files in ‘%1%’") % i);
+ optimisePath_(stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
+ }
+}
+
+static string showBytes(unsigned long long bytes)
+{
+ return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
+}
+
+void LocalStore::optimiseStore()
+{
+ OptimiseStats stats;
+
+ optimiseStore(stats);
+
+ printError(
+ format("%1% freed by hard-linking %2% files")
+ % showBytes(stats.bytesFreed)
+ % stats.filesLinked);
+}
+
+void LocalStore::optimisePath(const Path & path)
+{
+ OptimiseStats stats;
+ InodeHash inodeHash;
+
+ if (settings.autoOptimiseStore) optimisePath_(stats, path, inodeHash);
+}
+
+
+}
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
new file mode 100644
index 000000000..bf7ad3d21
--- /dev/null
+++ b/src/libstore/pathlocks.cc
@@ -0,0 +1,216 @@
+#include "pathlocks.hh"
+#include "util.hh"
+#include "sync.hh"
+
+#include <cerrno>
+#include <cstdlib>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+
+namespace nix {
+
+
+AutoCloseFD openLockFile(const Path & path, bool create)
+{
+ AutoCloseFD fd;
+
+ fd = open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600);
+ if (!fd && (create || errno != ENOENT))
+ throw SysError(format("opening lock file ‘%1%’") % path);
+
+ return fd;
+}
+
+
+void deleteLockFile(const Path & path, int fd)
+{
+ /* Get rid of the lock file. Have to be careful not to introduce
+ races. Write a (meaningless) token to the file to indicate to
+ other processes waiting on this lock that the lock is stale
+ (deleted). */
+ unlink(path.c_str());
+ writeFull(fd, "d");
+ /* Note that the result of unlink() is ignored; removing the lock
+ file is an optimisation, not a necessity. */
+}
+
+
+bool lockFile(int fd, LockType lockType, bool wait)
+{
+ struct flock lock;
+ if (lockType == ltRead) lock.l_type = F_RDLCK;
+ else if (lockType == ltWrite) lock.l_type = F_WRLCK;
+ else if (lockType == ltNone) lock.l_type = F_UNLCK;
+ else abort();
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0; /* entire file */
+
+ if (wait) {
+ while (fcntl(fd, F_SETLKW, &lock) != 0) {
+ checkInterrupt();
+ if (errno != EINTR)
+ throw SysError(format("acquiring/releasing lock"));
+ else
+ return false;
+ }
+ } else {
+ while (fcntl(fd, F_SETLK, &lock) != 0) {
+ checkInterrupt();
+ if (errno == EACCES || errno == EAGAIN) return false;
+ if (errno != EINTR)
+ throw SysError(format("acquiring/releasing lock"));
+ }
+ }
+
+ return true;
+}
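+
+/* Minimal usage sketch (illustrative only, not used elsewhere in this
+   file): acquire an exclusive lock on an arbitrary lock file, blocking
+   until it becomes available, then release it explicitly.
+
+       AutoCloseFD fd = openLockFile("/tmp/example.lock", true);
+       if (lockFile(fd.get(), ltWrite, true)) {
+           // ... critical section ...
+           lockFile(fd.get(), ltNone, true);
+       }
+*/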
+
+
+/* This enables us to check whether we are already holding a lock on
+ a file ourselves. POSIX locks (fcntl) suck in this respect: if we
+ close a descriptor, the previous lock will be closed as well. And
+ there is no way to query whether we already have a lock (F_GETLK
+ only works on locks held by other processes). */
+static Sync<StringSet> lockedPaths_;
+
+
+PathLocks::PathLocks()
+ : deletePaths(false)
+{
+}
+
+
+PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
+ : deletePaths(false)
+{
+ lockPaths(paths, waitMsg);
+}
+
+
+bool PathLocks::lockPaths(const PathSet & _paths,
+ const string & waitMsg, bool wait)
+{
+ assert(fds.empty());
+
+ /* Note that `fds' is built incrementally so that the destructor
+ will only release those locks that we have already acquired. */
+
+    /* Sort the paths. This ensures that locks are always acquired in
+ the same order, thus preventing deadlocks. */
+ Paths paths(_paths.begin(), _paths.end());
+ paths.sort();
+
+ /* Acquire the lock for each path. */
+ for (auto & path : paths) {
+ checkInterrupt();
+ Path lockPath = path + ".lock";
+
+ debug(format("locking path ‘%1%’") % path);
+
+ {
+ auto lockedPaths(lockedPaths_.lock());
+ if (lockedPaths->count(lockPath))
+ throw Error("deadlock: trying to re-acquire self-held lock ‘%s’", lockPath);
+ lockedPaths->insert(lockPath);
+ }
+
+ try {
+
+ AutoCloseFD fd;
+
+ while (1) {
+
+ /* Open/create the lock file. */
+ fd = openLockFile(lockPath, true);
+
+ /* Acquire an exclusive lock. */
+ if (!lockFile(fd.get(), ltWrite, false)) {
+ if (wait) {
+ if (waitMsg != "") printError(waitMsg);
+ lockFile(fd.get(), ltWrite, true);
+ } else {
+ /* Failed to lock this path; release all other
+ locks. */
+ unlock();
+ lockedPaths_.lock()->erase(lockPath);
+ return false;
+ }
+ }
+
+ debug(format("lock acquired on ‘%1%’") % lockPath);
+
+ /* Check that the lock file hasn't become stale (i.e.,
+ hasn't been unlinked). */
+ struct stat st;
+ if (fstat(fd.get(), &st) == -1)
+ throw SysError(format("statting lock file ‘%1%’") % lockPath);
+ if (st.st_size != 0)
+ /* This lock file has been unlinked, so we're holding
+ a lock on a deleted file. This means that other
+ processes may create and acquire a lock on
+ `lockPath', and proceed. So we must retry. */
+ debug(format("open lock file ‘%1%’ has become stale") % lockPath);
+ else
+ break;
+ }
+
+ /* Use borrow so that the descriptor isn't closed. */
+ fds.push_back(FDPair(fd.release(), lockPath));
+
+ } catch (...) {
+ lockedPaths_.lock()->erase(lockPath);
+ throw;
+ }
+
+ }
+
+ return true;
+}
+
+
+PathLocks::~PathLocks()
+{
+ try {
+ unlock();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+void PathLocks::unlock()
+{
+ for (auto & i : fds) {
+ if (deletePaths) deleteLockFile(i.second, i.first);
+
+ lockedPaths_.lock()->erase(i.second);
+
+ if (close(i.first) == -1)
+ printError(
+ format("error (ignored): cannot close lock file on ‘%1%’") % i.second);
+
+ debug(format("lock released on ‘%1%’") % i.second);
+ }
+
+ fds.clear();
+}
+
+
+void PathLocks::setDeletion(bool deletePaths)
+{
+ this->deletePaths = deletePaths;
+}
+
+
+bool pathIsLockedByMe(const Path & path)
+{
+ Path lockPath = path + ".lock";
+ return lockedPaths_.lock()->count(lockPath);
+}
+
+
+}
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
new file mode 100644
index 000000000..2a7de6114
--- /dev/null
+++ b/src/libstore/pathlocks.hh
@@ -0,0 +1,46 @@
+#pragma once
+
+#include "util.hh"
+
+
+namespace nix {
+
+
+/* Open (possibly create) a lock file and return the file descriptor.
+   If ‘create’ is false and the lock file doesn't exist, an invalid
+   (closed) descriptor is returned. Any other error throws an exception. */
+AutoCloseFD openLockFile(const Path & path, bool create);
+
+/* Delete an open lock file. */
+void deleteLockFile(const Path & path, int fd);
+
+enum LockType { ltRead, ltWrite, ltNone };
+
+bool lockFile(int fd, LockType lockType, bool wait);
+
+
+class PathLocks
+{
+private:
+ typedef std::pair<int, Path> FDPair;
+ list<FDPair> fds;
+ bool deletePaths;
+
+public:
+ PathLocks();
+ PathLocks(const PathSet & paths,
+ const string & waitMsg = "");
+ bool lockPaths(const PathSet & _paths,
+ const string & waitMsg = "",
+ bool wait = true);
+ ~PathLocks();
+ void unlock();
+ void setDeletion(bool deletePaths);
+};
+
+
+// FIXME: not thread-safe!
+bool pathIsLockedByMe(const Path & path);
+
+
+}
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
new file mode 100644
index 000000000..f24daa886
--- /dev/null
+++ b/src/libstore/profiles.cc
@@ -0,0 +1,236 @@
+#include "profiles.hh"
+#include "store-api.hh"
+#include "util.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+static bool cmpGensByNumber(const Generation & a, const Generation & b)
+{
+ return a.number < b.number;
+}
+
+
+/* Parse a generation name of the format
+ `<profilename>-<number>-link'. */
+static int parseName(const string & profileName, const string & name)
+{
+ if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
+ string s = string(name, profileName.size() + 1);
+ string::size_type p = s.find("-link");
+ if (p == string::npos) return -1;
+ int n;
+ if (string2Int(string(s, 0, p), n) && n >= 0)
+ return n;
+ else
+ return -1;
+}
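+
+/* For example (illustrative values), with a profile named ‘default’:
+
+       parseName("default", "default-42-link")   == 42
+       parseName("default", "default-foo-link")  == -1   (no number)
+       parseName("default", "channels-3-link")   == -1   (different profile)
+*/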
+
+
+
+Generations findGenerations(Path profile, int & curGen)
+{
+ Generations gens;
+
+ Path profileDir = dirOf(profile);
+ string profileName = baseNameOf(profile);
+
+ for (auto & i : readDirectory(profileDir)) {
+ int n;
+ if ((n = parseName(profileName, i.name)) != -1) {
+ Generation gen;
+ gen.path = profileDir + "/" + i.name;
+ gen.number = n;
+ struct stat st;
+ if (lstat(gen.path.c_str(), &st) != 0)
+ throw SysError(format("statting ‘%1%’") % gen.path);
+ gen.creationTime = st.st_mtime;
+ gens.push_back(gen);
+ }
+ }
+
+ gens.sort(cmpGensByNumber);
+
+ curGen = pathExists(profile)
+ ? parseName(profileName, readLink(profile))
+ : -1;
+
+ return gens;
+}
+
+
+static void makeName(const Path & profile, unsigned int num,
+ Path & outLink)
+{
+ Path prefix = (format("%1%-%2%") % profile % num).str();
+ outLink = prefix + "-link";
+}
+
+
+Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
+{
+    /* The new generation number should be higher than the previous
+       ones. */
+ int dummy;
+ Generations gens = findGenerations(profile, dummy);
+
+ unsigned int num;
+ if (gens.size() > 0) {
+ Generation last = gens.back();
+
+ if (readLink(last.path) == outPath) {
+ /* We only create a new generation symlink if it differs
+ from the last one.
+
+ This helps keeping gratuitous installs/rebuilds from piling
+ up uncontrolled numbers of generations, cluttering up the
+ UI like grub. */
+ return last.path;
+ }
+
+ num = gens.back().number;
+ } else {
+ num = 0;
+ }
+
+ /* Create the new generation. Note that addPermRoot() blocks if
+ the garbage collector is running to prevent the stuff we've
+ built from moving from the temporary roots (which the GC knows)
+ to the permanent roots (of which the GC would have a stale
+ view). If we didn't do it this way, the GC might remove the
+ user environment etc. we've just built. */
+ Path generation;
+ makeName(profile, num + 1, generation);
+ store->addPermRoot(outPath, generation, false, true);
+
+ return generation;
+}
+
+
+static void removeFile(const Path & path)
+{
+ if (remove(path.c_str()) == -1)
+ throw SysError(format("cannot unlink ‘%1%’") % path);
+}
+
+
+void deleteGeneration(const Path & profile, unsigned int gen)
+{
+ Path generation;
+ makeName(profile, gen, generation);
+ removeFile(generation);
+}
+
+
+static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
+{
+ if (dryRun)
+ printInfo(format("would remove generation %1%") % gen);
+ else {
+ printInfo(format("removing generation %1%") % gen);
+ deleteGeneration(profile, gen);
+ }
+}
+
+
+void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ int curGen;
+ Generations gens = findGenerations(profile, curGen);
+
+ if (gensToDelete.find(curGen) != gensToDelete.end())
+        throw Error(format("cannot delete current generation of profile ‘%1%’") % profile);
+
+ for (auto & i : gens) {
+ if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
+ deleteGeneration2(profile, i.number, dryRun);
+ }
+}
+
+
+void deleteOldGenerations(const Path & profile, bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ int curGen;
+ Generations gens = findGenerations(profile, curGen);
+
+ for (auto & i : gens)
+ if (i.number != curGen)
+ deleteGeneration2(profile, i.number, dryRun);
+}
+
+
+void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ int curGen;
+ Generations gens = findGenerations(profile, curGen);
+
+ bool canDelete = false;
+ for (auto i = gens.rbegin(); i != gens.rend(); ++i)
+ if (canDelete) {
+ assert(i->creationTime < t);
+ if (i->number != curGen)
+ deleteGeneration2(profile, i->number, dryRun);
+ } else if (i->creationTime < t) {
+ /* We may now start deleting generations, but we don't
+ delete this generation yet, because this generation was
+ still the one that was active at the requested point in
+ time. */
+ canDelete = true;
+ }
+}
+
+
+void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun)
+{
+ time_t curTime = time(0);
+ string strDays = string(timeSpec, 0, timeSpec.size() - 1);
+ int days;
+
+ if (!string2Int(strDays, days) || days < 1)
+ throw Error(format("invalid number of days specifier ‘%1%’") % timeSpec);
+
+ time_t oldTime = curTime - days * 24 * 3600;
+
+ deleteGenerationsOlderThan(profile, oldTime, dryRun);
+}
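+
+/* Example (illustrative): a timeSpec of "30d" translates to
+
+       deleteGenerationsOlderThan(profile, time(0) - 30 * 24 * 3600, dryRun);
+
+   i.e. the time_t overload above with a cut-off point 30 days ago. */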
+
+
+void switchLink(Path link, Path target)
+{
+ /* Hacky. */
+ if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
+
+ replaceSymlink(target, link);
+}
+
+
+void lockProfile(PathLocks & lock, const Path & profile)
+{
+ lock.lockPaths({profile}, (format("waiting for lock on profile ‘%1%’") % profile).str());
+ lock.setDeletion(true);
+}
+
+
+string optimisticLockProfile(const Path & profile)
+{
+ return pathExists(profile) ? readLink(profile) : "";
+}
+
+
+}
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
new file mode 100644
index 000000000..1d4e6d303
--- /dev/null
+++ b/src/libstore/profiles.hh
@@ -0,0 +1,65 @@
+#pragma once
+
+#include "types.hh"
+#include "pathlocks.hh"
+
+#include <time.h>
+
+
+namespace nix {
+
+
+struct Generation
+{
+ int number;
+ Path path;
+ time_t creationTime;
+ Generation()
+ {
+ number = -1;
+ }
+ operator bool() const
+ {
+ return number != -1;
+ }
+};
+
+typedef list<Generation> Generations;
+
+
+/* Returns the list of currently present generations for the specified
+ profile, sorted by generation number. */
+Generations findGenerations(Path profile, int & curGen);
+
+class LocalFSStore;
+
+Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);
+
+void deleteGeneration(const Path & profile, unsigned int gen);
+
+void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
+
+void deleteOldGenerations(const Path & profile, bool dryRun);
+
+void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
+
+void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun);
+
+void switchLink(Path link, Path target);
+
+/* Ensure exclusive access to a profile. Any command that modifies
+ the profile first acquires this lock. */
+void lockProfile(PathLocks & lock, const Path & profile);
+
+/* Optimistic locking is used by long-running operations like `nix-env
+ -i'. Instead of acquiring the exclusive lock for the entire
+ duration of the operation, we just perform the operation
+ optimistically (without an exclusive lock), and check at the end
+ whether the profile changed while we were busy (i.e., the symlink
+ target changed). If so, the operation is restarted. Restarting is
+ generally cheap, since the build results are still in the Nix
+ store. Most of the time, only the user environment has to be
+ rebuilt. */
+string optimisticLockProfile(const Path & profile);
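+
+/* Usage sketch of the optimistic pattern described above (illustrative
+   only; the real callers live in nix-env and friends):
+
+       while (true) {
+           string lockToken = optimisticLockProfile(profile);
+           // ... build the new user environment ...
+           if (optimisticLockProfile(profile) == lockToken) {
+               // profile unchanged; commit the new generation
+               break;
+           }
+           // profile changed underneath us; restart
+       }
+*/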
+
+}
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
new file mode 100644
index 000000000..33eab5a24
--- /dev/null
+++ b/src/libstore/references.cc
@@ -0,0 +1,122 @@
+#include "references.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <cstdlib>
+
+
+namespace nix {
+
+
+static unsigned int refLength = 32; /* characters */
+
+
+static void search(const unsigned char * s, unsigned int len,
+ StringSet & hashes, StringSet & seen)
+{
+ static bool initialised = false;
+ static bool isBase32[256];
+ if (!initialised) {
+ for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
+ for (unsigned int i = 0; i < base32Chars.size(); ++i)
+ isBase32[(unsigned char) base32Chars[i]] = true;
+ initialised = true;
+ }
+
+ for (unsigned int i = 0; i + refLength <= len; ) {
+ int j;
+ bool match = true;
+ for (j = refLength - 1; j >= 0; --j)
+ if (!isBase32[(unsigned char) s[i + j]]) {
+ i += j + 1;
+ match = false;
+ break;
+ }
+ if (!match) continue;
+ string ref((const char *) s + i, refLength);
+ if (hashes.find(ref) != hashes.end()) {
+ debug(format("found reference to ‘%1%’ at offset ‘%2%’")
+ % ref % i);
+ seen.insert(ref);
+ hashes.erase(ref);
+ }
+ ++i;
+ }
+}
+
+
+struct RefScanSink : Sink
+{
+ HashSink hashSink;
+ StringSet hashes;
+ StringSet seen;
+
+ string tail;
+
+ RefScanSink() : hashSink(htSHA256) { }
+
+ void operator () (const unsigned char * data, size_t len);
+};
+
+
+void RefScanSink::operator () (const unsigned char * data, size_t len)
+{
+ hashSink(data, len);
+
+ /* It's possible that a reference spans the previous and current
+ fragment, so search in the concatenation of the tail of the
+ previous fragment and the start of the current fragment. */
+ string s = tail + string((const char *) data, len > refLength ? refLength : len);
+ search((const unsigned char *) s.data(), s.size(), hashes, seen);
+
+ search(data, len, hashes, seen);
+
+ unsigned int tailLen = len <= refLength ? len : refLength;
+ tail =
+ string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
+ string((const char *) data + len - tailLen, tailLen);
+}
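+
+/* Illustrative boundary case handled above: a 32-character hash split as
+   20 characters at the end of fragment N and 12 at the start of fragment
+   N+1 is invisible to either per-fragment search(), but shows up in the
+   search over ‘tail + first refLength bytes of fragment N+1’. */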
+
+
+PathSet scanForReferences(const string & path,
+ const PathSet & refs, HashResult & hash)
+{
+ RefScanSink sink;
+ std::map<string, Path> backMap;
+
+ /* For efficiency (and a higher hit rate), just search for the
+ hash part of the file name. (This assumes that all references
+ have the form `HASH-bla'). */
+ for (auto & i : refs) {
+ string baseName = baseNameOf(i);
+ string::size_type pos = baseName.find('-');
+ if (pos == string::npos)
+ throw Error(format("bad reference ‘%1%’") % i);
+ string s = string(baseName, 0, pos);
+ assert(s.size() == refLength);
+ assert(backMap.find(s) == backMap.end());
+ // parseHash(htSHA256, s);
+ sink.hashes.insert(s);
+ backMap[s] = i;
+ }
+
+ /* Look for the hashes in the NAR dump of the path. */
+ dumpPath(path, sink);
+
+ /* Map the hashes found back to their store paths. */
+ PathSet found;
+ for (auto & i : sink.seen) {
+ std::map<string, Path>::iterator j;
+ if ((j = backMap.find(i)) == backMap.end()) abort();
+ found.insert(j->second);
+ }
+
+ hash = sink.hashSink.finish();
+
+ return found;
+}
+
+
+}
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
new file mode 100644
index 000000000..013809d12
--- /dev/null
+++ b/src/libstore/references.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+namespace nix {
+
+PathSet scanForReferences(const Path & path, const PathSet & refs,
+ HashResult & hash);
+
+}
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
new file mode 100644
index 000000000..ca14057c2
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.cc
@@ -0,0 +1,57 @@
+#include "remote-fs-accessor.hh"
+#include "nar-accessor.hh"
+
+namespace nix {
+
+
+RemoteFSAccessor::RemoteFSAccessor(ref<Store> store)
+ : store(store)
+{
+}
+
+std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
+{
+ auto path = canonPath(path_);
+
+ auto storePath = store->toStorePath(path);
+ std::string restPath = std::string(path, storePath.size());
+
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+
+ auto i = nars.find(storePath);
+ if (i != nars.end()) return {i->second, restPath};
+
+ StringSink sink;
+ store->narFromPath(storePath, sink);
+
+ auto accessor = makeNarAccessor(sink.s);
+ nars.emplace(storePath, accessor);
+ return {accessor, restPath};
+}
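+
+/* Example (illustrative path): fetch("/nix/store/<hash>-foo/bin/hello")
+   returns the NAR accessor for the whole store path plus the rest-path
+   "/bin/hello"; the accessor is cached in ‘nars’, so later accesses under
+   the same store path reuse the NAR that was already downloaded. */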
+
+FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->stat(res.second);
+}
+
+StringSet RemoteFSAccessor::readDirectory(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readDirectory(res.second);
+}
+
+std::string RemoteFSAccessor::readFile(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readFile(res.second);
+}
+
+std::string RemoteFSAccessor::readLink(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readLink(res.second);
+}
+
+}
diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh
new file mode 100644
index 000000000..28f36c829
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.hh
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "fs-accessor.hh"
+#include "ref.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+class RemoteFSAccessor : public FSAccessor
+{
+ ref<Store> store;
+
+ std::map<Path, ref<FSAccessor>> nars;
+
+ std::pair<ref<FSAccessor>, Path> fetch(const Path & path_);
+public:
+
+ RemoteFSAccessor(ref<Store> store);
+
+ Stat stat(const Path & path) override;
+
+ StringSet readDirectory(const Path & path) override;
+
+ std::string readFile(const Path & path) override;
+
+ std::string readLink(const Path & path) override;
+};
+
+}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
new file mode 100644
index 000000000..bc9ef3d47
--- /dev/null
+++ b/src/libstore/remote-store.cc
@@ -0,0 +1,655 @@
+#include "serialise.hh"
+#include "util.hh"
+#include "remote-store.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+#include "derivations.hh"
+#include "pool.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <cstring>
+
+namespace nix {
+
+
+Path readStorePath(Store & store, Source & from)
+{
+ Path path = readString(from);
+ store.assertStorePath(path);
+ return path;
+}
+
+
+template<class T> T readStorePaths(Store & store, Source & from)
+{
+ T paths = readStrings<T>(from);
+ for (auto & i : paths) store.assertStorePath(i);
+ return paths;
+}
+
+template PathSet readStorePaths(Store & store, Source & from);
+template Paths readStorePaths(Store & store, Source & from);
+
+/* TODO: Separate these store impls into different files, give them better names */
+RemoteStore::RemoteStore(const Params & params)
+ : Store(params)
+ , connections(make_ref<Pool<Connection>>(
+ std::max(1, (int) maxConnections),
+ [this]() { return openConnectionWrapper(); },
+ [](const ref<Connection> & r) { return r->to.good() && r->from.good(); }
+ ))
+{
+}
+
+
+ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
+{
+ if (failed)
+ throw Error("opening a connection to remote store ‘%s’ previously failed", getUri());
+ try {
+ return openConnection();
+ } catch (...) {
+ failed = true;
+ throw;
+ }
+}
+
+
+UDSRemoteStore::UDSRemoteStore(const Params & params)
+ : Store(params)
+ , LocalFSStore(params)
+ , RemoteStore(params)
+{
+}
+
+
+std::string UDSRemoteStore::getUri()
+{
+ return "daemon";
+}
+
+
+ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
+{
+ auto conn = make_ref<Connection>();
+
+ /* Connect to a daemon that does the privileged work for us. */
+ conn->fd = socket(PF_UNIX, SOCK_STREAM
+ #ifdef SOCK_CLOEXEC
+ | SOCK_CLOEXEC
+ #endif
+ , 0);
+ if (!conn->fd)
+ throw SysError("cannot create Unix domain socket");
+ closeOnExec(conn->fd.get());
+
+ string socketPath = settings.nixDaemonSocketFile;
+
+ struct sockaddr_un addr;
+ addr.sun_family = AF_UNIX;
+ if (socketPath.size() + 1 >= sizeof(addr.sun_path))
+ throw Error(format("socket path ‘%1%’ is too long") % socketPath);
+ strcpy(addr.sun_path, socketPath.c_str());
+
+ if (connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath);
+
+ conn->from.fd = conn->fd.get();
+ conn->to.fd = conn->fd.get();
+
+ initConnection(*conn);
+
+ return conn;
+}
+
+
+void RemoteStore::initConnection(Connection & conn)
+{
+ /* Send the magic greeting, check for the reply. */
+ try {
+ conn.to << WORKER_MAGIC_1;
+ conn.to.flush();
+ unsigned int magic = readInt(conn.from);
+ if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
+
+ conn.from >> conn.daemonVersion;
+ if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
+ throw Error("Nix daemon protocol version not supported");
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
+ throw Error("the Nix daemon version is too old");
+ conn.to << PROTOCOL_VERSION;
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
+ int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
+ if (cpu != -1)
+ conn.to << 1 << cpu;
+ else
+ conn.to << 0;
+ }
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
+ conn.to << false;
+
+ conn.processStderr();
+ }
+ catch (Error & e) {
+ throw Error("cannot open connection to remote store ‘%s’: %s", getUri(), e.what());
+ }
+
+ setOptions(conn);
+}
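+
+/* Handshake summary (as implemented above):
+
+       client -> daemon : WORKER_MAGIC_1
+       daemon -> client : WORKER_MAGIC_2, daemonVersion
+       client -> daemon : PROTOCOL_VERSION
+                          [1, cpu  or  0]    (daemon protocol minor >= 14)
+                          [false]            (daemon protocol minor >= 11, reserved)
+
+   followed by one processStderr() round trip and setOptions(). */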
+
+
+void RemoteStore::setOptions(Connection & conn)
+{
+ conn.to << wopSetOptions
+ << settings.keepFailed
+ << settings.keepGoing
+ << settings.tryFallback
+ << verbosity
+ << settings.maxBuildJobs
+ << settings.maxSilentTime
+ << settings.useBuildHook
+ << (settings.verboseBuild ? lvlError : lvlVomit)
+ << 0 // obsolete log type
+ << 0 /* obsolete print build trace */
+ << settings.buildCores
+ << settings.useSubstitutes;
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
+ auto overrides = settings.getSettings(true);
+ conn.to << overrides.size();
+ for (auto & i : overrides)
+ conn.to << i.first << i.second;
+ }
+
+ conn.processStderr();
+}
+
+
+bool RemoteStore::isValidPathUncached(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopIsValidPath << path;
+ conn->processStderr();
+ return readInt(conn->from);
+}
+
+
+PathSet RemoteStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+ PathSet res;
+ for (auto & i : paths)
+ if (isValidPath(i)) res.insert(i);
+ return res;
+ } else {
+ conn->to << wopQueryValidPaths << paths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+}
+
+
+PathSet RemoteStore::queryAllValidPaths()
+{
+ auto conn(connections->get());
+ conn->to << wopQueryAllValidPaths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
+{
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+ PathSet res;
+ for (auto & i : paths) {
+ conn->to << wopHasSubstitutes << i;
+ conn->processStderr();
+ if (readInt(conn->from)) res.insert(i);
+ }
+ return res;
+ } else {
+ conn->to << wopQuerySubstitutablePaths << paths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+}
+
+
+void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos)
+{
+ if (paths.empty()) return;
+
+ auto conn(connections->get());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+
+ for (auto & i : paths) {
+ SubstitutablePathInfo info;
+ conn->to << wopQuerySubstitutablePathInfo << i;
+ conn->processStderr();
+ unsigned int reply = readInt(conn->from);
+ if (reply == 0) continue;
+ info.deriver = readString(conn->from);
+ if (info.deriver != "") assertStorePath(info.deriver);
+ info.references = readStorePaths<PathSet>(*this, conn->from);
+ info.downloadSize = readLongLong(conn->from);
+ info.narSize = readLongLong(conn->from);
+ infos[i] = info;
+ }
+
+ } else {
+
+ conn->to << wopQuerySubstitutablePathInfos << paths;
+ conn->processStderr();
+ size_t count = readNum<size_t>(conn->from);
+ for (size_t n = 0; n < count; n++) {
+ Path path = readStorePath(*this, conn->from);
+ SubstitutablePathInfo & info(infos[path]);
+ info.deriver = readString(conn->from);
+ if (info.deriver != "") assertStorePath(info.deriver);
+ info.references = readStorePaths<PathSet>(*this, conn->from);
+ info.downloadSize = readLongLong(conn->from);
+ info.narSize = readLongLong(conn->from);
+ }
+
+ }
+}
+
+
+void RemoteStore::queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
+ auto conn(connections->get());
+ conn->to << wopQueryPathInfo << path;
+ try {
+ conn->processStderr();
+ } catch (Error & e) {
+ // Ugly backwards compatibility hack.
+ if (e.msg().find("is not valid") != std::string::npos)
+ throw InvalidPath(e.what());
+ throw;
+ }
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
+ bool valid; conn->from >> valid;
+ if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
+ }
+ auto info = std::make_shared<ValidPathInfo>();
+ info->path = path;
+ info->deriver = readString(conn->from);
+ if (info->deriver != "") assertStorePath(info->deriver);
+ info->narHash = parseHash(htSHA256, readString(conn->from));
+ info->references = readStorePaths<PathSet>(*this, conn->from);
+ conn->from >> info->registrationTime >> info->narSize;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
+ conn->from >> info->ultimate;
+ info->sigs = readStrings<StringSet>(conn->from);
+ conn->from >> info->ca;
+ }
+ return info;
+ });
+}
+
+
+void RemoteStore::queryReferrers(const Path & path,
+ PathSet & referrers)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryReferrers << path;
+ conn->processStderr();
+ PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
+ referrers.insert(referrers2.begin(), referrers2.end());
+}
+
+
+PathSet RemoteStore::queryValidDerivers(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryValidDerivers << path;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputs(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryDerivationOutputs << path;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryDerivationOutputNames << path;
+ conn->processStderr();
+ return readStrings<PathSet>(conn->from);
+}
+
+
+Path RemoteStore::queryPathFromHashPart(const string & hashPart)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryPathFromHashPart << hashPart;
+ conn->processStderr();
+ Path path = readString(conn->from);
+ if (!path.empty()) assertStorePath(path);
+ return path;
+}
+
+
+void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
+{
+ auto conn(connections->get());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
+ conn->to << wopImportPaths;
+
+ StringSink sink;
+ sink << 1 // == path follows
+ ;
+ assert(nar->size() % 8 == 0);
+ sink((unsigned char *) nar->data(), nar->size());
+ sink
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0 // == no legacy signature
+ << 0 // == no path follows
+ ;
+
+ StringSource source(*sink.s);
+ conn->processStderr(0, &source);
+
+ auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
+ assert(importedPaths.size() <= 1);
+ }
+
+ else {
+ conn->to << wopAddToStoreNar
+ << info.path << info.deriver << printHash(info.narHash)
+ << info.references << info.registrationTime << info.narSize
+ << info.ultimate << info.sigs << info.ca
+ << repair << dontCheckSigs;
+ conn->to(*nar);
+ conn->processStderr();
+ }
+}
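+
+/* Layout of the legacy import stream built above for daemons with
+   protocol minor < 18 (sketch):
+
+       1                                  // one path follows
+       <NAR contents, length a multiple of 8>
+       exportMagic, path, references, deriver
+       0                                  // no legacy signature
+       0                                  // no further paths
+*/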
+
+
+Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+ auto conn(connections->get());
+
+ Path srcPath(absPath(_srcPath));
+
+ conn->to << wopAddToStore << name
+ << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */
+ << (recursive ? 1 : 0)
+ << printHashType(hashAlgo);
+
+ try {
+ conn->to.written = 0;
+ conn->to.warn = true;
+ dumpPath(srcPath, conn->to, filter);
+ conn->to.warn = false;
+ conn->processStderr();
+ } catch (SysError & e) {
+ /* Daemon closed while we were sending the path. Probably OOM
+ or I/O error. */
+ if (e.errNo == EPIPE)
+ try {
+ conn->processStderr();
+ } catch (EndOfFile & e) { }
+ throw;
+ }
+
+ return readStorePath(*this, conn->from);
+}
+
+
+Path RemoteStore::addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair)
+{
+ if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+ auto conn(connections->get());
+ conn->to << wopAddTextToStore << name << s << references;
+
+ conn->processStderr();
+ return readStorePath(*this, conn->from);
+}
+
+
+void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+ auto conn(connections->get());
+ conn->to << wopBuildPaths;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
+ conn->to << drvPaths;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
+ conn->to << buildMode;
+ else
+ /* Old daemons did not take a 'buildMode' parameter, so we
+ need to validate it here on the client side. */
+ if (buildMode != bmNormal)
+ throw Error("repairing or checking is not supported when building through the Nix daemon");
+ } else {
+ /* For backwards compatibility with old daemons, strip output
+ identifiers. */
+ PathSet drvPaths2;
+ for (auto & i : drvPaths)
+ drvPaths2.insert(string(i, 0, i.find('!')));
+ conn->to << drvPaths2;
+ }
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode)
+{
+ auto conn(connections->get());
+ conn->to << wopBuildDerivation << drvPath << drv << buildMode;
+ conn->processStderr();
+ BuildResult res;
+ unsigned int status;
+ conn->from >> status >> res.errorMsg;
+ res.status = (BuildResult::Status) status;
+ return res;
+}
+
+
+void RemoteStore::ensurePath(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopEnsurePath << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::addTempRoot(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopAddTempRoot << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::addIndirectRoot(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopAddIndirectRoot << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::syncWithGC()
+{
+ auto conn(connections->get());
+ conn->to << wopSyncWithGC;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+Roots RemoteStore::findRoots()
+{
+ auto conn(connections->get());
+ conn->to << wopFindRoots;
+ conn->processStderr();
+ size_t count = readNum<size_t>(conn->from);
+ Roots result;
+ while (count--) {
+ Path link = readString(conn->from);
+ Path target = readStorePath(*this, conn->from);
+ result[link] = target;
+ }
+ return result;
+}
+
+
+void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+ auto conn(connections->get());
+
+ conn->to
+ << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
+ << options.maxFreed
+ /* removed options */
+ << 0 << 0 << 0;
+
+ conn->processStderr();
+
+ results.paths = readStrings<PathSet>(conn->from);
+ results.bytesFreed = readLongLong(conn->from);
+ readLongLong(conn->from); // obsolete
+
+ {
+ auto state_(Store::state.lock());
+ state_->pathInfoCache.clear();
+ }
+}
+
+
+void RemoteStore::optimiseStore()
+{
+ auto conn(connections->get());
+ conn->to << wopOptimiseStore;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+bool RemoteStore::verifyStore(bool checkContents, bool repair)
+{
+ auto conn(connections->get());
+ conn->to << wopVerifyStore << checkContents << repair;
+ conn->processStderr();
+ return readInt(conn->from);
+}
+
+
+void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
+{
+ auto conn(connections->get());
+ conn->to << wopAddSignatures << storePath << sigs;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize)
+{
+ {
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
+ // Don't hold the connection handle in the fallback case
+ // to prevent a deadlock.
+ goto fallback;
+ conn->to << wopQueryMissing << targets;
+ conn->processStderr();
+ willBuild = readStorePaths<PathSet>(*this, conn->from);
+ willSubstitute = readStorePaths<PathSet>(*this, conn->from);
+ unknown = readStorePaths<PathSet>(*this, conn->from);
+ conn->from >> downloadSize >> narSize;
+ return;
+ }
+
+ fallback:
+ return Store::queryMissing(targets, willBuild, willSubstitute,
+ unknown, downloadSize, narSize);
+}
+
+
+RemoteStore::Connection::~Connection()
+{
+ try {
+ to.flush();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
+{
+ to.flush();
+ unsigned int msg;
+ while ((msg = readInt(from)) == STDERR_NEXT
+ || msg == STDERR_READ || msg == STDERR_WRITE) {
+ if (msg == STDERR_WRITE) {
+ string s = readString(from);
+ if (!sink) throw Error("no sink");
+ (*sink)(s);
+ }
+ else if (msg == STDERR_READ) {
+ if (!source) throw Error("no source");
+ size_t len = readNum<size_t>(from);
+ auto buf = std::make_unique<unsigned char[]>(len);
+ writeString(buf.get(), source->read(buf.get(), len), to);
+ to.flush();
+ }
+ else
+ printError(chomp(readString(from)));
+ }
+ if (msg == STDERR_ERROR) {
+ string error = readString(from);
+ unsigned int status = readInt(from);
+ throw Error(status, error);
+ }
+ else if (msg != STDERR_LAST)
+ throw Error("protocol error processing standard error");
+}
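+
+/* Message types handled above (summary): STDERR_NEXT carries a log line
+   that is printed locally, STDERR_WRITE pushes daemon data into ‘sink’,
+   STDERR_READ requests data that is pulled from ‘source’, STDERR_ERROR
+   rethrows the daemon's error, and STDERR_LAST ends the stream. */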
+
+
+}
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
new file mode 100644
index 000000000..479cf3a79
--- /dev/null
+++ b/src/libstore/remote-store.hh
@@ -0,0 +1,142 @@
+#pragma once
+
+#include <limits>
+#include <string>
+
+#include "store-api.hh"
+
+
+namespace nix {
+
+
+class Pipe;
+class Pid;
+struct FdSink;
+struct FdSource;
+template<typename T> class Pool;
+
+
+/* FIXME: RemoteStore is a misnomer - should be something like
+ DaemonStore. */
+class RemoteStore : public virtual Store
+{
+public:
+
+ const Setting<int> maxConnections{(Store*) this, 1,
+ "max-connections", "maximum number of concurrent connections to the Nix daemon"};
+
+ RemoteStore(const Params & params);
+
+ /* Implementations of abstract store API methods. */
+
+ bool isValidPathUncached(const Path & path) override;
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
+
+ PathSet queryAllValidPaths() override;
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override;
+
+ void queryReferrers(const Path & path, PathSet & referrers) override;
+
+ PathSet queryValidDerivers(const Path & path) override;
+
+ PathSet queryDerivationOutputs(const Path & path) override;
+
+ StringSet queryDerivationOutputNames(const Path & path) override;
+
+ Path queryPathFromHashPart(const string & hashPart) override;
+
+ PathSet querySubstitutablePaths(const PathSet & paths) override;
+
+ void querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos) override;
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override;
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive = true, HashType hashAlgo = htSHA256,
+ PathFilter & filter = defaultPathFilter, bool repair = false) override;
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair = false) override;
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override;
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override;
+
+ void ensurePath(const Path & path) override;
+
+ void addTempRoot(const Path & path) override;
+
+ void addIndirectRoot(const Path & path) override;
+
+ void syncWithGC() override;
+
+ Roots findRoots() override;
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override;
+
+ void optimiseStore() override;
+
+ bool verifyStore(bool checkContents, bool repair) override;
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override;
+
+ void queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize) override;
+
+protected:
+
+ struct Connection
+ {
+ FdSink to;
+ FdSource from;
+ unsigned int daemonVersion;
+
+ virtual ~Connection();
+
+ void processStderr(Sink * sink = 0, Source * source = 0);
+ };
+
+ ref<Connection> openConnectionWrapper();
+
+ virtual ref<Connection> openConnection() = 0;
+
+ void initConnection(Connection & conn);
+
+ ref<Pool<Connection>> connections;
+
+private:
+
+ std::atomic_bool failed{false};
+
+ void setOptions(Connection & conn);
+};
+
+class UDSRemoteStore : public LocalFSStore, public RemoteStore
+{
+public:
+
+ UDSRemoteStore(const Params & params);
+
+ std::string getUri() override;
+
+private:
+
+ struct Connection : RemoteStore::Connection
+ {
+ AutoCloseFD fd;
+ };
+
+ ref<RemoteStore::Connection> openConnection() override;
+};
+
+
+}
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
new file mode 100644
index 000000000..245455296
--- /dev/null
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -0,0 +1,339 @@
+#if ENABLE_S3
+
+#include "s3.hh"
+#include "s3-binary-cache-store.hh"
+#include "nar-info.hh"
+#include "nar-info-disk-cache.hh"
+#include "globals.hh"
+#include "compression.hh"
+#include "download.hh"
+#include "istringstream_nocopy.hh"
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/DefaultRetryStrategy.h>
+#include <aws/s3/S3Client.h>
+#include <aws/s3/model/CreateBucketRequest.h>
+#include <aws/s3/model/GetBucketLocationRequest.h>
+#include <aws/s3/model/GetObjectRequest.h>
+#include <aws/s3/model/HeadObjectRequest.h>
+#include <aws/s3/model/ListObjectsRequest.h>
+#include <aws/s3/model/PutObjectRequest.h>
+
+namespace nix {
+
+struct S3Error : public Error
+{
+ Aws::S3::S3Errors err;
+ S3Error(Aws::S3::S3Errors err, const FormatOrString & fs)
+ : Error(fs), err(err) { };
+};
+
+/* Helper: given an Outcome<R, E>, return R in case of success, or
+ throw an exception in case of an error. */
+template<typename R, typename E>
+R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
+{
+ if (!outcome.IsSuccess())
+ throw S3Error(
+ outcome.GetError().GetErrorType(),
+ fs.s + ": " + outcome.GetError().GetMessage());
+ return outcome.GetResultWithOwnership();
+}
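+
+/* Typical use (mirrors the call sites below): wrap an SDK call so that a
+   failed outcome is turned into an S3Error carrying the SDK error type:
+
+       auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
+           client->GetObject(request));
+*/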
+
+static void initAWS()
+{
+ static std::once_flag flag;
+ std::call_once(flag, []() {
+ Aws::SDKOptions options;
+
+ /* We install our own OpenSSL locking function (see
+ shared.cc), so don't let aws-sdk-cpp override it. */
+ options.cryptoOptions.initAndCleanupOpenSSL = false;
+
+ Aws::InitAPI(options);
+ });
+}
+
+S3Helper::S3Helper(const string & region)
+ : config(makeConfig(region))
+ , client(make_ref<Aws::S3::S3Client>(*config))
+{
+}
+
+/* Log AWS retries. */
+class RetryStrategy : public Aws::Client::DefaultRetryStrategy
+{
+ long CalculateDelayBeforeNextRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
+ {
+ auto res = Aws::Client::DefaultRetryStrategy::CalculateDelayBeforeNextRetry(error, attemptedRetries);
+ printError("AWS error '%s' (%s), will retry in %d ms",
+ error.GetExceptionName(), error.GetMessage(), res);
+ return res;
+ }
+};
+
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+{
+ initAWS();
+ auto res = make_ref<Aws::Client::ClientConfiguration>();
+ res->region = region;
+ res->requestTimeoutMs = 600 * 1000;
+ res->retryStrategy = std::make_shared<RetryStrategy>();
+ res->caFile = settings.caFile;
+ return res;
+}
+
+S3Helper::DownloadResult S3Helper::getObject(
+ const std::string & bucketName, const std::string & key)
+{
+ debug("fetching ‘s3://%s/%s’...", bucketName, key);
+
+ auto request =
+ Aws::S3::Model::GetObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(key);
+
+ request.SetResponseStreamFactory([&]() {
+ return Aws::New<std::stringstream>("STRINGSTREAM");
+ });
+
+ DownloadResult res;
+
+ auto now1 = std::chrono::steady_clock::now();
+
+ try {
+
+ auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
+ client->GetObject(request));
+
+ res.data = decodeContent(
+ result.GetContentEncoding(),
+ make_ref<std::string>(
+ dynamic_cast<std::stringstream &>(result.GetBody()).str()));
+
+ } catch (S3Error & e) {
+ if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
+ }
+
+ auto now2 = std::chrono::steady_clock::now();
+
+ res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+ return res;
+}
+
+struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
+{
+ const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+ const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
+ const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
+ const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+
+ std::string bucketName;
+
+ Stats stats;
+
+ S3Helper s3Helper;
+
+ S3BinaryCacheStoreImpl(
+ const Params & params, const std::string & bucketName)
+ : S3BinaryCacheStore(params)
+ , bucketName(bucketName)
+ , s3Helper(region)
+ {
+ diskCache = getNarInfoDiskCache();
+ }
+
+ std::string getUri() override
+ {
+ return "s3://" + bucketName;
+ }
+
+ void init() override
+ {
+ if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
+
+            /* Create the bucket if it doesn't already exist. */
+ // FIXME: HeadBucket would be more appropriate, but doesn't return
+ // an easily parsed 404 message.
+ auto res = s3Helper.client->GetBucketLocation(
+ Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
+
+ if (!res.IsSuccess()) {
+ if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
+ throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
+
+ checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
+ s3Helper.client->CreateBucket(
+ Aws::S3::Model::CreateBucketRequest()
+ .WithBucket(bucketName)
+ .WithCreateBucketConfiguration(
+ Aws::S3::Model::CreateBucketConfiguration()
+ /* .WithLocationConstraint(
+ Aws::S3::Model::BucketLocationConstraint::US) */ )));
+ }
+
+ BinaryCacheStore::init();
+
+ diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
+ }
+ }
+
+ const Stats & getS3Stats() override
+ {
+ return stats;
+ }
+
+ /* This is a specialisation of isValidPath() that optimistically
+ fetches the .narinfo file, rather than first checking for its
+ existence via a HEAD request. Since .narinfos are small, doing
+ a GET is unlikely to be slower than HEAD. */
+ bool isValidPathUncached(const Path & storePath) override
+ {
+ try {
+ queryPathInfo(storePath);
+ return true;
+ } catch (InvalidPath & e) {
+ return false;
+ }
+ }
+
+ bool fileExists(const std::string & path) override
+ {
+ stats.head++;
+
+ auto res = s3Helper.client->HeadObject(
+ Aws::S3::Model::HeadObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(path));
+
+ if (!res.IsSuccess()) {
+ auto & error = res.GetError();
+ if (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN // FIXME
+ && error.GetMessage().find("404") != std::string::npos)
+ return false;
+ throw Error(format("AWS error fetching ‘%s’: %s") % path % error.GetMessage());
+ }
+
+ return true;
+ }
+
+ void uploadFile(const std::string & path, const std::string & data,
+ const std::string & mimeType,
+ const std::string & contentEncoding)
+ {
+ auto request =
+ Aws::S3::Model::PutObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(path);
+
+ request.SetContentType(mimeType);
+
+ if (contentEncoding != "")
+ request.SetContentEncoding(contentEncoding);
+
+ auto stream = std::make_shared<istringstream_nocopy>(data);
+
+ request.SetBody(stream);
+
+ stats.put++;
+ stats.putBytes += data.size();
+
+ auto now1 = std::chrono::steady_clock::now();
+
+ auto result = checkAws(format("AWS error uploading ‘%s’") % path,
+ s3Helper.client->PutObject(request));
+
+ auto now2 = std::chrono::steady_clock::now();
+
+ auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+ printInfo(format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
+ % bucketName % path % data.size() % duration);
+
+ stats.putTimeMs += duration;
+ }
+
+ void upsertFile(const std::string & path, const std::string & data,
+ const std::string & mimeType) override
+ {
+ if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
+ uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
+ else if (lsCompression != "" && hasSuffix(path, ".ls"))
+ uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
+ else if (logCompression != "" && hasPrefix(path, "log/"))
+ uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
+ else
+ uploadFile(path, data, mimeType, "");
+ }
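+
+    /* Illustrative note: these compression settings are ordinary store
+       parameters, so (using a hypothetical bucket name) a cache opened as
+       ‘s3://example-bucket?log-compression=xz’ would compress files under
+       log/ with xz while storing everything else uncompressed; the
+       narinfo and ls compression settings work the same way. */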
+
+ void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
+ debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
+
+ stats.get++;
+
+ auto res = s3Helper.getObject(bucketName, path);
+
+ stats.getBytes += res.data ? res.data->size() : 0;
+ stats.getTimeMs += res.durationMs;
+
+ if (res.data)
+ printTalkative("downloaded ‘s3://%s/%s’ (%d bytes) in %d ms",
+ bucketName, path, res.data->size(), res.durationMs);
+
+ return res.data;
+ });
+ }
+
+ PathSet queryAllValidPaths() override
+ {
+ PathSet paths;
+ std::string marker;
+
+ do {
+ debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker);
+
+ auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName,
+ s3Helper.client->ListObjects(
+ Aws::S3::Model::ListObjectsRequest()
+ .WithBucket(bucketName)
+ .WithDelimiter("/")
+ .WithMarker(marker)));
+
+ auto & contents = res.GetContents();
+
+ debug(format("got %d keys, next marker ‘%s’")
+ % contents.size() % res.GetNextMarker());
+
+ for (auto object : contents) {
+ auto & key = object.GetKey();
+ if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
+ paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
+ }
+
+ marker = res.GetNextMarker();
+ } while (!marker.empty());
+
+ return paths;
+ }
+
+};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, 5) != "s3://") return 0;
+ auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
+ store->init();
+ return store;
+});
+
+}
+
+#endif
diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh
new file mode 100644
index 000000000..4d43fe4d2
--- /dev/null
+++ b/src/libstore/s3-binary-cache-store.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "binary-cache-store.hh"
+
+#include <atomic>
+
+namespace nix {
+
+class S3BinaryCacheStore : public BinaryCacheStore
+{
+protected:
+
+ S3BinaryCacheStore(const Params & params)
+ : BinaryCacheStore(params)
+ { }
+
+public:
+
+ struct Stats
+ {
+ std::atomic<uint64_t> put{0};
+ std::atomic<uint64_t> putBytes{0};
+ std::atomic<uint64_t> putTimeMs{0};
+ std::atomic<uint64_t> get{0};
+ std::atomic<uint64_t> getBytes{0};
+ std::atomic<uint64_t> getTimeMs{0};
+ std::atomic<uint64_t> head{0};
+ };
+
+ virtual const Stats & getS3Stats() = 0;
+};
+
+}
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
new file mode 100644
index 000000000..08a7fbf96
--- /dev/null
+++ b/src/libstore/s3.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#if ENABLE_S3
+
+#include "ref.hh"
+
+namespace Aws { namespace Client { class ClientConfiguration; } }
+namespace Aws { namespace S3 { class S3Client; } }
+
+namespace nix {
+
+struct S3Helper
+{
+ ref<Aws::Client::ClientConfiguration> config;
+ ref<Aws::S3::S3Client> client;
+
+ S3Helper(const std::string & region);
+
+ ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+
+ struct DownloadResult
+ {
+ std::shared_ptr<std::string> data;
+ unsigned int durationMs;
+ };
+
+ DownloadResult getObject(
+ const std::string & bucketName, const std::string & key);
+};
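+
+/* Usage sketch (illustrative; the region, bucket and key are made up):
+
+       S3Helper s3("eu-west-1");
+       auto res = s3.getObject("example-cache", "example.narinfo");
+       if (res.data) {
+           // res.data holds the object contents,
+           // res.durationMs how long the fetch took.
+       }
+*/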
+
+}
+
+#endif
diff --git a/src/libstore/sandbox-defaults.sb.in b/src/libstore/sandbox-defaults.sb.in
new file mode 100644
index 000000000..b5e80085f
--- /dev/null
+++ b/src/libstore/sandbox-defaults.sb.in
@@ -0,0 +1,63 @@
+(allow file-read* file-write-data (literal "/dev/null"))
+(allow ipc-posix*)
+(allow mach-lookup (global-name "com.apple.SecurityServer"))
+
+(allow file-read*
+ (literal "/dev/dtracehelper")
+ (literal "/dev/tty")
+ (literal "/dev/autofs_nowait")
+ (literal "/System/Library/CoreServices/SystemVersion.plist")
+ (literal "/private/var/run/systemkeychaincheck.done")
+ (literal "/private/etc/protocols")
+ (literal "/private/var/tmp")
+ (literal "/private/var/db")
+ (subpath "/private/var/db/mds"))
+
+(allow file-read*
+ (subpath "/usr/share/icu")
+ (subpath "/usr/share/locale")
+ (subpath "/usr/share/zoneinfo"))
+
+(allow file-write*
+ (literal "/dev/tty")
+ (literal "/dev/dtracehelper")
+ (literal "/mds"))
+
+(allow file-ioctl (literal "/dev/dtracehelper"))
+
+(allow file-read-metadata
+ (literal "/var")
+ (literal "/tmp")
+ ; symlinks
+ (literal "@sysconfdir@")
+ (literal "@sysconfdir@/nix")
+ (literal "@sysconfdir@/nix/nix.conf")
+ (literal "/etc/resolv.conf")
+ (literal "/private/etc/resolv.conf"))
+
+(allow file-read*
+ (literal "/private@sysconfdir@/nix/nix.conf")
+ (literal "/private/var/run/resolv.conf"))
+
+; some builders use filehandles other than stdin/stdout
+(allow file*
+ (subpath "/dev/fd")
+ (literal "/dev/ptmx")
+ (regex #"^/dev/[pt]ty.*$"))
+
+; allow everything inside TMP
+(allow file* process-exec
+ (subpath (param "_GLOBAL_TMP_DIR"))
+ (subpath "/private/tmp"))
+
+(allow process-fork)
+(allow sysctl-read)
+(allow signal (target same-sandbox))
+
+; allow getpwuid (for git and other packages)
+(allow mach-lookup
+ (global-name "com.apple.system.notification_center")
+ (global-name "com.apple.system.opendirectoryd.libinfo"))
+
+; allow local networking
+(allow network* (local ip) (remote unix-socket))
diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql
new file mode 100644
index 000000000..09c71a2b8
--- /dev/null
+++ b/src/libstore/schema.sql
@@ -0,0 +1,42 @@
+create table if not exists ValidPaths (
+ id integer primary key autoincrement not null,
+ path text unique not null,
+ hash text not null,
+ registrationTime integer not null,
+ deriver text,
+ narSize integer,
+ ultimate integer, -- null implies "false"
+ sigs text, -- space-separated
+ ca text -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
+);
+
+create table if not exists Refs (
+ referrer integer not null,
+ reference integer not null,
+ primary key (referrer, reference),
+ foreign key (referrer) references ValidPaths(id) on delete cascade,
+ foreign key (reference) references ValidPaths(id) on delete restrict
+);
+
+create index if not exists IndexReferrer on Refs(referrer);
+create index if not exists IndexReference on Refs(reference);
+
+-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
+-- table. Deleting the corresponding row in ValidPaths would then
+-- violate the foreign key constraint (due to `on delete restrict'
+-- on the `reference' column). Therefore, explicitly
+-- get rid of self-references.
+create trigger if not exists DeleteSelfRefs before delete on ValidPaths
+ begin
+ delete from Refs where referrer = old.id and reference = old.id;
+ end;
+
+create table if not exists DerivationOutputs (
+ drv integer not null,
+ id text not null, -- symbolic output id, usually "out"
+ path text not null,
+ primary key (drv, id),
+ foreign key (drv) references ValidPaths(id) on delete cascade
+);
+
+create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
new file mode 100644
index 000000000..f8cc9a4b6
--- /dev/null
+++ b/src/libstore/serve-protocol.hh
@@ -0,0 +1,23 @@
+#pragma once
+
+namespace nix {
+
+#define SERVE_MAGIC_1 0x390c9deb
+#define SERVE_MAGIC_2 0x5452eecb
+
+#define SERVE_PROTOCOL_VERSION 0x203
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
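+
+/* For example, SERVE_PROTOCOL_VERSION 0x203 encodes protocol 2.3:
+   GET_PROTOCOL_MAJOR(0x203) == 0x200 and GET_PROTOCOL_MINOR(0x203) == 0x3. */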
+
+typedef enum {
+ cmdQueryValidPaths = 1,
+ cmdQueryPathInfos = 2,
+ cmdDumpStorePath = 3,
+ cmdImportPaths = 4,
+ cmdExportPaths = 5,
+ cmdBuildPaths = 6,
+ cmdQueryClosure = 7,
+ cmdBuildDerivation = 8,
+} ServeCommand;
+
+}
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
new file mode 100644
index 000000000..a81e62dbd
--- /dev/null
+++ b/src/libstore/sqlite.cc
@@ -0,0 +1,197 @@
+#include "sqlite.hh"
+#include "util.hh"
+
+#include <sqlite3.h>
+
+#include <atomic>
+
+namespace nix {
+
+[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f)
+{
+ int err = sqlite3_errcode(db);
+
+ auto path = sqlite3_db_filename(db, nullptr);
+ if (!path) path = "(in-memory)";
+
+ if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
+ throw SQLiteBusy(
+ err == SQLITE_PROTOCOL
+ ? fmt("SQLite database ‘%s’ is busy (SQLITE_PROTOCOL)", path)
+ : fmt("SQLite database ‘%s’ is busy", path));
+ }
+ else
+ throw SQLiteError("%s: %s (in ‘%s’)", f.str(), sqlite3_errstr(err), path);
+}
+
+SQLite::SQLite(const Path & path)
+{
+ if (sqlite3_open_v2(path.c_str(), &db,
+ SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
+ throw Error(format("cannot open SQLite database ‘%s’") % path);
+}
+
+SQLite::~SQLite()
+{
+ try {
+ if (db && sqlite3_close(db) != SQLITE_OK)
+ throwSQLiteError(db, "closing database");
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+void SQLite::exec(const std::string & stmt)
+{
+ retrySQLite<void>([&]() {
+ if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+ });
+}
+
+void SQLiteStmt::create(sqlite3 * db, const string & sql)
+{
+ checkInterrupt();
+ assert(!stmt);
+ if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
+ throwSQLiteError(db, fmt("creating statement ‘%s’", sql));
+ this->db = db;
+ this->sql = sql;
+}
+
+SQLiteStmt::~SQLiteStmt()
+{
+ try {
+ if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
+ throwSQLiteError(db, fmt("finalizing statement ‘%s’", sql));
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+SQLiteStmt::Use::Use(SQLiteStmt & stmt)
+ : stmt(stmt)
+{
+ assert(stmt.stmt);
+ /* Note: sqlite3_reset() returns the error code for the most
+ recent call to sqlite3_step(). So ignore it. */
+ sqlite3_reset(stmt);
+}
+
+SQLiteStmt::Use::~Use()
+{
+ sqlite3_reset(stmt);
+}
+
+SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool notNull)
+{
+ if (notNull) {
+ if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
+ throwSQLiteError(stmt.db, "binding argument");
+ } else
+ bind();
+ return *this;
+}
+
+SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
+{
+ if (notNull) {
+ if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
+ throwSQLiteError(stmt.db, "binding argument");
+ } else
+ bind();
+ return *this;
+}
+
+SQLiteStmt::Use & SQLiteStmt::Use::bind()
+{
+ if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
+ throwSQLiteError(stmt.db, "binding argument");
+ return *this;
+}
+
+int SQLiteStmt::Use::step()
+{
+ return sqlite3_step(stmt);
+}
+
+void SQLiteStmt::Use::exec()
+{
+ int r = step();
+ assert(r != SQLITE_ROW);
+ if (r != SQLITE_DONE)
+ throwSQLiteError(stmt.db, fmt("executing SQLite statement ‘%s’", stmt.sql));
+}
+
+bool SQLiteStmt::Use::next()
+{
+ int r = step();
+ if (r != SQLITE_DONE && r != SQLITE_ROW)
+ throwSQLiteError(stmt.db, fmt("executing SQLite query ‘%s’", stmt.sql));
+ return r == SQLITE_ROW;
+}
+
+std::string SQLiteStmt::Use::getStr(int col)
+{
+ auto s = (const char *) sqlite3_column_text(stmt, col);
+ assert(s);
+ return s;
+}
+
+int64_t SQLiteStmt::Use::getInt(int col)
+{
+ // FIXME: detect nulls?
+ return sqlite3_column_int64(stmt, col);
+}
+
+bool SQLiteStmt::Use::isNull(int col)
+{
+ return sqlite3_column_type(stmt, col) == SQLITE_NULL;
+}
+
+SQLiteTxn::SQLiteTxn(sqlite3 * db)
+{
+ this->db = db;
+ if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, "starting transaction");
+ active = true;
+}
+
+void SQLiteTxn::commit()
+{
+ if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, "committing transaction");
+ active = false;
+}
+
+SQLiteTxn::~SQLiteTxn()
+{
+ try {
+ if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, "aborting transaction");
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+void handleSQLiteBusy(const SQLiteBusy & e)
+{
+ static std::atomic<time_t> lastWarned{0};
+
+ time_t now = time(0);
+
+ if (now > lastWarned + 10) {
+ lastWarned = now;
+ printError("warning: %s", e.what());
+ }
+
+ /* Sleep for a while since retrying the transaction right away
+ is likely to fail again. */
+ checkInterrupt();
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
+ nanosleep(&t, 0);
+}
+
+}
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
new file mode 100644
index 000000000..14a7a0dd8
--- /dev/null
+++ b/src/libstore/sqlite.hh
@@ -0,0 +1,114 @@
+#pragma once
+
+#include <functional>
+#include <string>
+
+#include "types.hh"
+
+class sqlite3;
+class sqlite3_stmt;
+
+namespace nix {
+
+/* RAII wrapper to close a SQLite database automatically. */
+struct SQLite
+{
+ sqlite3 * db = 0;
+ SQLite() { }
+ SQLite(const Path & path);
+ SQLite(const SQLite & from) = delete;
+ SQLite& operator = (const SQLite & from) = delete;
+ SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
+ ~SQLite();
+ operator sqlite3 * () { return db; }
+
+ void exec(const std::string & stmt);
+};
+
+/* RAII wrapper to create and destroy SQLite prepared statements. */
+struct SQLiteStmt
+{
+ sqlite3 * db = 0;
+ sqlite3_stmt * stmt = 0;
+ std::string sql;
+ SQLiteStmt() { }
+ SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
+ void create(sqlite3 * db, const std::string & s);
+ ~SQLiteStmt();
+ operator sqlite3_stmt * () { return stmt; }
+
+ /* Helper for binding / executing statements. */
+ class Use
+ {
+ friend struct SQLiteStmt;
+ private:
+ SQLiteStmt & stmt;
+ unsigned int curArg = 1;
+ Use(SQLiteStmt & stmt);
+
+ public:
+
+ ~Use();
+
+ /* Bind the next parameter. */
+ Use & operator () (const std::string & value, bool notNull = true);
+ Use & operator () (int64_t value, bool notNull = true);
+ Use & bind(); // null
+
+ int step();
+
+ /* Execute a statement that does not return rows. */
+ void exec();
+
+ /* For statements that return 0 or more rows. Returns true iff
+ a row is available. */
+ bool next();
+
+ std::string getStr(int col);
+ int64_t getInt(int col);
+ bool isNull(int col);
+ };
+
+ Use use()
+ {
+ return Use(*this);
+ }
+};
+
+/* RAII helper that ensures transactions are aborted unless explicitly
+ committed. */
+struct SQLiteTxn
+{
+ bool active = false;
+ sqlite3 * db;
+
+ SQLiteTxn(sqlite3 * db);
+
+ void commit();
+
+ ~SQLiteTxn();
+};
+
+
+MakeError(SQLiteError, Error);
+MakeError(SQLiteBusy, SQLiteError);
+
+[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f);
+
+void handleSQLiteBusy(const SQLiteBusy & e);
+
+/* Convenience function for retrying a SQLite transaction when the
+ database is busy. */
+template<typename T>
+T retrySQLite(std::function<T()> fun)
+{
+ while (true) {
+ try {
+ return fun();
+ } catch (SQLiteBusy & e) {
+ handleSQLiteBusy(e);
+ }
+ }
+}
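+
+/* Usage sketch (illustrative; the database handle and statement are
+   hypothetical): wrap the whole unit of work, including the
+   transaction, so that a busy database retries it from the start.
+
+       retrySQLite<void>([&]() {
+           SQLiteTxn txn(db);
+           SQLiteStmt stmt(db, "insert into Foo (bar) values (?);");
+           stmt.use()("some value").exec();
+           txn.commit();
+       });
+*/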
+
+}
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
new file mode 100644
index 000000000..bb536fadf
--- /dev/null
+++ b/src/libstore/ssh-store.cc
@@ -0,0 +1,104 @@
+#include "store-api.hh"
+#include "remote-store.hh"
+#include "remote-fs-accessor.hh"
+#include "archive.hh"
+#include "worker-protocol.hh"
+#include "pool.hh"
+#include "ssh.hh"
+
+namespace nix {
+
+static std::string uriScheme = "ssh-ng://";
+
+class SSHStore : public RemoteStore
+{
+public:
+
+ const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"};
+ const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"};
+
+ SSHStore(const std::string & host, const Params & params)
+ : Store(params)
+ , RemoteStore(params)
+ , host(host)
+ , master(
+ host,
+ sshKey,
+ // Use SSH master only if using more than 1 connection.
+ connections->capacity() > 1,
+ compress)
+ {
+ }
+
+ std::string getUri() override
+ {
+ return uriScheme + host;
+ }
+
+ void narFromPath(const Path & path, Sink & sink) override;
+
+ ref<FSAccessor> getFSAccessor() override;
+
+private:
+
+ struct Connection : RemoteStore::Connection
+ {
+ std::unique_ptr<SSHMaster::Connection> sshConn;
+ };
+
+ ref<RemoteStore::Connection> openConnection() override;
+
+ std::string host;
+
+ SSHMaster master;
+};
+
+
+class ForwardSource : public Source
+{
+ Source & readSource;
+ Sink & writeSink;
+public:
+ ForwardSource(Source & readSource, Sink & writeSink) : readSource(readSource), writeSink(writeSink) {}
+ size_t read(unsigned char * data, size_t len) override
+ {
+ auto res = readSource.read(data, len);
+        writeSink(data, res); // forward only the bytes actually read
+ return res;
+ }
+};
+
+void SSHStore::narFromPath(const Path & path, Sink & sink)
+{
+ auto conn(connections->get());
+ conn->to << wopNarFromPath << path;
+ conn->processStderr();
+ ParseSink ps;
+ auto fwd = ForwardSource(conn->from, sink);
+ parseDump(ps, fwd);
+}
+
+ref<FSAccessor> SSHStore::getFSAccessor()
+{
+ return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
+}
+
+ref<RemoteStore::Connection> SSHStore::openConnection()
+{
+ auto conn = make_ref<Connection>();
+ conn->sshConn = master.startCommand("nix-daemon --stdio");
+ conn->to = FdSink(conn->sshConn->in.get());
+ conn->from = FdSource(conn->sshConn->out.get());
+ initConnection(*conn);
+ return conn;
+}
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+ return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
+});
+
+}
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
new file mode 100644
index 000000000..e54f3f4ba
--- /dev/null
+++ b/src/libstore/ssh.cc
@@ -0,0 +1,102 @@
+#include "ssh.hh"
+
+namespace nix {
+
+void SSHMaster::addCommonSSHOpts(Strings & args)
+{
+ for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
+ args.push_back(i);
+ if (!keyFile.empty())
+ args.insert(args.end(), {"-i", keyFile});
+ if (compress)
+ args.push_back("-C");
+}
+
+std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
+{
+ Path socketPath = startMaster();
+
+ Pipe in, out;
+ in.create();
+ out.create();
+
+ auto conn = std::make_unique<Connection>();
+ conn->sshPid = startProcess([&]() {
+ restoreSignals();
+
+ close(in.writeSide.get());
+ close(out.readSide.get());
+
+ if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("duping over stdin");
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("duping over stdout");
+
+ Strings args = { "ssh", host.c_str(), "-x", "-a" };
+ addCommonSSHOpts(args);
+ if (socketPath != "")
+ args.insert(args.end(), {"-S", socketPath});
+ args.push_back(command);
+ execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+ throw SysError("executing ‘%s’ on ‘%s’", command, host);
+ });
+
+
+ in.readSide = -1;
+ out.writeSide = -1;
+
+ conn->out = std::move(out.readSide);
+ conn->in = std::move(in.writeSide);
+
+ return conn;
+}
+
+Path SSHMaster::startMaster()
+{
+ if (!useMaster) return "";
+
+ auto state(state_.lock());
+
+ if (state->sshMaster != -1) return state->socketPath;
+
+ state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
+
+ state->socketPath = (Path) *state->tmpDir + "/ssh.sock";
+
+ Pipe out;
+ out.create();
+
+ state->sshMaster = startProcess([&]() {
+ restoreSignals();
+
+ close(out.readSide.get());
+
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("duping over stdout");
+
+ Strings args =
+ { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
+ , "-o", "LocalCommand=echo started"
+ , "-o", "PermitLocalCommand=yes"
+ };
+ addCommonSSHOpts(args);
+ execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+ throw SysError("starting SSH master");
+ });
+
+ out.writeSide = -1;
+
+ std::string reply;
+ try {
+ reply = readLine(out.readSide.get());
+ } catch (EndOfFile & e) { }
+
+ if (reply != "started")
+ throw Error("failed to start SSH master connection to ‘%s’", host);
+
+ return state->socketPath;
+}
+
+}
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
new file mode 100644
index 000000000..b4396467e
--- /dev/null
+++ b/src/libstore/ssh.hh
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "util.hh"
+#include "sync.hh"
+
+namespace nix {
+
+class SSHMaster
+{
+private:
+
+ const std::string host;
+ const std::string keyFile;
+ const bool useMaster;
+ const bool compress;
+
+ struct State
+ {
+ Pid sshMaster;
+ std::unique_ptr<AutoDelete> tmpDir;
+ Path socketPath;
+ };
+
+ Sync<State> state_;
+
+ void addCommonSSHOpts(Strings & args);
+
+public:
+
+ SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress)
+ : host(host)
+ , keyFile(keyFile)
+ , useMaster(useMaster)
+ , compress(compress)
+ {
+ }
+
+ struct Connection
+ {
+ Pid sshPid;
+ AutoCloseFD out, in;
+ };
+
+ std::unique_ptr<Connection> startCommand(const std::string & command);
+
+ Path startMaster();
+};
+
+}
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
new file mode 100644
index 000000000..835bbb90e
--- /dev/null
+++ b/src/libstore/store-api.cc
@@ -0,0 +1,836 @@
+#include "crypto.hh"
+#include "globals.hh"
+#include "store-api.hh"
+#include "util.hh"
+#include "nar-info-disk-cache.hh"
+#include "thread-pool.hh"
+#include "json.hh"
+#include "derivations.hh"
+
+#include <future>
+
+
+namespace nix {
+
+
+bool Store::isInStore(const Path & path) const
+{
+ return isInDir(path, storeDir);
+}
+
+
+bool Store::isStorePath(const Path & path) const
+{
+ return isInStore(path)
+ && path.size() >= storeDir.size() + 1 + storePathHashLen
+ && path.find('/', storeDir.size() + 1) == Path::npos;
+}
+
+
+void Store::assertStorePath(const Path & path) const
+{
+ if (!isStorePath(path))
+ throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+}
+
+
+Path Store::toStorePath(const Path & path) const
+{
+ if (!isInStore(path))
+ throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+ Path::size_type slash = path.find('/', storeDir.size() + 1);
+ if (slash == Path::npos)
+ return path;
+ else
+ return Path(path, 0, slash);
+}
+
+
+Path Store::followLinksToStore(const Path & _path) const
+{
+ Path path = absPath(_path);
+ while (!isInStore(path)) {
+ if (!isLink(path)) break;
+ string target = readLink(path);
+ path = absPath(target, dirOf(path));
+ }
+ if (!isInStore(path))
+ throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+ return path;
+}
+
+
+Path Store::followLinksToStorePath(const Path & path) const
+{
+ return toStorePath(followLinksToStore(path));
+}
+
+
+string storePathToName(const Path & path)
+{
+ auto base = baseNameOf(path);
+ assert(base.size() == storePathHashLen || (base.size() > storePathHashLen && base[storePathHashLen] == '-'));
+ return base.size() == storePathHashLen ? "" : string(base, storePathHashLen + 1);
+}
+
+
+string storePathToHash(const Path & path)
+{
+ auto base = baseNameOf(path);
+ assert(base.size() >= storePathHashLen);
+ return string(base, 0, storePathHashLen);
+}
+
+
+void checkStoreName(const string & name)
+{
+ string validChars = "+-._?=";
+ /* Disallow names starting with a dot for possible security
+ reasons (e.g., "." and ".."). */
+ if (string(name, 0, 1) == ".")
+ throw Error(format("illegal name: ‘%1%’") % name);
+ for (auto & i : name)
+ if (!((i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ validChars.find(i) != string::npos))
+ {
+ throw Error(format("invalid character ‘%1%’ in name ‘%2%’")
+ % i % name);
+ }
+}
+
+
+/* Store paths have the following form:
+
+ <store>/<h>-<name>
+
+ where
+
+ <store> = the location of the Nix store, usually /nix/store
+
+ <name> = a human readable name for the path, typically obtained
+ from the name attribute of the derivation, or the name of the
+ source file from which the store path is created. For derivation
+ outputs other than the default "out" output, the string "-<id>"
+ is suffixed to <name>.
+
+ <h> = base-32 representation of the first 160 bits of a SHA-256
+ hash of <s>; the hash part of the store name
+
+ <s> = the string "<type>:sha256:<h2>:<store>:<name>";
+ note that it includes the location of the store as well as the
+ name to make sure that changes to either of those are reflected
+ in the hash (e.g. you won't get /nix/store/<h>-name1 and
+ /nix/store/<h>-name2 with equal hash parts).
+
+ <type> = one of:
+ "text:<r1>:<r2>:...<rN>"
+ for plain text files written to the store using
+ addTextToStore(); <r1> ... <rN> are the references of the
+ path.
+ "source"
+ for paths copied to the store using addToStore() when recursive
+ = true and hashAlgo = "sha256"
+ "output:<id>"
+ for either the outputs created by derivations, OR paths copied
+ to the store using addToStore() with recursive != true or
+ hashAlgo != "sha256" (in that case "source" is used; it's
+ silly, but it's done that way for compatibility). <id> is the
+ name of the output (usually, "out").
+
+ <h2> = base-16 representation of a SHA-256 hash of:
+ if <type> = "text:...":
+ the string written to the resulting store path
+ if <type> = "source":
+ the serialisation of the path from which this store path is
+ copied, as returned by hashPath()
+ if <type> = "output:<id>":
+ for non-fixed derivation outputs:
+ the derivation (see hashDerivationModulo() in
+ primops.cc)
+ for paths copied by addToStore() or produced by fixed-output
+ derivations:
+ the string "fixed:out:<rec><algo>:<hash>:", where
+ <rec> = "r:" for recursive (path) hashes, or "" for flat
+ (file) hashes
+ <algo> = "md5", "sha1" or "sha256"
+ <hash> = base-16 representation of the path or flat hash of
+ the contents of the path (or expected contents of the
+ path for fixed-output derivations)
+
+ It would have been nicer to handle fixed-output derivations under
+ "source", e.g. have something like "source:<rec><algo>", but we're
+ stuck with this for now...
+
+ The main reason for this way of computing names is to prevent name
+ collisions (for security). For instance, it shouldn't be feasible
+ to come up with a derivation whose output path collides with the
+ path for a copied source. The former would have a <s> starting with
+ "output:out:", while the latter would have a <2> starting with
+ "source:".
+*/
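+
+/* Worked sketch of the above (hypothetical contents; hash values elided):
+   adding the string "Hello" to the store as "hello.txt" with no
+   references hashes the string
+
+       "text:sha256:<base-16 SHA-256 of "Hello">:/nix/store:hello.txt"
+
+   and yields the path
+
+       /nix/store/<base-32 of first 160 bits of the SHA-256 of that string>-hello.txt
+
+   which is what
+
+       store.makeTextPath("hello.txt", hashString(htSHA256, "Hello"), {})
+
+   computes below. */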
+
+
+Path Store::makeStorePath(const string & type,
+ const Hash & hash, const string & name) const
+{
+ /* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
+ string s = type + ":sha256:" + printHash(hash) + ":"
+ + storeDir + ":" + name;
+
+ checkStoreName(name);
+
+ return storeDir + "/"
+ + printHash32(compressHash(hashString(htSHA256, s), 20))
+ + "-" + name;
+}
+
+
+Path Store::makeOutputPath(const string & id,
+ const Hash & hash, const string & name) const
+{
+ return makeStorePath("output:" + id, hash,
+ name + (id == "out" ? "" : "-" + id));
+}
+
+
+Path Store::makeFixedOutputPath(bool recursive,
+ const Hash & hash, const string & name) const
+{
+ return hash.type == htSHA256 && recursive
+ ? makeStorePath("source", hash, name)
+ : makeStorePath("output:out", hashString(htSHA256,
+ "fixed:out:" + (recursive ? (string) "r:" : "") +
+ printHashType(hash.type) + ":" + printHash(hash) + ":"),
+ name);
+}
+
+
+Path Store::makeTextPath(const string & name, const Hash & hash,
+ const PathSet & references) const
+{
+ assert(hash.type == htSHA256);
+ /* Stuff the references (if any) into the type. This is a bit
+ hacky, but we can't put them in `s' since that would be
+ ambiguous. */
+ string type = "text";
+ for (auto & i : references) {
+ type += ":";
+ type += i;
+ }
+ return makeStorePath(type, hash, name);
+}
+
+
+std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter) const
+{
+ Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath);
+ string name = baseNameOf(srcPath);
+ Path dstPath = makeFixedOutputPath(recursive, h, name);
+ return std::pair<Path, Hash>(dstPath, h);
+}
+
+
+Path Store::computeStorePathForText(const string & name, const string & s,
+ const PathSet & references) const
+{
+ return makeTextPath(name, hashString(htSHA256, s), references);
+}
+
+
+Store::Store(const Params & params)
+ : Config(params)
+ , state({(size_t) pathInfoCacheSize})
+{
+}
+
+
+std::string Store::getUri()
+{
+ return "";
+}
+
+
+bool Store::isValidPath(const Path & storePath)
+{
+ auto hashPart = storePathToHash(storePath);
+
+ {
+ auto state_(state.lock());
+ auto res = state_->pathInfoCache.get(hashPart);
+ if (res) {
+ stats.narInfoReadAverted++;
+ return *res != 0;
+ }
+ }
+
+ if (diskCache) {
+ auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+ if (res.first != NarInfoDiskCache::oUnknown) {
+ stats.narInfoReadAverted++;
+ auto state_(state.lock());
+ state_->pathInfoCache.upsert(hashPart,
+ res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
+ return res.first == NarInfoDiskCache::oValid;
+ }
+ }
+
+ bool valid = isValidPathUncached(storePath);
+
+ if (diskCache && !valid)
+ // FIXME: handle valid = true case.
+ diskCache->upsertNarInfo(getUri(), hashPart, 0);
+
+ return valid;
+}
+
+
+/* Default implementation for stores that only implement
+ queryPathInfoUncached(). */
+bool Store::isValidPathUncached(const Path & path)
+{
+ try {
+ queryPathInfo(path);
+ return true;
+ } catch (InvalidPath &) {
+ return false;
+ }
+}
+
+
+ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
+{
+ std::promise<ref<ValidPathInfo>> promise;
+
+ queryPathInfo(storePath,
+ [&](ref<ValidPathInfo> info) {
+ promise.set_value(info);
+ },
+ [&](std::exception_ptr exc) {
+ promise.set_exception(exc);
+ });
+
+ return promise.get_future().get();
+}
+
+
+void Store::queryPathInfo(const Path & storePath,
+ std::function<void(ref<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ auto hashPart = storePathToHash(storePath);
+
+ try {
+
+ {
+ auto res = state.lock()->pathInfoCache.get(hashPart);
+ if (res) {
+ stats.narInfoReadAverted++;
+ if (!*res)
+ throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+ return success(ref<ValidPathInfo>(*res));
+ }
+ }
+
+ if (diskCache) {
+ auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+ if (res.first != NarInfoDiskCache::oUnknown) {
+ stats.narInfoReadAverted++;
+ {
+ auto state_(state.lock());
+ state_->pathInfoCache.upsert(hashPart,
+ res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
+ if (res.first == NarInfoDiskCache::oInvalid ||
+ (res.second->path != storePath && storePathToName(storePath) != ""))
+ throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+ }
+ return success(ref<ValidPathInfo>(res.second));
+ }
+ }
+
+ } catch (std::exception & e) {
+ return callFailure(failure);
+ }
+
+ queryPathInfoUncached(storePath,
+ [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) {
+
+ if (diskCache)
+ diskCache->upsertNarInfo(getUri(), hashPart, info);
+
+ {
+ auto state_(state.lock());
+ state_->pathInfoCache.upsert(hashPart, info);
+ }
+
+ if (!info
+ || (info->path != storePath && storePathToName(storePath) != ""))
+ {
+ stats.narInfoMissing++;
+ return failure(std::make_exception_ptr(InvalidPath(format("path ‘%s’ is not valid") % storePath)));
+ }
+
+ callSuccess(success, failure, ref<ValidPathInfo>(info));
+
+ }, failure);
+}
+
+
+PathSet Store::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+ struct State
+ {
+ size_t left;
+ PathSet valid;
+ std::exception_ptr exc;
+ };
+
+ Sync<State> state_(State{paths.size(), PathSet()});
+
+ std::condition_variable wakeup;
+
+ for (auto & path : paths)
+ queryPathInfo(path,
+ [path, &state_, &wakeup](ref<ValidPathInfo> info) {
+ auto state(state_.lock());
+ state->valid.insert(path);
+ assert(state->left);
+ if (!--state->left)
+ wakeup.notify_one();
+ },
+ [path, &state_, &wakeup](std::exception_ptr exc) {
+ auto state(state_.lock());
+ try {
+ std::rethrow_exception(exc);
+ } catch (InvalidPath &) {
+ } catch (...) {
+ state->exc = exc;
+ }
+ assert(state->left);
+ if (!--state->left)
+ wakeup.notify_one();
+ });
+
+ while (true) {
+ auto state(state_.lock());
+ if (!state->left) {
+ if (state->exc) std::rethrow_exception(state->exc);
+ return state->valid;
+ }
+ state.wait(wakeup);
+ }
+}
+
+
+/* Return a string accepted by decodeValidPathInfo() that
+ registers the specified paths as valid. Note: it's the
+ responsibility of the caller to provide a closure. */
+string Store::makeValidityRegistration(const PathSet & paths,
+ bool showDerivers, bool showHash)
+{
+ string s = "";
+
+ for (auto & i : paths) {
+ s += i + "\n";
+
+ auto info = queryPathInfo(i);
+
+ if (showHash) {
+ s += printHash(info->narHash) + "\n";
+ s += (format("%1%\n") % info->narSize).str();
+ }
+
+ Path deriver = showDerivers ? info->deriver : "";
+ s += deriver + "\n";
+
+ s += (format("%1%\n") % info->references.size()).str();
+
+ for (auto & j : info->references)
+ s += j + "\n";
+ }
+
+ return s;
+}
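+
+/* For illustration: with showDerivers = showHash = false, each path
+   contributes the path itself, an empty deriver line, the number of
+   references and then one line per reference; decodeValidPathInfo()
+   below parses exactly this layout (with hashGiven = false). */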
+
+
+void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths,
+ bool includeImpureInfo, bool showClosureSize)
+{
+ auto jsonList = jsonOut.list();
+
+ for (auto storePath : storePaths) {
+ auto info = queryPathInfo(storePath);
+ storePath = info->path;
+
+ auto jsonPath = jsonList.object();
+ jsonPath
+ .attr("path", storePath)
+ .attr("narHash", info->narHash.to_string())
+ .attr("narSize", info->narSize);
+
+ {
+ auto jsonRefs = jsonPath.list("references");
+ for (auto & ref : info->references)
+ jsonRefs.elem(ref);
+ }
+
+ if (info->ca != "")
+ jsonPath.attr("ca", info->ca);
+
+ if (showClosureSize)
+ jsonPath.attr("closureSize", getClosureSize(storePath));
+
+ if (!includeImpureInfo) continue;
+
+ if (info->deriver != "")
+ jsonPath.attr("deriver", info->deriver);
+
+ if (info->registrationTime)
+ jsonPath.attr("registrationTime", info->registrationTime);
+
+ if (info->ultimate)
+ jsonPath.attr("ultimate", info->ultimate);
+
+ if (!info->sigs.empty()) {
+ auto jsonSigs = jsonPath.list("signatures");
+ for (auto & sig : info->sigs)
+ jsonSigs.elem(sig);
+ }
+ }
+}
+
+
+unsigned long long Store::getClosureSize(const Path & storePath)
+{
+ unsigned long long totalSize = 0;
+ PathSet closure;
+ computeFSClosure(storePath, closure, false, false);
+ for (auto & p : closure)
+ totalSize += queryPathInfo(p)->narSize;
+ return totalSize;
+}
+
+
+const Store::Stats & Store::getStats()
+{
+ {
+ auto state_(state.lock());
+ stats.pathInfoCacheSize = state_->pathInfoCache.size();
+ }
+ return stats;
+}
+
+
+void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
+ const Path & storePath, bool repair, bool dontCheckSigs)
+{
+ auto info = srcStore->queryPathInfo(storePath);
+
+ StringSink sink;
+ srcStore->narFromPath({storePath}, sink);
+
+ if (srcStore->isTrusted())
+ dontCheckSigs = true;
+
+ if (!info->narHash && dontCheckSigs) {
+ auto info2 = make_ref<ValidPathInfo>(*info);
+ info2->narHash = hashString(htSHA256, *sink.s);
+ info = info2;
+ }
+
+ dstStore->addToStore(*info, sink.s, repair, dontCheckSigs);
+}
+
+
+void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
+ const PathSet & storePaths, bool repair, bool dontCheckSigs)
+{
+ PathSet closure;
+ for (auto & path : storePaths)
+ srcStore->computeFSClosure(path, closure);
+
+ // FIXME: use copyStorePaths()
+
+ PathSet valid = dstStore->queryValidPaths(closure);
+
+ if (valid.size() == closure.size()) return;
+
+ Paths sorted = srcStore->topoSortPaths(closure);
+
+ Paths missing;
+ for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
+ if (!valid.count(*i)) missing.push_back(*i);
+
+ printMsg(lvlDebug, format("copying %1% missing paths") % missing.size());
+
+ for (auto & i : missing)
+ copyStorePath(srcStore, dstStore, i, repair, dontCheckSigs);
+}
+
+
+ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven)
+{
+ ValidPathInfo info;
+ getline(str, info.path);
+ if (str.eof()) { info.path = ""; return info; }
+ if (hashGiven) {
+ string s;
+ getline(str, s);
+ info.narHash = parseHash(htSHA256, s);
+ getline(str, s);
+ if (!string2Int(s, info.narSize)) throw Error("number expected");
+ }
+ getline(str, info.deriver);
+ string s; int n;
+ getline(str, s);
+ if (!string2Int(s, n)) throw Error("number expected");
+ while (n--) {
+ getline(str, s);
+ info.references.insert(s);
+ }
+ if (!str || str.eof()) throw Error("missing input");
+ return info;
+}
+
+
+string showPaths(const PathSet & paths)
+{
+ string s;
+ for (auto & i : paths) {
+ if (s.size() != 0) s += ", ";
+ s += "‘" + i + "’";
+ }
+ return s;
+}
+
+
+std::string ValidPathInfo::fingerprint() const
+{
+ if (narSize == 0 || !narHash)
+ throw Error(format("cannot calculate fingerprint of path ‘%s’ because its size/hash is not known")
+ % path);
+ return
+ "1;" + path + ";"
+ + printHashType(narHash.type) + ":" + printHash32(narHash) + ";"
+ + std::to_string(narSize) + ";"
+ + concatStringsSep(",", references);
+}
+
+
+void ValidPathInfo::sign(const SecretKey & secretKey)
+{
+ sigs.insert(secretKey.signDetached(fingerprint()));
+}
+
+
+bool ValidPathInfo::isContentAddressed(const Store & store) const
+{
+ auto warn = [&]() {
+ printError(format("warning: path ‘%s’ claims to be content-addressed but isn't") % path);
+ };
+
+ if (hasPrefix(ca, "text:")) {
+ auto hash = parseHash(std::string(ca, 5));
+ if (store.makeTextPath(storePathToName(path), hash, references) == path)
+ return true;
+ else
+ warn();
+ }
+
+ else if (hasPrefix(ca, "fixed:")) {
+ bool recursive = ca.compare(6, 2, "r:") == 0;
+ auto hash = parseHash(std::string(ca, recursive ? 8 : 6));
+ if (store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path)
+ return true;
+ else
+ warn();
+ }
+
+ return false;
+}
+
+
+size_t ValidPathInfo::checkSignatures(const Store & store, const PublicKeys & publicKeys) const
+{
+ if (isContentAddressed(store)) return maxSigs;
+
+ size_t good = 0;
+ for (auto & sig : sigs)
+ if (checkSignature(publicKeys, sig))
+ good++;
+ return good;
+}
+
+
+bool ValidPathInfo::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const
+{
+ return verifyDetached(fingerprint(), sig, publicKeys);
+}
+
+
+Strings ValidPathInfo::shortRefs() const
+{
+ Strings refs;
+ for (auto & r : references)
+ refs.push_back(baseNameOf(r));
+ return refs;
+}
+
+
+std::string makeFixedOutputCA(bool recursive, const Hash & hash)
+{
+ return "fixed:" + (recursive ? (std::string) "r:" : "") + hash.to_string();
+}
+
+
+}
+
+
+#include "local-store.hh"
+#include "remote-store.hh"
+
+
+namespace nix {
+
+
+RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0;
+
+
+ref<Store> openStore(const std::string & uri_)
+{
+ auto uri(uri_);
+ Store::Params params;
+ auto q = uri.find('?');
+ if (q != std::string::npos) {
+ for (auto s : tokenizeString<Strings>(uri.substr(q + 1), "&")) {
+ auto e = s.find('=');
+ if (e != std::string::npos)
+ params[s.substr(0, e)] = s.substr(e + 1);
+ }
+ uri = uri_.substr(0, q);
+ }
+ return openStore(uri, params);
+}
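+
+/* For example (illustrative host name),
+   openStore("ssh-ng://example.org?compress=true") is equivalent to
+   openStore("ssh-ng://example.org", {{"compress", "true"}}). */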
+
+ref<Store> openStore(const std::string & uri, const Store::Params & params)
+{
+ for (auto fun : *RegisterStoreImplementation::implementations) {
+ auto store = fun(uri, params);
+ if (store) {
+ store->warnUnknownSettings();
+ return ref<Store>(store);
+ }
+ }
+
+ throw Error(format("don't know how to open Nix store ‘%s’") % uri);
+}
+
+
+StoreType getStoreType(const std::string & uri, const std::string & stateDir)
+{
+ if (uri == "daemon") {
+ return tDaemon;
+ } else if (uri == "local") {
+ return tLocal;
+ } else if (uri == "" || uri == "auto") {
+ if (access(stateDir.c_str(), R_OK | W_OK) == 0)
+ return tLocal;
+ else if (pathExists(settings.nixDaemonSocketFile))
+ return tDaemon;
+ else
+ return tLocal;
+ } else {
+ return tOther;
+ }
+}
+
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ switch (getStoreType(uri, get(params, "state", settings.nixStateDir))) {
+ case tDaemon:
+ return std::shared_ptr<Store>(std::make_shared<UDSRemoteStore>(params));
+ case tLocal:
+ return std::shared_ptr<Store>(std::make_shared<LocalStore>(params));
+ default:
+ return nullptr;
+ }
+});
+
+
+std::list<ref<Store>> getDefaultSubstituters()
+{
+ struct State {
+ bool done = false;
+ std::list<ref<Store>> stores;
+ };
+ static Sync<State> state_;
+
+ auto state(state_.lock());
+
+ if (state->done) return state->stores;
+
+ StringSet done;
+
+ auto addStore = [&](const std::string & uri) {
+ if (done.count(uri)) return;
+ done.insert(uri);
+ state->stores.push_back(openStore(uri));
+ };
+
+ for (auto uri : settings.substituters.get())
+ addStore(uri);
+
+ for (auto uri : settings.extraSubstituters.get())
+ addStore(uri);
+
+ state->done = true;
+
+ return state->stores;
+}
+
+
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute)
+{
+ PathSet valid = to->queryValidPaths(storePaths, substitute);
+
+ PathSet missing;
+ for (auto & path : storePaths)
+ if (!valid.count(path)) missing.insert(path);
+
+ std::string copiedLabel = "copied";
+
+ logger->setExpected(copiedLabel, missing.size());
+
+ ThreadPool pool;
+
+ processGraph<Path>(pool,
+ PathSet(missing.begin(), missing.end()),
+
+ [&](const Path & storePath) {
+ if (to->isValidPath(storePath)) return PathSet();
+ return from->queryPathInfo(storePath)->references;
+ },
+
+ [&](const Path & storePath) {
+ checkInterrupt();
+
+ if (!to->isValidPath(storePath)) {
+ Activity act(*logger, lvlInfo, format("copying ‘%s’...") % storePath);
+
+ copyStorePath(from, to, storePath);
+
+ logger->incProgress(copiedLabel);
+ } else
+ logger->incExpected(copiedLabel, -1);
+ });
+
+ pool.process();
+}
+
+
+}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
new file mode 100644
index 000000000..067309c9e
--- /dev/null
+++ b/src/libstore/store-api.hh
@@ -0,0 +1,729 @@
+#pragma once
+
+#include "hash.hh"
+#include "serialise.hh"
+#include "crypto.hh"
+#include "lru-cache.hh"
+#include "sync.hh"
+#include "globals.hh"
+#include "config.hh"
+
+#include <atomic>
+#include <limits>
+#include <map>
+#include <memory>
+#include <string>
+
+
+namespace nix {
+
+
+struct BasicDerivation;
+struct Derivation;
+class FSAccessor;
+class NarInfoDiskCache;
+class Store;
+class JSONPlaceholder;
+
+
+/* Size of the hash part of store paths, in base-32 characters. */
+const size_t storePathHashLen = 32; // i.e. 160 bits
+
+/* Magic header of exportPath() output (obsolete). */
+const uint32_t exportMagic = 0x4558494e;
+
+
+typedef std::map<Path, Path> Roots;
+
+
+struct GCOptions
+{
+ /* Garbage collector operation:
+
+ - `gcReturnLive': return the set of paths reachable from
+ (i.e. in the closure of) the roots.
+
+ - `gcReturnDead': return the set of paths not reachable from
+ the roots.
+
+ - `gcDeleteDead': actually delete the latter set.
+
+ - `gcDeleteSpecific': delete the paths listed in
+ `pathsToDelete', insofar as they are not reachable.
+ */
+ typedef enum {
+ gcReturnLive,
+ gcReturnDead,
+ gcDeleteDead,
+ gcDeleteSpecific,
+ } GCAction;
+
+ GCAction action{gcDeleteDead};
+
+ /* If `ignoreLiveness' is set, then reachability from the roots is
+ ignored (dangerous!). However, the paths must still be
+ unreferenced *within* the store (i.e., there can be no other
+ store paths that depend on them). */
+ bool ignoreLiveness{false};
+
+ /* For `gcDeleteSpecific', the paths to delete. */
+ PathSet pathsToDelete;
+
+ /* Stop after at least `maxFreed' bytes have been freed. */
+ unsigned long long maxFreed{std::numeric_limits<unsigned long long>::max()};
+};
+
+
+struct GCResults
+{
+ /* Depending on the action, the GC roots, or the paths that would
+ be or have been deleted. */
+ PathSet paths;
+
+ /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
+ number of bytes that would be or was freed. */
+ unsigned long long bytesFreed = 0;
+};
+
+
+struct SubstitutablePathInfo
+{
+ Path deriver;
+ PathSet references;
+ unsigned long long downloadSize; /* 0 = unknown or inapplicable */
+ unsigned long long narSize; /* 0 = unknown */
+};
+
+typedef std::map<Path, SubstitutablePathInfo> SubstitutablePathInfos;
+
+
+struct ValidPathInfo
+{
+ Path path;
+ Path deriver;
+ Hash narHash;
+ PathSet references;
+ time_t registrationTime = 0;
+ uint64_t narSize = 0; // 0 = unknown
+ uint64_t id; // internal use only
+
+ /* Whether the path is ultimately trusted, that is, it was built
+ locally or is content-addressable (e.g. added via addToStore()
+ or the result of a fixed-output derivation). */
+ bool ultimate = false;
+
+ StringSet sigs; // note: not necessarily verified
+
+ /* If non-empty, an assertion that the path is content-addressed,
+ i.e., that the store path is computed from a cryptographic hash
+ of the contents of the path, plus some other bits of data like
+ the "name" part of the path. Such a path doesn't need
+ signatures, since we don't have to trust anybody's claim that
+ the path is the output of a particular derivation. (In the
+ extensional store model, we have to trust that the *contents*
+ of an output path of a derivation were actually produced by
+ that derivation. In the intensional model, we have to trust
+ that a particular output path was produced by a derivation; the
+ path then implies the contents.)
+
+ Ideally, the content-addressability assertion would just be a
+ Boolean, and the store path would be computed from
+ ‘storePathToName(path)’, ‘narHash’ and ‘references’. However,
+ 1) we've accumulated several types of content-addressed paths
+ over the years; and 2) fixed-output derivations support
+ multiple hash algorithms and serialisation methods (flat file
+ vs NAR). Thus, ‘ca’ has one of the following forms:
+
+ * ‘text:sha256:<sha256 hash of file contents>’: For paths
+ computed by makeTextPath() / addTextToStore().
+
+ * ‘fixed:<r?>:<ht>:<h>’: For paths computed by
+ makeFixedOutputPath() / addToStore().
+ */
+ std::string ca;
+
+ bool operator == (const ValidPathInfo & i) const
+ {
+ return
+ path == i.path
+ && narHash == i.narHash
+ && references == i.references;
+ }
+
+ /* Return a fingerprint of the store path to be used in binary
+ cache signatures. It contains the store path, the base-32
+ SHA-256 hash of the NAR serialisation of the path, the size of
+ the NAR, and the sorted references. The size field is strictly
+ speaking superfluous, but might prevent endless/excessive data
+ attacks. */
+ std::string fingerprint() const;
+
+ void sign(const SecretKey & secretKey);
+
+ /* Return true iff the path is verifiably content-addressed. */
+ bool isContentAddressed(const Store & store) const;
+
+ static const size_t maxSigs = std::numeric_limits<size_t>::max();
+
+ /* Return the number of signatures on this .narinfo that were
+ produced by one of the specified keys, or maxSigs if the path
+ is content-addressed. */
+ size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
+
+ /* Verify a single signature. */
+ bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
+
+ Strings shortRefs() const;
+
+ virtual ~ValidPathInfo() { }
+};
+
+typedef list<ValidPathInfo> ValidPathInfos;
+
+
+enum BuildMode { bmNormal, bmRepair, bmCheck, bmHash };
+
+
+struct BuildResult
+{
+ /* Note: don't remove status codes, and only add new status codes
+ at the end of the list, to prevent client/server
+ incompatibilities in the nix-store --serve protocol. */
+ enum Status {
+ Built = 0,
+ Substituted,
+ AlreadyValid,
+ PermanentFailure,
+ InputRejected,
+ OutputRejected,
+ TransientFailure, // possibly transient
+ CachedFailure, // no longer used
+ TimedOut,
+ MiscFailure,
+ DependencyFailed,
+ LogLimitExceeded,
+ NotDeterministic,
+ } status = MiscFailure;
+ std::string errorMsg;
+
+ /* How many times this build was performed. */
+ unsigned int timesBuilt = 0;
+
+ /* If timesBuilt > 1, whether some builds did not produce the same
+ result. (Note that 'isNonDeterministic = false' does not mean
+ the build is deterministic, just that we don't have evidence of
+ non-determinism.) */
+ bool isNonDeterministic = false;
+
+ /* The start/stop times of the build (or one of the rounds, if it
+ was repeated). */
+ time_t startTime = 0, stopTime = 0;
+
+ bool success() {
+ return status == Built || status == Substituted || status == AlreadyValid;
+ }
+};
+
+
+class Store : public std::enable_shared_from_this<Store>, public Config
+{
+public:
+
+ typedef std::map<std::string, std::string> Params;
+
+ const PathSetting storeDir_{this, false, settings.nixStore,
+ "store", "path to the Nix store"};
+ const Path storeDir = storeDir_;
+
+ const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"};
+
+protected:
+
+ struct State
+ {
+ LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache;
+ };
+
+ Sync<State> state;
+
+ std::shared_ptr<NarInfoDiskCache> diskCache;
+
+ Store(const Params & params);
+
+public:
+
+ virtual ~Store() { }
+
+ virtual std::string getUri() = 0;
+
+ /* Return true if ‘path’ is in the Nix store (but not the Nix
+ store itself). */
+ bool isInStore(const Path & path) const;
+
+ /* Return true if ‘path’ is a store path, i.e. a direct child of
+ the Nix store. */
+ bool isStorePath(const Path & path) const;
+
+ /* Throw an exception if ‘path’ is not a store path. */
+ void assertStorePath(const Path & path) const;
+
+ /* Chop off the parts after the top-level store name, e.g.,
+ /nix/store/abcd-foo/bar => /nix/store/abcd-foo. */
+ Path toStorePath(const Path & path) const;
+
+ /* Follow symlinks until we end up with a path in the Nix store. */
+ Path followLinksToStore(const Path & path) const;
+
+ /* Same as followLinksToStore(), but apply toStorePath() to the
+ result. */
+ Path followLinksToStorePath(const Path & path) const;
+
+ /* Constructs a unique store path name. */
+ Path makeStorePath(const string & type,
+ const Hash & hash, const string & name) const;
+
+ Path makeOutputPath(const string & id,
+ const Hash & hash, const string & name) const;
+
+ Path makeFixedOutputPath(bool recursive,
+ const Hash & hash, const string & name) const;
+
+ Path makeTextPath(const string & name, const Hash & hash,
+ const PathSet & references) const;
+
+ /* This is the preparatory part of addToStore(); it computes the
+ store path to which srcPath is to be copied. Returns the store
+ path and the cryptographic hash of the contents of srcPath. */
+ std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
+ bool recursive = true, HashType hashAlgo = htSHA256,
+ PathFilter & filter = defaultPathFilter) const;
+
+ /* Preparatory part of addTextToStore().
+
+ !!! Computation of the path should take the references given to
+ addTextToStore() into account, otherwise we have a (relatively
+ minor) security hole: a caller can register a source file with
+ bogus references. If there are too many references, the path may
+ not be garbage collected when it has to be (not really a problem,
+ the caller could create a root anyway), or it may be garbage
+ collected when it shouldn't be (more serious).
+
+ Hashing the references would solve this (bogus references would
+ simply yield a different store path, so other users wouldn't be
+ affected), but it has some backwards compatibility issues (the
+ hashing scheme changes), so I'm not doing that for now. */
+ Path computeStorePathForText(const string & name, const string & s,
+ const PathSet & references) const;
+
+ /* Check whether a path is valid. */
+ bool isValidPath(const Path & path);
+
+protected:
+
+ virtual bool isValidPathUncached(const Path & path);
+
+public:
+
+ /* Query which of the given paths is valid. Optionally, try to
+ substitute missing paths. */
+ virtual PathSet queryValidPaths(const PathSet & paths,
+ bool maybeSubstitute = false);
+
+ /* Query the set of all valid paths. Note that for some store
+ backends, the name part of store paths may be omitted
+ (i.e. you'll get /nix/store/<hash> rather than
+ /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
+ full store path. */
+ virtual PathSet queryAllValidPaths() = 0;
+
+ /* Query information about a valid path. It is permitted to omit
+ the name part of the store path. */
+ ref<const ValidPathInfo> queryPathInfo(const Path & path);
+
+ /* Asynchronous version of queryPathInfo(). */
+ void queryPathInfo(const Path & path,
+ std::function<void(ref<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure);
+
+protected:
+
+ virtual void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) = 0;
+
+public:
+
+ /* Queries the set of incoming FS references for a store path.
+ The result is not cleared. */
+ virtual void queryReferrers(const Path & path,
+ PathSet & referrers) = 0;
+
+ /* Return all currently valid derivations that have `path' as an
+ output. (Note that the result of `queryDeriver()' is the
+ derivation that was actually used to produce `path', which may
+ not exist anymore.) */
+ virtual PathSet queryValidDerivers(const Path & path) { return {}; };
+
+ /* Query the outputs of the derivation denoted by `path'. */
+ virtual PathSet queryDerivationOutputs(const Path & path) = 0;
+
+ /* Query the output names of the derivation denoted by `path'. */
+ virtual StringSet queryDerivationOutputNames(const Path & path) = 0;
+
+ /* Query the full store path given the hash part of a valid store
+ path, or "" if the path doesn't exist. */
+ virtual Path queryPathFromHashPart(const string & hashPart) = 0;
+
+ /* Query which of the given paths have substitutes. */
+ virtual PathSet querySubstitutablePaths(const PathSet & paths) { return {}; };
+
+ /* Query substitute info (i.e. references, derivers and download
+ sizes) of a set of paths. If a path does not have substitute
+ info, it's omitted from the resulting ‘infos’ map. */
+ virtual void querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos) { return; };
+
+ virtual bool wantMassQuery() { return false; }
+
+ /* Import a path into the store. */
+ virtual void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair = false, bool dontCheckSigs = false,
+ std::shared_ptr<FSAccessor> accessor = 0) = 0;
+
+ /* Copy the contents of a path to the store and register the
+       validity of the resulting path. The resulting path is returned.
+ The function object `filter' can be used to exclude files (see
+ libutil/archive.hh). */
+ virtual Path addToStore(const string & name, const Path & srcPath,
+ bool recursive = true, HashType hashAlgo = htSHA256,
+ PathFilter & filter = defaultPathFilter, bool repair = false) = 0;
+
+    /* Like addToStore, but the content written to the output path is
+ a regular file containing the given string. */
+ virtual Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair = false) = 0;
+
+ /* Write a NAR dump of a store path. */
+ virtual void narFromPath(const Path & path, Sink & sink) = 0;
+
+ /* For each path, if it's a derivation, build it. Building a
+ derivation means ensuring that the output paths are valid. If
+ they are already valid, this is a no-op. Otherwise, validity
+       can be reached in two ways. First, if the output paths are
+       substitutable, then build them that way. Second, the
+ output paths can be created by running the builder, after
+ recursively building any sub-derivations. For inputs that are
+ not derivations, substitute them. */
+ virtual void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) = 0;
+
+ /* Build a single non-materialized derivation (i.e. not from an
+ on-disk .drv file). Note that ‘drvPath’ is only used for
+ informational purposes. */
+ virtual BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode = bmNormal) = 0;
+
+ /* Ensure that a path is valid. If it is not currently valid, it
+ may be made valid by running a substitute (if defined for the
+ path). */
+ virtual void ensurePath(const Path & path) = 0;
+
+ /* Add a store path as a temporary root of the garbage collector.
+ The root disappears as soon as we exit. */
+ virtual void addTempRoot(const Path & path) = 0;
+
+ /* Add an indirect root, which is merely a symlink to `path' from
+ /nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
+ to be a symlink to a store path. The garbage collector will
+ automatically remove the indirect root when it finds that
+ `path' has disappeared. */
+ virtual void addIndirectRoot(const Path & path) = 0;
+
+ /* Acquire the global GC lock, then immediately release it. This
+ function must be called after registering a new permanent root,
+ but before exiting. Otherwise, it is possible that a running
+ garbage collector doesn't see the new root and deletes the
+ stuff we've just built. By acquiring the lock briefly, we
+ ensure that either:
+
+ - The collector is already running, and so we block until the
+ collector is finished. The collector will know about our
+ *temporary* locks, which should include whatever it is we
+ want to register as a permanent root.
+
+ - The collector isn't running, or it's just started but hasn't
+ acquired the GC lock yet. In that case we get and release
+ the lock right away, then exit. The collector scans the
+ permanent roots and sees ours.
+
+ In either case the permanent root is seen by the collector. */
+ virtual void syncWithGC() { };
+
+ /* Find the roots of the garbage collector. Each root is a pair
+ (link, storePath) where `link' is the path of the symlink
+ outside of the Nix store that points to `storePath'. */
+ virtual Roots findRoots() = 0;
+
+ /* Perform a garbage collection. */
+ virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
+
+ /* Return a string representing information about the path that
+ can be loaded into the database using `nix-store --load-db' or
+ `nix-store --register-validity'. */
+ string makeValidityRegistration(const PathSet & paths,
+ bool showDerivers, bool showHash);
+
+ /* Write a JSON representation of store path metadata, such as the
+ hash and the references. If ‘includeImpureInfo’ is true,
+ variable elements such as the registration time are
+ included. If ‘showClosureSize’ is true, the closure size of
+ each path is included. */
+ void pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths,
+ bool includeImpureInfo, bool showClosureSize);
+
+ /* Return the size of the closure of the specified path, that is,
+ the sum of the size of the NAR serialisation of each path in
+ the closure. */
+ unsigned long long getClosureSize(const Path & storePath);
+
+ /* Optimise the disk space usage of the Nix store by hard-linking files
+ with the same contents. */
+ virtual void optimiseStore() { };
+
+ /* Check the integrity of the Nix store. Returns true if errors
+ remain. */
+ virtual bool verifyStore(bool checkContents, bool repair) { return false; };
+
+ /* Return an object to access files in the Nix store. */
+ virtual ref<FSAccessor> getFSAccessor() = 0;
+
+ /* Add signatures to the specified store path. The signatures are
+ not verified. */
+ virtual void addSignatures(const Path & storePath, const StringSet & sigs) = 0;
+
+ /* Utility functions. */
+
+ /* Read a derivation, after ensuring its existence through
+ ensurePath(). */
+ Derivation derivationFromPath(const Path & drvPath);
+
+ /* Place in `out' the set of all store paths in the file system
+ closure of `storePath'; that is, all paths that can be directly
+ or indirectly reached from it. `out' is not cleared. If
+ `flipDirection' is true, the set of paths that can reach
+ `storePath' is returned; that is, the closure under the
+ `referrers' relation instead of the `references' relation is
+ returned. */
+ virtual void computeFSClosure(const PathSet & paths,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false);
+
+ void computeFSClosure(const Path & path,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false);
+
+ /* Given a set of paths that are to be built, return the set of
+ derivations that will be built, and the set of output paths
+ that will be substituted. */
+ virtual void queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize);
+
+ /* Sort a set of paths topologically under the references
+ relation. If p refers to q, then p precedes q in this list. */
+ Paths topoSortPaths(const PathSet & paths);
+
+ /* Export multiple paths in the format expected by ‘nix-store
+ --import’. */
+ void exportPaths(const Paths & paths, Sink & sink);
+
+ void exportPath(const Path & path, Sink & sink);
+
+ /* Import a sequence of NAR dumps created by exportPaths() into
+ the Nix store. Optionally, the contents of the NARs are
+ preloaded into the specified FS accessor to speed up subsequent
+ access. */
+ Paths importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
+ bool dontCheckSigs = false);
+
+ struct Stats
+ {
+ std::atomic<uint64_t> narInfoRead{0};
+ std::atomic<uint64_t> narInfoReadAverted{0};
+ std::atomic<uint64_t> narInfoMissing{0};
+ std::atomic<uint64_t> narInfoWrite{0};
+ std::atomic<uint64_t> pathInfoCacheSize{0};
+ std::atomic<uint64_t> narRead{0};
+ std::atomic<uint64_t> narReadBytes{0};
+ std::atomic<uint64_t> narReadCompressedBytes{0};
+ std::atomic<uint64_t> narWrite{0};
+ std::atomic<uint64_t> narWriteAverted{0};
+ std::atomic<uint64_t> narWriteBytes{0};
+ std::atomic<uint64_t> narWriteCompressedBytes{0};
+ std::atomic<uint64_t> narWriteCompressionTimeMs{0};
+ };
+
+ const Stats & getStats();
+
+ /* Whether paths from this store can be imported even if they
+ lack a signature. */
+ virtual bool isTrusted() { return false; }
+
+ /* Return the build log of the specified store path, if available,
+ or null otherwise. */
+ virtual std::shared_ptr<std::string> getBuildLog(const Path & path)
+ { return nullptr; }
+
+ /* Hack to allow long-running processes like hydra-queue-runner to
+ occasionally flush their path info cache. */
+ void clearPathInfoCache()
+ {
+ state.lock()->pathInfoCache.clear();
+ }
+
+protected:
+
+ Stats stats;
+
+};
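+
+/* A minimal usage sketch of the interface above, assuming a ref<Store>
+   obtained from openStore() (declared further down in this header) and
+   a derivation path ‘drvPath’:
+
+       void buildIt(ref<Store> store, const Path & drvPath)
+       {
+           store->addTempRoot(drvPath);    // protect it from the GC while we work
+           store->buildPaths({drvPath});   // build or substitute its outputs
+           for (auto & out : store->queryDerivationOutputs(drvPath))
+               store->ensurePath(out);     // no-op if the output is already valid
+       }
+*/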
+
+
+class LocalFSStore : public virtual Store
+{
+public:
+
+ // FIXME: the (Store*) cast works around a bug in gcc that causes
+ // it to emit the call to the Option constructor. Clang works fine
+ // either way.
+ const PathSetting rootDir{(Store*) this, true, "",
+ "root", "directory prefixed to all other paths"};
+ const PathSetting stateDir{(Store*) this, false,
+ rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
+ "state", "directory where Nix will store state"};
+ const PathSetting logDir{(Store*) this, false,
+ rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
+ "log", "directory where Nix will store state"};
+
+ const static string drvsLogDir;
+
+ LocalFSStore(const Params & params);
+
+ void narFromPath(const Path & path, Sink & sink) override;
+ ref<FSAccessor> getFSAccessor() override;
+
+ /* Register a permanent GC root. */
+ Path addPermRoot(const Path & storePath,
+ const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
+
+ virtual Path getRealStoreDir() { return storeDir; }
+
+ Path toRealPath(const Path & storePath)
+ {
+ return getRealStoreDir() + "/" + baseNameOf(storePath);
+ }
+
+ std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+};
+
+
+/* Extract the name part of the given store path. */
+string storePathToName(const Path & path);
+
+/* Extract the hash part of the given store path. */
+string storePathToHash(const Path & path);
+
+/* Check whether ‘name’ is a valid store path name part, i.e. contains
+ only the characters [a-zA-Z0-9\+\-\.\_\?\=] and doesn't start with
+ a dot. */
+void checkStoreName(const string & name);
+
+
+/* Copy a path from one store to another. */
+void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
+ const Path & storePath, bool repair = false, bool dontCheckSigs = false);
+
+
+/* Copy the closure of the specified paths from one store to another. */
+void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
+ const PathSet & storePaths, bool repair = false, bool dontCheckSigs = false);
+
+
+/* Remove the temporary roots file for this process. Any temporary
+ root becomes garbage after this point unless it has been registered
+ as a (permanent) root. */
+void removeTempRoots();
+
+
+/* Return a Store object to access the Nix store denoted by
+ ‘uri’ (slight misnomer...). Supported values are:
+
+ * ‘direct’: The Nix store in /nix/store and database in
+ /nix/var/nix/db, accessed directly.
+
+ * ‘daemon’: The Nix store accessed via a Unix domain socket
+ connection to nix-daemon.
+
+ * ‘file://<path>’: A binary cache stored in <path>.
+
+ If ‘uri’ is empty, it defaults to ‘direct’ or ‘daemon’ depending on
+ whether the user has write access to the local Nix store/database. */
+ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
+
+ref<Store> openStore(const std::string & uri, const Store::Params & params);
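+
+/* Usage sketch (URIs and paths are illustrative only):
+
+       auto daemon = openStore("daemon");             // via the Unix domain socket
+       auto cache = openStore("file:///tmp/cache");   // local binary cache
+       copyStorePath(daemon, cache, somePath);        // push one store path
+
+   where ‘somePath’ is some valid store path. */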
+
+
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute = false);
+
+enum StoreType {
+ tDaemon,
+ tLocal,
+ tOther
+};
+
+
+StoreType getStoreType(const std::string & uri = getEnv("NIX_REMOTE"), const std::string & stateDir = settings.nixStateDir);
+
+/* Return the default substituter stores, defined by the
+ ‘substituters’ option and various legacy options like
+ ‘binary-caches’. */
+std::list<ref<Store>> getDefaultSubstituters();
+
+
+/* Store implementation registration. */
+typedef std::function<std::shared_ptr<Store>(
+ const std::string & uri, const Store::Params & params)> OpenStore;
+
+struct RegisterStoreImplementation
+{
+ typedef std::vector<OpenStore> Implementations;
+ static Implementations * implementations;
+
+ RegisterStoreImplementation(OpenStore fun)
+ {
+ if (!implementations) implementations = new Implementations;
+ implementations->push_back(fun);
+ }
+};
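+
+/* Sketch of how a backend registers itself (‘MyStore’ is a
+   hypothetical Store subclass, shown only for illustration):
+
+       static RegisterStoreImplementation regMyStore(
+           [](const std::string & uri, const Store::Params & params)
+               -> std::shared_ptr<Store>
+           {
+               if (std::string(uri, 0, 5) != "my://") return nullptr;
+               return std::make_shared<MyStore>(params);
+           });
+*/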
+
+
+
+/* Display a set of paths in human-readable form (i.e., between quotes
+ and separated by commas). */
+string showPaths(const PathSet & paths);
+
+
+ValidPathInfo decodeValidPathInfo(std::istream & str,
+ bool hashGiven = false);
+
+
+/* Compute the content-addressability assertion (ValidPathInfo::ca)
+ for paths created by makeFixedOutputPath() / addToStore(). */
+std::string makeFixedOutputCA(bool recursive, const Hash & hash);
+
+
+MakeError(SubstError, Error)
+MakeError(BuildError, Error) /* denotes a permanent build failure */
+MakeError(InvalidPath, Error)
+
+
+}
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
new file mode 100644
index 000000000..6c6766b36
--- /dev/null
+++ b/src/libstore/worker-protocol.hh
@@ -0,0 +1,66 @@
+#pragma once
+
+namespace nix {
+
+
+#define WORKER_MAGIC_1 0x6e697863
+#define WORKER_MAGIC_2 0x6478696f
+
+#define PROTOCOL_VERSION 0x113
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
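+
+/* Sketch: after exchanging the magic numbers above, each side reads the
+   peer's version word and typically checks compatibility along these
+   lines (‘clientVersion’ being the value read from the wire):
+
+       if (GET_PROTOCOL_MAJOR(clientVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
+           throw Error("protocol version mismatch");
+*/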
+
+
+typedef enum {
+ wopIsValidPath = 1,
+ wopHasSubstitutes = 3,
+ wopQueryPathHash = 4, // obsolete
+ wopQueryReferences = 5, // obsolete
+ wopQueryReferrers = 6,
+ wopAddToStore = 7,
+ wopAddTextToStore = 8,
+ wopBuildPaths = 9,
+ wopEnsurePath = 10,
+ wopAddTempRoot = 11,
+ wopAddIndirectRoot = 12,
+ wopSyncWithGC = 13,
+ wopFindRoots = 14,
+ wopExportPath = 16, // obsolete
+ wopQueryDeriver = 18, // obsolete
+ wopSetOptions = 19,
+ wopCollectGarbage = 20,
+ wopQuerySubstitutablePathInfo = 21,
+ wopQueryDerivationOutputs = 22,
+ wopQueryAllValidPaths = 23,
+ wopQueryFailedPaths = 24,
+ wopClearFailedPaths = 25,
+ wopQueryPathInfo = 26,
+ wopImportPaths = 27, // obsolete
+ wopQueryDerivationOutputNames = 28,
+ wopQueryPathFromHashPart = 29,
+ wopQuerySubstitutablePathInfos = 30,
+ wopQueryValidPaths = 31,
+ wopQuerySubstitutablePaths = 32,
+ wopQueryValidDerivers = 33,
+ wopOptimiseStore = 34,
+ wopVerifyStore = 35,
+ wopBuildDerivation = 36,
+ wopAddSignatures = 37,
+ wopNarFromPath = 38,
+ wopAddToStoreNar = 39,
+ wopQueryMissing = 40,
+} WorkerOp;
+
+
+#define STDERR_NEXT 0x6f6c6d67
+#define STDERR_READ 0x64617461 // data needed from source
+#define STDERR_WRITE 0x64617416 // data for sink
+#define STDERR_LAST 0x616c7473
+#define STDERR_ERROR 0x63787470
+
+
+Path readStorePath(Store & store, Source & from);
+template<class T> T readStorePaths(Store & store, Source & from);
+
+
+}
diff --git a/src/libutil/affinity.cc b/src/libutil/affinity.cc
new file mode 100644
index 000000000..98f8287ad
--- /dev/null
+++ b/src/libutil/affinity.cc
@@ -0,0 +1,55 @@
+#include "types.hh"
+#include "util.hh"
+#include "affinity.hh"
+
+#if __linux__
+#include <sched.h>
+#endif
+
+namespace nix {
+
+
+#if __linux__
+static bool didSaveAffinity = false;
+static cpu_set_t savedAffinity;
+#endif
+
+
+void setAffinityTo(int cpu)
+{
+#if __linux__
+ if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
+ didSaveAffinity = true;
+ debug(format("locking this thread to CPU %1%") % cpu);
+ cpu_set_t newAffinity;
+ CPU_ZERO(&newAffinity);
+ CPU_SET(cpu, &newAffinity);
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
+ printError(format("failed to lock thread to CPU %1%") % cpu);
+#endif
+}
+
+
+int lockToCurrentCPU()
+{
+#if __linux__
+ int cpu = sched_getcpu();
+ if (cpu != -1) setAffinityTo(cpu);
+ return cpu;
+#else
+ return -1;
+#endif
+}
+
+
+void restoreAffinity()
+{
+#if __linux__
+ if (!didSaveAffinity) return;
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
+ printError("failed to restore affinity %1%");
+#endif
+}
+
+
+}
diff --git a/src/libutil/affinity.hh b/src/libutil/affinity.hh
new file mode 100644
index 000000000..c1bd28e13
--- /dev/null
+++ b/src/libutil/affinity.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+namespace nix {
+
+void setAffinityTo(int cpu);
+int lockToCurrentCPU();
+void restoreAffinity();
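+
+/* Usage sketch: pin the current thread for CPU-bound work and undo the
+   pinning afterwards:
+
+       lockToCurrentCPU();     // returns the CPU number, or -1 on non-Linux
+       // ... do CPU-bound work ...
+       restoreAffinity();
+*/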
+
+}
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
new file mode 100644
index 000000000..e0e6f5dfd
--- /dev/null
+++ b/src/libutil/archive.cc
@@ -0,0 +1,351 @@
+#include <cerrno>
+#include <algorithm>
+#include <vector>
+#include <map>
+
+#include <strings.h> // for strcasecmp
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+
+#include "archive.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+bool useCaseHack =
+#if __APPLE__
+ true;
+#else
+ false;
+#endif
+
+const std::string narVersionMagic1 = "nix-archive-1";
+
+static string caseHackSuffix = "~nix~case~hack~";
+
+PathFilter defaultPathFilter;
+
+
+static void dumpContents(const Path & path, size_t size,
+ Sink & sink)
+{
+ sink << "contents" << size;
+
+ AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+ if (!fd) throw SysError(format("opening file ‘%1%’") % path);
+
+ unsigned char buf[65536];
+ size_t left = size;
+
+ while (left > 0) {
+ size_t n = left > sizeof(buf) ? sizeof(buf) : left;
+ readFull(fd.get(), buf, n);
+ left -= n;
+ sink(buf, n);
+ }
+
+ writePadding(size, sink);
+}
+
+
+static void dump(const Path & path, Sink & sink, PathFilter & filter)
+{
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ sink << "(";
+
+ if (S_ISREG(st.st_mode)) {
+ sink << "type" << "regular";
+ if (st.st_mode & S_IXUSR)
+ sink << "executable" << "";
+ dumpContents(path, (size_t) st.st_size, sink);
+ }
+
+ else if (S_ISDIR(st.st_mode)) {
+ sink << "type" << "directory";
+
+ /* If we're on a case-insensitive system like Mac OS X, undo
+ the case hack applied by restorePath(). */
+ std::map<string, string> unhacked;
+ for (auto & i : readDirectory(path))
+ if (useCaseHack) {
+ string name(i.name);
+ size_t pos = i.name.find(caseHackSuffix);
+ if (pos != string::npos) {
+ debug(format("removing case hack suffix from ‘%1%’") % (path + "/" + i.name));
+ name.erase(pos);
+ }
+ if (unhacked.find(name) != unhacked.end())
+ throw Error(format("file name collision in between ‘%1%’ and ‘%2%’")
+ % (path + "/" + unhacked[name]) % (path + "/" + i.name));
+ unhacked[name] = i.name;
+ } else
+ unhacked[i.name] = i.name;
+
+ for (auto & i : unhacked)
+ if (filter(path + "/" + i.first)) {
+ sink << "entry" << "(" << "name" << i.first << "node";
+ dump(path + "/" + i.second, sink, filter);
+ sink << ")";
+ }
+ }
+
+ else if (S_ISLNK(st.st_mode))
+ sink << "type" << "symlink" << "target" << readLink(path);
+
+ else throw Error(format("file ‘%1%’ has an unsupported type") % path);
+
+ sink << ")";
+}
+
+
+void dumpPath(const Path & path, Sink & sink, PathFilter & filter)
+{
+ sink << narVersionMagic1;
+ dump(path, sink, filter);
+}
+
+
+void dumpString(const std::string & s, Sink & sink)
+{
+ sink << narVersionMagic1 << "(" << "type" << "regular" << "contents" << s << ")";
+}
+
+
+static SerialisationError badArchive(string s)
+{
+ return SerialisationError("bad archive: " + s);
+}
+
+
+#if 0
+static void skipGeneric(Source & source)
+{
+ if (readString(source) == "(") {
+ while (readString(source) != ")")
+ skipGeneric(source);
+ }
+}
+#endif
+
+
+static void parseContents(ParseSink & sink, Source & source, const Path & path)
+{
+ unsigned long long size = readLongLong(source);
+
+ sink.preallocateContents(size);
+
+ unsigned long long left = size;
+ unsigned char buf[65536];
+
+ while (left) {
+ checkInterrupt();
+ unsigned int n = sizeof(buf);
+ if ((unsigned long long) n > left) n = left;
+ source(buf, n);
+ sink.receiveContents(buf, n);
+ left -= n;
+ }
+
+ readPadding(size, source);
+}
+
+
+struct CaseInsensitiveCompare
+{
+ bool operator() (const string & a, const string & b) const
+ {
+ return strcasecmp(a.c_str(), b.c_str()) < 0;
+ }
+};
+
+
+static void parse(ParseSink & sink, Source & source, const Path & path)
+{
+ string s;
+
+ s = readString(source);
+ if (s != "(") throw badArchive("expected open tag");
+
+ enum { tpUnknown, tpRegular, tpDirectory, tpSymlink } type = tpUnknown;
+
+ std::map<Path, int, CaseInsensitiveCompare> names;
+
+ while (1) {
+ checkInterrupt();
+
+ s = readString(source);
+
+ if (s == ")") {
+ break;
+ }
+
+ else if (s == "type") {
+ if (type != tpUnknown)
+ throw badArchive("multiple type fields");
+ string t = readString(source);
+
+ if (t == "regular") {
+ type = tpRegular;
+ sink.createRegularFile(path);
+ }
+
+ else if (t == "directory") {
+ sink.createDirectory(path);
+ type = tpDirectory;
+ }
+
+ else if (t == "symlink") {
+ type = tpSymlink;
+ }
+
+ else throw badArchive("unknown file type " + t);
+
+ }
+
+ else if (s == "contents" && type == tpRegular) {
+ parseContents(sink, source, path);
+ }
+
+ else if (s == "executable" && type == tpRegular) {
+ auto s = readString(source);
+ if (s != "") throw badArchive("executable marker has non-empty value");
+ sink.isExecutable();
+ }
+
+ else if (s == "entry" && type == tpDirectory) {
+ string name, prevName;
+
+ s = readString(source);
+ if (s != "(") throw badArchive("expected open tag");
+
+ while (1) {
+ checkInterrupt();
+
+ s = readString(source);
+
+ if (s == ")") {
+ break;
+ } else if (s == "name") {
+ name = readString(source);
+ if (name.empty() || name == "." || name == ".." || name.find('/') != string::npos || name.find((char) 0) != string::npos)
+ throw Error(format("NAR contains invalid file name ‘%1%’") % name);
+ if (name <= prevName)
+ throw Error("NAR directory is not sorted");
+ prevName = name;
+ if (useCaseHack) {
+ auto i = names.find(name);
+ if (i != names.end()) {
+ debug(format("case collision between ‘%1%’ and ‘%2%’") % i->first % name);
+ name += caseHackSuffix;
+ name += std::to_string(++i->second);
+ } else
+ names[name] = 0;
+ }
+ } else if (s == "node") {
+ if (name.empty()) throw badArchive("entry name missing");
+ parse(sink, source, path + "/" + name);
+ } else
+ throw badArchive("unknown field " + s);
+ }
+ }
+
+ else if (s == "target" && type == tpSymlink) {
+ string target = readString(source);
+ sink.createSymlink(path, target);
+ }
+
+ else
+ throw badArchive("unknown field " + s);
+ }
+}
+
+
+void parseDump(ParseSink & sink, Source & source)
+{
+ string version;
+ try {
+ version = readString(source);
+ } catch (SerialisationError & e) {
+ /* This generally means the integer at the start couldn't be
+ decoded. Ignore and throw the exception below. */
+ }
+ if (version != narVersionMagic1)
+ throw badArchive("input doesn't look like a Nix archive");
+ parse(sink, source, "");
+}
+
+
+struct RestoreSink : ParseSink
+{
+ Path dstPath;
+ AutoCloseFD fd;
+
+ void createDirectory(const Path & path)
+ {
+ Path p = dstPath + path;
+ if (mkdir(p.c_str(), 0777) == -1)
+ throw SysError(format("creating directory ‘%1%’") % p);
+ };
+
+ void createRegularFile(const Path & path)
+ {
+ Path p = dstPath + path;
+ fd = open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY | O_CLOEXEC, 0666);
+ if (!fd) throw SysError(format("creating file ‘%1%’") % p);
+ }
+
+ void isExecutable()
+ {
+ struct stat st;
+ if (fstat(fd.get(), &st) == -1)
+ throw SysError("fstat");
+ if (fchmod(fd.get(), st.st_mode | (S_IXUSR | S_IXGRP | S_IXOTH)) == -1)
+ throw SysError("fchmod");
+ }
+
+ void preallocateContents(unsigned long long len)
+ {
+#if HAVE_POSIX_FALLOCATE
+ if (len) {
+ errno = posix_fallocate(fd.get(), 0, len);
+ /* Note that EINVAL may indicate that the underlying
+ filesystem doesn't support preallocation (e.g. on
+ OpenSolaris). Since preallocation is just an
+ optimisation, ignore it. */
+ if (errno && errno != EINVAL)
+ throw SysError(format("preallocating file of %1% bytes") % len);
+ }
+#endif
+ }
+
+ void receiveContents(unsigned char * data, unsigned int len)
+ {
+ writeFull(fd.get(), data, len);
+ }
+
+ void createSymlink(const Path & path, const string & target)
+ {
+ Path p = dstPath + path;
+ nix::createSymlink(target, p);
+ }
+};
+
+
+void restorePath(const Path & path, Source & source)
+{
+ RestoreSink sink;
+ sink.dstPath = path;
+ parseDump(sink, source);
+}
+
+
+}
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
new file mode 100644
index 000000000..607ebf8b2
--- /dev/null
+++ b/src/libutil/archive.hh
@@ -0,0 +1,92 @@
+#pragma once
+
+#include "types.hh"
+#include "serialise.hh"
+
+
+namespace nix {
+
+
+/* dumpPath creates a Nix archive of the specified path. The format
+ is as follows:
+
+ IF path points to a REGULAR FILE:
+ dump(path) = attrs(
+ [ ("type", "regular")
+ , ("contents", contents(path))
+ ])
+
+ IF path points to a DIRECTORY:
+ dump(path) = attrs(
+ [ ("type", "directory")
+ , ("entries", concat(map(f, sort(entries(path)))))
+ ])
+ where f(fn) = attrs(
+ [ ("name", fn)
+ , ("file", dump(path + "/" + fn))
+ ])
+
+ where:
+
+ attrs(as) = concat(map(attr, as)) + encN(0)
+ attr((a, b)) = encS(a) + encS(b)
+
+ encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary)
+
+ encN(n) = 64-bit little-endian encoding of n.
+
+ contents(path) = the contents of a regular file.
+
+ sort(strings) = lexicographic sort by 8-bit value (strcmp).
+
+ entries(path) = the entries of a directory, without `.' and
+ `..'.
+
+ `+' denotes string concatenation. */
+
+struct PathFilter
+{
+ virtual ~PathFilter() { }
+ virtual bool operator () (const Path & path) { return true; }
+};
+
+extern PathFilter defaultPathFilter;
+
+void dumpPath(const Path & path, Sink & sink,
+ PathFilter & filter = defaultPathFilter);
+
+void dumpString(const std::string & s, Sink & sink);
+
+/* FIXME: fix this API, it sucks. */
+struct ParseSink
+{
+ virtual void createDirectory(const Path & path) { };
+
+ virtual void createRegularFile(const Path & path) { };
+ virtual void isExecutable() { };
+ virtual void preallocateContents(unsigned long long size) { };
+ virtual void receiveContents(unsigned char * data, unsigned int len) { };
+
+ virtual void createSymlink(const Path & path, const string & target) { };
+};
+
+struct TeeSink : ParseSink
+{
+ TeeSource source;
+
+ TeeSink(Source & source) : source(source) { }
+};
+
+void parseDump(ParseSink & sink, Source & source);
+
+void restorePath(const Path & path, Source & source);
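+
+/* Usage sketch: serialise a path to a NAR and restore it elsewhere.
+   This assumes the StringSink/StringSource helpers from serialise.hh
+   (included above); the paths are placeholders:
+
+       StringSink sink;
+       dumpPath("/some/source/path", sink);
+       StringSource source(*sink.s);
+       restorePath("/some/destination/path", source);
+*/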
+
+
+// FIXME: global variables are bad m'kay.
+extern bool useCaseHack;
+
+
+extern const std::string narVersionMagic1;
+
+
+}
diff --git a/src/libutil/args.cc b/src/libutil/args.cc
new file mode 100644
index 000000000..115484f9e
--- /dev/null
+++ b/src/libutil/args.cc
@@ -0,0 +1,180 @@
+#include "args.hh"
+#include "hash.hh"
+
+namespace nix {
+
+void Args::parseCmdline(const Strings & _cmdline)
+{
+ Strings pendingArgs;
+ bool dashDash = false;
+
+ Strings cmdline(_cmdline);
+
+ for (auto pos = cmdline.begin(); pos != cmdline.end(); ) {
+
+ auto arg = *pos;
+
+ /* Expand compound dash options (i.e., `-qlf' -> `-q -l -f',
+ `-j3` -> `-j 3`). */
+ if (!dashDash && arg.length() > 2 && arg[0] == '-' && arg[1] != '-' && isalpha(arg[1])) {
+ *pos = (string) "-" + arg[1];
+ auto next = pos; ++next;
+ for (unsigned int j = 2; j < arg.length(); j++)
+ if (isalpha(arg[j]))
+ cmdline.insert(next, (string) "-" + arg[j]);
+ else {
+ cmdline.insert(next, string(arg, j));
+ break;
+ }
+ arg = *pos;
+ }
+
+ if (!dashDash && arg == "--") {
+ dashDash = true;
+ ++pos;
+ }
+ else if (!dashDash && std::string(arg, 0, 1) == "-") {
+ if (!processFlag(pos, cmdline.end()))
+ throw UsageError(format("unrecognised flag ‘%1%’") % arg);
+ }
+ else {
+ pendingArgs.push_back(*pos++);
+ if (processArgs(pendingArgs, false))
+ pendingArgs.clear();
+ }
+ }
+
+ processArgs(pendingArgs, true);
+}
+
+void Args::printHelp(const string & programName, std::ostream & out)
+{
+ std::cout << "Usage: " << programName << " <FLAGS>...";
+ for (auto & exp : expectedArgs) {
+ std::cout << renderLabels({exp.label});
+ // FIXME: handle arity > 1
+ if (exp.arity == 0) std::cout << "...";
+ }
+ std::cout << "\n";
+
+ auto s = description();
+ if (s != "")
+ std::cout << "\nSummary: " << s << ".\n";
+
+ if (longFlags.size()) {
+ std::cout << "\n";
+ std::cout << "Flags:\n";
+ printFlags(out);
+ }
+}
+
+void Args::printFlags(std::ostream & out)
+{
+ Table2 table;
+ for (auto & flag : longFlags)
+ table.push_back(std::make_pair(
+ (flag.second.shortName ? std::string("-") + flag.second.shortName + ", " : " ")
+ + "--" + flag.first + renderLabels(flag.second.labels),
+ flag.second.description));
+ printTable(out, table);
+}
+
+bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
+{
+ assert(pos != end);
+
+ auto process = [&](const std::string & name, const Flag & flag) -> bool {
+ ++pos;
+ Strings args;
+ for (size_t n = 0 ; n < flag.arity; ++n) {
+ if (pos == end)
+ throw UsageError(format("flag ‘%1%’ requires %2% argument(s)")
+ % name % flag.arity);
+ args.push_back(*pos++);
+ }
+ flag.handler(args);
+ return true;
+ };
+
+ if (string(*pos, 0, 2) == "--") {
+ auto i = longFlags.find(string(*pos, 2));
+ if (i == longFlags.end()) return false;
+ return process("--" + i->first, i->second);
+ }
+
+ if (string(*pos, 0, 1) == "-" && pos->size() == 2) {
+ auto c = (*pos)[1];
+ auto i = shortFlags.find(c);
+ if (i == shortFlags.end()) return false;
+ return process(std::string("-") + c, i->second);
+ }
+
+ return false;
+}
+
+bool Args::processArgs(const Strings & args, bool finish)
+{
+ if (expectedArgs.empty()) {
+ if (!args.empty())
+ throw UsageError(format("unexpected argument ‘%1%’") % args.front());
+ return true;
+ }
+
+ auto & exp = expectedArgs.front();
+
+ bool res = false;
+
+ if ((exp.arity == 0 && finish) ||
+ (exp.arity > 0 && args.size() == exp.arity))
+ {
+ exp.handler(args);
+ expectedArgs.pop_front();
+ res = true;
+ }
+
+ if (finish && !expectedArgs.empty())
+ throw UsageError("more arguments are required");
+
+ return res;
+}
+
+void Args::mkHashTypeFlag(const std::string & name, HashType * ht)
+{
+ mkFlag1(0, name, "TYPE", "hash algorithm (‘md5’, ‘sha1’, ‘sha256’, or ‘sha512’)", [=](std::string s) {
+ *ht = parseHashType(s);
+ if (*ht == htUnknown)
+ throw UsageError(format("unknown hash type ‘%1%’") % s);
+ });
+}
+
+Strings argvToStrings(int argc, char * * argv)
+{
+ Strings args;
+ argc--; argv++;
+ while (argc--) args.push_back(*argv++);
+ return args;
+}
+
+std::string renderLabels(const Strings & labels)
+{
+ std::string res;
+ for (auto label : labels) {
+ for (auto & c : label) c = std::toupper(c);
+ res += " <" + label + ">";
+ }
+ return res;
+}
+
+void printTable(std::ostream & out, const Table2 & table)
+{
+ size_t max = 0;
+ for (auto & row : table)
+ max = std::max(max, row.first.size());
+ for (auto & row : table) {
+ out << " " << row.first
+ << std::string(max - row.first.size() + 2, ' ')
+ << row.second << "\n";
+ }
+}
+
+}
diff --git a/src/libutil/args.hh b/src/libutil/args.hh
new file mode 100644
index 000000000..ac12f8be6
--- /dev/null
+++ b/src/libutil/args.hh
@@ -0,0 +1,163 @@
+#pragma once
+
+#include <iostream>
+#include <map>
+#include <memory>
+
+#include "util.hh"
+
+namespace nix {
+
+MakeError(UsageError, Error);
+
+enum HashType : char;
+
+class Args
+{
+public:
+
+ /* Parse the command line, throwing a UsageError if something goes
+ wrong. */
+ void parseCmdline(const Strings & cmdline);
+
+ virtual void printHelp(const string & programName, std::ostream & out);
+
+ virtual std::string description() { return ""; }
+
+protected:
+
+ /* Flags. */
+ struct Flag
+ {
+ char shortName;
+ std::string description;
+ Strings labels;
+ size_t arity;
+ std::function<void(Strings)> handler;
+ };
+
+ std::map<std::string, Flag> longFlags;
+ std::map<char, Flag> shortFlags;
+
+ virtual bool processFlag(Strings::iterator & pos, Strings::iterator end);
+
+ void printFlags(std::ostream & out);
+
+ /* Positional arguments. */
+ struct ExpectedArg
+ {
+ std::string label;
+ size_t arity; // 0 = any
+ std::function<void(Strings)> handler;
+ };
+
+ std::list<ExpectedArg> expectedArgs;
+
+ virtual bool processArgs(const Strings & args, bool finish);
+
+public:
+
+ /* Helper functions for constructing flags / positional
+ arguments. */
+
+ void mkFlag(char shortName, const std::string & longName,
+ const Strings & labels, const std::string & description,
+ size_t arity, std::function<void(Strings)> handler)
+ {
+ auto flag = Flag{shortName, description, labels, arity, handler};
+ if (shortName) shortFlags[shortName] = flag;
+ longFlags[longName] = flag;
+ }
+
+ void mkFlag(char shortName, const std::string & longName,
+ const std::string & description, std::function<void()> fun)
+ {
+ mkFlag(shortName, longName, {}, description, 0, std::bind(fun));
+ }
+
+ void mkFlag1(char shortName, const std::string & longName,
+ const std::string & label, const std::string & description,
+ std::function<void(std::string)> fun)
+ {
+ mkFlag(shortName, longName, {label}, description, 1, [=](Strings ss) {
+ fun(ss.front());
+ });
+ }
+
+ void mkFlag(char shortName, const std::string & name,
+ const std::string & description, bool * dest)
+ {
+ mkFlag(shortName, name, description, dest, true);
+ }
+
+ void mkFlag(char shortName, const std::string & longName,
+ const std::string & label, const std::string & description,
+ string * dest)
+ {
+ mkFlag1(shortName, longName, label, description, [=](std::string s) {
+ *dest = s;
+ });
+ }
+
+ void mkHashTypeFlag(const std::string & name, HashType * ht);
+
+ template<class T>
+ void mkFlag(char shortName, const std::string & longName, const std::string & description,
+ T * dest, const T & value)
+ {
+ mkFlag(shortName, longName, {}, description, 0, [=](Strings ss) {
+ *dest = value;
+ });
+ }
+
+ template<class I>
+ void mkIntFlag(char shortName, const std::string & longName,
+ const std::string & description, I * dest)
+ {
+ mkFlag<I>(shortName, longName, description, [=](I n) {
+ *dest = n;
+ });
+ }
+
+ template<class I>
+ void mkFlag(char shortName, const std::string & longName,
+ const std::string & description, std::function<void(I)> fun)
+ {
+ mkFlag(shortName, longName, {"N"}, description, 1, [=](Strings ss) {
+ I n;
+ if (!string2Int(ss.front(), n))
+ throw UsageError(format("flag ‘--%1%’ requires a integer argument") % longName);
+ fun(n);
+ });
+ }
+
+ /* Expect a string argument. */
+ void expectArg(const std::string & label, string * dest)
+ {
+ expectedArgs.push_back(ExpectedArg{label, 1, [=](Strings ss) {
+ *dest = ss.front();
+ }});
+ }
+
+ /* Expect 0 or more arguments. */
+ void expectArgs(const std::string & label, Strings * dest)
+ {
+ expectedArgs.push_back(ExpectedArg{label, 0, [=](Strings ss) {
+ *dest = ss;
+ }});
+ }
+
+ friend class MultiCommand;
+};
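+
+/* Sketch of a typical subclass (‘MyArgs’ is hypothetical, shown only to
+   illustrate the helpers above):
+
+       struct MyArgs : Args
+       {
+           bool verbose = false;
+           string file;
+
+           MyArgs()
+           {
+               mkFlag('v', "verbose", "enable verbose output", &verbose);
+               expectArg("FILE", &file);
+           }
+       };
+
+       // in main(): MyArgs args; args.parseCmdline(argvToStrings(argc, argv));
+*/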
+
+Strings argvToStrings(int argc, char * * argv);
+
+/* Helper function for rendering argument labels. */
+std::string renderLabels(const Strings & labels);
+
+/* Helper function for printing 2-column tables. */
+typedef std::vector<std::pair<std::string, std::string>> Table2;
+
+void printTable(std::ostream & out, const Table2 & table);
+
+}
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
new file mode 100644
index 000000000..b0b1d709f
--- /dev/null
+++ b/src/libutil/compression.cc
@@ -0,0 +1,315 @@
+#include "compression.hh"
+#include "util.hh"
+#include "finally.hh"
+
+#include <lzma.h>
+#include <bzlib.h>
+#include <cstdio>
+#include <cstring>
+
+#include <iostream>
+
+namespace nix {
+
+static ref<std::string> decompressXZ(const std::string & in)
+{
+ lzma_stream strm(LZMA_STREAM_INIT);
+
+ lzma_ret ret = lzma_stream_decoder(
+ &strm, UINT64_MAX, LZMA_CONCATENATED);
+ if (ret != LZMA_OK)
+ throw CompressionError("unable to initialise lzma decoder");
+
+ Finally free([&]() { lzma_end(&strm); });
+
+ lzma_action action = LZMA_RUN;
+ uint8_t outbuf[BUFSIZ];
+ ref<std::string> res = make_ref<std::string>();
+ strm.next_in = (uint8_t *) in.c_str();
+ strm.avail_in = in.size();
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+
+ while (true) {
+ checkInterrupt();
+
+ if (strm.avail_in == 0)
+ action = LZMA_FINISH;
+
+ lzma_ret ret = lzma_code(&strm, action);
+
+ if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
+ res->append((char *) outbuf, sizeof(outbuf) - strm.avail_out);
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ if (ret == LZMA_STREAM_END)
+ return res;
+
+ if (ret != LZMA_OK)
+ throw CompressionError("error %d while decompressing xz file", ret);
+ }
+}
+
+static ref<std::string> decompressBzip2(const std::string & in)
+{
+ bz_stream strm;
+ memset(&strm, 0, sizeof(strm));
+
+ int ret = BZ2_bzDecompressInit(&strm, 0, 0);
+ if (ret != BZ_OK)
+ throw CompressionError("unable to initialise bzip2 decoder");
+
+ Finally free([&]() { BZ2_bzDecompressEnd(&strm); });
+
+ char outbuf[BUFSIZ];
+ ref<std::string> res = make_ref<std::string>();
+ strm.next_in = (char *) in.c_str();
+ strm.avail_in = in.size();
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+
+ while (true) {
+ checkInterrupt();
+
+ int ret = BZ2_bzDecompress(&strm);
+
+ if (strm.avail_out == 0 || ret == BZ_STREAM_END) {
+ res->append(outbuf, sizeof(outbuf) - strm.avail_out);
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ if (ret == BZ_STREAM_END)
+ return res;
+
+ if (ret != BZ_OK)
+ throw CompressionError("error while decompressing bzip2 file");
+
+ if (strm.avail_in == 0)
+ throw CompressionError("bzip2 data ends prematurely");
+ }
+}
+
+static ref<std::string> decompressBrotli(const std::string & in)
+{
+ // FIXME: use libbrotli
+ return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in}));
+}
+
+ref<std::string> compress(const std::string & method, const std::string & in)
+{
+ StringSink ssink;
+ auto sink = makeCompressionSink(method, ssink);
+ (*sink)(in);
+ sink->finish();
+ return ssink.s;
+}
+
+ref<std::string> decompress(const std::string & method, const std::string & in)
+{
+ if (method == "none")
+ return make_ref<std::string>(in);
+ else if (method == "xz")
+ return decompressXZ(in);
+ else if (method == "bzip2")
+ return decompressBzip2(in);
+ else if (method == "br")
+ return decompressBrotli(in);
+ else
+ throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
+}
+
+struct NoneSink : CompressionSink
+{
+ Sink & nextSink;
+ NoneSink(Sink & nextSink) : nextSink(nextSink) { }
+ void finish() override { flush(); }
+ void write(const unsigned char * data, size_t len) override { nextSink(data, len); }
+};
+
+struct XzSink : CompressionSink
+{
+ Sink & nextSink;
+ uint8_t outbuf[BUFSIZ];
+ lzma_stream strm = LZMA_STREAM_INIT;
+ bool finished = false;
+
+ XzSink(Sink & nextSink) : nextSink(nextSink)
+ {
+ lzma_ret ret = lzma_easy_encoder(
+ &strm, 6, LZMA_CHECK_CRC64);
+ if (ret != LZMA_OK)
+ throw CompressionError("unable to initialise lzma encoder");
+ // FIXME: apply the x86 BCJ filter?
+
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ ~XzSink()
+ {
+ lzma_end(&strm);
+ }
+
+ void finish() override
+ {
+ CompressionSink::flush();
+
+ assert(!finished);
+ finished = true;
+
+ while (true) {
+ checkInterrupt();
+
+ lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
+ if (ret != LZMA_OK && ret != LZMA_STREAM_END)
+ throw CompressionError("error while flushing xz file");
+
+ if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
+ nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ if (ret == LZMA_STREAM_END) break;
+ }
+ }
+
+ void write(const unsigned char * data, size_t len) override
+ {
+ assert(!finished);
+
+ strm.next_in = data;
+ strm.avail_in = len;
+
+ while (strm.avail_in) {
+ checkInterrupt();
+
+ lzma_ret ret = lzma_code(&strm, LZMA_RUN);
+ if (ret != LZMA_OK)
+ throw CompressionError("error while compressing xz file");
+
+ if (strm.avail_out == 0) {
+ nextSink(outbuf, sizeof(outbuf));
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+ }
+ }
+};
+
+struct BzipSink : CompressionSink
+{
+ Sink & nextSink;
+ char outbuf[BUFSIZ];
+ bz_stream strm;
+ bool finished = false;
+
+ BzipSink(Sink & nextSink) : nextSink(nextSink)
+ {
+ memset(&strm, 0, sizeof(strm));
+ int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
+ if (ret != BZ_OK)
+ throw CompressionError("unable to initialise bzip2 encoder");
+
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ ~BzipSink()
+ {
+ BZ2_bzCompressEnd(&strm);
+ }
+
+ void finish() override
+ {
+ flush();
+
+ assert(!finished);
+ finished = true;
+
+ while (true) {
+ checkInterrupt();
+
+ int ret = BZ2_bzCompress(&strm, BZ_FINISH);
+ if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
+ throw CompressionError("error while flushing bzip2 file");
+
+ if (strm.avail_out == 0 || ret == BZ_STREAM_END) {
+ nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out);
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+
+ if (ret == BZ_STREAM_END) break;
+ }
+ }
+
+ void write(const unsigned char * data, size_t len) override
+ {
+ assert(!finished);
+
+ strm.next_in = (char *) data;
+ strm.avail_in = len;
+
+ while (strm.avail_in) {
+ checkInterrupt();
+
+ int ret = BZ2_bzCompress(&strm, BZ_RUN);
+ if (ret != BZ_OK)
+ CompressionError("error while compressing bzip2 file");
+
+ if (strm.avail_out == 0) {
+ nextSink((unsigned char *) outbuf, sizeof(outbuf));
+ strm.next_out = outbuf;
+ strm.avail_out = sizeof(outbuf);
+ }
+ }
+ }
+};
+
+struct BrotliSink : CompressionSink
+{
+ Sink & nextSink;
+ std::string data;
+
+ BrotliSink(Sink & nextSink) : nextSink(nextSink)
+ {
+ }
+
+ ~BrotliSink()
+ {
+ }
+
+ // FIXME: use libbrotli
+
+ void finish() override
+ {
+ flush();
+ nextSink(runProgram(BRO, true, {}, data));
+ }
+
+ void write(const unsigned char * data, size_t len) override
+ {
+ checkInterrupt();
+ this->data.append((const char *) data, len);
+ }
+};
+
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink)
+{
+ if (method == "none")
+ return make_ref<NoneSink>(nextSink);
+ else if (method == "xz")
+ return make_ref<XzSink>(nextSink);
+ else if (method == "bzip2")
+ return make_ref<BzipSink>(nextSink);
+ else if (method == "br")
+ return make_ref<BrotliSink>(nextSink);
+ else
+ throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
+}
+
+}
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
new file mode 100644
index 000000000..e3e6f5a99
--- /dev/null
+++ b/src/libutil/compression.hh
@@ -0,0 +1,26 @@
+#pragma once
+
+#include "ref.hh"
+#include "types.hh"
+#include "serialise.hh"
+
+#include <string>
+
+namespace nix {
+
+ref<std::string> compress(const std::string & method, const std::string & in);
+
+ref<std::string> decompress(const std::string & method, const std::string & in);
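+
+/* Usage sketch: round-trip a string through xz compression using the
+   two helpers above:
+
+       ref<std::string> packed = compress("xz", "hello world");
+       ref<std::string> plain = decompress("xz", *packed);
+       assert(*plain == "hello world");
+*/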
+
+struct CompressionSink : BufferedSink
+{
+ virtual void finish() = 0;
+};
+
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink);
+
+MakeError(UnknownCompressionMethod, Error);
+
+MakeError(CompressionError, Error);
+
+}
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
new file mode 100644
index 000000000..62c6433c7
--- /dev/null
+++ b/src/libutil/config.cc
@@ -0,0 +1,230 @@
+#include "config.hh"
+#include "args.hh"
+#include "json.hh"
+
+namespace nix {
+
+void Config::set(const std::string & name, const std::string & value)
+{
+ auto i = _settings.find(name);
+ if (i == _settings.end())
+ throw UsageError("unknown setting '%s'", name);
+ i->second.setting->set(value);
+ i->second.setting->overriden = true;
+}
+
+void Config::addSetting(AbstractSetting * setting)
+{
+ _settings.emplace(setting->name, Config::SettingData{false, setting});
+ for (auto & alias : setting->aliases)
+ _settings.emplace(alias, Config::SettingData{true, setting});
+
+ bool set = false;
+
+ auto i = initials.find(setting->name);
+ if (i != initials.end()) {
+ setting->set(i->second);
+ setting->overriden = true;
+ initials.erase(i);
+ set = true;
+ }
+
+ for (auto & alias : setting->aliases) {
+ auto i = initials.find(alias);
+ if (i != initials.end()) {
+ if (set)
+ warn("setting '%s' is set, but it's an alias of '%s' which is also set",
+ alias, setting->name);
+ else {
+ setting->set(i->second);
+ setting->overriden = true;
+ initials.erase(i);
+ set = true;
+ }
+ }
+ }
+}
+
+void Config::warnUnknownSettings()
+{
+ for (auto & i : initials)
+ warn("unknown setting '%s'", i.first);
+}
+
+StringMap Config::getSettings(bool overridenOnly)
+{
+ StringMap res;
+ for (auto & opt : _settings)
+ if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden))
+ res.emplace(opt.first, opt.second.setting->to_string());
+ return res;
+}
+
+void Config::applyConfigFile(const Path & path, bool fatal)
+{
+ try {
+ string contents = readFile(path);
+
+ unsigned int pos = 0;
+
+ while (pos < contents.size()) {
+ string line;
+ while (pos < contents.size() && contents[pos] != '\n')
+ line += contents[pos++];
+ pos++;
+
+ string::size_type hash = line.find('#');
+ if (hash != string::npos)
+ line = string(line, 0, hash);
+
+ vector<string> tokens = tokenizeString<vector<string> >(line);
+ if (tokens.empty()) continue;
+
+ if (tokens.size() < 2 || tokens[1] != "=")
+ throw UsageError("illegal configuration line ‘%1%’ in ‘%2%’", line, path);
+
+ string name = tokens[0];
+
+ vector<string>::iterator i = tokens.begin();
+ advance(i, 2);
+
+ try {
+ set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow
+ } catch (UsageError & e) {
+ if (fatal) throw;
+ warn("in configuration file '%s': %s", path, e.what());
+ }
+ };
+ } catch (SysError &) { }
+}
+
+void Config::resetOverriden()
+{
+ for (auto & s : _settings)
+ s.second.setting->overriden = false;
+}
+
+void Config::toJSON(JSONObject & out)
+{
+ for (auto & s : _settings)
+ if (!s.second.isAlias) {
+ JSONObject out2(out.object(s.first));
+ out2.attr("description", s.second.setting->description);
+ JSONPlaceholder out3(out2.placeholder("value"));
+ s.second.setting->toJSON(out3);
+ }
+}
+
+AbstractSetting::AbstractSetting(
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases)
+ : name(name), description(description), aliases(aliases)
+{
+}
+
+void AbstractSetting::toJSON(JSONPlaceholder & out)
+{
+ out.write(to_string());
+}
+
+template<typename T>
+void BaseSetting<T>::toJSON(JSONPlaceholder & out)
+{
+ out.write(value);
+}
+
+template<> void BaseSetting<std::string>::set(const std::string & str)
+{
+ value = str;
+}
+
+template<> std::string BaseSetting<std::string>::to_string()
+{
+ return value;
+}
+
+template<typename T>
+void BaseSetting<T>::set(const std::string & str)
+{
+ static_assert(std::is_integral<T>::value, "Integer required.");
+ if (!string2Int(str, value))
+ throw UsageError("setting '%s' has invalid value '%s'", name, str);
+}
+
+template<typename T>
+std::string BaseSetting<T>::to_string()
+{
+ static_assert(std::is_integral<T>::value, "Integer required.");
+ return std::to_string(value);
+}
+
+template<> void BaseSetting<bool>::set(const std::string & str)
+{
+ if (str == "true" || str == "yes" || str == "1")
+ value = true;
+ else if (str == "false" || str == "no" || str == "0")
+ value = false;
+ else
+ throw UsageError("Boolean setting '%s' has invalid value '%s'", name, str);
+}
+
+template<> std::string BaseSetting<bool>::to_string()
+{
+ return value ? "true" : "false";
+}
+
+template<> void BaseSetting<Strings>::set(const std::string & str)
+{
+ value = tokenizeString<Strings>(str);
+}
+
+template<> std::string BaseSetting<Strings>::to_string()
+{
+ return concatStringsSep(" ", value);
+}
+
+template<> void BaseSetting<Strings>::toJSON(JSONPlaceholder & out)
+{
+ JSONList list(out.list());
+ for (auto & s : value)
+ list.elem(s);
+}
+
+template<> void BaseSetting<StringSet>::set(const std::string & str)
+{
+ value = tokenizeString<StringSet>(str);
+}
+
+template<> std::string BaseSetting<StringSet>::to_string()
+{
+ return concatStringsSep(" ", value);
+}
+
+template<> void BaseSetting<StringSet>::toJSON(JSONPlaceholder & out)
+{
+ JSONList list(out.list());
+ for (auto & s : value)
+ list.elem(s);
+}
+
+template class BaseSetting<int>;
+template class BaseSetting<unsigned int>;
+template class BaseSetting<long>;
+template class BaseSetting<unsigned long>;
+template class BaseSetting<long long>;
+template class BaseSetting<unsigned long long>;
+template class BaseSetting<bool>;
+
+void PathSetting::set(const std::string & str)
+{
+ if (str == "") {
+ if (allowEmpty)
+ value = "";
+ else
+ throw UsageError("setting '%s' cannot be empty", name);
+ } else
+ value = canonPath(str);
+}
+
+}
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
new file mode 100644
index 000000000..919621091
--- /dev/null
+++ b/src/libutil/config.hh
@@ -0,0 +1,189 @@
+#include <map>
+#include <set>
+
+#include "types.hh"
+
+#pragma once
+
+namespace nix {
+
+class Args;
+class AbstractSetting;
+class JSONPlaceholder;
+class JSONObject;
+
+/* A class to simplify providing configuration settings. The typical
+ use is to inherit Config and add Setting<T> members:
+
+ class MyClass : private Config
+ {
+ Setting<int> foo{this, 123, "foo", "the number of foos to use"};
+ Setting<std::string> bar{this, "blabla", "bar", "the name of the bar"};
+
+ MyClass() : Config(readConfigFile("/etc/my-app.conf"))
+ {
+ std::cout << foo << "\n"; // will print 123 unless overridden
+ }
+ };
+*/
+
+class Config
+{
+ friend class AbstractSetting;
+
+ struct SettingData
+ {
+ bool isAlias = false;
+ AbstractSetting * setting;
+ };
+
+ std::map<std::string, SettingData> _settings;
+
+ StringMap initials;
+
+public:
+
+ Config(const StringMap & initials)
+ : initials(initials)
+ { }
+
+ void set(const std::string & name, const std::string & value);
+
+ void addSetting(AbstractSetting * setting);
+
+ void warnUnknownSettings();
+
+ StringMap getSettings(bool overridenOnly = false);
+
+ void applyConfigFile(const Path & path, bool fatal = false);
+
+ void resetOverriden();
+
+ void toJSON(JSONObject & out);
+};
+
+class AbstractSetting
+{
+ friend class Config;
+
+public:
+
+ const std::string name;
+ const std::string description;
+ const std::set<std::string> aliases;
+
+ int created = 123;
+
+ bool overriden = false;
+
+protected:
+
+ AbstractSetting(
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases);
+
+ virtual ~AbstractSetting()
+ {
+ // Check against a gcc miscompilation causing our constructor
+ // not to run (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80431).
+ assert(created == 123);
+ }
+
+ virtual void set(const std::string & value) = 0;
+
+ virtual std::string to_string() = 0;
+
+ virtual void toJSON(JSONPlaceholder & out);
+
+ bool isOverriden() { return overriden; }
+};
+
+/* A setting of type T. */
+template<typename T>
+class BaseSetting : public AbstractSetting
+{
+protected:
+
+ T value;
+
+public:
+
+ BaseSetting(const T & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : AbstractSetting(name, description, aliases)
+ , value(def)
+ { }
+
+ operator const T &() const { return value; }
+ operator T &() { return value; }
+ const T & get() const { return value; }
+ bool operator ==(const T & v2) const { return value == v2; }
+ bool operator !=(const T & v2) const { return value != v2; }
+ void operator =(const T & v) { assign(v); }
+ virtual void assign(const T & v) { value = v; }
+
+ void set(const std::string & str) override;
+
+ std::string to_string() override;
+
+ void toJSON(JSONPlaceholder & out) override;
+};
+
+template<typename T>
+std::ostream & operator <<(std::ostream & str, const BaseSetting<T> & opt)
+{
+ str << (const T &) opt;
+ return str;
+}
+
+template<typename T>
+bool operator ==(const T & v1, const BaseSetting<T> & v2) { return v1 == (const T &) v2; }
+
+template<typename T>
+class Setting : public BaseSetting<T>
+{
+public:
+ Setting(Config * options,
+ const T & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<T>(def, name, description, aliases)
+ {
+ options->addSetting(this);
+ }
+
+ void operator =(const T & v) { this->assign(v); }
+};
+
+/* A special setting for Paths. These are automatically canonicalised
+ (e.g. "/foo//bar/" becomes "/foo/bar"). */
+class PathSetting : public BaseSetting<Path>
+{
+ bool allowEmpty;
+
+public:
+
+ PathSetting(Config * options,
+ bool allowEmpty,
+ const Path & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<Path>(def, name, description, aliases)
+ , allowEmpty(allowEmpty)
+ {
+ options->addSetting(this);
+ }
+
+ void set(const std::string & str) override;
+
+ Path operator +(const char * p) const { return value + p; }
+
+ void operator =(const Path & v) { this->assign(v); }
+};
+
+}
diff --git a/src/libutil/finally.hh b/src/libutil/finally.hh
new file mode 100644
index 000000000..7760cfe9a
--- /dev/null
+++ b/src/libutil/finally.hh
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <functional>
+
+/* A trivial class to run a function at the end of a scope. */
+class Finally
+{
+private:
+ std::function<void()> fun;
+
+public:
+ Finally(std::function<void()> fun) : fun(fun) { }
+ ~Finally() { fun(); }
+};
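+
+/* Usage sketch: release a resource when the enclosing scope is left,
+   whether normally or via an exception (the file name is illustrative):
+
+       FILE * f = fopen("/etc/hosts", "r");
+       Finally closeFile([&]() { if (f) fclose(f); });
+       // ... use f ...
+*/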
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
new file mode 100644
index 000000000..9f4afd93c
--- /dev/null
+++ b/src/libutil/hash.cc
@@ -0,0 +1,354 @@
+#include <iostream>
+#include <cstring>
+
+#include <openssl/md5.h>
+#include <openssl/sha.h>
+
+#include "hash.hh"
+#include "archive.hh"
+#include "util.hh"
+#include "istringstream_nocopy.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace nix {
+
+
+Hash::Hash()
+{
+ type = htUnknown;
+ hashSize = 0;
+ memset(hash, 0, maxHashSize);
+}
+
+
+Hash::Hash(HashType type)
+{
+ this->type = type;
+ if (type == htMD5) hashSize = md5HashSize;
+ else if (type == htSHA1) hashSize = sha1HashSize;
+ else if (type == htSHA256) hashSize = sha256HashSize;
+ else if (type == htSHA512) hashSize = sha512HashSize;
+ else abort();
+ assert(hashSize <= maxHashSize);
+ memset(hash, 0, maxHashSize);
+}
+
+
+bool Hash::operator == (const Hash & h2) const
+{
+ if (hashSize != h2.hashSize) return false;
+ for (unsigned int i = 0; i < hashSize; i++)
+ if (hash[i] != h2.hash[i]) return false;
+ return true;
+}
+
+
+bool Hash::operator != (const Hash & h2) const
+{
+ return !(*this == h2);
+}
+
+
+bool Hash::operator < (const Hash & h) const
+{
+ for (unsigned int i = 0; i < hashSize; i++) {
+ if (hash[i] < h.hash[i]) return true;
+ if (hash[i] > h.hash[i]) return false;
+ }
+ return false;
+}
+
+
+std::string Hash::to_string(bool base32) const
+{
+ return printHashType(type) + ":" + (base32 ? printHash32(*this) : printHash(*this));
+}
+
+
+const string base16Chars = "0123456789abcdef";
+
+
+string printHash(const Hash & hash)
+{
+ char buf[hash.hashSize * 2];
+ for (unsigned int i = 0; i < hash.hashSize; i++) {
+ buf[i * 2] = base16Chars[hash.hash[i] >> 4];
+ buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f];
+ }
+ return string(buf, hash.hashSize * 2);
+}
+
+
+Hash parseHash(const string & s)
+{
+ string::size_type colon = s.find(':');
+ if (colon == string::npos)
+ throw BadHash(format("invalid hash ‘%s’") % s);
+ string hts = string(s, 0, colon);
+ HashType ht = parseHashType(hts);
+ if (ht == htUnknown)
+ throw BadHash(format("unknown hash type ‘%s’") % hts);
+ return parseHash16or32(ht, string(s, colon + 1));
+}
+
+
+Hash parseHash(HashType ht, const string & s)
+{
+ Hash hash(ht);
+ if (s.length() != hash.hashSize * 2)
+ throw BadHash(format("invalid hash ‘%1%’") % s);
+ for (unsigned int i = 0; i < hash.hashSize; i++) {
+ string s2(s, i * 2, 2);
+ if (!isxdigit(s2[0]) || !isxdigit(s2[1]))
+ throw BadHash(format("invalid hash ‘%1%’") % s);
+ istringstream_nocopy str(s2);
+ int n;
+ str >> std::hex >> n;
+ hash.hash[i] = n;
+ }
+ return hash;
+}
+
+
+// omitted: E O U T
+const string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
+
+
+string printHash32(const Hash & hash)
+{
+ assert(hash.hashSize);
+ size_t len = hash.base32Len();
+ assert(len);
+
+ string s;
+ s.reserve(len);
+
+ for (int n = len - 1; n >= 0; n--) {
+ unsigned int b = n * 5;
+ unsigned int i = b / 8;
+ unsigned int j = b % 8;
+ unsigned char c =
+ (hash.hash[i] >> j)
+ | (i >= hash.hashSize - 1 ? 0 : hash.hash[i + 1] << (8 - j));
+ s.push_back(base32Chars[c & 0x1f]);
+ }
+
+ return s;
+}
+
+
+string printHash16or32(const Hash & hash)
+{
+ return hash.type == htMD5 ? printHash(hash) : printHash32(hash);
+}
+
+
+Hash parseHash32(HashType ht, const string & s)
+{
+ Hash hash(ht);
+ size_t len = hash.base32Len();
+ assert(s.size() == len);
+
+ for (unsigned int n = 0; n < len; ++n) {
+ char c = s[len - n - 1];
+ unsigned char digit;
+ for (digit = 0; digit < base32Chars.size(); ++digit) /* !!! slow */
+ if (base32Chars[digit] == c) break;
+ if (digit >= 32)
+ throw BadHash(format("invalid base-32 hash ‘%1%’") % s);
+ unsigned int b = n * 5;
+ unsigned int i = b / 8;
+ unsigned int j = b % 8;
+ hash.hash[i] |= digit << j;
+
+ if (i < hash.hashSize - 1) {
+ hash.hash[i + 1] |= digit >> (8 - j);
+ } else {
+ if (digit >> (8 - j))
+ throw BadHash(format("invalid base-32 hash ‘%1%’") % s);
+ }
+ }
+
+ return hash;
+}
+
+
+Hash parseHash16or32(HashType ht, const string & s)
+{
+ Hash hash(ht);
+ if (s.size() == hash.hashSize * 2)
+ /* hexadecimal representation */
+ hash = parseHash(ht, s);
+ else if (s.size() == hash.base32Len())
+ /* base-32 representation */
+ hash = parseHash32(ht, s);
+ else
+ throw BadHash(format("hash ‘%1%’ has wrong length for hash type ‘%2%’")
+ % s % printHashType(ht));
+ return hash;
+}
+
+
+bool isHash(const string & s)
+{
+ if (s.length() != 32) return false;
+ for (int i = 0; i < 32; i++) {
+ char c = s[i];
+ if (!((c >= '0' && c <= '9') ||
+ (c >= 'a' && c <= 'f')))
+ return false;
+ }
+ return true;
+}
+
+
+union Ctx
+{
+ MD5_CTX md5;
+ SHA_CTX sha1;
+ SHA256_CTX sha256;
+ SHA512_CTX sha512;
+};
+
+
+static void start(HashType ht, Ctx & ctx)
+{
+ if (ht == htMD5) MD5_Init(&ctx.md5);
+ else if (ht == htSHA1) SHA1_Init(&ctx.sha1);
+ else if (ht == htSHA256) SHA256_Init(&ctx.sha256);
+ else if (ht == htSHA512) SHA512_Init(&ctx.sha512);
+}
+
+
+static void update(HashType ht, Ctx & ctx,
+ const unsigned char * bytes, unsigned int len)
+{
+ if (ht == htMD5) MD5_Update(&ctx.md5, bytes, len);
+ else if (ht == htSHA1) SHA1_Update(&ctx.sha1, bytes, len);
+ else if (ht == htSHA256) SHA256_Update(&ctx.sha256, bytes, len);
+ else if (ht == htSHA512) SHA512_Update(&ctx.sha512, bytes, len);
+}
+
+
+static void finish(HashType ht, Ctx & ctx, unsigned char * hash)
+{
+ if (ht == htMD5) MD5_Final(hash, &ctx.md5);
+ else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1);
+ else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256);
+ else if (ht == htSHA512) SHA512_Final(hash, &ctx.sha512);
+}
+
+
+Hash hashString(HashType ht, const string & s)
+{
+ Ctx ctx;
+ Hash hash(ht);
+ start(ht, ctx);
+ update(ht, ctx, (const unsigned char *) s.data(), s.length());
+ finish(ht, ctx, hash.hash);
+ return hash;
+}
+
+
+Hash hashFile(HashType ht, const Path & path)
+{
+ Ctx ctx;
+ Hash hash(ht);
+ start(ht, ctx);
+
+ AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+ if (!fd) throw SysError(format("opening file ‘%1%’") % path);
+
+ unsigned char buf[8192];
+ ssize_t n;
+ while ((n = read(fd.get(), buf, sizeof(buf)))) {
+ checkInterrupt();
+ if (n == -1) throw SysError(format("reading file ‘%1%’") % path);
+ update(ht, ctx, buf, n);
+ }
+
+ finish(ht, ctx, hash.hash);
+ return hash;
+}
+
+
+HashSink::HashSink(HashType ht) : ht(ht)
+{
+ ctx = new Ctx;
+ bytes = 0;
+ start(ht, *ctx);
+}
+
+HashSink::~HashSink()
+{
+ bufPos = 0;
+ delete ctx;
+}
+
+void HashSink::write(const unsigned char * data, size_t len)
+{
+ bytes += len;
+ update(ht, *ctx, data, len);
+}
+
+HashResult HashSink::finish()
+{
+ flush();
+ Hash hash(ht);
+ nix::finish(ht, *ctx, hash.hash);
+ return HashResult(hash, bytes);
+}
+
+HashResult HashSink::currentHash()
+{
+ flush();
+ Ctx ctx2 = *ctx;
+ Hash hash(ht);
+ nix::finish(ht, ctx2, hash.hash);
+ return HashResult(hash, bytes);
+}
+
+
+HashResult hashPath(
+ HashType ht, const Path & path, PathFilter & filter)
+{
+ HashSink sink(ht);
+ dumpPath(path, sink, filter);
+ return sink.finish();
+}
+
+
+Hash compressHash(const Hash & hash, unsigned int newSize)
+{
+ Hash h;
+ h.hashSize = newSize;
+ for (unsigned int i = 0; i < hash.hashSize; ++i)
+ h.hash[i % newSize] ^= hash.hash[i];
+ return h;
+}
+
+
+HashType parseHashType(const string & s)
+{
+ if (s == "md5") return htMD5;
+ else if (s == "sha1") return htSHA1;
+ else if (s == "sha256") return htSHA256;
+ else if (s == "sha512") return htSHA512;
+ else return htUnknown;
+}
+
+
+string printHashType(HashType ht)
+{
+ if (ht == htMD5) return "md5";
+ else if (ht == htSHA1) return "sha1";
+ else if (ht == htSHA256) return "sha256";
+ else if (ht == htSHA512) return "sha512";
+ else abort();
+}
+
+
+}
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
new file mode 100644
index 000000000..02e213fc7
--- /dev/null
+++ b/src/libutil/hash.hh
@@ -0,0 +1,133 @@
+#pragma once
+
+#include "types.hh"
+#include "serialise.hh"
+
+
+namespace nix {
+
+
+MakeError(BadHash, Error);
+
+
+enum HashType : char { htUnknown, htMD5, htSHA1, htSHA256, htSHA512 };
+
+
+const int md5HashSize = 16;
+const int sha1HashSize = 20;
+const int sha256HashSize = 32;
+const int sha512HashSize = 64;
+
+extern const string base32Chars;
+
+
+struct Hash
+{
+ static const unsigned int maxHashSize = 64;
+ unsigned int hashSize;
+ unsigned char hash[maxHashSize];
+
+ HashType type;
+
+ /* Create an unset hash object. */
+ Hash();
+
+ /* Create a zero-filled hash object. */
+ Hash(HashType type);
+
+ /* Check whether a hash is set. */
+ operator bool () const { return type != htUnknown; }
+
+    /* Check whether two hashes are equal. */
+ bool operator == (const Hash & h2) const;
+
+    /* Check whether two hashes are not equal. */
+ bool operator != (const Hash & h2) const;
+
+ /* For sorting. */
+ bool operator < (const Hash & h) const;
+
+ /* Returns the length of a base-16 representation of this hash. */
+ size_t base16Len() const
+ {
+ return hashSize * 2;
+ }
+
+ /* Returns the length of a base-32 representation of this hash. */
+ size_t base32Len() const
+ {
+ return (hashSize * 8 - 1) / 5 + 1;
+ }
+
+ std::string to_string(bool base32 = true) const;
+};
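+
+/* For example, a SHA-256 hash has hashSize = 32, so base16Len() = 64
+   and base32Len() = (32 * 8 - 1) / 5 + 1 = 52 characters. */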
+
+
+/* Convert a hash to a hexadecimal representation. */
+string printHash(const Hash & hash);
+
+Hash parseHash(const string & s);
+
+/* Parse a hexadecimal representation of a hash code. */
+Hash parseHash(HashType ht, const string & s);
+
+/* Convert a hash to a base-32 representation. */
+string printHash32(const Hash & hash);
+
+/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */
+string printHash16or32(const Hash & hash);
+
+/* Parse a base-32 representation of a hash code. */
+Hash parseHash32(HashType ht, const string & s);
+
+/* Parse a base-16 or base-32 representation of a hash code. */
+Hash parseHash16or32(HashType ht, const string & s);
+
+/* Verify that the given string is a valid hash code. */
+bool isHash(const string & s);
+
+/* Compute the hash of the given string. */
+Hash hashString(HashType ht, const string & s);
+
+/* Compute the hash of the given file. */
+Hash hashFile(HashType ht, const Path & path);
+
+/* Compute the hash of the given path. The hash is defined as
+ (essentially) hashString(ht, dumpPath(path)). */
+struct PathFilter;
+extern PathFilter defaultPathFilter;
+typedef std::pair<Hash, unsigned long long> HashResult;
+HashResult hashPath(HashType ht, const Path & path,
+ PathFilter & filter = defaultPathFilter);
+
+/* Compress a hash to the specified number of bytes by cyclically
+ XORing bytes together. */
+Hash compressHash(const Hash & hash, unsigned int newSize);
+
+/* Parse a string representing a hash type. */
+HashType parseHashType(const string & s);
+
+/* And the reverse. */
+string printHashType(HashType ht);
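+
+/* A minimal usage sketch of the functions above (the input string is
+   illustrative):
+
+     HashType ht = parseHashType("sha256");
+     Hash h = hashString(ht, "hello world");
+     string hex = printHash(h);        // 64 hexadecimal characters
+     string b32 = printHash32(h);      // 52 base-32 characters
+     assert(parseHash16or32(ht, hex) == h);
+*/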
+
+
+union Ctx;
+
+class HashSink : public BufferedSink
+{
+private:
+ HashType ht;
+ Ctx * ctx;
+ unsigned long long bytes;
+
+public:
+ HashSink(HashType ht);
+ HashSink(const HashSink & h);
+ ~HashSink();
+ void write(const unsigned char * data, size_t len);
+ HashResult finish();
+ HashResult currentHash();
+};
+
+
+}
diff --git a/src/libutil/istringstream_nocopy.hh b/src/libutil/istringstream_nocopy.hh
new file mode 100644
index 000000000..f7beac578
--- /dev/null
+++ b/src/libutil/istringstream_nocopy.hh
@@ -0,0 +1,92 @@
+/* This file provides a variant of std::istringstream that doesn't
+ copy its string argument. This is useful for large strings. The
+ caller must ensure that the string object is not destroyed while
+ it's referenced by this object. */
+
+#pragma once
+
+#include <string>
+#include <iostream>
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringbuf_nocopy : public std::basic_streambuf<CharT, Traits>
+{
+public:
+ typedef std::basic_string<CharT, Traits, Allocator> string_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::off_type off_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::pos_type pos_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::int_type int_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::traits_type traits_type;
+
+private:
+ const string_type & s;
+
+ off_type off;
+
+public:
+ basic_istringbuf_nocopy(const string_type & s) : s{s}, off{0}
+ {
+ }
+
+private:
+ pos_type seekoff(off_type off, std::ios_base::seekdir dir, std::ios_base::openmode which)
+ {
+ if (which & std::ios_base::in) {
+ this->off = dir == std::ios_base::beg
+ ? off
+ : (dir == std::ios_base::end
+ ? s.size() + off
+ : this->off + off);
+ }
+ return pos_type(this->off);
+ }
+
+ pos_type seekpos(pos_type pos, std::ios_base::openmode which)
+ {
+ return seekoff(pos, std::ios_base::beg, which);
+ }
+
+ std::streamsize showmanyc()
+ {
+ return s.size() - off;
+ }
+
+ int_type underflow()
+ {
+ if (typename string_type::size_type(off) == s.size())
+ return traits_type::eof();
+ return traits_type::to_int_type(s[off]);
+ }
+
+ int_type uflow()
+ {
+ if (typename string_type::size_type(off) == s.size())
+ return traits_type::eof();
+ return traits_type::to_int_type(s[off++]);
+ }
+
+ int_type pbackfail(int_type ch)
+ {
+ if (off == 0 || (ch != traits_type::eof() && ch != s[off - 1]))
+ return traits_type::eof();
+
+ return traits_type::to_int_type(s[--off]);
+ }
+
+};
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringstream_nocopy : public std::basic_iostream<CharT, Traits>
+{
+ typedef basic_istringbuf_nocopy<CharT, Traits, Allocator> buf_type;
+ buf_type buf;
+public:
+ basic_istringstream_nocopy(const typename buf_type::string_type & s) :
+ std::basic_iostream<CharT, Traits>(&buf), buf(s) {};
+};
+
+typedef basic_istringstream_nocopy<char> istringstream_nocopy;
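+
+/* A minimal usage sketch; ‘data’ stands for some long-lived string
+   owned by the caller:
+
+     std::string data = "2a";
+     istringstream_nocopy str(data);
+     int n;
+     str >> std::hex >> n;   // n == 0x2a; ‘data’ is not copied
+*/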
diff --git a/src/libutil/json.cc b/src/libutil/json.cc
new file mode 100644
index 000000000..b8b8ef9c8
--- /dev/null
+++ b/src/libutil/json.cc
@@ -0,0 +1,169 @@
+#include "json.hh"
+
+#include <iomanip>
+#include <cstring>
+
+namespace nix {
+
+void toJSON(std::ostream & str, const char * start, const char * end)
+{
+ str << '"';
+ for (auto i = start; i != end; i++)
+ if (*i == '\"' || *i == '\\') str << '\\' << *i;
+ else if (*i == '\n') str << "\\n";
+ else if (*i == '\r') str << "\\r";
+ else if (*i == '\t') str << "\\t";
+ else if (*i >= 0 && *i < 32)
+ str << "\\u" << std::setfill('0') << std::setw(4) << std::hex << (uint16_t) *i << std::dec;
+ else str << *i;
+ str << '"';
+}
+
+void toJSON(std::ostream & str, const char * s)
+{
+ if (!s) str << "null"; else toJSON(str, s, s + strlen(s));
+}
+
+template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
+template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
+template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
+template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
+template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
+template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
+template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
+
+template<> void toJSON<std::string>(std::ostream & str, const std::string & s)
+{
+ toJSON(str, s.c_str(), s.c_str() + s.size());
+}
+
+template<> void toJSON<bool>(std::ostream & str, const bool & b)
+{
+ str << (b ? "true" : "false");
+}
+
+template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
+{
+ str << "null";
+}
+
+JSONWriter::JSONWriter(std::ostream & str, bool indent)
+ : state(new JSONState(str, indent))
+{
+ state->stack.push_back(this);
+}
+
+JSONWriter::JSONWriter(JSONState * state)
+ : state(state)
+{
+ state->stack.push_back(this);
+}
+
+JSONWriter::~JSONWriter()
+{
+ assertActive();
+ state->stack.pop_back();
+ if (state->stack.empty()) delete state;
+}
+
+void JSONWriter::comma()
+{
+ assertActive();
+ if (first) {
+ first = false;
+ } else {
+ state->str << ',';
+ }
+ if (state->indent) indent();
+}
+
+void JSONWriter::indent()
+{
+ state->str << '\n' << std::string(state->depth * 2, ' ');
+}
+
+void JSONList::open()
+{
+ state->depth++;
+ state->str << '[';
+}
+
+JSONList::~JSONList()
+{
+ state->depth--;
+ if (state->indent && !first) indent();
+ state->str << "]";
+}
+
+JSONList JSONList::list()
+{
+ comma();
+ return JSONList(state);
+}
+
+JSONObject JSONList::object()
+{
+ comma();
+ return JSONObject(state);
+}
+
+JSONPlaceholder JSONList::placeholder()
+{
+ comma();
+ return JSONPlaceholder(state);
+}
+
+void JSONObject::open()
+{
+ state->depth++;
+ state->str << '{';
+}
+
+JSONObject::~JSONObject()
+{
+ state->depth--;
+ if (state->indent && !first) indent();
+ state->str << "}";
+}
+
+void JSONObject::attr(const std::string & s)
+{
+ comma();
+ toJSON(state->str, s);
+ state->str << ':';
+ if (state->indent) state->str << ' ';
+}
+
+JSONList JSONObject::list(const std::string & name)
+{
+ attr(name);
+ return JSONList(state);
+}
+
+JSONObject JSONObject::object(const std::string & name)
+{
+ attr(name);
+ return JSONObject(state);
+}
+
+JSONPlaceholder JSONObject::placeholder(const std::string & name)
+{
+ attr(name);
+ return JSONPlaceholder(state);
+}
+
+JSONList JSONPlaceholder::list()
+{
+ assertValid();
+ first = false;
+ return JSONList(state);
+}
+
+JSONObject JSONPlaceholder::object()
+{
+ assertValid();
+ first = false;
+ return JSONObject(state);
+}
+
+}
diff --git a/src/libutil/json.hh b/src/libutil/json.hh
new file mode 100644
index 000000000..595e9bbe3
--- /dev/null
+++ b/src/libutil/json.hh
@@ -0,0 +1,181 @@
+#pragma once
+
+#include <iostream>
+#include <vector>
+#include <cassert>
+
+namespace nix {
+
+void toJSON(std::ostream & str, const char * start, const char * end);
+void toJSON(std::ostream & str, const char * s);
+
+template<typename T>
+void toJSON(std::ostream & str, const T & n);
+
+class JSONWriter
+{
+protected:
+
+ struct JSONState
+ {
+ std::ostream & str;
+ bool indent;
+ size_t depth = 0;
+ std::vector<JSONWriter *> stack;
+ JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
+ ~JSONState()
+ {
+ assert(stack.empty());
+ }
+ };
+
+ JSONState * state;
+
+ bool first = true;
+
+ JSONWriter(std::ostream & str, bool indent);
+
+ JSONWriter(JSONState * state);
+
+ ~JSONWriter();
+
+ void assertActive()
+ {
+ assert(!state->stack.empty() && state->stack.back() == this);
+ }
+
+ void comma();
+
+ void indent();
+};
+
+class JSONObject;
+class JSONPlaceholder;
+
+class JSONList : JSONWriter
+{
+private:
+
+ friend class JSONObject;
+ friend class JSONPlaceholder;
+
+ void open();
+
+ JSONList(JSONState * state)
+ : JSONWriter(state)
+ {
+ open();
+ }
+
+public:
+
+ JSONList(std::ostream & str, bool indent = false)
+ : JSONWriter(str, indent)
+ {
+ open();
+ }
+
+ ~JSONList();
+
+ template<typename T>
+ JSONList & elem(const T & v)
+ {
+ comma();
+ toJSON(state->str, v);
+ return *this;
+ }
+
+ JSONList list();
+
+ JSONObject object();
+
+ JSONPlaceholder placeholder();
+};
+
+class JSONObject : JSONWriter
+{
+private:
+
+ friend class JSONList;
+ friend class JSONPlaceholder;
+
+ void open();
+
+ JSONObject(JSONState * state)
+ : JSONWriter(state)
+ {
+ open();
+ }
+
+ void attr(const std::string & s);
+
+public:
+
+ JSONObject(std::ostream & str, bool indent = false)
+ : JSONWriter(str, indent)
+ {
+ open();
+ }
+
+ ~JSONObject();
+
+ template<typename T>
+ JSONObject & attr(const std::string & name, const T & v)
+ {
+ attr(name);
+ toJSON(state->str, v);
+ return *this;
+ }
+
+ JSONList list(const std::string & name);
+
+ JSONObject object(const std::string & name);
+
+ JSONPlaceholder placeholder(const std::string & name);
+};
+
+class JSONPlaceholder : JSONWriter
+{
+
+private:
+
+ friend class JSONList;
+ friend class JSONObject;
+
+ JSONPlaceholder(JSONState * state)
+ : JSONWriter(state)
+ {
+ }
+
+ void assertValid()
+ {
+ assertActive();
+ assert(first);
+ }
+
+public:
+
+ JSONPlaceholder(std::ostream & str, bool indent = false)
+ : JSONWriter(str, indent)
+ {
+ }
+
+ ~JSONPlaceholder()
+ {
+ assert(!first || std::uncaught_exception());
+ }
+
+ template<typename T>
+ void write(const T & v)
+ {
+ assertValid();
+ first = false;
+ toJSON(state->str, v);
+ }
+
+ JSONList list();
+
+ JSONObject object();
+};
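+
+/* A minimal usage sketch (the attribute names and values are
+   illustrative):
+
+     std::ostringstream out;
+     {
+         JSONObject obj(out);
+         obj.attr("name", "hello");
+         obj.attr("size", 123);
+         auto refs = obj.list("refs");
+         refs.elem("a");
+         refs.elem("b");
+     }
+     // out.str() == {"name":"hello","size":123,"refs":["a","b"]}
+*/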
+
+}
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
new file mode 100644
index 000000000..0721b21c2
--- /dev/null
+++ b/src/libutil/local.mk
@@ -0,0 +1,13 @@
+libraries += libutil
+
+libutil_NAME = libnixutil
+
+libutil_DIR := $(d)
+
+libutil_SOURCES := $(wildcard $(d)/*.cc)
+
+libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS)
+
+libutil_LIBS = libformat
+
+libutil_CXXFLAGS = -DBRO=\"$(bro)\"
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
new file mode 100644
index 000000000..afcc2ec58
--- /dev/null
+++ b/src/libutil/logging.cc
@@ -0,0 +1,82 @@
+#include "logging.hh"
+#include "util.hh"
+
+namespace nix {
+
+Logger * logger = makeDefaultLogger();
+
+void Logger::warn(const std::string & msg)
+{
+ log(lvlInfo, ANSI_RED "warning:" ANSI_NORMAL " " + msg);
+}
+
+class SimpleLogger : public Logger
+{
+public:
+
+ bool systemd, tty;
+
+ SimpleLogger()
+ {
+ systemd = getEnv("IN_SYSTEMD") == "1";
+ tty = isatty(STDERR_FILENO);
+ }
+
+ void log(Verbosity lvl, const FormatOrString & fs) override
+ {
+ if (lvl > verbosity) return;
+
+ std::string prefix;
+
+ if (systemd) {
+ char c;
+ switch (lvl) {
+ case lvlError: c = '3'; break;
+ case lvlInfo: c = '5'; break;
+ case lvlTalkative: case lvlChatty: c = '6'; break;
+ default: c = '7';
+ }
+ prefix = std::string("<") + c + ">";
+ }
+
+ writeToStderr(prefix + (tty ? fs.s : filterANSIEscapes(fs.s)) + "\n");
+ }
+
+ void startActivity(Activity & activity, Verbosity lvl, const FormatOrString & fs) override
+ {
+ log(lvl, fs);
+ }
+
+ void stopActivity(Activity & activity) override
+ {
+ }
+};
+
+Verbosity verbosity = lvlInfo;
+
+void warnOnce(bool & haveWarned, const FormatOrString & fs)
+{
+ if (!haveWarned) {
+ warn(fs.s);
+ haveWarned = true;
+ }
+}
+
+void writeToStderr(const string & s)
+{
+ try {
+ writeFull(STDERR_FILENO, s, false);
+ } catch (SysError & e) {
+ /* Ignore failing writes to stderr. We need to ignore write
+ errors to ensure that cleanup code that logs to stderr runs
+ to completion if the other side of stderr has been closed
+ unexpectedly. */
+ }
+}
+
+Logger * makeDefaultLogger()
+{
+ return new SimpleLogger();
+}
+
+}
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
new file mode 100644
index 000000000..81aebccdc
--- /dev/null
+++ b/src/libutil/logging.hh
@@ -0,0 +1,99 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+typedef enum {
+ lvlError = 0,
+ lvlInfo,
+ lvlTalkative,
+ lvlChatty,
+ lvlDebug,
+ lvlVomit
+} Verbosity;
+
+class Activity;
+
+class Logger
+{
+ friend class Activity;
+
+public:
+
+ virtual ~Logger() { }
+
+ virtual void log(Verbosity lvl, const FormatOrString & fs) = 0;
+
+ void log(const FormatOrString & fs)
+ {
+ log(lvlInfo, fs);
+ }
+
+ virtual void warn(const std::string & msg);
+
+ virtual void setExpected(const std::string & label, uint64_t value = 1) { }
+ virtual void setProgress(const std::string & label, uint64_t value = 1) { }
+ virtual void incExpected(const std::string & label, uint64_t value = 1) { }
+ virtual void incProgress(const std::string & label, uint64_t value = 1) { }
+
+private:
+
+ virtual void startActivity(Activity & activity, Verbosity lvl, const FormatOrString & fs) = 0;
+
+ virtual void stopActivity(Activity & activity) = 0;
+
+};
+
+class Activity
+{
+public:
+ Logger & logger;
+
+ Activity(Logger & logger, Verbosity lvl, const FormatOrString & fs)
+ : logger(logger)
+ {
+ logger.startActivity(*this, lvl, fs);
+ }
+
+ ~Activity()
+ {
+ logger.stopActivity(*this);
+ }
+};
+
+extern Logger * logger;
+
+Logger * makeDefaultLogger();
+
+extern Verbosity verbosity; /* suppress msgs > this */
+
+/* Print a message if the current log level is at least the specified
+ level. Note that this has to be implemented as a macro to ensure
+ that the arguments are evaluated lazily. */
+#define printMsg(level, args...) \
+ do { \
+ if (level <= nix::verbosity) { \
+ logger->log(level, fmt(args)); \
+ } \
+ } while (0)
+
+#define printError(args...) printMsg(lvlError, args)
+#define printInfo(args...) printMsg(lvlInfo, args)
+#define printTalkative(args...) printMsg(lvlTalkative, args)
+#define debug(args...) printMsg(lvlDebug, args)
+#define vomit(args...) printMsg(lvlVomit, args)
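+
+/* For example (the messages and variables are illustrative):
+
+     printError("unable to open ‘%s’", path);
+     debug("copying %d bytes", n);
+
+   Because the arguments appear inside the level check, they are not
+   evaluated when the message is suppressed. */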
+
+template<typename... Args>
+inline void warn(const std::string & fs, Args... args)
+{
+ boost::format f(fs);
+ formatHelper(f, args...);
+ logger->warn(f.str());
+}
+
+void warnOnce(bool & haveWarned, const FormatOrString & fs);
+
+void writeToStderr(const string & s);
+
+}
diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh
new file mode 100644
index 000000000..3cb5d5088
--- /dev/null
+++ b/src/libutil/lru-cache.hh
@@ -0,0 +1,92 @@
+#pragma once
+
+#include <map>
+#include <list>
+
+namespace nix {
+
+/* A simple least-recently used cache. Not thread-safe. */
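+/* A minimal usage sketch (keys and values are illustrative):
+
+     LRUCache<std::string, int> cache(2);
+     cache.upsert("a", 1);
+     cache.upsert("b", 2);
+     cache.upsert("c", 3);               // evicts "a"
+     assert(cache.get("a") == nullptr);
+     assert(*cache.get("b") == 2);
+*/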
+template<typename Key, typename Value>
+class LRUCache
+{
+private:
+
+ size_t capacity;
+
+ // Stupid wrapper to get around circular dependency between Data
+ // and LRU.
+ struct LRUIterator;
+
+ using Data = std::map<Key, std::pair<LRUIterator, Value>>;
+ using LRU = std::list<typename Data::iterator>;
+
+ struct LRUIterator { typename LRU::iterator it; };
+
+ Data data;
+ LRU lru;
+
+public:
+
+ LRUCache(size_t capacity) : capacity(capacity) { }
+
+    /* Insert or update an item in the cache. */
+ void upsert(const Key & key, const Value & value)
+ {
+ if (capacity == 0) return;
+
+ erase(key);
+
+ if (data.size() >= capacity) {
+ /* Retire the oldest item. */
+ auto oldest = lru.begin();
+ data.erase(*oldest);
+ lru.erase(oldest);
+ }
+
+ auto res = data.emplace(key, std::make_pair(LRUIterator(), value));
+ assert(res.second);
+ auto & i(res.first);
+
+ auto j = lru.insert(lru.end(), i);
+
+ i->second.first.it = j;
+ }
+
+ bool erase(const Key & key)
+ {
+ auto i = data.find(key);
+ if (i == data.end()) return false;
+ lru.erase(i->second.first.it);
+ data.erase(i);
+ return true;
+ }
+
+ /* Look up an item in the cache. If it exists, it becomes the most
+ recently used item. */
+ // FIXME: use boost::optional?
+ Value * get(const Key & key)
+ {
+ auto i = data.find(key);
+ if (i == data.end()) return 0;
+
+ /* Move this item to the back of the LRU list. */
+ lru.erase(i->second.first.it);
+ auto j = lru.insert(lru.end(), i);
+ i->second.first.it = j;
+
+ return &i->second.second;
+ }
+
+ size_t size()
+ {
+ return data.size();
+ }
+
+ void clear()
+ {
+ data.clear();
+ lru.clear();
+ }
+};
+
+}
diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh
new file mode 100644
index 000000000..e0ec66c01
--- /dev/null
+++ b/src/libutil/monitor-fd.hh
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <thread>
+#include <atomic>
+
+#include <cstdlib>
+#include <poll.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+namespace nix {
+
+
+class MonitorFdHup
+{
+private:
+ std::thread thread;
+
+public:
+ MonitorFdHup(int fd)
+ {
+ thread = std::thread([fd]() {
+ /* Wait indefinitely until a POLLHUP occurs. */
+ struct pollfd fds[1];
+ fds[0].fd = fd;
+ fds[0].events = 0;
+ if (poll(fds, 1, -1) == -1) abort(); // can't happen
+ assert(fds[0].revents & POLLHUP);
+ triggerInterrupt();
+ });
+ };
+
+ ~MonitorFdHup()
+ {
+ pthread_cancel(thread.native_handle());
+ thread.join();
+ }
+};
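+
+/* A minimal usage sketch; ‘clientFd’ stands for a connected socket
+   owned by the caller:
+
+     MonitorFdHup monitor(clientFd);
+     // ... handle the connection; when the peer closes its end,
+     // triggerInterrupt() is called and subsequent checkInterrupt()
+     // calls will abort the operation.
+*/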
+
+
+}
diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh
new file mode 100644
index 000000000..20df21948
--- /dev/null
+++ b/src/libutil/pool.hh
@@ -0,0 +1,157 @@
+#pragma once
+
+#include <functional>
+#include <limits>
+#include <list>
+#include <memory>
+#include <cassert>
+
+#include "sync.hh"
+#include "ref.hh"
+
+namespace nix {
+
+/* This template class implements a simple pool manager of resources
+ of some type R, such as database connections. It is used as
+ follows:
+
+ class Connection { ... };
+
+ Pool<Connection> pool;
+
+ {
+ auto conn(pool.get());
+ conn->exec("select ...");
+ }
+
+ Here, the Connection object referenced by ‘conn’ is automatically
+ returned to the pool when ‘conn’ goes out of scope.
+*/
+
+template <class R>
+class Pool
+{
+public:
+
+ /* A function that produces new instances of R on demand. */
+ typedef std::function<ref<R>()> Factory;
+
+ /* A function that checks whether an instance of R is still
+ usable. Unusable instances are removed from the pool. */
+ typedef std::function<bool(const ref<R> &)> Validator;
+
+private:
+
+ Factory factory;
+ Validator validator;
+
+ struct State
+ {
+ size_t inUse = 0;
+ size_t max;
+ std::vector<ref<R>> idle;
+ };
+
+ Sync<State> state;
+
+ std::condition_variable wakeup;
+
+public:
+
+ Pool(size_t max = std::numeric_limits<size_t>::max(),
+ const Factory & factory = []() { return make_ref<R>(); },
+ const Validator & validator = [](ref<R> r) { return true; })
+ : factory(factory)
+ , validator(validator)
+ {
+ auto state_(state.lock());
+ state_->max = max;
+ }
+
+ ~Pool()
+ {
+ auto state_(state.lock());
+ assert(!state_->inUse);
+ state_->max = 0;
+ state_->idle.clear();
+ }
+
+ class Handle
+ {
+ private:
+ Pool & pool;
+ std::shared_ptr<R> r;
+
+ friend Pool;
+
+ Handle(Pool & pool, std::shared_ptr<R> r) : pool(pool), r(r) { }
+
+ public:
+ Handle(Handle && h) : pool(h.pool), r(h.r) { h.r.reset(); }
+
+ Handle(const Handle & l) = delete;
+
+ ~Handle()
+ {
+ if (!r) return;
+ {
+ auto state_(pool.state.lock());
+ state_->idle.push_back(ref<R>(r));
+ assert(state_->inUse);
+ state_->inUse--;
+ }
+ pool.wakeup.notify_one();
+ }
+
+ R * operator -> () { return &*r; }
+ R & operator * () { return *r; }
+ };
+
+ Handle get()
+ {
+ {
+ auto state_(state.lock());
+
+            /* If we're over the maximum number of instances, we need
+ to wait until a slot becomes available. */
+ while (state_->idle.empty() && state_->inUse >= state_->max)
+ state_.wait(wakeup);
+
+ while (!state_->idle.empty()) {
+ auto p = state_->idle.back();
+ state_->idle.pop_back();
+ if (validator(p)) {
+ state_->inUse++;
+ return Handle(*this, p);
+ }
+ }
+
+ state_->inUse++;
+ }
+
+ /* We need to create a new instance. Because that might take a
+ while, we don't hold the lock in the meantime. */
+ try {
+ Handle h(*this, factory());
+ return h;
+ } catch (...) {
+ auto state_(state.lock());
+ state_->inUse--;
+ wakeup.notify_one();
+ throw;
+ }
+ }
+
+ size_t count()
+ {
+ auto state_(state.lock());
+ return state_->idle.size() + state_->inUse;
+ }
+
+ size_t capacity()
+ {
+ return state.lock()->max;
+ }
+};
+
+}
diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh
new file mode 100644
index 000000000..0be2a7e74
--- /dev/null
+++ b/src/libutil/ref.hh
@@ -0,0 +1,92 @@
+#pragma once
+
+#include <memory>
+#include <exception>
+#include <stdexcept>
+
+namespace nix {
+
+/* A simple non-nullable reference-counted pointer. Actually a wrapper
+ around std::shared_ptr that prevents non-null constructions. */
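+/* A minimal usage sketch:
+
+     ref<std::string> s = make_ref<std::string>("hello");
+     assert(s->size() == 5);
+
+     std::shared_ptr<std::string> empty;
+     ref<std::string> bad(empty);   // throws std::invalid_argument
+*/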
+template<typename T>
+class ref
+{
+private:
+
+ std::shared_ptr<T> p;
+
+public:
+
+ ref<T>(const ref<T> & r)
+ : p(r.p)
+ { }
+
+ explicit ref<T>(const std::shared_ptr<T> & p)
+ : p(p)
+ {
+ if (!p)
+ throw std::invalid_argument("null pointer cast to ref");
+ }
+
+ explicit ref<T>(T * p)
+ : p(p)
+ {
+ if (!p)
+ throw std::invalid_argument("null pointer cast to ref");
+ }
+
+ T* operator ->() const
+ {
+ return &*p;
+ }
+
+ T& operator *() const
+ {
+ return *p;
+ }
+
+ operator std::shared_ptr<T> () const
+ {
+ return p;
+ }
+
+ std::shared_ptr<T> get_ptr() const
+ {
+ return p;
+ }
+
+ template<typename T2>
+ ref<T2> cast() const
+ {
+ return ref<T2>(std::dynamic_pointer_cast<T2>(p));
+ }
+
+ template<typename T2>
+ std::shared_ptr<T2> dynamic_pointer_cast() const
+ {
+ return std::dynamic_pointer_cast<T2>(p);
+ }
+
+ template<typename T2>
+ operator ref<T2> () const
+ {
+ return ref<T2>((std::shared_ptr<T2>) p);
+ }
+
+private:
+
+ template<typename T2, typename... Args>
+ friend ref<T2>
+ make_ref(Args&&... args);
+
+};
+
+template<typename T, typename... Args>
+inline ref<T>
+make_ref(Args&&... args)
+{
+ auto p = std::make_shared<T>(std::forward<Args>(args)...);
+ return ref<T>(p);
+}
+
+}
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
new file mode 100644
index 000000000..950e6362a
--- /dev/null
+++ b/src/libutil/serialise.cc
@@ -0,0 +1,247 @@
+#include "serialise.hh"
+#include "util.hh"
+
+#include <cstring>
+#include <cerrno>
+#include <memory>
+
+
+namespace nix {
+
+
+void BufferedSink::operator () (const unsigned char * data, size_t len)
+{
+ if (!buffer) buffer = decltype(buffer)(new unsigned char[bufSize]);
+
+ while (len) {
+ /* Optimisation: bypass the buffer if the data exceeds the
+ buffer size. */
+ if (bufPos + len >= bufSize) {
+ flush();
+ write(data, len);
+ break;
+ }
+ /* Otherwise, copy the bytes to the buffer. Flush the buffer
+ when it's full. */
+ size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
+ memcpy(buffer.get() + bufPos, data, n);
+ data += n; bufPos += n; len -= n;
+ if (bufPos == bufSize) flush();
+ }
+}
+
+
+void BufferedSink::flush()
+{
+ if (bufPos == 0) return;
+ size_t n = bufPos;
+ bufPos = 0; // don't trigger the assert() in ~BufferedSink()
+ write(buffer.get(), n);
+}
+
+
+FdSink::~FdSink()
+{
+ try { flush(); } catch (...) { ignoreException(); }
+}
+
+
+size_t threshold = 256 * 1024 * 1024;
+
+static void warnLargeDump()
+{
+ printError("warning: dumping very large path (> 256 MiB); this may run out of memory");
+}
+
+
+void FdSink::write(const unsigned char * data, size_t len)
+{
+ written += len;
+ static bool warned = false;
+ if (warn && !warned) {
+ if (written > threshold) {
+ warnLargeDump();
+ warned = true;
+ }
+ }
+ try {
+ writeFull(fd, data, len);
+ } catch (SysError & e) {
+        _good = false;
+ }
+}
+
+
+bool FdSink::good()
+{
+ return _good;
+}
+
+
+void Source::operator () (unsigned char * data, size_t len)
+{
+ while (len) {
+ size_t n = read(data, len);
+ data += n; len -= n;
+ }
+}
+
+
+size_t BufferedSource::read(unsigned char * data, size_t len)
+{
+ if (!buffer) buffer = decltype(buffer)(new unsigned char[bufSize]);
+
+ if (!bufPosIn) bufPosIn = readUnbuffered(buffer.get(), bufSize);
+
+ /* Copy out the data in the buffer. */
+ size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
+ memcpy(data, buffer.get() + bufPosOut, n);
+ bufPosOut += n;
+ if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
+ return n;
+}
+
+
+bool BufferedSource::hasData()
+{
+ return bufPosOut < bufPosIn;
+}
+
+
+size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
+{
+ ssize_t n;
+ do {
+ checkInterrupt();
+        n = ::read(fd, (char *) data, len);
+ } while (n == -1 && errno == EINTR);
+ if (n == -1) { _good = false; throw SysError("reading from file"); }
+ if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); }
+ read += n;
+ return n;
+}
+
+
+bool FdSource::good()
+{
+ return _good;
+}
+
+
+size_t StringSource::read(unsigned char * data, size_t len)
+{
+ if (pos == s.size()) throw EndOfFile("end of string reached");
+ size_t n = s.copy((char *) data, len, pos);
+ pos += n;
+ return n;
+}
+
+
+void writePadding(size_t len, Sink & sink)
+{
+ if (len % 8) {
+ unsigned char zero[8];
+ memset(zero, 0, sizeof(zero));
+ sink(zero, 8 - (len % 8));
+ }
+}
+
+
+void writeString(const unsigned char * buf, size_t len, Sink & sink)
+{
+ sink << len;
+ sink(buf, len);
+ writePadding(len, sink);
+}
+
+
+Sink & operator << (Sink & sink, const string & s)
+{
+ writeString((const unsigned char *) s.data(), s.size(), sink);
+ return sink;
+}
+
+
+template<class T> void writeStrings(const T & ss, Sink & sink)
+{
+ sink << ss.size();
+ for (auto & i : ss)
+ sink << i;
+}
+
+Sink & operator << (Sink & sink, const Strings & s)
+{
+ writeStrings(s, sink);
+ return sink;
+}
+
+Sink & operator << (Sink & sink, const StringSet & s)
+{
+ writeStrings(s, sink);
+ return sink;
+}
+
+
+void readPadding(size_t len, Source & source)
+{
+ if (len % 8) {
+ unsigned char zero[8];
+ size_t n = 8 - (len % 8);
+ source(zero, n);
+ for (unsigned int i = 0; i < n; i++)
+ if (zero[i]) throw SerialisationError("non-zero padding");
+ }
+}
+
+
+size_t readString(unsigned char * buf, size_t max, Source & source)
+{
+ auto len = readNum<size_t>(source);
+ if (len > max) throw Error("string is too long");
+ source(buf, len);
+ readPadding(len, source);
+ return len;
+}
+
+
+string readString(Source & source)
+{
+ auto len = readNum<size_t>(source);
+ std::string res(len, 0);
+ source((unsigned char*) res.data(), len);
+ readPadding(len, source);
+ return res;
+}
+
+Source & operator >> (Source & in, string & s)
+{
+ s = readString(in);
+ return in;
+}
+
+
+template<class T> T readStrings(Source & source)
+{
+ auto count = readNum<size_t>(source);
+ T ss;
+ while (count--)
+ ss.insert(ss.end(), readString(source));
+ return ss;
+}
+
+template Paths readStrings(Source & source);
+template PathSet readStrings(Source & source);
+
+
+void StringSink::operator () (const unsigned char * data, size_t len)
+{
+ static bool warned = false;
+ if (!warned && s->size() > threshold) {
+ warnLargeDump();
+ warned = true;
+ }
+ s->append((const char *) data, len);
+}
+
+
+}
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
new file mode 100644
index 000000000..2bdee7080
--- /dev/null
+++ b/src/libutil/serialise.hh
@@ -0,0 +1,241 @@
+#pragma once
+
+#include <memory>
+
+#include "types.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+/* Abstract destination of binary data. */
+struct Sink
+{
+ virtual ~Sink() { }
+ virtual void operator () (const unsigned char * data, size_t len) = 0;
+ virtual bool good() { return true; }
+
+ void operator () (const std::string & s)
+ {
+ (*this)((const unsigned char *) s.data(), s.size());
+ }
+};
+
+
+/* A buffered abstract sink. */
+struct BufferedSink : Sink
+{
+ size_t bufSize, bufPos;
+ std::unique_ptr<unsigned char[]> buffer;
+
+ BufferedSink(size_t bufSize = 32 * 1024)
+ : bufSize(bufSize), bufPos(0), buffer(nullptr) { }
+
+ void operator () (const unsigned char * data, size_t len) override;
+
+ void operator () (const std::string & s)
+ {
+ Sink::operator()(s);
+ }
+
+ void flush();
+
+ virtual void write(const unsigned char * data, size_t len) = 0;
+};
+
+
+/* Abstract source of binary data. */
+struct Source
+{
+ virtual ~Source() { }
+
+ /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
+ It blocks until all the requested data is available, or throws
+ an error if it is not going to be available. */
+ void operator () (unsigned char * data, size_t len);
+
+    /* Store up to ‘len’ bytes in the buffer pointed to by ‘data’, and
+       return the number of bytes stored. It blocks until at least
+       one byte is available. */
+ virtual size_t read(unsigned char * data, size_t len) = 0;
+
+ virtual bool good() { return true; }
+};
+
+
+/* A buffered abstract source. */
+struct BufferedSource : Source
+{
+ size_t bufSize, bufPosIn, bufPosOut;
+ std::unique_ptr<unsigned char[]> buffer;
+
+ BufferedSource(size_t bufSize = 32 * 1024)
+ : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(nullptr) { }
+
+ size_t read(unsigned char * data, size_t len) override;
+
+ /* Underlying read call, to be overridden. */
+ virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
+
+ bool hasData();
+};
+
+
+/* A sink that writes data to a file descriptor. */
+struct FdSink : BufferedSink
+{
+ int fd;
+ bool warn = false;
+ size_t written = 0;
+
+ FdSink() : fd(-1) { }
+ FdSink(int fd) : fd(fd) { }
+ FdSink(FdSink&&) = default;
+ FdSink& operator=(FdSink&&) = default;
+ ~FdSink();
+
+ void write(const unsigned char * data, size_t len) override;
+
+ bool good() override;
+
+private:
+ bool _good = true;
+};
+
+
+/* A source that reads data from a file descriptor. */
+struct FdSource : BufferedSource
+{
+ int fd;
+ size_t read = 0;
+
+ FdSource() : fd(-1) { }
+ FdSource(int fd) : fd(fd) { }
+ size_t readUnbuffered(unsigned char * data, size_t len) override;
+ bool good() override;
+private:
+ bool _good = true;
+};
+
+
+/* A sink that writes data to a string. */
+struct StringSink : Sink
+{
+ ref<std::string> s;
+ StringSink() : s(make_ref<std::string>()) { };
+ StringSink(ref<std::string> s) : s(s) { };
+ void operator () (const unsigned char * data, size_t len) override;
+};
+
+
+/* A source that reads data from a string. */
+struct StringSource : Source
+{
+ const string & s;
+ size_t pos;
+ StringSource(const string & _s) : s(_s), pos(0) { }
+ size_t read(unsigned char * data, size_t len) override;
+};
+
+
+/* Adapter class of a Source that saves all data read to ‘data’. */
+struct TeeSource : Source
+{
+ Source & orig;
+ ref<std::string> data;
+ TeeSource(Source & orig)
+ : orig(orig), data(make_ref<std::string>()) { }
+ size_t read(unsigned char * data, size_t len)
+ {
+ size_t n = orig.read(data, len);
+ this->data->append((const char *) data, n);
+ return n;
+ }
+};
+
+
+void writePadding(size_t len, Sink & sink);
+void writeString(const unsigned char * buf, size_t len, Sink & sink);
+
+inline Sink & operator << (Sink & sink, uint64_t n)
+{
+ unsigned char buf[8];
+ buf[0] = n & 0xff;
+ buf[1] = (n >> 8) & 0xff;
+ buf[2] = (n >> 16) & 0xff;
+ buf[3] = (n >> 24) & 0xff;
+ buf[4] = (n >> 32) & 0xff;
+ buf[5] = (n >> 40) & 0xff;
+ buf[6] = (n >> 48) & 0xff;
+ buf[7] = (n >> 56) & 0xff;
+ sink(buf, sizeof(buf));
+ return sink;
+}
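+
+/* Integers are thus serialised as 8 bytes, little-endian.  Strings are
+   framed by writeString() as the 64-bit length, the raw bytes, and
+   zero padding up to the next multiple of 8; e.g. "foo" is written as
+   03 00 00 00 00 00 00 00 66 6f 6f 00 00 00 00 00. */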
+
+Sink & operator << (Sink & sink, const string & s);
+Sink & operator << (Sink & sink, const Strings & s);
+Sink & operator << (Sink & sink, const StringSet & s);
+
+
+MakeError(SerialisationError, Error)
+
+
+template<typename T>
+T readNum(Source & source)
+{
+ unsigned char buf[8];
+ source(buf, sizeof(buf));
+
+ uint64_t n =
+ ((unsigned long long) buf[0]) |
+ ((unsigned long long) buf[1] << 8) |
+ ((unsigned long long) buf[2] << 16) |
+ ((unsigned long long) buf[3] << 24) |
+ ((unsigned long long) buf[4] << 32) |
+ ((unsigned long long) buf[5] << 40) |
+ ((unsigned long long) buf[6] << 48) |
+ ((unsigned long long) buf[7] << 56);
+
+ if (n > std::numeric_limits<T>::max())
+ throw SerialisationError("serialised integer %d is too large for type ‘%s’", n, typeid(T).name());
+
+ return n;
+}
+
+
+inline unsigned int readInt(Source & source)
+{
+ return readNum<unsigned int>(source);
+}
+
+
+inline uint64_t readLongLong(Source & source)
+{
+ return readNum<uint64_t>(source);
+}
+
+
+void readPadding(size_t len, Source & source);
+size_t readString(unsigned char * buf, size_t max, Source & source);
+string readString(Source & source);
+template<class T> T readStrings(Source & source);
+
+Source & operator >> (Source & in, string & s);
+
+template<typename T>
+Source & operator >> (Source & in, T & n)
+{
+ n = readNum<T>(in);
+ return in;
+}
+
+template<typename T>
+Source & operator >> (Source & in, bool & b)
+{
+ b = readNum<uint64_t>(in);
+ return in;
+}
+
+
+}
diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh
new file mode 100644
index 000000000..611c900e0
--- /dev/null
+++ b/src/libutil/sync.hh
@@ -0,0 +1,87 @@
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <cassert>
+
+namespace nix {
+
+/* This template class ensures synchronized access to a value of type
+ T. It is used as follows:
+
+ struct Data { int x; ... };
+
+ Sync<Data> data;
+
+ {
+ auto data_(data.lock());
+ data_->x = 123;
+ }
+
+ Here, "data" is automatically unlocked when "data_" goes out of
+ scope.
+*/
+
+template<class T, class M = std::mutex>
+class Sync
+{
+private:
+ M mutex;
+ T data;
+
+public:
+
+ Sync() { }
+ Sync(const T & data) : data(data) { }
+ Sync(T && data) noexcept : data(std::move(data)) { }
+
+ class Lock
+ {
+ private:
+ Sync * s;
+ std::unique_lock<M> lk;
+ friend Sync;
+ Lock(Sync * s) : s(s), lk(s->mutex) { }
+ public:
+ Lock(Lock && l) : s(l.s) { abort(); }
+ Lock(const Lock & l) = delete;
+ ~Lock() { }
+ T * operator -> () { return &s->data; }
+ T & operator * () { return s->data; }
+
+ void wait(std::condition_variable & cv)
+ {
+ assert(s);
+ cv.wait(lk);
+ }
+
+ template<class Rep, class Period>
+ void wait_for(std::condition_variable & cv,
+ const std::chrono::duration<Rep, Period> & duration)
+ {
+ assert(s);
+ cv.wait_for(lk, duration);
+ }
+
+ template<class Rep, class Period, class Predicate>
+ bool wait_for(std::condition_variable & cv,
+ const std::chrono::duration<Rep, Period> & duration,
+ Predicate pred)
+ {
+ assert(s);
+ return cv.wait_for(lk, duration, pred);
+ }
+
+ template<class Clock, class Duration>
+ std::cv_status wait_until(std::condition_variable & cv,
+ const std::chrono::time_point<Clock, Duration> & duration)
+ {
+ assert(s);
+ return cv.wait_until(lk, duration);
+ }
+ };
+
+ Lock lock() { return Lock(this); }
+};
+
+}
diff --git a/src/libutil/thread-pool.cc b/src/libutil/thread-pool.cc
new file mode 100644
index 000000000..0a3a40724
--- /dev/null
+++ b/src/libutil/thread-pool.cc
@@ -0,0 +1,104 @@
+#include "thread-pool.hh"
+#include "affinity.hh"
+
+namespace nix {
+
+ThreadPool::ThreadPool(size_t _maxThreads)
+ : maxThreads(_maxThreads)
+{
+ restoreAffinity(); // FIXME
+
+ if (!maxThreads) {
+ maxThreads = std::thread::hardware_concurrency();
+ if (!maxThreads) maxThreads = 1;
+ }
+
+ debug(format("starting pool of %d threads") % maxThreads);
+}
+
+ThreadPool::~ThreadPool()
+{
+ std::vector<std::thread> workers;
+ {
+ auto state(state_.lock());
+ state->quit = true;
+ std::swap(workers, state->workers);
+ }
+
+ debug(format("reaping %d worker threads") % workers.size());
+
+ work.notify_all();
+
+ for (auto & thr : workers)
+ thr.join();
+}
+
+void ThreadPool::enqueue(const work_t & t)
+{
+ auto state(state_.lock());
+ if (state->quit)
+ throw ThreadPoolShutDown("cannot enqueue a work item while the thread pool is shutting down");
+ state->left.push(t);
+ if (state->left.size() > state->workers.size() && state->workers.size() < maxThreads)
+ state->workers.emplace_back(&ThreadPool::workerEntry, this);
+ work.notify_one();
+}
+
+void ThreadPool::process()
+{
+ while (true) {
+ auto state(state_.lock());
+ if (state->exception)
+ std::rethrow_exception(state->exception);
+ if (state->left.empty() && !state->pending) break;
+ state.wait(done);
+ }
+}
+
+void ThreadPool::workerEntry()
+{
+ bool didWork = false;
+
+ while (true) {
+ work_t w;
+ {
+ auto state(state_.lock());
+ while (true) {
+ if (state->quit || state->exception) return;
+ if (didWork) {
+ assert(state->pending);
+ state->pending--;
+ didWork = false;
+ }
+ if (!state->left.empty()) break;
+ if (!state->pending)
+ done.notify_all();
+ state.wait(work);
+ }
+ w = state->left.front();
+ state->left.pop();
+ state->pending++;
+ }
+
+ try {
+ w();
+ } catch (std::exception & e) {
+ auto state(state_.lock());
+ if (state->exception) {
+ if (!dynamic_cast<Interrupted*>(&e) &&
+ !dynamic_cast<ThreadPoolShutDown*>(&e))
+ printError(format("error: %s") % e.what());
+ } else {
+ state->exception = std::current_exception();
+ work.notify_all();
+ done.notify_all();
+ }
+ }
+
+ didWork = true;
+ }
+}
+
+}
+
+
diff --git a/src/libutil/thread-pool.hh b/src/libutil/thread-pool.hh
new file mode 100644
index 000000000..b64dc52d4
--- /dev/null
+++ b/src/libutil/thread-pool.hh
@@ -0,0 +1,119 @@
+#pragma once
+
+#include "sync.hh"
+#include "util.hh"
+
+#include <queue>
+#include <functional>
+#include <thread>
+#include <map>
+
+namespace nix {
+
+MakeError(ThreadPoolShutDown, Error)
+
+/* A simple thread pool that executes a queue of work items
+ (lambdas). */
+class ThreadPool
+{
+public:
+
+ ThreadPool(size_t maxThreads = 0);
+
+ ~ThreadPool();
+
+ // FIXME: use std::packaged_task?
+ typedef std::function<void()> work_t;
+
+ /* Enqueue a function to be executed by the thread pool. */
+ void enqueue(const work_t & t);
+
+ /* Execute work items until the queue is empty. Note that work
+ items are allowed to add new items to the queue; this is
+ handled correctly. Queue processing stops prematurely if any
+ work item throws an exception. This exception is propagated to
+ the calling thread. If multiple work items throw an exception
+ concurrently, only one item is propagated; the others are
+ printed on stderr and otherwise ignored. */
+ void process();
+
+private:
+
+ size_t maxThreads;
+
+ struct State
+ {
+ std::queue<work_t> left;
+ size_t pending = 0;
+ std::exception_ptr exception;
+ std::vector<std::thread> workers;
+ bool quit = false;
+ };
+
+ Sync<State> state_;
+
+ std::condition_variable work, done;
+
+ void workerEntry();
+};
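+
+/* A minimal usage sketch (‘paths’ and ‘processOne’ are illustrative):
+
+     ThreadPool pool;
+     for (auto & path : paths)
+         pool.enqueue([path]() { processOne(path); });
+     pool.process();   // blocks until all work is done, rethrows errors
+*/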
+
+/* Process in parallel a set of items of type T that have a partial
+ ordering between them. Thus, any item is only processed after all
+ its dependencies have been processed. */
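+/* A minimal usage sketch, where ‘nodes’ is a PathSet and
+   ‘getReferences’ / ‘copyOne’ are illustrative helpers:
+
+     ThreadPool pool;
+     processGraph<Path>(pool, nodes,
+         [&](const Path & p) { return getReferences(p); },  // edges
+         [&](const Path & p) { copyOne(p); });              // work
+*/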
+template<typename T>
+void processGraph(
+ ThreadPool & pool,
+ const std::set<T> & nodes,
+ std::function<std::set<T>(const T &)> getEdges,
+ std::function<void(const T &)> processNode)
+{
+ struct Graph {
+ std::set<T> left;
+ std::map<T, std::set<T>> refs, rrefs;
+ std::function<void(T)> wrap;
+ };
+
+ ref<Sync<Graph>> graph_ = make_ref<Sync<Graph>>();
+
+ auto wrapWork = [&pool, graph_, processNode](const T & node) {
+ processNode(node);
+
+ /* Enqueue work for all nodes that were waiting on this one. */
+ {
+ auto graph(graph_->lock());
+ graph->left.erase(node);
+ for (auto & rref : graph->rrefs[node]) {
+ auto & refs(graph->refs[rref]);
+ auto i = refs.find(node);
+ assert(i != refs.end());
+ refs.erase(i);
+ if (refs.empty())
+ pool.enqueue(std::bind(graph->wrap, rref));
+ }
+ }
+ };
+
+ {
+ auto graph(graph_->lock());
+ graph->left = nodes;
+ graph->wrap = wrapWork;
+ }
+
+ /* Build the dependency graph; enqueue all nodes with no
+ dependencies. */
+ for (auto & node : nodes) {
+ auto refs = getEdges(node);
+ {
+ auto graph(graph_->lock());
+ for (auto & ref : refs)
+ if (ref != node && graph->left.count(ref)) {
+ graph->refs[node].insert(ref);
+ graph->rrefs[ref].insert(node);
+ }
+ if (graph->refs[node].empty())
+ pool.enqueue(std::bind(graph->wrap, node));
+ }
+ }
+}
+
+}
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
new file mode 100644
index 000000000..1429c2385
--- /dev/null
+++ b/src/libutil/types.hh
@@ -0,0 +1,154 @@
+#pragma once
+
+
+#include "ref.hh"
+
+#include <string>
+#include <list>
+#include <set>
+#include <memory>
+#include <map>
+
+#include <boost/format.hpp>
+
+/* Before 4.7, gcc's std::exception uses empty throw() specifiers for
+ * its (virtual) destructor and what() in c++11 mode, in violation of spec
+ */
+#ifdef __GNUC__
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#define EXCEPTION_NEEDS_THROW_SPEC
+#endif
+#endif
+
+
+namespace nix {
+
+
+/* Inherit some names from other namespaces for convenience. */
+using std::string;
+using std::list;
+using std::set;
+using std::vector;
+using boost::format;
+
+
+struct FormatOrString
+{
+ string s;
+ FormatOrString(const string & s) : s(s) { };
+ FormatOrString(const format & f) : s(f.str()) { };
+ FormatOrString(const char * s) : s(s) { };
+};
+
+
+/* A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is
+   equivalent to ‘boost::format(format) % a_0 % ... % a_n’. However,
+   ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion
+ takes place). */
+
+inline void formatHelper(boost::format & f)
+{
+}
+
+template<typename T, typename... Args>
+inline void formatHelper(boost::format & f, T x, Args... args)
+{
+ formatHelper(f % x, args...);
+}
+
+inline std::string fmt(const std::string & s)
+{
+ return s;
+}
+
+inline std::string fmt(const char * s)
+{
+ return s;
+}
+
+inline std::string fmt(const FormatOrString & fs)
+{
+ return fs.s;
+}
+
+template<typename... Args>
+inline std::string fmt(const std::string & fs, Args... args)
+{
+ boost::format f(fs);
+ formatHelper(f, args...);
+ return f.str();
+}
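+
+/* For example, fmt("copying ‘%s’ (%d bytes)", "foo", 3) yields
+   "copying ‘foo’ (3 bytes)", while fmt("100%") is returned verbatim
+   because the single-argument overloads skip %-expansion. */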
+
+
+/* BaseError should generally not be caught, as it has Interrupted as
+ a subclass. Catch Error instead. */
+class BaseError : public std::exception
+{
+protected:
+ string prefix_; // used for location traces etc.
+ string err;
+public:
+ unsigned int status = 1; // exit status
+
+ template<typename... Args>
+ BaseError(unsigned int status, Args... args)
+ : err(fmt(args...))
+ , status(status)
+ {
+ }
+
+ template<typename... Args>
+ BaseError(Args... args)
+ : err(fmt(args...))
+ {
+ }
+
+#ifdef EXCEPTION_NEEDS_THROW_SPEC
+ ~BaseError() throw () { };
+ const char * what() const throw () { return err.c_str(); }
+#else
+ const char * what() const noexcept { return err.c_str(); }
+#endif
+
+ const string & msg() const { return err; }
+ const string & prefix() const { return prefix_; }
+ BaseError & addPrefix(const FormatOrString & fs);
+};
+
+#define MakeError(newClass, superClass) \
+ class newClass : public superClass \
+ { \
+ public: \
+ using superClass::superClass; \
+ };
+
+MakeError(Error, BaseError)
+
+class SysError : public Error
+{
+public:
+ int errNo;
+
+ template<typename... Args>
+ SysError(Args... args)
+ : Error(addErrno(fmt(args...)))
+ { }
+
+private:
+
+ std::string addErrno(const std::string & s);
+};
+
+
+typedef list<string> Strings;
+typedef set<string> StringSet;
+typedef std::map<std::string, std::string> StringMap;
+
+
+/* Paths are just strings. */
+typedef string Path;
+typedef list<Path> Paths;
+typedef set<Path> PathSet;
+
+
+}
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
new file mode 100644
index 000000000..88a2f752c
--- /dev/null
+++ b/src/libutil/util.cc
@@ -0,0 +1,1295 @@
+#include "util.hh"
+#include "affinity.hh"
+#include "sync.hh"
+#include "finally.hh"
+
+#include <cctype>
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <thread>
+#include <future>
+
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <limits.h>
+
+#ifdef __APPLE__
+#include <sys/syscall.h>
+#endif
+
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif
+
+
+extern char * * environ;
+
+
+namespace nix {
+
+
+BaseError & BaseError::addPrefix(const FormatOrString & fs)
+{
+ prefix_ = fs.s + prefix_;
+ return *this;
+}
+
+
+std::string SysError::addErrno(const std::string & s)
+{
+ errNo = errno;
+ return s + ": " + strerror(errNo);
+}
+
+
+string getEnv(const string & key, const string & def)
+{
+ char * value = getenv(key.c_str());
+ return value ? string(value) : def;
+}
+
+
+std::map<std::string, std::string> getEnv()
+{
+ std::map<std::string, std::string> env;
+ for (size_t i = 0; environ[i]; ++i) {
+ auto s = environ[i];
+ auto eq = strchr(s, '=');
+ if (!eq)
+ // invalid env, just keep going
+ continue;
+ env.emplace(std::string(s, eq), std::string(eq + 1));
+ }
+ return env;
+}
+
+
+Path absPath(Path path, Path dir)
+{
+ if (path[0] != '/') {
+ if (dir == "") {
+#ifdef __GNU__
+ /* GNU (aka. GNU/Hurd) doesn't have any limitation on path
+ lengths and doesn't define `PATH_MAX'. */
+ char *buf = getcwd(NULL, 0);
+ if (buf == NULL)
+#else
+ char buf[PATH_MAX];
+ if (!getcwd(buf, sizeof(buf)))
+#endif
+ throw SysError("cannot get cwd");
+ dir = buf;
+#ifdef __GNU__
+ free(buf);
+#endif
+ }
+ path = dir + "/" + path;
+ }
+ return canonPath(path);
+}
+
+
+Path canonPath(const Path & path, bool resolveSymlinks)
+{
+ assert(path != "");
+
+ string s;
+
+ if (path[0] != '/')
+ throw Error(format("not an absolute path: ‘%1%’") % path);
+
+ string::const_iterator i = path.begin(), end = path.end();
+ string temp;
+
+ /* Count the number of times we follow a symlink and stop at some
+ arbitrary (but high) limit to prevent infinite loops. */
+ unsigned int followCount = 0, maxFollow = 1024;
+
+ while (1) {
+
+ /* Skip slashes. */
+ while (i != end && *i == '/') i++;
+ if (i == end) break;
+
+ /* Ignore `.'. */
+ if (*i == '.' && (i + 1 == end || i[1] == '/'))
+ i++;
+
+ /* If `..', delete the last component. */
+ else if (*i == '.' && i + 1 < end && i[1] == '.' &&
+ (i + 2 == end || i[2] == '/'))
+ {
+ if (!s.empty()) s.erase(s.rfind('/'));
+ i += 2;
+ }
+
+ /* Normal component; copy it. */
+ else {
+ s += '/';
+ while (i != end && *i != '/') s += *i++;
+
+ /* If s points to a symlink, resolve it and restart (since
+ the symlink target might contain new symlinks). */
+ if (resolveSymlinks && isLink(s)) {
+ if (++followCount >= maxFollow)
+ throw Error(format("infinite symlink recursion in path ‘%1%’") % path);
+ temp = absPath(readLink(s), dirOf(s))
+ + string(i, end);
+ i = temp.begin(); /* restart */
+ end = temp.end();
+ s = "";
+ }
+ }
+ }
+
+ return s.empty() ? "/" : s;
+}
+
+
+Path dirOf(const Path & path)
+{
+ Path::size_type pos = path.rfind('/');
+ if (pos == string::npos)
+ throw Error(format("invalid file name ‘%1%’") % path);
+ return pos == 0 ? "/" : Path(path, 0, pos);
+}
+
+
+string baseNameOf(const Path & path)
+{
+ if (path.empty())
+ return "";
+
+ Path::size_type last = path.length() - 1;
+ if (path[last] == '/' && last > 0)
+ last -= 1;
+
+ Path::size_type pos = path.rfind('/', last);
+ if (pos == string::npos)
+ pos = 0;
+ else
+ pos += 1;
+
+ return string(path, pos, last - pos + 1);
+}
+
+
+bool isInDir(const Path & path, const Path & dir)
+{
+ return path[0] == '/'
+ && string(path, 0, dir.size()) == dir
+ && path.size() >= dir.size() + 2
+ && path[dir.size()] == '/';
+}
+
+
+struct stat lstat(const Path & path)
+{
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting status of ‘%1%’") % path);
+ return st;
+}
+
+
+bool pathExists(const Path & path)
+{
+ int res;
+ struct stat st;
+ res = lstat(path.c_str(), &st);
+ if (!res) return true;
+ if (errno != ENOENT && errno != ENOTDIR)
+ throw SysError(format("getting status of %1%") % path);
+ return false;
+}
+
+
+Path readLink(const Path & path)
+{
+ checkInterrupt();
+ struct stat st = lstat(path);
+ if (!S_ISLNK(st.st_mode))
+ throw Error(format("‘%1%’ is not a symlink") % path);
+ char buf[st.st_size];
+ ssize_t rlsize = readlink(path.c_str(), buf, st.st_size);
+ if (rlsize == -1)
+ throw SysError(format("reading symbolic link ‘%1%’") % path);
+ else if (rlsize > st.st_size)
+ throw Error(format("symbolic link ‘%1%’ size overflow %2% > %3%")
+ % path % rlsize % st.st_size);
+ return string(buf, rlsize);
+}
+
+
+bool isLink(const Path & path)
+{
+ struct stat st = lstat(path);
+ return S_ISLNK(st.st_mode);
+}
+
+
+DirEntries readDirectory(const Path & path)
+{
+ DirEntries entries;
+ entries.reserve(64);
+
+ AutoCloseDir dir(opendir(path.c_str()));
+ if (!dir) throw SysError(format("opening directory ‘%1%’") % path);
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) { /* sic */
+ checkInterrupt();
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ entries.emplace_back(name, dirent->d_ino,
+#ifdef HAVE_STRUCT_DIRENT_D_TYPE
+ dirent->d_type
+#else
+ DT_UNKNOWN
+#endif
+ );
+ }
+ if (errno) throw SysError(format("reading directory ‘%1%’") % path);
+
+ return entries;
+}
+
+
+unsigned char getFileType(const Path & path)
+{
+ struct stat st = lstat(path);
+ if (S_ISDIR(st.st_mode)) return DT_DIR;
+ if (S_ISLNK(st.st_mode)) return DT_LNK;
+ if (S_ISREG(st.st_mode)) return DT_REG;
+ return DT_UNKNOWN;
+}
+
+
+string readFile(int fd)
+{
+ struct stat st;
+ if (fstat(fd, &st) == -1)
+ throw SysError("statting file");
+
+ auto buf = std::make_unique<unsigned char[]>(st.st_size);
+ readFull(fd, buf.get(), st.st_size);
+
+ return string((char *) buf.get(), st.st_size);
+}
+
+
+string readFile(const Path & path, bool drain)
+{
+ AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+ if (!fd)
+ throw SysError(format("opening file ‘%1%’") % path);
+ return drain ? drainFD(fd.get()) : readFile(fd.get());
+}
+
+
+void writeFile(const Path & path, const string & s, mode_t mode)
+{
+ AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
+ if (!fd)
+ throw SysError(format("opening file ‘%1%’") % path);
+ writeFull(fd.get(), s);
+}
+
+
+string readLine(int fd)
+{
+ string s;
+ while (1) {
+ checkInterrupt();
+ char ch;
+ ssize_t rd = read(fd, &ch, 1);
+ if (rd == -1) {
+ if (errno != EINTR)
+ throw SysError("reading a line");
+ } else if (rd == 0)
+ throw EndOfFile("unexpected EOF reading a line");
+ else {
+ if (ch == '\n') return s;
+ s += ch;
+ }
+ }
+}
+
+
+void writeLine(int fd, string s)
+{
+ s += '\n';
+ writeFull(fd, s);
+}
+
+
+static void _deletePath(const Path & path, unsigned long long & bytesFreed)
+{
+ checkInterrupt();
+
+ struct stat st;
+ if (lstat(path.c_str(), &st) == -1) {
+ if (errno == ENOENT) return;
+ throw SysError(format("getting status of ‘%1%’") % path);
+ }
+
+ if (!S_ISDIR(st.st_mode) && st.st_nlink == 1)
+ bytesFreed += st.st_blocks * 512;
+
+ if (S_ISDIR(st.st_mode)) {
+ /* Make the directory accessible. */
+ const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR;
+ if ((st.st_mode & PERM_MASK) != PERM_MASK) {
+ if (chmod(path.c_str(), st.st_mode | PERM_MASK) == -1)
+ throw SysError(format("chmod ‘%1%’") % path);
+ }
+
+ for (auto & i : readDirectory(path))
+ _deletePath(path + "/" + i.name, bytesFreed);
+ }
+
+ if (remove(path.c_str()) == -1) {
+ if (errno == ENOENT) return;
+ throw SysError(format("cannot unlink ‘%1%’") % path);
+ }
+}
+
+
+void deletePath(const Path & path)
+{
+ unsigned long long dummy;
+ deletePath(path, dummy);
+}
+
+
+void deletePath(const Path & path, unsigned long long & bytesFreed)
+{
+ Activity act(*logger, lvlDebug, format("recursively deleting path ‘%1%’") % path);
+ bytesFreed = 0;
+ _deletePath(path, bytesFreed);
+}
+
+
+static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
+ int & counter)
+{
+ tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR", "/tmp") : tmpRoot, true);
+ if (includePid)
+ return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
+ else
+ return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str();
+}
+
+
+Path createTempDir(const Path & tmpRoot, const Path & prefix,
+ bool includePid, bool useGlobalCounter, mode_t mode)
+{
+ static int globalCounter = 0;
+ int localCounter = 0;
+ int & counter(useGlobalCounter ? globalCounter : localCounter);
+
+ while (1) {
+ checkInterrupt();
+ Path tmpDir = tempName(tmpRoot, prefix, includePid, counter);
+ if (mkdir(tmpDir.c_str(), mode) == 0) {
+#if __FreeBSD__
+ /* Explicitly set the group of the directory. This is to
+               work around problems caused by BSD's group
+ ownership semantics (directories inherit the group of
+ the parent). For instance, the group of /tmp on
+ FreeBSD is "wheel", so all directories created in /tmp
+ will be owned by "wheel"; but if the user is not in
+ "wheel", then "tar" will fail to unpack archives that
+ have the setgid bit set on directories. */
+ if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0)
+ throw SysError(format("setting group of directory ‘%1%’") % tmpDir);
+#endif
+ return tmpDir;
+ }
+ if (errno != EEXIST)
+ throw SysError(format("creating directory ‘%1%’") % tmpDir);
+ }
+}
+
+
+Path getCacheDir()
+{
+ Path cacheDir = getEnv("XDG_CACHE_HOME");
+ if (cacheDir.empty()) {
+ Path homeDir = getEnv("HOME");
+ if (homeDir.empty()) throw Error("$XDG_CACHE_HOME and $HOME are not set");
+ cacheDir = homeDir + "/.cache";
+ }
+ return cacheDir;
+}
+
+
+Path getConfigDir()
+{
+ Path configDir = getEnv("XDG_CONFIG_HOME");
+ if (configDir.empty()) {
+ Path homeDir = getEnv("HOME");
+ if (homeDir.empty()) throw Error("$XDG_CONFIG_HOME and $HOME are not set");
+ configDir = homeDir + "/.config";
+ }
+ return configDir;
+}
+
+
+Paths createDirs(const Path & path)
+{
+ Paths created;
+ if (path == "/") return created;
+
+ struct stat st;
+ if (lstat(path.c_str(), &st) == -1) {
+ created = createDirs(dirOf(path));
+ if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST)
+ throw SysError(format("creating directory ‘%1%’") % path);
+ st = lstat(path);
+ created.push_back(path);
+ }
+
+ if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1)
+ throw SysError(format("statting symlink ‘%1%’") % path);
+
+ if (!S_ISDIR(st.st_mode)) throw Error(format("‘%1%’ is not a directory") % path);
+
+ return created;
+}
+
+
+void createSymlink(const Path & target, const Path & link)
+{
+ if (symlink(target.c_str(), link.c_str()))
+ throw SysError(format("creating symlink from ‘%1%’ to ‘%2%’") % link % target);
+}
+
+
+void replaceSymlink(const Path & target, const Path & link)
+{
+ Path tmp = canonPath(dirOf(link) + "/.new_" + baseNameOf(link));
+
+ createSymlink(target, tmp);
+
+ if (rename(tmp.c_str(), link.c_str()) != 0)
+ throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % link);
+}
+
+
+void readFull(int fd, unsigned char * buf, size_t count)
+{
+ while (count) {
+ checkInterrupt();
+ ssize_t res = read(fd, (char *) buf, count);
+ if (res == -1) {
+ if (errno == EINTR) continue;
+ throw SysError("reading from file");
+ }
+ if (res == 0) throw EndOfFile("unexpected end-of-file");
+ count -= res;
+ buf += res;
+ }
+}
+
+
+void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts)
+{
+ while (count) {
+ if (allowInterrupts) checkInterrupt();
+ ssize_t res = write(fd, (char *) buf, count);
+ if (res == -1 && errno != EINTR)
+ throw SysError("writing to file");
+ if (res > 0) {
+ count -= res;
+ buf += res;
+ }
+ }
+}
+
+
+void writeFull(int fd, const string & s, bool allowInterrupts)
+{
+ writeFull(fd, (const unsigned char *) s.data(), s.size(), allowInterrupts);
+}
+
+
+string drainFD(int fd)
+{
+ string result;
+ unsigned char buffer[4096];
+ while (1) {
+ checkInterrupt();
+ ssize_t rd = read(fd, buffer, sizeof buffer);
+ if (rd == -1) {
+ if (errno != EINTR)
+ throw SysError("reading from file");
+ }
+ else if (rd == 0) break;
+ else result.append((char *) buffer, rd);
+ }
+ return result;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+AutoDelete::AutoDelete() : del{false} {}
+
+AutoDelete::AutoDelete(const string & p, bool recursive) : path(p)
+{
+ del = true;
+ this->recursive = recursive;
+}
+
+AutoDelete::~AutoDelete()
+{
+ try {
+ if (del) {
+ if (recursive)
+ deletePath(path);
+ else {
+ if (remove(path.c_str()) == -1)
+ throw SysError(format("cannot unlink ‘%1%’") % path);
+ }
+ }
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+void AutoDelete::cancel()
+{
+ del = false;
+}
+
+void AutoDelete::reset(const Path & p, bool recursive) {
+ path = p;
+ this->recursive = recursive;
+ del = true;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+AutoCloseFD::AutoCloseFD() : fd{-1} {}
+
+
+AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {}
+
+
+AutoCloseFD::AutoCloseFD(AutoCloseFD&& that) : fd{that.fd}
+{
+ that.fd = -1;
+}
+
+
+AutoCloseFD& AutoCloseFD::operator =(AutoCloseFD&& that)
+{
+ close();
+ fd = that.fd;
+ that.fd = -1;
+ return *this;
+}
+
+
+AutoCloseFD::~AutoCloseFD()
+{
+ try {
+ close();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+int AutoCloseFD::get() const
+{
+ return fd;
+}
+
+
+void AutoCloseFD::close()
+{
+ if (fd != -1) {
+ if (::close(fd) == -1)
+ /* This should never happen. */
+ throw SysError(format("closing file descriptor %1%") % fd);
+ }
+}
+
+
+AutoCloseFD::operator bool() const
+{
+ return fd != -1;
+}
+
+
+int AutoCloseFD::release()
+{
+ int oldFD = fd;
+ fd = -1;
+ return oldFD;
+}
+
+
+void Pipe::create()
+{
+ int fds[2];
+#if HAVE_PIPE2
+ if (pipe2(fds, O_CLOEXEC) != 0) throw SysError("creating pipe");
+#else
+ if (pipe(fds) != 0) throw SysError("creating pipe");
+ closeOnExec(fds[0]);
+ closeOnExec(fds[1]);
+#endif
+ readSide = fds[0];
+ writeSide = fds[1];
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+Pid::Pid()
+{
+}
+
+
+Pid::Pid(pid_t pid)
+ : pid(pid)
+{
+}
+
+
+Pid::~Pid()
+{
+ if (pid != -1) kill();
+}
+
+
+void Pid::operator =(pid_t pid)
+{
+ if (this->pid != -1 && this->pid != pid) kill();
+ this->pid = pid;
+ killSignal = SIGKILL; // reset signal to default
+}
+
+
+Pid::operator pid_t()
+{
+ return pid;
+}
+
+
+int Pid::kill()
+{
+ assert(pid != -1);
+
+ debug(format("killing process %1%") % pid);
+
+ /* Send the requested signal to the child. If it has its own
+ process group, send the signal to every process in the child
+ process group (which hopefully includes *all* its children). */
+ if (::kill(separatePG ? -pid : pid, killSignal) != 0)
+ printError((SysError(format("killing process %1%") % pid).msg()));
+
+ return wait();
+}
+
+
+int Pid::wait()
+{
+ assert(pid != -1);
+ while (1) {
+ int status;
+ int res = waitpid(pid, &status, 0);
+ if (res == pid) {
+ pid = -1;
+ return status;
+ }
+ if (errno != EINTR)
+ throw SysError("cannot get child exit status");
+ checkInterrupt();
+ }
+}
+
+
+void Pid::setSeparatePG(bool separatePG)
+{
+ this->separatePG = separatePG;
+}
+
+
+void Pid::setKillSignal(int signal)
+{
+ this->killSignal = signal;
+}
+
+
+pid_t Pid::release()
+{
+ pid_t p = pid;
+ pid = -1;
+ return p;
+}
+
+
+void killUser(uid_t uid)
+{
+ debug(format("killing all processes running under uid ‘%1%’") % uid);
+
+ assert(uid != 0); /* just to be safe... */
+
+ /* The system call kill(-1, sig) sends the signal `sig' to all
+ processes that the current process is permitted to signal. So
+ we fork a process, switch to uid, and send a mass kill. */
+
+ ProcessOptions options;
+ options.allowVfork = false;
+
+ Pid pid = startProcess([&]() {
+
+ if (setuid(uid) == -1)
+ throw SysError("setting uid");
+
+ while (true) {
+#ifdef __APPLE__
+ /* OSX's kill syscall takes a third parameter that, among
+ other things, determines if kill(-1, signo) affects the
+ calling process. In the OSX libc, it's set to true,
+ which means "follow POSIX", which we don't want here
+ */
+ if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break;
+#else
+ if (kill(-1, SIGKILL) == 0) break;
+#endif
+ if (errno == ESRCH) break; /* no more processes */
+ if (errno != EINTR)
+ throw SysError(format("cannot kill processes for uid ‘%1%’") % uid);
+ }
+
+ _exit(0);
+ }, options);
+
+ int status = pid.wait();
+ if (status != 0)
+ throw Error(format("cannot kill processes for uid ‘%1%’: %2%") % uid % statusToString(status));
+
+ /* !!! We should really do some check to make sure that there are
+ no processes left running under `uid', but there is no portable
+ way to do so (I think). The most reliable way may be `ps -eo
+ uid | grep -q $uid'. */
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+/* Wrapper around vfork to prevent the child process from clobbering
+ the caller's stack frame in the parent. */
+static pid_t doFork(bool allowVfork, std::function<void()> fun) __attribute__((noinline));
+static pid_t doFork(bool allowVfork, std::function<void()> fun)
+{
+#ifdef __linux__
+ pid_t pid = allowVfork ? vfork() : fork();
+#else
+ pid_t pid = fork();
+#endif
+ if (pid != 0) return pid;
+ fun();
+ abort();
+}
+
+
+pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
+{
+ auto wrapper = [&]() {
+ if (!options.allowVfork)
+ logger = makeDefaultLogger();
+ try {
+#if __linux__
+ if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1)
+ throw SysError("setting death signal");
+#endif
+ restoreAffinity();
+ fun();
+ } catch (std::exception & e) {
+ try {
+ std::cerr << options.errorPrefix << e.what() << "\n";
+ } catch (...) { }
+ } catch (...) { }
+ if (options.runExitHandlers)
+ exit(1);
+ else
+ _exit(1);
+ };
+
+ pid_t pid = doFork(options.allowVfork, wrapper);
+ if (pid == -1) throw SysError("unable to fork");
+
+ return pid;
+}
+
+
+std::vector<char *> stringsToCharPtrs(const Strings & ss)
+{
+ std::vector<char *> res;
+ for (auto & s : ss) res.push_back((char *) s.c_str());
+ res.push_back(0);
+ return res;
+}
+
+
+string runProgram(Path program, bool searchPath, const Strings & args,
+ const std::experimental::optional<std::string> & input)
+{
+ checkInterrupt();
+
+ /* Create a pipe. */
+ Pipe out, in;
+ out.create();
+ if (input) in.create();
+
+ /* Fork. */
+ Pid pid = startProcess([&]() {
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("dupping stdout");
+ if (input && dup2(in.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("dupping stdin");
+
+ Strings args_(args);
+ args_.push_front(program);
+
+ restoreSignals();
+
+ if (searchPath)
+ execvp(program.c_str(), stringsToCharPtrs(args_).data());
+ else
+ execv(program.c_str(), stringsToCharPtrs(args_).data());
+
+ throw SysError(format("executing ‘%1%’") % program);
+ });
+
+ out.writeSide = -1;
+
+ std::thread writerThread;
+
+ std::promise<void> promise;
+
+ Finally doJoin([&]() {
+ if (writerThread.joinable())
+ writerThread.join();
+ });
+
+
+ if (input) {
+ in.readSide = -1;
+ writerThread = std::thread([&]() {
+ try {
+ writeFull(in.writeSide.get(), *input);
+ promise.set_value();
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ in.writeSide = -1;
+ });
+ }
+
+ string result = drainFD(out.readSide.get());
+
+ /* Wait for the child to finish. */
+ int status = pid.wait();
+ if (!statusOk(status))
+ throw ExecError(status, format("program ‘%1%’ %2%")
+ % program % statusToString(status));
+
+ /* Wait for the writer thread to finish. */
+ if (input) promise.get_future().get();
+
+ return result;
+}
+
+
+void closeMostFDs(const set<int> & exceptions)
+{
+ int maxFD = 0;
+ maxFD = sysconf(_SC_OPEN_MAX);
+ for (int fd = 0; fd < maxFD; ++fd)
+ if (fd != STDIN_FILENO && fd != STDOUT_FILENO && fd != STDERR_FILENO
+ && exceptions.find(fd) == exceptions.end())
+ close(fd); /* ignore result */
+}
+
+
+void closeOnExec(int fd)
+{
+ int prev;
+ if ((prev = fcntl(fd, F_GETFD, 0)) == -1 ||
+ fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1)
+ throw SysError("setting close-on-exec flag");
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+bool _isInterrupted = false;
+
+static thread_local bool interruptThrown = false;
+
+void setInterruptThrown()
+{
+ interruptThrown = true;
+}
+
+void _interrupted()
+{
+ /* Block user interrupts while an exception is being handled.
+ Throwing an exception while another exception is being handled
+ kills the program! */
+ if (!interruptThrown && !std::uncaught_exception()) {
+ interruptThrown = true;
+ throw Interrupted("interrupted by the user");
+ }
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+template<class C> C tokenizeString(const string & s, const string & separators)
+{
+ C result;
+ string::size_type pos = s.find_first_not_of(separators, 0);
+ while (pos != string::npos) {
+ string::size_type end = s.find_first_of(separators, pos + 1);
+ if (end == string::npos) end = s.size();
+ string token(s, pos, end - pos);
+ result.insert(result.end(), token);
+ pos = s.find_first_not_of(separators, end);
+ }
+ return result;
+}
+
+template Strings tokenizeString(const string & s, const string & separators);
+template StringSet tokenizeString(const string & s, const string & separators);
+template vector<string> tokenizeString(const string & s, const string & separators);
+
+
+string concatStringsSep(const string & sep, const Strings & ss)
+{
+ string s;
+ for (auto & i : ss) {
+ if (s.size() != 0) s += sep;
+ s += i;
+ }
+ return s;
+}
+
+
+string concatStringsSep(const string & sep, const StringSet & ss)
+{
+ string s;
+ for (auto & i : ss) {
+ if (s.size() != 0) s += sep;
+ s += i;
+ }
+ return s;
+}
+
+
+string chomp(const string & s)
+{
+ size_t i = s.find_last_not_of(" \n\r\t");
+ return i == string::npos ? "" : string(s, 0, i + 1);
+}
+
+
+string trim(const string & s, const string & whitespace)
+{
+ auto i = s.find_first_not_of(whitespace);
+ if (i == string::npos) return "";
+ auto j = s.find_last_not_of(whitespace);
+ return string(s, i, j == string::npos ? j : j - i + 1);
+}
+
+
+string replaceStrings(const std::string & s,
+ const std::string & from, const std::string & to)
+{
+ if (from.empty()) return s;
+ string res = s;
+ size_t pos = 0;
+ while ((pos = res.find(from, pos)) != std::string::npos) {
+ res.replace(pos, from.size(), to);
+ pos += to.size();
+ }
+ return res;
+}
+
+
+string statusToString(int status)
+{
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ if (WIFEXITED(status))
+ return (format("failed with exit code %1%") % WEXITSTATUS(status)).str();
+ else if (WIFSIGNALED(status)) {
+ int sig = WTERMSIG(status);
+#if HAVE_STRSIGNAL
+ const char * description = strsignal(sig);
+ return (format("failed due to signal %1% (%2%)") % sig % description).str();
+#else
+ return (format("failed due to signal %1%") % sig).str();
+#endif
+ }
+ else
+ return "died abnormally";
+ } else return "succeeded";
+}
+
+
+bool statusOk(int status)
+{
+ return WIFEXITED(status) && WEXITSTATUS(status) == 0;
+}
+
+
+bool hasPrefix(const string & s, const string & prefix)
+{
+ return s.compare(0, prefix.size(), prefix) == 0;
+}
+
+
+bool hasSuffix(const string & s, const string & suffix)
+{
+ return s.size() >= suffix.size() && string(s, s.size() - suffix.size()) == suffix;
+}
+
+
+std::string toLower(const std::string & s)
+{
+ std::string r(s);
+ for (auto & c : r)
+ c = std::tolower(c);
+ return r;
+}
+
+
+string decodeOctalEscaped(const string & s)
+{
+ string r;
+ for (string::const_iterator i = s.begin(); i != s.end(); ) {
+ if (*i != '\\') { r += *i++; continue; }
+ unsigned char c = 0;
+ ++i;
+ while (i != s.end() && *i >= '0' && *i < '8')
+ c = c * 8 + (*i++ - '0');
+ r += c;
+ }
+ return r;
+}
+
+
+void ignoreException()
+{
+ try {
+ throw;
+ } catch (std::exception & e) {
+ printError(format("error (ignored): %1%") % e.what());
+ }
+}
+
+
+string filterANSIEscapes(const string & s, bool nixOnly)
+{
+ string t, r;
+ enum { stTop, stEscape, stCSI } state = stTop;
+ for (auto c : s) {
+ if (state == stTop) {
+ if (c == '\e') {
+ state = stEscape;
+ r = c;
+ } else
+ t += c;
+ } else if (state == stEscape) {
+ r += c;
+ if (c == '[')
+ state = stCSI;
+ else {
+ t += r;
+ state = stTop;
+ }
+ } else {
+ r += c;
+ if (c >= 0x40 && c != 0x7e) {
+ if (nixOnly && (c != 'p' && c != 'q' && c != 's' && c != 'a' && c != 'b'))
+ t += r;
+ state = stTop;
+ r.clear();
+ }
+ }
+ }
+ t += r;
+ return t;
+}
+
+
+static char base64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+
+string base64Encode(const string & s)
+{
+ string res;
+ int data = 0, nbits = 0;
+
+ for (char c : s) {
+ data = data << 8 | (unsigned char) c;
+ nbits += 8;
+ while (nbits >= 6) {
+ nbits -= 6;
+ res.push_back(base64Chars[data >> nbits & 0x3f]);
+ }
+ }
+
+ if (nbits) res.push_back(base64Chars[data << (6 - nbits) & 0x3f]);
+ while (res.size() % 4) res.push_back('=');
+
+ return res;
+}
+
+
+string base64Decode(const string & s)
+{
+ static bool init = false;
+ static char decode[256];
+ if (!init) {
+ // FIXME: not thread-safe.
+ memset(decode, -1, sizeof(decode));
+ for (int i = 0; i < 64; i++)
+ decode[(int) base64Chars[i]] = i;
+ init = true;
+ }
+
+ string res;
+ unsigned int d = 0, bits = 0;
+
+ for (char c : s) {
+ if (c == '=') break;
+ if (c == '\n') continue;
+
+ char digit = decode[(unsigned char) c];
+ if (digit == -1)
+ throw Error("invalid character in Base64 string");
+
+ bits += 6;
+ d = d << 6 | digit;
+ if (bits >= 8) {
+ res.push_back(d >> (bits - 8) & 0xff);
+ bits -= 8;
+ }
+ }
+
+ return res;
+}
+
+
+void callFailure(const std::function<void(std::exception_ptr exc)> & failure, std::exception_ptr exc)
+{
+ try {
+ failure(exc);
+ } catch (std::exception & e) {
+ printError(format("uncaught exception: %s") % e.what());
+ abort();
+ }
+}
+
+
+static Sync<std::list<std::function<void()>>> _interruptCallbacks;
+
+static void signalHandlerThread(sigset_t set)
+{
+ while (true) {
+ int signal = 0;
+ sigwait(&set, &signal);
+
+ if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP)
+ triggerInterrupt();
+ }
+}
+
+void triggerInterrupt()
+{
+ _isInterrupted = true;
+
+ {
+ auto interruptCallbacks(_interruptCallbacks.lock());
+ for (auto & callback : *interruptCallbacks) {
+ try {
+ callback();
+ } catch (...) {
+ ignoreException();
+ }
+ }
+ }
+}
+
+static sigset_t savedSignalMask;
+
+void startSignalHandlerThread()
+{
+ if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask))
+ throw SysError("querying signal mask");
+
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGINT);
+ sigaddset(&set, SIGTERM);
+ sigaddset(&set, SIGHUP);
+ sigaddset(&set, SIGPIPE);
+ if (pthread_sigmask(SIG_BLOCK, &set, nullptr))
+ throw SysError("blocking signals");
+
+ std::thread(signalHandlerThread, set).detach();
+}
+
+void restoreSignals()
+{
+ if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr))
+ throw SysError("restoring signals");
+}
+
+/* RAII helper to automatically deregister a callback. */
+struct InterruptCallbackImpl : InterruptCallback
+{
+ std::list<std::function<void()>>::iterator it;
+ ~InterruptCallbackImpl() override
+ {
+ _interruptCallbacks.lock()->erase(it);
+ }
+};
+
+std::unique_ptr<InterruptCallback> createInterruptCallback(std::function<void()> callback)
+{
+ auto interruptCallbacks(_interruptCallbacks.lock());
+ interruptCallbacks->push_back(callback);
+
+ auto res = std::make_unique<InterruptCallbackImpl>();
+ res->it = interruptCallbacks->end();
+ res->it--;
+
+ return std::unique_ptr<InterruptCallback>(res.release());
+}
+
+}
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
new file mode 100644
index 000000000..ae40dcd4c
--- /dev/null
+++ b/src/libutil/util.hh
@@ -0,0 +1,457 @@
+#pragma once
+
+#include "types.hh"
+#include "logging.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <functional>
+#include <limits>
+#include <cstdio>
+#include <map>
+#include <sstream>
+#include <experimental/optional>
+
+#ifndef HAVE_STRUCT_DIRENT_D_TYPE
+#define DT_UNKNOWN 0
+#define DT_REG 1
+#define DT_LNK 2
+#define DT_DIR 3
+#endif
+
+namespace nix {
+
+
+/* Return an environment variable. */
+string getEnv(const string & key, const string & def = "");
+
+/* Get the entire environment. */
+std::map<std::string, std::string> getEnv();
+
+/* Return an absolutized path, resolving paths relative to the
+ specified directory, or the current directory otherwise. The path
+ is also canonicalised. */
+Path absPath(Path path, Path dir = "");
+
+/* Canonicalise a path by removing all `.' or `..' components and
+ double or trailing slashes. Optionally resolves all symlink
+ components such that each component of the resulting path is *not*
+ a symbolic link. */
+Path canonPath(const Path & path, bool resolveSymlinks = false);
+
+/* Return the directory part of the given canonical path, i.e.,
+ everything before the final `/'. If the path is the root or an
+ immediate child thereof (e.g., `/foo'), this means an empty string
+ is returned. */
+Path dirOf(const Path & path);
+
+/* Return the base name of the given canonical path, i.e., everything
+ following the final `/'. */
+string baseNameOf(const Path & path);
+
+/* Check whether a given path is a descendant of the given
+ directory. */
+bool isInDir(const Path & path, const Path & dir);
+
+/* Get status of `path'. */
+struct stat lstat(const Path & path);
+
+/* Return true iff the given path exists. */
+bool pathExists(const Path & path);
+
+/* Read the contents (target) of a symbolic link. The result is not
+ in any way canonicalised. */
+Path readLink(const Path & path);
+
+bool isLink(const Path & path);
+
+/* Read the contents of a directory. The entries `.' and `..' are
+ removed. */
+struct DirEntry
+{
+ string name;
+ ino_t ino;
+ unsigned char type; // one of DT_*
+ DirEntry(const string & name, ino_t ino, unsigned char type)
+ : name(name), ino(ino), type(type) { }
+};
+
+typedef vector<DirEntry> DirEntries;
+
+DirEntries readDirectory(const Path & path);
+
+unsigned char getFileType(const Path & path);
+
+/* Read the contents of a file into a string. */
+string readFile(int fd);
+string readFile(const Path & path, bool drain = false);
+
+/* Write a string to a file. */
+void writeFile(const Path & path, const string & s, mode_t mode = 0666);
+
+/* Read a line from a file descriptor. */
+string readLine(int fd);
+
+/* Write a line to a file descriptor. */
+void writeLine(int fd, string s);
+
+/* Delete a path; i.e., in the case of a directory, it is deleted
+ recursively. It's not an error if the path does not exist. The
+ second variant returns the number of bytes freed. */
+void deletePath(const Path & path);
+
+void deletePath(const Path & path, unsigned long long & bytesFreed);
+
+/* Create a temporary directory. */
+Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix",
+ bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755);
+
+/* Return $XDG_CACHE_HOME or $HOME/.cache. */
+Path getCacheDir();
+
+/* Return $XDG_CONFIG_HOME or $HOME/.config. */
+Path getConfigDir();
+
+/* Create a directory and all its parents, if necessary. Returns the
+ list of created directories, in order of creation. */
+Paths createDirs(const Path & path);
+
+/* Create a symlink. */
+void createSymlink(const Path & target, const Path & link);
+
+/* Atomically create or replace a symlink. */
+void replaceSymlink(const Path & target, const Path & link);
+
+
+/* Wrappers around read()/write() that read/write exactly the
+ requested number of bytes. */
+void readFull(int fd, unsigned char * buf, size_t count);
+void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts = true);
+void writeFull(int fd, const string & s, bool allowInterrupts = true);
+
+MakeError(EndOfFile, Error)
+
+
+/* Read a file descriptor until EOF occurs. */
+string drainFD(int fd);
+
+
+
+/* Automatic cleanup of resources. */
+
+
+class AutoDelete
+{
+ Path path;
+ bool del;
+ bool recursive;
+public:
+ AutoDelete();
+ AutoDelete(const Path & p, bool recursive = true);
+ ~AutoDelete();
+ void cancel();
+ void reset(const Path & p, bool recursive = true);
+ operator Path() const { return path; }
+};
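+
+/* Usage sketch (illustrative; ‘keepResults’ is just a placeholder
+   condition): remove a temporary directory on scope exit unless
+   cancel() is called.
+
+     Path tmpDir = createTempDir();
+     AutoDelete delTmp(tmpDir);        // deleted recursively on scope exit
+     ...
+     if (keepResults) delTmp.cancel(); // keep the directory after all
+*/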
+
+
+class AutoCloseFD
+{
+ int fd;
+ void close();
+public:
+ AutoCloseFD();
+ AutoCloseFD(int fd);
+ AutoCloseFD(const AutoCloseFD & fd) = delete;
+ AutoCloseFD(AutoCloseFD&& fd);
+ ~AutoCloseFD();
+ AutoCloseFD& operator =(const AutoCloseFD & fd) = delete;
+ AutoCloseFD& operator =(AutoCloseFD&& fd);
+ int get() const;
+ explicit operator bool() const;
+ int release();
+};
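+
+/* Usage sketch (illustrative; the path is arbitrary): RAII management
+   of a raw file descriptor, as in the readFile()/writeFile()
+   implementations. Requires <fcntl.h> for open().
+
+     AutoCloseFD fd = open("/etc/hosts", O_RDONLY | O_CLOEXEC);
+     if (!fd) throw SysError("opening ‘/etc/hosts’");
+     string contents = drainFD(fd.get());
+     // fd is closed automatically when it goes out of scope.
+*/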
+
+
+class Pipe
+{
+public:
+ AutoCloseFD readSide, writeSide;
+ void create();
+};
+
+
+struct DIRDeleter
+{
+ void operator()(DIR * dir) const {
+ closedir(dir);
+ }
+};
+
+typedef std::unique_ptr<DIR, DIRDeleter> AutoCloseDir;
+
+
+class Pid
+{
+ pid_t pid = -1;
+ bool separatePG = false;
+ int killSignal = SIGKILL;
+public:
+ Pid();
+ Pid(pid_t pid);
+ ~Pid();
+ void operator =(pid_t pid);
+ operator pid_t();
+ int kill();
+ int wait();
+
+ void setSeparatePG(bool separatePG);
+ void setKillSignal(int signal);
+ pid_t release();
+};
+
+
+/* Kill all processes running under the specified uid by sending them
+ a SIGKILL. */
+void killUser(uid_t uid);
+
+
+/* Fork a process that runs the given function, and return the child
+ pid to the caller. */
+struct ProcessOptions
+{
+ string errorPrefix = "error: ";
+ bool dieWithParent = true;
+ bool runExitHandlers = false;
+ bool allowVfork = true;
+};
+
+pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = ProcessOptions());
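+
+/* Usage sketch (illustrative): run some work in a child process and
+   wait for it to finish.
+
+     Pid pid = startProcess([&]() {
+         // child: do the work, then exit without running atexit handlers
+         _exit(0);
+     });
+     int status = pid.wait();
+     if (!statusOk(status))
+         throw Error(format("child %1%") % statusToString(status));
+*/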
+
+
+/* Run a program and return its stdout in a string (i.e., like the
+ shell backtick operator). */
+string runProgram(Path program, bool searchPath = false,
+ const Strings & args = Strings(),
+ const std::experimental::optional<std::string> & input = {});
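+
+/* Usage sketch (illustrative): capture a program's standard output, in
+   the same way nix-channel and nix-build invoke nix-env and nix-store.
+   A non-zero exit status throws ExecError (defined below).
+
+     string out = runProgram("/bin/sh", false, {"-c", "echo hello"});
+     // out == "hello\n"
+*/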
+
+class ExecError : public Error
+{
+public:
+ int status;
+
+ template<typename... Args>
+ ExecError(int status, Args... args)
+ : Error(args...), status(status)
+ { }
+};
+
+/* Convert a list of strings to a null-terminated vector of char
+ *'s. The result must not be accessed beyond the lifetime of the
+ list of strings. */
+std::vector<char *> stringsToCharPtrs(const Strings & ss);
+
+/* Close all file descriptors except stdin, stdout, stderr, and those
+ listed in the given set. Good practice in child processes. */
+void closeMostFDs(const set<int> & exceptions);
+
+/* Set the close-on-exec flag for the given file descriptor. */
+void closeOnExec(int fd);
+
+
+/* User interruption. */
+
+extern bool _isInterrupted;
+
+void setInterruptThrown();
+
+void _interrupted();
+
+void inline checkInterrupt()
+{
+ if (_isInterrupted) _interrupted();
+}
+
+MakeError(Interrupted, BaseError)
+
+
+MakeError(FormatError, Error)
+
+
+/* String tokenizer. */
+template<class C> C tokenizeString(const string & s, const string & separators = " \t\n\r");
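+
+/* Usage sketch (illustrative):
+
+     auto words = tokenizeString<Strings>("a  b\tc");   // {"a", "b", "c"}
+     auto uniq  = tokenizeString<StringSet>("x y x");   // {"x", "y"}
+*/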
+
+
+/* Concatenate the given strings with a separator between the
+ elements. */
+string concatStringsSep(const string & sep, const Strings & ss);
+string concatStringsSep(const string & sep, const StringSet & ss);
+
+
+/* Remove trailing whitespace from a string. */
+string chomp(const string & s);
+
+
+/* Remove whitespace from the start and end of a string. */
+string trim(const string & s, const string & whitespace = " \n\r\t");
+
+
+/* Replace all occurrences of a string inside another string. */
+string replaceStrings(const std::string & s,
+ const std::string & from, const std::string & to);
+
+
+/* Convert the exit status of a child as returned by wait() into an
+ error string. */
+string statusToString(int status);
+
+bool statusOk(int status);
+
+
+/* Parse a string into an integer. */
+template<class N> bool string2Int(const string & s, N & n)
+{
+ if (string(s, 0, 1) == "-" && !std::numeric_limits<N>::is_signed)
+ return false;
+ std::istringstream str(s);
+ str >> n;
+ return str && str.get() == EOF;
+}
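+
+/* Usage sketch (illustrative):
+
+     unsigned long long n;
+     if (!string2Int("42", n)) throw Error("expected an integer");
+     // string2Int("-1", n) would fail here because the target type is unsigned.
+*/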
+
+/* Parse a string into a float. */
+template<class N> bool string2Float(const string & s, N & n)
+{
+ std::istringstream str(s);
+ str >> n;
+ return str && str.get() == EOF;
+}
+
+
+/* Return true iff `s' starts with `prefix'. */
+bool hasPrefix(const string & s, const string & prefix);
+
+
+/* Return true iff `s' ends in `suffix'. */
+bool hasSuffix(const string & s, const string & suffix);
+
+
+/* Convert a string to lower case. */
+std::string toLower(const std::string & s);
+
+
+/* Decode a string containing octal escape codes, as used in
+ /etc/fstab and /proc/mounts (e.g. "foo\040bar" decodes to
+ "foo bar"). */
+string decodeOctalEscaped(const string & s);
+
+
+/* Exception handling in destructors: print an error message, then
+ ignore the exception. */
+void ignoreException();
+
+
+/* Some ANSI escape sequences. */
+#define ANSI_NORMAL "\e[0m"
+#define ANSI_BOLD "\e[1m"
+#define ANSI_RED "\e[31;1m"
+
+
+/* Filter out ANSI escape codes from the given string. If ‘nixOnly’ is
+ set, only filter escape codes generated by Nixpkgs' stdenv (used to
+ denote nesting etc.). */
+string filterANSIEscapes(const string & s, bool nixOnly = false);
+
+
+/* Base64 encoding/decoding. */
+string base64Encode(const string & s);
+string base64Decode(const string & s);
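+
+/* Usage sketch (illustrative), round-tripping a short string:
+
+     string encoded = base64Encode("hello");   // "aGVsbG8="
+     assert(base64Decode(encoded) == "hello");
+*/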
+
+
+/* Get a value for the specified key from an associative container, or a
+ default value if the key doesn't exist. */
+template <class T>
+string get(const T & map, const string & key, const string & def = "")
+{
+ auto i = map.find(key);
+ return i == map.end() ? def : i->second;
+}
+
+
+/* Call ‘failure’ with the current exception as argument. If ‘failure’
+ throws an exception, abort the program. */
+void callFailure(const std::function<void(std::exception_ptr exc)> & failure,
+ std::exception_ptr exc = std::current_exception());
+
+
+/* Evaluate the function ‘f’. If it returns a value, call ‘success’
+ with that value as its argument. If it or ‘success’ throws an
+ exception, call ‘failure’. If ‘failure’ throws an exception, abort
+ the program. */
+template<class T>
+void sync2async(
+ const std::function<void(T)> & success,
+ const std::function<void(std::exception_ptr exc)> & failure,
+ const std::function<T()> & f)
+{
+ try {
+ success(f());
+ } catch (...) {
+ callFailure(failure);
+ }
+}
+
+
+/* Call the function ‘success’. If it throws an exception, call
+ ‘failure’. If that throws an exception, abort the program. */
+template<class T>
+void callSuccess(
+ const std::function<void(T)> & success,
+ const std::function<void(std::exception_ptr exc)> & failure,
+ T && arg)
+{
+ try {
+ success(arg);
+ } catch (...) {
+ callFailure(failure);
+ }
+}
+
+
+/* Start a thread that handles various signals. Also block those signals
+ on the current thread (and thus any threads created by it). */
+void startSignalHandlerThread();
+
+/* Restore default signal handling. */
+void restoreSignals();
+
+struct InterruptCallback
+{
+ virtual ~InterruptCallback() { };
+};
+
+/* Register a function that gets called on SIGINT (in a non-signal
+ context). */
+std::unique_ptr<InterruptCallback> createInterruptCallback(
+ std::function<void()> callback);
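+
+/* Usage sketch (illustrative): stop a long-running loop when SIGINT is
+   received. The callback is deregistered when the returned handle is
+   destroyed.
+
+     std::atomic<bool> stop{false};
+     auto handle = createInterruptCallback([&]() { stop = true; });
+     while (!stop) { ... do some work ... }
+*/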
+
+void triggerInterrupt();
+
+/* A RAII class that causes the current thread to receive SIGUSR1 when
+ the signal handler thread receives SIGINT. That is, this allows
+ SIGINT to be multiplexed to multiple threads. */
+struct ReceiveInterrupts
+{
+ pthread_t target;
+ std::unique_ptr<InterruptCallback> callback;
+
+ ReceiveInterrupts()
+ : target(pthread_self())
+ , callback(createInterruptCallback([&]() { pthread_kill(target, SIGUSR1); }))
+ { }
+};
+
+}
diff --git a/src/libutil/xml-writer.cc b/src/libutil/xml-writer.cc
new file mode 100644
index 000000000..98bd058d1
--- /dev/null
+++ b/src/libutil/xml-writer.cc
@@ -0,0 +1,94 @@
+#include <assert.h>
+
+#include "xml-writer.hh"
+
+
+namespace nix {
+
+
+XMLWriter::XMLWriter(bool indent, std::ostream & output)
+ : output(output), indent(indent)
+{
+ output << "<?xml version='1.0' encoding='utf-8'?>" << std::endl;
+ closed = false;
+}
+
+
+XMLWriter::~XMLWriter()
+{
+ close();
+}
+
+
+void XMLWriter::close()
+{
+ if (closed) return;
+ while (!pendingElems.empty()) closeElement();
+ closed = true;
+}
+
+
+void XMLWriter::indent_(unsigned int depth)
+{
+ if (!indent) return;
+ output << string(depth * 2, ' ');
+}
+
+
+void XMLWriter::openElement(const string & name,
+ const XMLAttrs & attrs)
+{
+ assert(!closed);
+ indent_(pendingElems.size());
+ output << "<" << name;
+ writeAttrs(attrs);
+ output << ">";
+ if (indent) output << std::endl;
+ pendingElems.push_back(name);
+}
+
+
+void XMLWriter::closeElement()
+{
+ assert(!pendingElems.empty());
+ indent_(pendingElems.size() - 1);
+ output << "</" << pendingElems.back() << ">";
+ if (indent) output << std::endl;
+ pendingElems.pop_back();
+ if (pendingElems.empty()) closed = true;
+}
+
+
+void XMLWriter::writeEmptyElement(const string & name,
+ const XMLAttrs & attrs)
+{
+ assert(!closed);
+ indent_(pendingElems.size());
+ output << "<" << name;
+ writeAttrs(attrs);
+ output << " />";
+ if (indent) output << std::endl;
+}
+
+
+void XMLWriter::writeAttrs(const XMLAttrs & attrs)
+{
+ for (auto & i : attrs) {
+ output << " " << i.first << "=\"";
+ for (unsigned int j = 0; j < i.second.size(); ++j) {
+ char c = i.second[j];
+ if (c == '"') output << "&quot;";
+ else if (c == '<') output << "&lt;";
+ else if (c == '>') output << "&gt;";
+ else if (c == '&') output << "&amp;";
+ /* Escape newlines to prevent attribute normalisation (see
+ XML spec, section 3.3.3). */
+ else if (c == '\n') output << "&#xA;";
+ else output << c;
+ }
+ output << "\"";
+ }
+}
+
+
+}
diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh
new file mode 100644
index 000000000..3cefe3712
--- /dev/null
+++ b/src/libutil/xml-writer.hh
@@ -0,0 +1,69 @@
+#pragma once
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <map>
+
+
+namespace nix {
+
+using std::string;
+using std::map;
+using std::list;
+
+
+typedef map<string, string> XMLAttrs;
+
+
+class XMLWriter
+{
+private:
+
+ std::ostream & output;
+
+ bool indent;
+ bool closed;
+
+ list<string> pendingElems;
+
+public:
+
+ XMLWriter(bool indent, std::ostream & output);
+ ~XMLWriter();
+
+ void close();
+
+ void openElement(const string & name,
+ const XMLAttrs & attrs = XMLAttrs());
+ void closeElement();
+
+ void writeEmptyElement(const string & name,
+ const XMLAttrs & attrs = XMLAttrs());
+
+private:
+ void writeAttrs(const XMLAttrs & attrs);
+
+ void indent_(unsigned int depth);
+};
+
+
+class XMLOpenElement
+{
+private:
+ XMLWriter & writer;
+public:
+ XMLOpenElement(XMLWriter & writer, const string & name,
+ const XMLAttrs & attrs = XMLAttrs())
+ : writer(writer)
+ {
+ writer.openElement(name, attrs);
+ }
+ ~XMLOpenElement()
+ {
+ writer.closeElement();
+ }
+};
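+
+/* Usage sketch (illustrative): emit
+
+     <?xml version='1.0' encoding='utf-8'?>
+     <items>
+       <item name="foo" />
+     </items>
+
+   with the closing tag written automatically by the RAII helper:
+
+     XMLWriter doc(true, std::cout);
+     XMLOpenElement root(doc, "items");
+     doc.writeEmptyElement("item", XMLAttrs{{"name", "foo"}});
+*/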
+
+
+}
diff --git a/src/nix-build/local.mk b/src/nix-build/local.mk
new file mode 100644
index 000000000..91532411a
--- /dev/null
+++ b/src/nix-build/local.mk
@@ -0,0 +1,9 @@
+programs += nix-build
+
+nix-build_DIR := $(d)
+
+nix-build_SOURCES := $(d)/nix-build.cc
+
+nix-build_LIBS = libmain libstore libutil libformat
+
+$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
new file mode 100755
index 000000000..bb031d515
--- /dev/null
+++ b/src/nix-build/nix-build.cc
@@ -0,0 +1,515 @@
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <regex>
+#include <sstream>
+#include <vector>
+
+#include <unistd.h>
+
+#include "store-api.hh"
+#include "globals.hh"
+#include "derivations.hh"
+#include "affinity.hh"
+#include "util.hh"
+#include "shared.hh"
+
+using namespace nix;
+
+extern char * * environ;
+
+/* Recreate the effect of Perl's shellwords function: break a string up
+ * into words the way a shell would, handling double quotes and
+ * backslash escapes.
+ */
+std::vector<string> shellwords(const string & s)
+{
+ std::regex whitespace("^(\\s+).*");
+ auto begin = s.cbegin();
+ std::vector<string> res;
+ std::string cur;
+ enum state {
+ sBegin,
+ sQuote
+ };
+ state st = sBegin;
+ auto it = begin;
+ for (; it != s.cend(); ++it) {
+ if (st == sBegin) {
+ std::smatch match;
+ if (regex_search(it, s.cend(), match, whitespace)) {
+ cur.append(begin, it);
+ res.push_back(cur);
+ cur.clear();
+ it = match[1].second;
+ begin = it;
+ }
+ }
+ switch (*it) {
+ case '"':
+ cur.append(begin, it);
+ begin = it + 1;
+ st = st == sBegin ? sQuote : sBegin;
+ break;
+ case '\\':
+ /* perl shellwords mostly just treats the next char as part of the string with no special processing */
+ cur.append(begin, it);
+ begin = ++it;
+ break;
+ }
+ }
+ cur.append(begin, it);
+ if (!cur.empty()) res.push_back(cur);
+ return res;
+}
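+
+/* For illustration: shellwords("--arg x \"a b\"") yields the three
+   words {"--arg", "x", "a b"}; this is how the arguments on a
+   "#! nix-shell" line are split further below. */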
+
+static void maybePrintExecError(ExecError & e)
+{
+ if (WIFEXITED(e.status))
+ throw Exit(WEXITSTATUS(e.status));
+ else
+ throw e;
+}
+
+int main(int argc, char ** argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+ auto store = openStore();
+ auto dryRun = false;
+ auto verbose = false;
+ auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$"));
+ auto pure = false;
+ auto fromArgs = false;
+ auto packages = false;
+ // Same condition as bash uses for interactive shells
+ auto interactive = isatty(STDIN_FILENO) && isatty(STDERR_FILENO);
+
+ Strings instArgs;
+ Strings buildArgs;
+ Strings exprs;
+
+ auto shell = getEnv("SHELL", "/bin/sh");
+ std::string envCommand; // interactive shell
+ Strings envExclude;
+
+ auto myName = runEnv ? "nix-shell" : "nix-build";
+
+ auto inShebang = false;
+ std::string script;
+ std::vector<string> savedArgs;
+
+ AutoDelete tmpDir(createTempDir("", myName));
+
+ std::string outLink = "./result";
+ auto drvLink = (Path) tmpDir + "/derivation";
+
+ std::vector<string> args;
+ for (int i = 1; i < argc; ++i)
+ args.push_back(argv[i]);
+
+ // Heuristic to see if we're invoked as a shebang script, namely, if we
+ // have a single argument, it's the name of an executable file, and it
+ // starts with "#!".
+ if (runEnv && argc > 1 && !std::regex_search(argv[1], std::regex("nix-shell"))) {
+ script = argv[1];
+ if (access(script.c_str(), F_OK) == 0 && access(script.c_str(), X_OK) == 0) {
+ auto lines = tokenizeString<Strings>(readFile(script), "\n");
+ if (std::regex_search(lines.front(), std::regex("^#!"))) {
+ lines.pop_front();
+ inShebang = true;
+ for (int i = 2; i < argc; ++i)
+ savedArgs.push_back(argv[i]);
+ args.clear();
+ for (auto line : lines) {
+ line = chomp(line);
+ std::smatch match;
+ if (std::regex_match(line, match, std::regex("^#!\\s*nix-shell (.*)$")))
+ for (const auto & word : shellwords(match[1].str()))
+ args.push_back(word);
+ }
+ }
+ }
+ }
+
+ for (size_t n = 0; n < args.size(); ++n) {
+ auto arg = args[n];
+
+ if (arg == "--help") {
+ deletePath(tmpDir);
+ showManPage(myName);
+ }
+
+ else if (arg == "--version")
+ printVersion(myName);
+
+ else if (arg == "--add-drv-link") {
+ drvLink = "./derivation";
+ }
+
+ else if (arg == "--no-out-link" || arg == "--no-link") {
+ outLink = (Path) tmpDir + "/result";
+ }
+
+ else if (arg == "--drv-link") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError("--drv-link requires an argument");
+ }
+ drvLink = args[n];
+ }
+
+ else if (arg == "--out-link" || arg == "-o") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ outLink = args[n];
+ }
+
+ else if (arg == "--attr" || arg == "-A" || arg == "-I") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ instArgs.push_back(arg);
+ instArgs.push_back(args[n]);
+ }
+
+ else if (arg == "--arg" || arg == "--argstr") {
+ if (n + 2 >= args.size()) {
+ throw UsageError(format("%1% requires two arguments") % arg);
+ }
+ instArgs.push_back(arg);
+ instArgs.push_back(args[n + 1]);
+ instArgs.push_back(args[n + 2]);
+ n += 2;
+ }
+
+ else if (arg == "--option") {
+ if (n + 2 >= args.size()) {
+ throw UsageError(format("%1% requires two arguments") % arg);
+ }
+ instArgs.push_back(arg);
+ instArgs.push_back(args[n + 1]);
+ instArgs.push_back(args[n + 2]);
+ buildArgs.push_back(arg);
+ buildArgs.push_back(args[n + 1]);
+ buildArgs.push_back(args[n + 2]);
+ n += 2;
+ }
+
+ else if (arg == "--max-jobs" || arg == "-j" || arg == "--max-silent-time" || arg == "--cores" || arg == "--timeout" || arg == "--add-root") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ buildArgs.push_back(arg);
+ buildArgs.push_back(args[n]);
+ }
+
+ else if (arg == "--dry-run") {
+ buildArgs.push_back("--dry-run");
+ dryRun = true;
+ }
+
+ else if (arg == "--show-trace") {
+ instArgs.push_back(arg);
+ }
+
+ else if (arg == "-") {
+ exprs = Strings{"-"};
+ }
+
+ else if (arg == "--verbose" || (arg.size() >= 2 && arg.substr(0, 2) == "-v")) {
+ buildArgs.push_back(arg);
+ instArgs.push_back(arg);
+ verbose = true;
+ }
+
+ else if (arg == "--quiet" || arg == "--repair") {
+ buildArgs.push_back(arg);
+ instArgs.push_back(arg);
+ }
+
+ else if (arg == "--check") {
+ buildArgs.push_back(arg);
+ }
+
+ else if (arg == "--run-env") { // obsolete
+ runEnv = true;
+ }
+
+ else if (arg == "--command" || arg == "--run") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ envCommand = args[n] + "\nexit";
+ if (arg == "--run")
+ interactive = false;
+ }
+
+ else if (arg == "--exclude") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ envExclude.push_back(args[n]);
+ }
+
+ else if (arg == "--pure") { pure = true; }
+ else if (arg == "--impure") { pure = false; }
+
+ else if (arg == "--expr" || arg == "-E") {
+ fromArgs = true;
+ instArgs.push_back("--expr");
+ }
+
+ else if (arg == "--packages" || arg == "-p") {
+ packages = true;
+ }
+
+ else if (inShebang && arg == "-i") {
+ n++;
+ if (n >= args.size()) {
+ throw UsageError(format("%1% requires an argument") % arg);
+ }
+ interactive = false;
+ auto interpreter = args[n];
+ auto execArgs = "";
+
+ auto shellEscape = [](const string & s) {
+ return "'" + std::regex_replace(s, std::regex("'"), "'\\''") + "'";
+ };
+
+ // Überhack to support Perl. Perl examines the shebang and
+ // executes it unless it contains the string "perl" or "indir",
+ // or (undocumented) argv[0] does not contain "perl". Exploit
+ // the latter by doing "exec -a".
+ if (std::regex_search(interpreter, std::regex("perl")))
+ execArgs = "-a PERL";
+
+ std::ostringstream joined;
+ for (const auto & i : savedArgs)
+ joined << shellEscape(i) << ' ';
+
+ if (std::regex_search(interpreter, std::regex("ruby"))) {
+ // Hack for Ruby. Ruby also examines the shebang. It tries to
+ // read the shebang to understand which packages to read from. Since
+ // this is handled via nix-shell -p, we wrap our ruby script execution
+ // in ruby -e 'load' which ignores the shebangs.
+ envCommand = (format("exec %1% %2% -e 'load(\"%3%\")' -- %4%") % execArgs % interpreter % script % joined.str()).str();
+ } else {
+ envCommand = (format("exec %1% %2% %3% %4%") % execArgs % interpreter % script % joined.str()).str();
+ }
+ }
+
+ else if (arg == "-Q" || arg == "--no-build-output") {
+ buildArgs.push_back(arg);
+ instArgs.push_back(arg);
+ }
+
+ else if (!arg.empty() && arg[0] == '-') {
+ buildArgs.push_back(arg);
+ }
+
+ else {
+ exprs.push_back(arg);
+ }
+ }
+
+ if (packages && fromArgs) {
+ throw UsageError("‘-p’ and ‘-E’ are mutually exclusive");
+ }
+
+ if (packages) {
+ instArgs.push_back("--expr");
+ std::ostringstream joined;
+ joined << "with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) \"shell\" { buildInputs = [ ";
+ for (const auto & i : exprs)
+ joined << '(' << i << ") ";
+ joined << "]; } \"\"";
+ exprs = Strings{joined.str()};
+ } else if (!fromArgs) {
+ if (exprs.empty() && runEnv && access("shell.nix", F_OK) == 0)
+ exprs.push_back("shell.nix");
+ if (exprs.empty())
+ exprs.push_back("default.nix");
+ }
+
+ if (runEnv)
+ setenv("IN_NIX_SHELL", pure ? "pure" : "impure", 1);
+
+ for (auto & expr : exprs) {
+ // Instantiate.
+ std::vector<string> drvPaths;
+ if (!std::regex_match(expr, std::regex("^/.*\\.drv$"))) {
+ // If we're in a #! script, interpret filenames relative to the
+ // script.
+ if (inShebang && !packages)
+ expr = absPath(expr, dirOf(script));
+
+ Strings instantiateArgs{"--add-root", drvLink, "--indirect"};
+ for (const auto & arg : instArgs)
+ instantiateArgs.push_back(arg);
+ instantiateArgs.push_back(expr);
+ try {
+ auto instOutput = runProgram(settings.nixBinDir + "/nix-instantiate", false, instantiateArgs);
+ drvPaths = tokenizeString<std::vector<string>>(instOutput);
+ } catch (ExecError & e) {
+ maybePrintExecError(e);
+ }
+ } else {
+ drvPaths.push_back(expr);
+ }
+
+ if (runEnv) {
+ if (drvPaths.size() != 1)
+ throw UsageError("a single derivation is required");
+ auto drvPath = drvPaths[0];
+ drvPath = drvPath.substr(0, drvPath.find_first_of('!'));
+ if (isLink(drvPath))
+ drvPath = readLink(drvPath);
+ auto drv = store->derivationFromPath(drvPath);
+
+ // Build or fetch all dependencies of the derivation.
+ Strings nixStoreArgs{"-r", "--no-output", "--no-gc-warning"};
+ for (const auto & arg : buildArgs)
+ nixStoreArgs.push_back(arg);
+ for (const auto & input : drv.inputDrvs)
+ if (std::all_of(envExclude.cbegin(), envExclude.cend(), [&](const string & exclude) { return !std::regex_search(input.first, std::regex(exclude)); }))
+ nixStoreArgs.push_back(input.first);
+ for (const auto & src : drv.inputSrcs)
+ nixStoreArgs.push_back(src);
+
+ try {
+ runProgram(settings.nixBinDir + "/nix-store", false, nixStoreArgs);
+ } catch (ExecError & e) {
+ maybePrintExecError(e);
+ }
+
+ // Set the environment.
+ auto env = getEnv();
+
+ auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
+
+ if (pure) {
+ std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"};
+ decltype(env) newEnv;
+ for (auto & i : env)
+ if (keepVars.count(i.first))
+ newEnv.emplace(i);
+ env = newEnv;
+ // NixOS hack: prevent /etc/bashrc from sourcing /etc/profile.
+ env["__ETC_PROFILE_SOURCED"] = "1";
+ }
+
+ env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp;
+ env["NIX_STORE"] = store->storeDir;
+
+ for (auto & var : drv.env)
+ env[var.first] = var.second;
+
+ restoreAffinity();
+
+ // Run a shell using the derivation's environment. For
+ // convenience, source $stdenv/setup to setup additional
+ // environment variables and shell functions. Also don't lose
+ // the current $PATH directories.
+ auto rcfile = (Path) tmpDir + "/rc";
+ writeFile(rcfile, fmt(
+ "rm -rf '%1%'; "
+ "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc; "
+ "%2%"
+ "dontAddDisableDepTrack=1; "
+ "[ -e $stdenv/setup ] && source $stdenv/setup; "
+ "%3%"
+ "set +e; "
+ "[ -n \"$PS1\" ] && PS1=\"\\n\\[\\033[1;32m\\][nix-shell:\\w]$\\[\\033[0m\\] \"; "
+ "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; "
+ "unset NIX_ENFORCE_PURITY; "
+ "unset NIX_INDENT_MAKE; "
+ "shopt -u nullglob; "
+ "unset TZ; %4%"
+ "%5%",
+ (Path) tmpDir,
+ (pure ? "" : "p=$PATH; "),
+ (pure ? "" : "PATH=$PATH:$p; unset p; "),
+ (getenv("TZ") ? (string("export TZ='") + getenv("TZ") + "'; ") : ""),
+ envCommand));
+
+ Strings envStrs;
+ for (auto & i : env)
+ envStrs.push_back(i.first + "=" + i.second);
+
+ auto args = interactive
+ ? Strings{"bash", "--rcfile", rcfile}
+ : Strings{"bash", rcfile};
+
+ auto envPtrs = stringsToCharPtrs(envStrs);
+
+ auto shell = getEnv("NIX_BUILD_SHELL", "bash");
+
+ environ = envPtrs.data();
+
+ auto argPtrs = stringsToCharPtrs(args);
+
+ restoreSignals();
+
+ execvp(shell.c_str(), argPtrs.data());
+
+ throw SysError("executing shell ‘%s’", shell);
+ }
+
+ // Ugly hackery to make "nix-build -A foo.all" produce symlinks
+ // ./result, ./result-dev, and so on, rather than ./result,
+ // ./result-2-dev, and so on. This combines multiple derivation
+ // paths into one "/nix/store/drv-path!out1,out2,..." argument.
+ std::string prevDrvPath;
+ Strings drvPaths2;
+ for (const auto & drvPath : drvPaths) {
+ auto p = drvPath;
+ std::string output = "out";
+ std::smatch match;
+ if (std::regex_match(drvPath, match, std::regex("(.*)!(.*)"))) {
+ p = match[1].str();
+ output = match[2].str();
+ }
+ auto target = readLink(p);
+ if (verbose)
+ std::cerr << "derivation is " << target << '\n';
+ if (target == prevDrvPath) {
+ auto last = drvPaths2.back();
+ drvPaths2.pop_back();
+ drvPaths2.push_back(last + "," + output);
+ } else {
+ drvPaths2.push_back(target + "!" + output);
+ prevDrvPath = target;
+ }
+ }
+ // Build.
+ Strings outPaths;
+ Strings nixStoreArgs{"--add-root", outLink, "--indirect", "-r"};
+ for (const auto & arg : buildArgs)
+ nixStoreArgs.push_back(arg);
+ for (const auto & path : drvPaths2)
+ nixStoreArgs.push_back(path);
+
+ std::string nixStoreRes;
+ try {
+ nixStoreRes = runProgram(settings.nixBinDir + "/nix-store", false, nixStoreArgs);
+ } catch (ExecError & e) {
+ maybePrintExecError(e);
+ }
+
+ for (const auto & outpath : tokenizeString<std::vector<string>>(nixStoreRes))
+ outPaths.push_back(chomp(outpath));
+
+ if (dryRun)
+ continue;
+
+ for (const auto & outPath : outPaths)
+ std::cout << readLink(outPath) << '\n';
+ }
+ });
+}
diff --git a/src/nix-channel/local.mk b/src/nix-channel/local.mk
new file mode 100644
index 000000000..49fc105c6
--- /dev/null
+++ b/src/nix-channel/local.mk
@@ -0,0 +1,7 @@
+programs += nix-channel
+
+nix-channel_DIR := $(d)
+
+nix-channel_LIBS = libmain libutil libformat libstore
+
+nix-channel_SOURCES := $(d)/nix-channel.cc
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
new file mode 100755
index 000000000..0f50f6242
--- /dev/null
+++ b/src/nix-channel/nix-channel.cc
@@ -0,0 +1,267 @@
+#include "shared.hh"
+#include "globals.hh"
+#include "download.hh"
+#include <fcntl.h>
+#include <regex>
+#include "store-api.hh"
+#include <pwd.h>
+
+using namespace nix;
+
+typedef std::map<string,string> Channels;
+
+static auto channels = Channels{};
+static auto channelsList = Path{};
+
+// Reads the list of channels.
+static void readChannels()
+{
+ if (!pathExists(channelsList)) return;
+ auto channelsFile = readFile(channelsList);
+
+ for (auto line : tokenizeString<std::vector<string>>(channelsFile, "\n")) {
+ line = chomp(line);
+ if (std::regex_search(line, std::regex("^\\s*\\#")))
+ continue;
+ auto split = tokenizeString<std::vector<string>>(line, " ");
+ auto url = std::regex_replace(split[0], std::regex("/*$"), "");
+ auto name = split.size() > 1 ? split[1] : baseNameOf(url);
+ channels[name] = url;
+ }
+}
+
+// Writes the list of channels.
+static void writeChannels()
+{
+ auto channelsFD = AutoCloseFD{open(channelsList.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0644)};
+ if (!channelsFD)
+ throw SysError(format("opening ‘%1%’ for writing") % channelsList);
+ for (const auto & channel : channels)
+ writeFull(channelsFD.get(), channel.second + " " + channel.first + "\n");
+}
+
+// Adds a channel.
+static void addChannel(const string & url, const string & name)
+{
+ if (!regex_search(url, std::regex("^(file|http|https)://")))
+ throw Error(format("invalid channel URL ‘%1%’") % url);
+ if (!regex_search(name, std::regex("^[a-zA-Z0-9_][a-zA-Z0-9_\\.-]*$")))
+ throw Error(format("invalid channel identifier ‘%1%’") % name);
+ readChannels();
+ channels[name] = url;
+ writeChannels();
+}
+
+static auto profile = Path{};
+
+// Remove a channel.
+static void removeChannel(const string & name)
+{
+ readChannels();
+ channels.erase(name);
+ writeChannels();
+
+ runProgram(settings.nixBinDir + "/nix-env", true, { "--profile", profile, "--uninstall", name });
+}
+
+static auto nixDefExpr = Path{};
+
+// Fetch Nix expressions and binary cache URLs from the subscribed channels.
+static void update(const StringSet & channelNames)
+{
+ readChannels();
+
+ auto store = openStore();
+
+ // Download each channel.
+ auto exprs = Strings{};
+ for (const auto & channel : channels) {
+ auto name = channel.first;
+ auto url = channel.second;
+ if (!(channelNames.empty() || channelNames.count(name)))
+ continue;
+
+ // We want to download the url to a file to see if it's a tarball while also checking if we
+ // got redirected in the process, so that we can grab the various parts of a nix channel
+ // definition from a consistent location if the redirect changes mid-download.
+ auto effectiveUrl = string{};
+ auto dl = getDownloader();
+ auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl);
+ url = chomp(std::move(effectiveUrl));
+
+ // If the URL contains a version number, append it to the name
+ // attribute (so that "nix-env -q" on the channels profile
+ // shows something useful).
+ auto cname = name;
+ std::smatch match;
+ auto urlBase = baseNameOf(url);
+ if (std::regex_search(urlBase, match, std::regex("(-\\d.*)$"))) {
+ cname = cname + (string) match[1];
+ }
+
+ auto extraAttrs = string{};
+
+ auto unpacked = false;
+ if (std::regex_search(filename, std::regex("\\.tar\\.(gz|bz2|xz)$"))) {
+ runProgram(settings.nixBinDir + "/nix-build", false, { "--no-out-link", "--expr", "import <nix/unpack-channel.nix> "
+ "{ name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; }" });
+ unpacked = true;
+ }
+
+ if (!unpacked) {
+ // The URL doesn't unpack directly, so let's try treating it like a full channel folder with files in it
+ // Check if the channel advertises a binary cache.
+ DownloadRequest request(url + "/binary-cache-url");
+ request.showProgress = DownloadRequest::no;
+ try {
+ auto dlRes = dl->download(request);
+ extraAttrs = "binaryCacheURL = \"" + *dlRes.data + "\";";
+ } catch (DownloadError & e) {
+ }
+
+ // Download the channel tarball.
+ auto fullURL = url + "/nixexprs.tar.xz";
+ try {
+ filename = dl->downloadCached(store, fullURL, false);
+ } catch (DownloadError & e) {
+ fullURL = url + "/nixexprs.tar.bz2";
+ filename = dl->downloadCached(store, fullURL, false);
+ }
+ filename = chomp(filename);
+ }
+
+ // Regardless of where it came from, add the expression representing this channel to accumulated expression
+ exprs.push_back("f: f { name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; " + extraAttrs + " }");
+ }
+
+ // Unpack the channel tarballs into the Nix store and install them
+ // into the channels profile.
+ std::cerr << "unpacking channels...\n";
+ auto envArgs = Strings{ "--profile", profile, "--file", "<nix/unpack-channel.nix>", "--install", "--from-expression" };
+ for (auto & expr : exprs)
+ envArgs.push_back(std::move(expr));
+ envArgs.push_back("--quiet");
+ runProgram(settings.nixBinDir + "/nix-env", false, envArgs);
+
+ // Make the channels appear in nix-env.
+ struct stat st;
+ if (lstat(nixDefExpr.c_str(), &st) == 0) {
+ if (S_ISLNK(st.st_mode))
+ // old-skool ~/.nix-defexpr
+ if (unlink(nixDefExpr.c_str()) == -1)
+ throw SysError(format("unlinking %1%") % nixDefExpr);
+ } else if (errno != ENOENT) {
+ throw SysError(format("getting status of %1%") % nixDefExpr);
+ }
+ createDirs(nixDefExpr);
+ auto channelLink = nixDefExpr + "/channels";
+ replaceSymlink(profile, channelLink);
+}
+
+int main(int argc, char ** argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ // Turn on caching in nix-prefetch-url.
+ auto channelCache = settings.nixStateDir + "/channel-cache";
+ createDirs(channelCache);
+ setenv("NIX_DOWNLOAD_CACHE", channelCache.c_str(), 1);
+
+ // Figure out the name of the `.nix-channels' file to use
+ auto home = getEnv("HOME");
+ if (home.empty())
+ throw Error("$HOME not set");
+ channelsList = home + "/.nix-channels";
+ nixDefExpr = home + "/.nix-defexpr";
+
+ // Figure out the name of the channels profile.
+ auto name = string{};
+ auto pw = getpwuid(getuid());
+ if (!pw)
+ name = getEnv("USER", "");
+ else
+ name = pw->pw_name;
+ if (name.empty())
+ throw Error("cannot figure out user name");
+ profile = settings.nixStateDir + "/profiles/per-user/" + name + "/channels";
+ createDirs(dirOf(profile));
+
+ enum {
+ cNone,
+ cAdd,
+ cRemove,
+ cList,
+ cUpdate,
+ cRollback
+ } cmd = cNone;
+ auto args = std::vector<string>{};
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help") {
+ showManPage("nix-channel");
+ } else if (*arg == "--version") {
+ printVersion("nix-channel");
+ } else if (*arg == "--add") {
+ cmd = cAdd;
+ } else if (*arg == "--remove") {
+ cmd = cRemove;
+ } else if (*arg == "--list") {
+ cmd = cList;
+ } else if (*arg == "--update") {
+ cmd = cUpdate;
+ } else if (*arg == "--rollback") {
+ cmd = cRollback;
+ } else {
+ args.push_back(std::move(*arg));
+ }
+ return true;
+ });
+ switch (cmd) {
+ case cNone:
+ throw UsageError("no command specified");
+ case cAdd:
+ if (args.size() < 1 || args.size() > 2)
+ throw UsageError("‘--add’ requires one or two arguments");
+ {
+ auto url = args[0];
+ auto name = string{};
+ if (args.size() == 2) {
+ name = args[1];
+ } else {
+ name = baseNameOf(url);
+ name = std::regex_replace(name, std::regex("-unstable$"), "");
+ name = std::regex_replace(name, std::regex("-stable$"), "");
+ }
+ addChannel(url, name);
+ }
+ break;
+ case cRemove:
+ if (args.size() != 1)
+ throw UsageError("‘--remove’ requires one argument");
+ removeChannel(args[0]);
+ break;
+ case cList:
+ if (!args.empty())
+ throw UsageError("‘--list’ expects no arguments");
+ readChannels();
+ for (const auto & channel : channels)
+ std::cout << channel.first << ' ' << channel.second << '\n';
+ break;
+ case cUpdate:
+ update(StringSet(args.begin(), args.end()));
+ break;
+ case cRollback:
+ if (args.size() > 1)
+ throw UsageError("‘--rollback’ has at most one argument");
+ auto envArgs = Strings{"--profile", profile};
+ if (args.size() == 1) {
+ envArgs.push_back("--switch-generation");
+ envArgs.push_back(args[0]);
+ } else {
+ envArgs.push_back("--rollback");
+ }
+ runProgram(settings.nixBinDir + "/nix-env", false, envArgs);
+ break;
+ }
+ });
+}
diff --git a/src/nix-collect-garbage/local.mk b/src/nix-collect-garbage/local.mk
new file mode 100644
index 000000000..02d14cf62
--- /dev/null
+++ b/src/nix-collect-garbage/local.mk
@@ -0,0 +1,7 @@
+programs += nix-collect-garbage
+
+nix-collect-garbage_DIR := $(d)
+
+nix-collect-garbage_SOURCES := $(d)/nix-collect-garbage.cc
+
+nix-collect-garbage_LIBS = libmain libstore libutil libformat
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
new file mode 100644
index 000000000..cc663a969
--- /dev/null
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -0,0 +1,92 @@
+#include "store-api.hh"
+#include "profiles.hh"
+#include "shared.hh"
+#include "globals.hh"
+
+#include <iostream>
+#include <cerrno>
+
+using namespace nix;
+
+std::string deleteOlderThan;
+bool dryRun = false;
+
+
+/* If `-d' was specified, remove all old generations of all profiles.
+ * Of course, this makes rollbacks to before this point in time
+ * impossible. */
+
+void removeOldGenerations(std::string dir)
+{
+ if (access(dir.c_str(), R_OK) != 0) return;
+
+ bool canWrite = access(dir.c_str(), W_OK) == 0;
+
+ for (auto & i : readDirectory(dir)) {
+ checkInterrupt();
+
+ auto path = dir + "/" + i.name;
+ auto type = i.type == DT_UNKNOWN ? getFileType(path) : i.type;
+
+ if (type == DT_LNK && canWrite) {
+ std::string link;
+ try {
+ link = readLink(path);
+ } catch (SysError & e) {
+ if (e.errNo == ENOENT) continue;
+ }
+ if (link.find("link") != string::npos) {
+ printInfo(format("removing old generations of profile %1%") % path);
+ if (deleteOlderThan != "")
+ deleteGenerationsOlderThan(path, deleteOlderThan, dryRun);
+ else
+ deleteOldGenerations(path, dryRun);
+ }
+ } else if (type == DT_DIR) {
+ removeOldGenerations(path);
+ }
+ }
+}
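+
+/* Illustrative sketch, not part of the original change: driving the
+   removal above directly, mirroring what main() below does for
+   ‘nix-collect-garbage --delete-older-than 30d --dry-run’. The "30d"
+   period format is assumed to be the one accepted by
+   deleteGenerationsOlderThan(); with an empty deleteOlderThan the
+   code above calls deleteOldGenerations() instead, which deletes all
+   generations except the current one. */
+static void removeOldGenerationsExample()
+{
+    deleteOlderThan = "30d";   // keep generations younger than 30 days
+    dryRun = true;             // only report what would be deleted
+    removeOldGenerations(settings.nixStateDir + "/profiles");
+}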
+
+int main(int argc, char * * argv)
+{
+ bool removeOld = false;
+
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ GCOptions options;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help")
+ showManPage("nix-collect-garbage");
+ else if (*arg == "--version")
+ printVersion("nix-collect-garbage");
+ else if (*arg == "--delete-old" || *arg == "-d") removeOld = true;
+ else if (*arg == "--delete-older-than") {
+ removeOld = true;
+ deleteOlderThan = getArg(*arg, arg, end);
+ }
+ else if (*arg == "--dry-run") dryRun = true;
+ else if (*arg == "--max-freed") {
+ long long maxFreed = getIntArg<long long>(*arg, arg, end, true);
+ options.maxFreed = maxFreed >= 0 ? maxFreed : 0;
+ }
+ else
+ return false;
+ return true;
+ });
+
+ auto profilesDir = settings.nixStateDir + "/profiles";
+ if (removeOld) removeOldGenerations(profilesDir);
+
+ // Run the actual garbage collector.
+ if (!dryRun) {
+ auto store = openStore();
+ options.action = GCOptions::gcDeleteDead;
+ GCResults results;
+ PrintFreed freed(true, results);
+ store->collectGarbage(options, results);
+ }
+ });
+}
diff --git a/src/nix-copy-closure/local.mk b/src/nix-copy-closure/local.mk
new file mode 100644
index 000000000..42bb34dd8
--- /dev/null
+++ b/src/nix-copy-closure/local.mk
@@ -0,0 +1,7 @@
+programs += nix-copy-closure
+
+nix-copy-closure_DIR := $(d)
+
+nix-copy-closure_LIBS = libmain libutil libformat libstore
+
+nix-copy-closure_SOURCES := $(d)/nix-copy-closure.cc
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
new file mode 100755
index 000000000..ed43bffbc
--- /dev/null
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -0,0 +1,63 @@
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+int main(int argc, char ** argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ auto gzip = false;
+ auto toMode = true;
+ auto includeOutputs = false;
+ auto dryRun = false;
+ auto useSubstitutes = false;
+ std::string sshHost;
+ PathSet storePaths;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help")
+ showManPage("nix-copy-closure");
+ else if (*arg == "--version")
+ printVersion("nix-copy-closure");
+ else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") {
+ if (*arg != "--gzip")
+ printMsg(lvlError, format("Warning: ‘%1%’ is not implemented, falling back to gzip") % *arg);
+ gzip = true;
+ } else if (*arg == "--from")
+ toMode = false;
+ else if (*arg == "--to")
+ toMode = true;
+ else if (*arg == "--include-outputs")
+ includeOutputs = true;
+ else if (*arg == "--show-progress")
+ printMsg(lvlError, "Warning: ‘--show-progress’ is not implemented");
+ else if (*arg == "--dry-run")
+ dryRun = true;
+ else if (*arg == "--use-substitutes" || *arg == "-s")
+ useSubstitutes = true;
+ else if (sshHost.empty())
+ sshHost = *arg;
+ else
+ storePaths.insert(*arg);
+ return true;
+ });
+
+ if (sshHost.empty())
+ throw UsageError("no host name specified");
+
+ auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : "");
+ auto to = toMode ? openStore(remoteUri) : openStore();
+ auto from = toMode ? openStore() : openStore(remoteUri);
+
+ PathSet storePaths2;
+ for (auto & path : storePaths)
+ storePaths2.insert(from->followLinksToStorePath(path));
+
+ PathSet closure;
+ from->computeFSClosure(storePaths2, closure, false, includeOutputs);
+
+ copyPaths(from, to, closure, useSubstitutes);
+ });
+}
diff --git a/src/nix-daemon/local.mk b/src/nix-daemon/local.mk
new file mode 100644
index 000000000..5a4474465
--- /dev/null
+++ b/src/nix-daemon/local.mk
@@ -0,0 +1,13 @@
+programs += nix-daemon
+
+nix-daemon_DIR := $(d)
+
+nix-daemon_SOURCES := $(d)/nix-daemon.cc
+
+nix-daemon_LIBS = libmain libstore libutil libformat
+
+nix-daemon_LDFLAGS = -pthread
+
+ifeq ($(OS), SunOS)
+ nix-daemon_LDFLAGS += -lsocket
+endif
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
new file mode 100644
index 000000000..07ad0b45b
--- /dev/null
+++ b/src/nix-daemon/nix-daemon.cc
@@ -0,0 +1,1042 @@
+#include "shared.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "serialise.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+#include "monitor-fd.hh"
+#include "derivations.hh"
+
+#include <algorithm>
+
+#include <cstring>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+#include <pwd.h>
+#include <grp.h>
+#include <fcntl.h>
+#include <limits.h>
+
+#if __APPLE__ || __FreeBSD__
+#include <sys/ucred.h>
+#endif
+
+using namespace nix;
+
+#ifndef __linux__
+#define SPLICE_F_MOVE 0
+static ssize_t splice(int fd_in, void *off_in, int fd_out, void *off_out, size_t len, unsigned int flags)
+{
+    /* We ignore most of the parameters; they are only here for
+       conformance with the Linux syscall's signature. */
+ char buf[8192];
+ auto read_count = read(fd_in, buf, sizeof(buf));
+ if (read_count == -1)
+ return read_count;
+ auto write_count = decltype(read_count)(0);
+ while (write_count < read_count) {
+ auto res = write(fd_out, buf + write_count, read_count - write_count);
+ if (res == -1)
+ return res;
+ write_count += res;
+ }
+ return read_count;
+}
+#endif
+
+static FdSource from(STDIN_FILENO);
+static FdSink to(STDOUT_FILENO);
+
+static bool canSendStderr;
+
+static Logger * defaultLogger;
+
+
+/* Logger that forwards log messages to the client, *if* we're in a
+ state where the protocol allows it (i.e., when canSendStderr is
+ true). */
+class TunnelLogger : public Logger
+{
+ void log(Verbosity lvl, const FormatOrString & fs) override
+ {
+ if (lvl > verbosity) return;
+
+ if (canSendStderr) {
+ try {
+ to << STDERR_NEXT << (fs.s + "\n");
+ to.flush();
+ } catch (...) {
+ /* Write failed; that means that the other side is
+ gone. */
+ canSendStderr = false;
+ throw;
+ }
+ } else
+ defaultLogger->log(lvl, fs);
+ }
+
+ void startActivity(Activity & activity, Verbosity lvl, const FormatOrString & fs) override
+ {
+ log(lvl, fs);
+ }
+
+ void stopActivity(Activity & activity) override
+ {
+ }
+};
+
+
+/* startWork() means that we're starting an operation for which we
+ want to send out stderr to the client. */
+static void startWork()
+{
+ canSendStderr = true;
+}
+
+
+/* stopWork() means that we're done; stop sending stderr to the
+ client. */
+static void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
+{
+ canSendStderr = false;
+
+ if (success)
+ to << STDERR_LAST;
+ else {
+ to << STDERR_ERROR << msg;
+ if (status != 0) to << status;
+ }
+}
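+
+
+/* Illustrative sketch, not part of the original change: the client
+   side of the framing implemented by startWork()/stopWork() above,
+   roughly what RemoteStore does after sending a request. STDERR_*
+   frames are drained until STDERR_LAST (success) or STDERR_ERROR
+   (failure); only then is the actual reply read. STDERR_READ and
+   STDERR_WRITE (used for tunnelled sources/sinks) are omitted for
+   brevity, and the constants are assumed to come from
+   worker-protocol.hh. */
+static void readDaemonStderr(Source & source)
+{
+    while (true) {
+        unsigned int msg = readInt(source);
+        if (msg == STDERR_NEXT)
+            printError(readString(source));   // log line forwarded by TunnelLogger
+        else if (msg == STDERR_LAST)
+            break;                            // operation finished successfully
+        else if (msg == STDERR_ERROR)
+            /* A non-zero exit status may follow; a real client would
+               read it before throwing. */
+            throw Error(readString(source));
+        else
+            throw Error("unexpected message from the daemon");
+    }
+}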
+
+
+struct TunnelSink : Sink
+{
+ Sink & to;
+ TunnelSink(Sink & to) : to(to) { }
+ virtual void operator () (const unsigned char * data, size_t len)
+ {
+ to << STDERR_WRITE;
+ writeString(data, len, to);
+ }
+};
+
+
+struct TunnelSource : BufferedSource
+{
+ Source & from;
+ TunnelSource(Source & from) : from(from) { }
+ size_t readUnbuffered(unsigned char * data, size_t len)
+ {
+ to << STDERR_READ << len;
+ to.flush();
+ size_t n = readString(data, len, from);
+ if (n == 0) throw EndOfFile("unexpected end-of-file");
+ return n;
+ }
+};
+
+
+/* If the NAR archive contains a single regular file at top level,
+   save the contents of that file to `s'. Otherwise, clear the
+   `regular' flag so that the caller can report an error. */
+struct RetrieveRegularNARSink : ParseSink
+{
+ bool regular;
+ string s;
+
+ RetrieveRegularNARSink() : regular(true) { }
+
+ void createDirectory(const Path & path)
+ {
+ regular = false;
+ }
+
+ void receiveContents(unsigned char * data, unsigned int len)
+ {
+ s.append((const char *) data, len);
+ }
+
+ void createSymlink(const Path & path, const string & target)
+ {
+ regular = false;
+ }
+};
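+
+
+/* Illustrative sketch, not part of the original change: how the sink
+   above extracts the contents of a single-file NAR, mirroring the
+   non-recursive branch of wopAddToStore further down. Assumes
+   StringSource from serialise.hh and parseDump() from archive.hh
+   (both already included via the headers above). */
+static std::string unpackSingleFileNar(const std::string & nar)
+{
+    StringSource source(nar);
+    RetrieveRegularNARSink sink;
+    parseDump(sink, source);           // walks the NAR, appending to sink.s
+    if (!sink.regular) throw Error("regular file expected");
+    return sink.s;
+}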
+
+
+static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVersion,
+ Source & from, Sink & to, unsigned int op)
+{
+ switch (op) {
+
+ case wopIsValidPath: {
+ /* 'readStorePath' could raise an error leading to the connection
+ being closed. To be able to recover from an invalid path error,
+ call 'startWork' early, and do 'assertStorePath' afterwards so
+ that the 'Error' exception handler doesn't close the
+ connection. */
+ Path path = readString(from);
+ startWork();
+ store->assertStorePath(path);
+ bool result = store->isValidPath(path);
+ stopWork();
+ to << result;
+ break;
+ }
+
+ case wopQueryValidPaths: {
+ PathSet paths = readStorePaths<PathSet>(*store, from);
+ startWork();
+ PathSet res = store->queryValidPaths(paths);
+ stopWork();
+ to << res;
+ break;
+ }
+
+ case wopHasSubstitutes: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ PathSet res = store->querySubstitutablePaths({path});
+ stopWork();
+ to << (res.find(path) != res.end());
+ break;
+ }
+
+ case wopQuerySubstitutablePaths: {
+ PathSet paths = readStorePaths<PathSet>(*store, from);
+ startWork();
+ PathSet res = store->querySubstitutablePaths(paths);
+ stopWork();
+ to << res;
+ break;
+ }
+
+ case wopQueryPathHash: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ auto hash = store->queryPathInfo(path)->narHash;
+ stopWork();
+ to << printHash(hash);
+ break;
+ }
+
+ case wopQueryReferences:
+ case wopQueryReferrers:
+ case wopQueryValidDerivers:
+ case wopQueryDerivationOutputs: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ PathSet paths;
+ if (op == wopQueryReferences)
+ paths = store->queryPathInfo(path)->references;
+ else if (op == wopQueryReferrers)
+ store->queryReferrers(path, paths);
+ else if (op == wopQueryValidDerivers)
+ paths = store->queryValidDerivers(path);
+ else paths = store->queryDerivationOutputs(path);
+ stopWork();
+ to << paths;
+ break;
+ }
+
+ case wopQueryDerivationOutputNames: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ StringSet names;
+ names = store->queryDerivationOutputNames(path);
+ stopWork();
+ to << names;
+ break;
+ }
+
+ case wopQueryDeriver: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ auto deriver = store->queryPathInfo(path)->deriver;
+ stopWork();
+ to << deriver;
+ break;
+ }
+
+ case wopQueryPathFromHashPart: {
+ string hashPart = readString(from);
+ startWork();
+ Path path = store->queryPathFromHashPart(hashPart);
+ stopWork();
+ to << path;
+ break;
+ }
+
+ case wopAddToStore: {
+ bool fixed, recursive;
+ std::string s, baseName;
+ from >> baseName >> fixed /* obsolete */ >> recursive >> s;
+ /* Compatibility hack. */
+ if (!fixed) {
+ s = "sha256";
+ recursive = true;
+ }
+ HashType hashAlgo = parseHashType(s);
+
+ TeeSource savedNAR(from);
+ RetrieveRegularNARSink savedRegular;
+
+ if (recursive) {
+ /* Get the entire NAR dump from the client and save it to
+ a string so that we can pass it to
+ addToStoreFromDump(). */
+ ParseSink sink; /* null sink; just parse the NAR */
+ parseDump(sink, savedNAR);
+ } else
+ parseDump(savedRegular, from);
+
+ startWork();
+ if (!savedRegular.regular) throw Error("regular file expected");
+ Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
+ stopWork();
+
+ to << path;
+ break;
+ }
+
+ case wopAddTextToStore: {
+ string suffix = readString(from);
+ string s = readString(from);
+ PathSet refs = readStorePaths<PathSet>(*store, from);
+ startWork();
+ Path path = store->addTextToStore(suffix, s, refs, false);
+ stopWork();
+ to << path;
+ break;
+ }
+
+ case wopExportPath: {
+ Path path = readStorePath(*store, from);
+ readInt(from); // obsolete
+ startWork();
+ TunnelSink sink(to);
+ store->exportPath(path, sink);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopImportPaths: {
+ startWork();
+ TunnelSource source(from);
+ Paths paths = store->importPaths(source, 0, trusted);
+ stopWork();
+ to << paths;
+ break;
+ }
+
+ case wopBuildPaths: {
+ PathSet drvs = readStorePaths<PathSet>(*store, from);
+ BuildMode mode = bmNormal;
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
+ mode = (BuildMode) readInt(from);
+
+ /* Repairing is not atomic, so disallowed for "untrusted"
+ clients. */
+ if (mode == bmRepair && !trusted)
+ throw Error("repairing is not supported when building through the Nix daemon");
+ }
+ startWork();
+ store->buildPaths(drvs, mode);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopBuildDerivation: {
+ Path drvPath = readStorePath(*store, from);
+ BasicDerivation drv;
+ readDerivation(from, *store, drv);
+ BuildMode buildMode = (BuildMode) readInt(from);
+ startWork();
+ if (!trusted)
+ throw Error("you are not privileged to build derivations");
+ auto res = store->buildDerivation(drvPath, drv, buildMode);
+ stopWork();
+ to << res.status << res.errorMsg;
+ break;
+ }
+
+ case wopEnsurePath: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ store->ensurePath(path);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopAddTempRoot: {
+ Path path = readStorePath(*store, from);
+ startWork();
+ store->addTempRoot(path);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopAddIndirectRoot: {
+ Path path = absPath(readString(from));
+ startWork();
+ store->addIndirectRoot(path);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopSyncWithGC: {
+ startWork();
+ store->syncWithGC();
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopFindRoots: {
+ startWork();
+ Roots roots = store->findRoots();
+ stopWork();
+ to << roots.size();
+ for (auto & i : roots)
+ to << i.first << i.second;
+ break;
+ }
+
+ case wopCollectGarbage: {
+ GCOptions options;
+ options.action = (GCOptions::GCAction) readInt(from);
+ options.pathsToDelete = readStorePaths<PathSet>(*store, from);
+ from >> options.ignoreLiveness >> options.maxFreed;
+ // obsolete fields
+ readInt(from);
+ readInt(from);
+ readInt(from);
+
+ GCResults results;
+
+ startWork();
+ if (options.ignoreLiveness)
+ throw Error("you are not allowed to ignore liveness");
+ store->collectGarbage(options, results);
+ stopWork();
+
+ to << results.paths << results.bytesFreed << 0 /* obsolete */;
+
+ break;
+ }
+
+ case wopSetOptions: {
+ settings.keepFailed = readInt(from);
+ settings.keepGoing = readInt(from);
+ settings.tryFallback = readInt(from);
+ verbosity = (Verbosity) readInt(from);
+ settings.maxBuildJobs.assign(readInt(from));
+ settings.maxSilentTime = readInt(from);
+ settings.useBuildHook = readInt(from) != 0;
+ settings.verboseBuild = lvlError == (Verbosity) readInt(from);
+ readInt(from); // obsolete logType
+ readInt(from); // obsolete printBuildTrace
+ settings.buildCores = readInt(from);
+ settings.useSubstitutes = readInt(from);
+
+ StringMap overrides;
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
+ unsigned int n = readInt(from);
+ for (unsigned int i = 0; i < n; i++) {
+ string name = readString(from);
+ string value = readString(from);
+ overrides.emplace(name, value);
+ }
+ }
+
+ startWork();
+
+ for (auto & i : overrides) {
+ auto & name(i.first);
+ auto & value(i.second);
+
+ auto setSubstituters = [&](Setting<Strings> & res) {
+ if (name != res.name && res.aliases.count(name) == 0)
+ return false;
+ StringSet trusted = settings.trustedSubstituters;
+ for (auto & s : settings.substituters.get())
+ trusted.insert(s);
+ Strings subs;
+ auto ss = tokenizeString<Strings>(value);
+ for (auto & s : ss)
+ if (trusted.count(s))
+ subs.push_back(s);
+ else
+ warn("ignoring untrusted substituter '%s'", s);
+ res = subs;
+ return true;
+ };
+
+ try {
+ if (trusted
+ || name == settings.buildTimeout.name
+ || name == settings.connectTimeout.name)
+ settings.set(name, value);
+ else if (setSubstituters(settings.substituters))
+ ;
+ else if (setSubstituters(settings.extraSubstituters))
+ ;
+ else
+ debug("ignoring untrusted setting '%s'", name);
+ } catch (UsageError & e) {
+ warn(e.what());
+ }
+ }
+
+ stopWork();
+ break;
+ }
+
+ case wopQuerySubstitutablePathInfo: {
+ Path path = absPath(readString(from));
+ startWork();
+ SubstitutablePathInfos infos;
+ store->querySubstitutablePathInfos({path}, infos);
+ stopWork();
+ SubstitutablePathInfos::iterator i = infos.find(path);
+ if (i == infos.end())
+ to << 0;
+ else {
+ to << 1 << i->second.deriver << i->second.references << i->second.downloadSize << i->second.narSize;
+ }
+ break;
+ }
+
+ case wopQuerySubstitutablePathInfos: {
+ PathSet paths = readStorePaths<PathSet>(*store, from);
+ startWork();
+ SubstitutablePathInfos infos;
+ store->querySubstitutablePathInfos(paths, infos);
+ stopWork();
+ to << infos.size();
+ for (auto & i : infos) {
+ to << i.first << i.second.deriver << i.second.references
+ << i.second.downloadSize << i.second.narSize;
+ }
+ break;
+ }
+
+ case wopQueryAllValidPaths: {
+ startWork();
+ PathSet paths = store->queryAllValidPaths();
+ stopWork();
+ to << paths;
+ break;
+ }
+
+ case wopQueryPathInfo: {
+ Path path = readStorePath(*store, from);
+ std::shared_ptr<const ValidPathInfo> info;
+ startWork();
+ try {
+ info = store->queryPathInfo(path);
+ } catch (InvalidPath &) {
+ if (GET_PROTOCOL_MINOR(clientVersion) < 17) throw;
+ }
+ stopWork();
+ if (info) {
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
+ to << 1;
+ to << info->deriver << printHash(info->narHash) << info->references
+ << info->registrationTime << info->narSize;
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
+ to << info->ultimate
+ << info->sigs
+ << info->ca;
+ }
+ } else {
+ assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
+ to << 0;
+ }
+ break;
+ }
+
+ case wopOptimiseStore:
+ startWork();
+ store->optimiseStore();
+ stopWork();
+ to << 1;
+ break;
+
+ case wopVerifyStore: {
+ bool checkContents, repair;
+ from >> checkContents >> repair;
+ startWork();
+ if (repair && !trusted)
+ throw Error("you are not privileged to repair paths");
+ bool errors = store->verifyStore(checkContents, repair);
+ stopWork();
+ to << errors;
+ break;
+ }
+
+ case wopAddSignatures: {
+ Path path = readStorePath(*store, from);
+ StringSet sigs = readStrings<StringSet>(from);
+ startWork();
+ if (!trusted)
+ throw Error("you are not privileged to add signatures");
+ store->addSignatures(path, sigs);
+ stopWork();
+ to << 1;
+ break;
+ }
+
+ case wopNarFromPath: {
+ auto path = readStorePath(*store, from);
+ startWork();
+ stopWork();
+ dumpPath(path, to);
+ break;
+ }
+
+ case wopAddToStoreNar: {
+ bool repair, dontCheckSigs;
+ ValidPathInfo info;
+ info.path = readStorePath(*store, from);
+ from >> info.deriver;
+ if (!info.deriver.empty())
+ store->assertStorePath(info.deriver);
+ info.narHash = parseHash(htSHA256, readString(from));
+ info.references = readStorePaths<PathSet>(*store, from);
+ from >> info.registrationTime >> info.narSize >> info.ultimate;
+ info.sigs = readStrings<StringSet>(from);
+ from >> info.ca >> repair >> dontCheckSigs;
+ if (!trusted && dontCheckSigs)
+ dontCheckSigs = false;
+
+ TeeSink tee(from);
+ parseDump(tee, tee.source);
+
+ startWork();
+ store->addToStore(info, tee.source.data, repair, dontCheckSigs, nullptr);
+ stopWork();
+ break;
+ }
+
+ case wopQueryMissing: {
+ PathSet targets = readStorePaths<PathSet>(*store, from);
+ startWork();
+ PathSet willBuild, willSubstitute, unknown;
+ unsigned long long downloadSize, narSize;
+ store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
+ stopWork();
+ to << willBuild << willSubstitute << unknown << downloadSize << narSize;
+ break;
+ }
+
+ default:
+ throw Error(format("invalid operation %1%") % op);
+ }
+}
+
+
+static void processConnection(bool trusted)
+{
+ MonitorFdHup monitor(from.fd);
+
+ canSendStderr = false;
+ defaultLogger = logger;
+ logger = new TunnelLogger();
+
+ /* Exchange the greeting. */
+ unsigned int magic = readInt(from);
+ if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
+ to << WORKER_MAGIC_2 << PROTOCOL_VERSION;
+ to.flush();
+ unsigned int clientVersion = readInt(from);
+
+ if (clientVersion < 0x10a)
+ throw Error("the Nix client version is too old");
+
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
+ setAffinityTo(readInt(from));
+
+ readInt(from); // obsolete reserveSpace
+
+ /* Send startup error messages to the client. */
+ startWork();
+
+ try {
+
+ /* If we can't accept clientVersion, then throw an error
+ *here* (not above). */
+
+#if 0
+ /* Prevent users from doing something very dangerous. */
+ if (geteuid() == 0 &&
+ querySetting("build-users-group", "") == "")
+ throw Error("if you run ‘nix-daemon’ as root, then you MUST set ‘build-users-group’!");
+#endif
+
+ /* Open the store. */
+ Store::Params params; // FIXME: get params from somewhere
+ // Disable caching since the client already does that.
+ params["path-info-cache-size"] = "0";
+ auto store = make_ref<LocalStore>(params);
+
+ stopWork();
+ to.flush();
+
+ /* Process client requests. */
+ unsigned int opCount = 0;
+
+ while (true) {
+ WorkerOp op;
+ try {
+ op = (WorkerOp) readInt(from);
+ } catch (Interrupted & e) {
+ break;
+ } catch (EndOfFile & e) {
+ break;
+ }
+
+ opCount++;
+
+ try {
+ performOp(store, trusted, clientVersion, from, to, op);
+ } catch (Error & e) {
+ /* If we're not in a state where we can send replies, then
+ something went wrong processing the input of the
+ client. This can happen especially if I/O errors occur
+ during addTextToStore() / importPath(). If that
+ happens, just send the error message and exit. */
+ bool errorAllowed = canSendStderr;
+ stopWork(false, e.msg(), e.status);
+ if (!errorAllowed) throw;
+ } catch (std::bad_alloc & e) {
+ stopWork(false, "Nix daemon out of memory", 1);
+ throw;
+ }
+
+ to.flush();
+
+ assert(!canSendStderr);
+        }
+
+ canSendStderr = false;
+ _isInterrupted = false;
+ debug(format("%1% operations") % opCount);
+
+ } catch (Error & e) {
+ stopWork(false, e.msg(), 1);
+ to.flush();
+ return;
+ }
+}
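+
+
+/* Illustrative sketch, not part of the original change: the client
+   half of the greeting that processConnection() above expects,
+   assuming the WORKER_MAGIC_* / PROTOCOL_VERSION constants from
+   worker-protocol.hh and an already connected FdSource/FdSink pair.
+   The real client in remote-store.cc is more careful about talking
+   to older daemons; this version only targets the daemon in this
+   file. */
+static unsigned int clientHandshake(FdSource & source, FdSink & sink)
+{
+    sink << WORKER_MAGIC_1;
+    sink.flush();
+    if (readInt(source) != WORKER_MAGIC_2) throw Error("protocol mismatch");
+    unsigned int daemonVersion = readInt(source);
+    sink << PROTOCOL_VERSION;
+    sink << 0;   // no CPU affinity request (read above for protocol minor >= 14)
+    sink << 0;   // obsolete reserveSpace field
+    sink.flush();
+    return daemonVersion;
+}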
+
+
+static void sigChldHandler(int sigNo)
+{
+ /* Reap all dead children. */
+ while (waitpid(-1, 0, WNOHANG) > 0) ;
+}
+
+
+static void setSigChldAction(bool autoReap)
+{
+ struct sigaction act, oact;
+ act.sa_handler = autoReap ? sigChldHandler : SIG_DFL;
+ sigfillset(&act.sa_mask);
+ act.sa_flags = 0;
+ if (sigaction(SIGCHLD, &act, &oact))
+ throw SysError("setting SIGCHLD handler");
+}
+
+
+bool matchUser(const string & user, const string & group, const Strings & users)
+{
+ if (find(users.begin(), users.end(), "*") != users.end())
+ return true;
+
+ if (find(users.begin(), users.end(), user) != users.end())
+ return true;
+
+ for (auto & i : users)
+ if (string(i, 0, 1) == "@") {
+ if (group == string(i, 1)) return true;
+ struct group * gr = getgrnam(i.c_str() + 1);
+ if (!gr) continue;
+ for (char * * mem = gr->gr_mem; *mem; mem++)
+ if (user == string(*mem)) return true;
+ }
+
+ return false;
+}
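+
+
+/* Illustrative sketch, not part of the original change: how the rules
+   above apply to a hypothetical trusted-users/allowed-users list of
+   { "root", "@wheel" }. The @wheel case for ‘bob’ additionally
+   consults getgrnam(), so this assumes ‘bob’ is not a member of the
+   wheel group on the running system. */
+static void matchUserExamples()
+{
+    Strings users = { "root", "@wheel" };
+    assert(matchUser("root",  "users", users));      // listed by name
+    assert(matchUser("alice", "wheel", users));      // primary group matches @wheel
+    assert(!matchUser("bob",  "users", users));      // neither name nor group
+    assert(matchUser("bob",   "users", { "*" }));    // wildcard admits everyone
+}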
+
+
+struct PeerInfo
+{
+ bool pidKnown;
+ pid_t pid;
+ bool uidKnown;
+ uid_t uid;
+ bool gidKnown;
+ gid_t gid;
+};
+
+
+/* Get the identity of the caller, if possible. */
+static PeerInfo getPeerInfo(int remote)
+{
+ PeerInfo peer = { false, 0, false, 0, false, 0 };
+
+#if defined(SO_PEERCRED)
+
+ ucred cred;
+ socklen_t credLen = sizeof(cred);
+ if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1)
+ throw SysError("getting peer credentials");
+ peer = { true, cred.pid, true, cred.uid, true, cred.gid };
+
+#elif defined(LOCAL_PEERCRED)
+
+#if !defined(SOL_LOCAL)
+#define SOL_LOCAL 0
+#endif
+
+ xucred cred;
+ socklen_t credLen = sizeof(cred);
+ if (getsockopt(remote, SOL_LOCAL, LOCAL_PEERCRED, &cred, &credLen) == -1)
+ throw SysError("getting peer credentials");
+ peer = { false, 0, true, cred.cr_uid, false, 0 };
+
+#endif
+
+ return peer;
+}
+
+
+#define SD_LISTEN_FDS_START 3
+
+
+static void daemonLoop(char * * argv)
+{
+ if (chdir("/") == -1)
+ throw SysError("cannot change current directory");
+
+ /* Get rid of children automatically; don't let them become
+ zombies. */
+ setSigChldAction(true);
+
+ AutoCloseFD fdSocket;
+
+ /* Handle socket-based activation by systemd. */
+ if (getEnv("LISTEN_FDS") != "") {
+ if (getEnv("LISTEN_PID") != std::to_string(getpid()) || getEnv("LISTEN_FDS") != "1")
+ throw Error("unexpected systemd environment variables");
+ fdSocket = SD_LISTEN_FDS_START;
+ }
+
+    /* Otherwise, create and bind to a Unix domain socket ourselves. */
+    else {
+
+ fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
+ if (!fdSocket)
+ throw SysError("cannot create Unix domain socket");
+
+ string socketPath = settings.nixDaemonSocketFile;
+
+ createDirs(dirOf(socketPath));
+
+ /* Urgh, sockaddr_un allows path names of only 108 characters.
+ So chdir to the socket directory so that we can pass a
+ relative path name. */
+ if (chdir(dirOf(socketPath).c_str()) == -1)
+ throw SysError("cannot change current directory");
+ Path socketPathRel = "./" + baseNameOf(socketPath);
+
+ struct sockaddr_un addr;
+ addr.sun_family = AF_UNIX;
+ if (socketPathRel.size() >= sizeof(addr.sun_path))
+ throw Error(format("socket path ‘%1%’ is too long") % socketPathRel);
+ strcpy(addr.sun_path, socketPathRel.c_str());
+
+ unlink(socketPath.c_str());
+
+ /* Make sure that the socket is created with 0666 permission
+ (everybody can connect --- provided they have access to the
+ directory containing the socket). */
+ mode_t oldMode = umask(0111);
+ int res = bind(fdSocket.get(), (struct sockaddr *) &addr, sizeof(addr));
+ umask(oldMode);
+ if (res == -1)
+ throw SysError(format("cannot bind to socket ‘%1%’") % socketPath);
+
+ if (chdir("/") == -1) /* back to the root */
+ throw SysError("cannot change current directory");
+
+ if (listen(fdSocket.get(), 5) == -1)
+ throw SysError(format("cannot listen on socket ‘%1%’") % socketPath);
+ }
+
+ closeOnExec(fdSocket.get());
+
+ /* Loop accepting connections. */
+ while (1) {
+
+ try {
+ /* Accept a connection. */
+ struct sockaddr_un remoteAddr;
+ socklen_t remoteAddrLen = sizeof(remoteAddr);
+
+ AutoCloseFD remote = accept(fdSocket.get(),
+ (struct sockaddr *) &remoteAddr, &remoteAddrLen);
+ checkInterrupt();
+ if (!remote) {
+ if (errno == EINTR) continue;
+ throw SysError("accepting connection");
+ }
+
+ closeOnExec(remote.get());
+
+ bool trusted = false;
+ PeerInfo peer = getPeerInfo(remote.get());
+
+ struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0;
+ string user = pw ? pw->pw_name : std::to_string(peer.uid);
+
+ struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
+ string group = gr ? gr->gr_name : std::to_string(peer.gid);
+
+ Strings trustedUsers = settings.trustedUsers;
+ Strings allowedUsers = settings.allowedUsers;
+
+ if (matchUser(user, group, trustedUsers))
+ trusted = true;
+
+ if (!trusted && !matchUser(user, group, allowedUsers))
+ throw Error(format("user ‘%1%’ is not allowed to connect to the Nix daemon") % user);
+
+ printInfo(format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""))
+ % (peer.pidKnown ? std::to_string(peer.pid) : "<unknown>")
+ % (peer.uidKnown ? user : "<unknown>"));
+
+ /* Fork a child to handle the connection. */
+ ProcessOptions options;
+ options.errorPrefix = "unexpected Nix daemon error: ";
+ options.dieWithParent = false;
+ options.runExitHandlers = true;
+ options.allowVfork = false;
+ startProcess([&]() {
+ fdSocket = -1;
+
+ /* Background the daemon. */
+ if (setsid() == -1)
+ throw SysError(format("creating a new session"));
+
+ /* Restore normal handling of SIGCHLD. */
+ setSigChldAction(false);
+
+ /* For debugging, stuff the pid into argv[1]. */
+ if (peer.pidKnown && argv[1]) {
+ string processName = std::to_string(peer.pid);
+ strncpy(argv[1], processName.c_str(), strlen(argv[1]));
+ }
+
+ /* Handle the connection. */
+ from.fd = remote.get();
+ to.fd = remote.get();
+ processConnection(trusted);
+
+ exit(0);
+ }, options);
+
+ } catch (Interrupted & e) {
+ throw;
+ } catch (Error & e) {
+ printError(format("error processing connection: %1%") % e.msg());
+ }
+ }
+}
+
+
+int main(int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ auto stdio = false;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--daemon")
+ ; /* ignored for backwards compatibility */
+ else if (*arg == "--help")
+ showManPage("nix-daemon");
+ else if (*arg == "--version")
+ printVersion("nix-daemon");
+ else if (*arg == "--stdio")
+ stdio = true;
+ else return false;
+ return true;
+ });
+
+ if (stdio) {
+ if (getStoreType() == tDaemon) {
+ /* Forward on this connection to the real daemon */
+ auto socketPath = settings.nixDaemonSocketFile;
+ auto s = socket(PF_UNIX, SOCK_STREAM, 0);
+ if (s == -1)
+ throw SysError("creating Unix domain socket");
+
+ auto socketDir = dirOf(socketPath);
+ if (chdir(socketDir.c_str()) == -1)
+ throw SysError(format("changing to socket directory ‘%1%’") % socketDir);
+
+ auto socketName = baseNameOf(socketPath);
+ auto addr = sockaddr_un{};
+ addr.sun_family = AF_UNIX;
+ if (socketName.size() + 1 >= sizeof(addr.sun_path))
+ throw Error(format("socket name %1% is too long") % socketName);
+ strcpy(addr.sun_path, socketName.c_str());
+
+ if (connect(s, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError(format("cannot connect to daemon at %1%") % socketPath);
+
+ auto nfds = (s > STDIN_FILENO ? s : STDIN_FILENO) + 1;
+ while (true) {
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(s, &fds);
+ FD_SET(STDIN_FILENO, &fds);
+ if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
+ throw SysError("waiting for data from client or server");
+ if (FD_ISSET(s, &fds)) {
+ auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from daemon socket to stdout");
+ else if (res == 0)
+ throw EndOfFile("unexpected EOF from daemon socket");
+ }
+ if (FD_ISSET(STDIN_FILENO, &fds)) {
+ auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from stdin to daemon socket");
+ else if (res == 0)
+ return;
+ }
+ }
+ } else {
+ processConnection(true);
+ }
+ } else {
+ daemonLoop(argv);
+ }
+ });
+}
diff --git a/src/nix-env/local.mk b/src/nix-env/local.mk
new file mode 100644
index 000000000..e80719cd7
--- /dev/null
+++ b/src/nix-env/local.mk
@@ -0,0 +1,7 @@
+programs += nix-env
+
+nix-env_DIR := $(d)
+
+nix-env_SOURCES := $(wildcard $(d)/*.cc)
+
+nix-env_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
new file mode 100644
index 000000000..908c09bc8
--- /dev/null
+++ b/src/nix-env/nix-env.cc
@@ -0,0 +1,1426 @@
+#include "attr-path.hh"
+#include "common-opts.hh"
+#include "derivations.hh"
+#include "eval.hh"
+#include "get-drvs.hh"
+#include "globals.hh"
+#include "names.hh"
+#include "profiles.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "user-env.hh"
+#include "util.hh"
+#include "json.hh"
+#include "value-to-json.hh"
+#include "xml-writer.hh"
+
+#include <cerrno>
+#include <ctime>
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+
+using namespace nix;
+using std::cout;
+
+
+typedef enum {
+ srcNixExprDrvs,
+ srcNixExprs,
+ srcStorePaths,
+ srcProfile,
+ srcAttrPath,
+ srcUnknown
+} InstallSourceType;
+
+
+struct InstallSourceInfo
+{
+ InstallSourceType type;
+ Path nixExprPath; /* for srcNixExprDrvs, srcNixExprs */
+ Path profile; /* for srcProfile */
+ string systemFilter; /* for srcNixExprDrvs */
+ Bindings * autoArgs;
+};
+
+
+struct Globals
+{
+ InstallSourceInfo instSource;
+ Path profile;
+ std::shared_ptr<EvalState> state;
+ bool dryRun;
+ bool preserveInstalled;
+ bool removeAll;
+ string forceName;
+ bool prebuiltOnly;
+};
+
+
+typedef void (* Operation) (Globals & globals,
+ Strings opFlags, Strings opArgs);
+
+
+static string needArg(Strings::iterator & i,
+ Strings & args, const string & arg)
+{
+ if (i == args.end()) throw UsageError(
+ format("‘%1%’ requires an argument") % arg);
+ return *i++;
+}
+
+
+static bool parseInstallSourceOptions(Globals & globals,
+ Strings::iterator & i, Strings & args, const string & arg)
+{
+ if (arg == "--from-expression" || arg == "-E")
+ globals.instSource.type = srcNixExprs;
+ else if (arg == "--from-profile") {
+ globals.instSource.type = srcProfile;
+ globals.instSource.profile = needArg(i, args, arg);
+ }
+ else if (arg == "--attr" || arg == "-A")
+ globals.instSource.type = srcAttrPath;
+ else return false;
+ return true;
+}
+
+
+static bool isNixExpr(const Path & path, struct stat & st)
+{
+ return S_ISREG(st.st_mode) || (S_ISDIR(st.st_mode) && pathExists(path + "/default.nix"));
+}
+
+
+static void getAllExprs(EvalState & state,
+ const Path & path, StringSet & attrs, Value & v)
+{
+ StringSet namesSorted;
+ for (auto & i : readDirectory(path)) namesSorted.insert(i.name);
+
+ for (auto & i : namesSorted) {
+ /* Ignore the manifest.nix used by profiles. This is
+ necessary to prevent it from showing up in channels (which
+ are implemented using profiles). */
+ if (i == "manifest.nix") continue;
+
+ Path path2 = path + "/" + i;
+
+ struct stat st;
+ if (stat(path2.c_str(), &st) == -1)
+ continue; // ignore dangling symlinks in ~/.nix-defexpr
+
+ if (isNixExpr(path2, st) && (!S_ISREG(st.st_mode) || hasSuffix(path2, ".nix"))) {
+ /* Strip off the `.nix' filename suffix (if applicable),
+ otherwise the attribute cannot be selected with the
+ `-A' option. Useful if you want to stick a Nix
+ expression directly in ~/.nix-defexpr. */
+ string attrName = i;
+ if (hasSuffix(attrName, ".nix"))
+ attrName = string(attrName, 0, attrName.size() - 4);
+ if (attrs.find(attrName) != attrs.end()) {
+ printError(format("warning: name collision in input Nix expressions, skipping ‘%1%’") % path2);
+ continue;
+ }
+ attrs.insert(attrName);
+ /* Load the expression on demand. */
+ Value & vFun = state.getBuiltin("import");
+ Value & vArg(*state.allocValue());
+ mkString(vArg, path2);
+ if (v.attrs->size() == v.attrs->capacity())
+ throw Error(format("too many Nix expressions in directory ‘%1%’") % path);
+ mkApp(*state.allocAttr(v, state.symbols.create(attrName)), vFun, vArg);
+ }
+ else if (S_ISDIR(st.st_mode))
+ /* `path2' is a directory (with no default.nix in it);
+ recurse into it. */
+ getAllExprs(state, path2, attrs, v);
+ }
+}
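+
+
+/* Illustrative sketch, not part of the original change: the attribute
+   name that getAllExprs() derives for an entry, e.g.
+   ‘~/.nix-defexpr/foo.nix’ becomes the attribute ‘foo’ (selectable
+   with ‘-A foo’), while a channel directory keeps its plain name. */
+static string exprAttrName(const string & fileName)
+{
+    return hasSuffix(fileName, ".nix")
+        ? string(fileName, 0, fileName.size() - 4)
+        : fileName;
+}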
+
+
+static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
+{
+ struct stat st;
+ if (stat(path.c_str(), &st) == -1)
+ throw SysError(format("getting information about ‘%1%’") % path);
+
+ if (isNixExpr(path, st)) {
+ state.evalFile(path, v);
+ return;
+ }
+
+ /* The path is a directory. Put the Nix expressions in the
+ directory in a set, with the file name of each expression as
+ the attribute name. Recurse into subdirectories (but keep the
+ set flat, not nested, to make it easier for a user to have a
+ ~/.nix-defexpr directory that includes some system-wide
+ directory). */
+ if (S_ISDIR(st.st_mode)) {
+ state.mkAttrs(v, 1024);
+ state.mkList(*state.allocAttr(v, state.symbols.create("_combineChannels")), 0);
+ StringSet attrs;
+ getAllExprs(state, path, attrs, v);
+ v.attrs->sort();
+ }
+}
+
+
+static void loadDerivations(EvalState & state, Path nixExprPath,
+ string systemFilter, Bindings & autoArgs,
+ const string & pathPrefix, DrvInfos & elems)
+{
+ Value vRoot;
+ loadSourceExpr(state, nixExprPath, vRoot);
+
+ Value & v(*findAlongAttrPath(state, pathPrefix, autoArgs, vRoot));
+
+ getDerivations(state, v, pathPrefix, autoArgs, elems, true);
+
+ /* Filter out all derivations not applicable to the current
+ system. */
+ for (DrvInfos::iterator i = elems.begin(), j; i != elems.end(); i = j) {
+ j = i; j++;
+ if (systemFilter != "*" && i->system != systemFilter)
+ elems.erase(i);
+ }
+}
+
+
+static Path getHomeDir()
+{
+ Path homeDir(getEnv("HOME", ""));
+ if (homeDir == "") throw Error("HOME environment variable not set");
+ return homeDir;
+}
+
+
+static Path getDefNixExprPath()
+{
+ return getHomeDir() + "/.nix-defexpr";
+}
+
+
+static int getPriority(EvalState & state, DrvInfo & drv)
+{
+ return drv.queryMetaInt("priority", 0);
+}
+
+
+static int comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2)
+{
+ return getPriority(state, drv2) - getPriority(state, drv1);
+}
+
+
+// FIXME: this function is rather slow since it checks a single path
+// at a time.
+static bool isPrebuilt(EvalState & state, DrvInfo & elem)
+{
+ Path path = elem.queryOutPath();
+ if (state.store->isValidPath(path)) return true;
+ PathSet ps = state.store->querySubstitutablePaths({path});
+ return ps.find(path) != ps.end();
+}
+
+
+static void checkSelectorUse(DrvNames & selectors)
+{
+ /* Check that all selectors have been used. */
+ for (auto & i : selectors)
+ if (i.hits == 0 && i.fullName != "*")
+ throw Error(format("selector ‘%1%’ matches no derivations") % i.fullName);
+}
+
+
+static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems,
+ const Strings & args, bool newestOnly)
+{
+ DrvNames selectors = drvNamesFromArgs(args);
+ if (selectors.empty())
+ selectors.push_back(DrvName("*"));
+
+ DrvInfos elems;
+ set<unsigned int> done;
+
+ for (auto & i : selectors) {
+ typedef list<std::pair<DrvInfo, unsigned int> > Matches;
+ Matches matches;
+ unsigned int n = 0;
+ for (DrvInfos::const_iterator j = allElems.begin();
+ j != allElems.end(); ++j, ++n)
+ {
+ DrvName drvName(j->name);
+ if (i.matches(drvName)) {
+ i.hits++;
+ matches.push_back(std::pair<DrvInfo, unsigned int>(*j, n));
+ }
+ }
+
+        /* If `newestOnly' is set and a selector matches multiple
+           derivations with the same name, pick the one matching the
+           current system. If there are still multiple derivations,
+           pick the one with the highest priority. If there are still
+           multiple derivations, pick the one with the highest
+           version. Finally, if there are still multiple derivations,
+           arbitrarily pick the first one. */
+ if (newestOnly) {
+
+ /* Map from package names to derivations. */
+ typedef map<string, std::pair<DrvInfo, unsigned int> > Newest;
+ Newest newest;
+ StringSet multiple;
+
+ for (auto & j : matches) {
+ DrvName drvName(j.first.name);
+ int d = 1;
+
+ Newest::iterator k = newest.find(drvName.name);
+
+ if (k != newest.end()) {
+ d = j.first.system == k->second.first.system ? 0 :
+ j.first.system == settings.thisSystem ? 1 :
+ k->second.first.system == settings.thisSystem ? -1 : 0;
+ if (d == 0)
+ d = comparePriorities(state, j.first, k->second.first);
+ if (d == 0)
+ d = compareVersions(drvName.version, DrvName(k->second.first.name).version);
+ }
+
+ if (d > 0) {
+ newest.erase(drvName.name);
+ newest.insert(Newest::value_type(drvName.name, j));
+ multiple.erase(j.first.name);
+ } else if (d == 0) {
+ multiple.insert(j.first.name);
+ }
+ }
+
+ matches.clear();
+ for (auto & j : newest) {
+ if (multiple.find(j.second.first.name) != multiple.end())
+ printInfo(
+ format("warning: there are multiple derivations named ‘%1%’; using the first one")
+ % j.second.first.name);
+ matches.push_back(j.second);
+ }
+ }
+
+ /* Insert only those elements in the final list that we
+ haven't inserted before. */
+ for (auto & j : matches)
+ if (done.find(j.second) == done.end()) {
+ done.insert(j.second);
+ elems.push_back(j.first);
+ }
+ }
+
+ checkSelectorUse(selectors);
+
+ return elems;
+}
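+
+
+/* Illustrative sketch, not part of the original change: how DrvName
+   (names.hh) splits and matches the selectors handled above. The
+   name/version split and the glob matching are assumed to follow
+   names.cc. */
+static void selectorExamples()
+{
+    DrvName d("hello-2.10");
+    assert(d.name == "hello" && d.version == "2.10");
+    assert(DrvName("hello").matches(d));        // no version: any version matches
+    assert(DrvName("hello-2.10").matches(d));   // exact version
+    assert(!DrvName("hello-2.9").matches(d));   // version mismatch
+    assert(DrvName("*").matches(d));            // wildcard, used for empty selector lists
+}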
+
+
+static bool isPath(const string & s)
+{
+ return s.find('/') != string::npos;
+}
+
+
+static void queryInstSources(EvalState & state,
+ InstallSourceInfo & instSource, const Strings & args,
+ DrvInfos & elems, bool newestOnly)
+{
+ InstallSourceType type = instSource.type;
+ if (type == srcUnknown && args.size() > 0 && isPath(args.front()))
+ type = srcStorePaths;
+
+ switch (type) {
+
+ /* Get the available user environment elements from the
+ derivations specified in a Nix expression, including only
+ those with names matching any of the names in `args'. */
+ case srcUnknown:
+ case srcNixExprDrvs: {
+
+ /* Load the derivations from the (default or specified)
+ Nix expression. */
+ DrvInfos allElems;
+ loadDerivations(state, instSource.nixExprPath,
+ instSource.systemFilter, *instSource.autoArgs, "", allElems);
+
+ elems = filterBySelector(state, allElems, args, newestOnly);
+
+ break;
+ }
+
+ /* Get the available user environment elements from the Nix
+ expressions specified on the command line; these should be
+ functions that take the default Nix expression file as
+ argument, e.g., if the file is `./foo.nix', then the
+ argument `x: x.bar' is equivalent to `(x: x.bar)
+ (import ./foo.nix)' = `(import ./foo.nix).bar'. */
+ case srcNixExprs: {
+
+ Value vArg;
+ loadSourceExpr(state, instSource.nixExprPath, vArg);
+
+ for (auto & i : args) {
+ Expr * eFun = state.parseExprFromString(i, absPath("."));
+ Value vFun, vTmp;
+ state.eval(eFun, vFun);
+ mkApp(vTmp, vFun, vArg);
+ getDerivations(state, vTmp, "", *instSource.autoArgs, elems, true);
+ }
+
+ break;
+ }
+
+ /* The available user environment elements are specified as a
+ list of store paths (which may or may not be
+ derivations). */
+ case srcStorePaths: {
+
+ for (auto & i : args) {
+ Path path = state.store->followLinksToStorePath(i);
+
+ string name = baseNameOf(path);
+ string::size_type dash = name.find('-');
+ if (dash != string::npos)
+ name = string(name, dash + 1);
+
+ DrvInfo elem(state, name, "", "", 0);
+
+ if (isDerivation(path)) {
+ elem.setDrvPath(path);
+ elem.setOutPath(state.store->derivationFromPath(path).findOutput("out"));
+ if (name.size() >= drvExtension.size() &&
+ string(name, name.size() - drvExtension.size()) == drvExtension)
+ name = string(name, 0, name.size() - drvExtension.size());
+ }
+ else elem.setOutPath(path);
+
+ elems.push_back(elem);
+ }
+
+ break;
+ }
+
+ /* Get the available user environment elements from another
+ user environment. These are then filtered as in the
+ `srcNixExprDrvs' case. */
+ case srcProfile: {
+ elems = filterBySelector(state,
+ queryInstalled(state, instSource.profile),
+ args, newestOnly);
+ break;
+ }
+
+ case srcAttrPath: {
+ Value vRoot;
+ loadSourceExpr(state, instSource.nixExprPath, vRoot);
+ for (auto & i : args) {
+ Value & v(*findAlongAttrPath(state, i, *instSource.autoArgs, vRoot));
+ getDerivations(state, v, "", *instSource.autoArgs, elems, true);
+ }
+ break;
+ }
+ }
+}
+
+
+static void printMissing(EvalState & state, DrvInfos & elems)
+{
+ PathSet targets;
+ for (auto & i : elems) {
+ Path drvPath = i.queryDrvPath();
+ if (drvPath != "")
+ targets.insert(drvPath);
+ else
+ targets.insert(i.queryOutPath());
+ }
+
+ printMissing(state.store, targets);
+}
+
+
+static bool keep(DrvInfo & drv)
+{
+ return drv.queryMetaBool("keep", false);
+}
+
+
+static void installDerivations(Globals & globals,
+ const Strings & args, const Path & profile)
+{
+ debug(format("installing derivations"));
+
+ /* Get the set of user environment elements to be installed. */
+ DrvInfos newElems, newElemsTmp;
+ queryInstSources(*globals.state, globals.instSource, args, newElemsTmp, true);
+
+ /* If --prebuilt-only is given, filter out source-only packages. */
+ for (auto & i : newElemsTmp)
+ if (!globals.prebuiltOnly || isPrebuilt(*globals.state, i))
+ newElems.push_back(i);
+
+ StringSet newNames;
+ for (auto & i : newElems) {
+ /* `forceName' is a hack to get package names right in some
+ one-click installs, namely those where the name used in the
+ path is not the one we want (e.g., `java-front' versus
+ `java-front-0.9pre15899'). */
+ if (globals.forceName != "")
+ i.name = globals.forceName;
+ newNames.insert(DrvName(i.name).name);
+ }
+
+
+ while (true) {
+ string lockToken = optimisticLockProfile(profile);
+
+ DrvInfos allElems(newElems);
+
+ /* Add in the already installed derivations, unless they have
+ the same name as a to-be-installed element. */
+ if (!globals.removeAll) {
+ DrvInfos installedElems = queryInstalled(*globals.state, profile);
+
+ for (auto & i : installedElems) {
+ DrvName drvName(i.name);
+ if (!globals.preserveInstalled &&
+ newNames.find(drvName.name) != newNames.end() &&
+ !keep(i))
+ printInfo(format("replacing old ‘%1%’") % i.name);
+ else
+ allElems.push_back(i);
+ }
+
+ for (auto & i : newElems)
+ printInfo(format("installing ‘%1%’") % i.name);
+ }
+
+ printMissing(*globals.state, newElems);
+
+ if (globals.dryRun) return;
+
+ if (createUserEnv(*globals.state, allElems,
+ profile, settings.envKeepDerivations, lockToken)) break;
+ }
+}
+
+
+static void opInstall(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+ string arg = *i++;
+ if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+ else if (arg == "--preserve-installed" || arg == "-P")
+ globals.preserveInstalled = true;
+ else if (arg == "--remove-all" || arg == "-r")
+ globals.removeAll = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % arg);
+ }
+
+ installDerivations(globals, opArgs, globals.profile);
+}
+
+
+typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType;
+
+
+static void upgradeDerivations(Globals & globals,
+ const Strings & args, UpgradeType upgradeType)
+{
+ debug(format("upgrading derivations"));
+
+ /* Upgrade works as follows: we take all currently installed
+ derivations, and for any derivation matching any selector, look
+ for a derivation in the input Nix expression that has the same
+ name and a higher version number. */
+
+ while (true) {
+ string lockToken = optimisticLockProfile(globals.profile);
+
+ DrvInfos installedElems = queryInstalled(*globals.state, globals.profile);
+
+ /* Fetch all derivations from the input file. */
+ DrvInfos availElems;
+ queryInstSources(*globals.state, globals.instSource, args, availElems, false);
+
+ /* Go through all installed derivations. */
+ DrvInfos newElems;
+ for (auto & i : installedElems) {
+ DrvName drvName(i.name);
+
+ try {
+
+ if (keep(i)) {
+ newElems.push_back(i);
+ continue;
+ }
+
+ /* Find the derivation in the input Nix expression
+ with the same name that satisfies the version
+ constraints specified by upgradeType. If there are
+ multiple matches, take the one with the highest
+ priority. If there are still multiple matches,
+ take the one with the highest version.
+ Do not upgrade if it would decrease the priority. */
+ DrvInfos::iterator bestElem = availElems.end();
+ string bestVersion;
+ for (auto j = availElems.begin(); j != availElems.end(); ++j) {
+ if (comparePriorities(*globals.state, i, *j) > 0)
+ continue;
+ DrvName newName(j->name);
+ if (newName.name == drvName.name) {
+ int d = compareVersions(drvName.version, newName.version);
+ if ((upgradeType == utLt && d < 0) ||
+ (upgradeType == utLeq && d <= 0) ||
+ (upgradeType == utEq && d == 0) ||
+ upgradeType == utAlways)
+ {
+ int d2 = -1;
+ if (bestElem != availElems.end()) {
+ d2 = comparePriorities(*globals.state, *bestElem, *j);
+ if (d2 == 0) d2 = compareVersions(bestVersion, newName.version);
+ }
+ if (d2 < 0 && (!globals.prebuiltOnly || isPrebuilt(*globals.state, *j))) {
+ bestElem = j;
+ bestVersion = newName.version;
+ }
+ }
+ }
+ }
+
+ if (bestElem != availElems.end() &&
+ i.queryOutPath() !=
+ bestElem->queryOutPath())
+ {
+ const char * action = compareVersions(drvName.version, bestVersion) <= 0
+ ? "upgrading" : "downgrading";
+ printInfo(
+ format("%1% ‘%2%’ to ‘%3%’")
+ % action % i.name % bestElem->name);
+ newElems.push_back(*bestElem);
+ } else newElems.push_back(i);
+
+ } catch (Error & e) {
+ e.addPrefix(format("while trying to find an upgrade for ‘%1%’:\n") % i.name);
+ throw;
+ }
+ }
+
+ printMissing(*globals.state, newElems);
+
+ if (globals.dryRun) return;
+
+ if (createUserEnv(*globals.state, newElems,
+ globals.profile, settings.envKeepDerivations, lockToken)) break;
+ }
+}
+
+
+static void opUpgrade(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ UpgradeType upgradeType = utLt;
+ for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+ string arg = *i++;
+ if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+ else if (arg == "--lt") upgradeType = utLt;
+ else if (arg == "--leq") upgradeType = utLeq;
+ else if (arg == "--eq") upgradeType = utEq;
+ else if (arg == "--always") upgradeType = utAlways;
+ else throw UsageError(format("unknown flag ‘%1%’") % arg);
+ }
+
+ upgradeDerivations(globals, opArgs, upgradeType);
+}
+
+
+static void setMetaFlag(EvalState & state, DrvInfo & drv,
+ const string & name, const string & value)
+{
+ Value * v = state.allocValue();
+ mkString(*v, value.c_str());
+ drv.setMeta(name, v);
+}
+
+
+static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ if (opArgs.size() < 2)
+ throw UsageError("not enough arguments to ‘--set-flag’");
+
+ Strings::iterator arg = opArgs.begin();
+ string flagName = *arg++;
+ string flagValue = *arg++;
+ DrvNames selectors = drvNamesFromArgs(Strings(arg, opArgs.end()));
+
+ while (true) {
+ string lockToken = optimisticLockProfile(globals.profile);
+
+ DrvInfos installedElems = queryInstalled(*globals.state, globals.profile);
+
+ /* Update all matching derivations. */
+ for (auto & i : installedElems) {
+ DrvName drvName(i.name);
+ for (auto & j : selectors)
+ if (j.matches(drvName)) {
+ printInfo(format("setting flag on ‘%1%’") % i.name);
+ j.hits++;
+ setMetaFlag(*globals.state, i, flagName, flagValue);
+ break;
+ }
+ }
+
+ checkSelectorUse(selectors);
+
+ /* Write the new user environment. */
+ if (createUserEnv(*globals.state, installedElems,
+ globals.profile, settings.envKeepDerivations, lockToken)) break;
+ }
+}
+
+
+static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ auto store2 = globals.state->store.dynamic_pointer_cast<LocalFSStore>();
+ if (!store2) throw Error("--set is not supported for this Nix store");
+
+ for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+ string arg = *i++;
+ if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+ else throw UsageError(format("unknown flag ‘%1%’") % arg);
+ }
+
+ DrvInfos elems;
+ queryInstSources(*globals.state, globals.instSource, opArgs, elems, true);
+
+ if (elems.size() != 1)
+ throw Error("--set requires exactly one derivation");
+
+ DrvInfo & drv(elems.front());
+
+ if (globals.forceName != "")
+ drv.name = globals.forceName;
+
+ if (drv.queryDrvPath() != "") {
+ PathSet paths = {drv.queryDrvPath()};
+ printMissing(globals.state->store, paths);
+ if (globals.dryRun) return;
+ globals.state->store->buildPaths(paths, globals.state->repair ? bmRepair : bmNormal);
+ }
+ else {
+ printMissing(globals.state->store, {drv.queryOutPath()});
+ if (globals.dryRun) return;
+ globals.state->store->ensurePath(drv.queryOutPath());
+ }
+
+ debug(format("switching to new user environment"));
+ Path generation = createGeneration(ref<LocalFSStore>(store2), globals.profile, drv.queryOutPath());
+ switchLink(globals.profile, generation);
+}
+
+
+static void uninstallDerivations(Globals & globals, Strings & selectors,
+ Path & profile)
+{
+ while (true) {
+ string lockToken = optimisticLockProfile(profile);
+
+ DrvInfos installedElems = queryInstalled(*globals.state, profile);
+ DrvInfos newElems;
+
+ for (auto & i : installedElems) {
+ DrvName drvName(i.name);
+ bool found = false;
+ for (auto & j : selectors)
+ /* !!! the repeated calls to followLinksToStorePath()
+ are expensive, should pre-compute them. */
+ if ((isPath(j) && i.queryOutPath() == globals.state->store->followLinksToStorePath(j))
+ || DrvName(j).matches(drvName))
+ {
+ printInfo(format("uninstalling ‘%1%’") % i.name);
+ found = true;
+ break;
+ }
+ if (!found) newElems.push_back(i);
+ }
+
+ if (globals.dryRun) return;
+
+ if (createUserEnv(*globals.state, newElems,
+ profile, settings.envKeepDerivations, lockToken)) break;
+ }
+}
+
+
+static void opUninstall(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ uninstallDerivations(globals, opArgs, globals.profile);
+}
+
+
+static bool cmpChars(char a, char b)
+{
+ return toupper(a) < toupper(b);
+}
+
+
+static bool cmpElemByName(const DrvInfo & a, const DrvInfo & b)
+{
+ return lexicographical_compare(
+ a.name.begin(), a.name.end(),
+ b.name.begin(), b.name.end(), cmpChars);
+}
+
+
+typedef list<Strings> Table;
+
+
+void printTable(Table & table)
+{
+ unsigned int nrColumns = table.size() > 0 ? table.front().size() : 0;
+
+ vector<unsigned int> widths;
+ widths.resize(nrColumns);
+
+ for (auto & i : table) {
+ assert(i.size() == nrColumns);
+ Strings::iterator j;
+ unsigned int column;
+ for (j = i.begin(), column = 0; j != i.end(); ++j, ++column)
+ if (j->size() > widths[column]) widths[column] = j->size();
+ }
+
+ for (auto & i : table) {
+ Strings::iterator j;
+ unsigned int column;
+ for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) {
+ string s = *j;
+ replace(s.begin(), s.end(), '\n', ' ');
+ cout << s;
+ if (column < nrColumns - 1)
+ cout << string(widths[column] - s.size() + 2, ' ');
+ }
+ cout << std::endl;
+ }
+}
+
+
+/* This function compares the version of an element against the
+ versions in the given set of elements. `cvLess' means that only
+ lower versions are in the set, `cvEqual' means that at most an
+ equal version is in the set, and `cvGreater' means that there is at
+ least one element with a higher version in the set. `cvUnavail'
+ means that there are no elements with the same name in the set. */
+
+typedef enum { cvLess, cvEqual, cvGreater, cvUnavail } VersionDiff;
+
+static VersionDiff compareVersionAgainstSet(
+ const DrvInfo & elem, const DrvInfos & elems, string & version)
+{
+ DrvName name(elem.name);
+
+ VersionDiff diff = cvUnavail;
+ version = "?";
+
+ for (auto & i : elems) {
+ DrvName name2(i.name);
+ if (name.name == name2.name) {
+ int d = compareVersions(name.version, name2.version);
+ if (d < 0) {
+ diff = cvGreater;
+ version = name2.version;
+ }
+ else if (diff != cvGreater && d == 0) {
+ diff = cvEqual;
+ version = name2.version;
+ }
+ else if (diff != cvGreater && diff != cvEqual && d > 0) {
+ diff = cvLess;
+ if (version == "" || compareVersions(version, name2.version) < 0)
+ version = name2.version;
+ }
+ }
+ }
+
+ return diff;
+}
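+
+
+/* Illustrative sketch, not part of the original change: the
+   compareVersions() ordering (from names.hh) that the classification
+   above is built on. The exact rules (numeric components compare
+   numerically, and "pre" releases sort before the corresponding
+   release) are assumed from names.cc. */
+static void versionOrderingExamples()
+{
+    assert(compareVersions("1.0", "2.3") < 0);       // numeric ordering
+    assert(compareVersions("2.3", "2.3") == 0);
+    assert(compareVersions("2.3.1", "2.3") > 0);     // extra component wins
+    assert(compareVersions("2.3pre1", "2.3") < 0);   // pre-release sorts lower
+}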
+
+
+static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
+{
+ JSONObject topObj(cout, true);
+ for (auto & i : elems) {
+ JSONObject pkgObj = topObj.object(i.attrPath);
+
+ pkgObj.attr("name", i.name);
+ pkgObj.attr("system", i.system);
+
+ JSONObject metaObj = pkgObj.object("meta");
+ StringSet metaNames = i.queryMetaNames();
+ for (auto & j : metaNames) {
+ auto placeholder = metaObj.placeholder(j);
+ Value * v = i.queryMeta(j);
+ if (!v) {
+ printError(format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
+ placeholder.write(nullptr);
+ } else {
+ PathSet context;
+ printValueAsJSON(*globals.state, true, *v, placeholder, context);
+ }
+ }
+ }
+}
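+
+
+/* Illustrative sketch, not part of the original change: the shape of
+   the JSON emitted above for one package, keyed by its attribute
+   path (the attribute path and meta fields here are made-up
+   examples, not actual output):
+
+   { "hello": { "name": "hello-2.10", "system": "x86_64-linux",
+                "meta": { "description": "A program that ...", ... } } }
+*/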
+
+
+static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ Strings remaining;
+ string attrPath;
+
+ bool printStatus = false;
+ bool printName = true;
+ bool printAttrPath = false;
+ bool printSystem = false;
+ bool printDrvPath = false;
+ bool printOutPath = false;
+ bool printDescription = false;
+ bool printMeta = false;
+ bool compareVersions = false;
+ bool xmlOutput = false;
+ bool jsonOutput = false;
+
+ enum { sInstalled, sAvailable } source = sInstalled;
+
+ settings.readOnlyMode = true; /* makes evaluation a bit faster */
+
+ for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+ string arg = *i++;
+ if (arg == "--status" || arg == "-s") printStatus = true;
+ else if (arg == "--no-name") printName = false;
+ else if (arg == "--system") printSystem = true;
+ else if (arg == "--description") printDescription = true;
+ else if (arg == "--compare-versions" || arg == "-c") compareVersions = true;
+ else if (arg == "--drv-path") printDrvPath = true;
+ else if (arg == "--out-path") printOutPath = true;
+ else if (arg == "--meta") printMeta = true;
+ else if (arg == "--installed") source = sInstalled;
+ else if (arg == "--available" || arg == "-a") source = sAvailable;
+ else if (arg == "--xml") xmlOutput = true;
+ else if (arg == "--json") jsonOutput = true;
+ else if (arg == "--attr-path" || arg == "-P") printAttrPath = true;
+ else if (arg == "--attr" || arg == "-A")
+ attrPath = needArg(i, opFlags, arg);
+ else
+ throw UsageError(format("unknown flag ‘%1%’") % arg);
+ }
+
+
+ /* Obtain derivation information from the specified source. */
+ DrvInfos availElems, installedElems;
+
+ if (source == sInstalled || compareVersions || printStatus)
+ installedElems = queryInstalled(*globals.state, globals.profile);
+
+ if (source == sAvailable || compareVersions)
+ loadDerivations(*globals.state, globals.instSource.nixExprPath,
+ globals.instSource.systemFilter, *globals.instSource.autoArgs,
+ attrPath, availElems);
+
+ DrvInfos elems_ = filterBySelector(*globals.state,
+ source == sInstalled ? installedElems : availElems,
+ opArgs, false);
+
+ DrvInfos & otherElems(source == sInstalled ? availElems : installedElems);
+
+
+ /* Sort them by name. */
+ /* !!! */
+ vector<DrvInfo> elems;
+ for (auto & i : elems_) elems.push_back(i);
+ sort(elems.begin(), elems.end(), cmpElemByName);
+
+
+ /* We only need to know the installed paths when we are querying
+ the status of the derivation. */
+ PathSet installed; /* installed paths */
+
+ if (printStatus) {
+ for (auto & i : installedElems)
+ installed.insert(i.queryOutPath());
+ }
+
+
+ /* Query which paths have substitutes. */
+ PathSet validPaths, substitutablePaths;
+ if (printStatus || globals.prebuiltOnly) {
+ PathSet paths;
+ for (auto & i : elems)
+ try {
+ paths.insert(i.queryOutPath());
+ } catch (AssertionError & e) {
+ printMsg(lvlTalkative, format("skipping derivation named ‘%1%’ which gives an assertion failure") % i.name);
+ i.setFailed();
+ }
+ validPaths = globals.state->store->queryValidPaths(paths);
+ substitutablePaths = globals.state->store->querySubstitutablePaths(paths);
+ }
+
+
+ /* Print the desired columns, or XML output. */
+ if (jsonOutput) {
+ queryJSON(globals, elems);
+ return;
+ }
+
+ bool tty = isatty(STDOUT_FILENO);
+ RunPager pager;
+
+ Table table;
+ std::ostringstream dummy;
+ XMLWriter xml(true, *(xmlOutput ? &cout : &dummy));
+ XMLOpenElement xmlRoot(xml, "items");
+
+ for (auto & i : elems) {
+ try {
+ if (i.hasFailed()) continue;
+
+ Activity act(*logger, lvlDebug, format("outputting query result ‘%1%’") % i.attrPath);
+
+ if (globals.prebuiltOnly &&
+ validPaths.find(i.queryOutPath()) == validPaths.end() &&
+ substitutablePaths.find(i.queryOutPath()) == substitutablePaths.end())
+ continue;
+
+ /* For table output. */
+ Strings columns;
+
+ /* For XML output. */
+ XMLAttrs attrs;
+
+ if (printStatus) {
+ Path outPath = i.queryOutPath();
+ bool hasSubs = substitutablePaths.find(outPath) != substitutablePaths.end();
+ bool isInstalled = installed.find(outPath) != installed.end();
+ bool isValid = validPaths.find(outPath) != validPaths.end();
+ if (xmlOutput) {
+ attrs["installed"] = isInstalled ? "1" : "0";
+ attrs["valid"] = isValid ? "1" : "0";
+ attrs["substitutable"] = hasSubs ? "1" : "0";
+ } else
+ columns.push_back(
+ (string) (isInstalled ? "I" : "-")
+ + (isValid ? "P" : "-")
+ + (hasSubs ? "S" : "-"));
+ }
+
+ if (xmlOutput)
+ attrs["attrPath"] = i.attrPath;
+ else if (printAttrPath)
+ columns.push_back(i.attrPath);
+
+ if (xmlOutput)
+ attrs["name"] = i.name;
+ else if (printName)
+ columns.push_back(i.name);
+
+ if (compareVersions) {
+ /* Compare this element against the versions of the
+ same-named packages in either the set of available
+ elements, or the set of installed elements. !!!
+ This is O(N * M), should be O(N * lg M). */
+ string version;
+ VersionDiff diff = compareVersionAgainstSet(i, otherElems, version);
+
+ char ch;
+ switch (diff) {
+ case cvLess: ch = '>'; break;
+ case cvEqual: ch = '='; break;
+ case cvGreater: ch = '<'; break;
+ case cvUnavail: ch = '-'; break;
+ default: abort();
+ }
+
+ if (xmlOutput) {
+ if (diff != cvUnavail) {
+ attrs["versionDiff"] = ch;
+ attrs["maxComparedVersion"] = version;
+ }
+ } else {
+ string column = (string) "" + ch + " " + version;
+ if (diff == cvGreater && tty)
+ column = ANSI_RED + column + ANSI_NORMAL;
+ columns.push_back(column);
+ }
+ }
+
+ if (xmlOutput) {
+ if (i.system != "") attrs["system"] = i.system;
+ }
+ else if (printSystem)
+ columns.push_back(i.system);
+
+ if (printDrvPath) {
+ string drvPath = i.queryDrvPath();
+ if (xmlOutput) {
+ if (drvPath != "") attrs["drvPath"] = drvPath;
+ } else
+ columns.push_back(drvPath == "" ? "-" : drvPath);
+ }
+
+ if (printOutPath && !xmlOutput) {
+ DrvInfo::Outputs outputs = i.queryOutputs();
+ string s;
+ for (auto & j : outputs) {
+ if (!s.empty()) s += ';';
+ if (j.first != "out") { s += j.first; s += "="; }
+ s += j.second;
+ }
+ columns.push_back(s);
+ }
+
+ if (printDescription) {
+ string descr = i.queryMetaString("description");
+ if (xmlOutput) {
+ if (descr != "") attrs["description"] = descr;
+ } else
+ columns.push_back(descr);
+ }
+
+ if (xmlOutput) {
+ if (printOutPath || printMeta) {
+ XMLOpenElement item(xml, "item", attrs);
+ if (printOutPath) {
+ DrvInfo::Outputs outputs = i.queryOutputs();
+ for (auto & j : outputs) {
+ XMLAttrs attrs2;
+ attrs2["name"] = j.first;
+ attrs2["path"] = j.second;
+ xml.writeEmptyElement("output", attrs2);
+ }
+ }
+ if (printMeta) {
+ StringSet metaNames = i.queryMetaNames();
+ for (auto & j : metaNames) {
+ XMLAttrs attrs2;
+ attrs2["name"] = j;
+ Value * v = i.queryMeta(j);
+ if (!v)
+ printError(format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
+ else {
+ if (v->type == tString) {
+ attrs2["type"] = "string";
+ attrs2["value"] = v->string.s;
+ xml.writeEmptyElement("meta", attrs2);
+ } else if (v->type == tInt) {
+ attrs2["type"] = "int";
+ attrs2["value"] = (format("%1%") % v->integer).str();
+ xml.writeEmptyElement("meta", attrs2);
+ } else if (v->type == tFloat) {
+ attrs2["type"] = "float";
+ attrs2["value"] = (format("%1%") % v->fpoint).str();
+ xml.writeEmptyElement("meta", attrs2);
+ } else if (v->type == tBool) {
+ attrs2["type"] = "bool";
+ attrs2["value"] = v->boolean ? "true" : "false";
+ xml.writeEmptyElement("meta", attrs2);
+ } else if (v->isList()) {
+ attrs2["type"] = "strings";
+ XMLOpenElement m(xml, "meta", attrs2);
+ for (unsigned int j = 0; j < v->listSize(); ++j) {
+ if (v->listElems()[j]->type != tString) continue;
+ XMLAttrs attrs3;
+ attrs3["value"] = v->listElems()[j]->string.s;
+ xml.writeEmptyElement("string", attrs3);
+ }
+ } else if (v->type == tAttrs) {
+ attrs2["type"] = "strings";
+ XMLOpenElement m(xml, "meta", attrs2);
+ Bindings & attrs = *v->attrs;
+ for (auto & i : attrs) {
+ Attr & a(*attrs.find(i.name));
+ if (a.value->type != tString) continue;
+ XMLAttrs attrs3;
+ attrs3["type"] = i.name;
+ attrs3["value"] = a.value->string.s;
+ xml.writeEmptyElement("string", attrs3);
+ }
+ }
+ }
+ }
+ }
+ } else
+ xml.writeEmptyElement("item", attrs);
+ } else
+ table.push_back(columns);
+
+ cout.flush();
+
+ } catch (AssertionError & e) {
+ printMsg(lvlTalkative, format("skipping derivation named ‘%1%’ which gives an assertion failure") % i.name);
+ } catch (Error & e) {
+ e.addPrefix(format("while querying the derivation named ‘%1%’:\n") % i.name);
+ throw;
+ }
+ }
+
+ if (!xmlOutput) printTable(table);
+}
+
+
+static void opSwitchProfile(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ if (opArgs.size() != 1)
+ throw UsageError(format("exactly one argument expected"));
+
+ Path profile = absPath(opArgs.front());
+ Path profileLink = getHomeDir() + "/.nix-profile";
+
+ switchLink(profileLink, profile);
+}
+
+
+static const int prevGen = -2;
+
+
+static void switchGeneration(Globals & globals, int dstGen)
+{
+ PathLocks lock;
+ lockProfile(lock, globals.profile);
+
+ int curGen;
+ Generations gens = findGenerations(globals.profile, curGen);
+
+ Generation dst;
+ for (auto & i : gens)
+ if ((dstGen == prevGen && i.number < curGen) ||
+ (dstGen >= 0 && i.number == dstGen))
+ dst = i;
+
+ if (!dst) {
+ if (dstGen == prevGen)
+ throw Error(format("no generation older than the current (%1%) exists")
+ % curGen);
+ else
+ throw Error(format("generation %1% does not exist") % dstGen);
+ }
+
+ printInfo(format("switching from generation %1% to %2%")
+ % curGen % dst.number);
+
+ if (globals.dryRun) return;
+
+ switchLink(globals.profile, dst.path);
+}
+
+
+static void opSwitchGeneration(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ if (opArgs.size() != 1)
+ throw UsageError(format("exactly one argument expected"));
+
+ int dstGen;
+ if (!string2Int(opArgs.front(), dstGen))
+ throw UsageError(format("expected a generation number"));
+
+ switchGeneration(globals, dstGen);
+}
+
+
+static void opRollback(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ if (opArgs.size() != 0)
+ throw UsageError(format("no arguments expected"));
+
+ switchGeneration(globals, prevGen);
+}
+
+
+static void opListGenerations(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+ if (opArgs.size() != 0)
+ throw UsageError(format("no arguments expected"));
+
+ PathLocks lock;
+ lockProfile(lock, globals.profile);
+
+ int curGen;
+ Generations gens = findGenerations(globals.profile, curGen);
+
+ RunPager pager;
+
+ for (auto & i : gens) {
+ tm t;
+ if (!localtime_r(&i.creationTime, &t)) throw Error("cannot convert time");
+ cout << format("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||\n")
+ % i.number
+ % (t.tm_year + 1900) % (t.tm_mon + 1) % t.tm_mday
+ % t.tm_hour % t.tm_min % t.tm_sec
+ % (i.number == curGen ? "(current)" : "");
+ }
+}
+
+
+static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ if (opFlags.size() > 0)
+ throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+
+ if (opArgs.size() == 1 && opArgs.front() == "old") {
+ deleteOldGenerations(globals.profile, globals.dryRun);
+ } else if (opArgs.size() == 1 && opArgs.front().find('d') != string::npos) {
+ deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
+ } else {
+ std::set<unsigned int> gens;
+ for (auto & i : opArgs) {
+ unsigned int n;
+ if (!string2Int(i, n))
+ throw UsageError(format("invalid generation number ‘%1%’") % i);
+ gens.insert(n);
+ }
+ deleteGenerations(globals.profile, gens, globals.dryRun);
+ }
+}
+
+
+static void opVersion(Globals & globals, Strings opFlags, Strings opArgs)
+{
+ printVersion("nix-env");
+}
+
+
+int main(int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+ initGC();
+
+ Strings opFlags, opArgs, searchPath;
+ std::map<string, string> autoArgs_;
+ Operation op = 0;
+ bool repair = false;
+ string file;
+
+ Globals globals;
+
+ globals.instSource.type = srcUnknown;
+ globals.instSource.nixExprPath = getDefNixExprPath();
+ globals.instSource.systemFilter = "*";
+
+ globals.dryRun = false;
+ globals.preserveInstalled = false;
+ globals.removeAll = false;
+ globals.prebuiltOnly = false;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ Operation oldOp = op;
+
+ if (*arg == "--help")
+ showManPage("nix-env");
+ else if (*arg == "--version")
+ op = opVersion;
+ else if (*arg == "--install" || *arg == "-i")
+ op = opInstall;
+ else if (parseAutoArgs(arg, end, autoArgs_))
+ ;
+ else if (parseSearchPathArg(arg, end, searchPath))
+ ;
+ else if (*arg == "--force-name") // undocumented flag for nix-install-package
+ globals.forceName = getArg(*arg, arg, end);
+ else if (*arg == "--uninstall" || *arg == "-e")
+ op = opUninstall;
+ else if (*arg == "--upgrade" || *arg == "-u")
+ op = opUpgrade;
+ else if (*arg == "--set-flag")
+ op = opSetFlag;
+ else if (*arg == "--set")
+ op = opSet;
+ else if (*arg == "--query" || *arg == "-q")
+ op = opQuery;
+ else if (*arg == "--profile" || *arg == "-p")
+ globals.profile = absPath(getArg(*arg, arg, end));
+ else if (*arg == "--file" || *arg == "-f")
+ file = getArg(*arg, arg, end);
+ else if (*arg == "--switch-profile" || *arg == "-S")
+ op = opSwitchProfile;
+ else if (*arg == "--switch-generation" || *arg == "-G")
+ op = opSwitchGeneration;
+ else if (*arg == "--rollback")
+ op = opRollback;
+ else if (*arg == "--list-generations")
+ op = opListGenerations;
+ else if (*arg == "--delete-generations")
+ op = opDeleteGenerations;
+ else if (*arg == "--dry-run") {
+ printInfo("(dry run; not doing anything)");
+ globals.dryRun = true;
+ }
+ else if (*arg == "--system-filter")
+ globals.instSource.systemFilter = getArg(*arg, arg, end);
+ else if (*arg == "--prebuilt-only" || *arg == "-b")
+ globals.prebuiltOnly = true;
+ else if (*arg == "--repair")
+ repair = true;
+ else if (*arg != "" && arg->at(0) == '-') {
+ opFlags.push_back(*arg);
+ /* FIXME: hacky */
+ if (*arg == "--from-profile" ||
+ (op == opQuery && (*arg == "--attr" || *arg == "-A")))
+ opFlags.push_back(getArg(*arg, arg, end));
+ }
+ else
+ opArgs.push_back(*arg);
+
+ if (oldOp && oldOp != op)
+ throw UsageError("only one operation may be specified");
+
+ return true;
+ });
+
+ if (!op) throw UsageError("no operation specified");
+
+ auto store = openStore();
+
+ globals.state = std::shared_ptr<EvalState>(new EvalState(searchPath, store));
+ globals.state->repair = repair;
+
+ if (file != "")
+ globals.instSource.nixExprPath = lookupFileArg(*globals.state, file);
+
+ globals.instSource.autoArgs = evalAutoArgs(*globals.state, autoArgs_);
+
+ if (globals.profile == "")
+ globals.profile = getEnv("NIX_PROFILE", "");
+
+ if (globals.profile == "") {
+ Path profileLink = getHomeDir() + "/.nix-profile";
+ globals.profile = pathExists(profileLink)
+ ? absPath(readLink(profileLink), dirOf(profileLink))
+ : canonPath(settings.nixStateDir + "/profiles/default");
+ }
+
+ op(globals, opFlags, opArgs);
+
+ globals.state->printStats();
+ });
+}
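As a reference for the profile fallback at the end of main() (the --profile flag, then NIX_PROFILE, then the ~/.nix-profile symlink, then the system default under the state directory), here is a hedged stand-alone sketch; the helper names are hypothetical and the real code goes through getEnv(), pathExists() and readLink():

#include <cstdlib>
#include <string>

// Hypothetical stand-in for getEnv(name, default).
std::string envOr(const char * name, const std::string & dflt)
{
    const char * v = std::getenv(name);
    return v && *v ? v : dflt;
}

std::string defaultProfile(bool haveDotNixProfile,
    const std::string & dotNixProfileTarget, const std::string & nixStateDir)
{
    // --profile/-p was handled earlier; NIX_PROFILE is the next override.
    std::string p = envOr("NIX_PROFILE", "");
    if (!p.empty()) return p;
    // Otherwise follow ~/.nix-profile if that symlink exists...
    if (haveDotNixProfile) return dotNixProfileTarget;
    // ...and fall back to the system-wide default profile.
    return nixStateDir + "/profiles/default";
}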
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
new file mode 100644
index 000000000..e9997fae5
--- /dev/null
+++ b/src/nix-env/user-env.cc
@@ -0,0 +1,155 @@
+#include "user-env.hh"
+#include "util.hh"
+#include "derivations.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "shared.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "profiles.hh"
+
+
+namespace nix {
+
+
+DrvInfos queryInstalled(EvalState & state, const Path & userEnv)
+{
+ DrvInfos elems;
+ Path manifestFile = userEnv + "/manifest.nix";
+ if (pathExists(manifestFile)) {
+ Value v;
+ state.evalFile(manifestFile, v);
+ Bindings & bindings(*state.allocBindings(0));
+ getDerivations(state, v, "", bindings, elems, false);
+ }
+ return elems;
+}
+
+
+bool createUserEnv(EvalState & state, DrvInfos & elems,
+ const Path & profile, bool keepDerivations,
+ const string & lockToken)
+{
+ /* Build the components in the user environment, if they don't
+ exist already. */
+ PathSet drvsToBuild;
+ for (auto & i : elems)
+ if (i.queryDrvPath() != "")
+ drvsToBuild.insert(i.queryDrvPath());
+
+ debug(format("building user environment dependencies"));
+ state.store->buildPaths(drvsToBuild, state.repair ? bmRepair : bmNormal);
+
+ /* Construct the whole top level derivation. */
+ PathSet references;
+ Value manifest;
+ state.mkList(manifest, elems.size());
+ unsigned int n = 0;
+ for (auto & i : elems) {
+ /* Create a pseudo-derivation containing the name, system,
+ output paths, and optionally the derivation path, as well
+ as the meta attributes. */
+ Path drvPath = keepDerivations ? i.queryDrvPath() : "";
+
+ Value & v(*state.allocValue());
+ manifest.listElems()[n++] = &v;
+ state.mkAttrs(v, 16);
+
+ mkString(*state.allocAttr(v, state.sType), "derivation");
+ mkString(*state.allocAttr(v, state.sName), i.name);
+ if (!i.system.empty())
+ mkString(*state.allocAttr(v, state.sSystem), i.system);
+ mkString(*state.allocAttr(v, state.sOutPath), i.queryOutPath());
+ if (drvPath != "")
+ mkString(*state.allocAttr(v, state.sDrvPath), i.queryDrvPath());
+
+ // Copy each output meant for installation.
+ DrvInfo::Outputs outputs = i.queryOutputs(true);
+ Value & vOutputs = *state.allocAttr(v, state.sOutputs);
+ state.mkList(vOutputs, outputs.size());
+ unsigned int m = 0;
+ for (auto & j : outputs) {
+ mkString(*(vOutputs.listElems()[m++] = state.allocValue()), j.first);
+ Value & vOutputs = *state.allocAttr(v, state.symbols.create(j.first));
+ state.mkAttrs(vOutputs, 2);
+ mkString(*state.allocAttr(vOutputs, state.sOutPath), j.second);
+
+ /* This is only necessary when installing store paths, e.g.,
+ `nix-env -i /nix/store/abcd...-foo'. */
+ state.store->addTempRoot(j.second);
+ state.store->ensurePath(j.second);
+
+ references.insert(j.second);
+ }
+
+ // Copy the meta attributes.
+ Value & vMeta = *state.allocAttr(v, state.sMeta);
+ state.mkAttrs(vMeta, 16);
+ StringSet metaNames = i.queryMetaNames();
+ for (auto & j : metaNames) {
+ Value * v = i.queryMeta(j);
+ if (!v) continue;
+ vMeta.attrs->push_back(Attr(state.symbols.create(j), v));
+ }
+ vMeta.attrs->sort();
+ v.attrs->sort();
+
+ if (drvPath != "") references.insert(drvPath);
+ }
+
+ /* Also write a copy of the list of user environment elements to
+ the store; we need it for future modifications of the
+ environment. */
+ Path manifestFile = state.store->addTextToStore("env-manifest.nix",
+ (format("%1%") % manifest).str(), references);
+
+ /* Get the environment builder expression. */
+ Value envBuilder;
+ state.evalFile(state.findFile("nix/buildenv.nix"), envBuilder);
+
+ /* Construct a Nix expression that calls the user environment
+ builder with the manifest as argument. */
+ Value args, topLevel;
+ state.mkAttrs(args, 3);
+ mkString(*state.allocAttr(args, state.symbols.create("manifest")),
+ manifestFile, {manifestFile});
+ args.attrs->push_back(Attr(state.symbols.create("derivations"), &manifest));
+ args.attrs->sort();
+ mkApp(topLevel, envBuilder, args);
+
+ /* Evaluate it. */
+ debug("evaluating user environment builder");
+ state.forceValue(topLevel);
+ PathSet context;
+ Attr & aDrvPath(*topLevel.attrs->find(state.sDrvPath));
+ Path topLevelDrv = state.coerceToPath(aDrvPath.pos ? *(aDrvPath.pos) : noPos, *(aDrvPath.value), context);
+ Attr & aOutPath(*topLevel.attrs->find(state.sOutPath));
+ Path topLevelOut = state.coerceToPath(aOutPath.pos ? *(aOutPath.pos) : noPos, *(aOutPath.value), context);
+
+ /* Realise the resulting store expression. */
+ debug("building user environment");
+ state.store->buildPaths({topLevelDrv}, state.repair ? bmRepair : bmNormal);
+
+ /* Switch the current user environment to the output path. */
+ auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>();
+
+ if (store2) {
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ Path lockTokenCur = optimisticLockProfile(profile);
+ if (lockToken != lockTokenCur) {
+ printError(format("profile ‘%1%’ changed while we were busy; restarting") % profile);
+ return false;
+ }
+
+ debug(format("switching to new user environment"));
+ Path generation = createGeneration(ref<LocalFSStore>(store2), profile, topLevelOut);
+ switchLink(profile, generation);
+ }
+
+ return true;
+}
+
+
+}
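createUserEnv() returns false when the profile changed while the new environment was being built, which implies that the caller re-reads the profile and tries again. A schematic sketch of such a retry loop (the names are illustrative, not the actual nix-env call site):

#include <functional>
#include <string>

void updateProfileWithRetry(const std::string & profile,
    const std::function<std::string(const std::string &)> & readLockToken,
    const std::function<bool(const std::string &)> & tryUpdate)
{
    while (true) {
        // Snapshot the profile's current state before doing any work...
        std::string token = readLockToken(profile);
        // ...and redo the work if tryUpdate() reports that the profile
        // changed underneath it (i.e. it returned false).
        if (tryUpdate(token)) break;
    }
}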
diff --git a/src/nix-env/user-env.hh b/src/nix-env/user-env.hh
new file mode 100644
index 000000000..f188efe9b
--- /dev/null
+++ b/src/nix-env/user-env.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "get-drvs.hh"
+
+namespace nix {
+
+DrvInfos queryInstalled(EvalState & state, const Path & userEnv);
+
+bool createUserEnv(EvalState & state, DrvInfos & elems,
+ const Path & profile, bool keepDerivations,
+ const string & lockToken);
+
+}
diff --git a/src/nix-instantiate/local.mk b/src/nix-instantiate/local.mk
new file mode 100644
index 000000000..7d1bc5ec9
--- /dev/null
+++ b/src/nix-instantiate/local.mk
@@ -0,0 +1,7 @@
+programs += nix-instantiate
+
+nix-instantiate_DIR := $(d)
+
+nix-instantiate_SOURCES := $(d)/nix-instantiate.cc
+
+nix-instantiate_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
new file mode 100644
index 000000000..c1b0b0ea0
--- /dev/null
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -0,0 +1,197 @@
+#include "globals.hh"
+#include "shared.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "get-drvs.hh"
+#include "attr-path.hh"
+#include "value-to-xml.hh"
+#include "value-to-json.hh"
+#include "util.hh"
+#include "store-api.hh"
+#include "common-opts.hh"
+
+#include <map>
+#include <iostream>
+
+
+using namespace nix;
+
+
+static Expr * parseStdin(EvalState & state)
+{
+ Activity act(*logger, lvlTalkative, format("parsing standard input"));
+ return state.parseExprFromString(drainFD(0), absPath("."));
+}
+
+
+static Path gcRoot;
+static int rootNr = 0;
+static bool indirectRoot = false;
+
+
+enum OutputKind { okPlain, okXML, okJSON };
+
+
+void processExpr(EvalState & state, const Strings & attrPaths,
+ bool parseOnly, bool strict, Bindings & autoArgs,
+ bool evalOnly, OutputKind output, bool location, Expr * e)
+{
+ if (parseOnly) {
+ std::cout << format("%1%\n") % *e;
+ return;
+ }
+
+ Value vRoot;
+ state.eval(e, vRoot);
+
+ for (auto & i : attrPaths) {
+ Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot));
+ state.forceValue(v);
+
+ PathSet context;
+ if (evalOnly) {
+ Value vRes;
+ if (autoArgs.empty())
+ vRes = v;
+ else
+ state.autoCallFunction(autoArgs, v, vRes);
+ if (output == okXML)
+ printValueAsXML(state, strict, location, vRes, std::cout, context);
+ else if (output == okJSON)
+ printValueAsJSON(state, strict, vRes, std::cout, context);
+ else {
+ if (strict) state.forceValueDeep(vRes);
+ std::cout << vRes << std::endl;
+ }
+ } else {
+ DrvInfos drvs;
+ getDerivations(state, v, "", autoArgs, drvs, false);
+ for (auto & i : drvs) {
+ Path drvPath = i.queryDrvPath();
+
+ /* What output do we want? */
+ string outputName = i.queryOutputName();
+ if (outputName == "")
+ throw Error(format("derivation ‘%1%’ lacks an ‘outputName’ attribute ") % drvPath);
+
+ if (gcRoot == "")
+ printGCWarning();
+ else {
+ Path rootName = gcRoot;
+ if (++rootNr > 1) rootName += "-" + std::to_string(rootNr);
+ auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>();
+ if (store2)
+ drvPath = store2->addPermRoot(drvPath, rootName, indirectRoot);
+ }
+ std::cout << format("%1%%2%\n") % drvPath % (outputName != "out" ? "!" + outputName : "");
+ }
+ }
+ }
+}
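Two output conventions in the non-eval branch above are easy to miss: GC roots after the first get a numeric suffix, and outputs other than the default ‘out’ are printed as drvPath!output. Restated as small stand-alone helpers (illustrative only):

#include <string>

std::string gcRootName(const std::string & base, int rootNr)
{
    // The first root keeps its name; later ones become "<base>-2", "<base>-3", ...
    return rootNr > 1 ? base + "-" + std::to_string(rootNr) : base;
}

std::string printedDrvPath(const std::string & drvPath, const std::string & outputName)
{
    // "out" is the default output and is left implicit.
    return drvPath + (outputName != "out" ? "!" + outputName : "");
}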
+
+
+int main(int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+ initGC();
+
+ Strings files, searchPath;
+ bool readStdin = false;
+ bool fromArgs = false;
+ bool findFile = false;
+ bool evalOnly = false;
+ bool parseOnly = false;
+ OutputKind outputKind = okPlain;
+ bool xmlOutputSourceLocation = true;
+ bool strict = false;
+ Strings attrPaths;
+ bool wantsReadWrite = false;
+ std::map<string, string> autoArgs_;
+ bool repair = false;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help")
+ showManPage("nix-instantiate");
+ else if (*arg == "--version")
+ printVersion("nix-instantiate");
+ else if (*arg == "-")
+ readStdin = true;
+ else if (*arg == "--expr" || *arg == "-E")
+ fromArgs = true;
+ else if (*arg == "--eval" || *arg == "--eval-only")
+ evalOnly = true;
+ else if (*arg == "--read-write-mode")
+ wantsReadWrite = true;
+ else if (*arg == "--parse" || *arg == "--parse-only")
+ parseOnly = evalOnly = true;
+ else if (*arg == "--find-file")
+ findFile = true;
+ else if (*arg == "--attr" || *arg == "-A")
+ attrPaths.push_back(getArg(*arg, arg, end));
+ else if (parseAutoArgs(arg, end, autoArgs_))
+ ;
+ else if (parseSearchPathArg(arg, end, searchPath))
+ ;
+ else if (*arg == "--add-root")
+ gcRoot = getArg(*arg, arg, end);
+ else if (*arg == "--indirect")
+ indirectRoot = true;
+ else if (*arg == "--xml")
+ outputKind = okXML;
+ else if (*arg == "--json")
+ outputKind = okJSON;
+ else if (*arg == "--no-location")
+ xmlOutputSourceLocation = false;
+ else if (*arg == "--strict")
+ strict = true;
+ else if (*arg == "--repair")
+ repair = true;
+ else if (*arg == "--dry-run")
+ settings.readOnlyMode = true;
+ else if (*arg != "" && arg->at(0) == '-')
+ return false;
+ else
+ files.push_back(*arg);
+ return true;
+ });
+
+ if (evalOnly && !wantsReadWrite)
+ settings.readOnlyMode = true;
+
+ auto store = openStore();
+
+ EvalState state(searchPath, store);
+ state.repair = repair;
+
+ Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
+
+ if (attrPaths.empty()) attrPaths.push_back("");
+
+ if (findFile) {
+ for (auto & i : files) {
+ Path p = state.findFile(i);
+ if (p == "") throw Error(format("unable to find ‘%1%’") % i);
+ std::cout << p << std::endl;
+ }
+ return;
+ }
+
+ if (readStdin) {
+ Expr * e = parseStdin(state);
+ processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+ evalOnly, outputKind, xmlOutputSourceLocation, e);
+ } else if (files.empty() && !fromArgs)
+ files.push_back("./default.nix");
+
+ for (auto & i : files) {
+ Expr * e = fromArgs
+ ? state.parseExprFromString(i, absPath("."))
+ : state.parseExprFromFile(resolveExprPath(lookupFileArg(state, i)));
+ processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+ evalOnly, outputKind, xmlOutputSourceLocation, e);
+ }
+
+ state.printStats();
+ });
+}
diff --git a/src/nix-prefetch-url/local.mk b/src/nix-prefetch-url/local.mk
new file mode 100644
index 000000000..3e7735406
--- /dev/null
+++ b/src/nix-prefetch-url/local.mk
@@ -0,0 +1,7 @@
+programs += nix-prefetch-url
+
+nix-prefetch-url_DIR := $(d)
+
+nix-prefetch-url_SOURCES := $(d)/nix-prefetch-url.cc
+
+nix-prefetch-url_LIBS = libmain libexpr libstore libutil libformat
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
new file mode 100644
index 000000000..b3b2fcac7
--- /dev/null
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -0,0 +1,210 @@
+#include "hash.hh"
+#include "shared.hh"
+#include "download.hh"
+#include "store-api.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "common-opts.hh"
+#include "attr-path.hh"
+
+#include <iostream>
+
+using namespace nix;
+
+
+/* If ‘uri’ starts with ‘mirror://’, then resolve it using the list of
+ mirrors defined in Nixpkgs. */
+string resolveMirrorUri(EvalState & state, string uri)
+{
+ if (string(uri, 0, 9) != "mirror://") return uri;
+
+ string s(uri, 9);
+ auto p = s.find('/');
+ if (p == string::npos) throw Error("invalid mirror URI");
+ string mirrorName(s, 0, p);
+
+ Value vMirrors;
+ state.eval(state.parseExprFromString("import <nixpkgs/pkgs/build-support/fetchurl/mirrors.nix>", "."), vMirrors);
+ state.forceAttrs(vMirrors);
+
+ auto mirrorList = vMirrors.attrs->find(state.symbols.create(mirrorName));
+ if (mirrorList == vMirrors.attrs->end())
+ throw Error(format("unknown mirror name ‘%1%’") % mirrorName);
+ state.forceList(*mirrorList->value);
+
+ if (mirrorList->value->listSize() < 1)
+ throw Error(format("mirror URI ‘%1%’ did not expand to anything") % uri);
+
+ string mirror = state.forceString(*mirrorList->value->listElems()[0]);
+ return mirror + (hasSuffix(mirror, "/") ? "" : "/") + string(s, p + 1);
+}
+
+
+int main(int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+ initGC();
+
+ HashType ht = htSHA256;
+ std::vector<string> args;
+ Strings searchPath;
+ bool printPath = getEnv("PRINT_PATH") != "";
+ bool fromExpr = false;
+ string attrPath;
+ std::map<string, string> autoArgs_;
+ bool unpack = false;
+ string name;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help")
+ showManPage("nix-prefetch-url");
+ else if (*arg == "--version")
+ printVersion("nix-prefetch-url");
+ else if (*arg == "--type") {
+ string s = getArg(*arg, arg, end);
+ ht = parseHashType(s);
+ if (ht == htUnknown)
+ throw UsageError(format("unknown hash type ‘%1%’") % s);
+ }
+ else if (*arg == "--print-path")
+ printPath = true;
+ else if (*arg == "--attr" || *arg == "-A") {
+ fromExpr = true;
+ attrPath = getArg(*arg, arg, end);
+ }
+ else if (*arg == "--unpack")
+ unpack = true;
+ else if (*arg == "--name")
+ name = getArg(*arg, arg, end);
+ else if (parseAutoArgs(arg, end, autoArgs_))
+ ;
+ else if (parseSearchPathArg(arg, end, searchPath))
+ ;
+ else if (*arg != "" && arg->at(0) == '-')
+ return false;
+ else
+ args.push_back(*arg);
+ return true;
+ });
+
+ if (args.size() > 2)
+ throw UsageError("too many arguments");
+
+ auto store = openStore();
+ EvalState state(searchPath, store);
+
+ Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
+
+ /* If -A is given, get the URI from the specified Nix
+ expression. */
+ string uri;
+ if (!fromExpr) {
+ if (args.empty())
+ throw UsageError("you must specify a URI");
+ uri = args[0];
+ } else {
+ Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0]));
+ Value vRoot;
+ state.evalFile(path, vRoot);
+ Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot));
+ state.forceAttrs(v);
+
+ /* Extract the URI. */
+ auto attr = v.attrs->find(state.symbols.create("urls"));
+ if (attr == v.attrs->end())
+ throw Error("attribute set does not contain a ‘urls’ attribute");
+ state.forceList(*attr->value);
+ if (attr->value->listSize() < 1)
+ throw Error("‘urls’ list is empty");
+ uri = state.forceString(*attr->value->listElems()[0]);
+
+ /* Extract the hash mode. */
+ attr = v.attrs->find(state.symbols.create("outputHashMode"));
+ if (attr == v.attrs->end())
+ printInfo("warning: this does not look like a fetchurl call");
+ else
+ unpack = state.forceString(*attr->value) == "recursive";
+
+ /* Extract the name. */
+ if (name.empty()) {
+ attr = v.attrs->find(state.symbols.create("name"));
+ if (attr != v.attrs->end())
+ name = state.forceString(*attr->value);
+ }
+ }
+
+ /* Figure out a name in the Nix store. */
+ if (name.empty())
+ name = baseNameOf(uri);
+ if (name.empty())
+ throw Error(format("cannot figure out file name for ‘%1%’") % uri);
+
+ /* If an expected hash is given, the file may already exist in
+ the store. */
+ Hash hash, expectedHash(ht);
+ Path storePath;
+ if (args.size() == 2) {
+ expectedHash = parseHash16or32(ht, args[1]);
+ storePath = store->makeFixedOutputPath(unpack, expectedHash, name);
+ if (store->isValidPath(storePath))
+ hash = expectedHash;
+ else
+ storePath.clear();
+ }
+
+ if (storePath.empty()) {
+
+ auto actualUri = resolveMirrorUri(state, uri);
+
+ /* Download the file. */
+ auto result = getDownloader()->download(DownloadRequest(actualUri));
+
+ AutoDelete tmpDir(createTempDir(), true);
+ Path tmpFile = (Path) tmpDir + "/tmp";
+ writeFile(tmpFile, *result.data);
+
+ /* Optionally unpack the file. */
+ if (unpack) {
+ printInfo("unpacking...");
+ Path unpacked = (Path) tmpDir + "/unpacked";
+ createDirs(unpacked);
+ if (hasSuffix(baseNameOf(uri), ".zip"))
+ runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked});
+ else
+ // FIXME: this requires GNU tar for decompression.
+ runProgram("tar", true, {"xf", tmpFile, "-C", unpacked});
+
+ /* If the archive unpacks to a single file/directory, then use
+ that as the top-level. */
+ auto entries = readDirectory(unpacked);
+ if (entries.size() == 1)
+ tmpFile = unpacked + "/" + entries[0].name;
+ else
+ tmpFile = unpacked;
+ }
+
+ /* FIXME: inefficient; addToStore() will also hash
+ this. */
+ hash = unpack ? hashPath(ht, tmpFile).first : hashString(ht, *result.data);
+
+ if (expectedHash != Hash(ht) && expectedHash != hash)
+ throw Error(format("hash mismatch for ‘%1%’") % uri);
+
+ /* Copy the file to the Nix store. FIXME: if RemoteStore
+ implemented addToStoreFromDump() and downloadFile()
+ supported a sink, we could stream the download directly
+ into the Nix store. */
+ storePath = store->addToStore(name, tmpFile, unpack, ht);
+
+ assert(storePath == store->makeFixedOutputPath(unpack, hash, name));
+ }
+
+ if (!printPath)
+ printInfo(format("path is ‘%1%’") % storePath);
+
+ std::cout << printHash16or32(hash) << std::endl;
+ if (printPath)
+ std::cout << storePath << std::endl;
+ });
+}
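The shortcut above relies on fixed-output paths being fully determined by the (flat or recursive) hash mode, the expected hash and the name, so when an expected hash is supplied the download can be skipped whenever that path is already valid. A trimmed-down sketch of that check, using a stand-in interface rather than the real nix::Store:

#include <string>

struct StoreView {
    virtual std::string makeFixedOutputPath(bool recursive,
        const std::string & hash, const std::string & name) = 0;
    virtual bool isValidPath(const std::string & path) = 0;
    virtual ~StoreView() { }
};

// Returns the already-valid store path, or "" if a download is still needed.
std::string cachedFixedOutputPath(StoreView & store, bool unpack,
    const std::string & expectedHash, const std::string & name)
{
    std::string path = store.makeFixedOutputPath(unpack, expectedHash, name);
    return store.isValidPath(path) ? path : "";
}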
diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc
new file mode 100644
index 000000000..356a82510
--- /dev/null
+++ b/src/nix-store/dotgraph.cc
@@ -0,0 +1,156 @@
+#include "dotgraph.hh"
+#include "util.hh"
+#include "store-api.hh"
+
+#include <iostream>
+
+
+using std::cout;
+
+namespace nix {
+
+
+static string dotQuote(const string & s)
+{
+ return "\"" + s + "\"";
+}
+
+
+static string nextColour()
+{
+ static int n = 0;
+ static string colours[] =
+ { "black", "red", "green", "blue"
+ , "magenta", "burlywood" };
+ return colours[n++ % (sizeof(colours) / sizeof(string))];
+}
+
+
+static string makeEdge(const string & src, const string & dst)
+{
+ format f = format("%1% -> %2% [color = %3%];\n")
+ % dotQuote(src) % dotQuote(dst) % dotQuote(nextColour());
+ return f.str();
+}
+
+
+static string makeNode(const string & id, const string & label,
+ const string & colour)
+{
+ format f = format("%1% [label = %2%, shape = box, "
+ "style = filled, fillcolor = %3%];\n")
+ % dotQuote(id) % dotQuote(label) % dotQuote(colour);
+ return f.str();
+}
+
+
+static string symbolicName(const string & path)
+{
+ string p = baseNameOf(path);
+ int dash = p.find('-');
+ return string(p, dash + 1);
+}
+
+
+#if 0
+string pathLabel(const Path & nePath, const string & elemPath)
+{
+ return (string) nePath + "-" + elemPath;
+}
+
+
+void printClosure(const Path & nePath, const StoreExpr & fs)
+{
+ PathSet workList(fs.closure.roots);
+ PathSet doneSet;
+
+ for (PathSet::iterator i = workList.begin(); i != workList.end(); ++i) {
+ cout << makeEdge(pathLabel(nePath, *i), nePath);
+ }
+
+ while (!workList.empty()) {
+ Path path = *(workList.begin());
+ workList.erase(path);
+
+ if (doneSet.find(path) == doneSet.end()) {
+ doneSet.insert(path);
+
+ ClosureElems::const_iterator elem = fs.closure.elems.find(path);
+ if (elem == fs.closure.elems.end())
+ throw Error(format("bad closure, missing path ‘%1%’") % path);
+
+ for (StringSet::const_iterator i = elem->second.refs.begin();
+ i != elem->second.refs.end(); ++i)
+ {
+ workList.insert(*i);
+ cout << makeEdge(pathLabel(nePath, *i), pathLabel(nePath, path));
+ }
+
+ cout << makeNode(pathLabel(nePath, path),
+ symbolicName(path), "#ff0000");
+ }
+ }
+}
+#endif
+
+
+void printDotGraph(ref<Store> store, const PathSet & roots)
+{
+ PathSet workList(roots);
+ PathSet doneSet;
+
+ cout << "digraph G {\n";
+
+ while (!workList.empty()) {
+ Path path = *(workList.begin());
+ workList.erase(path);
+
+ if (doneSet.find(path) != doneSet.end()) continue;
+ doneSet.insert(path);
+
+ cout << makeNode(path, symbolicName(path), "#ff0000");
+
+ for (auto & p : store->queryPathInfo(path)->references) {
+ if (p != path) {
+ workList.insert(p);
+ cout << makeEdge(p, path);
+ }
+ }
+
+#if 0
+ StoreExpr ne = storeExprFromPath(path);
+
+ string label, colour;
+
+ if (ne.type == StoreExpr::neDerivation) {
+ for (PathSet::iterator i = ne.derivation.inputs.begin();
+ i != ne.derivation.inputs.end(); ++i)
+ {
+ workList.insert(*i);
+ cout << makeEdge(*i, path);
+ }
+
+ label = "derivation";
+ colour = "#00ff00";
+ for (StringPairs::iterator i = ne.derivation.env.begin();
+ i != ne.derivation.env.end(); ++i)
+ if (i->first == "name") label = i->second;
+ }
+
+ else if (ne.type == StoreExpr::neClosure) {
+ label = "<closure>";
+ colour = "#00ffff";
+ printClosure(path, ne);
+ }
+
+ else abort();
+
+ cout << makeNode(path, label, colour);
+#endif
+ }
+
+ cout << "}\n";
+}
+
+
+}
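For orientation, the helpers above emit plain Graphviz syntax; a root with a single reference would come out roughly as follows (store paths shortened to <hash>, edge colours cycling through the palette in nextColour()):

digraph G {
"/nix/store/<hash>-hello" [label = "hello", shape = box, style = filled, fillcolor = "#ff0000"];
"/nix/store/<hash>-glibc" -> "/nix/store/<hash>-hello" [color = "black"];
...
}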
diff --git a/src/nix-store/dotgraph.hh b/src/nix-store/dotgraph.hh
new file mode 100644
index 000000000..e2b5fc72f
--- /dev/null
+++ b/src/nix-store/dotgraph.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+class Store;
+
+void printDotGraph(ref<Store> store, const PathSet & roots);
+
+}
diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk
new file mode 100644
index 000000000..ade0b233a
--- /dev/null
+++ b/src/nix-store/local.mk
@@ -0,0 +1,9 @@
+programs += nix-store
+
+nix-store_DIR := $(d)
+
+nix-store_SOURCES := $(wildcard $(d)/*.cc)
+
+nix-store_LIBS = libmain libstore libutil libformat
+
+nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS)
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
new file mode 100644
index 000000000..9131b74df
--- /dev/null
+++ b/src/nix-store/nix-store.cc
@@ -0,0 +1,1070 @@
+#include "archive.hh"
+#include "derivations.hh"
+#include "dotgraph.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "monitor-fd.hh"
+#include "serve-protocol.hh"
+#include "shared.hh"
+#include "util.hh"
+#include "worker-protocol.hh"
+#include "xmlgraph.hh"
+
+#include <iostream>
+#include <algorithm>
+#include <cstdio>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#if HAVE_SODIUM
+#include <sodium.h>
+#endif
+
+
+using namespace nix;
+using std::cin;
+using std::cout;
+
+
+typedef void (* Operation) (Strings opFlags, Strings opArgs);
+
+
+static Path gcRoot;
+static int rootNr = 0;
+static bool indirectRoot = false;
+static bool noOutput = false;
+static std::shared_ptr<Store> store;
+
+
+ref<LocalStore> ensureLocalStore()
+{
+ auto store2 = std::dynamic_pointer_cast<LocalStore>(store);
+ if (!store2) throw Error("you don't have sufficient rights to use this command");
+ return ref<LocalStore>(store2);
+}
+
+
+static Path useDeriver(Path path)
+{
+ if (isDerivation(path)) return path;
+ Path drvPath = store->queryPathInfo(path)->deriver;
+ if (drvPath == "")
+ throw Error(format("deriver of path ‘%1%’ is not known") % path);
+ return drvPath;
+}
+
+
+/* Realise the given path. For a derivation that means build it; for
+ other paths it means ensure their validity. */
+static PathSet realisePath(Path path, bool build = true)
+{
+ DrvPathWithOutputs p = parseDrvPathWithOutputs(path);
+
+ auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store);
+
+ if (isDerivation(p.first)) {
+ if (build) store->buildPaths({path});
+ Derivation drv = store->derivationFromPath(p.first);
+ rootNr++;
+
+ if (p.second.empty())
+ for (auto & i : drv.outputs) p.second.insert(i.first);
+
+ PathSet outputs;
+ for (auto & j : p.second) {
+ DerivationOutputs::iterator i = drv.outputs.find(j);
+ if (i == drv.outputs.end())
+ throw Error(format("derivation ‘%1%’ does not have an output named ‘%2%’") % p.first % j);
+ Path outPath = i->second.path;
+ if (store2) {
+ if (gcRoot == "")
+ printGCWarning();
+ else {
+ Path rootName = gcRoot;
+ if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
+ if (i->first != "out") rootName += "-" + i->first;
+ outPath = store2->addPermRoot(outPath, rootName, indirectRoot);
+ }
+ }
+ outputs.insert(outPath);
+ }
+ return outputs;
+ }
+
+ else {
+ if (build) store->ensurePath(path);
+ else if (!store->isValidPath(path)) throw Error(format("path ‘%1%’ does not exist and cannot be created") % path);
+ if (store2) {
+ if (gcRoot == "")
+ printGCWarning();
+ else {
+ Path rootName = gcRoot;
+ rootNr++;
+ if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
+ path = store2->addPermRoot(path, rootName, indirectRoot);
+ }
+ }
+ return {path};
+ }
+}
+
+
+/* Realise the given paths. */
+static void opRealise(Strings opFlags, Strings opArgs)
+{
+ bool dryRun = false;
+ BuildMode buildMode = bmNormal;
+ bool ignoreUnknown = false;
+
+ for (auto & i : opFlags)
+ if (i == "--dry-run") dryRun = true;
+ else if (i == "--repair") buildMode = bmRepair;
+ else if (i == "--check") buildMode = bmCheck;
+ else if (i == "--hash") buildMode = bmHash;
+ else if (i == "--ignore-unknown") ignoreUnknown = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ Paths paths;
+ for (auto & i : opArgs) {
+ DrvPathWithOutputs p = parseDrvPathWithOutputs(i);
+ paths.push_back(makeDrvPathWithOutputs(store->followLinksToStorePath(p.first), p.second));
+ }
+
+ unsigned long long downloadSize, narSize;
+ PathSet willBuild, willSubstitute, unknown;
+ store->queryMissing(PathSet(paths.begin(), paths.end()),
+ willBuild, willSubstitute, unknown, downloadSize, narSize);
+
+ if (ignoreUnknown) {
+ Paths paths2;
+ for (auto & i : paths)
+ if (unknown.find(i) == unknown.end()) paths2.push_back(i);
+ paths = paths2;
+ unknown = PathSet();
+ }
+
+ if (settings.printMissing)
+ printMissing(ref<Store>(store), willBuild, willSubstitute, unknown, downloadSize, narSize);
+
+ if (dryRun) return;
+
+ /* Build all paths at the same time to exploit parallelism. */
+ store->buildPaths(PathSet(paths.begin(), paths.end()), buildMode);
+
+ if (!ignoreUnknown)
+ for (auto & i : paths) {
+ PathSet paths = realisePath(i, false);
+ if (!noOutput)
+ for (auto & j : paths)
+ cout << format("%1%\n") % j;
+ }
+}
+
+
+/* Add files to the Nix store and print the resulting paths. */
+static void opAdd(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+
+ for (auto & i : opArgs)
+ cout << format("%1%\n") % store->addToStore(baseNameOf(i), i);
+}
+
+
+/* Preload the output of a fixed-output derivation into the Nix
+ store. */
+static void opAddFixed(Strings opFlags, Strings opArgs)
+{
+ bool recursive = false;
+
+ for (auto & i : opFlags)
+ if (i == "--recursive") recursive = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (opArgs.empty())
+ throw UsageError("first argument must be hash algorithm");
+
+ HashType hashAlgo = parseHashType(opArgs.front());
+ opArgs.pop_front();
+
+ for (auto & i : opArgs)
+ cout << format("%1%\n") % store->addToStore(baseNameOf(i), i, recursive, hashAlgo);
+}
+
+
+/* Hack to support caching in `nix-prefetch-url'. */
+static void opPrintFixedPath(Strings opFlags, Strings opArgs)
+{
+ bool recursive = false;
+
+ for (auto & i : opFlags)
+ if (i == "--recursive") recursive = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (opArgs.size() != 3)
+ throw UsageError(format("‘--print-fixed-path’ requires three arguments"));
+
+ Strings::iterator i = opArgs.begin();
+ HashType hashAlgo = parseHashType(*i++);
+ string hash = *i++;
+ string name = *i++;
+
+ cout << format("%1%\n") %
+ store->makeFixedOutputPath(recursive, parseHash16or32(hashAlgo, hash), name);
+}
+
+
+static PathSet maybeUseOutputs(const Path & storePath, bool useOutput, bool forceRealise)
+{
+ if (forceRealise) realisePath(storePath);
+ if (useOutput && isDerivation(storePath)) {
+ Derivation drv = store->derivationFromPath(storePath);
+ PathSet outputs;
+ for (auto & i : drv.outputs)
+ outputs.insert(i.second.path);
+ return outputs;
+ }
+ else return {storePath};
+}
+
+
+/* Some code to print a tree representation of a derivation dependency
+ graph. Topological sorting is used to keep the tree relatively
+ flat. */
+
+const string treeConn = "+---";
+const string treeLine = "| ";
+const string treeNull = " ";
+
+
+static void printTree(const Path & path,
+ const string & firstPad, const string & tailPad, PathSet & done)
+{
+ if (done.find(path) != done.end()) {
+ cout << format("%1%%2% [...]\n") % firstPad % path;
+ return;
+ }
+ done.insert(path);
+
+ cout << format("%1%%2%\n") % firstPad % path;
+
+ auto references = store->queryPathInfo(path)->references;
+
+ /* Topologically sort under the relation A < B iff A \in
+ closure(B). That is, if derivation A is a (possibly indirect)
+ input of B, then A is printed first. This has the effect of
+ flattening the tree, preventing deeply nested structures. */
+ Paths sorted = store->topoSortPaths(references);
+ reverse(sorted.begin(), sorted.end());
+
+ for (auto i = sorted.begin(); i != sorted.end(); ++i) {
+ auto j = i; ++j;
+ printTree(*i, tailPad + treeConn,
+ j == sorted.end() ? tailPad + treeNull : tailPad + treeLine,
+ done);
+ }
+}
+
+
+/* Perform various sorts of queries. */
+static void opQuery(Strings opFlags, Strings opArgs)
+{
+ enum QueryType
+ { qDefault, qOutputs, qRequisites, qReferences, qReferrers
+ , qReferrersClosure, qDeriver, qBinding, qHash, qSize
+ , qTree, qGraph, qXml, qResolve, qRoots };
+ QueryType query = qDefault;
+ bool useOutput = false;
+ bool includeOutputs = false;
+ bool forceRealise = false;
+ string bindingName;
+
+ for (auto & i : opFlags) {
+ QueryType prev = query;
+ if (i == "--outputs") query = qOutputs;
+ else if (i == "--requisites" || i == "-R") query = qRequisites;
+ else if (i == "--references") query = qReferences;
+ else if (i == "--referrers" || i == "--referers") query = qReferrers;
+ else if (i == "--referrers-closure" || i == "--referers-closure") query = qReferrersClosure;
+ else if (i == "--deriver" || i == "-d") query = qDeriver;
+ else if (i == "--binding" || i == "-b") {
+ if (opArgs.size() == 0)
+ throw UsageError("expected binding name");
+ bindingName = opArgs.front();
+ opArgs.pop_front();
+ query = qBinding;
+ }
+ else if (i == "--hash") query = qHash;
+ else if (i == "--size") query = qSize;
+ else if (i == "--tree") query = qTree;
+ else if (i == "--graph") query = qGraph;
+ else if (i == "--xml") query = qXml;
+ else if (i == "--resolve") query = qResolve;
+ else if (i == "--roots") query = qRoots;
+ else if (i == "--use-output" || i == "-u") useOutput = true;
+ else if (i == "--force-realise" || i == "--force-realize" || i == "-f") forceRealise = true;
+ else if (i == "--include-outputs") includeOutputs = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+ if (prev != qDefault && prev != query)
+ throw UsageError(format("query type ‘%1%’ conflicts with earlier flag") % i);
+ }
+
+ if (query == qDefault) query = qOutputs;
+
+ RunPager pager;
+
+ switch (query) {
+
+ case qOutputs: {
+ for (auto & i : opArgs) {
+ i = store->followLinksToStorePath(i);
+ if (forceRealise) realisePath(i);
+ Derivation drv = store->derivationFromPath(i);
+ for (auto & j : drv.outputs)
+ cout << format("%1%\n") % j.second.path;
+ }
+ break;
+ }
+
+ case qRequisites:
+ case qReferences:
+ case qReferrers:
+ case qReferrersClosure: {
+ PathSet paths;
+ for (auto & i : opArgs) {
+ PathSet ps = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
+ for (auto & j : ps) {
+ if (query == qRequisites) store->computeFSClosure(j, paths, false, includeOutputs);
+ else if (query == qReferences) {
+ for (auto & p : store->queryPathInfo(j)->references)
+ paths.insert(p);
+ }
+ else if (query == qReferrers) store->queryReferrers(j, paths);
+ else if (query == qReferrersClosure) store->computeFSClosure(j, paths, true);
+ }
+ }
+ Paths sorted = store->topoSortPaths(paths);
+ for (Paths::reverse_iterator i = sorted.rbegin();
+ i != sorted.rend(); ++i)
+ cout << format("%s\n") % *i;
+ break;
+ }
+
+ case qDeriver:
+ for (auto & i : opArgs) {
+ Path deriver = store->queryPathInfo(store->followLinksToStorePath(i))->deriver;
+ cout << format("%1%\n") %
+ (deriver == "" ? "unknown-deriver" : deriver);
+ }
+ break;
+
+ case qBinding:
+ for (auto & i : opArgs) {
+ Path path = useDeriver(store->followLinksToStorePath(i));
+ Derivation drv = store->derivationFromPath(path);
+ StringPairs::iterator j = drv.env.find(bindingName);
+ if (j == drv.env.end())
+ throw Error(format("derivation ‘%1%’ has no environment binding named ‘%2%’")
+ % path % bindingName);
+ cout << format("%1%\n") % j->second;
+ }
+ break;
+
+ case qHash:
+ case qSize:
+ for (auto & i : opArgs) {
+ PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
+ for (auto & j : paths) {
+ auto info = store->queryPathInfo(j);
+ if (query == qHash) {
+ assert(info->narHash.type == htSHA256);
+ cout << format("sha256:%1%\n") % printHash32(info->narHash);
+ } else if (query == qSize)
+ cout << format("%1%\n") % info->narSize;
+ }
+ }
+ break;
+
+ case qTree: {
+ PathSet done;
+ for (auto & i : opArgs)
+ printTree(store->followLinksToStorePath(i), "", "", done);
+ break;
+ }
+
+ case qGraph: {
+ PathSet roots;
+ for (auto & i : opArgs) {
+ PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
+ roots.insert(paths.begin(), paths.end());
+ }
+ printDotGraph(ref<Store>(store), roots);
+ break;
+ }
+
+ case qXml: {
+ PathSet roots;
+ for (auto & i : opArgs) {
+ PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
+ roots.insert(paths.begin(), paths.end());
+ }
+ printXmlGraph(ref<Store>(store), roots);
+ break;
+ }
+
+ case qResolve: {
+ for (auto & i : opArgs)
+ cout << format("%1%\n") % store->followLinksToStorePath(i);
+ break;
+ }
+
+ case qRoots: {
+ PathSet referrers;
+ for (auto & i : opArgs) {
+ store->computeFSClosure(
+ maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise),
+ referrers, true, settings.gcKeepOutputs, settings.gcKeepDerivations);
+ }
+ Roots roots = store->findRoots();
+ for (auto & i : roots)
+ if (referrers.find(i.second) != referrers.end())
+ cout << format("%1%\n") % i.first;
+ break;
+ }
+
+ default:
+ abort();
+ }
+}
+
+
+static string shellEscape(const string & s)
+{
+ string r;
+ for (auto & i : s)
+ if (i == '\'') r += "'\\''"; else r += i;
+ return r;
+}
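A concrete example of the quoting (the variable name x is hypothetical): shellEscape("it's") yields it'\''s, so opPrintEnv below emits

export x; x='it'\''s'

which the shell reads back as the original value.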
+
+
+static void opPrintEnv(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (opArgs.size() != 1) throw UsageError("‘--print-env’ requires one derivation store path");
+
+ Path drvPath = opArgs.front();
+ Derivation drv = store->derivationFromPath(drvPath);
+
+ /* Print each environment variable in the derivation in a format
+ that can be sourced by the shell. */
+ for (auto & i : drv.env)
+ cout << format("export %1%; %1%='%2%'\n") % i.first % shellEscape(i.second);
+
+ /* Also output the arguments. This doesn't preserve whitespace in
+ arguments. */
+ cout << "export _args; _args='";
+ bool first = true;
+ for (auto & i : drv.args) {
+ if (!first) cout << ' ';
+ first = false;
+ cout << shellEscape(i);
+ }
+ cout << "'\n";
+}
+
+
+static void opReadLog(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+
+ RunPager pager;
+
+ for (auto & i : opArgs) {
+ auto path = store->followLinksToStorePath(i);
+ auto log = store->getBuildLog(path);
+ if (!log)
+ throw Error("build log of derivation ‘%s’ is not available", path);
+ std::cout << *log;
+ }
+}
+
+
+static void opDumpDB(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (!opArgs.empty())
+ throw UsageError("no arguments expected");
+ PathSet validPaths = store->queryAllValidPaths();
+ for (auto & i : validPaths)
+ cout << store->makeValidityRegistration({i}, true, true);
+}
+
+
+static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
+{
+ ValidPathInfos infos;
+
+ while (1) {
+ ValidPathInfo info = decodeValidPathInfo(cin, hashGiven);
+ if (info.path == "") break;
+ if (!store->isValidPath(info.path) || reregister) {
+ /* !!! races */
+ if (canonicalise)
+ canonicalisePathMetaData(info.path, -1);
+ if (!hashGiven) {
+ HashResult hash = hashPath(htSHA256, info.path);
+ info.narHash = hash.first;
+ info.narSize = hash.second;
+ }
+ infos.push_back(info);
+ }
+ }
+
+ ensureLocalStore()->registerValidPaths(infos);
+}
+
+
+static void opLoadDB(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (!opArgs.empty())
+ throw UsageError("no arguments expected");
+ registerValidity(true, true, false);
+}
+
+
+static void opRegisterValidity(Strings opFlags, Strings opArgs)
+{
+ bool reregister = false; // !!! maybe this should be the default
+ bool hashGiven = false;
+
+ for (auto & i : opFlags)
+ if (i == "--reregister") reregister = true;
+ else if (i == "--hash-given") hashGiven = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+ registerValidity(reregister, hashGiven, true);
+}
+
+
+static void opCheckValidity(Strings opFlags, Strings opArgs)
+{
+ bool printInvalid = false;
+
+ for (auto & i : opFlags)
+ if (i == "--print-invalid") printInvalid = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ for (auto & i : opArgs) {
+ Path path = store->followLinksToStorePath(i);
+ if (!store->isValidPath(path)) {
+ if (printInvalid)
+ cout << format("%1%\n") % path;
+ else
+ throw Error(format("path ‘%1%’ is not valid") % path);
+ }
+ }
+}
+
+
+static void opGC(Strings opFlags, Strings opArgs)
+{
+ bool printRoots = false;
+ GCOptions options;
+ options.action = GCOptions::gcDeleteDead;
+
+ GCResults results;
+
+ /* Do what? */
+ for (auto i = opFlags.begin(); i != opFlags.end(); ++i)
+ if (*i == "--print-roots") printRoots = true;
+ else if (*i == "--print-live") options.action = GCOptions::gcReturnLive;
+ else if (*i == "--print-dead") options.action = GCOptions::gcReturnDead;
+ else if (*i == "--delete") options.action = GCOptions::gcDeleteDead;
+ else if (*i == "--max-freed") {
+ long long maxFreed = getIntArg<long long>(*i, i, opFlags.end(), true);
+ options.maxFreed = maxFreed >= 0 ? maxFreed : 0;
+ }
+ else throw UsageError(format("bad sub-operation ‘%1%’ in GC") % *i);
+
+ if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+ if (printRoots) {
+ Roots roots = store->findRoots();
+ for (auto & i : roots)
+ cout << i.first << " -> " << i.second << std::endl;
+ }
+
+ else {
+ PrintFreed freed(options.action == GCOptions::gcDeleteDead, results);
+ store->collectGarbage(options, results);
+
+ if (options.action != GCOptions::gcDeleteDead)
+ for (auto & i : results.paths)
+ cout << i << std::endl;
+ }
+}
+
+
+/* Remove paths from the Nix store if possible (i.e., if they do not
+ have any remaining referrers and are not reachable from any GC
+ roots). */
+static void opDelete(Strings opFlags, Strings opArgs)
+{
+ GCOptions options;
+ options.action = GCOptions::gcDeleteSpecific;
+
+ for (auto & i : opFlags)
+ if (i == "--ignore-liveness") options.ignoreLiveness = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ for (auto & i : opArgs)
+ options.pathsToDelete.insert(store->followLinksToStorePath(i));
+
+ GCResults results;
+ PrintFreed freed(true, results);
+ store->collectGarbage(options, results);
+}
+
+
+/* Dump a path as a Nix archive. The archive is written to standard
+ output. */
+static void opDump(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (opArgs.size() != 1) throw UsageError("only one argument allowed");
+
+ FdSink sink(STDOUT_FILENO);
+ string path = *opArgs.begin();
+ dumpPath(path, sink);
+}
+
+
+/* Restore a value from a Nix archive. The archive is read from
+ standard input. */
+static void opRestore(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (opArgs.size() != 1) throw UsageError("only one argument allowed");
+
+ FdSource source(STDIN_FILENO);
+ restorePath(*opArgs.begin(), source);
+}
+
+
+static void opExport(Strings opFlags, Strings opArgs)
+{
+ for (auto & i : opFlags)
+ throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ for (auto & i : opArgs)
+ i = store->followLinksToStorePath(i);
+
+ FdSink sink(STDOUT_FILENO);
+ store->exportPaths(opArgs, sink);
+}
+
+
+static void opImport(Strings opFlags, Strings opArgs)
+{
+ for (auto & i : opFlags)
+ throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+ FdSource source(STDIN_FILENO);
+ Paths paths = store->importPaths(source, nullptr, true);
+
+ for (auto & i : paths)
+ cout << format("%1%\n") % i << std::flush;
+}
+
+
+/* Initialise the Nix databases. */
+static void opInit(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty()) throw UsageError("unknown flag");
+ if (!opArgs.empty())
+ throw UsageError("no arguments expected");
+ /* Doesn't do anything right now; database tables are initialised
+ automatically. */
+}
+
+
+/* Verify the consistency of the Nix environment. */
+static void opVerify(Strings opFlags, Strings opArgs)
+{
+ if (!opArgs.empty())
+ throw UsageError("no arguments expected");
+
+ bool checkContents = false;
+ bool repair = false;
+
+ for (auto & i : opFlags)
+ if (i == "--check-contents") checkContents = true;
+ else if (i == "--repair") repair = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (store->verifyStore(checkContents, repair)) {
+ printError("warning: not all errors were fixed");
+ throw Exit(1);
+ }
+}
+
+
+/* Verify that the contents of the given store path have not changed. */
+static void opVerifyPath(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty())
+ throw UsageError("no flags expected");
+
+ int status = 0;
+
+ for (auto & i : opArgs) {
+ Path path = store->followLinksToStorePath(i);
+ printMsg(lvlTalkative, format("checking path ‘%1%’...") % path);
+ auto info = store->queryPathInfo(path);
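+        /* Recompute the NAR hash of the path's current contents and compare it with the recorded hash. */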
+ HashSink sink(info->narHash.type);
+ store->narFromPath(path, sink);
+ auto current = sink.finish();
+ if (current.first != info->narHash) {
+ printError(
+ format("path ‘%1%’ was modified! expected hash ‘%2%’, got ‘%3%’")
+ % path % printHash(info->narHash) % printHash(current.first));
+ status = 1;
+ }
+ }
+
+ throw Exit(status);
+}
+
+
+/* Repair the contents of the given path by redownloading it using a
+ substituter (if available). */
+static void opRepairPath(Strings opFlags, Strings opArgs)
+{
+ if (!opFlags.empty())
+ throw UsageError("no flags expected");
+
+ for (auto & i : opArgs) {
+ Path path = store->followLinksToStorePath(i);
+ ensureLocalStore()->repairPath(path);
+ }
+}
+
+/* Optimise the disk space usage of the Nix store by hard-linking
+ files with the same contents. */
+static void opOptimise(Strings opFlags, Strings opArgs)
+{
+ if (!opArgs.empty() || !opFlags.empty())
+ throw UsageError("no arguments expected");
+
+ store->optimiseStore();
+}
+
+/* Serve the nix store in a way usable by a restricted ssh user. */
+static void opServe(Strings opFlags, Strings opArgs)
+{
+ bool writeAllowed = false;
+ for (auto & i : opFlags)
+ if (i == "--write") writeAllowed = true;
+ else throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+ FdSource in(STDIN_FILENO);
+ FdSink out(STDOUT_FILENO);
+
+ /* Exchange the greeting. */
+ unsigned int magic = readInt(in);
+ if (magic != SERVE_MAGIC_1) throw Error("protocol mismatch");
+ out << SERVE_MAGIC_2 << SERVE_PROTOCOL_VERSION;
+ out.flush();
+ unsigned int clientVersion = readInt(in);
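+    /* The client's protocol version gates the optional settings read below (see the GET_PROTOCOL_MINOR checks). */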
+
+ auto getBuildSettings = [&]() {
+ // FIXME: changing options here doesn't work if we're
+ // building through the daemon.
+ verbosity = lvlError;
+ settings.keepLog = false;
+ settings.useSubstitutes = false;
+ settings.maxSilentTime = readInt(in);
+ settings.buildTimeout = readInt(in);
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
+ settings.maxLogSize = readNum<unsigned long>(in);
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
+ settings.buildRepeat = readInt(in);
+ settings.enforceDeterminism = readInt(in);
+ settings.runDiffHook = readInt(in);
+ }
+ settings.printRepeatedBuilds = false;
+ };
+
+ while (true) {
+ ServeCommand cmd;
+ try {
+ cmd = (ServeCommand) readInt(in);
+ } catch (EndOfFile & e) {
+ break;
+ }
+
+ switch (cmd) {
+
+ case cmdQueryValidPaths: {
+ bool lock = readInt(in);
+ bool substitute = readInt(in);
+ PathSet paths = readStorePaths<PathSet>(*store, in);
+ if (lock && writeAllowed)
+ for (auto & path : paths)
+ store->addTempRoot(path);
+
+ /* If requested, substitute missing paths. This
+ implements nix-copy-closure's --use-substitutes
+ flag. */
+ if (substitute && writeAllowed) {
+ /* Filter out .drv files (we don't want to build anything). */
+ PathSet paths2;
+ for (auto & path : paths)
+ if (!isDerivation(path)) paths2.insert(path);
+ unsigned long long downloadSize, narSize;
+ PathSet willBuild, willSubstitute, unknown;
+ store->queryMissing(PathSet(paths2.begin(), paths2.end()),
+ willBuild, willSubstitute, unknown, downloadSize, narSize);
+ /* FIXME: should use ensurePath(), but it only
+ does one path at a time. */
+ if (!willSubstitute.empty())
+ try {
+ store->buildPaths(willSubstitute);
+ } catch (Error & e) {
+ printError(format("warning: %1%") % e.msg());
+ }
+ }
+
+ out << store->queryValidPaths(paths);
+ break;
+ }
+
+ case cmdQueryPathInfos: {
+ PathSet paths = readStorePaths<PathSet>(*store, in);
+ // !!! Maybe we want a queryPathInfos?
+ for (auto & i : paths) {
+ try {
+ auto info = store->queryPathInfo(i);
+ out << info->path << info->deriver << info->references;
+ // !!! Maybe we want compression?
+ out << info->narSize // downloadSize
+ << info->narSize;
+ } catch (InvalidPath &) {
+ }
+ }
+ out << "";
+ break;
+ }
+
+ case cmdDumpStorePath:
+ dumpPath(readStorePath(*store, in), out);
+ break;
+
+ case cmdImportPaths: {
+ if (!writeAllowed) throw Error("importing paths is not allowed");
+            store->importPaths(in, nullptr, true); // FIXME: should we skip sig checking?
+ out << 1; // indicate success
+ break;
+ }
+
+ case cmdExportPaths: {
+ readInt(in); // obsolete
+ store->exportPaths(readStorePaths<Paths>(*store, in), out);
+ break;
+ }
+
+ case cmdBuildPaths: {
+
+ if (!writeAllowed) throw Error("building paths is not allowed");
+ PathSet paths = readStorePaths<PathSet>(*store, in);
+
+ getBuildSettings();
+
+ try {
+ MonitorFdHup monitor(in.fd);
+ store->buildPaths(paths);
+ out << 0;
+ } catch (Error & e) {
+ assert(e.status);
+ out << e.status << e.msg();
+ }
+ break;
+ }
+
+ case cmdBuildDerivation: { /* Used by hydra-queue-runner. */
+
+ if (!writeAllowed) throw Error("building paths is not allowed");
+
+ Path drvPath = readStorePath(*store, in); // informational only
+ BasicDerivation drv;
+ readDerivation(in, *store, drv);
+
+ getBuildSettings();
+
+ MonitorFdHup monitor(in.fd);
+ auto status = store->buildDerivation(drvPath, drv);
+
+ out << status.status << status.errorMsg;
+
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
+ out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
+
+ break;
+ }
+
+ case cmdQueryClosure: {
+ bool includeOutputs = readInt(in);
+ PathSet closure;
+ store->computeFSClosure(readStorePaths<PathSet>(*store, in),
+ closure, false, includeOutputs);
+ out << closure;
+ break;
+ }
+
+ default:
+ throw Error(format("unknown serve command %1%") % cmd);
+ }
+
+ out.flush();
+ }
+}
+
+
+static void opGenerateBinaryCacheKey(Strings opFlags, Strings opArgs)
+{
+ for (auto & i : opFlags)
+ throw UsageError(format("unknown flag ‘%1%’") % i);
+
+ if (opArgs.size() != 3) throw UsageError("three arguments expected");
+ auto i = opArgs.begin();
+ string keyName = *i++;
+ string secretKeyFile = *i++;
+ string publicKeyFile = *i++;
+
+#if HAVE_SODIUM
+ if (sodium_init() == -1)
+ throw Error("could not initialise libsodium");
+
+ unsigned char pk[crypto_sign_PUBLICKEYBYTES];
+ unsigned char sk[crypto_sign_SECRETKEYBYTES];
+ if (crypto_sign_keypair(pk, sk) != 0)
+ throw Error("key generation failed");
+
+ writeFile(publicKeyFile, keyName + ":" + base64Encode(string((char *) pk, crypto_sign_PUBLICKEYBYTES)));
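+    /* Tighten the umask so the secret key file is created without group/other read permissions. */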
+ umask(0077);
+ writeFile(secretKeyFile, keyName + ":" + base64Encode(string((char *) sk, crypto_sign_SECRETKEYBYTES)));
+#else
+ throw Error("Nix was not compiled with libsodium, required for signed binary cache support");
+#endif
+}
+
+
+static void opVersion(Strings opFlags, Strings opArgs)
+{
+ printVersion("nix-store");
+}
+
+
+/* Scan the arguments; find the operation, set global flags, put all
+ other flags in a list, and put all other arguments in another
+ list. */
+int main(int argc, char * * argv)
+{
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ Strings opFlags, opArgs;
+ Operation op = 0;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ Operation oldOp = op;
+
+ if (*arg == "--help")
+ showManPage("nix-store");
+ else if (*arg == "--version")
+ op = opVersion;
+ else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r")
+ op = opRealise;
+ else if (*arg == "--add" || *arg == "-A")
+ op = opAdd;
+ else if (*arg == "--add-fixed")
+ op = opAddFixed;
+ else if (*arg == "--print-fixed-path")
+ op = opPrintFixedPath;
+ else if (*arg == "--delete")
+ op = opDelete;
+ else if (*arg == "--query" || *arg == "-q")
+ op = opQuery;
+ else if (*arg == "--print-env")
+ op = opPrintEnv;
+ else if (*arg == "--read-log" || *arg == "-l")
+ op = opReadLog;
+ else if (*arg == "--dump-db")
+ op = opDumpDB;
+ else if (*arg == "--load-db")
+ op = opLoadDB;
+ else if (*arg == "--register-validity")
+ op = opRegisterValidity;
+ else if (*arg == "--check-validity")
+ op = opCheckValidity;
+ else if (*arg == "--gc")
+ op = opGC;
+ else if (*arg == "--dump")
+ op = opDump;
+ else if (*arg == "--restore")
+ op = opRestore;
+ else if (*arg == "--export")
+ op = opExport;
+ else if (*arg == "--import")
+ op = opImport;
+ else if (*arg == "--init")
+ op = opInit;
+ else if (*arg == "--verify")
+ op = opVerify;
+ else if (*arg == "--verify-path")
+ op = opVerifyPath;
+ else if (*arg == "--repair-path")
+ op = opRepairPath;
+ else if (*arg == "--optimise" || *arg == "--optimize")
+ op = opOptimise;
+ else if (*arg == "--serve")
+ op = opServe;
+ else if (*arg == "--generate-binary-cache-key")
+ op = opGenerateBinaryCacheKey;
+ else if (*arg == "--add-root")
+ gcRoot = absPath(getArg(*arg, arg, end));
+ else if (*arg == "--indirect")
+ indirectRoot = true;
+ else if (*arg == "--no-output")
+ noOutput = true;
+ else if (*arg != "" && arg->at(0) == '-') {
+ opFlags.push_back(*arg);
+ if (*arg == "--max-freed" || *arg == "--max-links" || *arg == "--max-atime") /* !!! hack */
+ opFlags.push_back(getArg(*arg, arg, end));
+ }
+ else
+ opArgs.push_back(*arg);
+
+ if (oldOp && oldOp != op)
+ throw UsageError("only one operation may be specified");
+
+ return true;
+ });
+
+ if (!op) throw UsageError("no operation specified");
+
+ if (op != opDump && op != opRestore) /* !!! hack */
+ store = openStore();
+
+ op(opFlags, opArgs);
+ });
+}
diff --git a/src/nix-store/xmlgraph.cc b/src/nix-store/xmlgraph.cc
new file mode 100644
index 000000000..0f7be7f7a
--- /dev/null
+++ b/src/nix-store/xmlgraph.cc
@@ -0,0 +1,66 @@
+#include "xmlgraph.hh"
+#include "util.hh"
+#include "store-api.hh"
+
+#include <iostream>
+
+
+using std::cout;
+
+namespace nix {
+
+
+static inline const string & xmlQuote(const string & s)
+{
+ // Luckily, store paths shouldn't contain any character that needs to be
+ // quoted.
+ return s;
+}
+
+
+static string makeEdge(const string & src, const string & dst)
+{
+ format f = format(" <edge src=\"%1%\" dst=\"%2%\"/>\n")
+ % xmlQuote(src) % xmlQuote(dst);
+ return f.str();
+}
+
+
+static string makeNode(const string & id)
+{
+ format f = format(" <node name=\"%1%\"/>\n") % xmlQuote(id);
+ return f.str();
+}
+
+
+void printXmlGraph(ref<Store> store, const PathSet & roots)
+{
+ PathSet workList(roots);
+ PathSet doneSet;
+
+ cout << "<?xml version='1.0' encoding='utf-8'?>\n"
+ << "<nix>\n";
+
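+    /* Worklist traversal over the references of the roots: each path is printed as a node once, and an edge is printed for every reference. */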
+ while (!workList.empty()) {
+ Path path = *(workList.begin());
+ workList.erase(path);
+
+ if (doneSet.find(path) != doneSet.end()) continue;
+ doneSet.insert(path);
+
+ cout << makeNode(path);
+
+ for (auto & p : store->queryPathInfo(path)->references) {
+ if (p != path) {
+ workList.insert(p);
+ cout << makeEdge(p, path);
+ }
+ }
+
+ }
+
+ cout << "</nix>\n";
+}
+
+
+}
diff --git a/src/nix-store/xmlgraph.hh b/src/nix-store/xmlgraph.hh
new file mode 100644
index 000000000..a6e7d4e28
--- /dev/null
+++ b/src/nix-store/xmlgraph.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+class Store;
+
+void printXmlGraph(ref<Store> store, const PathSet & roots);
+
+}
diff --git a/src/nix/build.cc b/src/nix/build.cc
new file mode 100644
index 000000000..00bda1fd1
--- /dev/null
+++ b/src/nix/build.cc
@@ -0,0 +1,32 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+struct CmdBuild : MixDryRun, InstallablesCommand
+{
+ CmdBuild()
+ {
+ }
+
+ std::string name() override
+ {
+ return "build";
+ }
+
+ std::string description() override
+ {
+ return "build a derivation or fetch a store path";
+ }
+
+ void run(ref<Store> store) override
+ {
+ auto paths = buildInstallables(store, dryRun);
+
+ printInfo("build result: %s", showPaths(paths));
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdBuild>());
diff --git a/src/nix/cat.cc b/src/nix/cat.cc
new file mode 100644
index 000000000..2405a8cb4
--- /dev/null
+++ b/src/nix/cat.cc
@@ -0,0 +1,74 @@
+#include "command.hh"
+#include "store-api.hh"
+#include "fs-accessor.hh"
+#include "nar-accessor.hh"
+
+using namespace nix;
+
+struct MixCat : virtual Args
+{
+ std::string path;
+
+ void cat(ref<FSAccessor> accessor)
+ {
+ auto st = accessor->stat(path);
+ if (st.type == FSAccessor::Type::tMissing)
+ throw Error(format("path ‘%1%’ does not exist") % path);
+ if (st.type != FSAccessor::Type::tRegular)
+ throw Error(format("path ‘%1%’ is not a regular file") % path);
+
+ std::cout << accessor->readFile(path);
+ }
+};
+
+struct CmdCatStore : StoreCommand, MixCat
+{
+ CmdCatStore()
+ {
+ expectArg("path", &path);
+ }
+
+ std::string name() override
+ {
+ return "cat-store";
+ }
+
+ std::string description() override
+ {
+ return "print the contents of a store file on stdout";
+ }
+
+ void run(ref<Store> store) override
+ {
+ cat(store->getFSAccessor());
+ }
+};
+
+struct CmdCatNar : StoreCommand, MixCat
+{
+ Path narPath;
+
+ CmdCatNar()
+ {
+ expectArg("nar", &narPath);
+ expectArg("path", &path);
+ }
+
+ std::string name() override
+ {
+ return "cat-nar";
+ }
+
+ std::string description() override
+ {
+ return "print the contents of a file inside a NAR file";
+ }
+
+ void run(ref<Store> store) override
+ {
+ cat(makeNarAccessor(make_ref<std::string>(readFile(narPath))));
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdCatStore>());
+static RegisterCommand r2(make_ref<CmdCatNar>());
diff --git a/src/nix/command.cc b/src/nix/command.cc
new file mode 100644
index 000000000..a45f2888b
--- /dev/null
+++ b/src/nix/command.cc
@@ -0,0 +1,132 @@
+#include "command.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+
+namespace nix {
+
+Commands * RegisterCommand::commands = 0;
+
+void Command::printHelp(const string & programName, std::ostream & out)
+{
+ Args::printHelp(programName, out);
+
+ auto exs = examples();
+ if (!exs.empty()) {
+ out << "\n";
+ out << "Examples:\n";
+ for (auto & ex : exs)
+ out << "\n"
+ << " " << ex.description << "\n" // FIXME: wrap
+ << " $ " << ex.command << "\n";
+ }
+}
+
+MultiCommand::MultiCommand(const Commands & _commands)
+ : commands(_commands)
+{
+ expectedArgs.push_back(ExpectedArg{"command", 1, [=](Strings ss) {
+ assert(!command);
+ auto i = commands.find(ss.front());
+ if (i == commands.end())
+ throw UsageError(format("‘%1%’ is not a recognised command") % ss.front());
+ command = i->second;
+ }});
+}
+
+void MultiCommand::printHelp(const string & programName, std::ostream & out)
+{
+ if (command) {
+ command->printHelp(programName + " " + command->name(), out);
+ return;
+ }
+
+ out << "Usage: " << programName << " <COMMAND> <FLAGS>... <ARGS>...\n";
+
+ out << "\n";
+ out << "Common flags:\n";
+ printFlags(out);
+
+ out << "\n";
+ out << "Available commands:\n";
+
+ Table2 table;
+ for (auto & command : commands)
+ table.push_back(std::make_pair(command.second->name(), command.second->description()));
+ printTable(out, table);
+
+ out << "\n";
+ out << "For full documentation, run ‘man " << programName << "’ or ‘man " << programName << "-<COMMAND>’.\n";
+}
+
+bool MultiCommand::processFlag(Strings::iterator & pos, Strings::iterator end)
+{
+ if (Args::processFlag(pos, end)) return true;
+ if (command && command->processFlag(pos, end)) return true;
+ return false;
+}
+
+bool MultiCommand::processArgs(const Strings & args, bool finish)
+{
+ if (command)
+ return command->processArgs(args, finish);
+ else
+ return Args::processArgs(args, finish);
+}
+
+StoreCommand::StoreCommand()
+{
+ storeUri = getEnv("NIX_REMOTE");
+
+ mkFlag(0, "store", "store-uri", "URI of the Nix store to use", &storeUri);
+}
+
+ref<Store> StoreCommand::getStore()
+{
+ if (!_store)
+ _store = createStore();
+ return ref<Store>(_store);
+}
+
+ref<Store> StoreCommand::createStore()
+{
+ return openStore(storeUri);
+}
+
+void StoreCommand::run()
+{
+ run(createStore());
+}
+
+StorePathsCommand::StorePathsCommand()
+{
+ mkFlag('r', "recursive", "apply operation to closure of the specified paths", &recursive);
+ mkFlag(0, "all", "apply operation to the entire store", &all);
+}
+
+void StorePathsCommand::run(ref<Store> store)
+{
+ Paths storePaths;
+
+ if (all) {
+ if (installables.size())
+ throw UsageError("‘--all’ does not expect arguments");
+ for (auto & p : store->queryAllValidPaths())
+ storePaths.push_back(p);
+ }
+
+ else {
+ for (auto & p : buildInstallables(store, false))
+ storePaths.push_back(p);
+
+ if (recursive) {
+ PathSet closure;
+ store->computeFSClosure(PathSet(storePaths.begin(), storePaths.end()),
+ closure, false, false);
+ storePaths = Paths(closure.begin(), closure.end());
+ }
+ }
+
+ run(store, storePaths);
+}
+
+}
diff --git a/src/nix/command.hh b/src/nix/command.hh
new file mode 100644
index 000000000..dc7b2637d
--- /dev/null
+++ b/src/nix/command.hh
@@ -0,0 +1,149 @@
+#pragma once
+
+#include "args.hh"
+
+namespace nix {
+
+struct Value;
+class EvalState;
+
+/* A command is an argument parser that can be executed by calling its
+ run() method. */
+struct Command : virtual Args
+{
+ virtual std::string name() = 0;
+ virtual void prepare() { };
+ virtual void run() = 0;
+
+ struct Example
+ {
+ std::string description;
+ std::string command;
+ };
+
+ typedef std::list<Example> Examples;
+
+ virtual Examples examples() { return Examples(); }
+
+ void printHelp(const string & programName, std::ostream & out) override;
+};
+
+class Store;
+
+/* A command that requires a Nix store. */
+struct StoreCommand : virtual Command
+{
+ std::string storeUri;
+ StoreCommand();
+ void run() override;
+ ref<Store> getStore();
+ virtual ref<Store> createStore();
+ virtual void run(ref<Store>) = 0;
+
+private:
+ std::shared_ptr<Store> _store;
+};
+
+struct Installable
+{
+ virtual std::string what() = 0;
+
+ virtual PathSet toBuildable()
+ {
+ throw Error("argument ‘%s’ cannot be built", what());
+ }
+
+ virtual Value * toValue(EvalState & state)
+ {
+ throw Error("argument ‘%s’ cannot be evaluated", what());
+ }
+};
+
+/* A command that operates on a list of "installables", which can be
+ store paths, attribute paths, Nix expressions, etc. */
+struct InstallablesCommand : virtual Args, StoreCommand
+{
+ std::vector<std::shared_ptr<Installable>> installables;
+ Path file;
+
+ InstallablesCommand()
+ {
+ mkFlag('f', "file", "file", "evaluate FILE rather than the default", &file);
+ expectArgs("installables", &_installables);
+ }
+
+ /* Return a value representing the Nix expression from which we
+ are installing. This is either the file specified by ‘--file’,
+ or an attribute set constructed from $NIX_PATH, e.g. ‘{ nixpkgs
+ = import ...; bla = import ...; }’. */
+ Value * getSourceExpr(EvalState & state);
+
+ std::vector<std::shared_ptr<Installable>> parseInstallables(ref<Store> store, Strings installables);
+
+ PathSet buildInstallables(ref<Store> store, bool dryRun);
+
+ ref<EvalState> getEvalState();
+
+ void prepare() override;
+
+private:
+
+ Strings _installables;
+
+ std::shared_ptr<EvalState> evalState;
+
+ Value * vSourceExpr = 0;
+};
+
+/* A command that operates on zero or more store paths. */
+struct StorePathsCommand : public InstallablesCommand
+{
+private:
+
+ bool recursive = false;
+ bool all = false;
+
+public:
+
+ StorePathsCommand();
+
+ using StoreCommand::run;
+
+ virtual void run(ref<Store> store, Paths storePaths) = 0;
+
+ void run(ref<Store> store) override;
+};
+
+typedef std::map<std::string, ref<Command>> Commands;
+
+/* An argument parser that supports multiple subcommands,
+ i.e. ‘<command> <subcommand>’. */
+class MultiCommand : virtual Args
+{
+public:
+ Commands commands;
+
+ std::shared_ptr<Command> command;
+
+ MultiCommand(const Commands & commands);
+
+ void printHelp(const string & programName, std::ostream & out) override;
+
+ bool processFlag(Strings::iterator & pos, Strings::iterator end) override;
+
+ bool processArgs(const Strings & args, bool finish) override;
+};
+
+/* A helper class for registering commands globally. */
+struct RegisterCommand
+{
+ static Commands * commands;
+
+ RegisterCommand(ref<Command> command)
+ {
+ if (!commands) commands = new Commands;
+ commands->emplace(command->name(), command);
+ }
+};
+
+}
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
new file mode 100644
index 000000000..b2165cb8f
--- /dev/null
+++ b/src/nix/copy.cc
@@ -0,0 +1,57 @@
+#include "command.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "sync.hh"
+#include "thread-pool.hh"
+
+#include <atomic>
+
+using namespace nix;
+
+struct CmdCopy : StorePathsCommand
+{
+ std::string srcUri, dstUri;
+
+ CmdCopy()
+ {
+ mkFlag(0, "from", "store-uri", "URI of the source Nix store", &srcUri);
+ mkFlag(0, "to", "store-uri", "URI of the destination Nix store", &dstUri);
+ }
+
+ std::string name() override
+ {
+ return "copy";
+ }
+
+ std::string description() override
+ {
+ return "copy paths between Nix stores";
+ }
+
+ Examples examples() override
+ {
+ return {
+ Example{
+ "To copy Firefox to the local store to a binary cache in file:///tmp/cache:",
+ "nix copy --to file:///tmp/cache -r $(type -p firefox)"
+ },
+ };
+ }
+
+ ref<Store> createStore() override
+ {
+ return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri);
+ }
+
+ void run(ref<Store> srcStore, Paths storePaths) override
+ {
+ if (srcUri.empty() && dstUri.empty())
+ throw UsageError("you must pass ‘--from’ and/or ‘--to’");
+
+ ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
+
+ copyPaths(srcStore, dstStore, PathSet(storePaths.begin(), storePaths.end()));
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdCopy>());
diff --git a/src/nix/eval.cc b/src/nix/eval.cc
new file mode 100644
index 000000000..eb2b13a2d
--- /dev/null
+++ b/src/nix/eval.cc
@@ -0,0 +1,43 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "eval.hh"
+#include "json.hh"
+#include "value-to-json.hh"
+
+using namespace nix;
+
+struct CmdEval : MixJSON, InstallablesCommand
+{
+ std::string name() override
+ {
+ return "eval";
+ }
+
+ std::string description() override
+ {
+ return "evaluate a Nix expression";
+ }
+
+ void run(ref<Store> store) override
+ {
+ auto state = getEvalState();
+
+ auto jsonOut = json ? std::make_unique<JSONList>(std::cout) : nullptr;
+
+ for (auto & i : installables) {
+ auto v = i->toValue(*state);
+ if (json) {
+ PathSet context;
+ auto jsonElem = jsonOut->placeholder();
+ printValueAsJSON(*state, true, *v, jsonElem, context);
+ } else {
+ state->forceValueDeep(*v);
+ std::cout << *v << "\n";
+ }
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdEval>());
diff --git a/src/nix/hash.cc b/src/nix/hash.cc
new file mode 100644
index 000000000..5dd891e8a
--- /dev/null
+++ b/src/nix/hash.cc
@@ -0,0 +1,140 @@
+#include "command.hh"
+#include "hash.hh"
+#include "legacy.hh"
+#include "shared.hh"
+
+using namespace nix;
+
+struct CmdHash : Command
+{
+ enum Mode { mFile, mPath };
+ Mode mode;
+ bool base32 = false;
+ bool truncate = false;
+ HashType ht = htSHA512;
+ Strings paths;
+
+ CmdHash(Mode mode) : mode(mode)
+ {
+ mkFlag(0, "base32", "print hash in base-32", &base32);
+ mkFlag(0, "base16", "print hash in base-16", &base32, false);
+ mkHashTypeFlag("type", &ht);
+ expectArgs("paths", &paths);
+ }
+
+ std::string name() override
+ {
+ return mode == mFile ? "hash-file" : "hash-path";
+ }
+
+ std::string description() override
+ {
+ return mode == mFile
+ ? "print cryptographic hash of a regular file"
+ : "print cryptographic hash of the NAR serialisation of a path";
+ }
+
+ void run() override
+ {
+ for (auto path : paths) {
+ Hash h = mode == mFile ? hashFile(ht, path) : hashPath(ht, path).first;
+ if (truncate && h.hashSize > 20) h = compressHash(h, 20);
+ std::cout << format("%1%\n") %
+ (base32 ? printHash32(h) : printHash(h));
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdHash>(CmdHash::mFile));
+static RegisterCommand r2(make_ref<CmdHash>(CmdHash::mPath));
+
+struct CmdToBase : Command
+{
+ bool toBase32;
+ HashType ht = htSHA512;
+ Strings args;
+
+ CmdToBase(bool toBase32) : toBase32(toBase32)
+ {
+ mkHashTypeFlag("type", &ht);
+ expectArgs("strings", &args);
+ }
+
+ std::string name() override
+ {
+ return toBase32 ? "to-base32" : "to-base16";
+ }
+
+ std::string description() override
+ {
+ return toBase32
+ ? "convert a hash to base-32 representation"
+ : "convert a hash to base-16 representation";
+ }
+
+ void run() override
+ {
+ for (auto s : args) {
+ Hash h = parseHash16or32(ht, s);
+ std::cout << format("%1%\n") %
+ (toBase32 ? printHash32(h) : printHash(h));
+ }
+ }
+};
+
+static RegisterCommand r3(make_ref<CmdToBase>(false));
+static RegisterCommand r4(make_ref<CmdToBase>(true));
+
+/* Legacy nix-hash command. */
+static int compatNixHash(int argc, char * * argv)
+{
+ HashType ht = htMD5;
+ bool flat = false;
+ bool base32 = false;
+ bool truncate = false;
+ enum { opHash, opTo32, opTo16 } op = opHash;
+ Strings ss;
+
+ parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+ if (*arg == "--help")
+ showManPage("nix-hash");
+ else if (*arg == "--version")
+ printVersion("nix-hash");
+ else if (*arg == "--flat") flat = true;
+ else if (*arg == "--base32") base32 = true;
+ else if (*arg == "--truncate") truncate = true;
+ else if (*arg == "--type") {
+ string s = getArg(*arg, arg, end);
+ ht = parseHashType(s);
+ if (ht == htUnknown)
+ throw UsageError(format("unknown hash type ‘%1%’") % s);
+ }
+ else if (*arg == "--to-base16") op = opTo16;
+ else if (*arg == "--to-base32") op = opTo32;
+ else if (*arg != "" && arg->at(0) == '-')
+ return false;
+ else
+ ss.push_back(*arg);
+ return true;
+ });
+
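+    /* Dispatch to the new-style hash commands with the collected legacy options. */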
+ if (op == opHash) {
+ CmdHash cmd(flat ? CmdHash::mFile : CmdHash::mPath);
+ cmd.ht = ht;
+ cmd.base32 = base32;
+ cmd.truncate = truncate;
+ cmd.paths = ss;
+ cmd.run();
+ }
+
+ else {
+ CmdToBase cmd(op == opTo32);
+ cmd.args = ss;
+ cmd.ht = ht;
+ cmd.run();
+ }
+
+ return 0;
+}
+
+static RegisterLegacyCommand s1("nix-hash", compatNixHash);
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
new file mode 100644
index 000000000..57580049f
--- /dev/null
+++ b/src/nix/installables.cc
@@ -0,0 +1,255 @@
+#include "command.hh"
+#include "attr-path.hh"
+#include "common-opts.hh"
+#include "derivations.hh"
+#include "eval-inline.hh"
+#include "eval.hh"
+#include "get-drvs.hh"
+#include "store-api.hh"
+#include "shared.hh"
+
+#include <regex>
+
+namespace nix {
+
+Value * InstallablesCommand::getSourceExpr(EvalState & state)
+{
+ if (vSourceExpr) return vSourceExpr;
+
+ vSourceExpr = state.allocValue();
+
+ if (file != "") {
+ Expr * e = state.parseExprFromFile(resolveExprPath(lookupFileArg(state, file)));
+ state.eval(e, *vSourceExpr);
+ }
+
+ else {
+
+ /* Construct the installation source from $NIX_PATH. */
+
+ auto searchPath = state.getSearchPath();
+
+ state.mkAttrs(*vSourceExpr, searchPath.size());
+
+ std::unordered_set<std::string> seen;
+
+ for (auto & i : searchPath) {
+ if (i.first == "") continue;
+ if (seen.count(i.first)) continue;
+ seen.insert(i.first);
+#if 0
+ auto res = state.resolveSearchPathElem(i);
+ if (!res.first) continue;
+ if (!pathExists(res.second)) continue;
+ mkApp(*state.allocAttr(*vSourceExpr, state.symbols.create(i.first)),
+ state.getBuiltin("import"),
+ mkString(*state.allocValue(), res.second));
+#endif
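+            /* Equivalent to ‘<name> = import (findFile nixPath "<name>")’, so each search-path entry is resolved lazily. */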
+ Value * v1 = state.allocValue();
+ mkPrimOpApp(*v1, state.getBuiltin("findFile"), state.getBuiltin("nixPath"));
+ Value * v2 = state.allocValue();
+ mkApp(*v2, *v1, mkString(*state.allocValue(), i.first));
+ mkApp(*state.allocAttr(*vSourceExpr, state.symbols.create(i.first)),
+ state.getBuiltin("import"), *v2);
+ }
+
+ vSourceExpr->attrs->sort();
+ }
+
+ return vSourceExpr;
+}
+
+struct InstallableStoreDrv : Installable
+{
+ Path storePath;
+
+ InstallableStoreDrv(const Path & storePath) : storePath(storePath) { }
+
+ std::string what() override { return storePath; }
+
+ PathSet toBuildable() override
+ {
+ return {storePath};
+ }
+};
+
+struct InstallableStorePath : Installable
+{
+ Path storePath;
+
+ InstallableStorePath(const Path & storePath) : storePath(storePath) { }
+
+ std::string what() override { return storePath; }
+
+ PathSet toBuildable() override
+ {
+ return {storePath};
+ }
+};
+
+struct InstallableExpr : Installable
+{
+ InstallablesCommand & installables;
+ std::string text;
+
+ InstallableExpr(InstallablesCommand & installables, const std::string & text)
+ : installables(installables), text(text) { }
+
+ std::string what() override { return text; }
+
+ PathSet toBuildable() override
+ {
+ auto state = installables.getEvalState();
+
+ auto v = toValue(*state);
+
+ // FIXME
+ std::map<string, string> autoArgs_;
+ Bindings & autoArgs(*evalAutoArgs(*state, autoArgs_));
+
+ DrvInfos drvs;
+ getDerivations(*state, *v, "", autoArgs, drvs, false);
+
+ PathSet res;
+
+ for (auto & i : drvs)
+ res.insert(i.queryDrvPath());
+
+ return res;
+ }
+
+ Value * toValue(EvalState & state) override
+ {
+ auto v = state.allocValue();
+ state.eval(state.parseExprFromString(text, absPath(".")), *v);
+ return v;
+ }
+};
+
+struct InstallableAttrPath : Installable
+{
+ InstallablesCommand & installables;
+ std::string attrPath;
+
+ InstallableAttrPath(InstallablesCommand & installables, const std::string & attrPath)
+ : installables(installables), attrPath(attrPath)
+ { }
+
+ std::string what() override { return attrPath; }
+
+ PathSet toBuildable() override
+ {
+ auto state = installables.getEvalState();
+
+ auto v = toValue(*state);
+
+ // FIXME
+ std::map<string, string> autoArgs_;
+ Bindings & autoArgs(*evalAutoArgs(*state, autoArgs_));
+
+ DrvInfos drvs;
+ getDerivations(*state, *v, "", autoArgs, drvs, false);
+
+ PathSet res;
+
+ for (auto & i : drvs)
+ res.insert(i.queryDrvPath());
+
+ return res;
+ }
+
+ Value * toValue(EvalState & state) override
+ {
+ auto source = installables.getSourceExpr(state);
+
+ // FIXME
+ std::map<string, string> autoArgs_;
+ Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
+
+ Value * v = findAlongAttrPath(state, attrPath, autoArgs, *source);
+ state.forceValue(*v);
+
+ return v;
+ }
+};
+
+// FIXME: extend
+std::string attrRegex = R"([A-Za-z_][A-Za-z0-9-_+]*)";
+static std::regex attrPathRegex(fmt(R"(%1%(\.%1%)*)", attrRegex));
+
+std::vector<std::shared_ptr<Installable>> InstallablesCommand::parseInstallables(ref<Store> store, Strings installables)
+{
+ std::vector<std::shared_ptr<Installable>> result;
+
+ if (installables.empty()) {
+ if (file == "")
+ file = ".";
+ installables = Strings{""};
+ }
+
+ for (auto & installable : installables) {
+
+ if (installable.find("/") != std::string::npos) {
+
+ auto path = store->toStorePath(store->followLinksToStore(installable));
+
+ if (store->isStorePath(path)) {
+ if (isDerivation(path))
+ result.push_back(std::make_shared<InstallableStoreDrv>(path));
+ else
+ result.push_back(std::make_shared<InstallableStorePath>(path));
+ }
+ }
+
+ else if (installable.compare(0, 1, "(") == 0)
+ result.push_back(std::make_shared<InstallableExpr>(*this, installable));
+
+ else if (installable == "" || std::regex_match(installable, attrPathRegex))
+ result.push_back(std::make_shared<InstallableAttrPath>(*this, installable));
+
+ else
+ throw UsageError("don't know what to do with argument ‘%s’", installable);
+ }
+
+ return result;
+}
+
+PathSet InstallablesCommand::buildInstallables(ref<Store> store, bool dryRun)
+{
+ PathSet buildables;
+
+ for (auto & i : installables) {
+ auto b = i->toBuildable();
+ buildables.insert(b.begin(), b.end());
+ }
+
+ printMissing(store, buildables);
+
+ if (!dryRun)
+ store->buildPaths(buildables);
+
+ PathSet outPaths;
+ for (auto & path : buildables)
+ if (isDerivation(path)) {
+ Derivation drv = store->derivationFromPath(path);
+ for (auto & output : drv.outputs)
+ outPaths.insert(output.second.path);
+ } else
+ outPaths.insert(path);
+
+ return outPaths;
+}
+
+ref<EvalState> InstallablesCommand::getEvalState()
+{
+ if (!evalState)
+ evalState = std::make_shared<EvalState>(Strings{}, getStore());
+ return ref<EvalState>(evalState);
+}
+
+void InstallablesCommand::prepare()
+{
+ installables = parseInstallables(getStore(), _installables);
+}
+
+}
diff --git a/src/nix/legacy.cc b/src/nix/legacy.cc
new file mode 100644
index 000000000..6df09ee37
--- /dev/null
+++ b/src/nix/legacy.cc
@@ -0,0 +1,7 @@
+#include "legacy.hh"
+
+namespace nix {
+
+RegisterLegacyCommand::Commands * RegisterLegacyCommand::commands = 0;
+
+}
diff --git a/src/nix/legacy.hh b/src/nix/legacy.hh
new file mode 100644
index 000000000..f503b0da3
--- /dev/null
+++ b/src/nix/legacy.hh
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <functional>
+#include <map>
+#include <string>
+
+namespace nix {
+
+typedef std::function<void(int, char * *)> MainFunction;
+
+struct RegisterLegacyCommand
+{
+ typedef std::map<std::string, MainFunction> Commands;
+ static Commands * commands;
+
+ RegisterLegacyCommand(const std::string & name, MainFunction fun)
+ {
+ if (!commands) commands = new Commands;
+ (*commands)[name] = fun;
+ }
+};
+
+}
diff --git a/src/nix/local.mk b/src/nix/local.mk
new file mode 100644
index 000000000..f6e7073b6
--- /dev/null
+++ b/src/nix/local.mk
@@ -0,0 +1,9 @@
+programs += nix
+
+nix_DIR := $(d)
+
+nix_SOURCES := $(wildcard $(d)/*.cc)
+
+nix_LIBS = libexpr libmain libstore libutil libformat
+
+$(eval $(call install-symlink, nix, $(bindir)/nix-hash))
diff --git a/src/nix/log.cc b/src/nix/log.cc
new file mode 100644
index 000000000..ed610261d
--- /dev/null
+++ b/src/nix/log.cc
@@ -0,0 +1,47 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+struct CmdLog : InstallablesCommand
+{
+ CmdLog()
+ {
+ }
+
+ std::string name() override
+ {
+ return "log";
+ }
+
+ std::string description() override
+ {
+ return "show the build log of the specified packages or paths";
+ }
+
+ void run(ref<Store> store) override
+ {
+ auto subs = getDefaultSubstituters();
+
+ subs.push_front(store);
+
+ for (auto & inst : installables) {
+ for (auto & path : inst->toBuildable()) {
+ bool found = false;
+ for (auto & sub : subs) {
+ auto log = sub->getBuildLog(path);
+ if (!log) continue;
+ std::cout << *log;
+ found = true;
+ break;
+ }
+ if (!found)
+ throw Error("build log of path ‘%s’ is not available", path);
+ }
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdLog>());
diff --git a/src/nix/ls.cc b/src/nix/ls.cc
new file mode 100644
index 000000000..3476dfb05
--- /dev/null
+++ b/src/nix/ls.cc
@@ -0,0 +1,123 @@
+#include "command.hh"
+#include "store-api.hh"
+#include "fs-accessor.hh"
+#include "nar-accessor.hh"
+
+using namespace nix;
+
+struct MixLs : virtual Args
+{
+ std::string path;
+
+ bool recursive = false;
+ bool verbose = false;
+ bool showDirectory = false;
+
+ MixLs()
+ {
+ mkFlag('R', "recursive", "list subdirectories recursively", &recursive);
+ mkFlag('l', "long", "show more file information", &verbose);
+ mkFlag('d', "directory", "show directories rather than their contents", &showDirectory);
+ }
+
+ void list(ref<FSAccessor> accessor)
+ {
+ std::function<void(const FSAccessor::Stat &, const Path &, const std::string &, bool)> doPath;
+
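+        /* showFile and doPath are mutually recursive: doPath lists a directory's entries via showFile, which in turn descends into subdirectories when listing recursively. */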
+ auto showFile = [&](const Path & curPath, const std::string & relPath) {
+ if (verbose) {
+ auto st = accessor->stat(curPath);
+ std::string tp =
+ st.type == FSAccessor::Type::tRegular ?
+ (st.isExecutable ? "-r-xr-xr-x" : "-r--r--r--") :
+ st.type == FSAccessor::Type::tSymlink ? "lrwxrwxrwx" :
+ "dr-xr-xr-x";
+ std::cout <<
+ (format("%s %20d %s") % tp % st.fileSize % relPath);
+ if (st.type == FSAccessor::Type::tSymlink)
+ std::cout << " -> " << accessor->readLink(curPath)
+ ;
+ std::cout << "\n";
+ if (recursive && st.type == FSAccessor::Type::tDirectory)
+ doPath(st, curPath, relPath, false);
+ } else {
+ std::cout << relPath << "\n";
+ if (recursive) {
+ auto st = accessor->stat(curPath);
+ if (st.type == FSAccessor::Type::tDirectory)
+ doPath(st, curPath, relPath, false);
+ }
+ }
+ };
+
+        doPath = [&](const FSAccessor::Stat & st, const Path & curPath,
+ const std::string & relPath, bool showDirectory)
+ {
+ if (st.type == FSAccessor::Type::tDirectory && !showDirectory) {
+ auto names = accessor->readDirectory(curPath);
+ for (auto & name : names)
+ showFile(curPath + "/" + name, relPath + "/" + name);
+ } else
+ showFile(curPath, relPath);
+ };
+
+ auto st = accessor->stat(path);
+ if (st.type == FSAccessor::Type::tMissing)
+ throw Error(format("path ‘%1%’ does not exist") % path);
+ doPath(st, path,
+ st.type == FSAccessor::Type::tDirectory ? "." : baseNameOf(path),
+ showDirectory);
+ }
+};
+
+struct CmdLsStore : StoreCommand, MixLs
+{
+ CmdLsStore()
+ {
+ expectArg("path", &path);
+ }
+
+ std::string name() override
+ {
+ return "ls-store";
+ }
+
+ std::string description() override
+ {
+ return "show information about a store path";
+ }
+
+ void run(ref<Store> store) override
+ {
+ list(store->getFSAccessor());
+ }
+};
+
+struct CmdLsNar : Command, MixLs
+{
+ Path narPath;
+
+ CmdLsNar()
+ {
+ expectArg("nar", &narPath);
+ expectArg("path", &path);
+ }
+
+ std::string name() override
+ {
+ return "ls-nar";
+ }
+
+ std::string description() override
+ {
+ return "show information about the contents of a NAR file";
+ }
+
+ void run() override
+ {
+ list(makeNarAccessor(make_ref<std::string>(readFile(narPath))));
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdLsStore>());
+static RegisterCommand r2(make_ref<CmdLsNar>());
diff --git a/src/nix/main.cc b/src/nix/main.cc
new file mode 100644
index 000000000..440ced97d
--- /dev/null
+++ b/src/nix/main.cc
@@ -0,0 +1,61 @@
+#include <algorithm>
+
+#include "command.hh"
+#include "common-args.hh"
+#include "eval.hh"
+#include "globals.hh"
+#include "legacy.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "progress-bar.hh"
+
+namespace nix {
+
+struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
+{
+ NixArgs() : MultiCommand(*RegisterCommand::commands), MixCommonArgs("nix")
+ {
+ mkFlag('h', "help", "show usage information", [=]() {
+ printHelp(programName, std::cout);
+ std::cout << "\nNote: this program is EXPERIMENTAL and subject to change.\n";
+ throw Exit();
+ });
+
+ mkFlag(0, "version", "show version information", std::bind(printVersion, programName));
+ }
+};
+
+void mainWrapped(int argc, char * * argv)
+{
+ settings.verboseBuild = false;
+
+ initNix();
+ initGC();
+
+ string programName = baseNameOf(argv[0]);
+
+ {
+ auto legacy = (*RegisterLegacyCommand::commands)[programName];
+ if (legacy) return legacy(argc, argv);
+ }
+
+ NixArgs args;
+
+ args.parseCmdline(argvToStrings(argc, argv));
+
+ assert(args.command);
+
+ StartProgressBar bar;
+
+ args.command->prepare();
+ args.command->run();
+}
+
+}
+
+int main(int argc, char * * argv)
+{
+ return nix::handleExceptions(argv[0], [&]() {
+ nix::mainWrapped(argc, argv);
+ });
+}
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
new file mode 100644
index 000000000..f16209238
--- /dev/null
+++ b/src/nix/path-info.cc
@@ -0,0 +1,105 @@
+#include "command.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "json.hh"
+#include "common-args.hh"
+
+#include <iomanip>
+#include <algorithm>
+
+using namespace nix;
+
+struct CmdPathInfo : StorePathsCommand, MixJSON
+{
+ bool showSize = false;
+ bool showClosureSize = false;
+ bool showSigs = false;
+
+ CmdPathInfo()
+ {
+ mkFlag('s', "size", "print size of the NAR dump of each path", &showSize);
+ mkFlag('S', "closure-size", "print sum size of the NAR dumps of the closure of each path", &showClosureSize);
+ mkFlag(0, "sigs", "show signatures", &showSigs);
+ }
+
+ std::string name() override
+ {
+ return "path-info";
+ }
+
+ std::string description() override
+ {
+ return "query information about store paths";
+ }
+
+ Examples examples() override
+ {
+ return {
+ Example{
+ "To show the closure sizes of every path in the current NixOS system closure, sorted by size:",
+ "nix path-info -rS /run/current-system | sort -nk2"
+ },
+ Example{
+ "To check the existence of a path in a binary cache:",
+ "nix path-info -r /nix/store/7qvk5c91...-geeqie-1.1 --store https://cache.nixos.org/"
+ },
+ Example{
+ "To print the 10 most recently added paths (using --json and the jq(1) command):",
+ "nix path-info --json --all | jq -r 'sort_by(.registrationTime)[-11:-1][].path'"
+ },
+ Example{
+ "To show the size of the entire Nix store:",
+ "nix path-info --json --all | jq 'map(.narSize) | add'"
+ },
+ Example{
+ "To show every path whose closure is bigger than 1 GB, sorted by closure size:",
+ "nix path-info --json --all -S | jq 'map(select(.closureSize > 1e9)) | sort_by(.closureSize) | map([.path, .closureSize])'"
+ },
+ };
+ }
+
+ void run(ref<Store> store, Paths storePaths) override
+ {
+ size_t pathLen = 0;
+ for (auto & storePath : storePaths)
+ pathLen = std::max(pathLen, storePath.size());
+
+ if (json) {
+ JSONPlaceholder jsonRoot(std::cout, true);
+ store->pathInfoToJSON(jsonRoot,
+ // FIXME: preserve order?
+ PathSet(storePaths.begin(), storePaths.end()),
+ true, showClosureSize);
+ }
+
+ else {
+
+ for (auto storePath : storePaths) {
+ auto info = store->queryPathInfo(storePath);
+ storePath = info->path; // FIXME: screws up padding
+
+ std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');
+
+ if (showSize)
+ std::cout << '\t' << std::setw(11) << info->narSize;
+
+ if (showClosureSize)
+ std::cout << '\t' << std::setw(11) << store->getClosureSize(storePath);
+
+ if (showSigs) {
+ std::cout << '\t';
+ Strings ss;
+ if (info->ultimate) ss.push_back("ultimate");
+ if (info->ca != "") ss.push_back("ca:" + info->ca);
+ for (auto & sig : info->sigs) ss.push_back(sig);
+ std::cout << concatStringsSep(" ", ss);
+ }
+
+ std::cout << std::endl;
+ }
+
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdPathInfo>());
diff --git a/src/nix/progress-bar.cc b/src/nix/progress-bar.cc
new file mode 100644
index 000000000..69811b282
--- /dev/null
+++ b/src/nix/progress-bar.cc
@@ -0,0 +1,157 @@
+#include "progress-bar.hh"
+#include "util.hh"
+#include "sync.hh"
+
+#include <map>
+
+namespace nix {
+
+class ProgressBar : public Logger
+{
+private:
+
+ struct ActInfo
+ {
+ Activity * activity;
+ Verbosity lvl;
+ std::string s;
+ };
+
+ struct Progress
+ {
+ uint64_t expected = 0, progress = 0;
+ };
+
+ struct State
+ {
+ std::list<ActInfo> activities;
+ std::map<Activity *, std::list<ActInfo>::iterator> its;
+ std::map<std::string, Progress> progress;
+ };
+
+ Sync<State> state_;
+
+public:
+
+ ~ProgressBar()
+ {
+ auto state(state_.lock());
+ assert(state->activities.empty());
+ writeToStderr("\r\e[K");
+ }
+
+ void log(Verbosity lvl, const FormatOrString & fs) override
+ {
+ auto state(state_.lock());
+ log(*state, lvl, fs.s);
+ }
+
+ void log(State & state, Verbosity lvl, const std::string & s)
+ {
+ writeToStderr("\r\e[K" + s + "\n");
+ update(state);
+ }
+
+ void startActivity(Activity & activity, Verbosity lvl, const FormatOrString & fs) override
+ {
+ if (lvl > verbosity) return;
+ auto state(state_.lock());
+ state->activities.emplace_back(ActInfo{&activity, lvl, fs.s});
+ state->its.emplace(&activity, std::prev(state->activities.end()));
+ update(*state);
+ }
+
+ void stopActivity(Activity & activity) override
+ {
+ auto state(state_.lock());
+ auto i = state->its.find(&activity);
+ if (i == state->its.end()) return;
+ state->activities.erase(i->second);
+ state->its.erase(i);
+ update(*state);
+ }
+
+ void setExpected(const std::string & label, uint64_t value) override
+ {
+ auto state(state_.lock());
+ state->progress[label].expected = value;
+ }
+
+ void setProgress(const std::string & label, uint64_t value) override
+ {
+ auto state(state_.lock());
+ state->progress[label].progress = value;
+ }
+
+ void incExpected(const std::string & label, uint64_t value) override
+ {
+ auto state(state_.lock());
+ state->progress[label].expected += value;
+ }
+
+ void incProgress(const std::string & label, uint64_t value) override
+ {
+ auto state(state_.lock());
+ state->progress[label].progress += value;
+ }
+
+ void update()
+ {
+ auto state(state_.lock());
+ }
+
+ void update(State & state)
+ {
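+        /* Redraw the status line in place: carriage return, the progress summary, the most recent activity, then clear to the end of the line. */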
+ std::string line = "\r";
+
+ std::string status = getStatus(state);
+ if (!status.empty()) {
+ line += '[';
+ line += status;
+ line += "]";
+ }
+
+ if (!state.activities.empty()) {
+ if (!status.empty()) line += " ";
+ line += state.activities.rbegin()->s;
+ }
+
+ line += "\e[K";
+ writeToStderr(line);
+ }
+
+ std::string getStatus(State & state)
+ {
+ std::string res;
+ for (auto & p : state.progress)
+ if (p.second.expected || p.second.progress) {
+ if (!res.empty()) res += ", ";
+ res += std::to_string(p.second.progress);
+ if (p.second.expected) {
+ res += "/";
+ res += std::to_string(p.second.expected);
+ }
+ res += " "; res += p.first;
+ }
+ return res;
+ }
+};
+
+StartProgressBar::StartProgressBar()
+{
+ if (isatty(STDERR_FILENO)) {
+ prev = logger;
+ logger = new ProgressBar();
+ }
+}
+
+StartProgressBar::~StartProgressBar()
+{
+ if (prev) {
+ auto bar = logger;
+ logger = prev;
+ delete bar;
+ }
+}
+
+}
diff --git a/src/nix/progress-bar.hh b/src/nix/progress-bar.hh
new file mode 100644
index 000000000..d2e44f7c4
--- /dev/null
+++ b/src/nix/progress-bar.hh
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "logging.hh"
+
+namespace nix {
+
+class StartProgressBar
+{
+ Logger * prev = 0;
+public:
+ StartProgressBar();
+ ~StartProgressBar();
+};
+
+}
diff --git a/src/nix/run.cc b/src/nix/run.cc
new file mode 100644
index 000000000..bcfa74eb5
--- /dev/null
+++ b/src/nix/run.cc
@@ -0,0 +1,104 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+#include "local-store.hh"
+#include "finally.hh"
+
+#if __linux__
+#include <sys/mount.h>
+#endif
+
+using namespace nix;
+
+struct CmdRun : InstallablesCommand
+{
+ CmdRun()
+ {
+ }
+
+ std::string name() override
+ {
+ return "run";
+ }
+
+ std::string description() override
+ {
+ return "run a shell in which the specified packages are available";
+ }
+
+ void run(ref<Store> store) override
+ {
+ auto outPaths = buildInstallables(store, false);
+
+ auto store2 = store.dynamic_pointer_cast<LocalStore>();
+
+ if (store2 && store->storeDir != store2->realStoreDir) {
+#if __linux__
+ uid_t uid = getuid();
+ uid_t gid = getgid();
+
+ if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
+ throw SysError("setting up a private mount namespace");
+
+ /* Bind-mount realStoreDir on /nix/store. If the latter
+           mount point doesn't already exist, we have to create a
+ chroot environment containing the mount point and bind
+ mounts for the children of /. Would be nice if we could
+ use overlayfs here, but that doesn't work in a user
+ namespace yet (Ubuntu has a patch for this:
+ https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1478578). */
+ if (!pathExists(store->storeDir)) {
+ // FIXME: Use overlayfs?
+
+ Path tmpDir = createTempDir();
+
+ createDirs(tmpDir + store->storeDir);
+
+ if (mount(store2->realStoreDir.c_str(), (tmpDir + store->storeDir).c_str(), "", MS_BIND, 0) == -1)
+ throw SysError(format("mounting ‘%s’ on ‘%s’") % store2->realStoreDir % store->storeDir);
+
+ for (auto entry : readDirectory("/")) {
+ Path dst = tmpDir + "/" + entry.name;
+ if (pathExists(dst)) continue;
+ if (mkdir(dst.c_str(), 0700) == -1)
+ throw SysError(format("creating directory ‘%s’") % dst);
+ if (mount(("/" + entry.name).c_str(), dst.c_str(), "", MS_BIND | MS_REC, 0) == -1)
+ throw SysError(format("mounting ‘%s’ on ‘%s’") % ("/" + entry.name) % dst);
+ }
+
+ char * cwd = getcwd(0, 0);
+ if (!cwd) throw SysError("getting current directory");
+ Finally freeCwd([&]() { free(cwd); });
+
+ if (chroot(tmpDir.c_str()) == -1)
+ throw SysError(format("chrooting into ‘%s’") % tmpDir);
+
+ if (chdir(cwd) == -1)
+ throw SysError(format("chdir to ‘%s’ in chroot") % cwd);
+ } else
+ if (mount(store2->realStoreDir.c_str(), store->storeDir.c_str(), "", MS_BIND, 0) == -1)
+ throw SysError(format("mounting ‘%s’ on ‘%s’") % store2->realStoreDir % store->storeDir);
+
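+            /* Map the invoking user's uid and gid into the new user namespace so file ownership appears unchanged. */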
+ writeFile("/proc/self/setgroups", "deny");
+ writeFile("/proc/self/uid_map", (format("%d %d %d") % uid % uid % 1).str());
+ writeFile("/proc/self/gid_map", (format("%d %d %d") % gid % gid % 1).str());
+#else
+ throw Error(format("mounting the Nix store on ‘%s’ is not supported on this platform") % store->storeDir);
+#endif
+ }
+
+
+ auto unixPath = tokenizeString<Strings>(getEnv("PATH"), ":");
+ for (auto & path : outPaths)
+ if (pathExists(path + "/bin"))
+ unixPath.push_front(path + "/bin");
+ setenv("PATH", concatStringsSep(":", unixPath).c_str(), 1);
+
+ if (execlp("bash", "bash", nullptr) == -1)
+ throw SysError("unable to exec ‘bash’");
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdRun>());
diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc
new file mode 100644
index 000000000..c628c2898
--- /dev/null
+++ b/src/nix/show-config.cc
@@ -0,0 +1,38 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "json.hh"
+
+using namespace nix;
+
+struct CmdShowConfig : Command, MixJSON
+{
+ CmdShowConfig()
+ {
+ }
+
+ std::string name() override
+ {
+ return "show-config";
+ }
+
+ std::string description() override
+ {
+ return "show the Nix configuration";
+ }
+
+ void run() override
+ {
+ if (json) {
+ // FIXME: use appropriate JSON types (bool, ints, etc).
+ JSONObject jsonObj(std::cout, true);
+ settings.toJSON(jsonObj);
+ } else {
+ for (auto & s : settings.getSettings())
+ std::cout << s.first + " = " + s.second + "\n";
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdShowConfig>());
diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc
new file mode 100644
index 000000000..d8d8c0f53
--- /dev/null
+++ b/src/nix/sigs.cc
@@ -0,0 +1,139 @@
+#include "command.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "thread-pool.hh"
+
+#include <atomic>
+
+using namespace nix;
+
+struct CmdCopySigs : StorePathsCommand
+{
+ Strings substituterUris;
+
+ CmdCopySigs()
+ {
+ mkFlag('s', "substituter", {"store-uri"}, "use signatures from specified store", 1,
+ [&](Strings ss) { substituterUris.push_back(ss.front()); });
+ }
+
+ std::string name() override
+ {
+ return "copy-sigs";
+ }
+
+ std::string description() override
+ {
+ return "copy path signatures from substituters (like binary caches)";
+ }
+
+ void run(ref<Store> store, Paths storePaths) override
+ {
+ if (substituterUris.empty())
+ throw UsageError("you must specify at least one substituter using ‘-s’");
+
+ // FIXME: factor out commonality with MixVerify.
+ std::vector<ref<Store>> substituters;
+ for (auto & s : substituterUris)
+ substituters.push_back(openStore(s));
+
+ ThreadPool pool;
+
+ std::string doneLabel = "done";
+ std::atomic<size_t> added{0};
+
+ logger->setExpected(doneLabel, storePaths.size());
+
+ auto doPath = [&](const Path & storePath) {
+ Activity act(*logger, lvlInfo, format("getting signatures for ‘%s’") % storePath);
+
+ checkInterrupt();
+
+ auto info = store->queryPathInfo(storePath);
+
+ StringSet newSigs;
+
+ for (auto & store2 : substituters) {
+ try {
+ auto info2 = store2->queryPathInfo(storePath);
+
+ /* Don't import signatures that don't match this
+ binary. */
+ if (info->narHash != info2->narHash ||
+ info->narSize != info2->narSize ||
+ info->references != info2->references)
+ continue;
+
+ for (auto & sig : info2->sigs)
+ if (!info->sigs.count(sig))
+ newSigs.insert(sig);
+ } catch (InvalidPath &) {
+ }
+ }
+
+ if (!newSigs.empty()) {
+ store->addSignatures(storePath, newSigs);
+ added += newSigs.size();
+ }
+
+ logger->incProgress(doneLabel);
+ };
+
+ for (auto & storePath : storePaths)
+ pool.enqueue(std::bind(doPath, storePath));
+
+ pool.process();
+
+ printInfo(format("imported %d signatures") % added);
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdCopySigs>());
+
+struct CmdSignPaths : StorePathsCommand
+{
+ Path secretKeyFile;
+
+ CmdSignPaths()
+ {
+ mkFlag('k', "key-file", {"file"}, "file containing the secret signing key", &secretKeyFile);
+ }
+
+ std::string name() override
+ {
+ return "sign-paths";
+ }
+
+ std::string description() override
+ {
+ return "sign the specified paths";
+ }
+
+ void run(ref<Store> store, Paths storePaths) override
+ {
+ if (secretKeyFile.empty())
+ throw UsageError("you must specify a secret key file using ‘-k’");
+
+ SecretKey secretKey(readFile(secretKeyFile));
+
+ size_t added{0};
+
+ for (auto & storePath : storePaths) {
+ auto info = store->queryPathInfo(storePath);
+
+ auto info2(*info);
+ info2.sigs.clear();
+ info2.sign(secretKey);
+ assert(!info2.sigs.empty());
+
+ if (!info->sigs.count(*info2.sigs.begin())) {
+ store->addSignatures(storePath, info2.sigs);
+ added++;
+ }
+ }
+
+ printInfo(format("added %d signatures") % added);
+ }
+};
+
+static RegisterCommand r3(make_ref<CmdSignPaths>());
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
new file mode 100644
index 000000000..2f8d02fa0
--- /dev/null
+++ b/src/nix/verify.cc
@@ -0,0 +1,172 @@
+#include "command.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "sync.hh"
+#include "thread-pool.hh"
+
+#include <atomic>
+
+using namespace nix;
+
+struct CmdVerify : StorePathsCommand
+{
+ bool noContents = false;
+ bool noTrust = false;
+ Strings substituterUris;
+    size_t sigsNeeded = 0;
+
+ CmdVerify()
+ {
+ mkFlag(0, "no-contents", "do not verify the contents of each store path", &noContents);
+ mkFlag(0, "no-trust", "do not verify whether each store path is trusted", &noTrust);
+ mkFlag('s', "substituter", {"store-uri"}, "use signatures from specified store", 1,
+ [&](Strings ss) { substituterUris.push_back(ss.front()); });
+ mkIntFlag('n', "sigs-needed", "require that each path has at least N valid signatures", &sigsNeeded);
+ }
+
+ std::string name() override
+ {
+ return "verify";
+ }
+
+ std::string description() override
+ {
+ return "verify the integrity of store paths";
+ }
+
+ Examples examples() override
+ {
+ return {
+ Example{
+ "To verify the entire Nix store:",
+ "nix verify --all"
+ },
+ Example{
+ "To check whether each path in the closure of Firefox has at least 2 signatures:",
+ "nix verify -r -n2 --no-contents $(type -p firefox)"
+ },
+ };
+ }
+
+ void run(ref<Store> store, Paths storePaths) override
+ {
+ std::vector<ref<Store>> substituters;
+ for (auto & s : substituterUris)
+ substituters.push_back(openStore(s));
+
+ auto publicKeys = getDefaultPublicKeys();
+
+ std::atomic<size_t> done{0};
+ std::atomic<size_t> untrusted{0};
+ std::atomic<size_t> corrupted{0};
+ std::atomic<size_t> failed{0};
+
+ std::string doneLabel("paths checked");
+ std::string untrustedLabel("untrusted");
+ std::string corruptedLabel("corrupted");
+ std::string failedLabel("failed");
+ logger->setExpected(doneLabel, storePaths.size());
+
+ ThreadPool pool;
+
+ auto doPath = [&](const Path & storePath) {
+ try {
+ checkInterrupt();
+
+ Activity act(*logger, lvlInfo, format("checking ‘%s’") % storePath);
+
+ auto info = store->queryPathInfo(storePath);
+
+ if (!noContents) {
+
+ HashSink sink(info->narHash.type);
+ store->narFromPath(info->path, sink);
+
+ auto hash = sink.finish();
+
+ if (hash.first != info->narHash) {
+ logger->incProgress(corruptedLabel);
+ corrupted = 1;
+ printError(
+ format("path ‘%s’ was modified! expected hash ‘%s’, got ‘%s’")
+ % info->path % printHash(info->narHash) % printHash(hash.first));
+ }
+
+ }
+
+ if (!noTrust) {
+
+ bool good = false;
+
+ if (info->ultimate && !sigsNeeded)
+ good = true;
+
+ else {
+
+ StringSet sigsSeen;
+ size_t actualSigsNeeded = sigsNeeded ? sigsNeeded : 1;
+ size_t validSigs = 0;
+
+ auto doSigs = [&](StringSet sigs) {
+ for (auto sig : sigs) {
+ if (sigsSeen.count(sig)) continue;
+ sigsSeen.insert(sig);
+ if (info->checkSignature(publicKeys, sig))
+ validSigs++;
+ }
+ };
+
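+                    /* Content-addressed paths are self-authenticating, so treat them as maximally signed. */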
+ if (info->isContentAddressed(*store)) validSigs = ValidPathInfo::maxSigs;
+
+ doSigs(info->sigs);
+
+ for (auto & store2 : substituters) {
+ if (validSigs >= actualSigsNeeded) break;
+ try {
+ auto info2 = store2->queryPathInfo(info->path);
+ if (info2->isContentAddressed(*store)) validSigs = ValidPathInfo::maxSigs;
+ doSigs(info2->sigs);
+ } catch (InvalidPath &) {
+ } catch (Error & e) {
+ printError(format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
+ }
+ }
+
+ if (validSigs >= actualSigsNeeded)
+ good = true;
+ }
+
+ if (!good) {
+ logger->incProgress(untrustedLabel);
+ untrusted++;
+ printError(format("path ‘%s’ is untrusted") % info->path);
+ }
+
+ }
+
+ logger->incProgress(doneLabel);
+ done++;
+
+ } catch (Error & e) {
+ printError(format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
+ logger->incProgress(failedLabel);
+ failed++;
+ }
+ };
+
+ for (auto & storePath : storePaths)
+ pool.enqueue(std::bind(doPath, storePath));
+
+ pool.process();
+
+ printInfo(format("%d paths checked, %d untrusted, %d corrupted, %d failed")
+ % done % untrusted % corrupted % failed);
+
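+ // Exit status is a bitmask: 1 = corrupted paths, 2 = untrusted paths, 4 = other failures.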
+ throw Exit(
+ (corrupted ? 1 : 0) |
+ (untrusted ? 2 : 0) |
+ (failed ? 4 : 0));
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdVerify>());
diff --git a/src/resolve-system-dependencies/local.mk b/src/resolve-system-dependencies/local.mk
new file mode 100644
index 000000000..8792a4a25
--- /dev/null
+++ b/src/resolve-system-dependencies/local.mk
@@ -0,0 +1,11 @@
+ifeq ($(OS), Darwin)
+ programs += resolve-system-dependencies
+endif
+
+resolve-system-dependencies_DIR := $(d)
+
+resolve-system-dependencies_INSTALL_DIR := $(libexecdir)/nix
+
+resolve-system-dependencies_LIBS := libstore libmain libutil libformat
+
+resolve-system-dependencies_SOURCES := $(d)/resolve-system-dependencies.cc
diff --git a/src/resolve-system-dependencies/resolve-system-dependencies.cc b/src/resolve-system-dependencies/resolve-system-dependencies.cc
new file mode 100644
index 000000000..ae8ca36ba
--- /dev/null
+++ b/src/resolve-system-dependencies/resolve-system-dependencies.cc
@@ -0,0 +1,194 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include <sys/utsname.h>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <mach-o/loader.h>
+#include <mach-o/swap.h>
+
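+ // Byte-swap ‘y’ when ‘x’ indicates that the object's byte order differs from the host's.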
+#define DO_SWAP(x, y) ((x) ? OSSwapInt32(y) : (y))
+
+using namespace nix;
+
+static auto cacheDir = Path{};
+
+Path resolveCacheFile(Path lib) {
+ std::replace(lib.begin(), lib.end(), '/', '%');
+ return cacheDir + "/" + lib;
+}
+
+std::set<string> readCacheFile(const Path & file) {
+ return tokenizeString<std::set<string>>(readFile(file), "\n");
+}
+
+void writeCacheFile(const Path & file, std::set<string> & deps) {
+ std::ofstream fp;
+ fp.open(file);
+ for (auto & d : deps) {
+ fp << d << "\n";
+ }
+ fp.close();
+}
+
+std::string findDylibName(bool should_swap, ptrdiff_t dylib_command_start) {
+ struct dylib_command *dylc = (struct dylib_command*)dylib_command_start;
+ return std::string((char*)(dylib_command_start + DO_SWAP(should_swap, dylc->dylib.name.offset)));
+}
+
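+ // Parse a Mach-O (or fat) object file and return the install names of the dylibs it links against
+ // (LC_LOAD_DYLIB, LC_LOAD_UPWARD_DYLIB, LC_REEXPORT_DYLIB).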
+std::set<std::string> runResolver(const Path & filename) {
+ int fd = open(filename.c_str(), O_RDONLY);
+ struct stat s;
+ fstat(fd, &s);
+ void *obj = mmap(NULL, s.st_size, PROT_READ, MAP_SHARED, fd, 0);
+
+ ptrdiff_t mach64_offset = 0;
+
+ uint32_t magic = ((struct mach_header_64*)obj)->magic;
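+ // For fat (multi-architecture) binaries, locate the x86_64 slice and parse that; a plain 64-bit Mach-O is parsed directly.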
+ if(magic == FAT_CIGAM || magic == FAT_MAGIC) {
+ bool should_swap = magic == FAT_CIGAM;
+ uint32_t narches = DO_SWAP(should_swap, ((struct fat_header*)obj)->nfat_arch);
+
+ for(uint32_t iter = 0; iter < narches; iter++) {
+ ptrdiff_t header_offset = (ptrdiff_t)obj + sizeof(struct fat_header) * (iter + 1);
+ struct fat_arch* arch = (struct fat_arch*)header_offset;
+ if(DO_SWAP(should_swap, arch->cputype) == CPU_TYPE_X86_64) {
+ mach64_offset = (ptrdiff_t)DO_SWAP(should_swap, arch->offset);
+ break;
+ }
+ }
+ if (mach64_offset == 0) {
+ printError(format("Could not find any mach64 blobs in file ‘%1%’, continuing...") % filename);
+ return std::set<string>();
+ }
+ } else if (magic == MH_MAGIC_64 || magic == MH_CIGAM_64) {
+ mach64_offset = 0;
+ } else {
+ printError(format("Object file has unknown magic number ‘%1%’, skipping it...") % magic);
+ return std::set<string>();
+ }
+
+ ptrdiff_t mach_header_offset = (ptrdiff_t)obj + mach64_offset;
+ struct mach_header_64 *m_header = (struct mach_header_64 *)mach_header_offset;
+
+ bool should_swap = magic == MH_CIGAM_64;
+ ptrdiff_t cmd_offset = mach_header_offset + sizeof(struct mach_header_64);
+
+ std::set<string> libs;
+ for(uint32_t i = 0; i < DO_SWAP(should_swap, m_header->ncmds); i++) {
+ struct load_command *cmd = (struct load_command*)cmd_offset;
+ switch(DO_SWAP(should_swap, cmd->cmd)) {
+ case LC_LOAD_UPWARD_DYLIB:
+ case LC_LOAD_DYLIB:
+ case LC_REEXPORT_DYLIB:
+ libs.insert(findDylibName(should_swap, cmd_offset));
+ break;
+ }
+ cmd_offset += DO_SWAP(should_swap, cmd->cmdsize);
+ }
+
+ return libs;
+}
+
+bool isSymlink(const Path & path) {
+ struct stat st;
+ if(lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+ return S_ISLNK(st.st_mode);
+}
+
+Path resolveSymlink(const Path & path) {
+ char buf[PATH_MAX];
+ ssize_t len = readlink(path.c_str(), buf, sizeof(buf) - 1);
+ if(len != -1) {
+ buf[len] = 0;
+ return Path(buf);
+ } else {
+ throw SysError(format("readlink('%1%')") % path);
+ }
+}
+
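+ // Recursively collect the dylib dependencies of ‘path’; ‘deps’ records paths already visited to break cycles.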
+std::set<string> resolveTree(const Path & path, PathSet & deps) {
+ std::set<string> results;
+ if(deps.find(path) != deps.end()) {
+ return std::set<string>();
+ }
+ deps.insert(path);
+ for (auto & lib : runResolver(path)) {
+ results.insert(lib);
+ for (auto & p : resolveTree(lib, deps)) {
+ results.insert(p);
+ }
+ }
+ return results;
+}
+
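+ // Return the dependency closure of ‘path’, following symlinks, using the on-disk cache when available.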
+std::set<string> getPath(const Path & path) {
+ Path cacheFile = resolveCacheFile(path);
+ if(pathExists(cacheFile)) {
+ return readCacheFile(cacheFile);
+ }
+
+ std::set<string> deps;
+ std::set<string> paths;
+ paths.insert(path);
+
+ Path next_path = Path(path);
+ while(isSymlink(next_path)) {
+ next_path = resolveSymlink(next_path);
+ paths.insert(next_path);
+ }
+
+ for(auto & t : resolveTree(next_path, deps)) {
+ paths.insert(t);
+ }
+
+ writeCacheFile(cacheFile, paths);
+
+ return paths;
+}
+
+int main(int argc, char ** argv) {
+ return handleExceptions(argv[0], [&]() {
+ initNix();
+
+ struct utsname _uname;
+
+ uname(&_uname);
+
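+ // The dependency cache lives under the Nix state directory and is keyed on machine, OS name and kernel release.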
+ auto cacheParentDir = (format("%1%/dependency-maps") % settings.nixStateDir).str();
+
+ cacheDir = (format("%1%/%2%-%3%-%4%")
+ % cacheParentDir
+ % _uname.machine
+ % _uname.sysname
+ % _uname.release).str();
+
+ mkdir(cacheParentDir.c_str(), 0755);
+ mkdir(cacheDir.c_str(), 0755);
+
+ auto store = openStore();
+
+ auto drv = store->derivationFromPath(Path(argv[1]));
+ Strings impurePaths = tokenizeString<Strings>(get(drv.env, "__impureHostDeps"));
+
+ std::set<string> all_paths;
+
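+ // Resolve the library closure of every path listed in the derivation's __impureHostDeps attribute.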
+ for (auto & path : impurePaths) {
+ for(auto & p : getPath(path)) {
+ all_paths.insert(p);
+ }
+ }
+
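+ // Emit an ‘extra-chroot-dirs’ section listing the collected paths, one per line, terminated by a blank line.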
+ std::cout << "extra-chroot-dirs" << std::endl;
+ for(auto & path : all_paths) {
+ std::cout << path << std::endl;
+ }
+ std::cout << std::endl;
+ });
+}