| field | value | date |
|---|---|---|
| author | Eelco Dolstra <eelco.dolstra@logicblox.com> | 2016-02-25 11:25:11 +0100 |
| committer | Eelco Dolstra <eelco.dolstra@logicblox.com> | 2016-02-25 11:25:11 +0100 |
| commit | f1bdeac9864de8cd9994bb41da79f3a4d812dadc (patch) | |
| tree | 9ea00480f826dc8d8d8248e11323266a42f9d4ae /src | |
| parent | 9b05d5848c2fce73b75b3411e362c2bd48d53dcb (diff) | |
| parent | 152b1d6bf9c89b4db9848475e3000821e159d479 (diff) | |
Merge branch 'master' into new-cli
Diffstat (limited to 'src')
54 files changed, 2044 insertions, 490 deletions
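
The largest language-level change pulled in by this merge is floating-point support in the Nix expression evaluator (a new `tFloat` value type, a `FLOAT` lexer token, `ExprFloat`, mixed int/float arithmetic in the arithmetic primops, and float handling in the JSON and XML converters). As a quick illustration, and a sketch only rather than part of the diff below, the expressions here exercise that support. They assume an evaluator built from this branch and that the new `__isFloat` primop is exposed as `builtins.isFloat` in the usual way; the printed form of float results may vary.

```nix
# Hypothetical usage of the float support introduced in this merge.
[
  (builtins.typeOf 3.14)        # "float"  (new tFloat case in prim_typeOf)
  (builtins.isFloat 2.5)        # true     (new __isFloat primop)
  (1 + 2.5)                     # 3.5      (the integer operand is promoted to float)
  (7 / 2.0)                     # 3.5      (division yields a float if either operand is a float)
  (1.0 == 1)                    # true     (int/float comparison special case in eqValues)
  (builtins.fromJSON "1.5e3")   # 1500     (JSON numbers containing '.', 'e' or 'E' now parse as floats)
]
```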
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index de9454422..8ce2f3dfa 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -128,6 +128,9 @@ static void printValue(std::ostream & str, std::set<const Value *> & active, con case tExternal: str << *v.external; break; + case tFloat: + str << v.fpoint; + break; default: throw Error("invalid value"); } @@ -161,6 +164,7 @@ string showType(const Value & v) case tPrimOp: return "a built-in function"; case tPrimOpApp: return "a partially applied built-in function"; case tExternal: return v.external->showType(); + case tFloat: return "a float"; } abort(); } @@ -579,6 +583,12 @@ Value * ExprInt::maybeThunk(EvalState & state, Env & env) return &v; } +Value * ExprFloat::maybeThunk(EvalState & state, Env & env) +{ + nrAvoided++; + return &v; +} + Value * ExprPath::maybeThunk(EvalState & state, Env & env) { nrAvoided++; @@ -666,6 +676,11 @@ void ExprInt::eval(EvalState & state, Env & env, Value & v) } +void ExprFloat::eval(EvalState & state, Env & env, Value & v) +{ + v = this->v; +} + void ExprString::eval(EvalState & state, Env & env, Value & v) { v = this->v; @@ -1211,6 +1226,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) PathSet context; std::ostringstream s; NixInt n = 0; + NixFloat nf = 0; bool first = !forceString; ValueType firstType = tString; @@ -1229,15 +1245,30 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) } if (firstType == tInt) { - if (vTmp.type != tInt) + if (vTmp.type == tInt) { + n += vTmp.integer; + } else if (vTmp.type == tFloat) { + // Upgrade the type from int to float; + firstType = tFloat; + nf = n; + nf += vTmp.fpoint; + } else throwEvalError("cannot add %1% to an integer, at %2%", showType(vTmp), pos); - n += vTmp.integer; + } else if (firstType == tFloat) { + if (vTmp.type == tInt) { + nf += vTmp.integer; + } else if (vTmp.type == tFloat) { + nf += vTmp.fpoint; + } else + throwEvalError("cannot add %1% to a float, at %2%", showType(vTmp), pos); } else s << state.coerceToString(pos, vTmp, context, false, firstType == tString); } if (firstType == tInt) mkInt(v, n); + else if (firstType == tFloat) + mkFloat(v, nf); else if (firstType == tPath) { if (!context.empty()) throwEvalError("a string that refers to a store path cannot be appended to a path, at %1%", pos); @@ -1295,6 +1326,17 @@ NixInt EvalState::forceInt(Value & v, const Pos & pos) } +NixFloat EvalState::forceFloat(Value & v, const Pos & pos) +{ + forceValue(v, pos); + if (v.type == tInt) + return v.integer; + else if (v.type != tFloat) + throwTypeError("value is %1% while a float was expected, at %2%", v, pos); + return v.fpoint; +} + + bool EvalState::forceBool(Value & v) { forceValue(v); @@ -1413,6 +1455,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, if (v.type == tBool && v.boolean) return "1"; if (v.type == tBool && !v.boolean) return ""; if (v.type == tInt) return std::to_string(v.integer); + if (v.type == tFloat) return std::to_string(v.fpoint); if (v.type == tNull) return ""; if (v.isList()) { @@ -1474,6 +1517,13 @@ bool EvalState::eqValues(Value & v1, Value & v2) uniqList on a list of sets.) Will remove this eventually. */ if (&v1 == &v2) return true; + // Special case type-compatibility between float and int + if (v1.type == tInt && v2.type == tFloat) + return v1.integer == v2.fpoint; + if (v1.type == tFloat && v2.type == tInt) + return v1.fpoint == v2.integer; + + // All other types are not compatible with each other. 
if (v1.type != v2.type) return false; switch (v1.type) { @@ -1531,6 +1581,9 @@ bool EvalState::eqValues(Value & v1, Value & v2) case tExternal: return *v1.external == *v2.external; + case tFloat: + return v1.fpoint == v2.fpoint; + default: throwEvalError("cannot compare %1% with %2%", showType(v1), showType(v2)); } diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index cf2d57864..40e05712b 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -147,6 +147,7 @@ public: /* Force `v', and then verify that it has the expected type. */ NixInt forceInt(Value & v, const Pos & pos); + NixFloat forceFloat(Value & v, const Pos & pos); bool forceBool(Value & v); inline void forceAttrs(Value & v); inline void forceAttrs(Value & v, const Pos & pos); diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index 1002ee628..996c2c5f4 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -106,7 +106,8 @@ bool DrvInfo::checkMeta(Value & v) if (!checkMeta(*i.value)) return false; return true; } - else return v.type == tInt || v.type == tBool || v.type == tString; + else return v.type == tInt || v.type == tBool || v.type == tString || + v.type == tFloat; } @@ -127,7 +128,7 @@ string DrvInfo::queryMetaString(const string & name) } -int DrvInfo::queryMetaInt(const string & name, int def) +NixInt DrvInfo::queryMetaInt(const string & name, NixInt def) { Value * v = queryMeta(name); if (!v) return def; @@ -135,12 +136,26 @@ int DrvInfo::queryMetaInt(const string & name, int def) if (v->type == tString) { /* Backwards compatibility with before we had support for integer meta fields. */ - int n; + NixInt n; if (string2Int(v->string.s, n)) return n; } return def; } +NixFloat DrvInfo::queryMetaFloat(const string & name, NixFloat def) +{ + Value * v = queryMeta(name); + if (!v) return def; + if (v->type == tFloat) return v->fpoint; + if (v->type == tString) { + /* Backwards compatibility with before we had support for + float meta fields. */ + NixFloat n; + if (string2Float(v->string.s, n)) return n; + } + return def; +} + bool DrvInfo::queryMetaBool(const string & name, bool def) { diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index 98f762494..365c66c8d 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -47,7 +47,8 @@ public: StringSet queryMetaNames(); Value * queryMeta(const string & name); string queryMetaString(const string & name); - int queryMetaInt(const string & name, int def); + NixInt queryMetaInt(const string & name, NixInt def); + NixFloat queryMetaFloat(const string & name, NixFloat def); bool queryMetaBool(const string & name, bool def); void setMeta(const string & name, Value * v); diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc index 0898b5609..1daf84600 100644 --- a/src/libexpr/json-to-value.cc +++ b/src/libexpr/json-to-value.cc @@ -105,17 +105,21 @@ static void parseJSON(EvalState & state, const char * & s, Value & v) mkString(v, parseJSONString(s)); } - else if (isdigit(*s) || *s == '-') { - bool neg = false; - if (*s == '-') { - neg = true; - if (!*++s) throw JSONParseError("unexpected end of JSON number"); + else if (isdigit(*s) || *s == '-' || *s == '.' ) { + // Buffer into a string first, then use built-in C++ conversions + std::string tmp_number; + ValueType number_type = tInt; + + while (isdigit(*s) || *s == '-' || *s == '.' || *s == 'e' || *s == 'E') { + if (*s == '.' 
|| *s == 'e' || *s == 'E') + number_type = tFloat; + tmp_number += *s++; } - NixInt n = 0; - // FIXME: detect overflow - while (isdigit(*s)) n = n * 10 + (*s++ - '0'); - if (*s == '.' || *s == 'e') throw JSONParseError("floating point JSON numbers are not supported"); - mkInt(v, neg ? -n : n); + + if (number_type == tFloat) + mkFloat(v, stod(tmp_number)); + else + mkInt(v, stoi(tmp_number)); } else if (strncmp(s, "true", 4) == 0) { diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index 478f41641..f3660ab43 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -86,6 +86,7 @@ static Expr * unescapeStr(SymbolTable & symbols, const char * s) ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]* INT [0-9]+ +FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)? PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+ HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+ SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\> @@ -126,6 +127,12 @@ or { return OR_KW; } throw ParseError(format("invalid integer ‘%1%’") % yytext); return INT; } +{FLOAT} { errno = 0; + yylval->nf = strtod(yytext, 0); + if (errno != 0) + throw ParseError(format("invalid float ‘%1%’") % yytext); + return FLOAT; + } \$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } } @@ -188,5 +195,7 @@ or { return OR_KW; } } +<<EOF>> { data->atEnd = true; return 0; } + %% diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 5bc8e4202..b2c9f0528 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -68,6 +68,11 @@ void ExprInt::show(std::ostream & str) str << n; } +void ExprFloat::show(std::ostream & str) +{ + str << nf; +} + void ExprString::show(std::ostream & str) { showString(str, s); @@ -226,6 +231,10 @@ void ExprInt::bindVars(const StaticEnv & env) { } +void ExprFloat::bindVars(const StaticEnv & env) +{ +} + void ExprString::bindVars(const StaticEnv & env) { } diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh index ef07d4557..d2ca09b3a 100644 --- a/src/libexpr/nixexpr.hh +++ b/src/libexpr/nixexpr.hh @@ -11,6 +11,7 @@ namespace nix { MakeError(EvalError, Error) MakeError(ParseError, Error) +MakeError(IncompleteParseError, ParseError) MakeError(AssertionError, EvalError) MakeError(ThrownError, AssertionError) MakeError(Abort, EvalError) @@ -98,6 +99,15 @@ struct ExprInt : Expr Value * maybeThunk(EvalState & state, Env & env); }; +struct ExprFloat : Expr +{ + NixFloat nf; + Value v; + ExprFloat(NixFloat nf) : nf(nf) { mkFloat(v, nf); }; + COMMON_METHODS + Value * maybeThunk(EvalState & state, Env & env); +}; + struct ExprString : Expr { Symbol s; diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 3e6c6d917..80ecd44c5 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -31,10 +31,12 @@ namespace nix { Path basePath; Symbol path; string error; + bool atEnd; Symbol sLetBody; ParseData(EvalState & state) : state(state) , symbols(state.symbols) + , atEnd(false) , sLetBody(symbols.create("<let-body>")) { }; }; @@ -244,6 +246,7 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err nix::Formals * formals; nix::Formal * formal; nix::NixInt n; + nix::NixFloat nf; const char * id; // !!! 
-> Symbol char * path; char * uri; @@ -264,6 +267,7 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err %token <id> ID ATTRPATH %token <e> STR IND_STR %token <n> INT +%token <nf> FLOAT %token <path> PATH HPATH SPATH %token <uri> URI %token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW @@ -366,6 +370,7 @@ expr_simple $$ = new ExprVar(CUR_POS, data->symbols.create($1)); } | INT { $$ = new ExprInt($1); } + | FLOAT { $$ = new ExprFloat($1); } | '"' string_parts '"' { $$ = $2; } | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE { $$ = stripIndentation(CUR_POS, data->symbols, *$2); @@ -536,7 +541,12 @@ Expr * EvalState::parse(const char * text, int res = yyparse(scanner, &data); yylex_destroy(scanner); - if (res) throw ParseError(data.error); + if (res) { + if (data.atEnd) + throw IncompleteParseError(data.error); + else + throw ParseError(data.error); + } data.result->bindVars(staticEnv); diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 2371ae174..3c899d769 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -195,6 +195,7 @@ static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Valu case tExternal: t = args[0]->external->typeOf(); break; + case tFloat: t = "float"; break; default: abort(); } mkString(v, state.symbols.create(t)); @@ -224,6 +225,12 @@ static void prim_isInt(EvalState & state, const Pos & pos, Value * * args, Value mkBool(v, args[0]->type == tInt); } +/* Determine whether the argument is a float. */ +static void prim_isFloat(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceValue(*args[0]); + mkBool(v, args[0]->type == tFloat); +} /* Determine whether the argument is a string. */ static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Value & v) @@ -245,11 +252,17 @@ struct CompareValues { bool operator () (const Value * v1, const Value * v2) const { + if (v1->type == tFloat && v2->type == tInt) + return v1->fpoint < v2->integer; + if (v1->type == tInt && v2->type == tFloat) + return v1->integer < v2->fpoint; if (v1->type != v2->type) throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % showType(*v2)); switch (v1->type) { case tInt: return v1->integer < v2->integer; + case tFloat: + return v1->fpoint < v2->fpoint; case tString: return strcmp(v1->string.s, v2->string.s) < 0; case tPath: @@ -1423,27 +1436,40 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v) { - mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos)); + if (args[0]->type == tFloat || args[1]->type == tFloat) + mkFloat(v, state.forceFloat(*args[0], pos) + state.forceFloat(*args[1], pos)); + else + mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos)); } static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v) { - mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos)); + if (args[0]->type == tFloat || args[1]->type == tFloat) + mkFloat(v, state.forceFloat(*args[0], pos) - state.forceFloat(*args[1], pos)); + else + mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos)); } static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v) { - mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos)); + if (args[0]->type == tFloat || args[1]->type == tFloat) + mkFloat(v, state.forceFloat(*args[0], pos) * 
state.forceFloat(*args[1], pos)); + else + mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos)); } static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & v) { - NixInt i2 = state.forceInt(*args[1], pos); - if (i2 == 0) throw EvalError(format("division by zero, at %1%") % pos); - mkInt(v, state.forceInt(*args[0], pos) / i2); + NixFloat f2 = state.forceFloat(*args[1], pos); + if (f2 == 0) throw EvalError(format("division by zero, at %1%") % pos); + + if (args[0]->type == tFloat || args[1]->type == tFloat) + mkFloat(v, state.forceFloat(*args[0], pos) / state.forceFloat(*args[1], pos)); + else + mkInt(v, state.forceInt(*args[0], pos) / state.forceInt(*args[1], pos)); } @@ -1735,7 +1761,7 @@ void EvalState::createBaseEnv() language feature gets added. It's not necessary to increase it when primops get added, because you can just use `builtins ? primOp' to check. */ - mkInt(v, 3); + mkInt(v, 4); addConstant("__langVersion", v); // Miscellaneous @@ -1752,6 +1778,7 @@ void EvalState::createBaseEnv() addPrimOp("__isFunction", 1, prim_isFunction); addPrimOp("__isString", 1, prim_isString); addPrimOp("__isInt", 1, prim_isInt); + addPrimOp("__isFloat", 1, prim_isFloat); addPrimOp("__isBool", 1, prim_isBool); addPrimOp("__genericClosure", 1, prim_genericClosure); addPrimOp("abort", 1, prim_abort); diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index b0cf85e21..47ee324a6 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -84,6 +84,10 @@ void printValueAsJSON(EvalState & state, bool strict, v.external->printValueAsJSON(state, strict, str, context); break; + case tFloat: + str << v.fpoint; + break; + default: throw TypeError(format("cannot convert %1% to JSON") % showType(v)); } diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh index f6796f205..c59caf564 100644 --- a/src/libexpr/value-to-json.hh +++ b/src/libexpr/value-to-json.hh @@ -36,7 +36,18 @@ struct JSONObject attr(s); escapeJSON(str, t); } - void attr(const string & s, int n) + void attr(const string & s, const char * t) + { + attr(s); + escapeJSON(str, t); + } + void attr(const string & s, bool b) + { + attr(s); + str << (b ? "true" : "false"); + } + template<typename T> + void attr(const string & s, const T & n) { attr(s); str << n; diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index cb52ce1e6..00b1918a8 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -148,6 +148,10 @@ static void printValueAsXML(EvalState & state, bool strict, bool location, v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen); break; + case tFloat: + doc.writeEmptyElement("float", singletonAttrs("value", (format("%1%") % v.fpoint).str())); + break; + default: doc.writeEmptyElement("unevaluated"); } diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index e6d1502cb..62bdd9281 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -22,6 +22,7 @@ typedef enum { tPrimOp, tPrimOpApp, tExternal, + tFloat } ValueType; @@ -38,6 +39,7 @@ class XMLWriter; typedef long NixInt; +typedef float NixFloat; /* External values must descend from ExternalValueBase, so that * type-agnostic nix functions (e.g. 
showType) can be implemented @@ -141,6 +143,7 @@ struct Value Value * left, * right; } primOpApp; ExternalValueBase * external; + NixFloat fpoint; }; bool isList() const @@ -181,6 +184,14 @@ static inline void mkInt(Value & v, NixInt n) } +static inline void mkFloat(Value & v, NixFloat n) +{ + clearValue(v); + v.type = tFloat; + v.fpoint = n; +} + + static inline void mkBool(Value & v, bool b) { clearValue(v); diff --git a/src/libmain/local.mk b/src/libmain/local.mk index 16dbf7528..f1fd3eb72 100644 --- a/src/libmain/local.mk +++ b/src/libmain/local.mk @@ -6,6 +6,8 @@ libmain_DIR := $(d) libmain_SOURCES := $(wildcard $(d)/*.cc) +libmain_LDFLAGS = $(OPENSSL_LIBS) + libmain_LIBS = libstore libutil libformat libmain_ALLOW_UNDEFINED = 1 diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index bf6bf5b71..e883967b7 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -6,10 +6,11 @@ #include "store-api.hh" #include "util.hh" -#include <iostream> +#include <algorithm> #include <cctype> #include <exception> -#include <algorithm> +#include <iostream> +#include <mutex> #include <cstdlib> #include <sys/time.h> @@ -17,7 +18,7 @@ #include <unistd.h> #include <signal.h> -extern char * * environ; +#include <openssl/crypto.h> namespace nix { @@ -94,7 +95,18 @@ string getArg(const string & opt, } -void detectStackOverflow(); +/* OpenSSL is not thread-safe by default - it will randomly crash + unless the user supplies a mutex locking function. So let's do + that. */ +static std::vector<std::mutex> opensslLocks; + +static void opensslLockCallback(int mode, int type, const char * file, int line) +{ + if (mode & CRYPTO_LOCK) + opensslLocks[type].lock(); + else + opensslLocks[type].unlock(); +} void initNix() @@ -105,11 +117,16 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif + // FIXME: do we need this? It's not thread-safe. std::ios::sync_with_stdio(false); if (getEnv("IN_SYSTEMD") == "1") logType = ltSystemd; + /* Initialise OpenSSL locking. */ + opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks()); + CRYPTO_set_locking_callback(opensslLockCallback); + settings.processEnvironment(); settings.loadConfFile(); diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 20744be0a..6d94a22f7 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -65,6 +65,7 @@ template<class N> N getIntArg(const string & opt, return n * multiplier; } + /* Show the manual page for the specified program. */ void showManPage(const string & name); @@ -99,4 +100,8 @@ struct PrintFreed }; +/* Install a SIGSEGV handler to detect stack overflows. 
*/ +void detectStackOverflow(); + + } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc new file mode 100644 index 000000000..dc086fe9c --- /dev/null +++ b/src/libstore/binary-cache-store.cc @@ -0,0 +1,347 @@ +#include "binary-cache-store.hh" +#include "sync.hh" + +#include "archive.hh" +#include "compression.hh" +#include "derivations.hh" +#include "globals.hh" +#include "nar-info.hh" +#include "worker-protocol.hh" + +#include <chrono> + +namespace nix { + +BinaryCacheStore::BinaryCacheStore(std::shared_ptr<Store> localStore, + const Path & secretKeyFile, const Path & publicKeyFile) + : localStore(localStore) +{ + if (secretKeyFile != "") + secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile))); + + if (publicKeyFile != "") { + publicKeys = std::unique_ptr<PublicKeys>(new PublicKeys); + auto key = PublicKey(readFile(publicKeyFile)); + publicKeys->emplace(key.name, key); + } + + StringSink sink; + sink << narVersionMagic1; + narMagic = sink.s; +} + +void BinaryCacheStore::init() +{ + std::string cacheInfoFile = "nix-cache-info"; + if (!fileExists(cacheInfoFile)) + upsertFile(cacheInfoFile, "StoreDir: " + settings.nixStore + "\n"); +} + +void BinaryCacheStore::notImpl() +{ + throw Error("operation not implemented for binary cache stores"); +} + +const BinaryCacheStore::Stats & BinaryCacheStore::getStats() +{ + return stats; +} + +Path BinaryCacheStore::narInfoFileFor(const Path & storePath) +{ + assertStorePath(storePath); + return storePathToHash(storePath) + ".narinfo"; +} + +void BinaryCacheStore::addToCache(const ValidPathInfo & info, + const string & nar) +{ + auto narInfoFile = narInfoFileFor(info.path); + if (fileExists(narInfoFile)) return; + + assert(nar.compare(0, narMagic.size(), narMagic) == 0); + + auto narInfo = make_ref<NarInfo>(info); + + narInfo->narSize = nar.size(); + narInfo->narHash = hashString(htSHA256, nar); + + if (info.narHash.type != htUnknown && info.narHash != narInfo->narHash) + throw Error(format("refusing to copy corrupted path ‘%1%’ to binary cache") % info.path); + + /* Compress the NAR. */ + narInfo->compression = "xz"; + auto now1 = std::chrono::steady_clock::now(); + string narXz = compressXZ(nar); + auto now2 = std::chrono::steady_clock::now(); + narInfo->fileHash = hashString(htSHA256, narXz); + narInfo->fileSize = narXz.size(); + + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache") + % narInfo->path % narInfo->narSize + % ((1.0 - (double) narXz.size() / nar.size()) * 100.0) + % duration); + + /* Atomically write the NAR file. 
*/ + narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar.xz"; + if (!fileExists(narInfo->url)) { + stats.narWrite++; + upsertFile(narInfo->url, narXz); + } else + stats.narWriteAverted++; + + stats.narWriteBytes += nar.size(); + stats.narWriteCompressedBytes += narXz.size(); + stats.narWriteCompressionTimeMs += duration; + + /* Atomically write the NAR info file.*/ + if (secretKey) narInfo->sign(*secretKey); + + upsertFile(narInfoFile, narInfo->to_string()); + + { + auto state_(state.lock()); + state_->narInfoCache.upsert(narInfo->path, narInfo); + stats.narInfoCacheSize = state_->narInfoCache.size(); + } + + stats.narInfoWrite++; +} + +NarInfo BinaryCacheStore::readNarInfo(const Path & storePath) +{ + { + auto state_(state.lock()); + auto res = state_->narInfoCache.get(storePath); + if (res) { + stats.narInfoReadAverted++; + return **res; + } + } + + auto narInfoFile = narInfoFileFor(storePath); + auto narInfo = make_ref<NarInfo>(getFile(narInfoFile), narInfoFile); + assert(narInfo->path == storePath); + + stats.narInfoRead++; + + if (publicKeys) { + if (!narInfo->checkSignature(*publicKeys)) + throw Error(format("invalid signature on NAR info file ‘%1%’") % narInfoFile); + } + + { + auto state_(state.lock()); + state_->narInfoCache.upsert(storePath, narInfo); + stats.narInfoCacheSize = state_->narInfoCache.size(); + } + + return *narInfo; +} + +bool BinaryCacheStore::isValidPath(const Path & storePath) +{ + return fileExists(narInfoFileFor(storePath)); +} + +void BinaryCacheStore::exportPath(const Path & storePath, bool sign, Sink & sink) +{ + assert(!sign); + + auto res = readNarInfo(storePath); + + auto nar = getFile(res.url); + + stats.narRead++; + stats.narReadCompressedBytes += nar.size(); + + /* Decompress the NAR. FIXME: would be nice to have the remote + side do this. */ + if (res.compression == "none") + ; + else if (res.compression == "xz") + nar = decompressXZ(nar); + else + throw Error(format("unknown NAR compression type ‘%1%’") % nar); + + stats.narReadBytes += nar.size(); + + printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar.size()); + + assert(nar.size() % 8 == 0); + + sink((unsigned char *) nar.c_str(), nar.size()); + + // FIXME: check integrity of NAR. + + sink << exportMagic << storePath << res.references << res.deriver << 0; +} + +Paths BinaryCacheStore::importPaths(bool requireSignature, Source & source) +{ + assert(!requireSignature); + Paths res; + while (true) { + unsigned long long n = readLongLong(source); + if (n == 0) break; + if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’"); + res.push_back(importPath(source)); + } + return res; +} + +struct TeeSource : Source +{ + Source & readSource; + std::string data; + TeeSource(Source & readSource) : readSource(readSource) + { + } + size_t read(unsigned char * data, size_t len) + { + size_t n = readSource.read(data, len); + this->data.append((char *) data, n); + return n; + } +}; + +struct NopSink : ParseSink +{ +}; + +Path BinaryCacheStore::importPath(Source & source) +{ + /* FIXME: some cut&paste of LocalStore::importPath(). */ + + /* Extract the NAR from the source. 
*/ + TeeSource tee(source); + NopSink sink; + parseDump(sink, tee); + + uint32_t magic = readInt(source); + if (magic != exportMagic) + throw Error("Nix archive cannot be imported; wrong format"); + + ValidPathInfo info; + info.path = readStorePath(source); + + info.references = readStorePaths<PathSet>(source); + + readString(source); // deriver, don't care + + bool haveSignature = readInt(source) == 1; + assert(!haveSignature); + + addToCache(info, tee.data); + + return info.path; +} + +ValidPathInfo BinaryCacheStore::queryPathInfo(const Path & storePath) +{ + return ValidPathInfo(readNarInfo(storePath)); +} + +void BinaryCacheStore::querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) +{ + PathSet left; + + if (!localStore) return; + + for (auto & storePath : paths) { + if (!localStore->isValidPath(storePath)) { + left.insert(storePath); + continue; + } + ValidPathInfo info = localStore->queryPathInfo(storePath); + SubstitutablePathInfo sub; + sub.references = info.references; + sub.downloadSize = 0; + sub.narSize = info.narSize; + infos.emplace(storePath, sub); + } + + if (settings.useSubstitutes) + localStore->querySubstitutablePathInfos(left, infos); +} + +Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath, + bool recursive, HashType hashAlgo, PathFilter & filter, bool repair) +{ + // FIXME: some cut&paste from LocalStore::addToStore(). + + /* Read the whole path into memory. This is not a very scalable + method for very large paths, but `copyPath' is mainly used for + small files. */ + StringSink sink; + Hash h; + if (recursive) { + dumpPath(srcPath, sink, filter); + h = hashString(hashAlgo, sink.s); + } else { + auto s = readFile(srcPath); + dumpString(s, sink); + h = hashString(hashAlgo, s); + } + + ValidPathInfo info; + info.path = makeFixedOutputPath(recursive, hashAlgo, h, name); + + if (repair || !isValidPath(info.path)) + addToCache(info, sink.s); + + return info.path; +} + +Path BinaryCacheStore::addTextToStore(const string & name, const string & s, + const PathSet & references, bool repair) +{ + ValidPathInfo info; + info.path = computeStorePathForText(name, s, references); + info.references = references; + + if (repair || !isValidPath(info.path)) { + StringSink sink; + dumpString(s, sink); + addToCache(info, sink.s); + } + + return info.path; +} + +void BinaryCacheStore::buildPaths(const PathSet & paths, BuildMode buildMode) +{ + for (auto & storePath : paths) { + assert(!isDerivation(storePath)); + + if (isValidPath(storePath)) continue; + + if (!localStore) + throw Error(format("don't know how to realise path ‘%1%’ in a binary cache") % storePath); + + localStore->addTempRoot(storePath); + + if (!localStore->isValidPath(storePath)) + localStore->ensurePath(storePath); + + ValidPathInfo info = localStore->queryPathInfo(storePath); + + for (auto & ref : info.references) + if (ref != storePath) + ensurePath(ref); + + StringSink sink; + dumpPath(storePath, sink); + + addToCache(info, sink.s); + } +} + +void BinaryCacheStore::ensurePath(const Path & path) +{ + buildPaths({path}); +} + +} diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh new file mode 100644 index 000000000..2235d6d67 --- /dev/null +++ b/src/libstore/binary-cache-store.hh @@ -0,0 +1,172 @@ +#pragma once + +#include "crypto.hh" +#include "store-api.hh" + +#include "lru-cache.hh" +#include "sync.hh" +#include "pool.hh" + +#include <atomic> + +namespace nix { + +struct NarInfo; + +class BinaryCacheStore : public Store +{ 
+private: + + std::unique_ptr<SecretKey> secretKey; + std::unique_ptr<PublicKeys> publicKeys; + + std::shared_ptr<Store> localStore; + + struct State + { + LRUCache<Path, ref<NarInfo>> narInfoCache{32 * 1024}; + }; + + Sync<State> state; + +protected: + + BinaryCacheStore(std::shared_ptr<Store> localStore, + const Path & secretKeyFile, const Path & publicKeyFile); + + [[noreturn]] void notImpl(); + + virtual bool fileExists(const std::string & path) = 0; + + virtual void upsertFile(const std::string & path, const std::string & data) = 0; + + virtual std::string getFile(const std::string & path) = 0; + +public: + + virtual void init(); + + struct Stats + { + std::atomic<uint64_t> narInfoRead{0}; + std::atomic<uint64_t> narInfoReadAverted{0}; + std::atomic<uint64_t> narInfoWrite{0}; + std::atomic<uint64_t> narInfoCacheSize{0}; + std::atomic<uint64_t> narRead{0}; + std::atomic<uint64_t> narReadBytes{0}; + std::atomic<uint64_t> narReadCompressedBytes{0}; + std::atomic<uint64_t> narWrite{0}; + std::atomic<uint64_t> narWriteAverted{0}; + std::atomic<uint64_t> narWriteBytes{0}; + std::atomic<uint64_t> narWriteCompressedBytes{0}; + std::atomic<uint64_t> narWriteCompressionTimeMs{0}; + }; + + const Stats & getStats(); + +private: + + Stats stats; + + std::string narMagic; + + std::string narInfoFileFor(const Path & storePath); + + void addToCache(const ValidPathInfo & info, const string & nar); + +protected: + + NarInfo readNarInfo(const Path & storePath); + +public: + + bool isValidPath(const Path & path) override; + + PathSet queryValidPaths(const PathSet & paths) override + { notImpl(); } + + PathSet queryAllValidPaths() override + { notImpl(); } + + ValidPathInfo queryPathInfo(const Path & path) override; + + Hash queryPathHash(const Path & path) override + { notImpl(); } + + void queryReferrers(const Path & path, + PathSet & referrers) override + { notImpl(); } + + Path queryDeriver(const Path & path) override + { return ""; } + + PathSet queryValidDerivers(const Path & path) override + { return {}; } + + PathSet queryDerivationOutputs(const Path & path) override + { notImpl(); } + + StringSet queryDerivationOutputNames(const Path & path) override + { notImpl(); } + + Path queryPathFromHashPart(const string & hashPart) override + { notImpl(); } + + PathSet querySubstitutablePaths(const PathSet & paths) override + { return {}; } + + void querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) override; + + Path addToStore(const string & name, const Path & srcPath, + bool recursive = true, HashType hashAlgo = htSHA256, + PathFilter & filter = defaultPathFilter, bool repair = false) override; + + Path addTextToStore(const string & name, const string & s, + const PathSet & references, bool repair = false) override; + + void exportPath(const Path & path, bool sign, + Sink & sink) override; + + Paths importPaths(bool requireSignature, Source & source) override; + + Path importPath(Source & source); + + void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) override; + + BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode = bmNormal) override + { notImpl(); } + + void ensurePath(const Path & path) override; + + void addTempRoot(const Path & path) override + { notImpl(); } + + void addIndirectRoot(const Path & path) override + { notImpl(); } + + void syncWithGC() override + { } + + Roots findRoots() override + { notImpl(); } + + void collectGarbage(const GCOptions & options, GCResults & results) override + { 
notImpl(); } + + PathSet queryFailedPaths() override + { return {}; } + + void clearFailedPaths(const PathSet & paths) override + { } + + void optimiseStore() override + { } + + bool verifyStore(bool checkContents, bool repair) override + { return true; } + +}; + +} diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 180a558dc..547981e5b 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -1310,7 +1310,6 @@ void DerivationGoal::tryToBuild() for (auto & i : drv->outputs) { Path path = i.second.path; if (worker.store.isValidPath(path)) continue; - if (!pathExists(path)) continue; debug(format("removing invalid path ‘%1%’") % path); deletePath(path); } @@ -1390,8 +1389,7 @@ void replaceValidPath(const Path & storePath, const Path tmpPath) rename(storePath.c_str(), oldPath.c_str()); if (rename(tmpPath.c_str(), storePath.c_str()) == -1) throw SysError(format("moving ‘%1%’ to ‘%2%’") % tmpPath % storePath); - if (pathExists(oldPath)) - deletePath(oldPath); + deletePath(oldPath); } @@ -1490,7 +1488,7 @@ void DerivationGoal::buildDone() /* Delete unused redirected outputs (when doing hash rewriting). */ for (auto & i : redirectedOutputs) - if (pathExists(i.second)) deletePath(i.second); + deletePath(i.second); /* Delete the chroot (if we were using one). */ autoDelChroot.reset(); /* this runs the destructor */ @@ -1939,7 +1937,7 @@ void DerivationGoal::startBuilder() to ensure that we can create hard-links to non-directory inputs in the fake Nix store in the chroot (see below). */ chrootRootDir = drvPath + ".chroot"; - if (pathExists(chrootRootDir)) deletePath(chrootRootDir); + deletePath(chrootRootDir); /* Clean up the chroot directory automatically. */ autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir); @@ -2514,7 +2512,7 @@ void DerivationGoal::runChild() debug(sandboxProfile); Path sandboxFile = drvPath + ".sb"; - if (pathExists(sandboxFile)) deletePath(sandboxFile); + deletePath(sandboxFile); autoDelSandbox.reset(sandboxFile, false); writeFile(sandboxFile, sandboxProfile); @@ -2706,8 +2704,7 @@ void DerivationGoal::registerOutputs() return; if (actualPath != dest) { PathLocks outputLocks({dest}); - if (pathExists(dest)) - deletePath(dest); + deletePath(dest); if (rename(actualPath.c_str(), dest.c_str()) == -1) throw SysError(format("moving ‘%1%’ to ‘%2%’") % actualPath % dest); } @@ -2735,10 +2732,10 @@ void DerivationGoal::registerOutputs() if (buildMode == bmCheck) { if (!worker.store.isValidPath(path)) continue; ValidPathInfo info = worker.store.queryPathInfo(path); - if (hash.first != info.hash) { + if (hash.first != info.narHash) { if (settings.keepFailed) { Path dst = path + checkSuffix; - if (pathExists(dst)) deletePath(dst); + deletePath(dst); if (rename(actualPath.c_str(), dst.c_str())) throw SysError(format("renaming ‘%1%’ to ‘%2%’") % actualPath % dst); throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs from ‘%3%’") @@ -2799,7 +2796,7 @@ void DerivationGoal::registerOutputs() ValidPathInfo info; info.path = path; - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; info.references = references; info.deriver = drvPath; @@ -2830,7 +2827,7 @@ void DerivationGoal::registerOutputs() if (settings.keepFailed) { for (auto & i : drv->outputs) { Path prev = i.second.path + checkSuffix; - if (pathExists(prev)) deletePath(prev); + deletePath(prev); if (curRound < nrRounds) { Path dst = i.second.path + checkSuffix; if (rename(i.second.path.c_str(), dst.c_str())) @@ -2998,7 +2995,7 @@ Path 
DerivationGoal::addHashRewrite(const Path & path) string h1 = string(path, settings.nixStore.size() + 1, 32); string h2 = string(printHash32(hashString(htSHA256, "rewrite:" + drvPath + ":" + path)), 0, 32); Path p = settings.nixStore + "/" + h2 + string(path, settings.nixStore.size() + 33); - if (pathExists(p)) deletePath(p); + deletePath(p); assert(path.size() == p.size()); rewritesToTmp[h1] = h2; rewritesFromTmp[h2] = h1; @@ -3259,8 +3256,7 @@ void SubstitutionGoal::tryToRun() destPath = repair ? storePath + ".tmp" : storePath; /* Remove the (stale) output path if it exists. */ - if (pathExists(destPath)) - deletePath(destPath); + deletePath(destPath); worker.store.setSubstituterEnv(); @@ -3369,7 +3365,7 @@ void SubstitutionGoal::finished() ValidPathInfo info2; info2.path = storePath; - info2.hash = hash.first; + info2.narHash = hash.first; info2.narSize = hash.second; info2.references = info.references; info2.deriver = info.deriver; diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc new file mode 100644 index 000000000..c1b57e51d --- /dev/null +++ b/src/libstore/crypto.cc @@ -0,0 +1,88 @@ +#include "crypto.hh" +#include "util.hh" + +#if HAVE_SODIUM +#include <sodium.h> +#endif + +namespace nix { + +static std::pair<std::string, std::string> split(const string & s) +{ + size_t colon = s.find(':'); + if (colon == std::string::npos || colon == 0) + return {"", ""}; + return {std::string(s, 0, colon), std::string(s, colon + 1)}; +} + +Key::Key(const string & s) +{ + auto ss = split(s); + + name = ss.first; + key = ss.second; + + if (name == "" || key == "") + throw Error("secret key is corrupt"); + + key = base64Decode(key); +} + +SecretKey::SecretKey(const string & s) + : Key(s) +{ +#if HAVE_SODIUM + if (key.size() != crypto_sign_SECRETKEYBYTES) + throw Error("secret key is not valid"); +#endif +} + +[[noreturn]] static void noSodium() +{ + throw Error("Nix was not compiled with libsodium, required for signed binary cache support"); +} + +std::string SecretKey::signDetached(const std::string & data) const +{ +#if HAVE_SODIUM + unsigned char sig[crypto_sign_BYTES]; + unsigned long long sigLen; + crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(), + (unsigned char *) key.data()); + return name + ":" + base64Encode(std::string((char *) sig, sigLen)); +#else + noSodium(); +#endif +} + +PublicKey::PublicKey(const string & s) + : Key(s) +{ +#if HAVE_SODIUM + if (key.size() != crypto_sign_PUBLICKEYBYTES) + throw Error("public key is not valid"); +#endif +} + +bool verifyDetached(const std::string & data, const std::string & sig, + const PublicKeys & publicKeys) +{ +#if HAVE_SODIUM + auto ss = split(sig); + + auto key = publicKeys.find(ss.first); + if (key == publicKeys.end()) return false; + + auto sig2 = base64Decode(ss.second); + if (sig2.size() != crypto_sign_BYTES) + throw Error("signature is not valid"); + + return crypto_sign_verify_detached((unsigned char *) sig2.data(), + (unsigned char *) data.data(), data.size(), + (unsigned char *) key->second.key.data()) == 0; +#else + noSodium(); +#endif +} + +} diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh new file mode 100644 index 000000000..a1489e753 --- /dev/null +++ b/src/libstore/crypto.hh @@ -0,0 +1,40 @@ +#pragma once + +#include "types.hh" + +#include <map> + +namespace nix { + +struct Key +{ + std::string name; + std::string key; + + /* Construct Key from a string in the format + ‘<name>:<key-in-base64>’. 
*/ + Key(const std::string & s); + +}; + +struct SecretKey : Key +{ + SecretKey(const std::string & s); + + /* Return a detached signature of the given string. */ + std::string signDetached(const std::string & s) const; +}; + +struct PublicKey : Key +{ + PublicKey(const std::string & data); +}; + +typedef std::map<std::string, PublicKey> PublicKeys; + +/* Return true iff ‘sig’ is a correct signature over ‘data’ using one + of the given public keys. */ +bool verifyDetached(const std::string & data, const std::string & sig, + const PublicKeys & publicKeys); + +} diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 01ce1ea2f..e754e82fb 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -114,6 +114,8 @@ struct Curl curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallback_); curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, (void *) &curl); curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0); + + curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); } ~Curl() diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 48fb2167a..e082f6714 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -83,7 +83,7 @@ void LocalStore::addIndirectRoot(const Path & path) } -Path addPermRoot(ref<Store> store, const Path & _storePath, +Path Store::addPermRoot(const Path & _storePath, const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir) { Path storePath(canonPath(_storePath)); @@ -101,7 +101,7 @@ Path addPermRoot(ref<Store> store, const Path & _storePath, if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot)))) throw Error(format("cannot create symlink ‘%1%’; already exists") % gcRoot); makeSymlink(gcRoot, storePath); - store->addIndirectRoot(gcRoot); + addIndirectRoot(gcRoot); } else { @@ -127,7 +127,7 @@ Path addPermRoot(ref<Store> store, const Path & _storePath, check if the root is in a directory in or linked from the gcroots directory. */ if (settings.checkRootReachability) { - Roots roots = store->findRoots(); + Roots roots = findRoots(); if (roots.find(gcRoot) == roots.end()) printMsg(lvlError, format( @@ -139,7 +139,7 @@ Path addPermRoot(ref<Store> store, const Path & _storePath, /* Grab the global GC root, causing us to block while a GC is in progress. This prevents the set of permanent roots from increasing while a GC is in progress. */ - store->syncWithGC(); + syncWithGC(); return gcRoot; } @@ -608,6 +608,9 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific; + if (state.shouldDelete) + deletePath(reservedPath); + /* Acquire the global GC root. This prevents a) New roots from being added. b) Processes from creating new temporary root files. */ diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc new file mode 100644 index 000000000..5714688e0 --- /dev/null +++ b/src/libstore/local-binary-cache-store.cc @@ -0,0 +1,44 @@ +#include "local-binary-cache-store.hh" + +namespace nix { + +LocalBinaryCacheStore::LocalBinaryCacheStore(std::shared_ptr<Store> localStore, + const Path & secretKeyFile, const Path & publicKeyFile, + const Path & binaryCacheDir) + : BinaryCacheStore(localStore, secretKeyFile, publicKeyFile) + , binaryCacheDir(binaryCacheDir) +{ +} + +void LocalBinaryCacheStore::init() +{ + createDirs(binaryCacheDir + "/nar"); + BinaryCacheStore::init(); +} + +static void atomicWrite(const Path & path, const std::string & s) +{ + Path tmp = path + ".tmp." 
+ std::to_string(getpid()); + AutoDelete del(tmp, false); + writeFile(tmp, s); + if (rename(tmp.c_str(), path.c_str())) + throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % path); + del.cancel(); +} + +bool LocalBinaryCacheStore::fileExists(const std::string & path) +{ + return pathExists(binaryCacheDir + "/" + path); +} + +void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::string & data) +{ + atomicWrite(binaryCacheDir + "/" + path, data); +} + +std::string LocalBinaryCacheStore::getFile(const std::string & path) +{ + return readFile(binaryCacheDir + "/" + path); +} + +} diff --git a/src/libstore/local-binary-cache-store.hh b/src/libstore/local-binary-cache-store.hh new file mode 100644 index 000000000..0303ebe73 --- /dev/null +++ b/src/libstore/local-binary-cache-store.hh @@ -0,0 +1,31 @@ +#pragma once + +#include "binary-cache-store.hh" + +namespace nix { + +class LocalBinaryCacheStore : public BinaryCacheStore +{ +private: + + Path binaryCacheDir; + +public: + + LocalBinaryCacheStore(std::shared_ptr<Store> localStore, + const Path & secretKeyFile, const Path & publicKeyFile, + const Path & binaryCacheDir); + + void init() override; + +protected: + + bool fileExists(const std::string & path) override; + + void upsertFile(const std::string & path, const std::string & data) override; + + std::string getFile(const std::string & path) override; + +}; + +} diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 13179459f..33f256912 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -40,10 +40,7 @@ MakeError(SQLiteError, Error); MakeError(SQLiteBusy, SQLiteError); -static void throwSQLiteError(sqlite3 * db, const format & f) - __attribute__ ((noreturn)); - -static void throwSQLiteError(sqlite3 * db, const format & f) +[[noreturn]] static void throwSQLiteError(sqlite3 * db, const format & f) { int err = sqlite3_errcode(db); if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) { @@ -219,8 +216,9 @@ void checkStoreNotSymlink() } -LocalStore::LocalStore(bool reserveSpace) - : didSetSubstituterEnv(false) +LocalStore::LocalStore() + : reservedPath(settings.nixDBPath + "/reserved") + , didSetSubstituterEnv(false) { schemaPath = settings.nixDBPath + "/schema"; @@ -279,25 +277,20 @@ LocalStore::LocalStore(bool reserveSpace) needed, we reserve some dummy space that we can free just before doing a garbage collection. 
*/ try { - Path reservedPath = settings.nixDBPath + "/reserved"; - if (reserveSpace) { - struct stat st; - if (stat(reservedPath.c_str(), &st) == -1 || - st.st_size != settings.reservedSize) - { - AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT, 0600); - int res = -1; + struct stat st; + if (stat(reservedPath.c_str(), &st) == -1 || + st.st_size != settings.reservedSize) + { + AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT, 0600); + int res = -1; #if HAVE_POSIX_FALLOCATE - res = posix_fallocate(fd, 0, settings.reservedSize); + res = posix_fallocate(fd, 0, settings.reservedSize); #endif - if (res == -1) { - writeFull(fd, string(settings.reservedSize, 'X')); - ftruncate(fd, settings.reservedSize); - } + if (res == -1) { + writeFull(fd, string(settings.reservedSize, 'X')); + ftruncate(fd, settings.reservedSize); } } - else - deletePath(reservedPath); } catch (SysError & e) { /* don't care about errors */ } @@ -694,7 +687,7 @@ unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool che { SQLiteStmtUse use(stmtRegisterValidPath); stmtRegisterValidPath.bind(info.path); - stmtRegisterValidPath.bind("sha256:" + printHash(info.hash)); + stmtRegisterValidPath.bind("sha256:" + printHash(info.narHash)); stmtRegisterValidPath.bind(info.registrationTime == 0 ? time(0) : info.registrationTime); if (info.deriver != "") stmtRegisterValidPath.bind(info.deriver); @@ -845,7 +838,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path) const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1); assert(s); - info.hash = parseHashField(path, s); + info.narHash = parseHashField(path, s); info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2); @@ -883,7 +876,7 @@ void LocalStore::updatePathInfo(const ValidPathInfo & info) stmtUpdatePathInfo.bind64(info.narSize); else stmtUpdatePathInfo.bind(); // null - stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash)); + stmtUpdatePathInfo.bind("sha256:" + printHash(info.narHash)); stmtUpdatePathInfo.bind(info.path); if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE) throwSQLiteError(db, format("updating info of path ‘%1%’ in database") % info.path); @@ -953,14 +946,6 @@ PathSet LocalStore::queryAllValidPaths() } -void LocalStore::queryReferences(const Path & path, - PathSet & references) -{ - ValidPathInfo info = queryPathInfo(path); - references.insert(info.references.begin(), info.references.end()); -} - - void LocalStore::queryReferrers_(const Path & path, PathSet & referrers) { SQLiteStmtUse use(stmtQueryReferrers); @@ -1064,7 +1049,7 @@ StringSet LocalStore::queryDerivationOutputNames(const Path & path) Path LocalStore::queryPathFromHashPart(const string & hashPart) { - if (hashPart.size() != 32) throw Error("invalid hash part"); + if (hashPart.size() != storePathHashLen) throw Error("invalid hash part"); Path prefix = settings.nixStore + "/" + hashPart; @@ -1282,7 +1267,7 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths, Hash LocalStore::queryPathHash(const Path & path) { - return queryPathInfo(path).hash; + return queryPathInfo(path).narHash; } @@ -1306,7 +1291,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) PathSet paths; for (auto & i : infos) { - assert(i.hash.type == htSHA256); + assert(i.narHash.type == htSHA256); if (isValidPath_(i.path)) updatePathInfo(i); else @@ -1380,7 +1365,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, if (repair || !isValidPath(dstPath)) { - if (pathExists(dstPath)) 
deletePath(dstPath); + deletePath(dstPath); if (recursive) { StringSource source(dump); @@ -1405,7 +1390,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, ValidPathInfo info; info.path = dstPath; - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; registerValidPath(info); } @@ -1449,20 +1434,22 @@ Path LocalStore::addTextToStore(const string & name, const string & s, if (repair || !isValidPath(dstPath)) { - if (pathExists(dstPath)) deletePath(dstPath); + deletePath(dstPath); writeFile(dstPath, s); canonicalisePathMetaData(dstPath, -1); - HashResult hash = hashPath(htSHA256, dstPath); + StringSink sink; + dumpString(s, sink); + auto hash = hashString(htSHA256, sink.s); optimisePath(dstPath); ValidPathInfo info; info.path = dstPath; - info.hash = hash.first; - info.narSize = hash.second; + info.narHash = hash; + info.narSize = sink.s.size(); info.references = references; registerValidPath(info); } @@ -1493,9 +1480,6 @@ struct HashAndWriteSink : Sink }; -#define EXPORT_MAGIC 0x4558494e - - static void checkSecrecy(const Path & path) { struct stat st; @@ -1532,7 +1516,7 @@ void LocalStore::exportPath(const Path & path, bool sign, PathSet references; queryReferences(path, references); - hashAndWriteSink << EXPORT_MAGIC << path << references << queryDeriver(path); + hashAndWriteSink << exportMagic << path << references << queryDeriver(path); if (sign) { Hash hash = hashAndWriteSink.currentHash(); @@ -1608,8 +1592,8 @@ Path LocalStore::importPath(bool requireSignature, Source & source) restorePath(unpacked, hashAndReadSource); - unsigned int magic = readInt(hashAndReadSource); - if (magic != EXPORT_MAGIC) + uint32_t magic = readInt(hashAndReadSource); + if (magic != exportMagic) throw Error("Nix archive cannot be imported; wrong format"); Path dstPath = readStorePath(hashAndReadSource); @@ -1675,7 +1659,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) if (!isValidPath(dstPath)) { - if (pathExists(dstPath)) deletePath(dstPath); + deletePath(dstPath); if (rename(unpacked.c_str(), dstPath.c_str()) == -1) throw SysError(format("cannot move ‘%1%’ to ‘%2%’") @@ -1691,7 +1675,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) ValidPathInfo info; info.path = dstPath; - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; info.references = references; info.deriver = deriver != "" && isValidPath(deriver) ? deriver : ""; @@ -1775,21 +1759,21 @@ bool LocalStore::verifyStore(bool checkContents, bool repair) /* Check the content hash (optionally - slow). */ printMsg(lvlTalkative, format("checking contents of ‘%1%’") % i); - HashResult current = hashPath(info.hash.type, i); + HashResult current = hashPath(info.narHash.type, i); - if (info.hash != nullHash && info.hash != current.first) { + if (info.narHash != nullHash && info.narHash != current.first) { printMsg(lvlError, format("path ‘%1%’ was modified! " "expected hash ‘%2%’, got ‘%3%’") - % i % printHash(info.hash) % printHash(current.first)); + % i % printHash(info.narHash) % printHash(current.first)); if (repair) repairPath(i); else errors = true; } else { bool update = false; /* Fill in missing hashes. 
*/ - if (info.hash == nullHash) { + if (info.narHash == nullHash) { printMsg(lvlError, format("fixing missing hash on ‘%1%’") % i); - info.hash = current.first; + info.narHash = current.first; update = true; } @@ -1878,9 +1862,9 @@ bool LocalStore::pathContentsGood(const Path & path) if (!pathExists(path)) res = false; else { - HashResult current = hashPath(info.hash.type, path); + HashResult current = hashPath(info.narHash.type, path); Hash nullHash(htSHA256); - res = info.hash == nullHash || info.hash == current.first; + res = info.narHash == nullHash || info.narHash == current.first; } pathContentsGoodCache[path] = res; if (!res) printMsg(lvlError, format("path ‘%1%’ is corrupted or missing!") % path); @@ -1932,7 +1916,7 @@ ValidPathInfo LocalStore::queryPathInfoOld(const Path & path) } else if (name == "Deriver") { res.deriver = value; } else if (name == "Hash") { - res.hash = parseHashField(path, value); + res.narHash = parseHashField(path, value); } else if (name == "Registered-At") { int n = 0; string2Int(value, n); diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index b6d39d345..5582acd0f 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -88,11 +88,13 @@ private: Path linksDir; + Path reservedPath; + public: /* Initialise the local store, upgrading the schema if necessary. */ - LocalStore(bool reserveSpace = true); + LocalStore(); ~LocalStore(); @@ -108,8 +110,6 @@ public: Hash queryPathHash(const Path & path) override; - void queryReferences(const Path & path, PathSet & references) override; - void queryReferrers(const Path & path, PathSet & referrers) override; Path queryDeriver(const Path & path) override; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index e78f47949..9a01596c3 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -8,7 +8,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc) libstore_LIBS = libutil libformat -libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) +libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) ifeq ($(OS), SunOS) libstore_LDFLAGS += -lsocket diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc new file mode 100644 index 000000000..e9260a09b --- /dev/null +++ b/src/libstore/nar-info.cc @@ -0,0 +1,134 @@ +#include "crypto.hh" +#include "globals.hh" +#include "nar-info.hh" + +namespace nix { + +NarInfo::NarInfo(const std::string & s, const std::string & whence) +{ + auto corrupt = [&]() { + throw Error("NAR info file ‘%1%’ is corrupt"); + }; + + auto parseHashField = [&](const string & s) { + string::size_type colon = s.find(':'); + if (colon == string::npos) corrupt(); + HashType ht = parseHashType(string(s, 0, colon)); + if (ht == htUnknown) corrupt(); + return parseHash16or32(ht, string(s, colon + 1)); + }; + + size_t pos = 0; + while (pos < s.size()) { + + size_t colon = s.find(':', pos); + if (colon == std::string::npos) corrupt(); + + std::string name(s, pos, colon - pos); + + size_t eol = s.find('\n', colon + 2); + if (eol == std::string::npos) corrupt(); + + std::string value(s, colon + 2, eol - colon - 2); + + if (name == "StorePath") { + if (!isStorePath(value)) corrupt(); + path = value; + } + else if (name == "URL") + url = value; + else if (name == "Compression") + compression = value; + else if (name == "FileHash") + fileHash = parseHashField(value); + else if (name == "FileSize") { + if (!string2Int(value, fileSize)) corrupt(); + } + else if (name == "NarHash") + narHash = parseHashField(value); + else if (name == "NarSize") { 
+ if (!string2Int(value, narSize)) corrupt(); + } + else if (name == "References") { + auto refs = tokenizeString<Strings>(value, " "); + if (!references.empty()) corrupt(); + for (auto & r : refs) { + auto r2 = settings.nixStore + "/" + r; + if (!isStorePath(r2)) corrupt(); + references.insert(r2); + } + } + else if (name == "Deriver") { + auto p = settings.nixStore + "/" + value; + if (!isStorePath(p)) corrupt(); + deriver = p; + } + else if (name == "System") + system = value; + else if (name == "Sig") + sig = value; + + pos = eol + 1; + } + + if (compression == "") compression = "bzip2"; + + if (path.empty() || url.empty()) corrupt(); +} + +std::string NarInfo::to_string() const +{ + std::string res; + res += "StorePath: " + path + "\n"; + res += "URL: " + url + "\n"; + assert(compression != ""); + res += "Compression: " + compression + "\n"; + assert(fileHash.type == htSHA256); + res += "FileHash: sha256:" + printHash32(fileHash) + "\n"; + res += "FileSize: " + std::to_string(fileSize) + "\n"; + assert(narHash.type == htSHA256); + res += "NarHash: sha256:" + printHash32(narHash) + "\n"; + res += "NarSize: " + std::to_string(narSize) + "\n"; + + res += "References: " + concatStringsSep(" ", shortRefs()) + "\n"; + + if (!deriver.empty()) + res += "Deriver: " + baseNameOf(deriver) + "\n"; + + if (!system.empty()) + res += "System: " + system + "\n"; + + if (!sig.empty()) + res += "Sig: " + sig + "\n"; + + return res; +} + +std::string NarInfo::fingerprint() const +{ + return + "1;" + path + ";" + + printHashType(narHash.type) + ":" + printHash32(narHash) + ";" + + std::to_string(narSize) + ";" + + concatStringsSep(",", references); +} + +Strings NarInfo::shortRefs() const +{ + Strings refs; + for (auto & r : references) + refs.push_back(baseNameOf(r)); + return refs; +} + +void NarInfo::sign(const SecretKey & secretKey) +{ + sig = secretKey.signDetached(fingerprint()); +} + +bool NarInfo::checkSignature(const PublicKeys & publicKeys) const +{ + return sig != "" && verifyDetached(fingerprint(), sig, publicKeys); +} + +} diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh new file mode 100644 index 000000000..22e27cb42 --- /dev/null +++ b/src/libstore/nar-info.hh @@ -0,0 +1,43 @@ +#pragma once + +#include "types.hh" +#include "hash.hh" +#include "store-api.hh" + +namespace nix { + +struct NarInfo : ValidPathInfo +{ + std::string url; + std::string compression; + Hash fileHash; + uint64_t fileSize = 0; + std::string system; + std::string sig; // FIXME: support multiple signatures + + NarInfo() { } + NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } + NarInfo(const std::string & s, const std::string & whence); + + std::string to_string() const; + + /* Return a fingerprint of the store path to be used in binary + cache signatures. It contains the store path, the base-32 + SHA-256 hash of the NAR serialisation of the path, the size of + the NAR, and the sorted references. The size field is strictly + speaking superfluous, but might prevent endless/excessive data + attacks. */ + std::string fingerprint() const; + + void sign(const SecretKey & secretKey); + + /* Return true iff this .narinfo is signed by one of the specified + keys. 
*/ + bool checkSignature(const PublicKeys & publicKeys) const; + +private: + + Strings shortRefs() const; +}; + +} diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index 8ab23cd31..cc83a838e 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -108,7 +108,7 @@ Path createGeneration(ref<Store> store, Path profile, Path outPath) user environment etc. we've just built. */ Path generation; makeName(profile, num + 1, generation); - addPermRoot(store, outPath, generation, false, true); + store->addPermRoot(outPath, generation, false, true); return generation; } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 679210d4c..a1ac169c2 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -6,6 +6,7 @@ #include "affinity.hh" #include "globals.hh" #include "derivations.hh" +#include "pool.hh" #include <sys/types.h> #include <sys/stat.h> @@ -13,9 +14,8 @@ #include <sys/un.h> #include <errno.h> #include <fcntl.h> - -#include <iostream> #include <unistd.h> + #include <cstring> namespace nix { @@ -39,62 +39,25 @@ template<class T> T readStorePaths(Source & from) template PathSet readStorePaths(Source & from); -RemoteStore::RemoteStore() +RemoteStore::RemoteStore(size_t maxConnections) + : connections(make_ref<Pool<Connection>>( + maxConnections, + [this]() { return openConnection(); }, + [](const ref<Connection> & r) { return r->to.good() && r->from.good(); } + )) { - initialised = false; } -void RemoteStore::openConnection(bool reserveSpace) +ref<RemoteStore::Connection> RemoteStore::openConnection() { - if (initialised) return; - initialised = true; + auto conn = make_ref<Connection>(); /* Connect to a daemon that does the privileged work for us. */ - connectToDaemon(); - - from.fd = fdSocket; - to.fd = fdSocket; - - /* Send the magic greeting, check for the reply. */ - try { - to << WORKER_MAGIC_1; - to.flush(); - unsigned int magic = readInt(from); - if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); - - daemonVersion = readInt(from); - if (GET_PROTOCOL_MAJOR(daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) - throw Error("Nix daemon protocol version not supported"); - to << PROTOCOL_VERSION; - - if (GET_PROTOCOL_MINOR(daemonVersion) >= 14) { - int cpu = settings.lockCPU ? 
lockToCurrentCPU() : -1; - if (cpu != -1) - to << 1 << cpu; - else - to << 0; - } - - if (GET_PROTOCOL_MINOR(daemonVersion) >= 11) - to << reserveSpace; - - processStderr(); - } - catch (Error & e) { - throw Error(format("cannot start daemon worker: %1%") % e.msg()); - } - - setOptions(); -} - - -void RemoteStore::connectToDaemon() -{ - fdSocket = socket(PF_UNIX, SOCK_STREAM, 0); - if (fdSocket == -1) + conn->fd = socket(PF_UNIX, SOCK_STREAM, 0); + if (conn->fd == -1) throw SysError("cannot create Unix domain socket"); - closeOnExec(fdSocket); + closeOnExec(conn->fd); string socketPath = settings.nixDaemonSocketFile; @@ -111,111 +74,135 @@ void RemoteStore::connectToDaemon() addr.sun_family = AF_UNIX; if (socketPathRel.size() >= sizeof(addr.sun_path)) throw Error(format("socket path ‘%1%’ is too long") % socketPathRel); - using namespace std; strcpy(addr.sun_path, socketPathRel.c_str()); - if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1) + if (connect(conn->fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath); if (fchdir(fdPrevDir) == -1) throw SysError("couldn't change back to previous directory"); -} + conn->from.fd = conn->fd; + conn->to.fd = conn->fd; -RemoteStore::~RemoteStore() -{ + /* Send the magic greeting, check for the reply. */ try { - to.flush(); - fdSocket.close(); - } catch (...) { - ignoreException(); + conn->to << WORKER_MAGIC_1; + conn->to.flush(); + unsigned int magic = readInt(conn->from); + if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); + + conn->daemonVersion = readInt(conn->from); + if (GET_PROTOCOL_MAJOR(conn->daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) + throw Error("Nix daemon protocol version not supported"); + conn->to << PROTOCOL_VERSION; + + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 14) { + int cpu = settings.lockCPU ? 
lockToCurrentCPU() : -1; + if (cpu != -1) + conn->to << 1 << cpu; + else + conn->to << 0; + } + + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 11) + conn->to << false; + + conn->processStderr(); } + catch (Error & e) { + throw Error(format("cannot start daemon worker: %1%") % e.msg()); + } + + setOptions(conn); + + return conn; } -void RemoteStore::setOptions() +void RemoteStore::setOptions(ref<Connection> conn) { - to << wopSetOptions + conn->to << wopSetOptions << settings.keepFailed << settings.keepGoing << settings.tryFallback << verbosity << settings.maxBuildJobs << settings.maxSilentTime; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 2) - to << settings.useBuildHook; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) - to << settings.buildVerbosity + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 2) + conn->to << settings.useBuildHook; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 4) + conn->to << settings.buildVerbosity << logType << settings.printBuildTrace; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 6) - to << settings.buildCores; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) - to << settings.useSubstitutes; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 6) + conn->to << settings.buildCores; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 10) + conn->to << settings.useSubstitutes; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) { + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) { Settings::SettingsMap overrides = settings.getOverrides(); if (overrides["ssh-auth-sock"] == "") overrides["ssh-auth-sock"] = getEnv("SSH_AUTH_SOCK"); - to << overrides.size(); + conn->to << overrides.size(); for (auto & i : overrides) - to << i.first << i.second; + conn->to << i.first << i.second; } - processStderr(); + conn->processStderr(); } bool RemoteStore::isValidPath(const Path & path) { - openConnection(); - to << wopIsValidPath << path; - processStderr(); - unsigned int reply = readInt(from); + auto conn(connections->get()); + conn->to << wopIsValidPath << path; + conn->processStderr(); + unsigned int reply = readInt(conn->from); return reply != 0; } PathSet RemoteStore::queryValidPaths(const PathSet & paths) { - openConnection(); - if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + auto conn(connections->get()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) if (isValidPath(i)) res.insert(i); return res; } else { - to << wopQueryValidPaths << paths; - processStderr(); - return readStorePaths<PathSet>(from); + conn->to << wopQueryValidPaths << paths; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } } PathSet RemoteStore::queryAllValidPaths() { - openConnection(); - to << wopQueryAllValidPaths; - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryAllValidPaths; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths) { - openConnection(); - if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + auto conn(connections->get()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) { - to << wopHasSubstitutes << i; - processStderr(); - if (readInt(from)) res.insert(i); + conn->to << wopHasSubstitutes << i; + conn->processStderr(); + if (readInt(conn->from)) res.insert(i); } return res; } else { - to << wopQuerySubstitutablePaths << paths; - processStderr(); - return readStorePaths<PathSet>(from); + conn->to << wopQuerySubstitutablePaths << paths; + 
conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } } @@ -225,39 +212,39 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, { if (paths.empty()) return; - openConnection(); + auto conn(connections->get()); - if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 3) return; - if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { for (auto & i : paths) { SubstitutablePathInfo info; - to << wopQuerySubstitutablePathInfo << i; - processStderr(); - unsigned int reply = readInt(from); + conn->to << wopQuerySubstitutablePathInfo << i; + conn->processStderr(); + unsigned int reply = readInt(conn->from); if (reply == 0) continue; - info.deriver = readString(from); + info.deriver = readString(conn->from); if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths<PathSet>(from); - info.downloadSize = readLongLong(from); - info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0; + info.references = readStorePaths<PathSet>(conn->from); + info.downloadSize = readLongLong(conn->from); + info.narSize = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 7 ? readLongLong(conn->from) : 0; infos[i] = info; } } else { - to << wopQuerySubstitutablePathInfos << paths; - processStderr(); - unsigned int count = readInt(from); + conn->to << wopQuerySubstitutablePathInfos << paths; + conn->processStderr(); + unsigned int count = readInt(conn->from); for (unsigned int n = 0; n < count; n++) { - Path path = readStorePath(from); + Path path = readStorePath(conn->from); SubstitutablePathInfo & info(infos[path]); - info.deriver = readString(from); + info.deriver = readString(conn->from); if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths<PathSet>(from); - info.downloadSize = readLongLong(from); - info.narSize = readLongLong(from); + info.references = readStorePaths<PathSet>(conn->from); + info.downloadSize = readLongLong(conn->from); + info.narSize = readLongLong(conn->from); } } @@ -266,27 +253,27 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, ValidPathInfo RemoteStore::queryPathInfo(const Path & path) { - openConnection(); - to << wopQueryPathInfo << path; - processStderr(); + auto conn(connections->get()); + conn->to << wopQueryPathInfo << path; + conn->processStderr(); ValidPathInfo info; info.path = path; - info.deriver = readString(from); + info.deriver = readString(conn->from); if (info.deriver != "") assertStorePath(info.deriver); - info.hash = parseHash(htSHA256, readString(from)); - info.references = readStorePaths<PathSet>(from); - info.registrationTime = readInt(from); - info.narSize = readLongLong(from); + info.narHash = parseHash(htSHA256, readString(conn->from)); + info.references = readStorePaths<PathSet>(conn->from); + info.registrationTime = readInt(conn->from); + info.narSize = readLongLong(conn->from); return info; } Hash RemoteStore::queryPathHash(const Path & path) { - openConnection(); - to << wopQueryPathHash << path; - processStderr(); - string hash = readString(from); + auto conn(connections->get()); + conn->to << wopQueryPathHash << path; + conn->processStderr(); + string hash = readString(conn->from); return parseHash(htSHA256, hash); } @@ -294,10 +281,10 @@ Hash RemoteStore::queryPathHash(const Path & path) void RemoteStore::queryReferences(const Path & path, PathSet & references) { - openConnection(); - to << wopQueryReferences << path; - processStderr(); 
- PathSet references2 = readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryReferences << path; + conn->processStderr(); + PathSet references2 = readStorePaths<PathSet>(conn->from); references.insert(references2.begin(), references2.end()); } @@ -305,20 +292,20 @@ void RemoteStore::queryReferences(const Path & path, void RemoteStore::queryReferrers(const Path & path, PathSet & referrers) { - openConnection(); - to << wopQueryReferrers << path; - processStderr(); - PathSet referrers2 = readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryReferrers << path; + conn->processStderr(); + PathSet referrers2 = readStorePaths<PathSet>(conn->from); referrers.insert(referrers2.begin(), referrers2.end()); } Path RemoteStore::queryDeriver(const Path & path) { - openConnection(); - to << wopQueryDeriver << path; - processStderr(); - Path drvPath = readString(from); + auto conn(connections->get()); + conn->to << wopQueryDeriver << path; + conn->processStderr(); + Path drvPath = readString(conn->from); if (drvPath != "") assertStorePath(drvPath); return drvPath; } @@ -326,37 +313,37 @@ Path RemoteStore::queryDeriver(const Path & path) PathSet RemoteStore::queryValidDerivers(const Path & path) { - openConnection(); - to << wopQueryValidDerivers << path; - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryValidDerivers << path; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::queryDerivationOutputs(const Path & path) { - openConnection(); - to << wopQueryDerivationOutputs << path; - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryDerivationOutputs << path; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::queryDerivationOutputNames(const Path & path) { - openConnection(); - to << wopQueryDerivationOutputNames << path; - processStderr(); - return readStrings<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryDerivationOutputNames << path; + conn->processStderr(); + return readStrings<PathSet>(conn->from); } Path RemoteStore::queryPathFromHashPart(const string & hashPart) { - openConnection(); - to << wopQueryPathFromHashPart << hashPart; - processStderr(); - Path path = readString(from); + auto conn(connections->get()); + conn->to << wopQueryPathFromHashPart << hashPart; + conn->processStderr(); + Path path = readString(conn->from); if (!path.empty()) assertStorePath(path); return path; } @@ -367,32 +354,32 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - openConnection(); + auto conn(connections->get()); Path srcPath(absPath(_srcPath)); - to << wopAddToStore << name + conn->to << wopAddToStore << name << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */ << (recursive ? 1 : 0) << printHashType(hashAlgo); try { - to.written = 0; - to.warn = true; - dumpPath(srcPath, to, filter); - to.warn = false; - processStderr(); + conn->to.written = 0; + conn->to.warn = true; + dumpPath(srcPath, conn->to, filter); + conn->to.warn = false; + conn->processStderr(); } catch (SysError & e) { /* Daemon closed while we were sending the path. Probably OOM or I/O error. 
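             In that case the daemon has most likely written an error message
             to the socket before closing, so processStderr() is retried below
             in order to surface that message instead of a bare EPIPE failure.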
*/ if (e.errNo == EPIPE) try { - processStderr(); + conn->processStderr(); } catch (EndOfFile & e) { } throw; } - return readStorePath(from); + return readStorePath(conn->from); } @@ -401,43 +388,43 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - openConnection(); - to << wopAddTextToStore << name << s << references; + auto conn(connections->get()); + conn->to << wopAddTextToStore << name << s << references; - processStderr(); - return readStorePath(from); + conn->processStderr(); + return readStorePath(conn->from); } void RemoteStore::exportPath(const Path & path, bool sign, Sink & sink) { - openConnection(); - to << wopExportPath << path << (sign ? 1 : 0); - processStderr(&sink); /* sink receives the actual data */ - readInt(from); + auto conn(connections->get()); + conn->to << wopExportPath << path << (sign ? 1 : 0); + conn->processStderr(&sink); /* sink receives the actual data */ + readInt(conn->from); } Paths RemoteStore::importPaths(bool requireSignature, Source & source) { - openConnection(); - to << wopImportPaths; + auto conn(connections->get()); + conn->to << wopImportPaths; /* We ignore requireSignature, since the worker forces it to true anyway. */ - processStderr(0, &source); - return readStorePaths<Paths>(from); + conn->processStderr(0, &source); + return readStorePaths<Paths>(conn->from); } void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) { - openConnection(); - to << wopBuildPaths; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 13) { - to << drvPaths; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 15) - to << buildMode; + auto conn(connections->get()); + conn->to << wopBuildPaths; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) { + conn->to << drvPaths; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15) + conn->to << buildMode; else /* Old daemons did not take a 'buildMode' parameter, so we need to validate it here on the client side. 
*/ @@ -449,22 +436,22 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) PathSet drvPaths2; for (auto & i : drvPaths) drvPaths2.insert(string(i, 0, i.find('!'))); - to << drvPaths2; + conn->to << drvPaths2; } - processStderr(); - readInt(from); + conn->processStderr(); + readInt(conn->from); } BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) { - openConnection(); - to << wopBuildDerivation << drvPath << drv << buildMode; - processStderr(); + auto conn(connections->get()); + conn->to << wopBuildDerivation << drvPath << drv << buildMode; + conn->processStderr(); BuildResult res; unsigned int status; - from >> status >> res.errorMsg; + conn->from >> status >> res.errorMsg; res.status = (BuildResult::Status) status; return res; } @@ -472,50 +459,50 @@ BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDeriva void RemoteStore::ensurePath(const Path & path) { - openConnection(); - to << wopEnsurePath << path; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopEnsurePath << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::addTempRoot(const Path & path) { - openConnection(); - to << wopAddTempRoot << path; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopAddTempRoot << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::addIndirectRoot(const Path & path) { - openConnection(); - to << wopAddIndirectRoot << path; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopAddIndirectRoot << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::syncWithGC() { - openConnection(); - to << wopSyncWithGC; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopSyncWithGC; + conn->processStderr(); + readInt(conn->from); } Roots RemoteStore::findRoots() { - openConnection(); - to << wopFindRoots; - processStderr(); - unsigned int count = readInt(from); + auto conn(connections->get()); + conn->to << wopFindRoots; + conn->processStderr(); + unsigned int count = readInt(conn->from); Roots result; while (count--) { - Path link = readString(from); - Path target = readStorePath(from); + Path link = readString(conn->from); + Path target = readStorePath(conn->from); result[link] = target; } return result; @@ -524,56 +511,68 @@ Roots RemoteStore::findRoots() void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) { - openConnection(false); + auto conn(connections->get()); - to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness + conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness << options.maxFreed << 0; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 5) + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 5) /* removed options */ - to << 0 << 0; + conn->to << 0 << 0; - processStderr(); + conn->processStderr(); - results.paths = readStrings<PathSet>(from); - results.bytesFreed = readLongLong(from); - readLongLong(from); // obsolete + results.paths = readStrings<PathSet>(conn->from); + results.bytesFreed = readLongLong(conn->from); + readLongLong(conn->from); // obsolete } PathSet RemoteStore::queryFailedPaths() { - openConnection(); - to << wopQueryFailedPaths; - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryFailedPaths; + conn->processStderr(); 
+ return readStorePaths<PathSet>(conn->from); } void RemoteStore::clearFailedPaths(const PathSet & paths) { - openConnection(); - to << wopClearFailedPaths << paths; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopClearFailedPaths << paths; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::optimiseStore() { - openConnection(); - to << wopOptimiseStore; - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopOptimiseStore; + conn->processStderr(); + readInt(conn->from); } bool RemoteStore::verifyStore(bool checkContents, bool repair) { - openConnection(); - to << wopVerifyStore << checkContents << repair; - processStderr(); - return readInt(from) != 0; + auto conn(connections->get()); + conn->to << wopVerifyStore << checkContents << repair; + conn->processStderr(); + return readInt(conn->from) != 0; +} + + +RemoteStore::Connection::~Connection() +{ + try { + to.flush(); + fd.close(); + } catch (...) { + ignoreException(); + } } -void RemoteStore::processStderr(Sink * sink, Source * source) + +void RemoteStore::Connection::processStderr(Sink * sink, Source * source) { to.flush(); unsigned int msg; diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index f15182285..ddfb70a66 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -1,5 +1,6 @@ #pragma once +#include <limits> #include <string> #include "store-api.hh" @@ -12,15 +13,14 @@ class Pipe; class Pid; struct FdSink; struct FdSource; +template<typename T> class Pool; class RemoteStore : public Store { public: - RemoteStore(); - - ~RemoteStore(); + RemoteStore(size_t maxConnections = std::numeric_limits<size_t>::max()); /* Implementations of abstract store API methods. */ @@ -91,19 +91,24 @@ public: bool verifyStore(bool checkContents, bool repair) override; private: - AutoCloseFD fdSocket; - FdSink to; - FdSource from; - unsigned int daemonVersion; - bool initialised; - void openConnection(bool reserveSpace = true); + struct Connection + { + AutoCloseFD fd; + FdSink to; + FdSource from; + unsigned int daemonVersion; + + ~Connection(); + + void processStderr(Sink * sink = 0, Source * source = 0); + }; - void processStderr(Sink * sink = 0, Source * source = 0); + ref<Pool<Connection>> connections; - void connectToDaemon(); + ref<Connection> openConnection(); - void setOptions(); + void setOptions(ref<Connection> conn); }; diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index e205fd0e8..7058249f0 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -61,7 +61,14 @@ Path followLinksToStorePath(const Path & path) string storePathToName(const Path & path) { assertStorePath(path); - return string(path, settings.nixStore.size() + 34); + return string(path, settings.nixStore.size() + storePathHashLen + 2); +} + + +string storePathToHash(const Path & path) +{ + assertStorePath(path); + return string(path, settings.nixStore.size() + 1, storePathHashLen); } @@ -217,6 +224,13 @@ Path computeStorePathForText(const string & name, const string & s, } +void Store::queryReferences(const Path & path, PathSet & references) +{ + ValidPathInfo info = queryPathInfo(path); + references.insert(info.references.begin(), info.references.end()); +} + + /* Return a string accepted by decodeValidPathInfo() that registers the specified paths as valid. Note: it's the responsibility of the caller to provide a closure. 
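   Roughly, each path contributes one field per line: the store path,
   optionally its SHA-256 NAR hash and NAR size (when showHash is set),
   the deriver (possibly empty), the number of references, and finally the
   references themselves; decodeValidPathInfo() below reads the same layout
   back.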
*/ @@ -231,7 +245,7 @@ string Store::makeValidityRegistration(const PathSet & paths, ValidPathInfo info = queryPathInfo(i); if (showHash) { - s += printHash(info.hash) + "\n"; + s += printHash(info.narHash) + "\n"; s += (format("%1%\n") % info.narSize).str(); } @@ -256,7 +270,7 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) if (hashGiven) { string s; getline(str, s); - info.hash = parseHash(htSHA256, s); + info.narHash = parseHash(htSHA256, s); getline(str, s); if (!string2Int(s, info.narSize)) throw Error("number expected"); } @@ -299,18 +313,26 @@ void Store::exportPaths(const Paths & paths, #include "local-store.hh" -#include "serialise.hh" #include "remote-store.hh" +#include "local-binary-cache-store.hh" namespace nix { -ref<Store> openStore(bool reserveSpace) +ref<Store> openStoreAt(const std::string & uri) { + if (std::string(uri, 0, 7) == "file://") { + auto store = make_ref<LocalBinaryCacheStore>(std::shared_ptr<Store>(0), + "", "", // FIXME: allow the signing key to be set + std::string(uri, 7)); + store->init(); + return store; + } + enum { mDaemon, mLocal, mAuto } mode; - mode = getEnv("NIX_REMOTE") == "daemon" ? mDaemon : mAuto; + mode = uri == "daemon" ? mDaemon : mAuto; if (mode == mAuto) { if (LocalStore::haveWriteAccess()) @@ -322,8 +344,14 @@ ref<Store> openStore(bool reserveSpace) } return mode == mDaemon - ? make_ref<Store, RemoteStore>() - : make_ref<Store, LocalStore>(reserveSpace); + ? (ref<Store>) make_ref<RemoteStore>() + : (ref<Store>) make_ref<LocalStore>(); +} + + +ref<Store> openStore() +{ + return openStoreAt(getEnv("NIX_REMOTE")); } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 1e4306439..84ede157e 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -12,6 +12,13 @@ namespace nix { +/* Size of the hash part of store paths, in base-32 characters. */ +const size_t storePathHashLen = 32; // i.e. 160 bits + +/* Magic header of exportPath() output. */ +const uint32_t exportMagic = 0x4558494e; + + typedef std::map<Path, Path> Roots; @@ -85,7 +92,7 @@ struct ValidPathInfo { Path path; Path deriver; - Hash hash; + Hash narHash; PathSet references; time_t registrationTime = 0; unsigned long long narSize = 0; // 0 = unknown @@ -95,7 +102,7 @@ struct ValidPathInfo { return path == i.path - && hash == i.hash + && narHash == i.narHash && references == i.references; } }; @@ -156,10 +163,9 @@ public: /* Query the hash of a valid path. */ virtual Hash queryPathHash(const Path & path) = 0; - /* Query the set of outgoing FS references for a store path. The + /* Query the set of outgoing FS references for a store path. The result is not cleared. */ - virtual void queryReferences(const Path & path, - PathSet & references) = 0; + virtual void queryReferences(const Path & path, PathSet & references); /* Queries the set of incoming FS references for a store path. The result is not cleared. */ @@ -255,6 +261,10 @@ public: `path' has disappeared. */ virtual void addIndirectRoot(const Path & path) = 0; + /* Register a permanent GC root. */ + Path addPermRoot(const Path & storePath, + const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false); + /* Acquire the global GC lock, then immediately release it. This function must be called after registering a new permanent root, but before exiting. Otherwise, it is possible that a running @@ -346,6 +356,9 @@ bool isStorePath(const Path & path); /* Extract the name part of the given store path. 
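   For a hypothetical path such as
   /nix/store/bwnxfjg0a2cvq65vp0rfwbsrdq80lh8k-hello-2.10 this yields
   "hello-2.10", while the companion storePathToHash() returns the leading
   32 base-32 characters (storePathHashLen) that follow the store directory.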
*/ string storePathToName(const Path & path); +/* Extract the hash part of the given store path. */ +string storePathToHash(const Path & path); + void checkStoreName(const string & name); @@ -406,14 +419,25 @@ Path computeStorePathForText(const string & name, const string & s, void removeTempRoots(); -/* Register a permanent GC root. */ -Path addPermRoot(ref<Store> store, const Path & storePath, - const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false); +/* Return a Store object to access the Nix store denoted by + ‘uri’ (slight misnomer...). Supported values are: + + * ‘direct’: The Nix store in /nix/store and database in + /nix/var/nix/db, accessed directly. + + * ‘daemon’: The Nix store accessed via a Unix domain socket + connection to nix-daemon. + + * ‘file://<path>’: A binary cache stored in <path>. + + If ‘uri’ is empty, it defaults to ‘direct’ or ‘daemon’ depending on + whether the user has write access to the local Nix store/database. + set to true *unless* you're going to collect garbage. */ +ref<Store> openStoreAt(const std::string & uri); -/* Factory method: open the Nix database, either through the local or - remote implementation. */ -ref<Store> openStore(bool reserveSpace = true); +/* Open the store indicated by the ‘NIX_REMOTE’ environment variable. */ +ref<Store> openStore(); /* Display a set of paths in human-readable form (i.e., between quotes diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 6ee798143..5363496c2 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -29,7 +29,7 @@ bool useCaseHack = false; #endif -static string archiveVersion1 = "nix-archive-1"; +const std::string narVersionMagic1 = "nix-archive-1"; static string caseHackSuffix = "~nix~case~hack~"; @@ -113,11 +113,17 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter) void dumpPath(const Path & path, Sink & sink, PathFilter & filter) { - sink << archiveVersion1; + sink << narVersionMagic1; dump(path, sink, filter); } +void dumpString(const std::string & s, Sink & sink) +{ + sink << narVersionMagic1 << "(" << "type" << "regular" << "contents" << s << ")"; +} + + static SerialisationError badArchive(string s) { return SerialisationError("bad archive: " + s); @@ -214,7 +220,8 @@ static void parse(ParseSink & sink, Source & source, const Path & path) } else if (s == "executable" && type == tpRegular) { - readString(source); + auto s = readString(source); + if (s != "") throw badArchive("executable marker has non-empty value"); sink.isExecutable(); } @@ -275,7 +282,7 @@ void parseDump(ParseSink & sink, Source & source) /* This generally means the integer at the start couldn't be decoded. Ignore and throw the exception below. 
*/ } - if (version != archiveVersion1) + if (version != narVersionMagic1) throw badArchive("input doesn't look like a Nix archive"); parse(sink, source, ""); } diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index c216e9768..90117f5ff 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -55,6 +55,8 @@ extern PathFilter defaultPathFilter; void dumpPath(const Path & path, Sink & sink, PathFilter & filter = defaultPathFilter); +void dumpString(const std::string & s, Sink & sink); + struct ParseSink { virtual void createDirectory(const Path & path) { }; @@ -76,4 +78,7 @@ void restorePath(const Path & path, Source & source); extern bool useCaseHack; +extern const std::string narVersionMagic1; + + } diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index fb4160669..a3fa0dab7 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -6,34 +6,83 @@ namespace nix { +/* RAII wrapper around lzma_stream. */ +struct LzmaStream +{ + lzma_stream strm; + LzmaStream() : strm(LZMA_STREAM_INIT) { }; + ~LzmaStream() { lzma_end(&strm); }; + lzma_stream & operator()() { return strm; } +}; + +std::string compressXZ(const std::string & in) +{ + LzmaStream strm; + + // FIXME: apply the x86 BCJ filter? + + lzma_ret ret = lzma_easy_encoder( + &strm(), 6, LZMA_CHECK_CRC64); + if (ret != LZMA_OK) + throw Error("unable to initialise lzma encoder"); + + lzma_action action = LZMA_RUN; + uint8_t outbuf[BUFSIZ]; + string res; + strm().next_in = (uint8_t *) in.c_str(); + strm().avail_in = in.size(); + strm().next_out = outbuf; + strm().avail_out = sizeof(outbuf); + + while (true) { + + if (strm().avail_in == 0) + action = LZMA_FINISH; + + lzma_ret ret = lzma_code(&strm(), action); + + if (strm().avail_out == 0 || ret == LZMA_STREAM_END) { + res.append((char *) outbuf, sizeof(outbuf) - strm().avail_out); + strm().next_out = outbuf; + strm().avail_out = sizeof(outbuf); + } + + if (ret == LZMA_STREAM_END) + return res; + + if (ret != LZMA_OK) + throw Error("error while decompressing xz file"); + } +} + std::string decompressXZ(const std::string & in) { - lzma_stream strm = LZMA_STREAM_INIT; + LzmaStream strm; lzma_ret ret = lzma_stream_decoder( - &strm, UINT64_MAX, LZMA_CONCATENATED); + &strm(), UINT64_MAX, LZMA_CONCATENATED); if (ret != LZMA_OK) throw Error("unable to initialise lzma decoder"); lzma_action action = LZMA_RUN; uint8_t outbuf[BUFSIZ]; string res; - strm.next_in = (uint8_t *) in.c_str(); - strm.avail_in = in.size(); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + strm().next_in = (uint8_t *) in.c_str(); + strm().avail_in = in.size(); + strm().next_out = outbuf; + strm().avail_out = sizeof(outbuf); while (true) { - if (strm.avail_in == 0) + if (strm().avail_in == 0) action = LZMA_FINISH; - lzma_ret ret = lzma_code(&strm, action); + lzma_ret ret = lzma_code(&strm(), action); - if (strm.avail_out == 0 || ret == LZMA_STREAM_END) { - res.append((char *) outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + if (strm().avail_out == 0 || ret == LZMA_STREAM_END) { + res.append((char *) outbuf, sizeof(outbuf) - strm().avail_out); + strm().next_out = outbuf; + strm().avail_out = sizeof(outbuf); } if (ret == LZMA_STREAM_END) diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index 962ce5ac7..eb1697fc4 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -4,6 +4,8 @@ namespace nix { +std::string compressXZ(const std::string & in); + std::string 
decompressXZ(const std::string & in); } diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh new file mode 100644 index 000000000..4344d6601 --- /dev/null +++ b/src/libutil/lru-cache.hh @@ -0,0 +1,84 @@ +#pragma once + +#include <map> +#include <list> + +namespace nix { + +/* A simple least-recently used cache. Not thread-safe. */ +template<typename Key, typename Value> +class LRUCache +{ +private: + + size_t maxSize; + + // Stupid wrapper to get around circular dependency between Data + // and LRU. + struct LRUIterator; + + using Data = std::map<Key, std::pair<LRUIterator, Value>>; + using LRU = std::list<typename Data::iterator>; + + struct LRUIterator { typename LRU::iterator it; }; + + Data data; + LRU lru; + +public: + + LRUCache(size_t maxSize) : maxSize(maxSize) { } + + /* Insert or upsert an item in the cache. */ + void upsert(const Key & key, const Value & value) + { + erase(key); + + if (data.size() >= maxSize) { + /* Retire the oldest item. */ + auto oldest = lru.begin(); + data.erase(*oldest); + lru.erase(oldest); + } + + auto res = data.emplace(key, std::make_pair(LRUIterator(), value)); + assert(res.second); + auto & i(res.first); + + auto j = lru.insert(lru.end(), i); + + i->second.first.it = j; + } + + bool erase(const Key & key) + { + auto i = data.find(key); + if (i == data.end()) return false; + lru.erase(i->second.first.it); + data.erase(i); + return true; + } + + /* Look up an item in the cache. If it exists, it becomes the most + recently used item. */ + // FIXME: use boost::optional? + Value * get(const Key & key) + { + auto i = data.find(key); + if (i == data.end()) return 0; + + /* Move this item to the back of the LRU list. */ + lru.erase(i->second.first.it); + auto j = lru.insert(lru.end(), i); + i->second.first.it = j; + + return &i->second.second; + } + + size_t size() + { + return data.size(); + } +}; + +} diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh new file mode 100644 index 000000000..f291cd578 --- /dev/null +++ b/src/libutil/pool.hh @@ -0,0 +1,151 @@ +#pragma once + +#include <functional> +#include <limits> +#include <list> +#include <memory> +#include <cassert> + +#include "sync.hh" +#include "ref.hh" + +namespace nix { + +/* This template class implements a simple pool manager of resources + of some type R, such as database connections. It is used as + follows: + + class Connection { ... }; + + Pool<Connection> pool; + + { + auto conn(pool.get()); + conn->exec("select ..."); + } + + Here, the Connection object referenced by ‘conn’ is automatically + returned to the pool when ‘conn’ goes out of scope. +*/ + +template <class R> +class Pool +{ +public: + + /* A function that produces new instances of R on demand. */ + typedef std::function<ref<R>()> Factory; + + /* A function that checks whether an instance of R is still + usable. Unusable instances are removed from the pool. 
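      For example, RemoteStore (remote-store.cc above) constructs its pool
      with a validator returning r->to.good() && r->from.good(), so daemon
      connections whose streams have failed are discarded rather than being
      handed out again.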
*/ + typedef std::function<bool(const ref<R> &)> Validator; + +private: + + Factory factory; + Validator validator; + + struct State + { + size_t inUse = 0; + size_t max; + std::vector<ref<R>> idle; + }; + + Sync<State> state; + + std::condition_variable wakeup; + +public: + + Pool(size_t max = std::numeric_limits<size_t>::max(), + const Factory & factory = []() { return make_ref<R>(); }, + const Validator & validator = [](ref<R> r) { return true; }) + : factory(factory) + , validator(validator) + { + auto state_(state.lock()); + state_->max = max; + } + + ~Pool() + { + auto state_(state.lock()); + assert(!state_->inUse); + state_->max = 0; + state_->idle.clear(); + } + + class Handle + { + private: + Pool & pool; + std::shared_ptr<R> r; + + friend Pool; + + Handle(Pool & pool, std::shared_ptr<R> r) : pool(pool), r(r) { } + + public: + Handle(Handle && h) : pool(h.pool), r(h.r) { h.r.reset(); } + + Handle(const Handle & l) = delete; + + ~Handle() + { + if (!r) return; + { + auto state_(pool.state.lock()); + state_->idle.push_back(ref<R>(r)); + assert(state_->inUse); + state_->inUse--; + } + pool.wakeup.notify_one(); + } + + R * operator -> () { return &*r; } + R & operator * () { return *r; } + }; + + Handle get() + { + { + auto state_(state.lock()); + + /* If we're over the maximum number of instance, we need + to wait until a slot becomes available. */ + while (state_->idle.empty() && state_->inUse >= state_->max) + state_.wait(wakeup); + + while (!state_->idle.empty()) { + auto p = state_->idle.back(); + state_->idle.pop_back(); + if (validator(p)) { + state_->inUse++; + return Handle(*this, p); + } + } + + state_->inUse++; + } + + /* We need to create a new instance. Because that might take a + while, we don't hold the lock in the meantime. */ + try { + Handle h(*this, factory()); + return h; + } catch (...) { + auto state_(state.lock()); + state_->inUse--; + throw; + } + } + + unsigned int count() + { + auto state_(state.lock()); + return state_->idle.size() + state_->inUse; + } +}; + +} diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh new file mode 100644 index 000000000..a6d338d79 --- /dev/null +++ b/src/libutil/ref.hh @@ -0,0 +1,67 @@ +#pragma once + +#include <memory> +#include <exception> + +namespace nix { + +/* A simple non-nullable reference-counted pointer. Actually a wrapper + around std::shared_ptr that prevents non-null constructions. */ +template<typename T> +class ref +{ +private: + + std::shared_ptr<T> p; + +public: + + ref<T>(const ref<T> & r) + : p(r.p) + { } + + explicit ref<T>(const std::shared_ptr<T> & p) + : p(p) + { + if (!p) + throw std::invalid_argument("null pointer cast to ref"); + } + + T* operator ->() const + { + return &*p; + } + + T& operator *() const + { + return *p; + } + + operator std::shared_ptr<T> () + { + return p; + } + + template<typename T2> + operator ref<T2> () + { + return ref<T2>((std::shared_ptr<T2>) p); + } + +private: + + template<typename T2, typename... Args> + friend ref<T2> + make_ref(Args&&... args); + +}; + +template<typename T, typename... Args> +inline ref<T> +make_ref(Args&&... 
args) +{ + auto p = std::make_shared<T>(std::forward<Args>(args)...); + return ref<T>(p); +} + +} diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index f136a1324..c9620e2bf 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -72,7 +72,17 @@ void FdSink::write(const unsigned char * data, size_t len) warned = true; } } - writeFull(fd, data, len); + try { + writeFull(fd, data, len); + } catch (SysError & e) { + _good = true; + } +} + + +bool FdSink::good() +{ + return _good; } @@ -119,12 +129,18 @@ size_t FdSource::readUnbuffered(unsigned char * data, size_t len) checkInterrupt(); n = ::read(fd, (char *) data, bufSize); } while (n == -1 && errno == EINTR); - if (n == -1) throw SysError("reading from file"); - if (n == 0) throw EndOfFile("unexpected end-of-file"); + if (n == -1) { _good = false; throw SysError("reading from file"); } + if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); } return n; } +bool FdSource::good() +{ + return _good; +} + + size_t StringSource::read(unsigned char * data, size_t len) { if (pos == s.size()) throw EndOfFile("end of string reached"); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 979ff849f..9e269f392 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -12,6 +12,7 @@ struct Sink { virtual ~Sink() { } virtual void operator () (const unsigned char * data, size_t len) = 0; + virtual bool good() { return true; } }; @@ -25,7 +26,7 @@ struct BufferedSink : Sink : bufSize(bufSize), bufPos(0), buffer(0) { } ~BufferedSink(); - void operator () (const unsigned char * data, size_t len); + void operator () (const unsigned char * data, size_t len) override; void flush(); @@ -47,6 +48,8 @@ struct Source return the number of bytes stored. If blocks until at least one byte is available. */ virtual size_t read(unsigned char * data, size_t len) = 0; + + virtual bool good() { return true; } }; @@ -60,7 +63,7 @@ struct BufferedSource : Source : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { } ~BufferedSource(); - size_t read(unsigned char * data, size_t len); + size_t read(unsigned char * data, size_t len) override; /* Underlying read call, to be overridden. 
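     FdSource's implementation (serialise.cc above) now clears its _good flag
     when this call fails or hits an unexpected end-of-file; together with
     FdSink::good(), that is what RemoteStore's connection pool validator
     consults to detect dead daemon connections.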
*/ virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; @@ -80,7 +83,12 @@ struct FdSink : BufferedSink FdSink(int fd) : fd(fd), warn(false), written(0) { } ~FdSink(); - void write(const unsigned char * data, size_t len); + void write(const unsigned char * data, size_t len) override; + + bool good() override; + +private: + bool _good = true; }; @@ -90,7 +98,10 @@ struct FdSource : BufferedSource int fd; FdSource() : fd(-1) { } FdSource(int fd) : fd(fd) { } - size_t readUnbuffered(unsigned char * data, size_t len); + size_t readUnbuffered(unsigned char * data, size_t len) override; + bool good() override; +private: + bool _good = true; }; @@ -98,7 +109,7 @@ struct FdSource : BufferedSource struct StringSink : Sink { string s; - void operator () (const unsigned char * data, size_t len); + void operator () (const unsigned char * data, size_t len) override; }; @@ -108,7 +119,7 @@ struct StringSource : Source const string & s; size_t pos; StringSource(const string & _s) : s(_s), pos(0) { } - size_t read(unsigned char * data, size_t len); + size_t read(unsigned char * data, size_t len) override; }; diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh new file mode 100644 index 000000000..c99c098ac --- /dev/null +++ b/src/libutil/sync.hh @@ -0,0 +1,78 @@ +#pragma once + +#include <mutex> +#include <condition_variable> +#include <cassert> + +namespace nix { + +/* This template class ensures synchronized access to a value of type + T. It is used as follows: + + struct Data { int x; ... }; + + Sync<Data> data; + + { + auto data_(data.lock()); + data_->x = 123; + } + + Here, "data" is automatically unlocked when "data_" goes out of + scope. +*/ + +template<class T> +class Sync +{ +private: + std::mutex mutex; + T data; + +public: + + Sync() { } + Sync(const T & data) : data(data) { } + + class Lock + { + private: + Sync * s; + std::unique_lock<std::mutex> lk; + friend Sync; + Lock(Sync * s) : s(s), lk(s->mutex) { } + public: + Lock(Lock && l) : s(l.s) { abort(); } + Lock(const Lock & l) = delete; + ~Lock() { } + T * operator -> () { return &s->data; } + T & operator * () { return s->data; } + + void wait(std::condition_variable & cv) + { + assert(s); + cv.wait(lk); + } + + template<class Rep, class Period, class Predicate> + bool wait_for(std::condition_variable & cv, + const std::chrono::duration<Rep, Period> & duration, + Predicate pred) + { + assert(s); + return cv.wait_for(lk, duration, pred); + } + + template<class Clock, class Duration> + std::cv_status wait_until(std::condition_variable & cv, + const std::chrono::time_point<Clock, Duration> & duration) + { + assert(s); + return cv.wait_until(lk, duration); + } + }; + + Lock lock() { return Lock(this); } +}; + +} diff --git a/src/libutil/types.hh b/src/libutil/types.hh index 23eb52512..33aaf5fc9 100644 --- a/src/libutil/types.hh +++ b/src/libutil/types.hh @@ -2,6 +2,8 @@ #include "config.h" +#include "ref.hh" + #include <string> #include <list> #include <set> @@ -97,70 +99,4 @@ typedef enum { } Verbosity; -/* A simple non-nullable reference-counted pointer. Actually a wrapper - around std::shared_ptr that prevents non-null constructions. 
*/ -template<typename T> -class ref -{ -private: - - std::shared_ptr<T> p; - -public: - - ref<T>(const ref<T> & r) - : p(r.p) - { } - - explicit ref<T>(const std::shared_ptr<T> & p) - : p(p) - { - if (!p) - throw std::invalid_argument("null pointer cast to ref"); - } - - T* operator ->() const - { - return &*p; - } - - T& operator *() const - { - return *p; - } - - operator std::shared_ptr<T> () - { - return p; - } - -private: - - template<typename T2, typename... Args> - friend ref<T2> - make_ref(Args&&... args); - - template<typename T2, typename T3, typename... Args> - friend ref<T2> - make_ref(Args&&... args); - -}; - -template<typename T, typename... Args> -inline ref<T> -make_ref(Args&&... args) -{ - auto p = std::make_shared<T>(std::forward<Args>(args)...); - return ref<T>(p); -} - -template<typename T, typename T2, typename... Args> -inline ref<T> -make_ref(Args&&... args) -{ - auto p = std::make_shared<T2>(std::forward<Args>(args)...); - return ref<T>(p); -} - - } diff --git a/src/libutil/util.cc b/src/libutil/util.cc index def0525ab..3becbbabc 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -320,9 +320,11 @@ static void _deletePath(const Path & path, unsigned long long & bytesFreed) { checkInterrupt(); - printMsg(lvlVomit, format("%1%") % path); - - struct stat st = lstat(path); + struct stat st; + if (lstat(path.c_str(), &st) == -1) { + if (errno == ENOENT) return; + throw SysError(format("getting status of ‘%1%’") % path); + } if (!S_ISDIR(st.st_mode) && st.st_nlink == 1) bytesFreed += st.st_blocks * 512; @@ -338,8 +340,10 @@ static void _deletePath(const Path & path, unsigned long long & bytesFreed) _deletePath(path + "/" + i.name, bytesFreed); } - if (remove(path.c_str()) == -1) + if (remove(path.c_str()) == -1) { + if (errno == ENOENT) return; throw SysError(format("cannot unlink ‘%1%’") % path); + } } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b714cdc64..3606f6ec9 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -92,8 +92,8 @@ string readLine(int fd); void writeLine(int fd, string s); /* Delete a path; i.e., in the case of a directory, it is deleted - recursively. Don't use this at home, kids. The second variant - returns the number of bytes and blocks freed. */ + recursively. It's not an error if the path does not exist. The + second variant returns the number of bytes and blocks freed. */ void deletePath(const Path & path); void deletePath(const Path & path, unsigned long long & bytesFreed); @@ -366,6 +366,14 @@ template<class N> bool string2Int(const string & s, N & n) return str && str.get() == EOF; } +/* Parse a string into a float. */ +template<class N> bool string2Float(const string & s, N & n) +{ + std::istringstream str(s); + str >> n; + return str && str.get() == EOF; +} + /* Return true iff `s' ends in `suffix'. */ bool hasSuffix(const string & s, const string & suffix); diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index b9ccafb75..3aa348581 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -82,7 +82,7 @@ int main(int argc, char * * argv) // Run the actual garbage collector. 
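    // openStore() no longer takes a reserveSpace flag (LocalStore now keeps
    // its own reservedPath, see local-store.hh above) and instead dispatches
    // on the NIX_REMOTE environment variable via openStoreAt(). For
    // illustration, a hypothetical caller could also open a local binary
    // cache directly:
    //   auto cache = openStoreAt("file:///tmp/binary-cache");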
if (!dryRun) { - auto store = openStore(false); + auto store = openStore(); options.action = GCOptions::gcDeleteDead; GCResults results; PrintFreed freed(true, results); diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index aa20aa67d..e3c12ea0e 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -515,7 +515,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe startWork(); ValidPathInfo info = store->queryPathInfo(path); stopWork(); - to << info.deriver << printHash(info.hash) << info.references + to << info.deriver << printHash(info.narHash) << info.references << info.registrationTime << info.narSize; break; } @@ -562,9 +562,8 @@ static void processConnection(bool trusted) if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from)) setAffinityTo(readInt(from)); - bool reserveSpace = true; if (GET_PROTOCOL_MINOR(clientVersion) >= 11) - reserveSpace = readInt(from) != 0; + readInt(from); // obsolete reserveSpace /* Send startup error messages to the client. */ startWork(); @@ -582,7 +581,7 @@ static void processConnection(bool trusted) #endif /* Open the store. */ - auto store = make_ref<LocalStore>(reserveSpace); + auto store = make_ref<LocalStore>(); stopWork(); to.flush(); diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 12e19d4af..a9d1ed024 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1127,6 +1127,10 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) attrs2["type"] = "int"; attrs2["value"] = (format("%1%") % v->integer).str(); xml.writeEmptyElement("meta", attrs2); + } else if (v->type == tFloat) { + attrs2["type"] = "float"; + attrs2["value"] = (format("%1%") % v->fpoint).str(); + xml.writeEmptyElement("meta", attrs2); } else if (v->type == tBool) { attrs2["type"] = "bool"; attrs2["value"] = v->boolean ? "true" : "false"; diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 1604c0817..81c1c8d56 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -32,7 +32,7 @@ static bool indirectRoot = false; enum OutputKind { okPlain, okXML, okJSON }; -void processExpr(ref<Store> store, EvalState & state, const Strings & attrPaths, +void processExpr(EvalState & state, const Strings & attrPaths, bool parseOnly, bool strict, Bindings & autoArgs, bool evalOnly, OutputKind output, bool location, Expr * e) { @@ -79,7 +79,7 @@ void processExpr(ref<Store> store, EvalState & state, const Strings & attrPaths, else { Path rootName = gcRoot; if (++rootNr > 1) rootName += "-" + std::to_string(rootNr); - drvPath = addPermRoot(store, drvPath, rootName, indirectRoot); + drvPath = state.store->addPermRoot(drvPath, rootName, indirectRoot); } std::cout << format("%1%%2%\n") % drvPath % (outputName != "out" ? "!" + outputName : ""); } @@ -177,7 +177,7 @@ int main(int argc, char * * argv) if (readStdin) { Expr * e = parseStdin(state); - processExpr(store, state, attrPaths, parseOnly, strict, autoArgs, + processExpr(state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } else if (files.empty() && !fromArgs) files.push_back("./default.nix"); @@ -186,7 +186,7 @@ int main(int argc, char * * argv) Expr * e = fromArgs ? 
state.parseExprFromString(i, absPath(".")) : state.parseExprFromFile(resolveExprPath(lookupFileArg(state, i))); - processExpr(store, state, attrPaths, parseOnly, strict, autoArgs, + processExpr(state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 4e706f93c..48c2865e5 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -84,7 +84,7 @@ static PathSet realisePath(Path path, bool build = true) Path rootName = gcRoot; if (rootNr > 1) rootName += "-" + std::to_string(rootNr); if (i->first != "out") rootName += "-" + i->first; - outPath = addPermRoot(ref<Store>(store), outPath, rootName, indirectRoot); + outPath = store->addPermRoot(outPath, rootName, indirectRoot); } outputs.insert(outPath); } @@ -100,7 +100,7 @@ static PathSet realisePath(Path path, bool build = true) Path rootName = gcRoot; rootNr++; if (rootNr > 1) rootName += "-" + std::to_string(rootNr); - path = addPermRoot(ref<Store>(store), path, rootName, indirectRoot); + path = store->addPermRoot(path, rootName, indirectRoot); } return singleton<PathSet>(path); } @@ -374,8 +374,8 @@ static void opQuery(Strings opFlags, Strings opArgs) for (auto & j : paths) { ValidPathInfo info = store->queryPathInfo(j); if (query == qHash) { - assert(info.hash.type == htSHA256); - cout << format("sha256:%1%\n") % printHash32(info.hash); + assert(info.narHash.type == htSHA256); + cout << format("sha256:%1%\n") % printHash32(info.narHash); } else if (query == qSize) cout << format("%1%\n") % info.narSize; } @@ -567,7 +567,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) canonicalisePathMetaData(info.path, -1); if (!hashGiven) { HashResult hash = hashPath(htSHA256, info.path); - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; } infos.push_back(info); @@ -783,11 +783,11 @@ static void opVerifyPath(Strings opFlags, Strings opArgs) Path path = followLinksToStorePath(i); printMsg(lvlTalkative, format("checking path ‘%1%’...") % path); ValidPathInfo info = store->queryPathInfo(path); - HashResult current = hashPath(info.hash.type, path); - if (current.first != info.hash) { + HashResult current = hashPath(info.narHash.type, path); + if (current.first != info.narHash) { printMsg(lvlError, format("path ‘%1%’ was modified! expected hash ‘%2%’, got ‘%3%’") - % path % printHash(info.hash) % printHash(current.first)); + % path % printHash(info.narHash) % printHash(current.first)); status = 1; } } @@ -1131,7 +1131,7 @@ int main(int argc, char * * argv) if (!op) throw UsageError("no operation specified"); if (op != opDump && op != opRestore) /* !!! hack */ - store = openStore(op != opGC); + store = openStore(); op(opFlags, opArgs); }); |