Diffstat (limited to 'src/libexpr/eval-inline.hh')
 src/libexpr/eval-inline.hh | 104 +++++++++++++++++++++----------
 1 file changed, 75 insertions(+), 29 deletions(-)
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index aef1f6351..7f01d08e3 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -2,29 +2,85 @@
#include "eval.hh"
-#define LocalNoInline(f) static f __attribute__((noinline)); f
-#define LocalNoInlineNoReturn(f) static f __attribute__((noinline, noreturn)); f
-
namespace nix {
-LocalNoInlineNoReturn(void throwEvalError(const Pos & pos, const char * s))
+
+/* Note: Various places expect the allocated memory to be zeroed. */
+[[gnu::always_inline]]
+inline void * allocBytes(size_t n)
{
- throw EvalError({
- .msg = hintfmt(s),
- .errPos = pos
- });
+ void * p;
+#if HAVE_BOEHMGC
+ p = GC_MALLOC(n);
+#else
+ p = calloc(n, 1);
+#endif
+ if (!p) throw std::bad_alloc();
+ return p;
}
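
Aside: the note above is an invariant that both branches uphold: calloc zero-fills by definition, and Boehm's GC_MALLOC likewise returns cleared memory. A minimal standalone sketch of the contract, using a hypothetical allocBytesSketch and plain calloc only:

    #include <cassert>
    #include <cstdlib>
    #include <new>

    static void * allocBytesSketch(size_t n)
    {
        void * p = calloc(n, 1);        // calloc, unlike malloc, zero-fills
        if (!p) throw std::bad_alloc();
        return p;
    }

    int main()
    {
        auto * words = static_cast<unsigned long *>(
            allocBytesSketch(4 * sizeof(unsigned long)));
        for (int i = 0; i < 4; ++i)
            assert(words[i] == 0);      // the invariant callers rely on
        free(words);
    }
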
-LocalNoInlineNoReturn(void throwTypeError(const Pos & pos, const char * s, const Value & v))
+
+[[gnu::always_inline]]
+Value * EvalState::allocValue()
{
- throw TypeError({
- .msg = hintfmt(s, showType(v)),
- .errPos = pos
- });
+#if HAVE_BOEHMGC
+ /* We use the Boehm batch allocator to speed up allocations of Values (of which there are many).
+ GC_malloc_many returns a linked list of objects of the given size, where the first word
+ of each object is also the pointer to the next object in the list. This also means that we
+ have to explicitly clear the first word of every object we take. */
+ if (!*valueAllocCache) {
+ *valueAllocCache = GC_malloc_many(sizeof(Value));
+ if (!*valueAllocCache) throw std::bad_alloc();
+ }
+
+ /* GC_NEXT is a convenience macro for accessing the first word of an object.
+ Take the first list item, advance the list to the next item, and clear the next pointer. */
+ void * p = *valueAllocCache;
+ *valueAllocCache = GC_NEXT(p);
+ GC_NEXT(p) = nullptr;
+#else
+ void * p = allocBytes(sizeof(Value));
+#endif
+
+ nrValues++;
+ return (Value *) p;
}
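
Aside: the comment above describes the interesting part of this hunk. Here is a minimal standalone sketch of the same batch-allocation pattern using plain calloc instead of GC_malloc_many; freeList, refill, and allocFromCache are illustrative names, and the link-through-the-first-word trick mirrors what GC_NEXT does:

    #include <cstdlib>
    #include <new>

    static void * freeList = nullptr;       // plays the role of *valueAllocCache

    /* Hypothetical stand-in for GC_malloc_many(): grab one block, carve it
       into `count` objects, and thread them through their first word, as the
       comment above describes for the real GC list. objSize must be at least
       sizeof(void *). */
    static void refill(size_t objSize, size_t count)
    {
        char * block = static_cast<char *>(calloc(count, objSize));
        if (!block) throw std::bad_alloc();
        for (size_t i = 0; i < count; ++i) {
            void * obj = block + i * objSize;
            *(void **) obj = freeList;      // first word = next pointer (cf. GC_NEXT)
            freeList = obj;
        }
    }

    static void * allocFromCache(size_t objSize)
    {
        if (!freeList) refill(objSize, 64);
        void * p = freeList;
        freeList = *(void **) p;            // advance the list to the next item
        *(void **) p = nullptr;             // clear the link word before handing out
        return p;
    }
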
-void EvalState::forceValue(Value & v, const Pos & pos)
+[[gnu::always_inline]]
+Env & EvalState::allocEnv(size_t size)
+{
+ nrEnvs++;
+ nrValuesInEnvs += size;
+
+ Env * env;
+
+#if HAVE_BOEHMGC
+ if (size == 1) {
+ /* See allocValue for an explanation. */
+ if (!*env1AllocCache) {
+ *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *));
+ if (!*env1AllocCache) throw std::bad_alloc();
+ }
+
+ void * p = *env1AllocCache;
+ *env1AllocCache = GC_NEXT(p);
+ GC_NEXT(p) = nullptr;
+ env = (Env *) p;
+ } else
+#endif
+ env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
+
+ env->type = Env::Plain;
+
+ /* We assume that env->values has been cleared by the allocator; maybeThunk() and the fromWith case of lookupVar() expect this. */
+
+ return *env;
+}
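
Aside: the sizeof(Env) + size * sizeof(Value *) expression is the classic over-allocation idiom: the header and the variable-length slot array share one allocation. A trimmed-down sketch (EnvSketch is hypothetical; the real Env in eval.hh carries more fields):

    #include <cstdlib>
    #include <new>

    struct Value;

    struct EnvSketch
    {
        EnvSketch * up;        // enclosing lexical scope
        Value * values[0];     // GNU zero-length-array idiom, as Nix itself uses;
                               // `size` slots live directly behind the header
    };

    static EnvSketch & allocEnvSketch(size_t size)
    {
        void * p = calloc(sizeof(EnvSketch) + size * sizeof(Value *), 1);
        if (!p) throw std::bad_alloc();
        return *static_cast<EnvSketch *>(p);   // values[0..size) already zeroed
    }
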
+
+
+[[gnu::always_inline]]
+void EvalState::forceValue(Value & v, const PosIdx pos)
{
forceValue(v, [&]() { return pos; });
}
@@ -52,13 +108,15 @@ void EvalState::forceValue(Value & v, Callable getPos)
}
-inline void EvalState::forceAttrs(Value & v, const Pos & pos)
+[[gnu::always_inline]]
+inline void EvalState::forceAttrs(Value & v, const PosIdx pos)
{
forceAttrs(v, [&]() { return pos; });
}
template <typename Callable>
+[[gnu::always_inline]]
inline void EvalState::forceAttrs(Value & v, Callable getPos)
{
forceValue(v, getPos);
@@ -67,25 +125,13 @@ inline void EvalState::forceAttrs(Value & v, Callable getPos)
}
-inline void EvalState::forceList(Value & v, const Pos & pos)
+[[gnu::always_inline]]
+inline void EvalState::forceList(Value & v, const PosIdx pos)
{
forceValue(v, pos);
if (!v.isList())
throwTypeError(pos, "value is %1% while a list was expected", v);
}
-/* Note: Various places expect the allocated memory to be zeroed. */
-inline void * allocBytes(size_t n)
-{
- void * p;
-#if HAVE_BOEHMGC
- p = GC_MALLOC(n);
-#else
- p = calloc(n, 1);
-#endif
- if (!p) throw std::bad_alloc();
- return p;
-}
-
}
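
Aside: the force* overloads above all use the same trick: the PosIdx overload wraps its argument in a lambda and delegates to the Callable template, so any position work happens only on the error path while the wrapper itself inlines away. A standalone sketch of the idiom, with illustrative names:

    #include <stdexcept>
    #include <string>

    using PosIdxSketch = unsigned int;      // stand-in for nix::PosIdx

    /* The general path: getPos() runs only if an error is actually raised. */
    template<typename Callable>
    void forceSketch(bool ok, Callable getPos)
    {
        if (!ok)
            throw std::runtime_error("bad value at " + std::to_string(getPos()));
    }

    /* The thin overload, mirroring forceValue(Value &, const PosIdx):
       wrapping pos in a lambda defers position handling to the error path. */
    inline void forceSketch(bool ok, PosIdxSketch pos)
    {
        forceSketch(ok, [&]() { return pos; });
    }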