author    Guillaume Maudoux <guillaume.maudoux@tweag.io>  2022-03-18 01:25:55 +0100
committer Guillaume Maudoux <guillaume.maudoux@tweag.io>  2022-03-18 01:25:55 +0100
commit    ca5c3e86abf4ba7ff8e680a0a89c895d452931b9 (patch)
tree      1a5dc481a375e6ab060221118f0d61959a06ecf6 /src/libexpr/eval-inline.hh
parent    1942fed6d9cee95775046c5ad3d253ab2e8ab210 (diff)
parent    6afc3617982e872fac2142c3aeccd1e8482e7e52 (diff)

Merge remote-tracking branch 'origin/master' into coerce-string
Diffstat (limited to 'src/libexpr/eval-inline.hh')

 -rw-r--r--  src/libexpr/eval-inline.hh | 91
 1 file changed, 78 insertions(+), 13 deletions(-)
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index 8980e8d4b..77ed07b2e 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -24,6 +24,81 @@ LocalNoInlineNoReturn(void throwTypeError(const Pos & pos, const char * s, const
}
+/* Note: Various places expect the allocated memory to be zeroed. */
+[[gnu::always_inline]]
+inline void * allocBytes(size_t n)
+{
+ void * p;
+#if HAVE_BOEHMGC
+ p = GC_MALLOC(n);
+#else
+ p = calloc(n, 1);
+#endif
+ if (!p) throw std::bad_alloc();
+ return p;
+}
+
+
+[[gnu::always_inline]]
+Value * EvalState::allocValue()
+{
+#if HAVE_BOEHMGC
+ /* We use the Boehm batch allocator to speed up allocations of Values (of which there are many).
+ GC_malloc_many returns a linked list of objects of the given size, where the first word
+ of each object is also the pointer to the next object in the list. This also means that we
+ have to explicitly clear the first word of every object we take. */
+ if (!*valueAllocCache) {
+ *valueAllocCache = GC_malloc_many(sizeof(Value));
+ if (!*valueAllocCache) throw std::bad_alloc();
+ }
+
+ /* GC_NEXT is a convenience macro for accessing the first word of an object.
+ Take the first list item, advance the list to the next item, and clear the next pointer. */
+ void * p = *valueAllocCache;
+ *valueAllocCache = GC_NEXT(p);
+ GC_NEXT(p) = nullptr;
+#else
+ void * p = allocBytes(sizeof(Value));
+#endif
+
+ nrValues++;
+ return (Value *) p;
+}
+
+
+[[gnu::always_inline]]
+Env & EvalState::allocEnv(size_t size)
+{
+ nrEnvs++;
+ nrValuesInEnvs += size;
+
+ Env * env;
+
+#if HAVE_BOEHMGC
+ if (size == 1) {
+ /* see allocValue for explanations. */
+ if (!*env1AllocCache) {
+ *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *));
+ if (!*env1AllocCache) throw std::bad_alloc();
+ }
+
+ void * p = *env1AllocCache;
+ *env1AllocCache = GC_NEXT(p);
+ GC_NEXT(p) = nullptr;
+ env = (Env *) p;
+ } else
+#endif
+ env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
+
+ env->type = Env::Plain;
+
+ /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
+
+ return *env;
+}
+
+
+[[gnu::always_inline]]
void EvalState::forceValue(Value & v, const Pos & pos)
{
forceValue(v, [&]() { return pos; });
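
For readers unfamiliar with the Boehm batch allocator that the new allocValue relies on, here is a minimal, self-contained sketch of the same free-list pattern outside of Nix. The Cell struct, freeList variable and popCell helper are invented names for illustration; only GC_INIT, GC_malloc_many and GC_NEXT are real Boehm GC API (build with -lgc):

    #include <gc/gc.h>   // GC_INIT, GC_malloc_many, GC_NEXT
    #include <cstdio>
    #include <new>       // std::bad_alloc

    struct Cell { void * link; long payload; };   // hypothetical object type

    static void * freeList = nullptr;   // plays the role of *valueAllocCache

    static void * popCell()
    {
        if (!freeList) {
            /* One call hands back a whole linked list of Cell-sized objects;
               the first word of each object points to the next one. */
            freeList = GC_malloc_many(sizeof(Cell));
            if (!freeList) throw std::bad_alloc();
        }
        void * p = freeList;
        freeList = GC_NEXT(p);   // advance the cache to the next object
        GC_NEXT(p) = nullptr;    // clear the link word, as the patch requires
        return p;
    }

    int main()
    {
        GC_INIT();
        auto * c = static_cast<Cell *>(popCell());
        c->payload = 42;
        std::printf("%ld\n", c->payload);
        return 0;
    }

The payoff is amortization: one trip into the collector refills a whole cache of Value-sized objects, instead of one GC_MALLOC call per Value.
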
@@ -52,6 +127,7 @@ void EvalState::forceValue(Value & v, Callable getPos)
}
+[[gnu::always_inline]]
inline void EvalState::forceAttrs(Value & v, const Pos & pos, const std::string_view & errorCtx)
{
forceAttrs(v, [&]() { return pos; }, errorCtx);
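
The [[gnu::always_inline]] attribute added throughout this patch is a GCC/Clang extension that forces the call to be inlined regardless of optimization level, which keeps these thin header-defined wrappers free. A minimal illustration (plusOne and twice are made-up names, not part of the patch):

    /* The function must also be declared inline: GCC refuses
       always_inline on a function it cannot inline. */
    [[gnu::always_inline]]
    inline int plusOne(int x)
    {
        return x + 1;
    }

    int twice(int x) { return plusOne(plusOne(x)); }   // both calls inlined
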
@@ -59,6 +135,7 @@ inline void EvalState::forceAttrs(Value & v, const Pos & pos, const std::string_view & errorCtx)
template <typename Callable>
+[[gnu::always_inline]]
inline void EvalState::forceAttrs(Value & v, Callable getPos, const std::string_view & errorCtx)
{
try {
@@ -74,6 +151,7 @@ inline void EvalState::forceAttrs(Value & v, Callable getPos, const std::string_view & errorCtx)
}
+[[gnu::always_inline]]
inline void EvalState::forceList(Value & v, const Pos & pos, const std::string_view & errorCtx)
{
try {
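
Each of forceValue, forceAttrs and forceList pairs a Pos-taking overload with a template overload taking a callable, so the position is only materialized on the error path. A stripped-down sketch of that delegation pattern, with Thing, force and the error message invented for illustration:

    #include <stdexcept>
    #include <string>

    struct Pos { int line = 0; };
    struct Thing { bool ok = false; };

    template <typename Callable>
    void force(Thing & t, Callable getPos)
    {
        if (!t.ok)
            /* getPos() runs only when we are about to throw, so successful
               forcing never pays for computing a position. */
            throw std::runtime_error(
                "bad thing at line " + std::to_string(getPos().line));
    }

    inline void force(Thing & t, const Pos & pos)
    {
        force(t, [&]() { return pos; });   // same shape as forceValue above
    }

Callers that already hold a Pos use the cheap overload; callers that would have to compute one pass a lambda and defer that work.
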
@@ -87,18 +165,5 @@ inline void EvalState::forceList(Value & v, const Pos & pos, const std::string_view & errorCtx)
}
}
-/* Note: Various places expect the allocated memory to be zeroed. */
-inline void * allocBytes(size_t n)
-{
- void * p;
-#if HAVE_BOEHMGC
- p = GC_MALLOC(n);
-#else
- p = calloc(n, 1);
-#endif
- if (!p) throw std::bad_alloc();
- return p;
-}
-
}
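
A final note on the zeroing contract: both branches of allocBytes return zeroed memory (GC_MALLOC clears the block, and so does calloc), and allocEnv depends on this to leave env->values full of null pointers. A simplified sketch of that invariant using the non-GC branch; MiniEnv and allocMiniEnv are stand-ins for illustration, not the real Nix types:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Value;   // opaque, as in the real code

    /* Stand-in for Nix's Env: a small header followed by pointer slots
       allocated in the same block. */
    struct MiniEnv
    {
        int type;
        Value * values[1];   // really `size` slots; [1] keeps the sketch simple
    };

    inline MiniEnv * allocMiniEnv(std::size_t size)   // requires size >= 1
    {
        /* calloc zeroes the whole block, so every values[i] starts out as
           nullptr -- the state that maybeThunk() and lookupVar expect of a
           fresh Env, per the comment in allocEnv above. */
        void * p = std::calloc(sizeof(MiniEnv) + (size - 1) * sizeof(Value *), 1);
        if (!p) throw std::bad_alloc();
        return static_cast<MiniEnv *>(p);
    }
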