author     pennae <github@quasiparticle.net>    2022-01-05 01:48:26 +0100
committer  pennae <github@quasiparticle.net>    2022-03-09 00:18:50 +0100
commit     4d629c4f7abbbe58dfe6d9d2b37541cdf2331606 (patch)
tree       e0e0fc3dd753212f8c783ade1e932f5d7f1592e0 /src/libexpr/eval-inline.hh
parent     8e2eaaaf69d9e216fce3ca6f7913bd0e2048e4b2 (diff)
add HAVE_BOEHMGC guards to batched allocation functions
Diffstat (limited to 'src/libexpr/eval-inline.hh')
-rw-r--r--  src/libexpr/eval-inline.hh  13
1 file changed, 9 insertions, 4 deletions
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index 3331a7643..08a419923 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -42,6 +42,7 @@ inline void * allocBytes(size_t n)
[[gnu::always_inline]]
Value * EvalState::allocValue()
{
+#if HAVE_BOEHMGC
    /* We use the boehm batch allocator to speed up allocations of Values (of which there are many).
       GC_malloc_many returns a linked list of objects of the given size, where the first word
       of each object is also the pointer to the next object in the list. This also means that we
@@ -56,6 +57,9 @@ Value * EvalState::allocValue()
    void * p = *valueAllocCache;
    *valueAllocCache = GC_NEXT(p);
    GC_NEXT(p) = nullptr;
+#else
+    void * p = allocBytes(sizeof(Value));
+#endif

    nrValues++;
    return (Value *) p;
@@ -70,9 +74,8 @@ Env & EvalState::allocEnv(size_t size)
    Env * env;
-    if (size != 1)
-        env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
-    else {
+#if HAVE_BOEHMGC
+    if (size == 1) {
        /* see allocValue for explanations. */
        if (!*env1AllocCache) {
            *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *));
@@ -83,7 +86,9 @@ Env & EvalState::allocEnv(size_t size)
        *env1AllocCache = GC_NEXT(p);
        GC_NEXT(p) = nullptr;
        env = (Env *) p;
-    }
+    } else
+#endif
+        env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
    env->type = Env::Plain;
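
As a side note, the batch-allocation mechanics guarded above can be shown in isolation. The following is a minimal standalone sketch, not code from the Nix tree: allocFromBatch and allocCache are illustrative names, and the plain calloc fallback merely stands in for the allocBytes path the commit keeps for non-GC builds. Only GC_malloc_many and GC_NEXT are real Boehm GC API (declared in gc/gc.h). The sketch walks the same free list described in the comment above: GC_malloc_many returns a linked list of equally sized objects, the first word of each object points to the next one, and that word is cleared before the object is handed out.

#include <cstddef>
#include <cstdlib>
#include <new>
#if HAVE_BOEHMGC
#include <gc/gc.h>     /* GC_malloc_many, GC_NEXT */
#endif

static void * allocCache = nullptr;    /* head of the free list handed out by GC_malloc_many */

/* size must be at least sizeof(void *) so the link word fits in the object */
void * allocFromBatch(std::size_t size)
{
#if HAVE_BOEHMGC
    if (!allocCache) {
        /* Fetch a whole batch of objects of the given size in one call; they come back
           as a linked list threaded through the first word of each object. */
        allocCache = GC_malloc_many(size);
        if (!allocCache) throw std::bad_alloc();
    }
    void * p = allocCache;
    allocCache = GC_NEXT(p);    /* advance the cache to the next object in the batch */
    GC_NEXT(p) = nullptr;       /* clear the link word before handing the object out */
    return p;
#else
    /* No Boehm GC: there is no batch allocator, so fall back to a plain zeroed
       allocation (the guarded code above uses allocBytes for the same purpose). */
    void * p = std::calloc(1, size);
    if (!p) throw std::bad_alloc();
    return p;
#endif
}

Built with -DHAVE_BOEHMGC=1 and linked against libgc this takes the GC path; otherwise the preprocessor guard selects the plain-allocation fallback, mirroring the structure that allocValue and allocEnv have after this patch.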