Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  330
1 file changed, 266 insertions(+), 64 deletions(-)
diff --git a/gc.c b/gc.c
index a945f7fd..bf5482f7 100644
--- a/gc.c
+++ b/gc.c
@@ -1,4 +1,4 @@
-/* Copyright 2009-2020
+/* Copyright 2009-2024
* Kaz Kylheku <kaz@kylheku.com>
* Vancouver, Canada
* All rights reserved.
@@ -6,28 +6,30 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
- * 1. Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
+#include <stddef.h>
#include <assert.h>
#include <wchar.h>
#include <signal.h>
@@ -36,10 +38,12 @@
#if HAVE_VALGRIND
#include <valgrind/memcheck.h>
#endif
+#if HAVE_RLIMIT
+#include <sys/resource.h>
+#endif
#include "lib.h"
#include "stream.h"
#include "hash.h"
-#include "txr.h"
#include "eval.h"
#include "gc.h"
#include "signal.h"
@@ -55,6 +59,7 @@
#define FULL_GC_INTERVAL 20
#define FRESHOBJ_VEC_SIZE (2 * HEAP_SIZE)
#define DFL_MALLOC_DELTA_THRESH (16L * 1024 * 1024)
+#define DFL_STACK_LIMIT (128 * 1024L)
#else
#define HEAP_SIZE 16384
#define CHECKOBJ_VEC_SIZE (2 * HEAP_SIZE)
@@ -62,8 +67,11 @@
#define FULL_GC_INTERVAL 40
#define FRESHOBJ_VEC_SIZE (8 * HEAP_SIZE)
#define DFL_MALLOC_DELTA_THRESH (64L * 1024 * 1024)
+#define DFL_STACK_LIMIT (16384 * 1024L)
#endif
+#define MIN_STACK_LIMIT 32768
+
#if HAVE_MEMALIGN || HAVE_POSIX_MEMALIGN
#define OBJ_ALIGN (sizeof (obj_t))
#else
@@ -73,6 +81,9 @@
typedef struct heap {
obj_t block[HEAP_SIZE];
struct heap *next;
+#if CONFIG_NAN_BOXING_STRIP_TAG
+ ucnum tag;
+#endif
} heap_t;
typedef struct mach_context {
@@ -85,7 +96,9 @@ int opt_gc_debug;
#if HAVE_VALGRIND
int opt_vg_debug;
#endif
-static val *gc_stack_bottom;
+
+val *gc_stack_bottom;
+val *gc_stack_limit;
static val *prot_stack[PROT_STACK_SIZE];
static val **prot_stack_limit = prot_stack + PROT_STACK_SIZE;
@@ -123,6 +136,14 @@ int full_gc;
val break_obj;
#endif
+struct prot_array {
+ cnum size;
+ val self;
+ val arr[FLEX_ARRAY];
+};
+
+struct cobj_class *prot_array_cls;
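
struct prot_array carries a trailing val array sized at allocation time; FLEX_ARRAY is evidently TXR's portability macro for a flexible array member, and gc_prot_array_alloc at the bottom of this diff sizes the block with offsetof. A standalone sketch of that allocation pattern, with all names invented:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flex {
        size_t size;
        int arr[];   /* C99 flexible array member */
    };

    int main(void)
    {
        size_t n = 8;
        /* one block holds the header plus n trailing elements */
        struct flex *f = (struct flex *) calloc(offsetof(struct flex, arr)
                                                + n * sizeof (int), 1);
        if (f) {
            f->size = n;
            printf("allocated %zu bytes\n",
                   offsetof(struct flex, arr) + n * sizeof (int));
            free(f);
        }
        return 0;
    }
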
+
val prot1(val *loc)
{
assert (gc_prot_top < prot_stack_limit);
@@ -147,9 +168,18 @@ void protect(val *first, ...)
static void more(void)
{
+#if CONFIG_NAN_BOXING_STRIP_TAG
+ ucnum tagged_ptr = coerce(cnum, chk_malloc_gc_more(sizeof (heap_t)));
+ heap_t *heap = coerce(heap_t *, tagged_ptr & ~TAG_BIGMASK);
+#else
heap_t *heap = coerce(heap_t *, chk_malloc_gc_more(sizeof *heap));
+#endif
obj_t *block = heap->block, *end = heap->block + HEAP_SIZE;
+#if CONFIG_NAN_BOXING_STRIP_TAG
+ heap->tag = tagged_ptr >> TAG_BIGSHIFT;
+#endif
+
if (free_list == 0)
free_tail = &heap->block[0].t.next;
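
The tag field added to heap_t supports CONFIG_NAN_BOXING_STRIP_TAG builds, where object pointers keep only the low bits of the malloc address; more() records the stripped high bits so that sweep() and gc_free_all() can reconstruct the original pointer before calling free(). A minimal standalone sketch of that round trip, assuming a hypothetical 48-bit split (the real TAG_BIGSHIFT and TAG_BIGMASK are defined elsewhere in TXR):

    #include <stdint.h>
    #include <stdlib.h>

    #define TAG_BIGSHIFT 48
    #define TAG_BIGMASK (~(uintptr_t) 0 << TAG_BIGSHIFT)

    struct hdr {
        uintptr_t tag; /* high bits stripped from the malloc result */
    };

    /* allocation failure handling omitted for brevity */
    static struct hdr *alloc_stripped(size_t size)
    {
        uintptr_t raw = (uintptr_t) malloc(size);
        struct hdr *h = (struct hdr *) (raw & ~TAG_BIGMASK);
        h->tag = raw >> TAG_BIGSHIFT; /* remember what was cut off */
        return h;
    }

    static void free_stripped(struct hdr *h)
    {
        /* rebuild the original pointer before returning it to malloc */
        free((void *) ((uintptr_t) h | (h->tag << TAG_BIGSHIFT)));
    }

This only works on platforms where the stripped low-bits-only pointer still addresses the allocation, which is the premise of the strip-tag scheme.
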
@@ -221,6 +251,7 @@ val make_obj(void)
#endif
#if CONFIG_GEN_GC
ret->t.gen = 0;
+ ret->t.fincount = 0;
if (!full_gc)
freshobj[freshobj_idx++] = ret;
#endif
@@ -259,6 +290,17 @@ val make_obj(void)
abort();
}
+val copy_obj(val orig)
+{
+ val copy = make_obj();
+ *copy = *orig;
+#if CONFIG_GEN_GC
+ copy->t.fincount = 0;
+ copy->t.gen = 0;
+#endif
+ return copy;
+}
+
static void finalize(val obj)
{
switch (convert(type_t, obj->t.type)) {
@@ -316,6 +358,7 @@ static void finalize(val obj)
void cobj_destroy_stub_op(val obj)
{
+ (void) obj;
}
void cobj_destroy_free_op(val obj)
@@ -325,6 +368,7 @@ void cobj_destroy_free_op(val obj)
static void mark_obj(val obj)
{
+ val self = lit("gc");
type_t t;
tail_call:
@@ -374,8 +418,7 @@ tail_call:
mark_obj(obj->c.car);
mark_obj_tail(obj->c.cdr);
case STR:
- mark_obj(obj->st.len);
- mark_obj_tail(obj->st.alloc);
+ mark_obj_tail(obj->st.len);
case SYM:
mark_obj(obj->s.name);
mark_obj_tail(obj->s.package);
@@ -397,7 +440,7 @@ tail_call:
{
val alloc_size = obj->v.vec[vec_alloc];
val len = obj->v.vec[vec_length];
- cnum i, fp = c_num(len);
+ cnum i, fp = c_num(len, self);
mark_obj(alloc_size);
mark_obj(len);
@@ -416,9 +459,11 @@ tail_call:
mark_obj(obj->ls.props->term);
mark_obj_tail(obj->ls.list);
case COBJ:
+ obj->co.ops->mark(obj);
+ return;
case CPTR:
obj->co.ops->mark(obj);
- mark_obj_tail(obj->co.cls);
+ mark_obj_tail(obj->cp.cls);
case ENV:
mark_obj(obj->e.vbindings);
mark_obj(obj->e.fbindings);
@@ -435,7 +480,7 @@ tail_call:
mark_obj_tail(obj->tn.key);
case DARG:
{
- struct args *args = obj->a.args;
+ varg args = obj->a.args;
cnum i, n = args->fill;
val *arg = args->arg;
@@ -452,8 +497,46 @@ tail_call:
assert (0 && "corrupt type field");
}
+static void mark_obj_norec(val obj)
+{
+ type_t t;
+
+ if (!is_ptr(obj))
+ return;
+
+ t = obj->t.type;
+
+ if ((t & REACHABLE) != 0)
+ return;
+
+#if CONFIG_GEN_GC
+ if (!full_gc && obj->t.gen > 0)
+ return;
+#endif
+
+ if ((t & FREE) != 0)
+ abort();
+
+#if CONFIG_GEN_GC
+ if (obj->t.gen == -1)
+ obj->t.gen = 0; /* Will be promoted to generation 1 by sweep_one */
+#endif
+
+ obj->t.type = convert(type_t, t | REACHABLE);
+
+#if CONFIG_EXTRA_DEBUGGING
+ if (obj == break_obj) {
+#if HAVE_VALGRIND
+ VALGRIND_PRINTF_BACKTRACE("object %p marked\n", convert(void *, obj));
+#endif
+ breakpt();
+ }
+#endif
+}
+
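
mark_obj_norec marks only the cell it is handed, without descending into children; gc_mark_norec further down exports it for COBJ mark operations that traverse their contents under some policy of their own. A speculative sketch of such a mark op, in the style of gc.c, with struct wbox and its fields invented for illustration (only gc_mark_norec itself comes from this change):

    /* assumes the TXR internals visible in this file: val, cnum, coerce */
    static void wbox_mark(val obj)
    {
        struct wbox *w = coerce(struct wbox *, obj->co.handle);
        cnum i;

        gc_mark_norec(w->vec); /* keep the vector cell itself alive */

        if (!w->weak) /* weak entries are deliberately left unmarked */
            for (i = 0; i < w->count; i++)
                gc_mark(w->vec->v.vec[i]);
    }
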
void cobj_mark_op(val obj)
{
+ (void) obj;
}
static int in_heap(val ptr)
@@ -490,11 +573,12 @@ static void mark_obj_maybe(val maybe_obj)
VALGRIND_MAKE_MEM_DEFINED(&maybe_obj, sizeof maybe_obj);
#endif
if (in_heap(maybe_obj)) {
+ type_t t;
#if HAVE_VALGRIND
if (opt_vg_debug)
VALGRIND_MAKE_MEM_DEFINED(maybe_obj, SIZEOF_PTR);
#endif
- type_t t = maybe_obj->t.type;
+ t = maybe_obj->t.type;
if ((t & FREE) == 0) {
mark_obj(maybe_obj);
} else {
@@ -518,7 +602,7 @@ static void mark_mem_region(val *low, val *high)
mark_obj_maybe(*low);
}
-static void mark(val *gc_stack_top)
+NOINLINE static void mark(val *gc_stack_top)
{
val **rootloc;
@@ -624,7 +708,7 @@ static int sweep_one(obj_t *block)
return 1;
}
-static int_ptr_t sweep(void)
+NOINLINE static int_ptr_t sweep(void)
{
int_ptr_t free_count = 0;
heap_t **pph;
@@ -692,7 +776,12 @@ static int_ptr_t sweep(void)
}
}
*pph = heap->next;
+#if CONFIG_NAN_BOXING_STRIP_TAG
+ free(coerce(heap_t *, coerce(ucnum, heap) | (heap->tag << TAG_BIGSHIFT)));
+#else
free(heap);
+#endif
+
#if HAVE_VALGRIND
if (vg_dbg) {
val iter, next;
@@ -730,7 +819,7 @@ static int is_reachable(val obj)
return (t & REACHABLE) != 0;
}
-static void prepare_finals(void)
+NOINLINE static void prepare_finals(void)
{
struct fin_reg *f;
@@ -742,9 +831,6 @@ static void prepare_finals(void)
for (f = final_list; f; f = f->next) {
if (!f->reachable) {
-#if CONFIG_GEN_GC
- f->obj->t.gen = 0;
-#endif
mark_obj(f->obj);
}
mark_obj(f->fun);
@@ -754,47 +840,53 @@ static void prepare_finals(void)
static val call_finalizers_impl(val ctx,
int (*should_call)(struct fin_reg *, val))
{
- struct fin_reg *f, **tail;
- struct fin_reg *found = 0, **ftail = &found;
val ret = nil;
- if (!final_list)
- return ret;
+ for (;;) {
+ struct fin_reg *f, **tail;
+ struct fin_reg *found = 0, **ftail = &found;
- for (f = final_list, tail = &final_list; f; ) {
- struct fin_reg *next = f->next;
+ for (f = final_list, tail = &final_list; f; ) {
+ struct fin_reg *next = f->next;
- if (should_call(f, ctx)) {
- *ftail = f;
- ftail = &f->next;
- f->next = 0;
- } else {
- *tail = f;
- tail = &f->next;
+ if (should_call(f, ctx)) {
+ *ftail = f;
+ ftail = &f->next;
+ } else {
+ *tail = f;
+ tail = &f->next;
+ }
+
+ f = next;
}
- f = next;
- }
+ *ftail = 0;
+ *tail = 0;
+ final_tail = tail;
- *tail = 0;
- final_tail = tail;
+ if (!found)
+ break;
- while (found) {
- struct fin_reg *next = found->next;
- val obj = found->obj;
- funcall1(found->fun, obj);
+ do {
+ struct fin_reg *next = found->next;
+ val obj = found->obj;
+ funcall1(found->fun, obj);
#if CONFIG_GEN_GC
- /* Note: here an object may be added to freshobj more than once, since
- * multiple finalizers can be registered.
- */
- if (freshobj_idx < FRESHOBJ_VEC_SIZE && obj->t.gen == 0)
- freshobj[freshobj_idx++] = obj;
- else
- full_gc = 1;
+ if (--obj->t.fincount == 0 && inprogress &&
+ !full_gc && !found->reachable)
+ {
+ if (freshobj_idx < FRESHOBJ_VEC_SIZE) {
+ obj->t.gen = 0;
+ freshobj[freshobj_idx++] = obj;
+ } else {
+ full_gc = 1;
+ }
+ }
#endif
- free(found);
- found = next;
- ret = t;
+ free(found);
+ found = next;
+ ret = t;
+ } while (found);
}
return ret;
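
The restructured call_finalizers_impl wraps the old single partition-and-call pass in an outer loop: running a finalizer can register new finalizations, so the partition repeats until a pass finds nothing to call. A self-contained sketch of that drain-until-empty pattern, with all names invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct reg { int id; struct reg *next; };

    static struct reg *registry;

    static void run(struct reg *r)
    {
        printf("finalizing %d\n", r->id);
        /* a real finalizer may push new entries onto registry here */
    }

    static int match_all(struct reg *r) { (void) r; return 1; }

    static void drain(int (*matches)(struct reg *))
    {
        for (;;) {
            struct reg *found = 0, **ftail = &found, **tail = &registry, *r;

            for (r = registry; r != 0; ) { /* one partition pass */
                struct reg *next = r->next;
                if (matches(r)) {
                    *ftail = r;
                    ftail = &r->next;
                } else {
                    *tail = r;
                    tail = &r->next;
                }
                r = next;
            }
            *ftail = 0;
            *tail = 0;

            if (!found)
                break; /* a pass that matches nothing ends the loop */

            do { /* run this batch; it may enqueue more work */
                struct reg *next = found->next;
                run(found);
                free(found);
                found = next;
            } while (found);
        }
    }

    int main(void) /* malloc error checks omitted for brevity */
    {
        int i;
        for (i = 1; i <= 3; i++) {
            struct reg *r = (struct reg *) malloc(sizeof *r);
            r->id = i;
            r->next = registry;
            registry = r;
        }
        drain(match_all);
        return 0;
    }
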
@@ -806,7 +898,7 @@ static int is_unreachable_final(struct fin_reg *f, val ctx)
return !f->reachable;
}
-static void call_finals(void)
+NOINLINE static void call_finals(void)
{
(void) call_finalizers_impl(nil, is_unreachable_final);
}
@@ -882,6 +974,21 @@ int gc_inprogress(void)
void gc_init(val *stack_bottom)
{
gc_stack_bottom = stack_bottom;
+ gc_stack_limit = gc_stack_bottom - DFL_STACK_LIMIT / sizeof (val);
+#if HAVE_RLIMIT
+ {
+ struct rlimit rl;
+ if (getrlimit(RLIMIT_STACK, &rl) == 0) {
+ rlim_t lim = rl.rlim_cur;
+ if (lim != RLIM_INFINITY) {
+ ptrdiff_t delta = (lim >= MIN_STACK_LIMIT
+ ? (lim - lim / 16)
+ : MIN_STACK_LIMIT) / sizeof (val);
+ gc_stack_limit = gc_stack_bottom - delta;
+ }
+ }
+ }
+#endif
}
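
Numerically: with getrlimit reporting a soft stack limit of 8 MiB, the limit lands lim - lim/16 = 7.5 MiB below the stack bottom, leaving lim/16 = 512 KiB of headroom, evidently so the stack_overflow_s throw in gc_stack_overflow still has room to execute. A standalone check of that arithmetic, assuming hypothetical 8-byte vals:

    #include <stdio.h>

    #define MIN_STACK_LIMIT 32768

    int main(void)
    {
        long lim = 8L * 1024 * 1024; /* hypothetical rl.rlim_cur */
        long val_size = 8;           /* stand-in for sizeof (val) */
        long delta = (lim >= MIN_STACK_LIMIT
                      ? lim - lim / 16
                      : MIN_STACK_LIMIT) / val_size;

        printf("limit sits %ld bytes below the stack bottom\n",
               delta * val_size);       /* prints 7864320 */
        printf("%ld bytes of headroom remain\n",
               lim - delta * val_size); /* prints 524288 */
        return 0;
    }
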
void gc_mark(val obj)
@@ -889,6 +996,11 @@ void gc_mark(val obj)
mark_obj(obj);
}
+void gc_mark_norec(val obj)
+{
+ mark_obj_norec(obj);
+}
+
void gc_conservative_mark(val maybe_obj)
{
mark_obj_maybe(maybe_obj);
@@ -959,13 +1071,41 @@ val gc_push(val obj, loc plist)
static val gc_set_delta(val delta)
{
- opt_gc_delta = c_num(delta);
+ val self = lit("gc");
+ opt_gc_delta = c_num(delta, self);
return nil;
}
-static val gc_wrap(void)
+static val set_stack_limit(val limit)
+{
+ val self = lit("set-stack-limit");
+ val *gsl = gc_stack_limit;
+
+ if (limit == nil || limit == zero) {
+ gc_stack_limit = 0;
+ } else {
+ ucnum lim = c_unum(limit, self);
+ gc_stack_limit = gc_stack_bottom - lim / sizeof (val);
+ }
+
+ return if2(gsl, num((gc_stack_bottom - gsl) * sizeof (val)));
+}
+
+static val get_stack_limit(void)
+{
+ val *gsl = gc_stack_limit;
+ return if2(gsl, num((gc_stack_bottom - gsl) * sizeof (val)));
+}
+
+static val gc_wrap(val full)
{
if (gc_enabled) {
+#if CONFIG_GEN_GC
+ if (!null_or_missing_p(full))
+ full_gc = 1;
+#else
+ (void) full;
+#endif
gc();
return t;
}
@@ -974,7 +1114,8 @@ static val gc_wrap(void)
val gc_finalize(val obj, val fun, val rev_order_p)
{
- type_check(lit("gc-finalize"), fun, FUN);
+ val self = lit("gc-finalize");
+ type_check(self, fun, FUN);
rev_order_p = default_null_arg(rev_order_p);
@@ -982,7 +1123,17 @@ val gc_finalize(val obj, val fun, val rev_order_p)
struct fin_reg *f = coerce(struct fin_reg *, chk_malloc(sizeof *f));
f->obj = obj;
f->fun = fun;
- f->reachable = 0;
+ f->reachable = 1;
+
+#if CONFIG_GEN_GC
+ if (++obj->t.fincount == 0) {
+ obj->t.fincount--;
+ free(f);
+ uw_throwf(error_s,
+ lit("~a: too many finalizations registered against object ~s"),
+ self, obj, nao);
+ }
+#endif
if (rev_order_p) {
if (!final_list)
@@ -1024,11 +1175,15 @@ val valid_object_p(val obj)
void gc_late_init(void)
{
- reg_fun(intern(lit("gc"), system_package), func_n0(gc_wrap));
+ reg_fun(intern(lit("gc"), system_package), func_n1o(gc_wrap, 0));
reg_fun(intern(lit("gc-set-delta"), system_package), func_n1(gc_set_delta));
reg_fun(intern(lit("finalize"), user_package), func_n3o(gc_finalize, 2));
reg_fun(intern(lit("call-finalizers"), user_package),
func_n1(gc_call_finalizers));
+ reg_fun(intern(lit("set-stack-limit"), user_package), func_n1(set_stack_limit));
+ reg_fun(intern(lit("get-stack-limit"), user_package), func_n0(get_stack_limit));
+
+ prot_array_cls = cobj_register(intern(lit("gc-prot-array"), system_package));
}
/*
@@ -1124,7 +1279,11 @@ void gc_free_all(void)
finalize(block);
}
+#if CONFIG_NAN_BOXING_STRIP_TAG
+ free(coerce(heap_t *, coerce(ucnum, iter) | (iter->tag << TAG_BIGSHIFT)));
+#else
free(iter);
+#endif
iter = next;
}
}
@@ -1139,3 +1298,46 @@ void gc_free_all(void)
}
}
}
+
+void gc_stack_overflow(void)
+{
+ uw_throwf(stack_overflow_s, lit("computation exceeded stack limit"), nao);
+}
+
+static void prot_array_mark(val obj)
+{
+ struct prot_array *pa = coerce(struct prot_array *, obj->co.handle);
+
+ if (pa) {
+ cnum i;
+ for (i = 0; i < pa->size; i++)
+ gc_mark(pa->arr[i]);
+ }
+}
+
+static struct cobj_ops prot_array_ops = cobj_ops_init(eq,
+ cobj_print_op,
+ cobj_destroy_free_op,
+ prot_array_mark,
+ cobj_eq_hash_op);
+
+val *gc_prot_array_alloc(cnum size, val *obj)
+{
+ struct prot_array *pa = coerce(struct prot_array *,
+ chk_calloc(offsetof(struct prot_array, arr) +
+ size * sizeof(val), 1));
+ pa->size = size;
+ *obj = pa->self = cobj(coerce(mem_t *, pa), prot_array_cls, &prot_array_ops);
+
+ return pa->arr;
+}
+
+void gc_prot_array_free(val *arr)
+{
+ if (arr) {
+ struct prot_array *pa = container(arr, struct prot_array, arr);
+ val obj = pa->self;
+ obj->co.handle = 0;
+ free(pa);
+ }
+}
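
A hypothetical usage sketch of the new prot-array API, assuming the TXR internals declared in lib.h and gc.h: the caller receives a val array whose slots the collector reaches through the backing cobj, until gc_prot_array_free detaches and releases it.

    static void scratch_example(void)
    {
        val holder; /* the cobj the collector actually sees */
        val *slots = gc_prot_array_alloc(16, &holder);
        cnum i;

        for (i = 0; i < 16; i++)
            slots[i] = num(i); /* reachable across any intervening gc() */

        gc_prot_array_free(slots); /* clears holder's handle, frees pa */
    }

Here holder itself lives on the C stack, where the conservative root scan finds it, so the backing cobj and hence the slots stay marked for the array's whole lifetime.
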