summaryrefslogtreecommitdiffstats
path: root/gc.c
diff options
context:
space:
mode:
authorKaz Kylheku <kaz@kylheku.com>2012-03-24 10:41:50 -0700
committerKaz Kylheku <kaz@kylheku.com>2012-03-24 10:41:50 -0700
commit1e95ef5e4bfee32bb4c52a59b5bd510f87288569 (patch)
tree5495d40f0f4d5b5593080925ba4efa8360599316 /gc.c
parent52715b72cd4959569ad021b69bf9cf1b26ec55fb (diff)
downloadtxr-1e95ef5e4bfee32bb4c52a59b5bd510f87288569.tar.gz
txr-1e95ef5e4bfee32bb4c52a59b5bd510f87288569.tar.bz2
txr-1e95ef5e4bfee32bb4c52a59b5bd510f87288569.zip
Performance improvement in the GC: keep at least one heap's worth
of free space, so programs close to exhausting a heap do not waste cycles frequently calling the collector. * gc.c (more): Do not assert that free_list is null; this will not be the case any more. (make_obj): Comment added regarding why the free_tail variable is fixed up. (sweep): Now returns a count of the objects that are free. (gc): If sweep reports that less than 75% of the objects are free then let us add another heap.
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c13
1 file changed, 9 insertions, 4 deletions
diff --git a/gc.c b/gc.c
index 2b2567b9..7df908c6 100644
--- a/gc.c
+++ b/gc.c
@@ -123,8 +123,6 @@ static void more(void)
heap_t *heap = (heap_t *) chk_malloc(sizeof *heap);
obj_t *block = heap->block, *end = heap->block + HEAP_SIZE;
- assert (free_list == 0);
-
if (end > heap_max_bound)
heap_max_bound = end;
@@ -170,6 +168,8 @@ val make_obj(void)
return ret;
}
+ /* To save cycles, make_obj draws from the free list without
+ updating this, but before calling gc, it has to be. */
free_tail = &free_list;
switch (tries) {
@@ -389,10 +389,11 @@ static void mark(mach_context_t *pmc, val *gc_stack_top)
mark_mem_region(gc_stack_top, gc_stack_bottom);
}
-static void sweep(void)
+static int_ptr_t sweep(void)
{
heap_t *heap;
int gc_dbg = opt_gc_debug;
+ int_ptr_t free_count = 0;
#ifdef HAVE_VALGRIND
int vg_dbg = opt_vg_debug;
#else
@@ -424,6 +425,7 @@ static void sweep(void)
if (vg_dbg)
VALGRIND_MAKE_MEM_NOACCESS(block, sizeof *block);
#endif
+ free_count++;
continue;
}
@@ -434,6 +436,7 @@ static void sweep(void)
}
finalize(block);
block->t.type = (type_t) (block->t.type | FREE);
+ free_count++;
/* If debugging is turned on, we want to catch instances
where a reachable object is wrongly freed. This is difficult
to do if the object is recycled soon after.
@@ -462,6 +465,7 @@ static void sweep(void)
}
}
}
+ return free_count;
}
void gc(void)
@@ -474,7 +478,8 @@ void gc(void)
gc_enabled = 0;
mark(&mc, &gc_stack_top);
hash_process_weak();
- sweep();
+ if (sweep() < 3 * HEAP_SIZE / 4)
+ more();
gc_enabled = 1;
}
}