Mirror of https://github.com/justinethier/cyclone.git
Merge of the worthwhile changes from gc-opt3-dev
parent 2ce541ed4b
commit b637d13783
2 changed files with 40 additions and 32 deletions
gc.c (68 changed lines)
@@ -59,8 +59,8 @@ static int mark_stack_i = 0;
 static pthread_mutex_t heap_lock;

 // Cached heap statistics
-static int cached_heap_free_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 };
-static int cached_heap_total_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 };
+static uint64_t cached_heap_free_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 };
+static uint64_t cached_heap_total_sizes[7] = { 0, 0, 0, 0, 0, 0, 0 };

 // Data for each individual mutator thread
 ck_array_t Cyc_mutators, old_mutators;
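The hunk above widens the cached per-heap-type counters from int to uint64_t, and the hunks that follow route every read and update of them through Concurrency Kit's ck_pr_* primitives, presumably so mutator threads and the collector can maintain them without taking heap_lock. A minimal sketch of that access pattern, assuming libck is installed and the platform has 64-bit atomics; the counter and function names here are illustrative, not from the commit:

#include <ck_pr.h>      /* Concurrency Kit atomic primitives */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t cached_free_bytes = 0;   /* stand-in for cached_heap_free_sizes[type] */

static void page_added(uint64_t page_size)
{
    /* atomic read-modify-write; safe with concurrent callers, no mutex needed */
    ck_pr_add_64(&cached_free_bytes, page_size);
}

static void object_allocated(uint64_t obj_size)
{
    ck_pr_sub_64(&cached_free_bytes, obj_size);
}

static uint64_t free_bytes_snapshot(void)
{
    /* atomic load avoids torn reads of the 64-bit value on 32-bit hosts */
    return ck_pr_load_64(&cached_free_bytes);
}

int main(void)
{
    page_added(3 * 1024 * 1024);
    object_allocated(96);
    printf("free: %" PRIu64 " bytes\n", free_bytes_snapshot());
    return 0;
}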
@@ -222,12 +222,12 @@ gc_heap *gc_heap_create(int heap_type, size_t size, size_t max_size,
 return NULL;
 h->type = heap_type;
 h->size = size;
-h->newly_created = 1;
+h->ttl = 10;
 h->next_free = h;
 h->last_alloc_size = 0;
 //h->free_size = size;
-cached_heap_total_sizes[heap_type] += size;
-cached_heap_free_sizes[heap_type] += size;
+ck_pr_add_64(&(cached_heap_total_sizes[heap_type]), size);
+ck_pr_add_64(&(cached_heap_free_sizes[heap_type]), size);
 h->chunk_size = chunk_size;
 h->max_size = max_size;
 h->data = (char *)gc_heap_align(sizeof(h->data) + (uintptr_t) & (h->data));
@@ -493,6 +493,9 @@ int gc_grow_heap(gc_heap * h, int heap_type, size_t size, size_t chunk_size)
 if (new_size < HEAP_SIZE) {
 new_size = prev_size + h_last->size;
 prev_size = h_last->size;
+if (new_size > HEAP_SIZE) {
+new_size = HEAP_SIZE;
+}
 } else {
 new_size = HEAP_SIZE;
 }
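The three added lines in gc_grow_heap cap the computed page size: growth still appears to follow a Fibonacci-style pattern (each new page is sized as the sum of the previous two), but the result can no longer overshoot HEAP_SIZE, which the header change further down also lowers to 32 MB. A simplified sketch of the branch, under the assumption that new_size arrives as a tentative value computed earlier in the function; the helper name and signature are hypothetical:

#include <stddef.h>

#define HEAP_SIZE (32 * 1024 * 1024)   /* per-page cap after the header change in this commit */

/* Apply the same branch structure as the hunk above: grow by the sum of the
 * previous two page sizes, but clamp the result to HEAP_SIZE. */
static size_t grow_step(size_t new_size, size_t *prev_size, size_t last_size)
{
    if (new_size < HEAP_SIZE) {
        new_size = *prev_size + last_size;
        *prev_size = last_size;
        if (new_size > HEAP_SIZE) {    /* the newly added clamp */
            new_size = HEAP_SIZE;
        }
    } else {
        new_size = HEAP_SIZE;
    }
    return new_size;
}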
@@ -557,8 +560,8 @@ void *gc_try_alloc(gc_heap * h, int heap_type, size_t size, char *obj,
 // Copy object into heap now to avoid any uninitialized memory issues
 gc_copy_obj(f2, obj, thd);
 //h->free_size -= gc_allocated_bytes(obj, NULL, NULL);
-cached_heap_free_sizes[heap_type] -=
-gc_allocated_bytes(obj, NULL, NULL);
+ck_pr_sub_64(&(cached_heap_free_sizes[heap_type]),
+gc_allocated_bytes(obj, NULL, NULL));
 }
 h_passed->next_free = h;
 h_passed->last_alloc_size = size;
@@ -612,6 +615,10 @@ void *gc_alloc(gc_heap_root * hrt, size_t size, char *obj, gc_thread_data * thd,
 result = gc_try_alloc(h, heap_type, size, obj, thd);
 if (!result) {
 fprintf(stderr, "out of memory error allocating %zu bytes\n", size);
+fprintf(stderr, "Heap type %d diagnostics:\n", heap_type);
+pthread_mutex_lock(&heap_lock);
+gc_print_stats(h);
+pthread_mutex_unlock(&heap_lock); // why not
 exit(1); // could throw error, but OOM is a major issue, so...
 }
 }
@@ -840,7 +847,7 @@ size_t gc_sweep(gc_heap * h, int heap_type, size_t * sum_freed_ptr)
 }
 }
 //h->free_size += heap_freed;
-cached_heap_free_sizes[heap_type] += heap_freed;
+ck_pr_add_64(&(cached_heap_free_sizes[heap_type]), heap_freed);
 // Free the heap page if possible.
 //
 // With huge heaps, this becomes more important. one of the huge
@@ -855,13 +862,14 @@ size_t gc_sweep(gc_heap * h, int heap_type, size_t * sum_freed_ptr)
 // remaining without them.
 //
 // Experimenting with only freeing huge heaps
-if (h->type == HEAP_HUGE && gc_is_heap_empty(h) && !h->newly_created){
+if (gc_is_heap_empty(h) &&
+(h->type == HEAP_HUGE || !(h->ttl--))) {
 unsigned int h_size = h->size;
 h = gc_heap_free(h, prev_h);
-cached_heap_free_sizes[heap_type] -= h_size;
-cached_heap_total_sizes[heap_type] -= h_size;
+ck_pr_sub_64(&(cached_heap_free_sizes[heap_type] ), h_size);
+ck_pr_sub_64(&(cached_heap_total_sizes[heap_type]), h_size);
+
 }
-h->newly_created = 0;
 sum_freed += heap_freed;
 heap_freed = 0;
 }
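The rewritten condition replaces the old newly_created guard with a countdown. Empty huge-object pages are still released immediately, but an empty page of any other heap type is now kept until its ttl (set to 10 in gc_heap_create above) runs out, presumably to avoid freeing and re-creating pages that empty out only briefly between sweeps. A sketch of just that predicate, with simplified names; the real check operates on gc_heap inside gc_sweep:

#include <stdbool.h>

#define HEAP_HUGE 42          /* placeholder tag for this sketch only */

struct page {
    int type;                 /* HEAP_HUGE or one of the fixed-size heap types */
    unsigned int ttl;         /* sweeps an empty page survives before being freed */
};

/* Mirrors the condition added in the hunk above.  The short-circuit matters:
 * ttl is only decremented for empty, non-huge pages, and !(ttl--) is true only
 * once the counter has reached zero, so a page created with ttl = 10 survives
 * ten empty sweeps and is released on the eleventh. */
static bool should_free_page(struct page *p, bool is_empty)
{
    return is_empty && (p->type == HEAP_HUGE || !(p->ttl--));
}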
@@ -1073,16 +1081,16 @@ void gc_mut_cooperate(gc_thread_data * thd, int buf_len)
 // Threshold is intentially low because we have to go through an
 // entire handshake/trace/sweep cycle, ideally without growing heap.
 if (ck_pr_load_int(&gc_stage) == STAGE_RESTING &&
-((cached_heap_free_sizes[HEAP_SM] <
-cached_heap_total_sizes[HEAP_SM] * GC_COLLECTION_THRESHOLD) ||
-(cached_heap_free_sizes[HEAP_64] <
-cached_heap_total_sizes[HEAP_64] * GC_COLLECTION_THRESHOLD) ||
+((ck_pr_load_64(&(cached_heap_free_sizes[HEAP_SM])) <
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_SM])) * GC_COLLECTION_THRESHOLD) ||
+(ck_pr_load_64(&(cached_heap_free_sizes[HEAP_64])) <
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_64])) * GC_COLLECTION_THRESHOLD) ||
 #if INTPTR_MAX == INT64_MAX
-(cached_heap_free_sizes[HEAP_96] <
-cached_heap_total_sizes[HEAP_96] * GC_COLLECTION_THRESHOLD) ||
+(ck_pr_load_64(&(cached_heap_free_sizes[HEAP_96])) <
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_96])) * GC_COLLECTION_THRESHOLD) ||
 #endif
-(cached_heap_free_sizes[HEAP_REST] <
-cached_heap_total_sizes[HEAP_REST] * GC_COLLECTION_THRESHOLD))) {
+(ck_pr_load_64(&(cached_heap_free_sizes[HEAP_REST])) <
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_REST])) * GC_COLLECTION_THRESHOLD))) {
 #if GC_DEBUG_TRACE
 fprintf(stderr,
 "Less than %f%% of the heap is free, initiating collector\n",
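With the counters now 64-bit and updated concurrently, the trigger test reads each one through ck_pr_load_64 instead of a plain load; the decision itself is unchanged: start a collection while the collector is resting and any heap type's free space has dropped below GC_COLLECTION_THRESHOLD of its total. A hypothetical helper showing one of those comparisons in isolation; the arrays here are stand-ins for the statics declared at the top of gc.c:

#include <ck_pr.h>
#include <stdint.h>

static uint64_t cached_heap_free_sizes[7];    /* stand-ins for the arrays in gc.c */
static uint64_t cached_heap_total_sizes[7];

/* True when heap_type has less than `threshold` (e.g. GC_COLLECTION_THRESHOLD)
 * of its space free, using atomic snapshots of both counters. */
static int heap_type_low_on_space(int heap_type, double threshold)
{
    uint64_t free_bytes  = ck_pr_load_64(&(cached_heap_free_sizes[heap_type]));
    uint64_t total_bytes = ck_pr_load_64(&(cached_heap_total_sizes[heap_type]));
    return (double)free_bytes < (double)total_bytes * threshold;
}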
@@ -1487,8 +1495,8 @@ void gc_collector()

 // TODO: this loop only includes smallest 2 heaps, is that sufficient??
 for (heap_type = 0; heap_type < 2; heap_type++) {
-while (cached_heap_free_sizes[heap_type] <
-(cached_heap_total_sizes[heap_type] * GC_FREE_THRESHOLD)) {
+while ( ck_pr_load_64(&(cached_heap_free_sizes[heap_type])) <
+(ck_pr_load_64(&(cached_heap_total_sizes[heap_type])) * GC_FREE_THRESHOLD)) {
 #if GC_DEBUG_TRACE
 fprintf(stderr, "Less than %f%% of the heap %d is free, growing it\n",
 100.0 * GC_FREE_THRESHOLD, heap_type);
@@ -1503,18 +1511,18 @@ void gc_collector()
 }
 }
 #if GC_DEBUG_TRACE
-total_size = cached_heap_total_sizes[HEAP_SM] +
-cached_heap_total_sizes[HEAP_64] +
+total_size = ck_pr_load_64(&(cached_heap_total_sizes[HEAP_SM])) +
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_64])) +
 #if INTPTR_MAX == INT64_MAX
-cached_heap_total_sizes[HEAP_96] +
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_96])) +
 #endif
-cached_heap_total_sizes[HEAP_REST];
-total_free = cached_heap_free_sizes[HEAP_SM] +
-cached_heap_free_sizes[HEAP_64] +
+ck_pr_load_64(&(cached_heap_total_sizes[HEAP_REST]));
+total_free = ck_pr_load_64(&(cached_heap_free_sizes[HEAP_SM])) +
+ck_pr_load_64(&(cached_heap_free_sizes[HEAP_64])) +
 #if INTPTR_MAX == INT64_MAX
-cached_heap_free_sizes[HEAP_96] +
+ck_pr_load_64(&(cached_heap_free_sizes[HEAP_96])) +
 #endif
-cached_heap_free_sizes[HEAP_REST];
+ck_pr_load_64(&(cached_heap_free_sizes[HEAP_REST]));
 fprintf(stderr,
 "sweep done, total_size = %zu, total_free = %zu, freed = %zu, elapsed = %ld\n",
 total_size, total_free, freed,
@@ -34,7 +34,7 @@
 // Parameters for size of a "page" on the heap (the second generation GC), in bytes.
 #define GROW_HEAP_BY_SIZE (2 * 1024 * 1024) // Grow first page by adding this amount to it
 #define INITIAL_HEAP_SIZE (3 * 1024 * 1024) // Size of the first page
-#define HEAP_SIZE (377 * 1024 * 1024) // Normal size of a page
+#define HEAP_SIZE (32 * 1024 * 1024) // Normal size of a page

 /////////////////////////////
 // Major GC tuning parameters
@@ -191,7 +191,7 @@ struct gc_heap_t {
 unsigned int size;
 unsigned int chunk_size; // 0 for any size, other and heap will only alloc chunks of that size
 unsigned int max_size;
-unsigned int newly_created;
+unsigned int ttl; // Keep empty page alive this many times before freeing
 //
 gc_heap *next_free;
 unsigned int last_alloc_size;