From ea2550a88217039bb30603d7b9d0cbb24239c810 Mon Sep 17 00:00:00 2001
From: Justin Ethier
Date: Wed, 10 May 2017 10:12:25 +0000
Subject: [PATCH] Issue #199 - More efficient memory usage

Only use a single int on the heap to store the number of huge heap
allocations. There is no need to track allocations on the other heaps,
at least at this time.
---
 gc.c                    | 16 ++++++----------
 include/cyclone/types.h |  2 +-
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/gc.c b/gc.c
index a139904a..30513d62 100644
--- a/gc.c
+++ b/gc.c
@@ -250,7 +250,6 @@ gc_heap *gc_heap_create(int heap_type, size_t size, size_t max_size,
   //h->free_size = size;
   ck_pr_add_ptr(&(thd->cached_heap_total_sizes[heap_type]), size);
   ck_pr_add_ptr(&(thd->cached_heap_free_sizes[heap_type]), size);
-  // heap_num_allocations already initialized to 0, nothing else needed here
   h->chunk_size = chunk_size;
   h->max_size = max_size;
   h->data = (char *)gc_heap_align(sizeof(h->data) + (uintptr_t) & (h->data));
@@ -637,7 +636,7 @@ void *gc_try_alloc(gc_heap * h, int heap_type, size_t size, char *obj,
         ck_pr_sub_ptr(&(thd->cached_heap_free_sizes[heap_type]),
                       gc_allocated_bytes(obj, NULL, NULL));
       } else {
-        ck_pr_add_ptr(&(thd->heap_num_allocations[heap_type]), 1);
+        ck_pr_add_int(&(thd->heap_num_huge_allocations), 1);
       }
       h_passed->next_free = h;
       h_passed->last_alloc_size = size;
@@ -905,7 +904,7 @@ void gc_collector_sweep()
     }
   }
   // Clear allocation counts to delay next GC trigger
-  ck_pr_store_ptr(&(m->heap_num_allocations[HEAP_HUGE]), 0);
+  ck_pr_store_int(&(m->heap_num_huge_allocations), 0);
 #if GC_DEBUG_TRACE
   total_size = ck_pr_load_ptr(&(m->cached_heap_total_sizes[HEAP_SM])) +
                ck_pr_load_ptr(&(m->cached_heap_total_sizes[HEAP_64])) +
@@ -1352,8 +1351,8 @@ void gc_mut_cooperate(gc_thread_data * thd, int buf_len)
 #endif
        (ck_pr_load_ptr(&(thd->cached_heap_free_sizes[HEAP_REST])) <
         ck_pr_load_ptr(&(thd->cached_heap_total_sizes[HEAP_REST])) * GC_COLLECTION_THRESHOLD) ||
-       // Experimenting with separate huge heap threshold
-       (ck_pr_load_ptr(&(thd->heap_num_allocations[HEAP_HUGE])) > 100)
+       // Separate huge heap threshold since these are typically allocated as whole pages
+       (ck_pr_load_int(&(thd->heap_num_huge_allocations)) > 100)
        )) {
 #if GC_DEBUG_TRACE
     fprintf(stderr,
@@ -1935,7 +1934,6 @@ void gc_thread_data_init(gc_thread_data * thd, int mut_num, char *stack_base,
   }
   thd->cached_heap_free_sizes = calloc(5, sizeof(uintptr_t));
   thd->cached_heap_total_sizes = calloc(5, sizeof(uintptr_t));
-  thd->heap_num_allocations = calloc(5, sizeof(uintptr_t));
   thd->heap = calloc(1, sizeof(gc_heap_root));
   thd->heap->heap = calloc(1, sizeof(gc_heap *) * NUM_HEAP_TYPES);
   thd->heap->heap[HEAP_REST] = gc_heap_create(HEAP_REST, INITIAL_HEAP_SIZE, 0, 0, thd);
@@ -1978,8 +1976,6 @@ void gc_thread_data_free(gc_thread_data * thd)
     free(thd->cached_heap_free_sizes);
   if (thd->cached_heap_total_sizes)
     free(thd->cached_heap_total_sizes);
-  if (thd->heap_num_allocations)
-    free(thd->heap_num_allocations);
   if (thd->jmp_start)
     free(thd->jmp_start);
   if (thd->gc_args)
@@ -2031,10 +2027,10 @@ void gc_merge_all_heaps(gc_thread_data *dest, gc_thread_data *src)
                     ck_pr_load_ptr(&(src->cached_heap_total_sizes[heap_type])));
       ck_pr_add_ptr(&(dest->cached_heap_free_sizes[heap_type]),
                     ck_pr_load_ptr(&(src->cached_heap_free_sizes[heap_type])));
-      ck_pr_add_ptr(&(dest->heap_num_allocations[heap_type]),
-                    ck_pr_load_ptr(&(src->heap_num_allocations[heap_type])));
     }
   }
+  ck_pr_add_int(&(dest->heap_num_huge_allocations),
+                ck_pr_load_int(&(src->heap_num_huge_allocations)));
 #ifdef GC_DEBUG_TRACE
   fprintf(stderr, "Finished merging old heap data\n");
 #endif
diff --git a/include/cyclone/types.h b/include/cyclone/types.h
index 163c08a1..11fdb302 100644
--- a/include/cyclone/types.h
+++ b/include/cyclone/types.h
@@ -289,7 +289,7 @@ struct gc_thread_data_t {
   gc_heap_root *heap;
   uintptr_t *cached_heap_free_sizes;
   uintptr_t *cached_heap_total_sizes;
-  uintptr_t *heap_num_allocations;
+  int heap_num_huge_allocations;
   // Data needed for call history
   char **stack_traces;
   int stack_trace_idx;
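
The patch above replaces a per-heap-type counter array with a single per-thread int that only counts huge allocations, increments it on each huge allocation, checks it against a threshold during mutator cooperation, and resets it after a sweep. The following standalone sketch illustrates that counter lifecycle in isolation. It is not the Cyclone runtime code: the struct, threshold constant, and function names below are simplified stand-ins, and the real implementation uses Concurrency Kit atomics (ck_pr_add_int, ck_pr_load_int, ck_pr_store_int) on gc_thread_data.

```c
#include <stdbool.h>
#include <stdio.h>

/* Matches the "> 100" check in gc_mut_cooperate (illustrative constant). */
#define HUGE_ALLOC_GC_THRESHOLD 100

/* Simplified stand-in for the relevant field of gc_thread_data. */
typedef struct {
  int heap_num_huge_allocations;  /* single int counter, as in the patch */
} thread_data_sketch;

/* Called whenever a huge (page-sized) object is allocated. */
static void note_huge_allocation(thread_data_sketch *thd) {
  thd->heap_num_huge_allocations++;     /* real code: ck_pr_add_int(..., 1) */
}

/* Cooperation check: has this mutator done enough huge allocations to trigger GC? */
static bool huge_heap_needs_gc(const thread_data_sketch *thd) {
  return thd->heap_num_huge_allocations > HUGE_ALLOC_GC_THRESHOLD;
}

/* Sweep clears the counter so the next trigger is delayed. */
static void sweep_reset(thread_data_sketch *thd) {
  thd->heap_num_huge_allocations = 0;   /* real code: ck_pr_store_int(..., 0) */
}

int main(void) {
  thread_data_sketch thd = { 0 };
  for (int i = 0; i < 150; i++) {
    note_huge_allocation(&thd);
    if (huge_heap_needs_gc(&thd)) {
      printf("GC triggered after %d huge allocations\n",
             thd.heap_num_huge_allocations);
      sweep_reset(&thd);
    }
  }
  return 0;
}
```

Because huge objects get whole pages rather than chunks inside an existing heap page, the free-size/total-size ratio used for the other heap types says little about them; counting allocations directly, as sketched here, is what the separate threshold in gc_mut_cooperate provides.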