this repo has no description

Grow the heap if need be (#186)

Fix #162

authored by bernsteinbear.com and committed by GitHub (commits c19fac56, 786a151f)

+40 -16
+14
compiler_tests.py
··· 121 121 def test_match_heap_variant(self) -> None: 122 122 self.assertEqual(self._run("f #bar 1 . f = | # bar 1 -> 3 | # foo () -> 4"), "3\n") 123 123 124 + @unittest.skipIf("STATIC_HEAP" in os.environ.get("CFLAGS", ""), "Can't grow heap in static heap mode") 125 + def test_heap_growth(self) -> None: 126 + self.assertEqual( 127 + self._run( 128 + """ 129 + countdown 1000 130 + . countdown = 131 + | 0 -> [] 132 + | n -> n >+ countdown (n - 1) 133 + """ 134 + ), 135 + "[" + ", ".join(str(i) for i in range(1000, 0, -1)) + "]\n", 136 + ) 137 + 124 138 125 139 if __name__ == "__main__": 126 140 unittest.main()
+26 -16
runtime.c
··· 120 120 uintptr_t limit; 121 121 uintptr_t from_space; 122 122 uintptr_t to_space; 123 - size_t size; 123 + struct space space; 124 124 }; 125 125 126 126 static uintptr_t align(uintptr_t val, uintptr_t alignment) { ··· 157 157 space.size, kPageSize); 158 158 abort(); 159 159 } 160 - heap->size = space.size; 160 + heap->space = space; 161 161 heap->to_space = heap->hp = space.start; 162 162 heap->from_space = heap->limit = heap->hp + space.size / 2; 163 163 } ··· 175 175 heap->hp = heap->from_space; 176 176 heap->from_space = heap->to_space; 177 177 heap->to_space = heap->hp; 178 - heap->limit = heap->hp + heap->size / 2; 178 + heap->limit = heap->hp + heap->space.size / 2; 179 179 } 180 180 181 181 struct object* heap_tag(uintptr_t addr) { ··· 220 220 struct gc_obj* obj = (struct gc_obj*)scan; 221 221 scan += align_size(trace_heap_object(obj, heap, visit_field)); 222 222 } 223 + // TODO(max): If we have < 25% heap utilization, shrink the heap 223 224 #ifndef NDEBUG 224 225 // Zero out the rest of the heap for debugging 225 226 memset((void*)scan, 0, heap->limit - scan); ··· 235 236 #endif 236 237 #define ALLOCATOR __attribute__((__malloc__)) 237 238 238 - static NEVER_INLINE ALLOCATOR struct object* allocate_slow_path( 239 - struct gc_heap* heap, size_t size) { 240 - // size is already aligned 239 + #ifndef STATIC_HEAP 240 + static NEVER_INLINE void heap_grow(struct gc_heap* heap) { 241 + struct space old_space = heap->space; 242 + struct space new_space = make_space(old_space.size * 2); 243 + init_heap(heap, new_space); 241 244 collect(heap); 242 - if (UNLIKELY(heap->limit - heap->hp < size)) { 243 - fprintf(stderr, "out of memory\n"); 244 - abort(); 245 - } 246 - uintptr_t addr = heap->hp; 247 - uintptr_t new_hp = align_size(addr + size); 248 - heap->hp = new_hp; 249 - return heap_tag(addr); 245 + destroy_space(old_space); 250 246 } 247 + #endif 251 248 252 249 bool is_power_of_two(uword x) { return (x & (x - 1)) == 0; } 253 250 ··· 265 262 266 263 bool 
obj_has_tag(struct gc_obj* obj, byte tag) { return obj_tag(obj) == tag; } 267 264 265 + static NEVER_INLINE void allocate_slow_path(struct gc_heap* heap, uword size) { 266 + #ifndef STATIC_HEAP 267 + heap_grow(heap); 268 + #endif 269 + // size is already aligned 270 + if (UNLIKELY(heap->limit - heap->hp < size)) { 271 + fprintf(stderr, "out of memory\n"); 272 + abort(); 273 + } 274 + } 275 + 268 276 static ALWAYS_INLINE ALLOCATOR struct object* allocate(struct gc_heap* heap, 269 277 uword tag, uword size) { 270 278 assert(is_aligned(size, 1 << kPrimaryTagBits) && "need 3 bits for tagging"); 271 279 uintptr_t addr = heap->hp; 272 280 uintptr_t new_hp = align_size(addr + size); 273 281 if (UNLIKELY(heap->limit < new_hp)) { 274 - return allocate_slow_path(heap, size); 282 + allocate_slow_path(heap, size); 283 + addr = heap->hp; 284 + new_hp = align_size(addr + size); 275 285 } 276 286 heap->hp = new_hp; 277 287 ((struct gc_obj*)addr)->tag = make_tag(tag, size); ··· 584 594 as_variant(variant)->value = value; 585 595 } 586 596 587 - #define MAX_HANDLES 1024 597 + #define MAX_HANDLES 4096 588 598 589 599 struct handle_scope { 590 600 struct object*** base;