This repository has no description.

Don't align unnecessarily

Just assert alignment.

Authored by bernsteinbear.com and committed by Max Bernstein (commits d3d70231, c48b1966).

+17 -10
runtime.c
··· 106 106 107 107 // To implement by the user: 108 108 size_t heap_object_size(struct gc_obj* obj); 109 - size_t trace_heap_object(struct gc_obj* obj, struct gc_heap* heap, 110 - VisitFn visit); 109 + void trace_heap_object(struct gc_obj* obj, struct gc_heap* heap, VisitFn visit); 111 110 void trace_roots(struct gc_heap* heap, VisitFn visit); 112 111 113 112 struct space { ··· 252 251 uintptr_t scan = heap->base; 253 252 while (scan < heap->hp) { 254 253 struct gc_obj* obj = (struct gc_obj*)scan; 255 - scan += align_size(trace_heap_object(obj, heap, assert_in_heap)); 254 + size_t size = heap_object_size(obj); 255 + uword end = scan + size; 256 + assert(is_size_aligned(end)); 257 + trace_heap_object(obj, heap, assert_in_heap); 258 + scan = end; 256 259 } 257 260 } 258 261 ··· 262 265 trace_roots(heap, visit_field); 263 266 while (scan < heap->hp) { 264 267 struct gc_obj* obj = (struct gc_obj*)scan; 265 - scan += align_size(trace_heap_object(obj, heap, visit_field)); 268 + size_t size = heap_object_size(obj); 269 + uword end = scan + size; 270 + assert(is_size_aligned(end)); 271 + trace_heap_object(obj, heap, visit_field); 272 + scan = end; 266 273 } 267 274 // TODO(max): If we have < 25% heap utilization, shrink the heap 268 275 #ifndef NDEBUG ··· 417 424 return result; 418 425 } 419 426 420 - size_t trace_heap_object(struct gc_obj* obj, struct gc_heap* heap, 421 - VisitFn visit) { 427 + void trace_heap_object(struct gc_obj* obj, struct gc_heap* heap, 428 + VisitFn visit) { 422 429 switch (obj_tag(obj)) { 423 430 case TAG_LIST: 424 431 visit(&((struct list*)obj)->first, heap); ··· 443 450 fprintf(stderr, "unknown tag: %u\n", obj_tag(obj)); 444 451 abort(); 445 452 } 446 - return heap_object_size(obj); 447 453 } 448 454 449 455 bool smallint_is_valid(word value) { ··· 515 521 516 522 struct object* mkclosure(struct gc_heap* heap, ClosureFn fn, 517 523 size_t num_fields) { 518 - uword size = align_size(sizeof(struct closure) + num_fields * kPointerSize); 524 + uword size = sizeof(struct closure) + num_fields * kPointerSize; 525 + assert(is_size_aligned(size)); 519 526 struct object* result = allocate(heap, TAG_CLOSURE, size); 520 527 as_closure(result)->fn = fn; 521 528 as_closure(result)->size = num_fields; ··· 553 560 } 554 561 555 562 struct object* mkrecord(struct gc_heap* heap, size_t num_fields) { 556 - uword size = align_size(sizeof(struct record) + 557 - num_fields * sizeof(struct record_field)); 563 + uword size = sizeof(struct record) + num_fields * sizeof(struct record_field); 564 + assert(is_size_aligned(size)); 558 565 struct object* result = allocate(heap, TAG_RECORD, size); 559 566 as_record(result)->size = num_fields; 560 567 // Assumes the items will be filled in immediately after calling mkrecord so