// Runtime support: immediate-tagged object model and a semispace copying
// garbage collector.
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef STATIC_HEAP
#include <sys/mman.h>
#endif
12
// Force/forbid inlining on GCC-compatible compilers.
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NEVER_INLINE __attribute__((noinline))

const int kPointerSize = sizeof(void*);
// Machine-word-sized signed/unsigned integers used for tagged values.
typedef intptr_t word;
typedef uintptr_t uword;
typedef unsigned char byte;

// Garbage collector core by Andy Wingo <wingo@pobox.com>.

// Header word of every heap-allocated object. While the object is live this
// holds (size << kBitsPerByte) | type-tag (see make_tag); during collection a
// cleared low bit means the word has been replaced by a forwarding pointer.
struct gc_obj {
  uintptr_t tag;  // low bit is 0 if forwarding ptr
};

// The low bit of the pointer is 1 if it's a heap object and 0 if it's an
// immediate integer
struct object;
30
// True if obj encodes an immediate small integer (low tag bits match
// kSmallIntTag).
bool is_small_int(struct object* obj) {
  return (((uword)obj) & kSmallIntTagMask) == kSmallIntTag;
}
// True for non-pointer immediates other than small ints (small strings,
// immediate variants, empty list, hole, ...).
bool is_immediate_not_small_int(struct object* obj) {
  return (((uword)obj) & (kPrimaryTagMask & ~kSmallIntTagMask)) != 0;
}
// True if obj is a tagged pointer into the GC heap or the const heap.
bool is_heap_object(struct object* obj) {
  return (((uword)obj) & kPrimaryTagMask) == kHeapObjectTag;
}
#define empty_list() ((struct object*)kEmptyListTag)
bool is_empty_list(struct object* obj) { return obj == empty_list(); }
#define hole() ((struct object*)kHoleTag)
bool is_hole(struct object* obj) { return (uword)obj == kHoleTag; }
// Small strings pack their length and up to kMaxSmallStringLength chars into
// the pointer word itself; no allocation needed.
static ALWAYS_INLINE bool is_small_string(struct object* obj) {
  return (((uword)obj) & kImmediateTagMask) == kSmallStringTag;
}
// Payload-free variants are encoded as immediates with the variant tag in the
// bits above the immediate tag.
#define mk_immediate_variant(tag) \
  (struct object*)(((uword)(tag) << kImmediateTagBits) | kVariantTag)
static ALWAYS_INLINE bool is_immediate_variant(struct object* obj) {
  return ((uword)obj & kImmediateTagMask) == kVariantTag;
}
static uword immediate_variant_tag(struct object* obj) {
  assert(is_immediate_variant(obj));
  return ((uword)obj) >> kImmediateTagBits;
}
// The length lives in the byte just above the immediate tag bits.
static ALWAYS_INLINE uword small_string_length(struct object* obj) {
  assert(is_small_string(obj));
  return (((uword)obj) >> kImmediateTagBits) & kMaxSmallStringLength;
}
60static ALWAYS_INLINE struct object* mksmallstring(const char* data,
61 uword length) {
62 assert(length <= kMaxSmallStringLength);
63 uword result = 0;
64 for (word i = length - 1; i >= 0; i--) {
65 result = (result << kBitsPerByte) | data[i];
66 }
67 struct object* result_obj =
68 (struct object*)((result << kBitsPerByte) |
69 (length << kImmediateTagBits) | kSmallStringTag);
70 assert(!is_heap_object(result_obj));
71 assert(is_small_string(result_obj));
72 assert(small_string_length(result_obj) == length);
73 return result_obj;
74}
// The empty string is the small string of length 0 (just the tag byte).
struct object* empty_string() { return (struct object*)kSmallStringTag; }
bool is_empty_string(struct object* obj) { return obj == empty_string(); }
// Reads one char out of an immediate small string.
static ALWAYS_INLINE char small_string_at(struct object* obj, uword index) {
  assert(is_small_string(obj));
  assert(index < small_string_length(obj));
  // +1 for (length | tag) byte
  return ((uword)obj >> ((index + 1) * kBitsPerByte)) & 0xFF;
}
// Strips the kHeapObjectTag low bit to recover the gc_obj header address.
static ALWAYS_INLINE struct gc_obj* as_heap_object(struct object* obj) {
  assert(is_heap_object(obj));
  assert(kHeapObjectTag == 1);
  return (struct gc_obj*)((uword)obj - 1);
}
88
// A live header always has its low bit set (all type tags are odd); a cleared
// low bit means the header now holds a forwarding pointer.
static const uintptr_t kNotForwardedBit = 1ULL;
int is_forwarded(struct gc_obj* obj) {
  return (obj->tag & kNotForwardedBit) == 0;
}
// Returns the to-space copy this object was already evacuated to.
struct gc_obj* forwarded(struct gc_obj* obj) {
  assert(is_forwarded(obj));
  return (struct gc_obj*)obj->tag;
}
// Overwrites `from`'s header with a forwarding pointer to `to`.
void forward(struct gc_obj* from, struct gc_obj* to) {
  assert(!is_forwarded(from));
  assert((((uintptr_t)to) & kNotForwardedBit) == 0);
  from->tag = (uintptr_t)to;
}

struct gc_heap;

// Callback applied to each slot that holds an object pointer.
typedef void (*VisitFn)(struct object**, struct gc_heap*);

// To implement by the user:
size_t heap_object_size(struct gc_obj* obj);
void trace_heap_object(struct gc_obj* obj, struct gc_heap* heap, VisitFn visit);
void trace_roots(struct gc_heap* heap, VisitFn visit);
111
// A contiguous memory region backing the whole heap (both semispaces).
struct space {
  uintptr_t start;
  uintptr_t size;
};

// Semispace heap state. Allocation bumps `hp` toward `limit`; the populated
// half spans [base, hp); `from_space`/`to_space` swap roles on each flip.
struct gc_heap {
  uintptr_t hp;
  uintptr_t limit;
  uintptr_t from_space;
  uintptr_t to_space;
  uintptr_t base;
  struct space space;
};

// Rounds `val` up to a multiple of `alignment` (must be a power of two).
static ALWAYS_INLINE uintptr_t align(uintptr_t val, uintptr_t alignment) {
  return (val + alignment - 1) & ~(alignment - 1);
}
static ALWAYS_INLINE uintptr_t align_size(uintptr_t size) {
  return align(size, kObjectAlignment);
}
static ALWAYS_INLINE bool is_size_aligned(uword size) {
  return size == align_size(size);
}
135
#ifdef STATIC_HEAP
// With a static heap the caller supplies the backing memory; nothing to free.
struct space make_space(void* mem, uintptr_t size) {
  return (struct space){(uintptr_t)mem, size};
}
void destroy_space(struct space space) {}
#else
// Maps a page-aligned anonymous region; aborts if mmap fails.
struct space make_space(uintptr_t size) {
  size = align(size, kPageSize);
  void* mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) {
    fprintf(stderr, "mmap failed\n");
    abort();
  }
  return (struct space){(uintptr_t)mem, size};
}
void destroy_space(struct space space) {
  munmap((void*)space.start, space.size);
}
#endif
156
157void init_heap(struct gc_heap* heap, struct space space) {
158 if (align(space.size, kPageSize) != space.size) {
159 fprintf(stderr, "heap size (%lu) must be a multiple of %lu\n", space.size,
160 kPageSize);
161 abort();
162 }
163 heap->space = space;
164 heap->base = heap->to_space = heap->hp = space.start;
165 heap->from_space = heap->limit = heap->hp + space.size / 2;
166}
167
// Returns heap->hp, promising the compiler it is kObjectAlignment-aligned.
static ALWAYS_INLINE uintptr_t heap_ptr(struct gc_heap* heap) {
#if defined(NDEBUG) && defined(__GNUC__)
  // Clang and GCC support this; TCC does not
  return (uintptr_t)__builtin_assume_aligned((void*)heap->hp, kObjectAlignment);
#else
  assert(is_size_aligned(heap->hp) && "need 3 bits for tagging");
  return heap->hp;
#endif
}

// Evacuates `obj` to the allocation frontier and leaves a forwarding pointer
// behind in the old copy.
struct gc_obj* copy(struct gc_heap* heap, struct gc_obj* obj) {
  size_t size = heap_object_size(obj);
  struct gc_obj* new_obj = (struct gc_obj*)heap_ptr(heap);
  memcpy(new_obj, obj, size);
  forward(obj, new_obj);
  heap->hp += size;
  assert(is_size_aligned(heap->hp) && "need 3 bits for tagging");
  return new_obj;
}

// Swaps the roles of the semispaces; subsequent allocation and copying fill
// the former from-space from its start.
void flip(struct gc_heap* heap) {
  heap->base = heap->hp = heap->from_space;
  heap->from_space = heap->to_space;
  heap->to_space = heap->hp;
  heap->limit = heap->hp + heap->space.size / 2;
}
194
// Tags an aligned heap address so it reads as a heap-object pointer (sets the
// low bit; see is_heap_object/as_heap_object).
struct object* heap_tag(uintptr_t addr) {
  uintptr_t tagged = addr | (uintptr_t)1;
  return (struct object*)tagged;
}
198
#ifdef __TINYC__
// libc defines __attribute__ as an empty macro if the compiler is not GCC or
// GCC < 2. We know tcc has supported __attribute__(section(...)) for 20+ years
// so we can undefine it.
// See tinycc-devel:
// https://lists.nongnu.org/archive/html/tinycc-devel/2018-04/msg00008.html and
// my StackOverflow question: https://stackoverflow.com/q/78638571/569183
#undef __attribute__
#endif

// Linker-provided bounds of the "const_heap" section, which holds statically
// allocated objects that the collector never moves (see CONST_HEAP below).
extern char __start_const_heap[]
#ifdef __APPLE__
__asm("section$start$__DATA$const_heap")
#endif
;
extern char __stop_const_heap[]
#ifdef __APPLE__
__asm("section$end$__DATA$const_heap")
#endif
;

// True if obj lives in the const heap and must be skipped by the GC.
bool in_const_heap(struct gc_obj* obj) {
  return (uword)obj >= (uword)__start_const_heap &&
         (uword)obj < (uword)__stop_const_heap;
}
224
// GC visitor: evacuates the object the slot refers to (or reuses its existing
// forwarding pointer) and updates the slot in place. Immediates and const-heap
// objects are left untouched.
void visit_field(struct object** pointer, struct gc_heap* heap) {
  if (!is_heap_object(*pointer)) {
    return;
  }
  struct gc_obj* from = as_heap_object(*pointer);
  if (in_const_heap(from)) {
    return;
  }
  struct gc_obj* to = is_forwarded(from) ? forwarded(from) : copy(heap, from);
  *pointer = heap_tag((uintptr_t)to);
}

// True if obj lies within the populated region of the active semispace.
static bool in_heap(struct gc_heap* heap, struct gc_obj* obj) {
  return (uword)obj >= heap->base && (uword)obj < heap->hp;
}
240
241void assert_in_heap(struct object** pointer, struct gc_heap* heap) {
242 if (!is_heap_object(*pointer)) {
243 return;
244 }
245 struct gc_obj* obj = as_heap_object(*pointer);
246 if (in_const_heap(obj)) {
247 return;
248 }
249 if (!in_heap(heap, obj)) {
250 fprintf(stderr, "pointer %p not in heap [%p, %p)\n", obj,
251 (void*)heap->to_space, (void*)heap->hp);
252 abort();
253 }
254}
255
// Debug-only consistency check: asserts every root and every edge of every
// object in [base, hp) points into the heap (or const heap).
static NEVER_INLINE void heap_verify(struct gc_heap* heap) {
  assert(heap->base <= heap->hp);
  trace_roots(heap, assert_in_heap);
  uintptr_t scan = heap->base;
  while (scan < heap->hp) {
    struct gc_obj* obj = (struct gc_obj*)scan;
    size_t size = heap_object_size(obj);
    uword end = scan + size;
    assert(is_size_aligned(end));
    trace_heap_object(obj, heap, assert_in_heap);
    scan = end;
  }
}

// Cheney-style collection: flip the semispaces, copy the roots, then scan the
// freshly copied objects breadth-first until `scan` catches up with `hp`.
void collect_no_verify(struct gc_heap* heap) {
  flip(heap);
  uintptr_t scan = heap->hp;
  trace_roots(heap, visit_field);
  while (scan < heap->hp) {
    struct gc_obj* obj = (struct gc_obj*)scan;
    size_t size = heap_object_size(obj);
    uword end = scan + size;
    assert(is_size_aligned(end));
    trace_heap_object(obj, heap, visit_field);
    scan = end;
  }
  // TODO(max): If we have < 25% heap utilization, shrink the heap
#ifndef NDEBUG
  // Zero out the rest of the heap for debugging
  memset((void*)scan, 0, heap->limit - scan);
#endif
}

// Collection entry point; verifies heap invariants before and after in debug
// builds.
void collect(struct gc_heap* heap) {
#ifndef NDEBUG
  heap_verify(heap);
#endif
  collect_no_verify(heap);
#ifndef NDEBUG
  heap_verify(heap);
#endif
}
298
// __builtin_expect is a compiler builtin, not a preprocessor macro, so the
// previous `#if defined(__builtin_expect)` was always false and the branch
// hints were silently never used. Detect support via __has_builtin where
// available, falling back to assuming any GNU-C-compatible compiler has it.
#if defined(__has_builtin)
#if __has_builtin(__builtin_expect)
#define HAVE_BUILTIN_EXPECT 1
#endif
#elif defined(__GNUC__)
#define HAVE_BUILTIN_EXPECT 1
#endif
#ifdef HAVE_BUILTIN_EXPECT
#define LIKELY(x) __builtin_expect(!!(x), 1)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
// Parenthesize the fallback so the macros expand safely in any expression.
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
#define ALLOCATOR __attribute__((__malloc__))
307
#ifndef STATIC_HEAP
// Doubles the heap: maps a new space, re-initializes the heap over it, and
// collects, which evacuates all live objects out of the old space before it
// is unmapped.
static NEVER_INLINE void heap_grow(struct gc_heap* heap) {
  struct space old_space = heap->space;
  struct space new_space = make_space(old_space.size * 2);
#ifndef NDEBUG
  heap_verify(heap);
#endif
  init_heap(heap, new_space);
  collect_no_verify(heap);
#ifndef NDEBUG
  heap_verify(heap);
#endif
  destroy_space(old_space);
}
#endif
323
// Builds a live header word: object size in the upper bits, odd type tag in
// the low byte.
uword make_tag(uword tag, uword size_bytes) {
  assert(size_bytes <= 0xffffffff);
  return (size_bytes << kBitsPerByte) | tag;
}

// Extracts the type tag from a live (non-forwarded) header.
byte obj_tag(struct gc_obj* obj) { return (obj->tag & 0xff); }

bool obj_has_tag(struct gc_obj* obj, byte tag) { return obj_tag(obj) == tag; }
332
// Out-of-line refill path: grow the heap (no-op with a static heap) and retry
// the bump allocation; aborts if the object still does not fit.
static NEVER_INLINE ALLOCATOR struct object* allocate_slow_path(
    struct gc_heap* heap, uword tag, uword size) {
  // Outlining allocate_slow_path like this helps the compiler generate better
  // code in callers of allocate such as mklist. For some reason we have to
  // tail-duplicate allocate, too :(
#ifndef STATIC_HEAP
  heap_grow(heap);
#endif
  assert(is_size_aligned(size) && "need 3 bits for tagging");
  if (UNLIKELY(heap->limit - heap->hp < size)) {
    fprintf(stderr, "out of memory\n");
    abort();
  }
  // NOTE: Keep in sync with allocate
  uintptr_t addr = heap_ptr(heap);
  uintptr_t new_hp = addr + size;
  assert(is_size_aligned(new_hp) && "need 3 bits for tagging");
  heap->hp = new_hp;
  ((struct gc_obj*)addr)->tag = make_tag(tag, size);
  return heap_tag(addr);
}

// Bump-allocation fast path: advance hp, write the header word, and return
// the tagged pointer; falls back to allocate_slow_path when out of room.
static ALWAYS_INLINE ALLOCATOR struct object* allocate(struct gc_heap* heap,
                                                       uword tag, uword size) {
  assert(is_size_aligned(size) && "need 3 bits for tagging");
  // NOTE: Keep in sync with allocate_slow_path
  uintptr_t addr = heap_ptr(heap);
  uintptr_t new_hp = addr + size;
  assert(is_size_aligned(new_hp) && "need 3 bits for tagging");
  if (UNLIKELY(heap->limit < new_hp)) {
    return allocate_slow_path(heap, tag, size);
  }
  // NOTE: Keep in sync with allocate_slow_path
  heap->hp = new_hp;
  ((struct gc_obj*)addr)->tag = make_tag(tag, size);
  return heap_tag(addr);
}
370
// Application

// X-macro list of heap object type tags.
#define FOREACH_TAG(TAG) \
  TAG(TAG_LIST)          \
  TAG(TAG_CLOSURE)       \
  TAG(TAG_RECORD)        \
  TAG(TAG_STRING)        \
  TAG(TAG_VARIANT)

enum {
// All odd because of the kNotForwardedBit
#define ENUM_TAG(TAG) TAG = __COUNTER__ * 2 + 1,
  FOREACH_TAG(ENUM_TAG)
#undef ENUM_TAG
};

#define HEAP_ALIGNED __attribute__((__aligned__(kObjectAlignment)))

// Cons cell.
struct list {
  struct gc_obj HEAD;
  struct object* first;
  struct object* rest;
} HEAP_ALIGNED;

// All closures take themselves plus exactly one argument.
typedef struct object* (*ClosureFn)(struct object*, struct object*);

// TODO(max): Figure out if there is a way to do a PyObject_HEAD version of
// this where each closure actually has its own struct with named members
struct closure {
  struct gc_obj HEAD;
  ClosureFn fn;
  size_t size;
  struct object* env[];
};  // Not HEAP_ALIGNED; env is variable size

struct record_field {
  size_t key;
  struct object* value;
};

struct record {
  struct gc_obj HEAD;
  size_t size;
  struct record_field fields[];
};  // Not HEAP_ALIGNED; fields is variable size

struct heap_string {
  struct gc_obj HEAD;
  size_t size;
  char data[];
};  // Not HEAP_ALIGNED; data is variable size

// Heap variant with a payload; payload-free variants are immediates (see
// mk_immediate_variant).
struct variant {
  struct gc_obj HEAD;
  size_t tag;
  struct object* value;
} HEAP_ALIGNED;
428
// The object size is stored in the header above the tag byte (see make_tag).
size_t heap_object_size(struct gc_obj* obj) {
  size_t result = obj->tag >> kBitsPerByte;
  assert(is_size_aligned(result));
  return result;
}

// Applies `visit` to every object-pointer field of `obj`, dispatching on its
// type tag. Aborts on a tag it does not recognize.
void trace_heap_object(struct gc_obj* obj, struct gc_heap* heap,
                       VisitFn visit) {
  switch (obj_tag(obj)) {
    case TAG_LIST:
      visit(&((struct list*)obj)->first, heap);
      visit(&((struct list*)obj)->rest, heap);
      break;
    case TAG_CLOSURE:
      for (size_t i = 0; i < ((struct closure*)obj)->size; i++) {
        visit(&((struct closure*)obj)->env[i], heap);
      }
      break;
    case TAG_RECORD:
      for (size_t i = 0; i < ((struct record*)obj)->size; i++) {
        visit(&((struct record*)obj)->fields[i].value, heap);
      }
      break;
    case TAG_STRING:
      // Strings contain no object-pointer fields.
      break;
    case TAG_VARIANT:
      visit(&((struct variant*)obj)->value, heap);
      break;
    default:
      fprintf(stderr, "unknown tag: %u\n", obj_tag(obj));
      abort();
  }
}
462
bool smallint_is_valid(word value) {
  return (value >= kSmallIntMinValue) && (value <= kSmallIntMaxValue);
}

// Encode without the range assert; usable in constant initializers.
#define _mksmallint(value) \
  (struct object*)(((uword)(value) << kSmallIntTagBits) | kSmallIntTag)

struct object* mksmallint(word value) {
  assert(smallint_is_valid(value));
  return _mksmallint(value);
}

// Numbers are currently always immediates, so no allocation happens.
struct object* mknum(struct gc_heap* heap, word value) {
  (void)heap;
  return mksmallint(value);
}

bool is_num(struct object* obj) { return is_small_int(obj); }

// Immediates compare by identity, so encode the word and compare pointers.
bool is_num_equal_word(struct object* obj, word value) {
  assert(smallint_is_valid(value));
  return obj == mksmallint(value);
}
486
487word num_value(struct object* obj) {
488 assert(is_num(obj));
489 return ((word)obj) >> 1; // sign extend
490}
491
492bool is_list(struct object* obj) {
493 if (is_empty_list(obj)) {
494 return true;
495 }
496 return is_heap_object(obj) && obj_has_tag(as_heap_object(obj), TAG_LIST);
497}
498
struct list* as_list(struct object* obj) {
  assert(is_list(obj));
  return (struct list*)as_heap_object(obj);
}

// Head of a non-empty list.
struct object* list_first(struct object* obj) {
  assert(!is_empty_list(obj));
  return as_list(obj)->first;
}

// Tail of a non-empty list.
struct object* list_rest(struct object* list) {
  assert(!is_empty_list(list));
  return as_list(list)->rest;
}
513
514struct object* mklist(struct gc_heap* heap) {
515 struct object* result = allocate(heap, TAG_LIST, sizeof(struct list));
516 as_list(result)->first = empty_list();
517 as_list(result)->rest = empty_list();
518 return result;
519}
520
bool is_closure(struct object* obj) {
  return is_heap_object(obj) && obj_has_tag(as_heap_object(obj), TAG_CLOSURE);
}

struct closure* as_closure(struct object* obj) {
  assert(is_closure(obj));
  return (struct closure*)as_heap_object(obj);
}
529
530struct object* mkclosure(struct gc_heap* heap, ClosureFn fn,
531 size_t num_fields) {
532 uword size = sizeof(struct closure) + num_fields * kPointerSize;
533 assert(is_size_aligned(size));
534 struct object* result = allocate(heap, TAG_CLOSURE, size);
535 as_closure(result)->fn = fn;
536 as_closure(result)->size = num_fields;
537 // Assumes the items will be filled in immediately after calling mkclosure so
538 // they are not initialized
539 return result;
540}
541
ClosureFn closure_fn(struct object* obj) { return as_closure(obj)->fn; }

// Stores item into env slot i (bounds-checked in debug builds).
void closure_set(struct object* closure, size_t i, struct object* item) {
  struct closure* c = as_closure(closure);
  assert(i < c->size);
  c->env[i] = item;
}

struct object* closure_get(struct object* closure, size_t i) {
  struct closure* c = as_closure(closure);
  assert(i < c->size);
  return c->env[i];
}

// Invokes the closure, passing itself as the first argument so the callee can
// reach its captured environment.
struct object* closure_call(struct object* closure, struct object* arg) {
  ClosureFn fn = closure_fn(closure);
  return fn(closure, arg);
}
560
bool is_record(struct object* obj) {
  return is_heap_object(obj) && obj_has_tag(as_heap_object(obj), TAG_RECORD);
}

struct record* as_record(struct object* obj) {
  assert(is_record(obj));
  return (struct record*)as_heap_object(obj);
}
569
570struct object* mkrecord(struct gc_heap* heap, size_t num_fields) {
571 uword size = sizeof(struct record) + num_fields * sizeof(struct record_field);
572 assert(is_size_aligned(size));
573 struct object* result = allocate(heap, TAG_RECORD, size);
574 as_record(result)->size = num_fields;
575 // Assumes the items will be filled in immediately after calling mkrecord so
576 // they are not initialized
577 return result;
578}
579
size_t record_num_fields(struct object* record) {
  return as_record(record)->size;
}

// Overwrites field `index` (bounds-checked in debug builds).
void record_set(struct object* record, size_t index,
                struct record_field field) {
  struct record* r = as_record(record);
  assert(index < r->size);
  r->fields[index] = field;
}
590
591struct object* record_get(struct object* record, size_t key) {
592 struct record* r = as_record(record);
593 struct record_field* fields = r->fields;
594 for (size_t i = 0; i < r->size; i++) {
595 struct record_field field = fields[i];
596 if (field.key == key) {
597 return field.value;
598 }
599 }
600 return NULL;
601}
602
// A string is either an immediate small string or a heap string.
bool is_string(struct object* obj) {
  if (is_small_string(obj)) {
    return true;
  }
  return is_heap_object(obj) && obj_has_tag(as_heap_object(obj), TAG_STRING);
}

struct heap_string* as_heap_string(struct object* obj) {
  assert(is_string(obj));
  return (struct heap_string*)as_heap_object(obj);
}

// Allocates a heap string with `count` uninitialized data bytes. Only valid
// for lengths that do not fit the immediate encoding.
struct object* mkstring_uninit_private(struct gc_heap* heap, size_t count) {
  assert(count > kMaxSmallStringLength);  // can't fill in small string later
  uword size = align_size(sizeof(struct heap_string) + count);
  struct object* result = allocate(heap, TAG_STRING, size);
  as_heap_string(result)->size = count;
  return result;
}

// Makes a string from `data`, preferring the immediate encoding when it fits.
struct object* mkstring(struct gc_heap* heap, const char* data, uword length) {
  if (length <= kMaxSmallStringLength) {
    return mksmallstring(data, length);
  }
  struct object* result = mkstring_uninit_private(heap, length);
  memcpy(as_heap_string(result)->data, data, length);
  return result;
}
631
632static ALWAYS_INLINE uword string_length(struct object* obj) {
633 if (is_small_string(obj)) {
634 return small_string_length(obj);
635 }
636 return as_heap_string(obj)->size;
637}
638
639char string_at(struct object* obj, uword index) {
640 if (is_small_string(obj)) {
641 return small_string_at(obj, index);
642 }
643 return as_heap_string(obj)->data[index];
644}
645
// A variant is either an immediate (payload-free) variant or a heap variant.
bool is_variant(struct object* obj) {
  if (is_immediate_variant(obj)) {
    return true;
  }
  return is_heap_object(obj) && obj_has_tag(as_heap_object(obj), TAG_VARIANT);
}

struct variant* as_variant(struct object* obj) {
  assert(is_variant(obj));
  assert(is_heap_object(obj));  // This only makes sense for heap variants.
  return (struct variant*)as_heap_object(obj);
}

// Allocates a heap variant. NOTE(review): `value` is left uninitialized and
// is presumably set via variant_set right away — confirm callers do so before
// any allocation that could trigger a GC trace of this object.
struct object* mkvariant(struct gc_heap* heap, size_t tag) {
  struct object* result = allocate(heap, TAG_VARIANT, sizeof(struct variant));
  as_variant(result)->tag = tag;
  return result;
}

size_t variant_tag(struct object* obj) {
  if (is_immediate_variant(obj)) {
    return immediate_variant_tag(obj);
  }
  return as_variant(obj)->tag;
}

// Immediate variants carry no payload; report it as the hole.
struct object* variant_value(struct object* obj) {
  if (is_immediate_variant(obj)) {
    return hole();
  }
  return as_variant(obj)->value;
}

void variant_set(struct object* variant, struct object* value) {
  as_variant(variant)->value = value;
}
682
// A handle scope records the handle-stack depth at entry so that leaving the
// scope (via the cleanup attribute in HANDLES) pops everything pushed since.
struct handle_scope {
  struct object*** base;
};

#ifndef HANDLE_STACK_SIZE
#define HANDLE_STACK_SIZE 4096
#endif

// Shadow stack holding the addresses of local struct object* variables; these
// addresses form the GC root set.
static struct object** handle_stack[HANDLE_STACK_SIZE];
static struct object*** handles = handle_stack;
#ifndef NDEBUG
// Only used to check for handle stack overflow.
static struct object*** handles_end = &handle_stack[HANDLE_STACK_SIZE];
#endif

void pop_handles(void* local_handles) {
  handles = ((struct handle_scope*)local_handles)->base;
}

// Opens a handle scope that pops automatically on function exit.
#define HANDLES() \
  struct handle_scope local_handles __attribute__((__cleanup__(pop_handles))); \
  local_handles.base = handles;
// Registers local variable x as a GC root for the current scope (the overflow
// assert compiles out in NDEBUG builds, where handles_end is not defined).
#define GC_PROTECT(x) \
  assert(handles != handles_end); \
  (*handles++) = (struct object**)(&x)
// Declares, initializes, and protects a local in one step.
#define GC_HANDLE(type, name, val) \
  type name = val; \
  GC_PROTECT(name)

// Visits every live handle; this is the collector's root scan.
void trace_roots(struct gc_heap* heap, VisitFn visit) {
  for (struct object*** h = handle_stack; h != handles; h++) {
    visit(*h, heap);
  }
}
717
// The single global heap used by the program.
struct gc_heap heap_object;
struct gc_heap* heap = &heap_object;

struct object* num_add(struct object* a, struct object* b) {
  // NB: doesn't use pointers after allocating
  return mknum(heap, num_value(a) + num_value(b));
}

struct object* num_sub(struct object* a, struct object* b) {
  // NB: doesn't use pointers after allocating
  return mknum(heap, num_value(a) - num_value(b));
}

struct object* num_mul(struct object* a, struct object* b) {
  // NB: doesn't use pointers after allocating
  return mknum(heap, num_value(a) * num_value(b));
}
735
// Conses item onto list. Both inputs are protected as roots because mklist
// may trigger a collection that moves them.
struct object* list_cons(struct object* item, struct object* list) {
  HANDLES();
  GC_PROTECT(item);
  GC_PROTECT(list);
  struct object* result = mklist(heap);
  as_list(result)->first = item;
  as_list(result)->rest = list;
  return result;
}

// Concatenates two strings whose combined length requires a heap string. The
// byte-at-a-time copy goes through string_at so each input may independently
// be an immediate or a heap string.
struct object* heap_string_concat(struct object* a, struct object* b) {
  uword a_size = string_length(a);
  uword b_size = string_length(b);
  assert(a_size + b_size > kMaxSmallStringLength);
  HANDLES();
  GC_PROTECT(a);
  GC_PROTECT(b);
  struct object* result = mkstring_uninit_private(heap, a_size + b_size);
  for (uword i = 0; i < a_size; i++) {
    as_heap_string(result)->data[i] = string_at(a, i);
  }
  for (uword i = 0; i < b_size; i++) {
    as_heap_string(result)->data[a_size + i] = string_at(b, i);
  }
  return result;
}
762
// Concatenates two small strings whose combined length still fits in an
// immediate. Layout sketch (T = low length|tag byte):
//   a: CBAT
//   b: FEDT
//   result: FEDCBAT
static ALWAYS_INLINE struct object* small_string_concat(struct object* a_obj,
                                                        struct object* b_obj) {
  assert(is_small_string(a_obj));
  assert(is_small_string(b_obj));
  uword length = small_string_length(a_obj) + small_string_length(b_obj);
  assert(length <= kMaxSmallStringLength);
  // Strip b's low (length|tag) byte, then shift its chars up past a's chars.
  uword result = ((uword)b_obj) & ~(uword)0xFFULL;
  result <<= small_string_length(a_obj) * kBitsPerByte;
  // a's chars already sit just above the low byte; OR them in, then rebuild
  // the combined (length|tag) byte.
  result |= ((uword)a_obj) & ~(uword)0xFFULL;
  result |= length << kImmediateTagBits;
  result |= kSmallStringTag;
  struct object* result_obj = (struct object*)result;
  assert(!is_heap_object(result_obj));
  assert(is_small_string(result_obj));
  return result_obj;
}

// Concatenation dispatcher: short-circuits empty operands and picks the
// immediate or heap representation based on the combined length.
ALWAYS_INLINE static struct object* string_concat(struct object* a,
                                                  struct object* b) {
  if (is_empty_string(a)) {
    return b;
  }
  if (is_empty_string(b)) {
    return a;
  }
  uword a_size = string_length(a);
  uword b_size = string_length(b);
  if (a_size + b_size <= kMaxSmallStringLength) {
    return small_string_concat(a, b);
  }
  return heap_string_concat(a, b);
}
798
799bool string_equal_cstr_len(struct object* string, const char* cstr, uword len) {
800 assert(is_string(string));
801 if (string_length(string) != len) {
802 return false;
803 }
804 for (uword i = 0; i < len; i++) {
805 if (string_at(string, i) != cstr[i]) {
806 return false;
807 }
808 }
809 return true;
810}
811
812extern const char* record_keys[];
813extern const char* variant_names[];
814
815struct object* print(struct object* obj) {
816 if (is_num(obj)) {
817 printf("%ld", num_value(obj));
818 } else if (is_list(obj)) {
819 putchar('[');
820 while (!is_empty_list(obj)) {
821 print(list_first(obj));
822 obj = list_rest(obj);
823 if (!is_empty_list(obj)) {
824 putchar(',');
825 putchar(' ');
826 }
827 }
828 putchar(']');
829 } else if (is_record(obj)) {
830 struct record* record = as_record(obj);
831 putchar('{');
832 for (size_t i = 0; i < record->size; i++) {
833 printf("%s = ", record_keys[record->fields[i].key]);
834 print(record->fields[i].value);
835 if (i + 1 < record->size) {
836 fputs(", ", stdout);
837 }
838 }
839 putchar('}');
840 } else if (is_closure(obj)) {
841 fputs("<closure>", stdout);
842 } else if (is_string(obj)) {
843 putchar('"');
844 for (uword i = 0; i < string_length(obj); i++) {
845 putchar(string_at(obj, i));
846 }
847 putchar('"');
848 } else if (is_variant(obj)) {
849 putchar('#');
850 printf("%s ", variant_names[variant_tag(obj)]);
851 print(variant_value(obj));
852 } else if (is_hole(obj)) {
853 fputs("()", stdout);
854 } else {
855 assert(is_heap_object(obj));
856 fprintf(stderr, "unknown tag: %u\n", obj_tag(as_heap_object(obj)));
857 abort();
858 }
859 return obj;
860}
861
// Prints obj followed by a newline; returns obj for chaining.
struct object* println(struct object* obj) {
  struct object* result = print(obj);
  putchar('\n');
  return result;
}
867
#ifndef MEMORY_SIZE
#define MEMORY_SIZE 4096
#endif

// Put something in the const heap so that __start_const_heap and
// __stop_const_heap are defined by the linker.
#ifdef __APPLE__
#define CONST_HEAP const __attribute__((section("__DATA,const_heap")))
#else
#define CONST_HEAP const __attribute__((section("const_heap")))
#endif
// NOTE(review): the header holds only the tag (no size bits) and the
// flexible-array initializer is a GNU extension; presumably fine because
// const-heap objects are never sized, traced, or moved by the GC — confirm.
CONST_HEAP
__attribute__((used)) struct heap_string private_unused_const_heap = {
    .HEAD.tag = TAG_STRING, .size = 11, .data = "hello world"};