//! Region-based memory management, compiled in a C-like form.
1use std::collections::HashMap;
2use std::path::Path;
3
4use frontend::mir::AllocKind;
5use frontend::{BinOp, Ctx as FrontendCtx, mir};
6
7use anyhow::Result;
8use inkwell::basic_block::BasicBlock;
9use inkwell::builder::Builder;
10use inkwell::context::{Context, ContextRef};
11use inkwell::module::{Linkage, Module};
12use inkwell::targets::{
13 CodeModel, FileType, InitializationConfig, RelocMode, Target, TargetMachine,
14};
15use inkwell::types::{BasicMetadataTypeEnum, BasicType, BasicTypeEnum, FunctionType};
16use inkwell::values::{BasicValue, BasicValueEnum, FunctionValue, PointerValue};
17use inkwell::{AddressSpace, IntPredicate, OptimizationLevel};
18
19use crate::Compiler;
20
/// Per-local codegen metadata: the LLVM storage type, the original MIR type,
/// the stack slot (alloca) backing the local, and the block that created it.
struct FunctionLocalInfo<'ctx> {
    // LLVM representation of the local's type (used for loads/stores/GEPs).
    ty: BasicTypeEnum<'ctx>,
    // Original MIR type; consulted e.g. to pick the switch width in BrTable.
    tk: mir::Ty,
    // Stack slot holding the local's value; all access goes through this.
    alloc: PointerValue<'ctx>,
    // Basic block in which the alloca was emitted (used as a phi incoming
    // block in `compile_statement`).
    defining_block: BasicBlock<'ctx>,
}
27
28impl<'ctx> FunctionLocalInfo<'ctx> {
29 fn new(
30 ty: BasicTypeEnum<'ctx>,
31 tk: mir::Ty,
32 alloc: PointerValue<'ctx>,
33 defining_block: BasicBlock<'ctx>,
34 ) -> Self {
35 Self {
36 ty,
37 tk,
38 alloc,
39 defining_block,
40 }
41 }
42}
43
/// LLVM code generator state: one module/builder pair for the whole program
/// plus per-function scratch maps that are cleared between functions.
pub struct Llvm<'ctx> {
    module: Module<'ctx>,
    builder: Builder<'ctx>,
    // Cleared in `compile_function`: MIR local id -> codegen info.
    function_locals: HashMap<mir::LocalId, FunctionLocalInfo<'ctx>>,
    // Cleared in `compile_function`: MIR block id -> LLVM basic block.
    function_blocks: HashMap<mir::BlockId, BasicBlock<'ctx>>,
    target_machine: TargetMachine,
}
51
52impl<'ctx> Llvm<'ctx> {
53 fn new(ctx: &'ctx Context) -> Self {
54 let module = ctx.create_module("mnemo");
55 let builder = ctx.create_builder();
56
57 Target::initialize_native(&InitializationConfig::default()).unwrap();
58
59 let triple = TargetMachine::get_default_triple();
60 let target = Target::from_triple(&triple).unwrap();
61
62 let target_machine = target
63 .create_target_machine(
64 &triple,
65 "generic",
66 "",
67 OptimizationLevel::None,
68 RelocMode::Default,
69 CodeModel::Default,
70 )
71 .expect("to create a target machine");
72
73 Self {
74 module,
75 builder,
76 target_machine,
77 function_locals: HashMap::new(),
78 function_blocks: HashMap::new(),
79 }
80 }
81
    /// The LLVM context, recovered from the module rather than stored twice.
    fn ctx(&self) -> ContextRef<'ctx> {
        self.module.get_context()
    }
85
86 fn basic_type_to_llvm_basic_type(ctx: ContextRef<'ctx>, ty: &mir::Ty) -> BasicTypeEnum<'ctx> {
87 match ty {
88 mir::Ty::Int => ctx.i32_type().as_basic_type_enum(),
89 mir::Ty::Bool => ctx.bool_type().as_basic_type_enum(),
90 mir::Ty::Char => ctx.i8_type().as_basic_type_enum(),
91 mir::Ty::Unit => panic!("Unit type not supported as a basic type"),
92 mir::Ty::Array(ty, len) => {
93 let ty = Self::basic_type_to_llvm_basic_type(ctx, ty);
94 ty.array_type(*len as u32).as_basic_type_enum()
95 }
96 mir::Ty::Ptr(_, _) => ctx.ptr_type(AddressSpace::default()).as_basic_type_enum(),
97 mir::Ty::Record(tys) => {
98 let field_tys: Vec<BasicTypeEnum<'ctx>> = tys
99 .iter()
100 .map(|t| Self::basic_type_to_llvm_basic_type(ctx, t))
101 .collect();
102
103 ctx.struct_type(&field_tys, false).as_basic_type_enum()
104 }
105 mir::Ty::TaggedUnion(_) => {
106 let union_size = ty.bytes();
107
108 let tag_ty = ctx.i8_type().as_basic_type_enum();
109 let payload_ty = ctx
110 .i8_type()
111 .array_type(union_size as u32)
112 .as_basic_type_enum();
113
114 ctx.struct_type(&[tag_ty, payload_ty], false)
115 .as_basic_type_enum()
116 }
117 mir::Ty::Tuple(tys) => {
118 let field_tys: Vec<BasicTypeEnum<'ctx>> = tys
119 .iter()
120 .map(|t| Self::basic_type_to_llvm_basic_type(ctx, t))
121 .collect();
122
123 ctx.struct_type(&field_tys, false).as_basic_type_enum()
124 }
125 mir::Ty::Str => ctx.ptr_type(AddressSpace::default()).as_basic_type_enum(),
126 mir::Ty::Variadic => unreachable!(),
127 ty => panic!("unimplemented type {:?}", ty),
128 }
129 }
130
    /// Same lowering as [`Self::basic_type_to_llvm_basic_type`], wrapped as a
    /// metadata type enum for use in function signatures.
    fn basic_type_to_llvm_basic_metadata_type(
        ctx: ContextRef<'ctx>,
        ty: &mir::Ty,
    ) -> BasicMetadataTypeEnum<'ctx> {
        Self::basic_type_to_llvm_basic_type(ctx, ty).into()
    }
137
138 fn type_to_fn_type(
139 ctx: ContextRef<'ctx>,
140 ty: &mir::Ty,
141 param_types: Vec<BasicMetadataTypeEnum<'ctx>>,
142 is_variadic: bool,
143 ) -> FunctionType<'ctx> {
144 match ty {
145 mir::Ty::Int => ctx.i32_type().fn_type(¶m_types, is_variadic),
146 mir::Ty::Bool => ctx.bool_type().fn_type(¶m_types, is_variadic),
147 mir::Ty::Char => ctx.i8_type().fn_type(¶m_types, is_variadic),
148 mir::Ty::Unit => ctx.void_type().fn_type(¶m_types, is_variadic),
149 mir::Ty::Ptr(_, _) => ctx
150 .ptr_type(AddressSpace::default())
151 .fn_type(¶m_types, is_variadic),
152
153 mir::Ty::Array(ty, len) => {
154 let ty = Self::basic_type_to_llvm_basic_type(ctx, ty);
155 ty.array_type(*len as u32)
156 .fn_type(¶m_types, is_variadic)
157 }
158 _ => todo!(),
159 }
160 }
161
162 fn get_or_create_bb(
163 &mut self,
164 llvm_fn: &FunctionValue<'ctx>,
165 block_id: mir::BlockId,
166 ) -> BasicBlock<'ctx> {
167 if let Some(bb) = self.function_blocks.get(&block_id) {
168 return *bb;
169 }
170
171 let bb = self
172 .ctx()
173 .append_basic_block(*llvm_fn, format!("bb{}", block_id).as_str());
174
175 self.function_blocks.insert(block_id, bb);
176 bb
177 }
178
    // returns ptr and ptr type
    /// Resolves a MIR place to (pointer, pointee LLVM type), where the type
    /// is what a load through the pointer should use.
    ///
    /// - `Deref` yields the local's own stack slot and full type.
    /// - `Field(idx, ty)` GEPs to field `idx` of a struct-typed local.
    /// - `Index(local)` GEPs into an array-typed local, using another local's
    ///   current value as the index.
    fn place_ptr(&self, place: &mir::Place) -> Result<(PointerValue<'ctx>, BasicTypeEnum<'ctx>)> {
        let local = self.function_locals.get(&place.local).unwrap();
        let (ptr, ty) = match &place.kind {
            mir::PlaceKind::Deref => (local.alloc, local.ty),
            mir::PlaceKind::Field(idx, ty) => {
                let field_alloca = self.builder.build_struct_gep(
                    local.ty,
                    local.alloc,
                    *idx as u32,
                    "field_alloca",
                )?;

                // Field type as recorded by the frontend in the place itself.
                let load_ty = Self::basic_type_to_llvm_basic_type(self.ctx(), ty);

                (field_alloca, load_ty)
            }
            mir::PlaceKind::Index(local_idx) => {
                let pointee_ty = local.ty.into_array_type().get_element_type();

                // The index is itself a local; load its current value.
                let index_local = self.function_locals.get(local_idx).unwrap();
                let index_val = self
                    .builder
                    .build_load(index_local.ty, index_local.alloc, "index_load")?
                    .into_int_value();
                let zero = self.ctx().i32_type().const_zero();

                // NOTE(review): `in_bounds` GEP assumes the runtime index is
                // within the array bounds; no bounds check is emitted here.
                let gep = unsafe {
                    self.builder.build_in_bounds_gep(
                        local.ty,
                        local.alloc,
                        &[zero, index_val],
                        "arr_index_gep",
                    )
                }?;

                (gep, pointee_ty)
            }
        };

        Ok((ptr, ty))
    }
221
222 fn load_place(&self, place: &mir::Place) -> Result<BasicValueEnum<'ctx>> {
223 let (ptr, ty) = self.place_ptr(place)?;
224 let load = self.builder.build_load(ty, ptr, "load")?;
225 Ok(load.as_basic_value_enum())
226 }
227
228 fn compile_operand(&self, operand: &mir::Operand) -> Result<BasicValueEnum<'ctx>> {
229 let value = match operand {
230 mir::Operand::Constant(constant) => match constant {
231 mir::Constant::Int(i) => self
232 .ctx()
233 .i32_type()
234 .const_int(*i as u64, false)
235 .as_basic_value_enum(),
236 mir::Constant::Bool(b) => self
237 .ctx()
238 .bool_type()
239 .const_int(*b as u64, false)
240 .as_basic_value_enum(),
241 },
242
243 // copies are trivial loads
244 mir::Operand::Copy(place) => self.load_place(place)?,
245 };
246
247 Ok(value)
248 }
249
    /// Lowers a MIR rvalue to an LLVM value at the current builder position.
    ///
    /// Aggregate allocations (arrays, records, variants, tuples) are built in
    /// a scratch allocation and then loaded back out as a first-class value,
    /// so the caller can store the whole thing into the destination slot.
    /// `_destination` is accepted but currently unused.
    fn compile_rvalue(
        &self,
        rvalue: &mir::RValue,
        _destination: Option<PointerValue<'ctx>>,
        _llvm_fn: &FunctionValue<'ctx>,
        _ctx: &FrontendCtx,
    ) -> Result<BasicValueEnum<'ctx>> {
        let int_val = match rvalue {
            mir::RValue::Use(operand) => self.compile_operand(operand)?,
            mir::RValue::BinOp(binop, lhs, rhs) => {
                let lhs = self.compile_operand(lhs)?;
                let rhs = self.compile_operand(rhs)?;

                // TODO: we are assuming ints here
                // NOTE(review): division is unsigned (`build_int_unsigned_div`)
                // while Lt/LtEq use signed predicates — confirm this mix is
                // intended.
                match binop {
                    BinOp::Add => self
                        .builder
                        .build_int_add(lhs.into_int_value(), rhs.into_int_value(), "add")
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::Sub => self
                        .builder
                        .build_int_sub(lhs.into_int_value(), rhs.into_int_value(), "sub")
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::Mul => self
                        .builder
                        .build_int_mul(lhs.into_int_value(), rhs.into_int_value(), "mul")
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::Div => self
                        .builder
                        .build_int_unsigned_div(lhs.into_int_value(), rhs.into_int_value(), "div")
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::Lt => self
                        .builder
                        .build_int_compare(
                            IntPredicate::SLT,
                            lhs.into_int_value(),
                            rhs.into_int_value(),
                            "lt",
                        )
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::LtEq => self
                        .builder
                        .build_int_compare(
                            IntPredicate::SLE,
                            lhs.into_int_value(),
                            rhs.into_int_value(),
                            "lteq",
                        )
                        .unwrap()
                        .as_basic_value_enum(),
                    BinOp::NEq => self
                        .builder
                        .build_int_compare(
                            IntPredicate::NE,
                            lhs.into_int_value(),
                            rhs.into_int_value(),
                            "neq",
                        )
                        .unwrap()
                        .as_basic_value_enum(),
                    _ => todo!(),
                }
            }
            mir::RValue::Alloc {
                kind: alloc_kind,
                operands,
                malloc,
            } => {
                match alloc_kind {
                    // Array literal: allocate (heap if `malloc`, stack
                    // otherwise), store each element, then load the array
                    // back out as a value.
                    AllocKind::Array(ty_hint) => {
                        let elem_type = Self::basic_type_to_llvm_basic_type(self.ctx(), ty_hint);

                        let array_len = operands.len() as u32;
                        let array_type = elem_type.array_type(array_len);

                        let array_ptr = if *malloc {
                            self.builder.build_malloc(array_type, "array_malloc")?
                        } else {
                            // TODO: for mem2reg, allocas should ideally happen
                            // in the function's entry block, not the current builder position.
                            self.builder.build_alloca(array_type, "array_alloca")?
                        };

                        let i32_type = self.ctx().i32_type();
                        let zero = i32_type.const_zero();

                        for (i, operand) in operands.iter().enumerate() {
                            let val = self.compile_operand(operand)?;

                            let index = i32_type.const_int(i as u64, false);

                            // In-bounds GEP: `i < operands.len()`, the length
                            // the array type was created with.
                            let elem_ptr = unsafe {
                                self.builder.build_in_bounds_gep(
                                    array_type,
                                    array_ptr,
                                    &[zero, index],
                                    "elem_ptr",
                                )?
                            };

                            self.builder.build_store(elem_ptr, val)?;
                        }

                        // copy array into memory to be assigned in compile_statement, this is inefficient
                        self.builder
                            .build_load(array_type, array_ptr, "array_load")?
                            .as_basic_value_enum()
                    }
                    // Record literal: stack-allocate the struct, store each
                    // field, then load the struct back out.
                    AllocKind::Record(fields) => {
                        let field_tys: Vec<BasicTypeEnum<'ctx>> = fields
                            .iter()
                            .map(|ty| Self::basic_type_to_llvm_basic_type(self.ctx(), ty))
                            .collect();

                        let struct_ty = self.ctx().struct_type(&field_tys, false);

                        let struct_ptr = self.builder.build_alloca(struct_ty, "struct_alloca")?;

                        for (i, operand) in operands.iter().take(fields.len()).enumerate() {
                            let field_alloca = self.builder.build_struct_gep(
                                struct_ty,
                                struct_ptr,
                                i as u32,
                                "field_alloca",
                            )?;
                            self.builder
                                .build_store(field_alloca, self.compile_operand(operand)?)?;
                        }

                        self.builder
                            .build_load(struct_ty, struct_ptr, "struct_load")?
                    }
                    // Tagged-union variant: `{ i8 tag, [N x i8] payload }`
                    // with the tag stored first, then the payload (if any).
                    AllocKind::Variant(tag, payload_ty) => {
                        let tag_ty = self.ctx().i8_type().as_basic_type_enum();
                        let variant_ty = match payload_ty {
                            mir::Ty::Unit => self.ctx().struct_type(&[tag_ty], false),
                            _ => {
                                // Calculate payload size accounting for alignment.
                                // The payload type determines both size and alignment requirements.
                                // For proper union semantics, we use the payload's alignment
                                // to determine padding, ensuring consistent layout with local variables.
                                let payload_size = payload_ty.bytes();
                                let payload_align = payload_ty.align();

                                // Calculate total struct size: 1 byte tag + padding + payload
                                // This must match what Ty::TaggedUnion::bytes() returns
                                let base_size = 1 + payload_size;
                                let padding = if payload_align > 1 {
                                    (payload_align - (base_size % payload_align)) % payload_align
                                } else {
                                    0
                                };
                                let total_size = base_size + padding;

                                // The array size should match what basic_type_to_llvm_basic_type
                                // uses for TaggedUnion, which is ty.bytes() directly
                                // NOTE(review): verify this actually agrees with
                                // `basic_type_to_llvm_basic_type`'s TaggedUnion
                                // lowering (which sizes the payload array from
                                // `ty.bytes()`); a mismatch would make variant
                                // stores and union-typed locals disagree on layout.
                                let array_size = total_size;

                                let payload_ty = self
                                    .ctx()
                                    .i8_type()
                                    .array_type(array_size as u32)
                                    .as_basic_type_enum();
                                self.ctx().struct_type(&[tag_ty, payload_ty], false)
                            }
                        };

                        let variant_ptr =
                            self.builder.build_alloca(variant_ty, "variant_alloca")?;

                        // Tag lives at offset 0, so a plain store suffices.
                        let tag_val = self.ctx().i8_type().const_int(*tag as u64, false);
                        self.builder.build_store(variant_ptr, tag_val)?;

                        if operands.len() > 1 {
                            todo!("payloads with more than one operand not implemented");
                        } else if operands.len() == 1 {
                            let payload_gep = self.builder.build_struct_gep(
                                variant_ty,
                                variant_ptr,
                                1,
                                "payload_gep",
                            )?;

                            let payload_val = self.compile_operand(&operands[0])?;
                            self.builder.build_store(payload_gep, payload_val)?;
                        }

                        self.builder
                            .build_load(variant_ty, variant_ptr, "variant_load")?
                    }
                    // Tuple literal: identical lowering to records.
                    AllocKind::Tuple(tys) => {
                        let field_tys: Vec<BasicTypeEnum<'ctx>> = tys
                            .iter()
                            .map(|t| Self::basic_type_to_llvm_basic_type(self.ctx(), t))
                            .collect();

                        let struct_ty = self.ctx().struct_type(&field_tys, false);

                        let struct_ptr = self.builder.build_alloca(struct_ty, "struct_alloca")?;

                        for (i, operand) in operands.iter().take(tys.len()).enumerate() {
                            let field_alloca = self.builder.build_struct_gep(
                                struct_ty,
                                struct_ptr,
                                i as u32,
                                "field_alloca",
                            )?;
                            self.builder
                                .build_store(field_alloca, self.compile_operand(operand)?)?;
                        }

                        self.builder
                            .build_load(struct_ty, struct_ptr, "struct_load")?
                    }
                    // String literal: interned as a global; note the global's
                    // name embeds the full string contents.
                    AllocKind::Str(s) => self
                        .builder
                        .build_global_string_ptr(s, format!("str_{}", s).as_str())?
                        .as_pointer_value()
                        .as_basic_value_enum(),
                }
            }
        };

        Ok(int_val)
    }
489
    /// Lowers a single MIR statement at the current builder position.
    fn compile_statement(
        &mut self,
        stmt: &mir::Statement,
        llvm_fn: &FunctionValue<'ctx>,
        ctx: &FrontendCtx,
    ) -> Result<()> {
        match stmt {
            // Evaluate the rvalue and store it into the local's stack slot.
            mir::Statement::Assign(localid, rvalue) => {
                let alloca = self.function_locals.get(localid).unwrap().alloc;
                let value = self.compile_rvalue(rvalue, Some(alloca), llvm_fn, ctx)?;

                self.builder.build_store(alloca, value).unwrap();
            }
            mir::Statement::Phi(localid, local_ids) => {
                // NOTE(review): this phi is created at the current builder
                // position (LLVM requires phis at the very start of a block),
                // is always i32-typed regardless of the local's type, and its
                // incoming values are the incoming locals' alloca *pointers*
                // rather than their loaded values — confirm this path is
                // exercised and passes module verification.
                let i32_type = self.ctx().i32_type();
                let phi_value = self
                    .builder
                    .build_phi(i32_type, format!("phi_{}", localid).as_str())
                    .unwrap();

                for local_id in local_ids {
                    let local = self.function_locals.get(local_id).unwrap();
                    phi_value.add_incoming(&[(&local.alloc, local.defining_block)]);
                }

                let alloca = self.function_locals.get(localid).unwrap().alloc;
                self.builder
                    .build_store(alloca, phi_value.as_basic_value())
                    .unwrap();
            }
            // Direct call: compile each argument rvalue; if the callee
            // returns a value, store it into the destination local.
            mir::Statement::Call {
                function_name,
                args,
                destination,
            } => {
                let fn_val = self.module.get_function(function_name.as_str()).unwrap();

                let mut call_args = Vec::new();
                for rval in args {
                    let basic_elem_type = self.compile_rvalue(rval, None, llvm_fn, ctx)?;
                    call_args.push(basic_elem_type.into());
                }

                let call = self.builder.build_call(fn_val, &call_args, "call_result")?;

                if let Some(destination) = destination {
                    let alloc = self.function_locals.get(destination).unwrap().alloc;
                    //
                    // SAFETY: destination should only be SOME if the call returns
                    let call_value = call.try_as_basic_value().basic().unwrap();
                    self.builder.build_store(alloc, call_value)?;
                }
            }
            // Store through a computed place (deref / field / index).
            mir::Statement::Store(place, rvalue) => {
                let (place_ptr, _) = self.place_ptr(place)?;
                let rvalue = self.compile_rvalue(rvalue, None, llvm_fn, ctx)?;
                self.builder.build_store(place_ptr, rvalue)?;
            }
            mir::Statement::RegionStart(_) => {
                // Region boundaries are erased at runtime - no codegen needed
            }
            mir::Statement::RegionEnd(_) => {
                // Region boundaries are erased at runtime - no codegen needed
            }
        }

        Ok(())
    }
558
    /// Lowers a MIR block terminator: return, conditional / unconditional
    /// branch, or a `switch`-based branch table.
    fn compile_terminator(
        &mut self,
        terminator: mir::Terminator,
        llvm_fn: &FunctionValue<'ctx>,
        _ctx: &FrontendCtx,
    ) -> Result<()> {
        match terminator {
            // Load the returned local's value (if any) and emit `ret`.
            mir::Terminator::Return(local_id) => match local_id {
                Some(local_id) => {
                    let local = self.function_locals.get(&local_id).unwrap();
                    let val = self
                        .builder
                        .build_load(local.ty, local.alloc, "return_val")
                        .unwrap();
                    self.builder.build_return(Some(&val))?;
                }
                None => {
                    self.builder.build_return(None)?;
                }
            },
            // Two-way branch on a boolean local.
            mir::Terminator::BrIf(cond_local_id, then_block, else_block) => {
                let cond_local = self.function_locals.get(&cond_local_id).unwrap();
                let cond_val = self
                    .builder
                    .build_load(cond_local.ty, cond_local.alloc, "cond_val")
                    .unwrap();

                let then_bb = self.get_or_create_bb(llvm_fn, then_block);
                let else_bb = self.get_or_create_bb(llvm_fn, else_block);

                self.builder.build_conditional_branch(
                    cond_val.into_int_value(),
                    then_bb,
                    else_bb,
                )?;
            }
            mir::Terminator::Br(target_block) => {
                let target_block = self.get_or_create_bb(llvm_fn, target_block);
                self.builder.build_unconditional_branch(target_block)?;
            }
            // Multi-way branch; what we switch on depends on the scrutinee's
            // MIR type (`tk`).
            mir::Terminator::BrTable(local_id, jump_table) => {
                let jump_local_info = self.function_locals.get(&local_id).unwrap();
                let (jump_val, switch_type) = match jump_local_info.tk {
                    // Plain integer: load the i32 value directly.
                    mir::Ty::Int => {
                        let switch_type = self.ctx().i32_type();

                        let jump_val = self
                            .builder
                            .build_load(switch_type, jump_local_info.alloc, "jump_val")
                            .unwrap()
                            .into_int_value();

                        (jump_val, switch_type)
                    }
                    // Tagged union: switch on the i8 tag at field 0.
                    mir::Ty::TaggedUnion(_) => {
                        let switch_type = self.ctx().i8_type();

                        let jump_ptr = self.builder.build_struct_gep(
                            jump_local_info.ty,
                            jump_local_info.alloc,
                            0,
                            "jump_ptr",
                        )?;

                        let jump_val = self
                            .builder
                            .build_load(switch_type, jump_ptr, "jump_val")
                            .unwrap()
                            .into_int_value();

                        (jump_val, switch_type)
                    }
                    _ => todo!(),
                };

                let default_bb = self.get_or_create_bb(llvm_fn, jump_table.default);

                // Case constants are emitted at the same width as the
                // scrutinee so the switch is well-typed.
                let mut cases = Vec::new();
                for (val, block_id) in jump_table.cases {
                    let bb = self.get_or_create_bb(llvm_fn, block_id);

                    let val = switch_type.const_int(val as u64, false);
                    cases.push((val, bb));
                }

                self.builder.build_switch(jump_val, default_bb, &cases)?;
            }
        }

        Ok(())
    }
650
651 fn compile_block(
652 &mut self,
653 block: mir::BasicBlock,
654 llvm_fn: &FunctionValue<'ctx>,
655 ctx: &FrontendCtx,
656 ) -> Result<BasicBlock<'ctx>> {
657 let bb = self.get_or_create_bb(llvm_fn, block.block_id);
658
659 self.builder.position_at_end(bb);
660
661 for stmt in block.stmts.iter() {
662 self.compile_statement(stmt, llvm_fn, ctx)?;
663 }
664
665 self.compile_terminator(block.terminator, llvm_fn, ctx)?;
666
667 Ok(bb)
668 }
669
670 fn precompute_function(&mut self, function: &mir::Function) {
671 let mut arg_types = Vec::new();
672 let llvm_ctx = self.ctx();
673
674 let mut is_variadic = false;
675 for (_i, local) in function.locals.iter().enumerate().take(function.parameters) {
676 match &local.ty {
677 mir::Ty::Variadic => {
678 is_variadic = true;
679 }
680 _ => {
681 arg_types.push(Self::basic_type_to_llvm_basic_metadata_type(
682 llvm_ctx, &local.ty,
683 ));
684 }
685 }
686 }
687
688 let fn_type = Self::type_to_fn_type(llvm_ctx, &function.return_ty, arg_types, is_variadic);
689 let _ = self
690 .module
691 .add_function(function.name.as_str(), fn_type, Some(Linkage::External));
692 }
693
694 fn compile_function(&mut self, function: mir::Function, ctx: &FrontendCtx) -> Result<()> {
695 self.function_locals.clear();
696 self.function_blocks.clear();
697
698 let llvm_ctx = self.ctx();
699 let llvm_fn = self.module.get_function(&function.name).unwrap();
700
701 let entry_block = self
702 .ctx()
703 .append_basic_block(llvm_fn, format!("{}_entry", function.name).as_str());
704 self.builder.position_at_end(entry_block);
705
706 for (param_idx, param_local) in function.locals.iter().take(function.parameters).enumerate()
707 {
708 let local_ty = Self::basic_type_to_llvm_basic_type(llvm_ctx, ¶m_local.ty);
709 let local_alloca = self
710 .builder
711 .build_alloca(local_ty, format!("param{}", param_local.id).as_str())?;
712
713 let param = llvm_fn.get_nth_param(param_idx as u32).unwrap();
714
715 self.builder.build_store(local_alloca, param).unwrap();
716
717 let local_info =
718 FunctionLocalInfo::new(local_ty, param_local.ty.clone(), local_alloca, entry_block);
719 self.function_locals.insert(param_local.id, local_info);
720 }
721
722 for local in function.locals.iter().skip(function.parameters) {
723 let local_ty = Self::basic_type_to_llvm_basic_type(llvm_ctx, &local.ty);
724 let local_alloca = self
725 .builder
726 .build_alloca(local_ty, format!("x{}", local.id).as_str())?;
727
728 let local_info =
729 FunctionLocalInfo::new(local_ty, local.ty.clone(), local_alloca, entry_block);
730 self.function_locals.insert(local.id, local_info);
731 }
732
733 for block in function.into_iter() {
734 let bb = self.compile_block(block, &llvm_fn, ctx)?;
735 if entry_block.get_terminator().is_none() {
736 self.builder.position_at_end(entry_block);
737 self.builder.build_unconditional_branch(bb).unwrap();
738 self.builder.position_at_end(bb);
739 }
740 }
741
742 Ok(())
743 }
744
745 fn compile_extern(&mut self, extern_: mir::Extern) -> Result<()> {
746 let mut param_types = Vec::new();
747 let mut is_variadic = false;
748
749 for ty in extern_.params.iter() {
750 match ty {
751 mir::Ty::Variadic => {
752 is_variadic = true;
753 }
754 _ => {
755 let ty = Self::basic_type_to_llvm_basic_metadata_type(self.ctx(), ty);
756 param_types.push(ty);
757 }
758 }
759 }
760
761 let fn_ty = Self::type_to_fn_type(self.ctx(), &extern_.return_ty, param_types, is_variadic);
762 let _ = self.module.add_function(&extern_.name, fn_ty, None);
763 Ok(())
764 }
765
766 fn wrap_main(&self) -> Result<()> {
767 let actual_main = self.module.get_function("__entry").unwrap();
768
769 let fn_ty = self.ctx().i32_type().fn_type(&[], false);
770 let fn_val = self.module.add_function("main", fn_ty, None);
771 let entry_block = self.ctx().append_basic_block(fn_val, "entry");
772
773 self.builder.position_at_end(entry_block);
774 self.builder.build_call(actual_main, &[], "_")?;
775 self.builder
776 .build_return(Some(&self.ctx().i32_type().const_zero()))?;
777
778 Ok(())
779 }
780
781 fn output(self) {
782 let triple = TargetMachine::get_default_triple();
783
784 self.module
785 .set_data_layout(&self.target_machine.get_target_data().get_data_layout());
786 self.module.set_triple(&triple);
787
788 let path = Path::new("a.o");
789 let _ = self
790 .target_machine
791 .write_to_file(&self.module, FileType::Object, path);
792 }
793}
794
795impl<'ctx> Compiler for Llvm<'ctx> {
796 fn compile(mut self, module: mir::Module, ctx: FrontendCtx, debug: bool) -> Result<()> {
797 for extern_ in module.externs {
798 self.compile_extern(extern_)?;
799 }
800
801 for function in &module.functions {
802 self.precompute_function(function);
803 }
804
805 for function in module.functions {
806 self.compile_function(function, &ctx)?;
807 }
808
809 self.wrap_main()?;
810
811 if debug {
812 self.module.print_to_stderr();
813 }
814
815 self.module.verify().unwrap();
816
817 self.output();
818
819 Ok(())
820 }
821}
822pub fn compile(module: mir::Module, ctx: FrontendCtx, debug: bool) -> Result<()> {
823 let context = Context::create();
824
825 let llvm = Llvm::new(&context);
826 llvm.compile(module, ctx, debug)?;
827
828 Ok(())
829}