// Mach-O image loader template: maps an executable or its dynamic linker into memory.
1#include <stdint.h>
2#include <mach-o/loader.h>
3#include <mach-o/fat.h>
4#include <stdlib.h>
5#include <unistd.h>
6#include <stdio.h>
7#include <stdbool.h>
8#include <sys/mman.h>
9#include <errno.h>
10#include <string.h>
11
12#include "loader.h"
13
// Forward declarations; the definitions are not part of this excerpt.
// native_prot: converts Mach-O VM protection bits to native mmap PROT_* flags
// (inferred from its use on seg->maxprot/seg->initprot below — confirm at definition).
static int native_prot(int prot);
// load: opens and maps the image at `path`; used here to recursively load the
// LC_LOAD_DYLINKER target with expect_dylinker = true.
static void load(const char* path, cpu_type_t cpu, bool expect_dylinker, char** argv, struct load_results* lr);
// setup_space: prepares the address-space layout for a 32- or 64-bit process.
static void setup_space(struct load_results* lr, bool is_64_bit);
// compatible_mmap: mmap wrapper with the same signature as mmap(2),
// presumably smoothing over platform flag differences — confirm at definition.
static void* compatible_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
18
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
// Round down / up to a PAGE_SIZE boundary.
// FIX: `x` is now parenthesized in PAGE_ALIGN so that callers may safely pass
// arbitrary expressions (an unparenthesized macro argument is a latent
// precedence hazard, e.g. with operators binding looser than `&`).
#define PAGE_ALIGN(x) ((x) & ~(PAGE_SIZE-1))
#define PAGE_ROUNDUP(x) (((((x)-1) / PAGE_SIZE)+1) * PAGE_SIZE)
24
// Template parameters: this file is compiled twice — once with GEN_64BIT and
// once with GEN_32BIT — to produce the two loader variants:
// FUNCTION_NAME (load32 / load64)
// SEGMENT_STRUCT (segment_command / segment_command_64)
// SEGMENT_COMMAND (LC_SEGMENT / LC_SEGMENT_64)
// MACH_HEADER_STRUCT (mach_header / mach_header_64)
// SECTION_STRUCT (section / section_64)

#if defined(GEN_64BIT)
# define FUNCTION_NAME load64
# define SEGMENT_STRUCT segment_command_64
# define SEGMENT_COMMAND LC_SEGMENT_64
# define MACH_HEADER_STRUCT mach_header_64
# define SECTION_STRUCT section_64
# define MAP_EXTRA 0
#elif defined(GEN_32BIT)
# define FUNCTION_NAME load32
# define SEGMENT_STRUCT segment_command
# define SEGMENT_COMMAND LC_SEGMENT
# define MACH_HEADER_STRUCT mach_header
# define SECTION_STRUCT section
# define MAP_EXTRA MAP_32BIT
#else
# error See above
#endif
49
50void FUNCTION_NAME(int fd, bool expect_dylinker, struct load_results* lr)
51{
52 struct MACH_HEADER_STRUCT header;
53 uint8_t* cmds;
54 uintptr_t entryPoint = 0, entryPointDylinker = 0;
55 struct MACH_HEADER_STRUCT* mappedHeader = NULL;
56 uintptr_t slide = 0;
57 uintptr_t mmapSize = 0;
58 bool pie = false;
59 uint32_t fat_offset;
60 void* tmp_map_base = NULL;
61
62 if (!expect_dylinker)
63 {
64#if defined(GEN_64BIT)
65 setup_space(lr, true);
66#elif defined(GEN_32BIT)
67 lr->_32on64 = true;
68 setup_space(lr, false);
69#else
70 #error Unsupported architecture
71#endif
72 }
73
74 fat_offset = lseek(fd, 0, SEEK_CUR);
75
76 if (read(fd, &header, sizeof(header)) != sizeof(header))
77 {
78 fprintf(stderr, "Cannot read the mach header.\n");
79 exit(1);
80 }
81
82 if (header.filetype != (expect_dylinker ? MH_DYLINKER : MH_EXECUTE))
83 {
84 fprintf(stderr, "Found unexpected Mach-O file type: %u\n", header.filetype);
85 exit(1);
86 }
87
88 tmp_map_base = mmap(NULL, PAGE_ROUNDUP(sizeof(header) + header.sizeofcmds), PROT_READ, MAP_PRIVATE, fd, fat_offset);
89 if (tmp_map_base == MAP_FAILED) {
90 fprintf(stderr, "Failed to mmap header + commands\n");
91 exit(1);
92 }
93
94 cmds = (void*)((char*)tmp_map_base + sizeof(header));
95
96 if ((header.filetype == MH_EXECUTE && header.flags & MH_PIE) || header.filetype == MH_DYLINKER)
97 {
98 uintptr_t base = -1;
99
100 // Go through all SEGMENT_COMMAND commands to get the total continuous range required.
101 for (uint32_t i = 0, p = 0; i < header.ncmds; i++)
102 {
103 struct SEGMENT_STRUCT* seg = (struct SEGMENT_STRUCT*) &cmds[p];
104
105 // Load commands are always sorted, so this will get us the maximum address.
106 if (seg->cmd == SEGMENT_COMMAND && strcmp(seg->segname, "__PAGEZERO") != 0)
107 {
108 if (base == -1)
109 {
110 base = seg->vmaddr;
111 //if (base != 0 && header.filetype == MH_DYLINKER)
112 // goto no_slide;
113 }
114 mmapSize = seg->vmaddr + seg->vmsize - base;
115 }
116
117 p += seg->cmdsize;
118 }
119
120 slide = (uintptr_t) mmap((void*) base, mmapSize, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_EXTRA, -1, 0);
121 if (slide == (uintptr_t)MAP_FAILED)
122 {
123 fprintf(stderr, "Cannot mmap anonymous memory range: %s\n", strerror(errno));
124 exit(1);
125 }
126
127 // unmap it so we can map the actual segments later using MAP_FIXED_NOREPLACE;
128 // we're the only thread running, so there's no chance this memory range will become occupied from now until then
129 munmap((void*)slide, mmapSize);
130
131 if (slide + mmapSize > lr->vm_addr_max)
132 lr->vm_addr_max = lr->base = slide + mmapSize;
133 slide -= base;
134
135 pie = true;
136 }
137no_slide:
138
139 for (uint32_t i = 0, p = 0; i < header.ncmds && p < header.sizeofcmds; i++)
140 {
141 struct load_command* lc;
142
143 lc = (struct load_command*) &cmds[p];
144
145 switch (lc->cmd)
146 {
147 case SEGMENT_COMMAND:
148 {
149 struct SEGMENT_STRUCT* seg = (struct SEGMENT_STRUCT*) lc;
150 void* rv;
151
152 // This logic is wrong and made up. But it's the only combination where
153 // some apps stop crashing (TBD why) and LLDB recognized the memory layout
154 // of processes started as suspended.
155 int maxprot = native_prot(seg->maxprot);
156 int initprot = native_prot(seg->initprot);
157 int useprot = (initprot & PROT_EXEC) ? maxprot : initprot;
158
159 if (seg->filesize < seg->vmsize)
160 {
161 unsigned long map_addr;
162 if (slide != 0)
163 {
164 unsigned long addr = seg->vmaddr;
165
166 if (addr != 0)
167 addr += slide;
168
169 // Some segments' filesize != vmsize, thus this mprotect().
170 rv = compatible_mmap((void*)addr, seg->vmsize, useprot, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0);
171 if (rv == (void*)MAP_FAILED)
172 {
173 if (seg->vmaddr == 0 && useprot == 0) {
174 // this is the PAGEZERO segment;
175 // if we can't map it, assume everything is fine and the system has already made that area inaccessible
176 rv = 0;
177 } else {
178 fprintf(stderr, "Cannot mmap segment %s at %p: %s\n", seg->segname, (void*)(uintptr_t)seg->vmaddr, strerror(errno));
179 exit(1);
180 }
181 }
182 }
183 else
184 {
185 size_t size = seg->vmsize - seg->filesize;
186 rv = compatible_mmap((void*) PAGE_ALIGN(seg->vmaddr + seg->vmsize - size), PAGE_ROUNDUP(size), useprot,
187 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0);
188 if (rv == (void*)MAP_FAILED)
189 {
190 if (seg->vmaddr == 0 && useprot == 0) {
191 // this is the PAGEZERO segment;
192 // if we can't map it, assume everything is fine and the system has already made that area inaccessible
193 rv = 0;
194 } else {
195 fprintf(stderr, "Cannot mmap segment %s at %p: %s\n", seg->segname, (void*)(uintptr_t)seg->vmaddr, strerror(errno));
196 exit(1);
197 }
198 }
199 }
200 }
201
202 if (seg->filesize > 0)
203 {
204 unsigned long addr = seg->vmaddr + slide;
205 int flag = MAP_FIXED_NOREPLACE;
206 if (seg->filesize < seg->vmsize) {
207 flag = MAP_FIXED;
208 }
209 rv = compatible_mmap((void*)addr, seg->filesize, useprot,
210 flag | MAP_PRIVATE, fd, seg->fileoff + fat_offset);
211 if (rv == (void*)MAP_FAILED)
212 {
213 if (seg->vmaddr == 0 && useprot == 0) {
214 // this is the PAGEZERO segment;
215 // if we can't map it, assume everything is fine and the system has already made that area inaccessible
216 rv = 0;
217 } else {
218 fprintf(stderr, "Cannot mmap segment %s at %p: %s\n", seg->segname, (void*)(uintptr_t)seg->vmaddr, strerror(errno));
219 exit(1);
220 }
221 }
222
223 if (seg->fileoff == 0)
224 mappedHeader = (struct MACH_HEADER_STRUCT*) (seg->vmaddr + slide);
225 }
226
227 if (seg->vmaddr + slide + seg->vmsize > lr->vm_addr_max)
228 lr->vm_addr_max = seg->vmaddr + slide + seg->vmsize;
229
230 if (strcmp(SEG_DATA, seg->segname) == 0)
231 {
232 // Look for section named __all_image_info for GDB integration
233 struct SECTION_STRUCT* sect = (struct SECTION_STRUCT*) (seg+1);
234 struct SECTION_STRUCT* end = (struct SECTION_STRUCT*) (&cmds[p + lc->cmdsize]);
235
236 while (sect < end)
237 {
238 if (strncmp(sect->sectname, "__all_image_info", 16) == 0)
239 {
240 lr->dyld_all_image_location = slide + sect->addr;
241 lr->dyld_all_image_size = sect->size;
242 break;
243 }
244 sect++;
245 }
246 }
247 break;
248 }
249 case LC_UNIXTHREAD:
250 {
251#ifdef GEN_64BIT
252 entryPoint = ((uint64_t*) lc)[18];
253#endif
254#ifdef GEN_32BIT
255 entryPoint = ((uint32_t*) lc)[14];
256#endif
257 entryPoint += slide;
258 break;
259 }
260 case LC_LOAD_DYLINKER:
261 {
262 if (header.filetype != MH_EXECUTE)
263 {
264 // dylinker can't reference another dylinker
265 fprintf(stderr, "Dynamic linker can't reference another dynamic linker\n");
266 exit(1);
267 }
268
269 struct dylinker_command* dy = (struct dylinker_command*) lc;
270 char* path;
271 size_t length;
272 static char path_buffer[4096];
273
274 if (lr->root_path != NULL)
275 {
276 const size_t root_len = strlen(lr->root_path);
277 const size_t linker_len = dy->cmdsize - dy->name.offset;
278
279 length = linker_len + root_len;
280 if (length > sizeof(path_buffer) - 1) {
281 fprintf(stderr, "Dynamic loader path too long");
282 exit(1);
283 }
284 path = path_buffer;
285
286 // Concat root path and linker path
287 memcpy(path, lr->root_path, root_len);
288 memcpy(path + root_len, ((char*) dy) + dy->name.offset, linker_len);
289 path[length] = '\0';
290 }
291
292 if (path == NULL)
293 {
294 length = dy->cmdsize - dy->name.offset;
295 if (length > sizeof(path_buffer) - 1) {
296 fprintf(stderr, "Dynamic loader path too long");
297 exit(1);
298 }
299 path = path_buffer;
300
301 memcpy(path, ((char*) dy) + dy->name.offset, length);
302 path[length] = '\0';
303 }
304
305 if (path == NULL)
306 {
307 fprintf(stderr, "Failed to load dynamic linker for executable\n");
308 exit(1);
309 }
310
311 load(path, header.cputype, true, NULL, lr);
312
313 break;
314 }
315 case LC_MAIN:
316 {
317 struct entry_point_command* ee = (struct entry_point_command*) lc;
318 if (ee->stacksize > lr->stack_size)
319 lr->stack_size = ee->stacksize;
320 break;
321 }
322 case LC_UUID:
323 {
324 if (header.filetype == MH_EXECUTE)
325 {
326 struct uuid_command* ue = (struct uuid_command*) lc;
327 memcpy(lr->uuid, ue->uuid, sizeof(ue->uuid));
328 }
329 break;
330 }
331 }
332
333 p += lc->cmdsize;
334 }
335
336 if (header.filetype == MH_EXECUTE)
337 lr->mh = (uintptr_t) mappedHeader;
338 if (entryPoint && !lr->entry_point)
339 lr->entry_point = entryPoint;
340
341 if (tmp_map_base)
342 munmap(tmp_map_base, PAGE_ROUNDUP(sizeof(header) + header.sizeofcmds));
343}
344
345
// Undefine the template parameters so this file can be included/compiled
// again for the other word size without macro redefinition.
#undef FUNCTION_NAME
#undef SEGMENT_STRUCT
#undef SEGMENT_COMMAND
#undef MACH_HEADER_STRUCT
#undef SECTION_STRUCT
#undef MAP_EXTRA
352