"Das U-Boot" Source Tree
1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written 4 * by Doug Lea and released to the public domain, as explained at 5 * http://creativecommons.org/publicdomain/zero/1.0/- 6 * 7 * The original code is available at http://gee.cs.oswego.edu/pub/misc/ 8 * as file malloc-2.6.6.c. 9 */ 10 11#if CONFIG_IS_ENABLED(UNIT_TEST) 12#define DEBUG 13#endif 14 15#include <log.h> 16#include <asm/global_data.h> 17 18#include <malloc.h> 19#include <mapmem.h> 20#include <string.h> 21#include <asm/io.h> 22#include <valgrind/memcheck.h> 23 24#ifdef DEBUG 25#if __STD_C 26static void malloc_update_mallinfo (void); 27void malloc_stats (void); 28#else 29static void malloc_update_mallinfo (); 30void malloc_stats(); 31#endif 32#endif /* DEBUG */ 33 34DECLARE_GLOBAL_DATA_PTR; 35 36#ifdef MCHECK_HEAP_PROTECTION 37 #define STATIC_IF_MCHECK static 38 #undef MALLOC_COPY 39 #undef MALLOC_ZERO 40static inline void MALLOC_ZERO(void *p, size_t sz) { memset(p, 0, sz); } 41static inline void MALLOC_COPY(void *dest, const void *src, size_t sz) { memcpy(dest, src, sz); } 42#else 43 #define STATIC_IF_MCHECK 44 #define mALLOc_impl mALLOc 45 #define fREe_impl fREe 46 #define rEALLOc_impl rEALLOc 47 #define mEMALIGn_impl mEMALIGn 48 #define cALLOc_impl cALLOc 49#endif 50 51/* 52 Emulation of sbrk for WIN32 53 All code within the ifdef WIN32 is untested by me. 54 55 Thanks to Martin Fong and others for supplying this. 56*/ 57 58#ifdef WIN32 59 60#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \ 61~(malloc_getpagesize-1)) 62#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1)) 63 64/* resrve 64MB to insure large contiguous space */ 65#define RESERVED_SIZE (1024*1024*64) 66#define NEXT_SIZE (2048*1024) 67#define TOP_MEMORY ((unsigned long)2*1024*1024*1024) 68 69struct GmListElement; 70typedef struct GmListElement GmListElement; 71 72struct GmListElement 73{ 74 GmListElement* next; 75 void* base; 76}; 77 78static GmListElement* head = 0; 79static unsigned int gNextAddress = 0; 80static unsigned int gAddressBase = 0; 81static unsigned int gAllocatedSize = 0; 82 83static 84GmListElement* makeGmListElement (void* bas) 85{ 86 GmListElement* this; 87 this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement)); 88 assert (this); 89 if (this) 90 { 91 this->base = bas; 92 this->next = head; 93 head = this; 94 } 95 return this; 96} 97 98void gcleanup (void) 99{ 100 BOOL rval; 101 assert ( (head == NULL) || (head->base == (void*)gAddressBase)); 102 if (gAddressBase && (gNextAddress - gAddressBase)) 103 { 104 rval = VirtualFree ((void*)gAddressBase, 105 gNextAddress - gAddressBase, 106 MEM_DECOMMIT); 107 assert (rval); 108 } 109 while (head) 110 { 111 GmListElement* next = head->next; 112 rval = VirtualFree (head->base, 0, MEM_RELEASE); 113 assert (rval); 114 LocalFree (head); 115 head = next; 116 } 117} 118 119static 120void* findRegion (void* start_address, unsigned long size) 121{ 122 MEMORY_BASIC_INFORMATION info; 123 if (size >= TOP_MEMORY) return NULL; 124 125 while ((unsigned long)start_address + size < TOP_MEMORY) 126 { 127 VirtualQuery (start_address, &info, sizeof (info)); 128 if ((info.State == MEM_FREE) && (info.RegionSize >= size)) 129 return start_address; 130 else 131 { 132 /* Requested region is not available so see if the */ 133 /* next region is available. Set 'start_address' */ 134 /* to the next region and call 'VirtualQuery()' */ 135 /* again. 
*/ 136 137 start_address = (char*)info.BaseAddress + info.RegionSize; 138 139 /* Make sure we start looking for the next region */ 140 /* on the *next* 64K boundary. Otherwise, even if */ 141 /* the new region is free according to */ 142 /* 'VirtualQuery()', the subsequent call to */ 143 /* 'VirtualAlloc()' (which follows the call to */ 144 /* this routine in 'wsbrk()') will round *down* */ 145 /* the requested address to a 64K boundary which */ 146 /* we already know is an address in the */ 147 /* unavailable region. Thus, the subsequent call */ 148 /* to 'VirtualAlloc()' will fail and bring us back */ 149 /* here, causing us to go into an infinite loop. */ 150 151 start_address = 152 (void *) AlignPage64K((unsigned long) start_address); 153 } 154 } 155 return NULL; 156 157} 158 159void* wsbrk (long size) 160{ 161 void* tmp; 162 if (size > 0) 163 { 164 if (gAddressBase == 0) 165 { 166 gAllocatedSize = max (RESERVED_SIZE, AlignPage (size)); 167 gNextAddress = gAddressBase = 168 (unsigned int)VirtualAlloc (NULL, gAllocatedSize, 169 MEM_RESERVE, PAGE_NOACCESS); 170 } else if (AlignPage (gNextAddress + size) > (gAddressBase + 171gAllocatedSize)) 172 { 173 long new_size = max (NEXT_SIZE, AlignPage (size)); 174 void* new_address = (void*)(gAddressBase+gAllocatedSize); 175 do 176 { 177 new_address = findRegion (new_address, new_size); 178 179 if (!new_address) 180 return (void*)-1; 181 182 gAddressBase = gNextAddress = 183 (unsigned int)VirtualAlloc (new_address, new_size, 184 MEM_RESERVE, PAGE_NOACCESS); 185 /* repeat in case of race condition */ 186 /* The region that we found has been snagged */ 187 /* by another thread */ 188 } 189 while (gAddressBase == 0); 190 191 assert (new_address == (void*)gAddressBase); 192 193 gAllocatedSize = new_size; 194 195 if (!makeGmListElement ((void*)gAddressBase)) 196 return (void*)-1; 197 } 198 if ((size + gNextAddress) > AlignPage (gNextAddress)) 199 { 200 void* res; 201 res = VirtualAlloc ((void*)AlignPage (gNextAddress), 202 (size + gNextAddress - 203 AlignPage (gNextAddress)), 204 MEM_COMMIT, PAGE_READWRITE); 205 if (!res) 206 return (void*)-1; 207 } 208 tmp = (void*)gNextAddress; 209 gNextAddress = (unsigned int)tmp + size; 210 return tmp; 211 } 212 else if (size < 0) 213 { 214 unsigned int alignedGoal = AlignPage (gNextAddress + size); 215 /* Trim by releasing the virtual memory */ 216 if (alignedGoal >= gAddressBase) 217 { 218 VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, 219 MEM_DECOMMIT); 220 gNextAddress = gNextAddress + size; 221 return (void*)gNextAddress; 222 } 223 else 224 { 225 VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, 226 MEM_DECOMMIT); 227 gNextAddress = gAddressBase; 228 return (void*)-1; 229 } 230 } 231 else 232 { 233 return (void*)gNextAddress; 234 } 235} 236 237#endif 238 239/* 240 Type declarations 241*/ 242 243struct malloc_chunk 244{ 245 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ 246 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ 247 struct malloc_chunk* fd; /* double links -- used only if free. */ 248 struct malloc_chunk* bk; 249} __attribute__((__may_alias__)) ; 250 251typedef struct malloc_chunk* mchunkptr; 252 253/* 254 255 malloc_chunk details: 256 257 (The following includes lightly edited explanations by Colin Plumb.) 258 259 Chunks of memory are maintained using a `boundary tag' method as 260 described in e.g., Knuth or Standish. (See the paper by Paul 261 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a 262 survey of such techniques.) 
Sizes of free chunks are stored both 263 in the front of each chunk and at the end. This makes 264 consolidating fragmented chunks into bigger chunks very fast. The 265 size fields also hold bits representing whether chunks are free or 266 in use. 267 268 An allocated chunk looks like this: 269 270 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 271 | Size of previous chunk, if allocated | | 272 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 273 | Size of chunk, in bytes |P| 274 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 275 | User data starts here... . 276 . . 277 . (malloc_usable_space() bytes) . 278 . | 279nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 280 | Size of chunk | 281 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 282 283 Where "chunk" is the front of the chunk for the purpose of most of 284 the malloc code, but "mem" is the pointer that is returned to the 285 user. "Nextchunk" is the beginning of the next contiguous chunk. 286 287 Chunks always begin on even word boundries, so the mem portion 288 (which is returned to the user) is also on an even word boundary, and 289 thus double-word aligned. 290 291 Free chunks are stored in circular doubly-linked lists, and look like this: 292 293 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 294 | Size of previous chunk | 295 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 296 `head:' | Size of chunk, in bytes |P| 297 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 298 | Forward pointer to next chunk in list | 299 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 300 | Back pointer to previous chunk in list | 301 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 302 | Unused space (may be 0 bytes long) . 303 . . 304 . | 305 306nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 307 `foot:' | Size of chunk, in bytes | 308 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 309 310 The P (PREV_INUSE) bit, stored in the unused low-order bit of the 311 chunk size (which is always a multiple of two words), is an in-use 312 bit for the *previous* chunk. If that bit is *clear*, then the 313 word before the current chunk size contains the previous chunk 314 size, and can be used to find the front of the previous chunk. 315 (The very first chunk allocated always has this bit set, 316 preventing access to non-existent (or non-owned) memory.) 317 318 Note that the `foot' of the current chunk is actually represented 319 as the prev_size of the NEXT chunk. (This makes it easier to 320 deal with alignments etc). 321 322 The two exceptions to all this are 323 324 1. The special chunk `top', which doesn't bother using the 325 trailing size field since there is no 326 next contiguous chunk that would have to index off it. (After 327 initialization, `top' is forced to always exist. If it would 328 become less than MINSIZE bytes long, it is replenished via 329 malloc_extend_top.) 330 331 2. Chunks allocated via mmap, which have the second-lowest-order 332 bit (IS_MMAPPED) set in their size fields. Because they are 333 never merged or traversed from any other chunk, they have no 334 foot size or inuse information. 335 336 Available chunks are kept in any of several places (all declared below): 337 338 * `av': An array of chunks serving as bin headers for consolidated 339 chunks. 
Each bin is doubly linked. The bins are approximately 340 proportionally (log) spaced. There are a lot of these bins 341 (128). This may look excessive, but works very well in 342 practice. All procedures maintain the invariant that no 343 consolidated chunk physically borders another one. Chunks in 344 bins are kept in size order, with ties going to the 345 approximately least recently used chunk. 346 347 The chunks in each bin are maintained in decreasing sorted order by 348 size. This is irrelevant for the small bins, which all contain 349 the same-sized chunks, but facilitates best-fit allocation for 350 larger chunks. (These lists are just sequential. Keeping them in 351 order almost never requires enough traversal to warrant using 352 fancier ordered data structures.) Chunks of the same size are 353 linked with the most recently freed at the front, and allocations 354 are taken from the back. This results in LRU or FIFO allocation 355 order, which tends to give each chunk an equal opportunity to be 356 consolidated with adjacent freed chunks, resulting in larger free 357 chunks and less fragmentation. 358 359 * `top': The top-most available chunk (i.e., the one bordering the 360 end of available memory) is treated specially. It is never 361 included in any bin, is used only if no other chunk is 362 available, and is released back to the system if it is very 363 large (see M_TRIM_THRESHOLD). 364 365 * `last_remainder': A bin holding only the remainder of the 366 most recently split (non-top) chunk. This bin is checked 367 before other non-fitting chunks, so as to provide better 368 locality for runs of sequentially allocated chunks. 369 370 * Implicitly, through the host system's memory mapping tables. 371 If supported, requests greater than a threshold are usually 372 serviced via calls to mmap, and then later released via munmap. 373 374*/ 375 376/* sizes, alignments */ 377 378#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) 379#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) 380#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) 381#define MINSIZE (sizeof(struct malloc_chunk)) 382 383/* conversion from malloc headers to user pointers, and back */ 384 385#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) 386#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) 387 388/* pad request bytes into a usable size */ 389 390#define request2size(req) \ 391 ((((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ 392 (MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \ 393 (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) 394 395/* Check if m has acceptable alignment */ 396 397#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) 398 399/* 400 Physical chunk operations 401*/ 402 403/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ 404 405#define PREV_INUSE 0x1 406 407/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ 408 409#define IS_MMAPPED 0x2 410 411/* Bits to mask off when extracting size */ 412 413#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) 414 415/* Ptr to next physical malloc_chunk. 
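   As a rough illustration of the header arithmetic above (figures assume a
   32-bit build where INTERNAL_SIZE_T is 4 bytes, so SIZE_SZ == 4,
   MALLOC_ALIGNMENT == 8 and MINSIZE == 16): for a chunk header at address p
   whose size field is 0x19 (24 bytes with PREV_INUSE set),

       chunk2mem(p)   is (char*)p + 8          the pointer handed to the user
       chunksize(p)   is 0x19 & ~SIZE_BITS     == 24
       next_chunk(p)  is (char*)p + 24         the header of the next chunk

   The macro below performs exactly this last computation.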
*/ 416 417#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) 418 419/* Ptr to previous physical malloc_chunk */ 420 421#define prev_chunk(p)\ 422 ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) 423 424/* Treat space at ptr + offset as a chunk */ 425 426#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) 427 428/* 429 Dealing with use bits 430*/ 431 432/* extract p's inuse bit */ 433 434#define inuse(p)\ 435((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) 436 437/* extract inuse bit of previous chunk */ 438 439#define prev_inuse(p) ((p)->size & PREV_INUSE) 440 441/* check for mmap()'ed chunk */ 442 443#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) 444 445/* set/clear chunk as in use without otherwise disturbing */ 446 447#define set_inuse(p)\ 448((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE 449 450#define clear_inuse(p)\ 451((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) 452 453/* check/set/clear inuse bits in known places */ 454 455#define inuse_bit_at_offset(p, s)\ 456 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) 457 458#define set_inuse_bit_at_offset(p, s)\ 459 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) 460 461#define clear_inuse_bit_at_offset(p, s)\ 462 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) 463 464/* 465 Dealing with size fields 466*/ 467 468/* Get size, ignoring use bits */ 469 470#define chunksize(p) ((p)->size & ~(SIZE_BITS)) 471 472/* Set size at head, without disturbing its use bit */ 473 474#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) 475 476/* Set size/use ignoring previous bits in header */ 477 478#define set_head(p, s) ((p)->size = (s)) 479 480/* Set size at footer (only when chunk is not in use) */ 481 482#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) 483 484/* 485 Bins 486 487 The bins, `av_' are an array of pairs of pointers serving as the 488 heads of (initially empty) doubly-linked lists of chunks, laid out 489 in a way so that each pair can be treated as if it were in a 490 malloc_chunk. (This way, the fd/bk offsets for linking bin heads 491 and chunks are the same). 492 493 Bins for sizes < 512 bytes contain chunks of all the same size, spaced 494 8 bytes apart. Larger bins are approximately logarithmically 495 spaced. (See the table below.) The `av_' array is never mentioned 496 directly in the code, but instead via bin access macros. 497 498 Bin layout: 499 500 64 bins of size 8 501 32 bins of size 64 502 16 bins of size 512 503 8 bins of size 4096 504 4 bins of size 32768 505 2 bins of size 262144 506 1 bin of size what's left 507 508 There is actually a little bit of slop in the numbers in bin_index 509 for the sake of speed. This makes no difference elsewhere. 510 511 The special chunks `top' and `last_remainder' get their own bins, 512 (this is implemented via yet more trickery with the av_ array), 513 although `top' is never properly linked to its bin since it is 514 always handled specially. 515 516*/ 517 518#define NAV 128 /* number of bins */ 519 520typedef struct malloc_chunk* mbinptr; 521 522/* access macros */ 523 524#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) 525#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) 526#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) 527 528/* 529 The first 2 bins are never indexed. 
The corresponding av_ cells are instead 530 used for bookkeeping. This is not to save space, but to simplify 531 indexing, maintain locality, and avoid some initialization tests. 532*/ 533 534#define top (av_[2]) /* The topmost chunk */ 535#define last_remainder (bin_at(1)) /* remainder from last split */ 536 537/* 538 Because top initially points to its own bin with initial 539 zero size, thus forcing extension on the first malloc request, 540 we avoid having any special code in malloc to check whether 541 it even exists yet. But we still need to in malloc_extend_top. 542*/ 543 544#define initial_top ((mchunkptr)(bin_at(0))) 545 546/* Helper macro to initialize bins */ 547 548#define IAV(i) bin_at(i), bin_at(i) 549 550static mbinptr av_[NAV * 2 + 2] = { 551 NULL, NULL, 552 IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), 553 IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), 554 IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), 555 IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), 556 IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), 557 IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), 558 IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), 559 IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), 560 IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), 561 IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), 562 IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), 563 IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), 564 IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), 565 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), 566 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), 567 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) 568}; 569 570#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT 571static void malloc_init(void); 572#endif 573 574ulong mem_malloc_start = 0; 575ulong mem_malloc_end = 0; 576ulong mem_malloc_brk = 0; 577 578static bool malloc_testing; /* enable test mode */ 579static int malloc_max_allocs; /* return NULL after this many calls to malloc() */ 580 581void *sbrk(ptrdiff_t increment) 582{ 583 ulong old = mem_malloc_brk; 584 ulong new = old + increment; 585 586 if ((new < mem_malloc_start) || (new > mem_malloc_end)) 587 return (void *)MORECORE_FAILURE; 588 589 /* 590 * if we are giving memory back make sure we clear it out since 591 * we set MORECORE_CLEARS to 1 592 */ 593 if (increment < 0) 594 memset((void *)new, 0, -increment); 595 596 mem_malloc_brk = new; 597 598 return (void *)old; 599} 600 601void mem_malloc_init(ulong start, ulong size) 602{ 603 mem_malloc_start = (ulong)map_sysmem(start, size); 604 mem_malloc_end = mem_malloc_start + size; 605 mem_malloc_brk = mem_malloc_start; 606 607#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT 608 malloc_init(); 609#endif 610 611 debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start, 612 mem_malloc_end); 613#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT) 614 memset((void *)mem_malloc_start, 0x0, size); 615#endif 616} 617 618/* field-extraction macros */ 619 620#define first(b) ((b)->fd) 621#define last(b) ((b)->bk) 622 623/* 624 Indexing into bins 625*/ 626 627#define bin_index(sz) \ 628(((((unsigned long)(sz)) >> 9) == 0) ? 
(((unsigned long)(sz)) >> 3): \ 629 ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ 630 ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ 631 ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ 632 ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ 633 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ 634 126) 635/* 636 bins for chunks < 512 are all spaced 8 bytes apart, and hold 637 identically sized chunks. This is exploited in malloc. 638*/ 639 640#define MAX_SMALLBIN 63 641#define MAX_SMALLBIN_SIZE 512 642#define SMALLBIN_WIDTH 8 643 644#define smallbin_index(sz) (((unsigned long)(sz)) >> 3) 645 646/* 647 Requests are `small' if both the corresponding and the next bin are small 648*/ 649 650#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) 651 652/* 653 To help compensate for the large number of bins, a one-level index 654 structure is used for bin-by-bin searching. `binblocks' is a 655 one-word bitvector recording whether groups of BINBLOCKWIDTH bins 656 have any (possibly) non-empty bins, so they can be skipped over 657 all at once during during traversals. The bits are NOT always 658 cleared as soon as all bins in a block are empty, but instead only 659 when all are noticed to be empty during traversal in malloc. 660*/ 661 662#define BINBLOCKWIDTH 4 /* bins per block */ 663 664#define binblocks_r ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */ 665#define binblocks_w (av_[1]) 666 667/* bin<->block macros */ 668 669#define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH)) 670#define mark_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii))) 671#define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii)))) 672 673/* Other static bookkeeping data */ 674 675/* variables holding tunable values */ 676 677static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; 678static unsigned long top_pad = DEFAULT_TOP_PAD; 679static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; 680static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; 681 682/* The first value returned from sbrk */ 683static char* sbrk_base = (char*)(-1); 684 685/* The maximum memory obtained from system via sbrk */ 686static unsigned long max_sbrked_mem = 0; 687 688/* The maximum via either sbrk or mmap */ 689static unsigned long max_total_mem = 0; 690 691/* internal working copy of mallinfo */ 692static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 693 694/* The total memory obtained from system via sbrk */ 695#define sbrked_mem (current_mallinfo.arena) 696 697/* Tracking mmaps */ 698 699#ifdef DEBUG 700static unsigned int n_mmaps = 0; 701#endif /* DEBUG */ 702static unsigned long mmapped_mem = 0; 703#if HAVE_MMAP 704static unsigned int max_n_mmaps = 0; 705static unsigned long max_mmapped_mem = 0; 706#endif 707 708#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT 709static void malloc_init(void) 710{ 711 int i, j; 712 713 debug("bins (av_ array) are at %p\n", (void *)av_); 714 715 av_[0] = NULL; av_[1] = NULL; 716 for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) { 717 av_[i] = bin_at(j - 2); 718 av_[i + 1] = bin_at(j - 2); 719 720 /* Just print the first few bins so that 721 * we can see there are alright. 
722 */ 723 if (i < 10) 724 debug("av_[%d]=%lx av_[%d]=%lx\n", 725 i, (ulong)av_[i], 726 i + 1, (ulong)av_[i + 1]); 727 } 728 729 /* Init the static bookkeeping as well */ 730 sbrk_base = (char *)(-1); 731 max_sbrked_mem = 0; 732 max_total_mem = 0; 733#ifdef DEBUG 734 memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo)); 735#endif 736} 737#endif 738 739/* 740 Debugging support 741*/ 742 743#ifdef DEBUG 744 745/* 746 These routines make a number of assertions about the states 747 of data structures that should be true at all times. If any 748 are not true, it's very likely that a user program has somehow 749 trashed memory. (It's also possible that there is a coding error 750 in malloc. In which case, please report it!) 751*/ 752 753#if __STD_C 754static void do_check_chunk(mchunkptr p) 755#else 756static void do_check_chunk(p) mchunkptr p; 757#endif 758{ 759 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; 760 761 /* No checkable chunk is mmapped */ 762 assert(!chunk_is_mmapped(p)); 763 764 /* Check for legal address ... */ 765 assert((char*)p >= sbrk_base); 766 if (p != top) 767 assert((char*)p + sz <= (char*)top); 768 else 769 assert((char*)p + sz <= sbrk_base + sbrked_mem); 770 771} 772 773#if __STD_C 774static void do_check_free_chunk(mchunkptr p) 775#else 776static void do_check_free_chunk(p) mchunkptr p; 777#endif 778{ 779 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; 780 mchunkptr next = chunk_at_offset(p, sz); 781 782 do_check_chunk(p); 783 784 /* Check whether it claims to be free ... */ 785 assert(!inuse(p)); 786 787 /* Unless a special marker, must have OK fields */ 788 if ((long)sz >= (long)MINSIZE) 789 { 790 assert((sz & MALLOC_ALIGN_MASK) == 0); 791 assert(aligned_OK(chunk2mem(p))); 792 /* ... matching footer field */ 793 assert(next->prev_size == sz); 794 /* ... and is fully consolidated */ 795 assert(prev_inuse(p)); 796 assert (next == top || inuse(next)); 797 798 /* ... and has minimally sane links */ 799 assert(p->fd->bk == p); 800 assert(p->bk->fd == p); 801 } 802 else /* markers are always of size SIZE_SZ */ 803 assert(sz == SIZE_SZ); 804} 805 806#if __STD_C 807static void do_check_inuse_chunk(mchunkptr p) 808#else 809static void do_check_inuse_chunk(p) mchunkptr p; 810#endif 811{ 812 mchunkptr next = next_chunk(p); 813 do_check_chunk(p); 814 815 /* Check whether it claims to be in use ... */ 816 assert(inuse(p)); 817 818 /* ... and is surrounded by OK chunks. 819 Since more things can be checked with free chunks than inuse ones, 820 if an inuse chunk borders them and debug is on, it's worth doing them. 821 */ 822 if (!prev_inuse(p)) 823 { 824 mchunkptr prv = prev_chunk(p); 825 assert(next_chunk(prv) == p); 826 do_check_free_chunk(prv); 827 } 828 if (next == top) 829 { 830 assert(prev_inuse(next)); 831 assert(chunksize(next) >= MINSIZE); 832 } 833 else if (!inuse(next)) 834 do_check_free_chunk(next); 835 836} 837 838#if __STD_C 839static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) 840#else 841static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; 842#endif 843{ 844 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; 845 long room = sz - s; 846 847 do_check_inuse_chunk(p); 848 849 /* Legal size ... */ 850 assert((long)sz >= (long)MINSIZE); 851 assert((sz & MALLOC_ALIGN_MASK) == 0); 852 assert(room >= 0); 853 assert(room < (long)MINSIZE); 854 855 /* ... and alignment */ 856 assert(aligned_OK(chunk2mem(p))); 857 858 /* ... 
and was allocated at front of an available chunk */ 859 assert(prev_inuse(p)); 860 861} 862 863#define check_free_chunk(P) do_check_free_chunk(P) 864#define check_inuse_chunk(P) do_check_inuse_chunk(P) 865#define check_chunk(P) do_check_chunk(P) 866#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) 867#else 868#define check_free_chunk(P) 869#define check_inuse_chunk(P) 870#define check_chunk(P) 871#define check_malloced_chunk(P,N) 872#endif 873 874/* 875 Macro-based internal utilities 876*/ 877 878/* 879 Linking chunks in bin lists. 880 Call these only with variables, not arbitrary expressions, as arguments. 881*/ 882 883/* 884 Place chunk p of size s in its bin, in size order, 885 putting it ahead of others of same size. 886*/ 887 888#define frontlink(P, S, IDX, BK, FD) \ 889{ \ 890 if (S < MAX_SMALLBIN_SIZE) \ 891 { \ 892 IDX = smallbin_index(S); \ 893 mark_binblock(IDX); \ 894 BK = bin_at(IDX); \ 895 FD = BK->fd; \ 896 P->bk = BK; \ 897 P->fd = FD; \ 898 FD->bk = BK->fd = P; \ 899 } \ 900 else \ 901 { \ 902 IDX = bin_index(S); \ 903 BK = bin_at(IDX); \ 904 FD = BK->fd; \ 905 if (FD == BK) mark_binblock(IDX); \ 906 else \ 907 { \ 908 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ 909 BK = FD->bk; \ 910 } \ 911 P->bk = BK; \ 912 P->fd = FD; \ 913 FD->bk = BK->fd = P; \ 914 } \ 915} 916 917/* take a chunk off a list */ 918 919#define unlink(P, BK, FD) \ 920{ \ 921 BK = P->bk; \ 922 FD = P->fd; \ 923 FD->bk = BK; \ 924 BK->fd = FD; \ 925} \ 926 927/* Place p as the last remainder */ 928 929#define link_last_remainder(P) \ 930{ \ 931 last_remainder->fd = last_remainder->bk = P; \ 932 P->fd = P->bk = last_remainder; \ 933} 934 935/* Clear the last_remainder bin */ 936 937#define clear_last_remainder \ 938 (last_remainder->fd = last_remainder->bk = last_remainder) 939 940/* Routines dealing with mmap(). */ 941 942#if HAVE_MMAP 943 944#if __STD_C 945static mchunkptr mmap_chunk(size_t size) 946#else 947static mchunkptr mmap_chunk(size) size_t size; 948#endif 949{ 950 size_t page_mask = malloc_getpagesize - 1; 951 mchunkptr p; 952 953#ifndef MAP_ANONYMOUS 954 static int fd = -1; 955#endif 956 957 if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */ 958 959 /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because 960 * there is no following chunk whose prev_size field could be used. 961 */ 962 size = (size + SIZE_SZ + page_mask) & ~page_mask; 963 964#ifdef MAP_ANONYMOUS 965 p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, 966 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 967#else /* !MAP_ANONYMOUS */ 968 if (fd < 0) 969 { 970 fd = open("/dev/zero", O_RDWR); 971 if(fd < 0) return 0; 972 } 973 p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); 974#endif 975 976 if(p == (mchunkptr)-1) return 0; 977 978 n_mmaps++; 979 if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; 980 981 /* We demand that eight bytes into a page must be 8-byte aligned. */ 982 assert(aligned_OK(chunk2mem(p))); 983 984 /* The offset to the start of the mmapped region is stored 985 * in the prev_size field of the chunk; normally it is zero, 986 * but that can be changed in memalign(). 
987 */ 988 p->prev_size = 0; 989 set_head(p, size|IS_MMAPPED); 990 991 mmapped_mem += size; 992 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) 993 max_mmapped_mem = mmapped_mem; 994 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) 995 max_total_mem = mmapped_mem + sbrked_mem; 996 return p; 997} 998 999#if __STD_C 1000static void munmap_chunk(mchunkptr p) 1001#else 1002static void munmap_chunk(p) mchunkptr p; 1003#endif 1004{ 1005 INTERNAL_SIZE_T size = chunksize(p); 1006 int ret; 1007 1008 assert (chunk_is_mmapped(p)); 1009 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); 1010 assert((n_mmaps > 0)); 1011 assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); 1012 1013 n_mmaps--; 1014 mmapped_mem -= (size + p->prev_size); 1015 1016 ret = munmap((char *)p - p->prev_size, size + p->prev_size); 1017 1018 /* munmap returns non-zero on failure */ 1019 assert(ret == 0); 1020} 1021 1022#if HAVE_MREMAP 1023 1024#if __STD_C 1025static mchunkptr mremap_chunk(mchunkptr p, size_t new_size) 1026#else 1027static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; 1028#endif 1029{ 1030 size_t page_mask = malloc_getpagesize - 1; 1031 INTERNAL_SIZE_T offset = p->prev_size; 1032 INTERNAL_SIZE_T size = chunksize(p); 1033 char *cp; 1034 1035 assert (chunk_is_mmapped(p)); 1036 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); 1037 assert((n_mmaps > 0)); 1038 assert(((size + offset) & (malloc_getpagesize-1)) == 0); 1039 1040 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ 1041 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; 1042 1043 cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); 1044 1045 if (cp == (char *)-1) return 0; 1046 1047 p = (mchunkptr)(cp + offset); 1048 1049 assert(aligned_OK(chunk2mem(p))); 1050 1051 assert((p->prev_size == offset)); 1052 set_head(p, (new_size - offset)|IS_MMAPPED); 1053 1054 mmapped_mem -= size + offset; 1055 mmapped_mem += new_size; 1056 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) 1057 max_mmapped_mem = mmapped_mem; 1058 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) 1059 max_total_mem = mmapped_mem + sbrked_mem; 1060 return p; 1061} 1062 1063#endif /* HAVE_MREMAP */ 1064 1065#endif /* HAVE_MMAP */ 1066 1067/* 1068 Extend the top-most chunk by obtaining memory from system. 1069 Main interface to sbrk (but see also malloc_trim). 1070*/ 1071 1072#if __STD_C 1073static void malloc_extend_top(INTERNAL_SIZE_T nb) 1074#else 1075static void malloc_extend_top(nb) INTERNAL_SIZE_T nb; 1076#endif 1077{ 1078 char* brk; /* return value from sbrk */ 1079 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */ 1080 INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ 1081 char* new_brk; /* return of 2nd sbrk call */ 1082 INTERNAL_SIZE_T top_size; /* new size of top chunk */ 1083 1084 mchunkptr old_top = top; /* Record state of old top */ 1085 INTERNAL_SIZE_T old_top_size = chunksize(old_top); 1086 char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); 1087 1088 /* Pad request with top_pad plus minimal overhead */ 1089 1090 INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; 1091 unsigned long pagesz = malloc_getpagesize; 1092 1093 /* If not the first time through, round to preserve page boundary */ 1094 /* Otherwise, we need to correct to a page size below anyway. */ 1095 /* (We also correct below if an intervening foreign sbrk call.) 
*/ 1096 1097 if (sbrk_base != (char*)(-1)) 1098 sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); 1099 1100 brk = (char*)(MORECORE (sbrk_size)); 1101 1102 /* Fail if sbrk failed or if a foreign sbrk call killed our space */ 1103 if (brk == (char*)(MORECORE_FAILURE) || 1104 (brk < old_end && old_top != initial_top)) 1105 return; 1106 1107 sbrked_mem += sbrk_size; 1108 1109 if (brk == old_end) /* can just add bytes to current top */ 1110 { 1111 top_size = sbrk_size + old_top_size; 1112 set_head(top, top_size | PREV_INUSE); 1113 } 1114 else 1115 { 1116 if (sbrk_base == (char*)(-1)) /* First time through. Record base */ 1117 sbrk_base = brk; 1118 else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */ 1119 sbrked_mem += brk - (char*)old_end; 1120 1121 /* Guarantee alignment of first new chunk made from this space */ 1122 front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK; 1123 if (front_misalign > 0) 1124 { 1125 correction = (MALLOC_ALIGNMENT) - front_misalign; 1126 brk += correction; 1127 } 1128 else 1129 correction = 0; 1130 1131 /* Guarantee the next brk will be at a page boundary */ 1132 1133 correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) & 1134 ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size)); 1135 1136 /* Allocate correction */ 1137 new_brk = (char*)(MORECORE (correction)); 1138 if (new_brk == (char*)(MORECORE_FAILURE)) return; 1139 1140 sbrked_mem += correction; 1141 1142 top = (mchunkptr)brk; 1143 top_size = new_brk - brk + correction; 1144 set_head(top, top_size | PREV_INUSE); 1145 1146 if (old_top != initial_top) 1147 { 1148 1149 /* There must have been an intervening foreign sbrk call. */ 1150 /* A double fencepost is necessary to prevent consolidation */ 1151 1152 /* If not enough space to do this, then user did something very wrong */ 1153 if (old_top_size < MINSIZE) 1154 { 1155 set_head(top, PREV_INUSE); /* will force null return from malloc */ 1156 return; 1157 } 1158 1159 /* Also keep size a multiple of MALLOC_ALIGNMENT */ 1160 old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; 1161 set_head_size(old_top, old_top_size); 1162 chunk_at_offset(old_top, old_top_size )->size = 1163 SIZE_SZ|PREV_INUSE; 1164 chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = 1165 SIZE_SZ|PREV_INUSE; 1166 /* If possible, release the rest. */ 1167 if (old_top_size >= MINSIZE) 1168 fREe(chunk2mem(old_top)); 1169 } 1170 } 1171 1172 if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) 1173 max_sbrked_mem = sbrked_mem; 1174 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) 1175 max_total_mem = mmapped_mem + sbrked_mem; 1176 1177 /* We always land on a page boundary */ 1178 assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0); 1179} 1180 1181/* Main public routines */ 1182 1183/* 1184 Malloc Algorthim: 1185 1186 The requested size is first converted into a usable form, `nb'. 1187 This currently means to add 4 bytes overhead plus possibly more to 1188 obtain 8-byte alignment and/or to obtain a size of at least 1189 MINSIZE (currently 16 bytes), the smallest allocatable size. 1190 (All fits are considered `exact' if they are within MINSIZE bytes.) 1191 1192 From there, the first successful of the following steps is taken: 1193 1194 1. The bin corresponding to the request size is scanned, and if 1195 a chunk of exactly the right size is found, it is taken. 1196 1197 2. The most recently remaindered chunk is used if it is big 1198 enough. 
This is a form of (roving) first fit, used only in 1199 the absence of exact fits. Runs of consecutive requests use 1200 the remainder of the chunk used for the previous such request 1201 whenever possible. This limited use of a first-fit style 1202 allocation strategy tends to give contiguous chunks 1203 coextensive lifetimes, which improves locality and can reduce 1204 fragmentation in the long run. 1205 1206 3. Other bins are scanned in increasing size order, using a 1207 chunk big enough to fulfill the request, and splitting off 1208 any remainder. This search is strictly by best-fit; i.e., 1209 the smallest (with ties going to approximately the least 1210 recently used) chunk that fits is selected. 1211 1212 4. If large enough, the chunk bordering the end of memory 1213 (`top') is split off. (This use of `top' is in accord with 1214 the best-fit search rule. In effect, `top' is treated as 1215 larger (and thus less well fitting) than any other available 1216 chunk since it can be extended to be as large as necessary 1217 (up to system limitations). 1218 1219 5. If the request size meets the mmap threshold and the 1220 system supports mmap, and there are few enough currently 1221 allocated mmapped regions, and a call to mmap succeeds, 1222 the request is allocated via direct memory mapping. 1223 1224 6. Otherwise, the top of memory is extended by 1225 obtaining more space from the system (normally using sbrk, 1226 but definable to anything else via the MORECORE macro). 1227 Memory is gathered from the system (in system page-sized 1228 units) in a way that allows chunks obtained across different 1229 sbrk calls to be consolidated, but does not require 1230 contiguous memory. Thus, it should be safe to intersperse 1231 mallocs with other sbrk calls. 1232 1233 All allocations are made from the the `lowest' part of any found 1234 chunk. (The implementation invariant is that prev_inuse is 1235 always true of any allocated chunk; i.e., that each allocated 1236 chunk borders either a previously allocated and still in-use chunk, 1237 or the base of its memory arena.) 
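  As a rough worked example (figures assume SIZE_SZ == 4, i.e. a 32-bit
  INTERNAL_SIZE_T, matching the "currently 16 bytes" MINSIZE above), a
  request for 20 bytes is converted as

      nb = request2size(20)     ->  (20 + 4 + 7) & ~7  ==  24
      is_small_request(24)      ->  24 < 512 - 8, so use the small bins
      smallbin_index(24)        ->  24 >> 3  ==  3

  so step 1 looks in bin 3 (whose chunks are all exactly 24 bytes, counting
  overhead) and falls through to the later steps only if that bin and its
  neighbour are empty.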
1238 1239*/ 1240 1241STATIC_IF_MCHECK 1242#if __STD_C 1243Void_t* mALLOc_impl(size_t bytes) 1244#else 1245Void_t* mALLOc_impl(bytes) size_t bytes; 1246#endif 1247{ 1248 mchunkptr victim; /* inspected/selected chunk */ 1249 INTERNAL_SIZE_T victim_size; /* its size */ 1250 int idx; /* index for bin traversal */ 1251 mbinptr bin; /* associated bin */ 1252 mchunkptr remainder; /* remainder from a split */ 1253 long remainder_size; /* its size */ 1254 int remainder_index; /* its bin index */ 1255 unsigned long block; /* block traverser bit */ 1256 int startidx; /* first bin of a traversed block */ 1257 mchunkptr fwd; /* misc temp for linking */ 1258 mchunkptr bck; /* misc temp for linking */ 1259 mbinptr q; /* misc temp */ 1260 1261 INTERNAL_SIZE_T nb; 1262 1263#if CONFIG_IS_ENABLED(SYS_MALLOC_F) 1264 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) 1265 return malloc_simple(bytes); 1266#endif 1267 1268 if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) { 1269 if (--malloc_max_allocs < 0) 1270 return NULL; 1271 } 1272 1273 /* check if mem_malloc_init() was run */ 1274 if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) { 1275 /* not initialized yet */ 1276 return NULL; 1277 } 1278 1279 if (bytes > CONFIG_SYS_MALLOC_LEN || (long)bytes < 0) 1280 return NULL; 1281 1282 nb = request2size(bytes); /* padded request size; */ 1283 1284 /* Check for exact match in a bin */ 1285 1286 if (is_small_request(nb)) /* Faster version for small requests */ 1287 { 1288 idx = smallbin_index(nb); 1289 1290 /* No traversal or size check necessary for small bins. */ 1291 1292 q = bin_at(idx); 1293 victim = last(q); 1294 1295 /* Also scan the next one, since it would have a remainder < MINSIZE */ 1296 if (victim == q) 1297 { 1298 q = next_bin(q); 1299 victim = last(q); 1300 } 1301 if (victim != q) 1302 { 1303 victim_size = chunksize(victim); 1304 unlink(victim, bck, fwd); 1305 set_inuse_bit_at_offset(victim, victim_size); 1306 check_malloced_chunk(victim, nb); 1307 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1308 return chunk2mem(victim); 1309 } 1310 1311 idx += 2; /* Set for bin scan below. We've already scanned 2 bins. 
*/ 1312 1313 } 1314 else 1315 { 1316 idx = bin_index(nb); 1317 bin = bin_at(idx); 1318 1319 for (victim = last(bin); victim != bin; victim = victim->bk) 1320 { 1321 victim_size = chunksize(victim); 1322 remainder_size = victim_size - nb; 1323 1324 if (remainder_size >= (long)MINSIZE) /* too big */ 1325 { 1326 --idx; /* adjust to rescan below after checking last remainder */ 1327 break; 1328 } 1329 1330 else if (remainder_size >= 0) /* exact fit */ 1331 { 1332 unlink(victim, bck, fwd); 1333 set_inuse_bit_at_offset(victim, victim_size); 1334 check_malloced_chunk(victim, nb); 1335 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1336 return chunk2mem(victim); 1337 } 1338 } 1339 1340 ++idx; 1341 1342 } 1343 1344 /* Try to use the last split-off remainder */ 1345 1346 if ( (victim = last_remainder->fd) != last_remainder) 1347 { 1348 victim_size = chunksize(victim); 1349 remainder_size = victim_size - nb; 1350 1351 if (remainder_size >= (long)MINSIZE) /* re-split */ 1352 { 1353 remainder = chunk_at_offset(victim, nb); 1354 set_head(victim, nb | PREV_INUSE); 1355 link_last_remainder(remainder); 1356 set_head(remainder, remainder_size | PREV_INUSE); 1357 set_foot(remainder, remainder_size); 1358 check_malloced_chunk(victim, nb); 1359 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1360 return chunk2mem(victim); 1361 } 1362 1363 clear_last_remainder; 1364 1365 if (remainder_size >= 0) /* exhaust */ 1366 { 1367 set_inuse_bit_at_offset(victim, victim_size); 1368 check_malloced_chunk(victim, nb); 1369 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1370 return chunk2mem(victim); 1371 } 1372 1373 /* Else place in bin */ 1374 1375 frontlink(victim, victim_size, remainder_index, bck, fwd); 1376 } 1377 1378 /* 1379 If there are any possibly nonempty big-enough blocks, 1380 search for best fitting chunk by scanning bins in blockwidth units. 1381 */ 1382 1383 if ( (block = idx2binblock(idx)) <= binblocks_r) 1384 { 1385 1386 /* Get to the first marked block */ 1387 1388 if ( (block & binblocks_r) == 0) 1389 { 1390 /* force to an even block boundary */ 1391 idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; 1392 block <<= 1; 1393 while ((block & binblocks_r) == 0) 1394 { 1395 idx += BINBLOCKWIDTH; 1396 block <<= 1; 1397 } 1398 } 1399 1400 /* For each possibly nonempty block ... */ 1401 for (;;) 1402 { 1403 startidx = idx; /* (track incomplete blocks) */ 1404 q = bin = bin_at(idx); 1405 1406 /* For each bin in this block ... */ 1407 do 1408 { 1409 /* Find and use first big enough chunk ... 
*/ 1410 1411 for (victim = last(bin); victim != bin; victim = victim->bk) 1412 { 1413 victim_size = chunksize(victim); 1414 remainder_size = victim_size - nb; 1415 1416 if (remainder_size >= (long)MINSIZE) /* split */ 1417 { 1418 remainder = chunk_at_offset(victim, nb); 1419 set_head(victim, nb | PREV_INUSE); 1420 unlink(victim, bck, fwd); 1421 link_last_remainder(remainder); 1422 set_head(remainder, remainder_size | PREV_INUSE); 1423 set_foot(remainder, remainder_size); 1424 check_malloced_chunk(victim, nb); 1425 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1426 return chunk2mem(victim); 1427 } 1428 1429 else if (remainder_size >= 0) /* take */ 1430 { 1431 set_inuse_bit_at_offset(victim, victim_size); 1432 unlink(victim, bck, fwd); 1433 check_malloced_chunk(victim, nb); 1434 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1435 return chunk2mem(victim); 1436 } 1437 1438 } 1439 1440 bin = next_bin(bin); 1441 1442 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0); 1443 1444 /* Clear out the block bit. */ 1445 1446 do /* Possibly backtrack to try to clear a partial block */ 1447 { 1448 if ((startidx & (BINBLOCKWIDTH - 1)) == 0) 1449 { 1450 av_[1] = (mbinptr)(binblocks_r & ~block); 1451 break; 1452 } 1453 --startidx; 1454 q = prev_bin(q); 1455 } while (first(q) == q); 1456 1457 /* Get to the next possibly nonempty block */ 1458 1459 if ( (block <<= 1) <= binblocks_r && (block != 0) ) 1460 { 1461 while ((block & binblocks_r) == 0) 1462 { 1463 idx += BINBLOCKWIDTH; 1464 block <<= 1; 1465 } 1466 } 1467 else 1468 break; 1469 } 1470 } 1471 1472 /* Try to use top chunk */ 1473 1474 /* Require that there be a remainder, ensuring top always exists */ 1475 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) 1476 { 1477 1478#if HAVE_MMAP 1479 /* If big and would otherwise need to extend, try to use mmap instead */ 1480 if ((unsigned long)nb >= (unsigned long)mmap_threshold && 1481 (victim = mmap_chunk(nb))) 1482 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1483 return chunk2mem(victim); 1484#endif 1485 1486 /* Try to extend */ 1487 malloc_extend_top(nb); 1488 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) 1489 return NULL; /* propagate failure */ 1490 } 1491 1492 victim = top; 1493 set_head(victim, nb | PREV_INUSE); 1494 top = chunk_at_offset(victim, nb); 1495 set_head(top, remainder_size | PREV_INUSE); 1496 check_malloced_chunk(victim, nb); 1497 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false); 1498 return chunk2mem(victim); 1499 1500} 1501 1502/* 1503 1504 free() algorithm : 1505 1506 cases: 1507 1508 1. free(0) has no effect. 1509 1510 2. If the chunk was allocated via mmap, it is release via munmap(). 1511 1512 3. If a returned chunk borders the current high end of memory, 1513 it is consolidated into the top, and if the total unused 1514 topmost memory exceeds the trim threshold, malloc_trim is 1515 called. 1516 1517 4. Other chunks are consolidated as they arrive, and 1518 placed in corresponding bins. (This includes the case of 1519 consolidating with the current `last_remainder'). 
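  A rough sketch of case 4 (not a complete trace of the code below):
  freeing a chunk p whose physical successor `next' is also free, and is
  not `top', goes approximately

      sz  = p->size & ~PREV_INUSE          size of the freed chunk
      sz += chunksize(next)                forward consolidation
      unlink(next, bck, fwd)               take next out of its old bin
      set_head(p, sz | PREV_INUSE)         header of the merged chunk
      set_foot(p, sz)                      matching boundary tag
      frontlink(p, sz, idx, bck, fwd)      file the merged chunk in a bin

  Backward consolidation works the same way through p->prev_size whenever
  the PREV_INUSE bit of the freed chunk's old header is clear.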
1520 1521*/ 1522 1523STATIC_IF_MCHECK 1524#if __STD_C 1525void fREe_impl(Void_t* mem) 1526#else 1527void fREe_impl(mem) Void_t* mem; 1528#endif 1529{ 1530 mchunkptr p; /* chunk corresponding to mem */ 1531 INTERNAL_SIZE_T hd; /* its head field */ 1532 INTERNAL_SIZE_T sz; /* its size */ 1533 int idx; /* its bin index */ 1534 mchunkptr next; /* next contiguous chunk */ 1535 INTERNAL_SIZE_T nextsz; /* its size */ 1536 INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ 1537 mchunkptr bck; /* misc temp for linking */ 1538 mchunkptr fwd; /* misc temp for linking */ 1539 int islr; /* track whether merging with last_remainder */ 1540 1541#if CONFIG_IS_ENABLED(SYS_MALLOC_F) 1542 /* free() is a no-op - all the memory will be freed on relocation */ 1543 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) { 1544 VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ); 1545 return; 1546 } 1547#endif 1548 1549 if (mem == NULL) /* free(0) has no effect */ 1550 return; 1551 1552 p = mem2chunk(mem); 1553 hd = p->size; 1554 1555#if HAVE_MMAP 1556 if (hd & IS_MMAPPED) /* release mmapped memory. */ 1557 { 1558 munmap_chunk(p); 1559 return; 1560 } 1561#endif 1562 1563 check_inuse_chunk(p); 1564 1565 sz = hd & ~PREV_INUSE; 1566 next = chunk_at_offset(p, sz); 1567 nextsz = chunksize(next); 1568 VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ); 1569 1570 if (next == top) /* merge with top */ 1571 { 1572 sz += nextsz; 1573 1574 if (!(hd & PREV_INUSE)) /* consolidate backward */ 1575 { 1576 prevsz = p->prev_size; 1577 p = chunk_at_offset(p, -((long) prevsz)); 1578 sz += prevsz; 1579 unlink(p, bck, fwd); 1580 } 1581 1582 set_head(p, sz | PREV_INUSE); 1583 top = p; 1584 if ((unsigned long)(sz) >= (unsigned long)trim_threshold) 1585 malloc_trim(top_pad); 1586 return; 1587 } 1588 1589 set_head(next, nextsz); /* clear inuse bit */ 1590 1591 islr = 0; 1592 1593 if (!(hd & PREV_INUSE)) /* consolidate backward */ 1594 { 1595 prevsz = p->prev_size; 1596 p = chunk_at_offset(p, -((long) prevsz)); 1597 sz += prevsz; 1598 1599 if (p->fd == last_remainder) /* keep as last_remainder */ 1600 islr = 1; 1601 else 1602 unlink(p, bck, fwd); 1603 } 1604 1605 if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ 1606 { 1607 sz += nextsz; 1608 1609 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ 1610 { 1611 islr = 1; 1612 link_last_remainder(p); 1613 } 1614 else 1615 unlink(next, bck, fwd); 1616 } 1617 1618 set_head(p, sz | PREV_INUSE); 1619 set_foot(p, sz); 1620 if (!islr) 1621 frontlink(p, sz, idx, bck, fwd); 1622} 1623 1624/* 1625 1626 Realloc algorithm: 1627 1628 Chunks that were obtained via mmap cannot be extended or shrunk 1629 unless HAVE_MREMAP is defined, in which case mremap is used. 1630 Otherwise, if their reallocation is for additional space, they are 1631 copied. If for less, they are just left alone. 1632 1633 Otherwise, if the reallocation is for additional space, and the 1634 chunk can be extended, it is, else a malloc-copy-free sequence is 1635 taken. There are several different ways that a chunk could be 1636 extended. All are tried: 1637 1638 * Extending forward into following adjacent free chunk. 1639 * Shifting backwards, joining preceding adjacent space 1640 * Both shifting backwards and extending forward. 1641 * Extending into newly sbrked space 1642 1643 Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a 1644 size argument of zero (re)allocates a minimum-sized chunk. 
1645 1646 If the reallocation is for less space, and the new request is for 1647 a `small' (<512 bytes) size, then the newly unused space is lopped 1648 off and freed. 1649 1650 The old unix realloc convention of allowing the last-free'd chunk 1651 to be used as an argument to realloc is no longer supported. 1652 I don't know of any programs still relying on this feature, 1653 and allowing it would also allow too many other incorrect 1654 usages of realloc to be sensible. 1655 1656*/ 1657 1658STATIC_IF_MCHECK 1659#if __STD_C 1660Void_t* rEALLOc_impl(Void_t* oldmem, size_t bytes) 1661#else 1662Void_t* rEALLOc_impl(oldmem, bytes) Void_t* oldmem; size_t bytes; 1663#endif 1664{ 1665 INTERNAL_SIZE_T nb; /* padded request size */ 1666 1667 mchunkptr oldp; /* chunk corresponding to oldmem */ 1668 INTERNAL_SIZE_T oldsize; /* its size */ 1669 1670 mchunkptr newp; /* chunk to return */ 1671 INTERNAL_SIZE_T newsize; /* its size */ 1672 Void_t* newmem; /* corresponding user mem */ 1673 1674 mchunkptr next; /* next contiguous chunk after oldp */ 1675 INTERNAL_SIZE_T nextsize; /* its size */ 1676 1677 mchunkptr prev; /* previous contiguous chunk before oldp */ 1678 INTERNAL_SIZE_T prevsize; /* its size */ 1679 1680 mchunkptr remainder; /* holds split off extra space from newp */ 1681 INTERNAL_SIZE_T remainder_size; /* its size */ 1682 1683 mchunkptr bck; /* misc temp for linking */ 1684 mchunkptr fwd; /* misc temp for linking */ 1685 1686#ifdef REALLOC_ZERO_BYTES_FREES 1687 if (!bytes) { 1688 fREe_impl(oldmem); 1689 return NULL; 1690 } 1691#endif 1692 1693 if (bytes > CONFIG_SYS_MALLOC_LEN || (long)bytes < 0) 1694 return NULL; 1695 1696 /* realloc of null is supposed to be same as malloc */ 1697 if (oldmem == NULL) return mALLOc_impl(bytes); 1698 1699#if CONFIG_IS_ENABLED(SYS_MALLOC_F) 1700 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) { 1701 /* This is harder to support and should not be needed */ 1702 panic("pre-reloc realloc() is not supported"); 1703 } 1704#endif 1705 if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) { 1706 if (--malloc_max_allocs < 0) 1707 return NULL; 1708 } 1709 1710 newp = oldp = mem2chunk(oldmem); 1711 newsize = oldsize = chunksize(oldp); 1712 1713 nb = request2size(bytes); 1714 1715#if HAVE_MMAP 1716 if (chunk_is_mmapped(oldp)) 1717 { 1718#if HAVE_MREMAP 1719 newp = mremap_chunk(oldp, nb); 1720 if(newp) return chunk2mem(newp); 1721#endif 1722 /* Note the extra SIZE_SZ overhead. */ 1723 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */ 1724 /* Must alloc, copy, free. 
*/ 1725 newmem = mALLOc_impl(bytes); 1726 if (!newmem) 1727 return NULL; /* propagate failure */ 1728 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); 1729 munmap_chunk(oldp); 1730 return newmem; 1731 } 1732#endif 1733 1734 check_inuse_chunk(oldp); 1735 1736 if ((long)(oldsize) < (long)(nb)) 1737 { 1738 1739 /* Try expanding forward */ 1740 1741 next = chunk_at_offset(oldp, oldsize); 1742 if (next == top || !inuse(next)) 1743 { 1744 nextsize = chunksize(next); 1745 1746 /* Forward into top only if a remainder */ 1747 if (next == top) 1748 { 1749 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) 1750 { 1751 newsize += nextsize; 1752 top = chunk_at_offset(oldp, nb); 1753 set_head(top, (newsize - nb) | PREV_INUSE); 1754 set_head_size(oldp, nb); 1755 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ); 1756 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes); 1757 return chunk2mem(oldp); 1758 } 1759 } 1760 1761 /* Forward into next chunk */ 1762 else if (((long)(nextsize + newsize) >= (long)(nb))) 1763 { 1764 unlink(next, bck, fwd); 1765 newsize += nextsize; 1766 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ); 1767 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes); 1768 goto split; 1769 } 1770 } 1771 else 1772 { 1773 next = NULL; 1774 nextsize = 0; 1775 } 1776 1777 /* Try shifting backwards. */ 1778 1779 if (!prev_inuse(oldp)) 1780 { 1781 prev = prev_chunk(oldp); 1782 prevsize = chunksize(prev); 1783 1784 /* try forward + backward first to save a later consolidation */ 1785 1786 if (next != NULL) 1787 { 1788 /* into top */ 1789 if (next == top) 1790 { 1791 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) 1792 { 1793 unlink(prev, bck, fwd); 1794 newp = prev; 1795 newsize += prevsize + nextsize; 1796 newmem = chunk2mem(newp); 1797 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false); 1798 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); 1799 top = chunk_at_offset(newp, nb); 1800 set_head(top, (newsize - nb) | PREV_INUSE); 1801 set_head_size(newp, nb); 1802 VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ); 1803 return newmem; 1804 } 1805 } 1806 1807 /* into next chunk */ 1808 else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) 1809 { 1810 unlink(next, bck, fwd); 1811 unlink(prev, bck, fwd); 1812 newp = prev; 1813 newsize += nextsize + prevsize; 1814 newmem = chunk2mem(newp); 1815 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false); 1816 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); 1817 goto split; 1818 } 1819 } 1820 1821 /* backward only */ 1822 if (prev != NULL && (long)(prevsize + newsize) >= (long)nb) 1823 { 1824 unlink(prev, bck, fwd); 1825 newp = prev; 1826 newsize += prevsize; 1827 newmem = chunk2mem(newp); 1828 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false); 1829 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); 1830 goto split; 1831 } 1832 } 1833 1834 /* Must allocate */ 1835 1836 newmem = mALLOc_impl (bytes); 1837 1838 if (newmem == NULL) /* propagate failure */ 1839 return NULL; 1840 1841 /* Avoid copy if newp is next chunk after oldp. */ 1842 /* (This can only happen when new chunk is sbrk'ed.) 
*/ 1843 1844 if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) 1845 { 1846 newsize += chunksize(newp); 1847 newp = oldp; 1848 goto split; 1849 } 1850 1851 /* Otherwise copy, free, and exit */ 1852 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); 1853 fREe_impl(oldmem); 1854 return newmem; 1855 } else { 1856 VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ); 1857 VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes); 1858 } 1859 1860 split: /* split off extra room in old or expanded chunk */ 1861 1862 if (newsize - nb >= MINSIZE) /* split off remainder */ 1863 { 1864 remainder = chunk_at_offset(newp, nb); 1865 remainder_size = newsize - nb; 1866 set_head_size(newp, nb); 1867 set_head(remainder, remainder_size | PREV_INUSE); 1868 set_inuse_bit_at_offset(remainder, remainder_size); 1869 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ, 1870 false); 1871 fREe_impl(chunk2mem(remainder)); /* let free() deal with it */ 1872 } 1873 else 1874 { 1875 set_head_size(newp, newsize); 1876 set_inuse_bit_at_offset(newp, newsize); 1877 } 1878 1879 check_inuse_chunk(newp); 1880 return chunk2mem(newp); 1881} 1882 1883/* 1884 1885 memalign algorithm: 1886 1887 memalign requests more than enough space from malloc, finds a spot 1888 within that chunk that meets the alignment request, and then 1889 possibly frees the leading and trailing space. 1890 1891 The alignment argument must be a power of two. This property is not 1892 checked by memalign, so misuse may result in random runtime errors. 1893 1894 8-byte alignment is guaranteed by normal malloc calls, so don't 1895 bother calling memalign with an argument of 8 or less. 1896 1897 Overreliance on memalign is a sure way to fragment space. 1898 1899*/ 1900 1901STATIC_IF_MCHECK 1902#if __STD_C 1903Void_t* mEMALIGn_impl(size_t alignment, size_t bytes) 1904#else 1905Void_t* mEMALIGn_impl(alignment, bytes) size_t alignment; size_t bytes; 1906#endif 1907{ 1908 INTERNAL_SIZE_T nb; /* padded request size */ 1909 char* m; /* memory returned by malloc call */ 1910 mchunkptr p; /* corresponding chunk */ 1911 char* brk; /* alignment point within p */ 1912 mchunkptr newp; /* chunk to return */ 1913 INTERNAL_SIZE_T newsize; /* its size */ 1914 INTERNAL_SIZE_T leadsize; /* leading space befor alignment point */ 1915 mchunkptr remainder; /* spare room at end to split off */ 1916 long remainder_size; /* its size */ 1917 1918 if (bytes > CONFIG_SYS_MALLOC_LEN || (long)bytes < 0) 1919 return NULL; 1920 1921#if CONFIG_IS_ENABLED(SYS_MALLOC_F) 1922 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) { 1923 return memalign_simple(alignment, bytes); 1924 } 1925#endif 1926 1927 /* If need less alignment than we give anyway, just relay to malloc */ 1928 1929 if (alignment <= MALLOC_ALIGNMENT) return mALLOc_impl(bytes); 1930 1931 /* Otherwise, ensure that it is at least a minimum chunk size */ 1932 1933 if (alignment < MINSIZE) alignment = MINSIZE; 1934 1935 /* Call malloc with worst case padding to hit alignment. */ 1936 1937 nb = request2size(bytes); 1938 m = (char*)(mALLOc_impl(nb + alignment + MINSIZE)); 1939 1940 /* 1941 * The attempt to over-allocate (with a size large enough to guarantee the 1942 * ability to find an aligned region within allocated memory) failed. 1943 * 1944 * Try again, this time only allocating exactly the size the user wants. If 1945 * the allocation now succeeds and just happens to be aligned, we can still 1946 * fulfill the user's request. 
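 *
 * A rough numeric illustration (the addresses are made up): if the plain
 * allocation returns m == 0x1004 and alignment == 0x40, then
 * extra == 0x40 - (0x1004 % 0x40) == 0x3c and the retry asks for
 * bytes + 0x3c. If the retried block is even more misaligned than the
 * first one (extra2 > extra), it is freed and m is reset to NULL;
 * otherwise the code falls through to the usual splitting logic below.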
/*
    valloc just invokes memalign with alignment argument equal
    to the page size of the system (or as near to this as can
    be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate request
*/

#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
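/*
 * Added worked example: with a 4096-byte page, a pvalloc request for 5000
 * bytes is rounded by (bytes + pagesize - 1) & ~(pagesize - 1) to
 * (5000 + 4095) & ~4095 = 8192, i.e. two whole pages, which is then passed
 * to mEMALIGn with page alignment.
 */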
/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/

STATIC_IF_MCHECK
#if __STD_C
Void_t* cALLOc_impl(size_t n, size_t elem_size)
#else
Void_t* cALLOc_impl(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;

  /* check if expand_top called, in which case don't need to clear */
#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
  Void_t* mem = mALLOc_impl (sz);

  if ((long)n < 0) return NULL;

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
    if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
      memset(mem, 0, sz);
      return mem;
    }
#endif
    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */

#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    VALGRIND_MAKE_MEM_DEFINED(mem, sz);
    return mem;
  }
}
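/*
 * Added illustrative sketch (not part of the original dlmalloc sources,
 * kept out of the build). The sz = n * elem_size product above is not
 * checked for overflow, so a caller whose element count comes from
 * untrusted input may want to guard the multiplication itself before
 * calling calloc. The helper name is made up for the example.
 */
#if 0
static void *calloc_checked_example(size_t n, size_t elem_size)
{
	/* reject products that would wrap past the top of size_t */
	if (elem_size && n > (size_t)-1 / elem_size)
		return NULL;
	return calloc(n, elem_size);
}
#endif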
/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif

#ifdef MCHECK_HEAP_PROTECTION
 #include "mcheck_core.inc.h"
 #if !__STD_C
  #error "must have __STD_C"
 #endif

Void_t *mALLOc(size_t bytes)
{
	mcheck_pedantic_prehook();
	size_t fullsz = mcheck_alloc_prehook(bytes);
	void *p = mALLOc_impl(fullsz);

	if (!p)
		return p;
	return mcheck_alloc_posthook(p, bytes);
}

void fREe(Void_t *mem) { fREe_impl(mcheck_free_prehook(mem)); }

Void_t *rEALLOc(Void_t *oldmem, size_t bytes)
{
	mcheck_pedantic_prehook();
	if (bytes == 0) {
		if (oldmem)
			fREe(oldmem);
		return NULL;
	}

	if (oldmem == NULL)
		return mALLOc(bytes);

	void *p = mcheck_reallocfree_prehook(oldmem);
	size_t newsz = mcheck_alloc_prehook(bytes);

	p = rEALLOc_impl(p, newsz);
	if (!p)
		return p;
	return mcheck_alloc_noclean_posthook(p, bytes);
}

Void_t *mEMALIGn(size_t alignment, size_t bytes)
{
	mcheck_pedantic_prehook();
	size_t fullsz = mcheck_memalign_prehook(alignment, bytes);
	void *p = mEMALIGn_impl(alignment, fullsz);

	if (!p)
		return p;
	return mcheck_memalign_posthook(alignment, p, bytes);
}

// pvALLOc and vALLOc redirect to mEMALIGn and are defined in this file, so they need no wrapping.

Void_t *cALLOc(size_t n, size_t elem_size)
{
	mcheck_pedantic_prehook();
	// NB: there is no overflow check here.
	size_t fullsz = mcheck_alloc_prehook(n * elem_size);
	void *p = cALLOc_impl(1, fullsz);

	if (!p)
		return p;
	return mcheck_alloc_noclean_posthook(p, n * elem_size);
}

// mcheck API {
int mcheck_pedantic(mcheck_abortfunc_t f)
{
	mcheck_initialize(f, 1);
	return 0;
}

int mcheck(mcheck_abortfunc_t f)
{
	mcheck_initialize(f, 0);
	return 0;
}

void mcheck_check_all(void) { mcheck_pedantic_check(); }

enum mcheck_status mprobe(void *__ptr) { return mcheck_mprobe(__ptr); }
// mcheck API }
#endif

/*

  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.

*/

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
        /* Try to figure out what we have */
        current_brk = (char*)(MORECORE (0));
        top_size = current_brk - (char*)top;
        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
        {
          sbrked_mem = current_brk - sbrk_base;
          set_head(top, top_size | PREV_INUSE);
        }
        check_chunk(top);
        return 0;
      }

      else
      {
        /* Success. Adjust top accordingly. */
        set_head(top, (top_size - extra) | PREV_INUSE);
        sbrked_mem -= extra;
        check_chunk(top);
        return 1;
      }
    }
  }
}
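/*
 * Added worked example (the MINSIZE value is an assumption for the example;
 * the real value depends on the configuration above): with pagesz = 4096,
 * pad = 0 and MINSIZE = 16, a top chunk of 20000 bytes gives
 * extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096 = (5 - 1) * 4096
 *       = 16384,
 * so four whole pages are handed back to the system and 3616 bytes remain
 * in the top chunk.
 */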
/*
  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not). You can use this many bytes without worrying about
    overwriting other allocated objects. Not a particularly great
    programming practice, but still sometimes useful.

*/

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
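/*
 * Added illustrative sketch (not part of the original dlmalloc sources,
 * kept out of the build). Because a request is rounded up to a whole
 * chunk, the usable size can exceed what was asked for, and it must be
 * re-queried for every allocation. The helper name is made up for the
 * example.
 */
#if 0
static void fill_whole_chunk_example(void)
{
	char *p = malloc(10);

	if (!p)
		return;
	/* legal: malloc_usable_size(p) is at least the 10 bytes requested */
	memset(p, 0xff, malloc_usable_size(p));
	free(p);
}
#endif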
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo(void)
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
           q = next_chunk(q))
        check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */

/*

  malloc_stats:

    Prints the amount of space obtained from the system (both
    via sbrk and mmap), the maximum amount (which may be more than
    current if malloc_trim and/or munmap got called), the maximum
    number of simultaneous mmap regions used, and the current number
    of bytes allocated via malloc (or realloc, etc) but not yet
    freed. (Note that this is the number of bytes allocated, not the
    number requested. It will be larger than the number requested
    because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats(void)
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
         (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
         (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
         (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
         (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo(void)
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */
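/*
 * Added illustrative note (not part of the original dlmalloc sources):
 * with DEBUG defined, the routines above give a quick heap summary during
 * bring-up, e.g.
 *
 *	malloc_stats();			 prints the totals listed above
 *	struct mallinfo mi = mALLINFo(); per-field access, e.g. mi.fordblks
 */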
/*
  mallopt:

    mallopt is the general SVID/XPG interface to tunable parameters.
    The format is to provide a (parameter-number, parameter-value) pair.
    mallopt then sets the corresponding parameter to the argument
    value if it can (i.e., so long as the value is meaningful),
    and returns 1 if successful else 0.

    See descriptions of tunable parameters above.

*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}
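/*
 * Added illustrative sketch (not part of the original dlmalloc sources,
 * kept out of the build). The parameter numbers accepted above are the
 * SVID-style M_* constants; code that frees large buffers and wants the
 * slack trimmed eagerly could tune them as follows (the values and helper
 * name are made up for the example).
 */
#if 0
static void tune_allocator_example(void)
{
	/* start trimming once 64 KiB of slack accumulates at the top */
	if (!mALLOPt(M_TRIM_THRESHOLD, 64 * 1024))
		;	/* a return of 0 means the parameter was not understood */

	/* keep 8 KiB of head-room whenever the arena has to be grown */
	mALLOPt(M_TOP_PAD, 8 * 1024);
}
#endif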
int initf_malloc(void)
{
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
	assert(gd->malloc_base);	/* Set up by crt0.S */
	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
	gd->malloc_ptr = 0;
#endif

	return 0;
}

void malloc_enable_testing(int max_allocs)
{
	malloc_testing = true;
	malloc_max_allocs = max_allocs;
}

void malloc_disable_testing(void)
{
	malloc_testing = false;
}

/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
        * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
          (e.g. WIN32 platforms)
        * Clean up header file inclusion for WIN32 platforms
        * Cleanup code to avoid Microsoft Visual C++ compiler complaints
        * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
          memory allocation routines
        * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
        * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
          usage of 'assert' in non-WIN32 code
        * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
          avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/