1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2024 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21#include "SDL_internal.h"
22
23/* This file contains portable memory management functions for SDL */
24
25#ifndef HAVE_MALLOC
26#define LACKS_SYS_TYPES_H
27#define LACKS_STDIO_H
28#define LACKS_STRINGS_H
29#define LACKS_STRING_H
30#define LACKS_STDLIB_H
31#define FORCEINLINE
32#define ABORT
33#define USE_LOCKS 1
34#define USE_DL_PREFIX
35
36/*
37 This is a version (aka dlmalloc) of malloc/free/realloc written by
38 Doug Lea and released to the public domain, as explained at
39 http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
40 comments, complaints, performance data, etc to dl@cs.oswego.edu
41
42* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
43 Note: There may be an updated version of this malloc obtainable at
44 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
45 Check before installing!
46
47* Quickstart
48
49 This library is all in one file to simplify the most common usage:
50 ftp it, compile it (-O3), and link it into another program. All of
51 the compile-time options default to reasonable values for use on
52 most platforms. You might later want to step through various
53 compile-time and dynamic tuning options.
54
55 For convenience, an include file for code using this malloc is at:
56 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
57 You don't really need this .h file unless you call functions not
58 defined in your system include files. The .h file contains only the
59 excerpts from this file needed for using this malloc on ANSI C/C++
60 systems, so long as you haven't changed compile-time options about
61 naming and tuning parameters. If you do, then you can create your
62 own malloc.h that does include all settings by cutting at the point
63 indicated below. Note that you may already by default be using a C
64 library containing a malloc that is based on some version of this
65 malloc (for example in linux). You might still want to use the one
66 in this file to customize settings or to avoid overheads associated
67 with library versions.
68
69* Vital statistics:
70
71 Supported pointer/size_t representation: 4 or 8 bytes
72 size_t MUST be an unsigned type of the same width as
73 pointers. (If you are using an ancient system that declares
74 size_t as a signed type, or need it to be a different width
75 than pointers, you can use a previous release of this malloc
76 (e.g. 2.7.2) supporting these.)
77
78 Alignment: 8 bytes (minimum)
79 This suffices for nearly all current machines and C compilers.
80 However, you can define MALLOC_ALIGNMENT to be wider than this
 if necessary (up to 128 bytes), at the expense of using more space.
82
83 Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes)
84 8 or 16 bytes (if 8byte sizes)
85 Each malloced chunk has a hidden word of overhead holding size
86 and status information, and additional cross-check word
87 if FOOTERS is defined.
88
89 Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
90 8-byte ptrs: 32 bytes (including overhead)
91
92 Even a request for zero bytes (i.e., malloc(0)) returns a
93 pointer to something of the minimum allocatable size.
94 The maximum overhead wastage (i.e., number of extra bytes
95 allocated than were requested in malloc) is less than or equal
96 to the minimum size, except for requests >= mmap_threshold that
97 are serviced via mmap(), where the worst case wastage is about
98 32 bytes plus the remainder from a system page (the minimal
99 mmap unit); typically 4096 or 8192 bytes.
100
101 Security: static-safe; optionally more or less
102 The "security" of malloc refers to the ability of malicious
103 code to accentuate the effects of errors (for example, freeing
104 space that is not currently malloc'ed or overwriting past the
105 ends of chunks) in code that calls malloc. This malloc
106 guarantees not to modify any memory locations below the base of
107 heap, i.e., static variables, even in the presence of usage
108 errors. The routines additionally detect most improper frees
109 and reallocs. All this holds as long as the static bookkeeping
110 for malloc itself is not corrupted by some other means. This
111 is only one aspect of security -- these checks do not, and
112 cannot, detect all possible programming errors.
113
114 If FOOTERS is defined nonzero, then each allocated chunk
115 carries an additional check word to verify that it was malloced
116 from its space. These check words are the same within each
117 execution of a program using malloc, but differ across
118 executions, so externally crafted fake chunks cannot be
119 freed. This improves security by rejecting frees/reallocs that
120 could corrupt heap memory, in addition to the checks preventing
121 writes to statics that are always on. This may further improve
122 security at the expense of time and space overhead. (Note that
123 FOOTERS may also be worth using with MSPACES.)
124
125 By default detected errors cause the program to abort (calling
126 "abort()"). You can override this to instead proceed past
127 errors by defining PROCEED_ON_ERROR. In this case, a bad free
128 has no effect, and a malloc that encounters a bad address
129 caused by user overwrites will ignore the bad address by
130 dropping pointers and indices to all known memory. This may
131 be appropriate for programs that should continue if at all
132 possible in the face of programming errors, although they may
133 run out of memory because dropped memory is never reclaimed.
134
135 If you don't like either of these options, you can define
136 CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
 else. And if you are sure that your program using malloc has
138 no errors or vulnerabilities, you can define INSECURE to 1,
139 which might (or might not) provide a small performance improvement.
140
141 It is also possible to limit the maximum total allocatable
142 space, using malloc_set_footprint_limit. This is not
143 designed as a security feature in itself (calls to set limits
144 are not screened or privileged), but may be useful as one
145 aspect of a secure implementation.
146
147 Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
148 When USE_LOCKS is defined, each public call to malloc, free,
149 etc is surrounded with a lock. By default, this uses a plain
 pthread mutex, win32 critical section, or a spin-lock if
151 available for the platform and not disabled by setting
152 USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined,
153 recursive versions are used instead (which are not required for
154 base functionality but may be needed in layered extensions).
155 Using a global lock is not especially fast, and can be a major
156 bottleneck. It is designed only to provide minimal protection
157 in concurrent environments, and to provide a basis for
158 extensions. If you are using malloc in a concurrent program,
159 consider instead using nedmalloc
160 (http://www.nedprod.com/programs/portable/nedmalloc/) or
161 ptmalloc (See http://www.malloc.de), which are derived from
162 versions of this malloc.
163
164 System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
165 This malloc can use unix sbrk or any emulation (invoked using
166 the CALL_MORECORE macro) and/or mmap/munmap or any emulation
167 (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
168 memory. On most unix systems, it tends to work best if both
169 MORECORE and MMAP are enabled. On Win32, it uses emulations
170 based on VirtualAlloc. It also uses common C library functions
171 like memset.
172
173 Compliance: I believe it is compliant with the Single Unix Specification
174 (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
175 others as well.
176
177* Overview of algorithms
178
179 This is not the fastest, most space-conserving, most portable, or
180 most tunable malloc ever written. However it is among the fastest
181 while also being among the most space-conserving, portable and
182 tunable. Consistent balance across these factors results in a good
183 general-purpose allocator for malloc-intensive programs.
184
185 In most ways, this malloc is a best-fit allocator. Generally, it
186 chooses the best-fitting existing chunk for a request, with ties
187 broken in approximately least-recently-used order. (This strategy
188 normally maintains low fragmentation.) However, for requests less
 than 256 bytes, it deviates from best-fit when there is not an
190 exactly fitting available chunk by preferring to use space adjacent
191 to that used for the previous small request, as well as by breaking
192 ties in approximately most-recently-used order. (These enhance
193 locality of series of small allocations.) And for very large requests
194 (>= 256Kb by default), it relies on system memory mapping
195 facilities, if supported. (This helps avoid carrying around and
196 possibly fragmenting memory used only for large chunks.)
197
198 All operations (except malloc_stats and mallinfo) have execution
199 times that are bounded by a constant factor of the number of bits in
200 a size_t, not counting any clearing in calloc or copying in realloc,
201 or actions surrounding MORECORE and MMAP that have times
202 proportional to the number of non-contiguous regions returned by
203 system allocation routines, which is often just 1. In real-time
204 applications, you can optionally suppress segment traversals using
205 NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
206 system allocators return non-contiguous spaces, at the typical
207 expense of carrying around more memory and increased fragmentation.
208
209 The implementation is not very modular and seriously overuses
210 macros. Perhaps someday all C compilers will do as good a job
211 inlining modular code as can now be done by brute-force expansion,
 but for now, enough of them seem not to.
213
214 Some compilers issue a lot of warnings about code that is
215 dead/unreachable only on some platforms, and also about intentional
216 uses of negation on unsigned types. All known cases of each can be
217 ignored.
218
219 For a longer but out of date high-level description, see
220 http://gee.cs.oswego.edu/dl/html/malloc.html
221
222* MSPACES
223 If MSPACES is defined, then in addition to malloc, free, etc.,
224 this file also defines mspace_malloc, mspace_free, etc. These
225 are versions of malloc routines that take an "mspace" argument
226 obtained using create_mspace, to control all internal bookkeeping.
227 If ONLY_MSPACES is defined, only these versions are compiled.
228 So if you would like to use this allocator for only some allocations,
229 and your system malloc for others, you can compile with
230 ONLY_MSPACES and then do something like...
231 static mspace mymspace = create_mspace(0,0); // for example
232 #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
233
234 (Note: If you only need one instance of an mspace, you can instead
235 use "USE_DL_PREFIX" to relabel the global malloc.)
236
237 You can similarly create thread-local allocators by storing
238 mspaces as thread-locals. For example:
239 static __thread mspace tlms = 0;
240 void* tlmalloc(size_t bytes) {
241 if (tlms == 0) tlms = create_mspace(0, 0);
242 return mspace_malloc(tlms, bytes);
243 }
244 void tlfree(void* mem) { mspace_free(tlms, mem); }
245
246 Unless FOOTERS is defined, each mspace is completely independent.
247 You cannot allocate from one and free to another (although
248 conformance is only weakly checked, so usage errors are not always
249 caught). If FOOTERS is defined, then each chunk carries around a tag
250 indicating its originating mspace, and frees are directed to their
251 originating spaces. Normally, this requires use of locks.
252
253 ------------------------- Compile-time options ---------------------------
254
255Be careful in setting #define values for numerical constants of type
256size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly cast. You can also
258use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.
259
260WIN32 default: defined if _WIN32 defined
261 Defining WIN32 sets up defaults for MS environment and compilers.
262 Otherwise defaults are for unix. Beware that there seem to be some
263 cases where this malloc might not be a pure drop-in replacement for
 Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
265 SetDIBits()) may be due to bugs in some video driver implementations
266 when pixel buffers are malloc()ed, and the region spans more than
267 one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
268 default granularity, pixel buffers may straddle virtual allocation
269 regions more often than when using the Microsoft allocator. You can
270 avoid this by using VirtualAlloc() and VirtualFree() for all pixel
271 buffers rather than using malloc(). If this is not possible,
272 recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
273 in cases where MSC and gcc (cygwin) are known to differ on WIN32,
274 conditions use _MSC_VER to distinguish them.
275
276DLMALLOC_EXPORT default: extern
277 Defines how public APIs are declared. If you want to export via a
278 Windows DLL, you might define this as
279 #define DLMALLOC_EXPORT extern __declspec(dllexport)
280 If you want a POSIX ELF shared object, you might use
281 #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))
282
283MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *))
284 Controls the minimum alignment for malloc'ed chunks. It must be a
285 power of two and at least 8, even on machines for which smaller
286 alignments would suffice. It may be defined as larger than this
287 though. Note however that code and data structures are optimized for
288 the case of 8-byte alignment.
289
290MSPACES default: 0 (false)
291 If true, compile in support for independent allocation spaces.
292 This is only supported if HAVE_MMAP is true.
293
294ONLY_MSPACES default: 0 (false)
295 If true, only compile in mspace versions, not regular versions.
296
297USE_LOCKS default: 0 (false)
298 Causes each call to each public routine to be surrounded with
299 pthread or WIN32 mutex lock/unlock. (If set true, this can be
300 overridden on a per-mspace basis for mspace versions.) If set to a
301 non-zero value other than 1, locks are used, but their
302 implementation is left out, so lock functions must be supplied manually,
303 as described below.
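  For example, when compiling with USE_LOCKS=2 you might supply a
  pthread-based implementation along the following lines. This is only a
  sketch; the macro names it defines (MLOCK_T, INITIAL_LOCK, DESTROY_LOCK,
  ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK) should be checked against the lock
  section below before relying on it.

    #define MLOCK_T           pthread_mutex_t
    #define INITIAL_LOCK(lk)  pthread_mutex_init(lk, NULL)
    #define DESTROY_LOCK(lk)  pthread_mutex_destroy(lk)
    #define ACQUIRE_LOCK(lk)  pthread_mutex_lock(lk)
    #define RELEASE_LOCK(lk)  pthread_mutex_unlock(lk)
    #define TRY_LOCK(lk)      (pthread_mutex_trylock(lk) == 0)
    static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;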
304
305USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available
306 If true, uses custom spin locks for locking. This is currently
 supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
308 MS compilers. Otherwise, posix locks or win32 critical sections are
309 used.
310
311USE_RECURSIVE_LOCKS default: not defined
312 If defined nonzero, uses recursive (aka reentrant) locks, otherwise
313 uses plain mutexes. This is not required for malloc proper, but may
314 be needed for layered allocators such as nedmalloc.
315
316LOCK_AT_FORK default: not defined
317 If defined nonzero, performs pthread_atfork upon initialization
318 to initialize child lock while holding parent lock. The implementation
319 assumes that pthread locks (not custom locks) are being used. In other
320 cases, you may need to customize the implementation.
321
322FOOTERS default: 0
323 If true, provide extra checking and dispatching by placing
324 information in the footers of allocated chunks. This adds
325 space and time overhead.
326
327INSECURE default: 0
328 If true, omit checks for usage errors and heap space overwrites.
329
330USE_DL_PREFIX default: NOT defined
331 Causes compiler to prefix all public routines with the string 'dl'.
332 This can be useful when you only want to use this malloc in one part
333 of a program, using your regular system malloc elsewhere.
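  For instance, a subsystem built with USE_DL_PREFIX can route its own
  allocations through this allocator while the rest of the program keeps
  using the system malloc (a minimal sketch):

    void* buf = dlmalloc(1024);  // served by this allocator
    void* tmp = malloc(1024);    // served by the system allocator
    free(tmp);                   // each pointer must be released by the
    dlfree(buf);                 // allocator that produced it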
334
335MALLOC_INSPECT_ALL default: NOT defined
336 If defined, compiles malloc_inspect_all and mspace_inspect_all, that
337 perform traversal of all heap space. Unless access to these
338 functions is otherwise restricted, you probably do not want to
339 include them in secure implementations.
340
341ABORT default: defined as abort()
342 Defines how to abort on failed checks. On most systems, a failed
343 check cannot die with an "assert" or even print an informative
344 message, because the underlying print routines in turn call malloc,
345 which will fail again. Generally, the best policy is to simply call
346 abort(). It's not very useful to do more than this because many
347 errors due to overwriting will show up as address faults (null, odd
348 addresses etc) rather than malloc-triggered checks, so will also
349 abort. Also, most compilers know that abort() does not return, so
350 can better optimize code conditionally calling it.
351
352PROCEED_ON_ERROR default: defined as 0 (false)
 Controls whether detected bad addresses cause them to be bypassed
354 rather than aborting. If set, detected bad arguments to free and
355 realloc are ignored. And all bookkeeping information is zeroed out
356 upon a detected overwrite of freed heap space, thus losing the
357 ability to ever return it from malloc again, but enabling the
358 application to proceed. If PROCEED_ON_ERROR is defined, the
359 static variable malloc_corruption_error_count is compiled in
360 and can be examined to see if errors have occurred. This option
361 generates slower code than the default abort policy.
362
363DEBUG default: NOT defined
364 The DEBUG setting is mainly intended for people trying to modify
365 this code or diagnose problems when porting to new platforms.
366 However, it may also be able to better isolate user errors than just
367 using runtime checks. The assertions in the check routines spell
368 out in more detail the assumptions and invariants underlying the
369 algorithms. The checking is fairly extensive, and will slow down
370 execution noticeably. Calling malloc_stats or mallinfo with DEBUG
371 set will attempt to check every non-mmapped allocated and free chunk
372 in the course of computing the summaries.
373
374ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
375 Debugging assertion failures can be nearly impossible if your
376 version of the assert macro causes malloc to be called, which will
377 lead to a cascade of further failures, blowing the runtime stack.
 ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
379 which will usually make debugging easier.
380
381MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
 The action to take before "return 0" when malloc is unable to return
 memory because there is none available.
384
385HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
386 True if this system supports sbrk or an emulation of it.
387
388MORECORE default: sbrk
389 The name of the sbrk-style system routine to call to obtain more
390 memory. See below for guidance on writing custom MORECORE
391 functions. The type of the argument to sbrk/MORECORE varies across
392 systems. It cannot be size_t, because it supports negative
393 arguments, so it is normally the signed type of the same width as
394 size_t (sometimes declared as "intptr_t"). It doesn't much matter
395 though. Internally, we only call it with arguments less than half
396 the max value of a size_t, which should work across all reasonable
397 possibilities, although sometimes generating compiler warnings.
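  As an illustration, a minimal sbrk-style emulation over a fixed static
  arena might look like the sketch below. It assumes the usual sbrk
  conventions (failure is reported as (void*)-1, and a zero increment
  returns the current break); see the guidance below before using
  anything like this in earnest.

    static char my_arena[1 << 20];  // fixed 1MB backing store
    static size_t my_brk = 0;       // current break offset into my_arena
    static void* my_morecore(intptr_t increment) {
      if (increment < 0 || my_brk + (size_t)increment > sizeof(my_arena))
        return (void*)-1;           // cannot shrink or grow further
      void* old_break = my_arena + my_brk;
      my_brk += (size_t)increment;
      return old_break;             // like sbrk, returns the old break
    }
    #define MORECORE my_morecore
    #define MORECORE_CANNOT_TRIM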
398
399MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE
400 If true, take advantage of fact that consecutive calls to MORECORE
401 with positive arguments always return contiguous increasing
402 addresses. This is true of unix sbrk. It does not hurt too much to
403 set it true anyway, since malloc copes with non-contiguities.
 Setting it false when MORECORE is definitely non-contiguous saves the
 time and possibly wasted space it would otherwise take to discover this.
406
407MORECORE_CANNOT_TRIM default: NOT defined
408 True if MORECORE cannot release space back to the system when given
409 negative arguments. This is generally necessary only if you are
410 using a hand-crafted MORECORE function that cannot handle negative
411 arguments.
412
413NO_SEGMENT_TRAVERSAL default: 0
414 If non-zero, suppresses traversals of memory segments
415 returned by either MORECORE or CALL_MMAP. This disables
416 merging of segments that are contiguous, and selectively
417 releasing them to the OS if unused, but bounds execution times.
418
419HAVE_MMAP default: 1 (true)
420 True if this system supports mmap or an emulation of it. If so, and
421 HAVE_MORECORE is not true, MMAP is used for all system
422 allocation. If set and HAVE_MORECORE is true as well, MMAP is
423 primarily used to directly allocate very large blocks. It is also
424 used as a backup strategy in cases where MORECORE fails to provide
425 space from system. Note: A single call to MUNMAP is assumed to be
 able to unmap memory that may have been allocated using multiple calls
427 to MMAP, so long as they are adjacent.
428
429HAVE_MREMAP default: 1 on linux, else 0
430 If true realloc() uses mremap() to re-allocate large blocks and
431 extend or shrink allocation spaces.
432
433MMAP_CLEARS default: 1 except on WINCE.
434 True if mmap clears memory so calloc doesn't need to. This is true
435 for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
436
437USE_BUILTIN_FFS default: 0 (i.e., not used)
438 Causes malloc to use the builtin ffs() function to compute indices.
439 Some compilers may recognize and intrinsify ffs to be faster than the
440 supplied C version. Also, the case of x86 using gcc is special-cased
441 to an asm instruction, so is already as fast as it can be, and so
442 this setting has no effect. Similarly for Win32 under recent MS compilers.
443 (On most x86s, the asm version is only slightly faster than the C version.)
444
445malloc_getpagesize default: derive from system includes, or 4096.
446 The system page size. To the extent possible, this malloc manages
447 memory from the system in page-size units. This may be (and
448 usually is) a function rather than a constant. This is ignored
 if WIN32, where page size is determined using GetSystemInfo during
450 initialization.
451
452USE_DEV_RANDOM default: 0 (i.e., not used)
453 Causes malloc to use /dev/random to initialize secure magic seed for
454 stamping footers. Otherwise, the current time is used.
455
456NO_MALLINFO default: 0
457 If defined, don't compile "mallinfo". This can be a simple way
458 of dealing with mismatches between system declarations and
459 those in this file.
460
461MALLINFO_FIELD_TYPE default: size_t
462 The type of the fields in the mallinfo struct. This was originally
463 defined as "int" in SVID etc, but is more usefully defined as
 size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
465
466NO_MALLOC_STATS default: 0
467 If defined, don't compile "malloc_stats". This avoids calls to
468 fprintf and bringing in stdio dependencies you might not want.
469
470REALLOC_ZERO_BYTES_FREES default: not defined
471 This should be set if a call to realloc with zero bytes should
472 be the same as a call to free. Some people think it should. Otherwise,
 since this malloc returns a unique pointer for malloc(0), realloc(p, 0)
 does so as well.
475
476LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
477LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
LACKS_STDLIB_H, LACKS_SCHED_H, LACKS_TIME_H default: NOT defined unless on WIN32
479 Define these if your system does not have these header files.
480 You might need to manually insert some of the declarations they provide.
481
482DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
483 system_info.dwAllocationGranularity in WIN32,
484 otherwise 64K.
485 Also settable using mallopt(M_GRANULARITY, x)
486 The unit for allocating and deallocating memory from the system. On
487 most systems with contiguous MORECORE, there is no reason to
488 make this more than a page. However, systems with MMAP tend to
489 either require or encourage larger granularities. You can increase
 this value to prevent system allocation functions from being called so
491 often, especially if they are slow. The value must be at least one
492 page and must be a power of two. Setting to 0 causes initialization
493 to either page size or win32 region size. (Note: In previous
494 versions of malloc, the equivalent of this option was called
495 "TOP_PAD")
496
497DEFAULT_TRIM_THRESHOLD default: 2MB
498 Also settable using mallopt(M_TRIM_THRESHOLD, x)
499 The maximum amount of unused top-most memory to keep before
500 releasing via malloc_trim in free(). Automatic trimming is mainly
501 useful in long-lived programs using contiguous MORECORE. Because
502 trimming via sbrk can be slow on some systems, and can sometimes be
503 wasteful (in cases where programs immediately afterward allocate
504 more large chunks) the value should be high enough so that your
505 overall system performance would improve by releasing this much
506 memory. As a rough guide, you might set to a value close to the
507 average size of a process (program) running on your system.
508 Releasing this much memory would allow such a process to run in
509 memory. Generally, it is worth tuning trim thresholds when a
510 program undergoes phases where several large chunks are allocated
511 and released in ways that can reuse each other's storage, perhaps
512 mixed with phases where there are no such chunks at all. The trim
513 value must be greater than page size to have any useful effect. To
514 disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
515 some people use of mallocing a huge space and then freeing it at
516 program startup, in an attempt to reserve system memory, doesn't
517 have the intended effect under automatic trimming, since that memory
518 will immediately be returned to the system.
519
520DEFAULT_MMAP_THRESHOLD default: 256K
521 Also settable using mallopt(M_MMAP_THRESHOLD, x)
522 The request size threshold for using MMAP to directly service a
523 request. Requests of at least this size that cannot be allocated
524 using already-existing space will be serviced via mmap. (If enough
525 normal freed space already exists it is used instead.) Using mmap
526 segregates relatively large chunks of memory so that they can be
527 individually obtained and released from the host system. A request
528 serviced through mmap is never reused by any other request (at least
529 not directly; the system may just so happen to remap successive
530 requests to the same locations). Segregating space in this way has
531 the benefits that: Mmapped space can always be individually released
532 back to the system, which helps keep the system level memory demands
533 of a long-lived program low. Also, mapped memory doesn't become
534 `locked' between other chunks, as can happen with normally allocated
535 chunks, which means that even trimming via malloc_trim would not
536 release them. However, it has the disadvantage that the space
537 cannot be reclaimed, consolidated, and then used to service later
538 requests, as happens with normal chunks. The advantages of mmap
539 nearly always outweigh disadvantages for "large" chunks, but the
540 value of "large" may vary across systems. The default is an
541 empirically derived value that works well in most systems. You can
542 disable mmap by setting to MAX_SIZE_T.
543
MAX_RELEASE_CHECK_RATE default: 4095 if HAVE_MMAP, otherwise MAX_SIZE_T
545 The number of consolidated frees between checks to release
546 unused segments when freeing. When using non-contiguous segments,
547 especially with multiple mspaces, checking only for topmost space
548 doesn't always suffice to trigger trimming. To compensate for this,
549 free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
550 current number of segments, if greater) try to release unused
551 segments to the OS when freeing chunks that result in
552 consolidation. The best value for this parameter is a compromise
553 between slowing down frees with relatively costly checks that
554 rarely trigger versus holding on to unused memory. To effectively
555 disable, set to MAX_SIZE_T. This may lead to a very slight speed
556 improvement at the expense of carrying around more memory.
557*/
558
559/* Version identifier to allow people to support multiple versions */
560#ifndef DLMALLOC_VERSION
561#define DLMALLOC_VERSION 20806
562#endif /* DLMALLOC_VERSION */
563
564#ifndef DLMALLOC_EXPORT
565#define DLMALLOC_EXPORT extern
566#endif
567
568#ifndef WIN32
569#ifdef _WIN32
570#define WIN32 1
571#endif /* _WIN32 */
572#ifdef _WIN32_WCE
573#define LACKS_FCNTL_H
574#define WIN32 1
575#endif /* _WIN32_WCE */
576#endif /* WIN32 */
577#ifdef WIN32
578#define WIN32_LEAN_AND_MEAN
579#include <windows.h>
580#include <tchar.h>
581#define HAVE_MMAP 1
582#define HAVE_MORECORE 0
583#define LACKS_UNISTD_H
584#define LACKS_SYS_PARAM_H
585#define LACKS_SYS_MMAN_H
586#define LACKS_STRING_H
587#define LACKS_STRINGS_H
588#define LACKS_SYS_TYPES_H
589#define LACKS_ERRNO_H
590#define LACKS_SCHED_H
591#ifndef MALLOC_FAILURE_ACTION
592#define MALLOC_FAILURE_ACTION
593#endif /* MALLOC_FAILURE_ACTION */
594#ifndef MMAP_CLEARS
595#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
596#define MMAP_CLEARS 0
597#else
598#define MMAP_CLEARS 1
599#endif /* _WIN32_WCE */
600#endif /*MMAP_CLEARS */
601#endif /* WIN32 */
602
603#if defined(DARWIN) || defined(_DARWIN)
604/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
605#ifndef HAVE_MORECORE
606#define HAVE_MORECORE 0
607#define HAVE_MMAP 1
608/* OSX allocators provide 16 byte alignment */
609#ifndef MALLOC_ALIGNMENT
610#define MALLOC_ALIGNMENT ((size_t)16U)
611#endif
612#endif /* HAVE_MORECORE */
613#endif /* DARWIN */
614
615#ifndef LACKS_SYS_TYPES_H
616#include <sys/types.h> /* For size_t */
617#endif /* LACKS_SYS_TYPES_H */
618
619/* The maximum possible size_t value has all bits set */
620#define MAX_SIZE_T (~(size_t)0)
621
622#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
623#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
624 (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
625#endif /* USE_LOCKS */
626
627#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
628#if ((defined(__GNUC__) && \
629 ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
630 defined(__i386__) || defined(__x86_64__))) || \
631 (defined(_MSC_VER) && _MSC_VER>=1310))
632#ifndef USE_SPIN_LOCKS
633#define USE_SPIN_LOCKS 1
634#endif /* USE_SPIN_LOCKS */
635#elif USE_SPIN_LOCKS
636#error "USE_SPIN_LOCKS defined without implementation"
637#endif /* ... locks available... */
638#elif !defined(USE_SPIN_LOCKS)
639#define USE_SPIN_LOCKS 0
640#endif /* USE_LOCKS */
641
642#ifndef ONLY_MSPACES
643#define ONLY_MSPACES 0
644#endif /* ONLY_MSPACES */
645#ifndef MSPACES
646#if ONLY_MSPACES
647#define MSPACES 1
648#else /* ONLY_MSPACES */
649#define MSPACES 0
650#endif /* ONLY_MSPACES */
651#endif /* MSPACES */
652#ifndef MALLOC_ALIGNMENT
653#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
654#endif /* MALLOC_ALIGNMENT */
655#ifndef FOOTERS
656#define FOOTERS 0
657#endif /* FOOTERS */
658#ifndef ABORT
659#define ABORT abort()
660#endif /* ABORT */
661#ifndef ABORT_ON_ASSERT_FAILURE
662#define ABORT_ON_ASSERT_FAILURE 1
663#endif /* ABORT_ON_ASSERT_FAILURE */
664#ifndef PROCEED_ON_ERROR
665#define PROCEED_ON_ERROR 0
666#endif /* PROCEED_ON_ERROR */
667
668#ifndef INSECURE
669#define INSECURE 0
670#endif /* INSECURE */
671#ifndef MALLOC_INSPECT_ALL
672#define MALLOC_INSPECT_ALL 0
673#endif /* MALLOC_INSPECT_ALL */
674#ifndef HAVE_MMAP
675#define HAVE_MMAP 1
676#endif /* HAVE_MMAP */
677#ifndef MMAP_CLEARS
678#define MMAP_CLEARS 1
679#endif /* MMAP_CLEARS */
680#ifndef HAVE_MREMAP
681#ifdef linux
682#define HAVE_MREMAP 1
683#define _GNU_SOURCE /* Turns on mremap() definition */
684#else /* linux */
685#define HAVE_MREMAP 0
686#endif /* linux */
687#endif /* HAVE_MREMAP */
688#ifndef MALLOC_FAILURE_ACTION
689#define MALLOC_FAILURE_ACTION errno = ENOMEM;
690#endif /* MALLOC_FAILURE_ACTION */
691#ifndef HAVE_MORECORE
692#if ONLY_MSPACES
693#define HAVE_MORECORE 0
694#else /* ONLY_MSPACES */
695#define HAVE_MORECORE 1
696#endif /* ONLY_MSPACES */
697#endif /* HAVE_MORECORE */
698#if !HAVE_MORECORE
699#define MORECORE_CONTIGUOUS 0
700#else /* !HAVE_MORECORE */
701#define MORECORE_DEFAULT sbrk
702#ifndef MORECORE_CONTIGUOUS
703#define MORECORE_CONTIGUOUS 1
704#endif /* MORECORE_CONTIGUOUS */
705#endif /* HAVE_MORECORE */
706#ifndef DEFAULT_GRANULARITY
707#if (MORECORE_CONTIGUOUS || defined(WIN32))
708#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
709#else /* MORECORE_CONTIGUOUS */
710#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
711#endif /* MORECORE_CONTIGUOUS */
712#endif /* DEFAULT_GRANULARITY */
713#ifndef DEFAULT_TRIM_THRESHOLD
714#ifndef MORECORE_CANNOT_TRIM
715#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
716#else /* MORECORE_CANNOT_TRIM */
717#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
718#endif /* MORECORE_CANNOT_TRIM */
719#endif /* DEFAULT_TRIM_THRESHOLD */
720#ifndef DEFAULT_MMAP_THRESHOLD
721#if HAVE_MMAP
722#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
723#else /* HAVE_MMAP */
724#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
725#endif /* HAVE_MMAP */
726#endif /* DEFAULT_MMAP_THRESHOLD */
727#ifndef MAX_RELEASE_CHECK_RATE
728#if HAVE_MMAP
729#define MAX_RELEASE_CHECK_RATE 4095
730#else
731#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
732#endif /* HAVE_MMAP */
733#endif /* MAX_RELEASE_CHECK_RATE */
734#ifndef USE_BUILTIN_FFS
735#define USE_BUILTIN_FFS 0
736#endif /* USE_BUILTIN_FFS */
737#ifndef USE_DEV_RANDOM
738#define USE_DEV_RANDOM 0
739#endif /* USE_DEV_RANDOM */
740#ifndef NO_MALLINFO
741#define NO_MALLINFO 0
742#endif /* NO_MALLINFO */
743#ifndef MALLINFO_FIELD_TYPE
744#define MALLINFO_FIELD_TYPE size_t
745#endif /* MALLINFO_FIELD_TYPE */
746#ifndef NO_MALLOC_STATS
747#define NO_MALLOC_STATS 0
748#endif /* NO_MALLOC_STATS */
749#ifndef NO_SEGMENT_TRAVERSAL
750#define NO_SEGMENT_TRAVERSAL 0
751#endif /* NO_SEGMENT_TRAVERSAL */
752
753/*
754 mallopt tuning options. SVID/XPG defines four standard parameter
755 numbers for mallopt, normally defined in malloc.h. None of these
756 are used in this malloc, so setting them has no effect. But this
757 malloc does support the following options.
758*/
759
760#define M_TRIM_THRESHOLD (-1)
761#define M_GRANULARITY (-2)
762#define M_MMAP_THRESHOLD (-3)
763
764/* ------------------------ Mallinfo declarations ------------------------ */
765
766#if !NO_MALLINFO
767/*
768 This version of malloc supports the standard SVID/XPG mallinfo
769 routine that returns a struct containing usage properties and
770 statistics. It should work on any system that has a
771 /usr/include/malloc.h defining struct mallinfo. The main
772 declaration needed is the mallinfo struct that is returned (by-copy)
 by mallinfo(). The mallinfo struct contains a bunch of fields that
 are not even meaningful in this version of malloc. These fields are
 instead filled by mallinfo() with other numbers that might be of
776 interest.
777
778 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
779 /usr/include/malloc.h file that includes a declaration of struct
780 mallinfo. If so, it is included; else a compliant version is
781 declared below. These must be precisely the same for mallinfo() to
782 work. The original SVID version of this struct, defined on most
783 systems with mallinfo, declares all fields as ints. But some others
784 define as unsigned long. If your system defines the fields using a
785 type of different width than listed here, you MUST #include your
786 system version and #define HAVE_USR_INCLUDE_MALLOC_H.
787*/
788
789/* #define HAVE_USR_INCLUDE_MALLOC_H */
790
791#ifdef HAVE_USR_INCLUDE_MALLOC_H
792#include "/usr/include/malloc.h"
793#else /* HAVE_USR_INCLUDE_MALLOC_H */
794#ifndef STRUCT_MALLINFO_DECLARED
795/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
796#define _STRUCT_MALLINFO
797#define STRUCT_MALLINFO_DECLARED 1
798struct mallinfo {
799 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
800 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
801 MALLINFO_FIELD_TYPE smblks; /* always 0 */
802 MALLINFO_FIELD_TYPE hblks; /* always 0 */
803 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
804 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
805 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
806 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
807 MALLINFO_FIELD_TYPE fordblks; /* total free space */
808 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
809};
810#endif /* STRUCT_MALLINFO_DECLARED */
811#endif /* HAVE_USR_INCLUDE_MALLOC_H */
812#endif /* NO_MALLINFO */
813
814/*
815 Try to persuade compilers to inline. The most critical functions for
816 inlining are defined as macros, so these aren't used for them.
817*/
818
819#ifndef FORCEINLINE
820 #if defined(__GNUC__)
 #define FORCEINLINE __inline __attribute__ ((always_inline))
822 #elif defined(_MSC_VER)
823 #define FORCEINLINE __forceinline
824 #endif
825#endif
826#ifndef NOINLINE
827 #if defined(__GNUC__)
828 #define NOINLINE __attribute__ ((noinline))
829 #elif defined(_MSC_VER)
830 #define NOINLINE __declspec(noinline)
831 #else
832 #define NOINLINE
833 #endif
834#endif
835
836#ifdef __cplusplus
837extern "C" {
838#ifndef FORCEINLINE
839 #define FORCEINLINE inline
840#endif
841#endif /* __cplusplus */
842#ifndef FORCEINLINE
843 #define FORCEINLINE
844#endif
845
846#if !ONLY_MSPACES
847
848/* ------------------- Declarations of public routines ------------------- */
849
850#ifndef USE_DL_PREFIX
851#define dlcalloc calloc
852#define dlfree free
853#define dlmalloc malloc
854#define dlmemalign memalign
855#define dlposix_memalign posix_memalign
856#define dlrealloc realloc
857#define dlrealloc_in_place realloc_in_place
858#define dlvalloc valloc
859#define dlpvalloc pvalloc
860#define dlmallinfo mallinfo
861#define dlmallopt mallopt
862#define dlmalloc_trim malloc_trim
863#define dlmalloc_stats malloc_stats
864#define dlmalloc_usable_size malloc_usable_size
865#define dlmalloc_footprint malloc_footprint
866#define dlmalloc_max_footprint malloc_max_footprint
867#define dlmalloc_footprint_limit malloc_footprint_limit
868#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
869#define dlmalloc_inspect_all malloc_inspect_all
870#define dlindependent_calloc independent_calloc
871#define dlindependent_comalloc independent_comalloc
872#define dlbulk_free bulk_free
873#endif /* USE_DL_PREFIX */
874
875/*
876 malloc(size_t n)
877 Returns a pointer to a newly allocated chunk of at least n bytes, or
878 null if no space is available, in which case errno is set to ENOMEM
879 on ANSI C systems.
880
881 If n is zero, malloc returns a minimum-sized chunk. (The minimum
882 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
883 systems.) Note that size_t is an unsigned type, so calls with
884 arguments that would be negative if signed are interpreted as
885 requests for huge amounts of space, which will often fail. The
886 maximum supported value of n differs across systems, but is in all
887 cases less than the maximum representable value of a size_t.
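  A brief usage sketch, using the dl-prefixed names declared below (these
  map onto the plain malloc/free names when USE_DL_PREFIX is not defined):

    void* p = dlmalloc(1024);
    if (p == NULL) {
      // allocation failed; on ANSI systems errno is now ENOMEM
    } else {
      memset(p, 0, 1024);  // at least 1024 bytes are usable
      dlfree(p);
    }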
888*/
889DLMALLOC_EXPORT void* dlmalloc(size_t);
890
891/*
892 free(void* p)
893 Releases the chunk of memory pointed to by p, that had been previously
894 allocated using malloc or a related routine such as realloc.
895 It has no effect if p is null. If p was not malloced or already
896 freed, free(p) will by default cause the current program to abort.
897*/
898DLMALLOC_EXPORT void dlfree(void*);
899
900/*
901 calloc(size_t n_elements, size_t element_size);
902 Returns a pointer to n_elements * element_size bytes, with all locations
903 set to zero.
904*/
905DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
906
907/*
908 realloc(void* p, size_t n)
909 Returns a pointer to a chunk of size n that contains the same data
910 as does chunk p up to the minimum of (n, p's size) bytes, or null
911 if no space is available.
912
913 The returned pointer may or may not be the same as p. The algorithm
914 prefers extending p in most cases when possible, otherwise it
915 employs the equivalent of a malloc-copy-free sequence.
916
917 If p is null, realloc is equivalent to malloc.
918
919 If space is not available, realloc returns null, errno is set (if on
920 ANSI) and p is NOT freed.
921
 If n is for fewer bytes than already held by p, the newly unused
923 space is lopped off and freed if possible. realloc with a size
924 argument of zero (re)allocates a minimum-sized chunk.
925
926 The old unix realloc convention of allowing the last-free'd chunk
927 to be used as an argument to realloc is not supported.
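  Because p is not freed on failure, growing a buffer safely means keeping
  the old pointer until the call succeeds, for example (a sketch):

    void* grown = dlrealloc(buf, new_size);
    if (grown == NULL) {
      // buf is still valid and unchanged; handle the failure
    } else {
      buf = grown;  // only now let go of the old pointer
    }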
928*/
929DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
930
931/*
932 realloc_in_place(void* p, size_t n)
933 Resizes the space allocated for p to size n, only if this can be
934 done without moving p (i.e., only if there is adjacent space
935 available if n is greater than p's current allocated size, or n is
936 less than or equal to p's size). This may be used instead of plain
937 realloc if an alternative allocation strategy is needed upon failure
938 to expand space; for example, reallocation of a buffer that must be
939 memory-aligned or cleared. You can use realloc_in_place to trigger
940 these alternatives only when needed.
941
942 Returns p if successful; otherwise null.
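  A sketch of the intended pattern: try to grow in place, and fall back to
  an alternative strategy (here a plain allocate-copy-free) only when that
  fails; old_size stands for however many bytes were previously in use:

    if (dlrealloc_in_place(p, new_size) == NULL) {
      void* q = dlmalloc(new_size);
      if (q != NULL) {
        memcpy(q, p, old_size);
        dlfree(p);
        p = q;
      }
    }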
943*/
944DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
945
946/*
947 memalign(size_t alignment, size_t n);
948 Returns a pointer to a newly allocated chunk of n bytes, aligned
949 in accord with the alignment argument.
950
951 The alignment argument should be a power of two. If the argument is
952 not a power of two, the nearest greater power is used.
953 8-byte alignment is guaranteed by normal malloc calls, so don't
954 bother calling memalign with an argument of 8 or less.
955
956 Overreliance on memalign is a sure way to fragment space.
957*/
958DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
959
960/*
961 int posix_memalign(void** pp, size_t alignment, size_t n);
962 Allocates a chunk of n bytes, aligned in accord with the alignment
963 argument. Differs from memalign only in that it (1) assigns the
964 allocated memory to *pp rather than returning it, (2) fails and
 returns EINVAL if the alignment is not a power of two, and (3) fails and
966 returns ENOMEM if memory cannot be allocated.
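  For example (a sketch; the alignment must be a power of two):

    void* p = NULL;
    int rc = dlposix_memalign(&p, 64, 1000);
    if (rc == EINVAL) {
      // alignment was not a power of two
    } else if (rc == ENOMEM) {
      // no memory available
    } else {
      dlfree(p);  // rc == 0 and p is 64-byte aligned
    }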
967*/
968DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
969
970/*
971 valloc(size_t n);
972 Equivalent to memalign(pagesize, n), where pagesize is the page
973 size of the system. If the pagesize is unknown, 4096 is used.
974*/
975DLMALLOC_EXPORT void* dlvalloc(size_t);
976
977/*
978 mallopt(int parameter_number, int parameter_value)
 Sets tunable parameters. The format is to provide a
980 (parameter-number, parameter-value) pair. mallopt then sets the
981 corresponding parameter to the argument value if it can (i.e., so
982 long as the value is meaningful), and returns 1 if successful else
 0. To work around the fact that mallopt is specified to use int,
984 not size_t parameters, the value -1 is specially treated as the
985 maximum unsigned size_t value.
986
987 SVID/XPG/ANSI defines four standard param numbers for mallopt,
 normally defined in malloc.h. None of these are used in this malloc,
989 so setting them has no effect. But this malloc also supports other
990 options in mallopt. See below for details. Briefly, supported
991 parameters are as follows (listed defaults are for "typical"
992 configurations).
993
994 Symbol param # default allowed param values
995 M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables)
996 M_GRANULARITY -2 page size any power of 2 >= page size
997 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
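  For example, to map requests of 1MB or more directly via mmap and to
  trim unused top memory more eagerly (a sketch; each call returns 1 on
  success and 0 otherwise):

    dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);
    dlmallopt(M_TRIM_THRESHOLD, 128 * 1024);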
998*/
999DLMALLOC_EXPORT int dlmallopt(int, int);
1000
1001/*
1002 malloc_footprint();
1003 Returns the number of bytes obtained from the system. The total
1004 number of bytes allocated by malloc, realloc etc., is less than this
1005 value. Unlike mallinfo, this function returns only a precomputed
1006 result, so can be called frequently to monitor memory consumption.
1007 Even if locks are otherwise defined, this function does not use them,
1008 so results might not be up to date.
1009*/
1010DLMALLOC_EXPORT size_t dlmalloc_footprint(void);
1011
1012/*
1013 malloc_max_footprint();
1014 Returns the maximum number of bytes obtained from the system. This
1015 value will be greater than current footprint if deallocated space
1016 has been reclaimed by the system. The peak number of bytes allocated
1017 by malloc, realloc etc., is less than this value. Unlike mallinfo,
1018 this function returns only a precomputed result, so can be called
1019 frequently to monitor memory consumption. Even if locks are
1020 otherwise defined, this function does not use them, so results might
1021 not be up to date.
1022*/
1023DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);
1024
1025/*
1026 malloc_footprint_limit();
1027 Returns the number of bytes that the heap is allowed to obtain from
1028 the system, returning the last value returned by
1029 malloc_set_footprint_limit, or the maximum size_t value if
1030 never set. The returned value reflects a permission. There is no
1031 guarantee that this number of bytes can actually be obtained from
1032 the system.
1033*/
1034DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
1035
1036/*
1037 malloc_set_footprint_limit();
1038 Sets the maximum number of bytes to obtain from the system, causing
1039 failure returns from malloc and related functions upon attempts to
1040 exceed this value. The argument value may be subject to page
1041 rounding to an enforceable limit; this actual value is returned.
1042 Using an argument of the maximum possible size_t effectively
1043 disables checks. If the argument is less than or equal to the
1044 current malloc_footprint, then all future allocations that require
1045 additional system memory will fail. However, invocation cannot
1046 retroactively deallocate existing used memory.
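  For example, to cap the heap at roughly 64MB (a sketch; the value that
  is actually enforced, possibly page-rounded, is returned):

    size_t enforced = dlmalloc_set_footprint_limit((size_t)64 * 1024 * 1024);
    // from here on, allocations that would push dlmalloc_footprint()
    // beyond 'enforced' fail and return NULL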
1047*/
1048DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
1049
1050#if MALLOC_INSPECT_ALL
1051/*
1052 malloc_inspect_all(void(*handler)(void *start,
1053 void *end,
1054 size_t used_bytes,
1055 void* callback_arg),
1056 void* arg);
1057 Traverses the heap and calls the given handler for each managed
1058 region, skipping all bytes that are (or may be) used for bookkeeping
 purposes. Traversal does not include chunks that have been
1060 directly memory mapped. Each reported region begins at the start
1061 address, and continues up to but not including the end address. The
1062 first used_bytes of the region contain allocated data. If
1063 used_bytes is zero, the region is unallocated. The handler is
1064 invoked with the given callback argument. If locks are defined, they
1065 are held during the entire traversal. It is a bad idea to invoke
1066 other malloc functions from within the handler.
1067
 For example, to count the number of in-use chunks with at least
 1000 used bytes, you could write:
1070 static int count = 0;
1071 void count_chunks(void* start, void* end, size_t used, void* arg) {
1072 if (used >= 1000) ++count;
1073 }
1074 then:
1075 malloc_inspect_all(count_chunks, NULL);
1076
1077 malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
1078*/
1079DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
1080 void* arg);
1081
1082#endif /* MALLOC_INSPECT_ALL */
1083
1084#if !NO_MALLINFO
1085/*
1086 mallinfo()
1087 Returns (by copy) a struct containing various summary statistics:
1088
1089 arena: current total non-mmapped bytes allocated from system
1090 ordblks: the number of free chunks
1091 smblks: always zero.
1092 hblks: current number of mmapped regions
1093 hblkhd: total bytes held in mmapped regions
1094 usmblks: the maximum total allocated space. This will be greater
1095 than current total if trimming has occurred.
1096 fsmblks: always zero
1097 uordblks: current total allocated space (normal or mmapped)
1098 fordblks: total free space
1099 keepcost: the maximum number of bytes that could ideally be released
1100 back to system via malloc_trim. ("ideally" means that
1101 it ignores page restrictions etc.)
1102
1103 Because these fields are ints, but internal bookkeeping may
1104 be kept as longs, the reported values may wrap around zero and
1105 thus be inaccurate.
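  A sketch of reading the summary, assuming stdio is available and the
  default MALLINFO_FIELD_TYPE of size_t (hence the casts and %zu):

    struct mallinfo mi = dlmallinfo();
    printf("in use: %zu, free: %zu, trimmable: %zu\n",
           (size_t)mi.uordblks, (size_t)mi.fordblks, (size_t)mi.keepcost);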
1106*/
1107DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
1108#endif /* NO_MALLINFO */
1109
1110/*
1111 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
1112
1113 independent_calloc is similar to calloc, but instead of returning a
1114 single cleared space, it returns an array of pointers to n_elements
1115 independent elements that can hold contents of size elem_size, each
1116 of which starts out cleared, and can be independently freed,
1117 realloc'ed etc. The elements are guaranteed to be adjacently
1118 allocated (this is not guaranteed to occur with multiple callocs or
1119 mallocs), which may also improve cache locality in some
1120 applications.
1121
1122 The "chunks" argument is optional (i.e., may be null, which is
1123 probably the most typical usage). If it is null, the returned array
1124 is itself dynamically allocated and should also be freed when it is
1125 no longer needed. Otherwise, the chunks array must be of at least
1126 n_elements in length. It is filled in with the pointers to the
1127 chunks.
1128
1129 In either case, independent_calloc returns this pointer array, or
1130 null if the allocation failed. If n_elements is zero and "chunks"
1131 is null, it returns a chunk representing an array with zero elements
1132 (which should be freed if not wanted).
1133
1134 Each element must be freed when it is no longer needed. This can be
1135 done all at once using bulk_free.
1136
1137 independent_calloc simplifies and speeds up implementations of many
1138 kinds of pools. It may also be useful when constructing large data
1139 structures that initially have a fixed number of fixed-sized nodes,
1140 but the number is not known at compile time, and some of the nodes
1141 may later need to be freed. For example:
1142
1143 struct Node { int item; struct Node* next; };
1144
1145 struct Node* build_list() {
1146 struct Node** pool;
1147 int n = read_number_of_nodes_needed();
1148 if (n <= 0) return 0;
 pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
1150 if (pool == 0) die();
1151 // organize into a linked list...
1152 struct Node* first = pool[0];
 for (int i = 0; i < n-1; ++i)
1154 pool[i]->next = pool[i+1];
1155 free(pool); // Can now free the array (or not, if it is needed later)
1156 return first;
1157 }
1158*/
1159DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
1160
1161/*
1162 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
1163
1164 independent_comalloc allocates, all at once, a set of n_elements
1165 chunks with sizes indicated in the "sizes" array. It returns
1166 an array of pointers to these elements, each of which can be
1167 independently freed, realloc'ed etc. The elements are guaranteed to
1168 be adjacently allocated (this is not guaranteed to occur with
1169 multiple callocs or mallocs), which may also improve cache locality
1170 in some applications.
1171
1172 The "chunks" argument is optional (i.e., may be null). If it is null
1173 the returned array is itself dynamically allocated and should also
1174 be freed when it is no longer needed. Otherwise, the chunks array
1175 must be of at least n_elements in length. It is filled in with the
1176 pointers to the chunks.
1177
1178 In either case, independent_comalloc returns this pointer array, or
1179 null if the allocation failed. If n_elements is zero and chunks is
1180 null, it returns a chunk representing an array with zero elements
1181 (which should be freed if not wanted).
1182
1183 Each element must be freed when it is no longer needed. This can be
1184 done all at once using bulk_free.
1185
 independent_comalloc differs from independent_calloc in that each
1187 element may have a different size, and also that it does not
1188 automatically clear elements.
1189
1190 independent_comalloc can be used to speed up allocation in cases
1191 where several structs or objects must always be allocated at the
1192 same time. For example:
1193
1194 struct Head { ... }
1195 struct Foot { ... }
1196
1197 void send_message(char* msg) {
1198 int msglen = strlen(msg);
1199 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1200 void* chunks[3];
1201 if (independent_comalloc(3, sizes, chunks) == 0)
1202 die();
1203 struct Head* head = (struct Head*)(chunks[0]);
1204 char* body = (char*)(chunks[1]);
1205 struct Foot* foot = (struct Foot*)(chunks[2]);
1206 // ...
1207 }
1208
1209 In general though, independent_comalloc is worth using only for
1210 larger values of n_elements. For small values, you probably won't
1211 detect enough difference from series of malloc calls to bother.
1212
1213 Overuse of independent_comalloc can increase overall memory usage,
1214 since it cannot reuse existing noncontiguous small chunks that
1215 might be available for some of the elements.
1216*/
1217DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
1218
1219/*
1220 bulk_free(void* array[], size_t n_elements)
1221 Frees and clears (sets to null) each non-null pointer in the given
1222 array. This is likely to be faster than freeing them one-by-one.
1223 If footers are used, pointers that have been allocated in different
1224 mspaces are not freed or cleared, and the count of all such pointers
1225 is returned. For large arrays of pointers with poor locality, it
1226 may be worthwhile to sort this array before calling bulk_free.
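  For example, releasing a batch of allocations with one call (a sketch):

    void* ptrs[100];
    size_t i;
    for (i = 0; i < 100; ++i)
      ptrs[i] = dlmalloc(64);
    // work with the pointers, then release them all at once;
    // the result is 0 unless footers detected foreign pointers
    size_t not_freed = dlbulk_free(ptrs, 100);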
1227*/
1228DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
1229
1230/*
1231 pvalloc(size_t n);
1232 Equivalent to valloc(minimum-page-that-holds(n)), that is,
 it rounds n up to the nearest pagesize.
1234 */
1235DLMALLOC_EXPORT void* dlpvalloc(size_t);
1236
1237/*
1238 malloc_trim(size_t pad);
1239
1240 If possible, gives memory back to the system (via negative arguments
1241 to sbrk) if there is unused memory at the `high' end of the malloc
1242 pool or in unused MMAP segments. You can call this after freeing
1243 large blocks of memory to potentially reduce the system-level memory
1244 requirements of a program. However, it cannot guarantee to reduce
1245 memory. Under some allocation patterns, some large free blocks of
1246 memory will be locked between two used chunks, so they cannot be
1247 given back to the system.
1248
1249 The `pad' argument to malloc_trim represents the amount of free
1250 trailing space to leave untrimmed. If this argument is zero, only
1251 the minimum amount of memory to maintain internal data structures
1252 will be left. Non-zero arguments can be supplied to maintain enough
1253 trailing space to service future expected allocations without having
1254 to re-obtain memory from the system.
1255
1256 Malloc_trim returns 1 if it actually released any memory, else 0.
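  For example, after tearing down a subsystem that held many large
  blocks (a sketch):

    // return unused memory at the top of the heap to the system,
    // keeping about 1MB of trailing space for future allocations
    int released = dlmalloc_trim((size_t)1024 * 1024);
    // released is 1 if any memory was actually given back, else 0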
1257*/
1258DLMALLOC_EXPORT int dlmalloc_trim(size_t);
1259
1260/*
1261 malloc_stats();
1262 Prints on stderr the amount of space obtained from the system (both
1263 via sbrk and mmap), the maximum amount (which may be more than
1264 current if malloc_trim and/or munmap got called), and the current
1265 number of bytes allocated via malloc (or realloc, etc) but not yet
1266 freed. Note that this is the number of bytes allocated, not the
1267 number requested. It will be larger than the number requested
1268 because of alignment and bookkeeping overhead. Because it includes
1269 alignment wastage as being in use, this figure may be greater than
1270 zero even when no user-level chunks are allocated.
1271
1272 The reported current and maximum system memory can be inaccurate if
1273 a program makes other calls to system memory allocation functions
1274 (normally sbrk) outside of malloc.
1275
1276 malloc_stats prints only the most commonly interesting statistics.
1277 More information can be obtained by calling mallinfo.
1278*/
1279DLMALLOC_EXPORT void dlmalloc_stats(void);
1280
1281/*
1282 malloc_usable_size(void* p);
1283
1284 Returns the number of bytes you can actually use in
1285 an allocated chunk, which may be more than you requested (although
1286 often not) due to alignment and minimum size constraints.
1287 You can use this many bytes without worrying about
1288 overwriting other allocated objects. This is not a particularly great
1289 programming practice. malloc_usable_size can be more useful in
1290 debugging and assertions, for example:
1291
1292 p = malloc(n);
1293 assert(malloc_usable_size(p) >= 256);
1294*/
1295size_t dlmalloc_usable_size(void*);
1296
1297#endif /* ONLY_MSPACES */
1298
1299#if MSPACES
1300
1301/*
1302 mspace is an opaque type representing an independent
1303 region of space that supports mspace_malloc, etc.
1304*/
1305typedef void* mspace;
1306
1307/*
1308 create_mspace creates and returns a new independent space with the
1309 given initial capacity, or, if 0, the default granularity size. It
1310 returns null if there is no system memory available to create the
1311 space. If argument locked is non-zero, the space uses a separate
1312 lock to control access. The capacity of the space will grow
1313 dynamically as needed to service mspace_malloc requests. You can
1314 control the sizes of incremental increases of this space by
1315 compiling with a different DEFAULT_GRANULARITY or dynamically
1316 setting with mallopt(M_GRANULARITY, value).
1317*/
1318DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
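/*
  An illustrative sketch of the basic mspace lifecycle (uses
  mspace_malloc, mspace_free and destroy_mspace declared below):

    mspace msp = create_mspace(0, 0);      // default capacity, no locking
    if (msp != 0) {
      void* p = mspace_malloc(msp, 128);
      // ... use p ...
      mspace_free(msp, p);
      destroy_mspace(msp);                 // return the space's memory
    }
*/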
1319
1320/*
1321 destroy_mspace destroys the given space, and attempts to return all
1322 of its memory back to the system, returning the total number of
1323 bytes freed. After destruction, the results of access to all memory
1324 used by the space become undefined.
1325*/
1326DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
1327
1328/*
1329 create_mspace_with_base uses the memory supplied as the initial base
1330 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1331 space is used for bookkeeping, so the capacity must be at least this
1332 large. (Otherwise 0 is returned.) When this initial space is
1333 exhausted, additional memory will be obtained from the system.
1334 Destroying this space will deallocate all additionally allocated
1335 space (if possible) but not the initial base.
1336*/
1337DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
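/*
  An illustrative sketch using a caller-supplied buffer as the initial
  base (the 64KB figure is arbitrary but comfortably exceeds the
  bookkeeping overhead noted above):

    static char arena[64 * 1024];
    mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
    if (msp != 0) {
      void* p = mspace_malloc(msp, 256);
      // ... allocations come from arena until it is exhausted ...
    }
*/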
1338
1339/*
1340 mspace_track_large_chunks controls whether requests for large chunks
1341 are allocated in their own untracked mmapped regions, separate from
1342 others in this mspace. By default large chunks are not tracked,
1343 which reduces fragmentation. However, such chunks are not
1344 necessarily released to the system upon destroy_mspace. Enabling
1345 tracking by setting to true may increase fragmentation, but avoids
1346 leakage when relying on destroy_mspace to release all memory
1347 allocated using this space. The function returns the previous
1348 setting.
1349*/
1350DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
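/*
  An illustrative sketch: enable tracking so that destroy_mspace also
  reclaims large mmap-allocated chunks made from this space.

    mspace msp = create_mspace(0, 0);
    int was_tracking = mspace_track_large_chunks(msp, 1); // previous setting
    // ... allocate, including very large requests ...
    destroy_mspace(msp);   // now releases those large chunks as well
*/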
1351
1352
1353/*
1354 mspace_malloc behaves as malloc, but operates within
1355 the given space.
1356*/
1357DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
1358
1359/*
1360 mspace_free behaves as free, but operates within
1361 the given space.
1362
1363 If compiled with FOOTERS==1, mspace_free is not actually needed.
1364 free may be called instead of mspace_free because freed chunks from
1365 any space are handled by their originating spaces.
1366*/
1367DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
1368
1369/*
1370 mspace_realloc behaves as realloc, but operates within
1371 the given space.
1372
1373 If compiled with FOOTERS==1, mspace_realloc is not actually
1374 needed. realloc may be called instead of mspace_realloc because
1375 realloced chunks from any space are handled by their originating
1376 spaces.
1377*/
1378DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1379
1380/*
1381 mspace_calloc behaves as calloc, but operates within
1382 the given space.
1383*/
1384DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1385
1386/*
1387 mspace_memalign behaves as memalign, but operates within
1388 the given space.
1389*/
1390DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1391
1392/*
1393 mspace_independent_calloc behaves as independent_calloc, but
1394 operates within the given space.
1395*/
1396DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
1397 size_t elem_size, void* chunks[]);
1398
1399/*
1400 mspace_independent_comalloc behaves as independent_comalloc, but
1401 operates within the given space.
1402*/
1403DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1404 size_t sizes[], void* chunks[]);
1405
1406/*
1407 mspace_footprint() returns the number of bytes obtained from the
1408 system for this space.
1409*/
1410DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
1411
1412/*
1413 mspace_max_footprint() returns the peak number of bytes obtained from the
1414 system for this space.
1415*/
1416DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
1417
1418
1419#if !NO_MALLINFO
1420/*
1421 mspace_mallinfo behaves as mallinfo, but reports properties of
1422 the given space.
1423*/
1424DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
1425#endif /* NO_MALLINFO */
1426
1427/*
  mspace_usable_size(const void* mem) behaves the same as
  malloc_usable_size, but reports the usable size of memory allocated
  from the given space.
1429*/
1430DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
1431
1432/*
1433 mspace_malloc_stats behaves as malloc_stats, but reports
1434 properties of the given space.
1435*/
1436DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);
1437
1438/*
1439 mspace_trim behaves as malloc_trim, but
1440 operates within the given space.
1441*/
1442DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
1443
1444/*
1445 An alias for mallopt.
1446*/
1447DLMALLOC_EXPORT int mspace_mallopt(int, int);
1448
1449#endif /* MSPACES */
1450
1451#ifdef __cplusplus
1452} /* end of extern "C" */
1453#endif /* __cplusplus */
1454
1455/*
1456 ========================================================================
1457 To make a fully customizable malloc.h header file, cut everything
1458 above this line, put into file malloc.h, edit to suit, and #include it
1459 on the next line, as well as in programs that use this malloc.
1460 ========================================================================
1461*/
1462
1463/* #include "malloc.h" */
1464
1465/*------------------------------ internal #includes ---------------------- */
1466
1467#ifdef _MSC_VER
1468#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1469#endif /* _MSC_VER */
1470#if !NO_MALLOC_STATS
1471#include <stdio.h> /* for printing in malloc_stats */
1472#endif /* NO_MALLOC_STATS */
1473#ifndef LACKS_ERRNO_H
1474#include <errno.h> /* for MALLOC_FAILURE_ACTION */
1475#endif /* LACKS_ERRNO_H */
1476#ifdef DEBUG
1477#if ABORT_ON_ASSERT_FAILURE
1478#undef assert
1479#define assert(x) if(!(x)) ABORT
1480#else /* ABORT_ON_ASSERT_FAILURE */
1481#include <assert.h>
1482#endif /* ABORT_ON_ASSERT_FAILURE */
1483#else /* DEBUG */
1484#ifndef assert
1485#define assert(x)
1486#endif
1487#define DEBUG 0
1488#endif /* DEBUG */
1489#if !defined(WIN32) && !defined(LACKS_TIME_H)
1490#include <time.h> /* for magic initialization */
1491#endif /* WIN32 */
1492#ifndef LACKS_STDLIB_H
1493#include <stdlib.h> /* for abort() */
1494#endif /* LACKS_STDLIB_H */
1495#ifndef LACKS_STRING_H
1496#include <string.h> /* for memset etc */
1497#endif /* LACKS_STRING_H */
1498#if USE_BUILTIN_FFS
1499#ifndef LACKS_STRINGS_H
1500#include <strings.h> /* for ffs */
1501#endif /* LACKS_STRINGS_H */
1502#endif /* USE_BUILTIN_FFS */
1503#if HAVE_MMAP
1504#ifndef LACKS_SYS_MMAN_H
1505/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
1506#if (defined(linux) && !defined(__USE_GNU))
1507#define __USE_GNU 1
1508#include <sys/mman.h> /* for mmap */
1509#undef __USE_GNU
1510#else
1511#include <sys/mman.h> /* for mmap */
1512#endif /* linux */
1513#endif /* LACKS_SYS_MMAN_H */
1514#ifndef LACKS_FCNTL_H
1515#include <fcntl.h>
1516#endif /* LACKS_FCNTL_H */
1517#endif /* HAVE_MMAP */
1518#ifndef LACKS_UNISTD_H
1519#include <unistd.h> /* for sbrk, sysconf */
1520#else /* LACKS_UNISTD_H */
1521#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1522extern void* sbrk(ptrdiff_t);
1523#endif /* FreeBSD etc */
1524#endif /* LACKS_UNISTD_H */
1525
1526/* Declarations for locking */
1527#if USE_LOCKS
1528#ifndef WIN32
1529#if defined (__SVR4) && defined (__sun) /* solaris */
1530#include <thread.h>
1531#elif !defined(LACKS_SCHED_H)
1532#include <sched.h>
1533#endif /* solaris or LACKS_SCHED_H */
1534#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
1535#include <pthread.h>
1536#endif /* USE_RECURSIVE_LOCKS ... */
1537#elif defined(_MSC_VER)
1538#ifndef _M_AMD64
1539/* These are already defined on AMD64 builds */
1540#ifdef __cplusplus
1541extern "C" {
1542#endif /* __cplusplus */
1543LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
1544LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
1545#ifdef __cplusplus
1546}
1547#endif /* __cplusplus */
1548#endif /* _M_AMD64 */
1549#pragma intrinsic (_InterlockedCompareExchange)
1550#pragma intrinsic (_InterlockedExchange)
1551#define interlockedcompareexchange _InterlockedCompareExchange
1552#define interlockedexchange _InterlockedExchange
1553#elif defined(WIN32) && defined(__GNUC__)
1554#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
1555#define interlockedexchange __sync_lock_test_and_set
1556#endif /* Win32 */
1557#else /* USE_LOCKS */
1558#endif /* USE_LOCKS */
1559
1560#ifndef LOCK_AT_FORK
1561#define LOCK_AT_FORK 0
1562#endif
1563
1564/* Declarations for bit scanning on win32 */
1565#if defined(_MSC_VER) && _MSC_VER>=1300
1566#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
1567#ifdef __cplusplus
1568extern "C" {
1569#endif /* __cplusplus */
1570unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1571unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1572#ifdef __cplusplus
1573}
1574#endif /* __cplusplus */
1575
1576#define BitScanForward _BitScanForward
1577#define BitScanReverse _BitScanReverse
1578#pragma intrinsic(_BitScanForward)
1579#pragma intrinsic(_BitScanReverse)
1580#endif /* BitScanForward */
1581#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
1582
1583#ifndef WIN32
1584#ifndef malloc_getpagesize
1585# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1586# ifndef _SC_PAGE_SIZE
1587# define _SC_PAGE_SIZE _SC_PAGESIZE
1588# endif
1589# endif
1590# ifdef _SC_PAGE_SIZE
1591# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1592# else
1593# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1594 extern int getpagesize();
1595# define malloc_getpagesize getpagesize()
1596# else
1597# ifdef WIN32 /* use supplied emulation of getpagesize */
1598# define malloc_getpagesize getpagesize()
1599# else
1600# ifndef LACKS_SYS_PARAM_H
1601# include <sys/param.h>
1602# endif
1603# ifdef EXEC_PAGESIZE
1604# define malloc_getpagesize EXEC_PAGESIZE
1605# else
1606# ifdef NBPG
1607# ifndef CLSIZE
1608# define malloc_getpagesize NBPG
1609# else
1610# define malloc_getpagesize (NBPG * CLSIZE)
1611# endif
1612# else
1613# ifdef NBPC
1614# define malloc_getpagesize NBPC
1615# else
1616# ifdef PAGESIZE
1617# define malloc_getpagesize PAGESIZE
1618# else /* just guess */
1619# define malloc_getpagesize ((size_t)4096U)
1620# endif
1621# endif
1622# endif
1623# endif
1624# endif
1625# endif
1626# endif
1627#endif
1628#endif
1629
1630/* ------------------- size_t and alignment properties -------------------- */
1631
1632/* The byte and bit size of a size_t */
1633#define SIZE_T_SIZE (sizeof(size_t))
1634#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1635
1636/* Some constants coerced to size_t */
1637/* Annoying but necessary to avoid errors on some platforms */
1638#define SIZE_T_ZERO ((size_t)0)
1639#define SIZE_T_ONE ((size_t)1)
1640#define SIZE_T_TWO ((size_t)2)
1641#define SIZE_T_FOUR ((size_t)4)
1642#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1643#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1644#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1645#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1646
1647/* The bit mask value corresponding to MALLOC_ALIGNMENT */
1648#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1649
1650/* True if address a has acceptable alignment */
1651#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1652
1653/* the number of bytes to offset an address to align it */
1654#define align_offset(A)\
1655 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1656 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
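/*
  A worked example, assuming the default 16-byte MALLOC_ALIGNMENT of a
  typical 64-bit build (so CHUNK_ALIGN_MASK == 15):

    is_aligned(0x1000)    -> true    (0x1000 & 15 == 0)
    is_aligned(0x1008)    -> false   (0x1008 & 15 == 8)
    align_offset(0x1008)  -> 8       ((16 - 8) & 15); 0x1008 + 8 == 0x1010
    align_offset(0x1010)  -> 0       (already aligned)
*/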
1657
1658/* -------------------------- MMAP preliminaries ------------------------- */
1659
1660/*
  If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
  checks to fail so the compiler optimizer can delete code rather than
  using so many "#if"s.
1664*/
1665
1666
1667/* MORECORE and MMAP must return MFAIL on failure */
1668#define MFAIL ((void*)(MAX_SIZE_T))
1669#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1670
1671#if HAVE_MMAP
1672
1673#ifndef WIN32
1674#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
1675#define MMAP_PROT (PROT_READ|PROT_WRITE)
1676#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1677#define MAP_ANONYMOUS MAP_ANON
1678#endif /* MAP_ANON */
1679#ifdef MAP_ANONYMOUS
1680#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1681#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1682#else /* MAP_ANONYMOUS */
1683/*
1684 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1685 is unlikely to be needed, but is supplied just in case.
1686*/
1687#define MMAP_FLAGS (MAP_PRIVATE)
1688static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1689#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
1690 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1691 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1692 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1693#endif /* MAP_ANONYMOUS */
1694
1695#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
1696
1697#else /* WIN32 */
1698
1699/* Win32 MMAP via VirtualAlloc */
1700static FORCEINLINE void* win32mmap(size_t size) {
1701 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1702 return (ptr != 0)? ptr: MFAIL;
1703}
1704
1705/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1706static FORCEINLINE void* win32direct_mmap(size_t size) {
1707 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1708 PAGE_READWRITE);
1709 return (ptr != 0)? ptr: MFAIL;
1710}
1711
/* This function supports releasing coalesced segments */
1713static FORCEINLINE int win32munmap(void* ptr, size_t size) {
1714 MEMORY_BASIC_INFORMATION minfo;
1715 char* cptr = (char*)ptr;
1716 while (size) {
1717 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1718 return -1;
1719 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1720 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1721 return -1;
1722 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1723 return -1;
1724 cptr += minfo.RegionSize;
1725 size -= minfo.RegionSize;
1726 }
1727 return 0;
1728}
1729
1730#define MMAP_DEFAULT(s) win32mmap(s)
1731#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
1732#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
1733#endif /* WIN32 */
1734#endif /* HAVE_MMAP */
1735
1736#if HAVE_MREMAP
1737#ifndef WIN32
1738#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1739#endif /* WIN32 */
1740#endif /* HAVE_MREMAP */
1741
1742/**
1743 * Define CALL_MORECORE
1744 */
1745#if HAVE_MORECORE
1746 #ifdef MORECORE
1747 #define CALL_MORECORE(S) MORECORE(S)
1748 #else /* MORECORE */
1749 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
1750 #endif /* MORECORE */
1751#else /* HAVE_MORECORE */
1752 #define CALL_MORECORE(S) MFAIL
1753#endif /* HAVE_MORECORE */
1754
1755/**
1756 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
1757 */
1758#if HAVE_MMAP
1759 #define USE_MMAP_BIT (SIZE_T_ONE)
1760
1761 #ifdef MMAP
1762 #define CALL_MMAP(s) MMAP(s)
1763 #else /* MMAP */
1764 #define CALL_MMAP(s) MMAP_DEFAULT(s)
1765 #endif /* MMAP */
1766 #ifdef MUNMAP
1767 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1768 #else /* MUNMAP */
1769 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
1770 #endif /* MUNMAP */
1771 #ifdef DIRECT_MMAP
1772 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1773 #else /* DIRECT_MMAP */
1774 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
1775 #endif /* DIRECT_MMAP */
1776#else /* HAVE_MMAP */
1777 #define USE_MMAP_BIT (SIZE_T_ZERO)
1778
1779 #define MMAP(s) MFAIL
1780 #define MUNMAP(a, s) (-1)
1781 #define DIRECT_MMAP(s) MFAIL
1782 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1783 #define CALL_MMAP(s) MMAP(s)
1784 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1785#endif /* HAVE_MMAP */
1786
1787/**
1788 * Define CALL_MREMAP
1789 */
1790#if HAVE_MMAP && HAVE_MREMAP
1791 #ifdef MREMAP
1792 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
1793 #else /* MREMAP */
1794 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
1795 #endif /* MREMAP */
1796#else /* HAVE_MMAP && HAVE_MREMAP */
1797 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1798#endif /* HAVE_MMAP && HAVE_MREMAP */
1799
/* mstate bit set if contiguous morecore disabled or failed */
1801#define USE_NONCONTIGUOUS_BIT (4U)
1802
1803/* segment bit set in create_mspace_with_base */
1804#define EXTERN_BIT (8U)
1805
1806
1807/* --------------------------- Lock preliminaries ------------------------ */
1808
1809/*
1810 When locks are defined, there is one global lock, plus
1811 one per-mspace lock.
1812
  The global lock ensures that mparams.magic and other unique
  mparams values are initialized only once. It also protects
  sequences of calls to MORECORE.  In many cases sys_alloc requires
  two calls that should not be interleaved with calls by other
  threads.  This does not protect against direct calls to MORECORE
  by other threads not using this lock, so there is still code to
  cope as best we can with interference.
1820
1821 Per-mspace locks surround calls to malloc, free, etc.
1822 By default, locks are simple non-reentrant mutexes.
1823
1824 Because lock-protected regions generally have bounded times, it is
1825 OK to use the supplied simple spinlocks. Spinlocks are likely to
1826 improve performance for lightly contended applications, but worsen
1827 performance under heavy contention.
1828
1829 If USE_LOCKS is > 1, the definitions of lock routines here are
1830 bypassed, in which case you will need to define the type MLOCK_T,
1831 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
1832 and TRY_LOCK. You must also declare a
1833 static MLOCK_T malloc_global_mutex = { initialization values };.
1834
1835*/
1836
1837#if !USE_LOCKS
1838#define USE_LOCK_BIT (0U)
1839#define INITIAL_LOCK(l) (0)
1840#define DESTROY_LOCK(l) (0)
1841#define ACQUIRE_MALLOC_GLOBAL_LOCK()
1842#define RELEASE_MALLOC_GLOBAL_LOCK()
1843
1844#else
1845#if USE_LOCKS > 1
1846/* ----------------------- User-defined locks ------------------------ */
1847/* Define your own lock implementation here */
1848/* #define INITIAL_LOCK(lk) ... */
1849/* #define DESTROY_LOCK(lk) ... */
1850/* #define ACQUIRE_LOCK(lk) ... */
1851/* #define RELEASE_LOCK(lk) ... */
1852/* #define TRY_LOCK(lk) ... */
1853/* static MLOCK_T malloc_global_mutex = ... */
1854
1855#elif USE_SPIN_LOCKS
1856
1857/* First, define CAS_LOCK and CLEAR_LOCK on ints */
1858/* Note CAS_LOCK defined to return 0 on success */
1859
1860#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
1861#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
1862#define CLEAR_LOCK(sl) __sync_lock_release(sl)
1863
1864#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
1865/* Custom spin locks for older gcc on x86 */
1866static FORCEINLINE int x86_cas_lock(int *sl) {
1867 int ret;
1868 int val = 1;
1869 int cmp = 0;
1870 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
1871 : "=a" (ret)
1872 : "r" (val), "m" (*(sl)), "0"(cmp)
1873 : "memory", "cc");
1874 return ret;
1875}
1876
1877static FORCEINLINE void x86_clear_lock(int* sl) {
1878 assert(*sl != 0);
1879 int prev = 0;
1880 int ret;
1881 __asm__ __volatile__ ("lock; xchgl %0, %1"
1882 : "=r" (ret)
1883 : "m" (*(sl)), "0"(prev)
1884 : "memory");
1885}
1886
1887#define CAS_LOCK(sl) x86_cas_lock(sl)
1888#define CLEAR_LOCK(sl) x86_clear_lock(sl)
1889
1890#else /* Win32 MSC */
1891#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
1892#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
1893
#endif /* ... gcc spin locks ... */
1895
1896/* How to yield for a spin lock */
1897#define SPINS_PER_YIELD 63
1898#if defined(_MSC_VER)
1899#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
1900#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
1901#elif defined (__SVR4) && defined (__sun) /* solaris */
1902#define SPIN_LOCK_YIELD thr_yield();
1903#elif !defined(LACKS_SCHED_H)
1904#define SPIN_LOCK_YIELD sched_yield();
1905#else
1906#define SPIN_LOCK_YIELD
1907#endif /* ... yield ... */
1908
1909#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
1910/* Plain spin locks use single word (embedded in malloc_states) */
1911static int spin_acquire_lock(volatile long *sl) {
1912 int spins = 0;
1913 while (*sl != 0 || CAS_LOCK(sl)) {
1914 if ((++spins & SPINS_PER_YIELD) == 0) {
1915 SPIN_LOCK_YIELD;
1916 }
1917 }
1918 return 0;
1919}
1920
1921#define MLOCK_T volatile long
1922#define TRY_LOCK(sl) !CAS_LOCK(sl)
1923#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
1924#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
1925#define INITIAL_LOCK(sl) (*sl = 0)
1926#define DESTROY_LOCK(sl) (0)
1927static MLOCK_T malloc_global_mutex = 0;
1928
1929#else /* USE_RECURSIVE_LOCKS */
1930/* types for lock owners */
1931#ifdef WIN32
1932#define THREAD_ID_T DWORD
1933#define CURRENT_THREAD GetCurrentThreadId()
1934#define EQ_OWNER(X,Y) ((X) == (Y))
1935#else
1936/*
1937 Note: the following assume that pthread_t is a type that can be
1938 initialized to (casted) zero. If this is not the case, you will need to
1939 somehow redefine these or not use spin locks.
1940*/
1941#define THREAD_ID_T pthread_t
1942#define CURRENT_THREAD pthread_self()
1943#define EQ_OWNER(X,Y) pthread_equal(X, Y)
1944#endif
1945
1946struct malloc_recursive_lock {
1947 int sl;
1948 unsigned int c;
1949 THREAD_ID_T threadid;
1950};
1951
1952#define MLOCK_T struct malloc_recursive_lock
1953static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
1954
1955static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
1956 assert(lk->sl != 0);
1957 if (--lk->c == 0) {
1958 CLEAR_LOCK(&lk->sl);
1959 }
1960}
1961
1962static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
1963 THREAD_ID_T mythreadid = CURRENT_THREAD;
1964 int spins = 0;
1965 for (;;) {
1966 if (*((volatile int *)(&lk->sl)) == 0) {
1967 if (!CAS_LOCK(&lk->sl)) {
1968 lk->threadid = mythreadid;
1969 lk->c = 1;
1970 return 0;
1971 }
1972 }
1973 else if (EQ_OWNER(lk->threadid, mythreadid)) {
1974 ++lk->c;
1975 return 0;
1976 }
1977 if ((++spins & SPINS_PER_YIELD) == 0) {
1978 SPIN_LOCK_YIELD;
1979 }
1980 }
1981}
1982
1983static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
1984 THREAD_ID_T mythreadid = CURRENT_THREAD;
1985 if (*((volatile int *)(&lk->sl)) == 0) {
1986 if (!CAS_LOCK(&lk->sl)) {
1987 lk->threadid = mythreadid;
1988 lk->c = 1;
1989 return 1;
1990 }
1991 }
1992 else if (EQ_OWNER(lk->threadid, mythreadid)) {
1993 ++lk->c;
1994 return 1;
1995 }
1996 return 0;
1997}
1998
1999#define RELEASE_LOCK(lk) recursive_release_lock(lk)
2000#define TRY_LOCK(lk) recursive_try_lock(lk)
2001#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
2002#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
2003#define DESTROY_LOCK(lk) (0)
2004#endif /* USE_RECURSIVE_LOCKS */
2005
2006#elif defined(WIN32) /* Win32 critical sections */
2007#define MLOCK_T CRITICAL_SECTION
2008#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
2009#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
2010#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
2011#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
2012#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
2013#define NEED_GLOBAL_LOCK_INIT
2014
2015static MLOCK_T malloc_global_mutex;
2016static volatile LONG malloc_global_mutex_status;
2017
2018/* Use spin loop to initialize global lock */
2019static void init_malloc_global_mutex() {
2020 for (;;) {
2021 long stat = malloc_global_mutex_status;
2022 if (stat > 0)
2023 return;
    /* transition to < 0 while initializing, then to > 0 */
2025 if (stat == 0 &&
2026 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
2027 InitializeCriticalSection(&malloc_global_mutex);
2028 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
2029 return;
2030 }
2031 SleepEx(0, FALSE);
2032 }
2033}
2034
2035#else /* pthreads-based locks */
2036#define MLOCK_T pthread_mutex_t
2037#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
2038#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
2039#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
2040#define INITIAL_LOCK(lk) pthread_init_lock(lk)
2041#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
2042
2043#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
2044/* Cope with old-style linux recursive lock initialization by adding */
2045/* skipped internal declaration from pthread.h */
2046extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
2047 int __kind));
2048#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
2049#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
2050#endif /* USE_RECURSIVE_LOCKS ... */
2051
2052static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
2053
2054static int pthread_init_lock (MLOCK_T *lk) {
2055 pthread_mutexattr_t attr;
2056 if (pthread_mutexattr_init(&attr)) return 1;
2057#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
2058 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
2059#endif
2060 if (pthread_mutex_init(lk, &attr)) return 1;
2061 if (pthread_mutexattr_destroy(&attr)) return 1;
2062 return 0;
2063}
2064
2065#endif /* ... lock types ... */
2066
2067/* Common code for all lock types */
2068#define USE_LOCK_BIT (2U)
2069
2070#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
2071#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
2072#endif
2073
2074#ifndef RELEASE_MALLOC_GLOBAL_LOCK
2075#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
2076#endif
2077
2078#endif /* USE_LOCKS */
2079
2080/* ----------------------- Chunk representations ------------------------ */
2081
2082/*
2083 (The following includes lightly edited explanations by Colin Plumb.)
2084
2085 The malloc_chunk declaration below is misleading (but accurate and
2086 necessary). It declares a "view" into memory allowing access to
2087 necessary fields at known offsets from a given base.
2088
2089 Chunks of memory are maintained using a `boundary tag' method as
2090 originally described by Knuth. (See the paper by Paul Wilson
2091 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
2092 techniques.) Sizes of free chunks are stored both in the front of
2093 each chunk and at the end. This makes consolidating fragmented
2094 chunks into bigger chunks fast. The head fields also hold bits
2095 representing whether chunks are free or in use.
2096
2097 Here are some pictures to make it clearer. They are "exploded" to
2098 show that the state of a chunk can be thought of as extending from
2099 the high 31 bits of the head field of its header through the
2100 prev_foot and PINUSE_BIT bit of the following chunk header.
2101
2102 A chunk that's in use looks like:
2103
2104 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2105 | Size of previous chunk (if P = 0) |
2106 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2107 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2108 | Size of this chunk 1| +-+
2109 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2110 | |
2111 +- -+
2112 | |
2113 +- -+
2114 | :
2115 +- size - sizeof(size_t) available payload bytes -+
2116 : |
2117 chunk-> +- -+
2118 | |
2119 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2120 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
2121 | Size of next chunk (may or may not be in use) | +-+
2122 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2123
2124 And if it's free, it looks like this:
2125
2126 chunk-> +- -+
2127 | User payload (must be in use, or we would have merged!) |
2128 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2129 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
2130 | Size of this chunk 0| +-+
2131 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2132 | Next pointer |
2133 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2134 | Prev pointer |
2135 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2136 | :
2137 +- size - sizeof(struct chunk) unused bytes -+
2138 : |
2139 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2140 | Size of this chunk |
2141 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2142 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
2143 | Size of next chunk (must be in use, or we would have merged)| +-+
2144 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2145 | :
2146 +- User payload -+
2147 : |
2148 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2149 |0|
2150 +-+
2151 Note that since we always merge adjacent free chunks, the chunks
2152 adjacent to a free chunk must be in use.
2153
2154 Given a pointer to a chunk (which can be derived trivially from the
2155 payload pointer) we can, in O(1) time, find out whether the adjacent
2156 chunks are free, and if so, unlink them from the lists that they
2157 are on and merge them with the current chunk.
2158
2159 Chunks always begin on even word boundaries, so the mem portion
2160 (which is returned to the user) is also on an even word boundary, and
2161 thus at least double-word aligned.
2162
2163 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
2164 chunk size (which is always a multiple of two words), is an in-use
2165 bit for the *previous* chunk. If that bit is *clear*, then the
2166 word before the current chunk size contains the previous chunk
2167 size, and can be used to find the front of the previous chunk.
2168 The very first chunk allocated always has this bit set, preventing
2169 access to non-existent (or non-owned) memory. If pinuse is set for
2170 any given chunk, then you CANNOT determine the size of the
2171 previous chunk, and might even get a memory addressing fault when
2172 trying to do so.
2173
2174 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
2175 the chunk size redundantly records whether the current chunk is
2176 inuse (unless the chunk is mmapped). This redundancy enables usage
2177 checks within free and realloc, and reduces indirection when freeing
2178 and consolidating chunks.
2179
2180 Each freshly allocated chunk must have both cinuse and pinuse set.
2181 That is, each allocated chunk borders either a previously allocated
2182 and still in-use chunk, or the base of its memory arena. This is
2183 ensured by making all allocations from the `lowest' part of any
2184 found chunk. Further, no free chunk physically borders another one,
2185 so each free chunk is known to be preceded and followed by either
2186 inuse chunks or the ends of memory.
2187
2188 Note that the `foot' of the current chunk is actually represented
2189 as the prev_foot of the NEXT chunk. This makes it easier to
2190 deal with alignments etc but can be very confusing when trying
2191 to extend or adapt this code.
2192
2193 The exceptions to all this are
2194
2195 1. The special chunk `top' is the top-most available chunk (i.e.,
2196 the one bordering the end of available memory). It is treated
2197 specially. Top is never included in any bin, is used only if
2198 no other chunk is available, and is released back to the
2199 system if it is very large (see M_TRIM_THRESHOLD). In effect,
2200 the top chunk is treated as larger (and thus less well
2201 fitting) than any other available chunk. The top chunk
2202 doesn't update its trailing size field since there is no next
2203 contiguous chunk that would have to index off it. However,
2204 space is still allocated for it (TOP_FOOT_SIZE) to enable
2205 separation or merging when space is extended.
2206
  2. Chunks allocated via mmap have both cinuse and pinuse bits
2208 cleared in their head fields. Because they are allocated
2209 one-by-one, each must carry its own prev_foot field, which is
2210 also used to hold the offset this chunk has within its mmapped
2211 region, which is needed to preserve alignment. Each mmapped
2212 chunk is trailed by the first two fields of a fake next-chunk
2213 for sake of usage checks.
2214
2215*/
2216
2217struct malloc_chunk {
2218 size_t prev_foot; /* Size of previous chunk (if free). */
2219 size_t head; /* Size and inuse bits. */
2220 struct malloc_chunk* fd; /* double links -- used only if free. */
2221 struct malloc_chunk* bk;
2222};
2223
2224typedef struct malloc_chunk mchunk;
2225typedef struct malloc_chunk* mchunkptr;
2226typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
2227typedef unsigned int bindex_t; /* Described below */
2228typedef unsigned int binmap_t; /* Described below */
2229typedef unsigned int flag_t; /* The type of various bit flag sets */
2230
2231/* ------------------- Chunks sizes and alignments ----------------------- */
2232
2233#define MCHUNK_SIZE (sizeof(mchunk))
2234
2235#if FOOTERS
2236#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2237#else /* FOOTERS */
2238#define CHUNK_OVERHEAD (SIZE_T_SIZE)
2239#endif /* FOOTERS */
2240
2241/* MMapped chunks need a second word of overhead ... */
2242#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2243/* ... and additional padding for fake next-chunk at foot */
2244#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
2245
2246/* The smallest size we can malloc is an aligned minimal chunk */
2247#define MIN_CHUNK_SIZE\
2248 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2249
2250/* conversion from malloc headers to user pointers, and back */
2251#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
2252#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
2253/* chunk associated with aligned address A */
2254#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
2255
2256/* Bounds on request (not chunk) sizes. */
2257#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
2258#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
2259
2260/* pad request bytes into a usable size */
2261#define pad_request(req) \
2262 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2263
2264/* pad request, checking for minimum (but not maximum) */
2265#define request2size(req) \
2266 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
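/*
  A worked example, assuming a 64-bit build with FOOTERS disabled, so
  SIZE_T_SIZE == 8, MALLOC_ALIGNMENT == 16, CHUNK_OVERHEAD == 8,
  MIN_CHUNK_SIZE == 32 and MIN_REQUEST == 23:

    request2size(1)   -> 32   (below MIN_REQUEST, rounded up to MIN_CHUNK_SIZE)
    request2size(24)  -> 32   ((24 + 8 + 15) & ~15)
    request2size(40)  -> 48   ((40 + 8 + 15) & ~15)
*/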
2267
2268
2269/* ------------------ Operations on head and foot fields ----------------- */
2270
2271/*
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  use, unless mmapped, in which case both bits are cleared.
2275
2276 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
2277*/
2278
2279#define PINUSE_BIT (SIZE_T_ONE)
2280#define CINUSE_BIT (SIZE_T_TWO)
2281#define FLAG4_BIT (SIZE_T_FOUR)
2282#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
2283#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
2284
2285/* Head value for fenceposts */
2286#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
2287
2288/* extraction of fields from head words */
2289#define cinuse(p) ((p)->head & CINUSE_BIT)
2290#define pinuse(p) ((p)->head & PINUSE_BIT)
2291#define flag4inuse(p) ((p)->head & FLAG4_BIT)
2292#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
2293#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
2294
2295#define chunksize(p) ((p)->head & ~(FLAG_BITS))
2296
2297#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
2298#define set_flag4(p) ((p)->head |= FLAG4_BIT)
2299#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
2300
2301/* Treat space at ptr +/- offset as a chunk */
2302#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2303#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2304
2305/* Ptr to next or previous physical malloc_chunk. */
2306#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2307#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2308
2309/* extract next chunk's pinuse bit */
2310#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
2311
2312/* Get/set size at footer */
2313#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2314#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2315
2316/* Set size, pinuse bit, and foot */
2317#define set_size_and_pinuse_of_free_chunk(p, s)\
2318 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2319
2320/* Set size, pinuse bit, foot, and clear next pinuse */
2321#define set_free_with_pinuse(p, s, n)\
2322 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2323
2324/* Get the internal overhead associated with chunk p */
2325#define overhead_for(p)\
2326 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2327
2328/* Return true if malloced space is not necessarily cleared */
2329#if MMAP_CLEARS
2330#define calloc_must_clear(p) (!is_mmapped(p))
2331#else /* MMAP_CLEARS */
2332#define calloc_must_clear(p) (1)
2333#endif /* MMAP_CLEARS */
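/*
  A worked example of the head-field operations above, for a
  hypothetical non-mmapped chunk p whose head is (0x40|CINUSE_BIT|PINUSE_BIT):

    chunksize(p)   -> 0x40
    cinuse(p)      -> nonzero   (this chunk is in use)
    pinuse(p)      -> nonzero   (the previous chunk is in use)
    is_inuse(p)    -> true
    is_mmapped(p)  -> false
    next_chunk(p)  -> (mchunkptr)((char*)p + 0x40)

  For a free chunk of size s, set_size_and_pinuse_of_free_chunk(p, s)
  stores s|PINUSE_BIT in p->head and records s in the prev_foot of the
  chunk at (char*)p + s.
*/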
2334
2335/* ---------------------- Overlaid data structures ----------------------- */
2336
2337/*
2338 When chunks are not in use, they are treated as nodes of either
2339 lists or trees.
2340
2341 "Small" chunks are stored in circular doubly-linked lists, and look
2342 like this:
2343
2344 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2345 | Size of previous chunk |
2346 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2347 `head:' | Size of chunk, in bytes |P|
2348 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2349 | Forward pointer to next chunk in list |
2350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2351 | Back pointer to previous chunk in list |
2352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2353 | Unused space (may be 0 bytes long) .
2354 . .
2355 . |
2356nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2357 `foot:' | Size of chunk, in bytes |
2358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2359
2360 Larger chunks are kept in a form of bitwise digital trees (aka
2361 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
2362 free chunks greater than 256 bytes, their size doesn't impose any
2363 constraints on user chunk sizes. Each node looks like:
2364
2365 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2366 | Size of previous chunk |
2367 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2368 `head:' | Size of chunk, in bytes |P|
2369 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2370 | Forward pointer to next chunk of same size |
2371 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2372 | Back pointer to previous chunk of same size |
2373 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2374 | Pointer to left child (child[0]) |
2375 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2376 | Pointer to right child (child[1]) |
2377 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2378 | Pointer to parent |
2379 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2380 | bin index of this chunk |
2381 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2382 | Unused space .
2383 . |
2384nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2385 `foot:' | Size of chunk, in bytes |
2386 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2387
2388 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
2389 of the same size are arranged in a circularly-linked list, with only
2390 the oldest chunk (the next to be used, in our FIFO ordering)
2391 actually in the tree. (Tree members are distinguished by a non-null
  parent pointer.)  If a chunk with the same size as an existing node
2393 is inserted, it is linked off the existing node using pointers that
2394 work in the same way as fd/bk pointers of small chunks.
2395
2396 Each tree contains a power of 2 sized range of chunk sizes (the
  smallest is 0x100 <= x < 0x180), which is divided in half at each
2398 tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
2400 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
2401 done by inspecting individual bits.
2402
2403 Using these rules, each node's left subtree contains all smaller
2404 sizes than its right subtree. However, the node at the root of each
2405 subtree has no particular ordering relationship to either. (The
2406 dividing line between the subtree sizes is based on trie relation.)
2407 If we remove the last chunk of a given size from the interior of the
2408 tree, we need to replace it with a leaf node. The tree ordering
2409 rules permit a node to be replaced by any leaf below it.
2410
2411 The smallest chunk in a tree (a common operation in a best-fit
2412 allocator) can be found by walking a path to the leftmost leaf in
2413 the tree. Unlike a usual binary tree, where we follow left child
2414 pointers until we reach a null, here we follow the right child
2415 pointer any time the left one is null, until we reach a leaf with
2416 both child pointers null. The smallest chunk in the tree will be
2417 somewhere along that path.
2418
2419 The worst case number of steps to add, find, or remove a node is
2420 bounded by the number of bits differentiating chunks within
2421 bins. Under current bin calculations, this ranges from 6 up to 21
2422 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
2423 is of course much better.
2424*/
2425
2426struct malloc_tree_chunk {
2427 /* The first four fields must be compatible with malloc_chunk */
2428 size_t prev_foot;
2429 size_t head;
2430 struct malloc_tree_chunk* fd;
2431 struct malloc_tree_chunk* bk;
2432
2433 struct malloc_tree_chunk* child[2];
2434 struct malloc_tree_chunk* parent;
2435 bindex_t index;
2436};
2437
2438typedef struct malloc_tree_chunk tchunk;
2439typedef struct malloc_tree_chunk* tchunkptr;
2440typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
2441
2442/* A little helper macro for trees */
2443#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
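/*
  A hedged sketch (not a helper defined in this file) of the walk
  described in the comment above: follow left children when present,
  otherwise right, tracking the smallest size seen along the path.

    tchunkptr t = root;          // hypothetical non-null treebin root
    tchunkptr least = t;
    while (t != 0) {
      if (chunksize(t) < chunksize(least))
        least = t;
      t = leftmost_child(t);     // left child if any, else right child
    }
    // least now points to the smallest chunk in this tree
*/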
2444
2445/* ----------------------------- Segments -------------------------------- */
2446
2447/*
2448 Each malloc space may include non-contiguous segments, held in a
2449 list headed by an embedded malloc_segment record representing the
2450 top-most space. Segments also include flags holding properties of
2451 the space. Large chunks that are directly allocated by mmap are not
2452 included in this list. They are instead independently created and
2453 destroyed without otherwise keeping track of them.
2454
2455 Segment management mainly comes into play for spaces allocated by
2456 MMAP. Any call to MMAP might or might not return memory that is
2457 adjacent to an existing segment. MORECORE normally contiguously
2458 extends the current space, so this space is almost always adjacent,
2459 which is simpler and faster to deal with. (This is why MORECORE is
2460 used preferentially to MMAP when both are available -- see
2461 sys_alloc.) When allocating using MMAP, we don't use any of the
2462 hinting mechanisms (inconsistently) supported in various
2463 implementations of unix mmap, or distinguish reserving from
2464 committing memory. Instead, we just ask for space, and exploit
2465 contiguity when we get it. It is probably possible to do
2466 better than this on some systems, but no general scheme seems
2467 to be significantly better.
2468
2469 Management entails a simpler variant of the consolidation scheme
2470 used for chunks to reduce fragmentation -- new adjacent memory is
2471 normally prepended or appended to an existing segment. However,
2472 there are limitations compared to chunk consolidation that mostly
2473 reflect the fact that segment processing is relatively infrequent
2474 (occurring only when getting memory from system) and that we
2475 don't expect to have huge numbers of segments:
2476
2477 * Segments are not indexed, so traversal requires linear scans. (It
2478 would be possible to index these, but is not worth the extra
2479 overhead and complexity for most programs on most platforms.)
2480 * New segments are only appended to old ones when holding top-most
2481 memory; if they cannot be prepended to others, they are held in
2482 different segments.
2483
2484 Except for the top-most segment of an mstate, each segment record
2485 is kept at the tail of its segment. Segments are added by pushing
2486 segment records onto the list headed by &mstate.seg for the
2487 containing mstate.
2488
2489 Segment flags control allocation/merge/deallocation policies:
2490 * If EXTERN_BIT set, then we did not allocate this segment,
2491 and so should not try to deallocate or merge with others.
2492 (This currently holds only for the initial segment passed
2493 into create_mspace_with_base.)
2494 * If USE_MMAP_BIT set, the segment may be merged with
2495 other surrounding mmapped segments and trimmed/de-allocated
2496 using munmap.
2497 * If neither bit is set, then the segment was obtained using
2498 MORECORE so can be merged with surrounding MORECORE'd segments
2499 and deallocated/trimmed using MORECORE with negative arguments.
2500*/
2501
2502struct malloc_segment {
2503 char* base; /* base address */
2504 size_t size; /* allocated size */
2505 struct malloc_segment* next; /* ptr to next segment */
2506 flag_t sflags; /* mmap and extern flag */
2507};
2508
2509#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
2510#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2511
2512typedef struct malloc_segment msegment;
2513typedef struct malloc_segment* msegmentptr;
2514
2515/* ---------------------------- malloc_state ----------------------------- */
2516
2517/*
2518 A malloc_state holds all of the bookkeeping for a space.
2519 The main fields are:
2520
2521 Top
2522 The topmost chunk of the currently active segment. Its size is
2523 cached in topsize. The actual size of topmost space is
2524 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2525 fenceposts and segment records if necessary when getting more
2526 space from the system. The size at which to autotrim top is
2527 cached from mparams in trim_check, except that it is disabled if
2528 an autotrim fails.
2529
2530 Designated victim (dv)
2531 This is the preferred chunk for servicing small requests that
2532 don't have exact fits. It is normally the chunk split off most
2533 recently to service another small request. Its size is cached in
2534 dvsize. The link fields of this chunk are not maintained since it
2535 is not kept in a bin.
2536
2537 SmallBins
2538 An array of bin headers for free chunks. These bins hold chunks
2539 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2540 chunks of all the same size, spaced 8 bytes apart. To simplify
2541 use in double-linked lists, each bin header acts as a malloc_chunk
2542 pointing to the real first node, if it exists (else pointing to
2543 itself). This avoids special-casing for headers. But to avoid
2544 waste, we allocate only the fd/bk pointers of bins, and then use
2545 repositioning tricks to treat these as the fields of a chunk.
2546
2547 TreeBins
2548 Treebins are pointers to the roots of trees holding a range of
2549 sizes. There are 2 equally spaced treebins for each power of two
    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2551 larger.
2552
2553 Bin maps
2554 There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap"). Each bin sets its bit when non-empty, and
2556 clears the bit when empty. Bit operations are then used to avoid
2557 bin-by-bin searching -- nearly all "search" is done without ever
2558 looking at bins that won't be selected. The bit maps
    conservatively use 32 bits per map word, even on 64-bit systems.
2560 For a good description of some of the bit-based techniques used
2561 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2562 supplement at http://hackersdelight.org/). Many of these are
2563 intended to reduce the branchiness of paths through malloc etc, as
2564 well as to reduce the number of memory locations read or written.
2565
2566 Segments
2567 A list of segments headed by an embedded malloc_segment record
2568 representing the initial space.
2569
2570 Address check support
2571 The least_addr field is the least address ever obtained from
2572 MORECORE or MMAP. Attempted frees and reallocs of any address less
2573 than this are trapped (unless INSECURE is defined).
2574
2575 Magic tag
2576 A cross-check field that should always hold same value as mparams.magic.
2577
2578 Max allowed footprint
2579 The maximum allowed bytes to allocate from system (zero means no limit)
2580
2581 Flags
2582 Bits recording whether to use MMAP, locks, or contiguous MORECORE
2583
2584 Statistics
2585 Each space keeps track of current and maximum system memory
2586 obtained via MORECORE or MMAP.
2587
2588 Trim support
2589 Fields holding the amount of unused topmost memory that should trigger
2590 trimming, and a counter to force periodic scanning to release unused
2591 non-topmost segments.
2592
2593 Locking
2594 If USE_LOCKS is defined, the "mutex" lock is acquired and released
2595 around every public call using this mspace.
2596
2597 Extension support
2598 A void* pointer and a size_t field that can be used to help implement
2599 extensions to this malloc.
2600*/
2601
2602/* Bin types, widths and sizes */
2603#define NSMALLBINS (32U)
2604#define NTREEBINS (32U)
2605#define SMALLBIN_SHIFT (3U)
2606#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2607#define TREEBIN_SHIFT (8U)
2608#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2609#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2610#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2611
2612struct malloc_state {
2613 binmap_t smallmap;
2614 binmap_t treemap;
2615 size_t dvsize;
2616 size_t topsize;
2617 char* least_addr;
2618 mchunkptr dv;
2619 mchunkptr top;
2620 size_t trim_check;
2621 size_t release_checks;
2622 size_t magic;
2623 mchunkptr smallbins[(NSMALLBINS+1)*2];
2624 tbinptr treebins[NTREEBINS];
2625 size_t footprint;
2626 size_t max_footprint;
2627 size_t footprint_limit; /* zero means no limit */
2628 flag_t mflags;
2629#if USE_LOCKS
2630 MLOCK_T mutex; /* locate lock among fields that rarely change */
2631#endif /* USE_LOCKS */
2632 msegment seg;
2633 void* extp; /* Unused but available for extensions */
2634 size_t exts;
2635};
2636
2637typedef struct malloc_state* mstate;
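/*
  A hedged sketch of the bin-map technique mentioned in the comment
  above (the helper logic here is illustrative, not the allocator's
  own): for an mstate m, find the first non-empty small bin at or above
  index i without scanning bin-by-bin.

    binmap_t map = m->smallmap;                   // bit b set => bin b non-empty
    binmap_t eligible = map & (~(binmap_t)0 << i);
    if (eligible != 0) {
      binmap_t leastbit = eligible & (0U - eligible);  // isolate lowest set bit
      // a bit-scan of leastbit yields the bin index to allocate from
    }
*/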
2638
2639/* ------------- Global malloc_state and malloc_params ------------------- */
2640
2641/*
2642 malloc_params holds global properties, including those that can be
2643 dynamically set using mallopt. There is a single instance, mparams,
2644 initialized in init_mparams. Note that the non-zeroness of "magic"
2645 also serves as an initialization flag.
2646*/
2647
2648struct malloc_params {
2649 size_t magic;
2650 size_t page_size;
2651 size_t granularity;
2652 size_t mmap_threshold;
2653 size_t trim_threshold;
2654 flag_t default_mflags;
2655};
2656
2657static struct malloc_params mparams;
2658
2659/* Ensure mparams initialized */
2660#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
2661
2662#if !ONLY_MSPACES
2663
2664/* The global malloc_state used for all non-"mspace" calls */
2665static struct malloc_state _gm_;
2666#define gm (&_gm_)
2667#define is_global(M) ((M) == &_gm_)
2668
2669#endif /* !ONLY_MSPACES */
2670
2671#define is_initialized(M) ((M)->top != 0)
2672
2673/* -------------------------- system alloc setup ------------------------- */
2674
2675/* Operations on mflags */
2676
2677#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2678#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2679#if USE_LOCKS
2680#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2681#else
2682#define disable_lock(M)
2683#endif
2684
2685#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2686#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2687#if HAVE_MMAP
2688#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2689#else
2690#define disable_mmap(M)
2691#endif
2692
2693#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2694#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2695
2696#define set_lock(M,L)\
2697 ((M)->mflags = (L)?\
2698 ((M)->mflags | USE_LOCK_BIT) :\
2699 ((M)->mflags & ~USE_LOCK_BIT))
2700
2701/* page-align a size */
2702#define page_align(S)\
2703 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2704
2705/* granularity-align a size */
2706#define granularity_align(S)\
2707 (((S) + (mparams.granularity - SIZE_T_ONE))\
2708 & ~(mparams.granularity - SIZE_T_ONE))
2709
2710
2711/* For mmap, use granularity alignment on windows, else page-align */
2712#ifdef WIN32
2713#define mmap_align(S) granularity_align(S)
2714#else
2715#define mmap_align(S) page_align(S)
2716#endif
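/*
  A worked example, assuming mparams.page_size == 4096:

    page_align(1)     -> 4096
    page_align(4096)  -> 4096
    page_align(4097)  -> 8192

  granularity_align behaves the same way using mparams.granularity, and
  on Windows mmap_align uses that coarser granularity.
*/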
2717
/* For sys_alloc, enough padding to ensure a malloc request can be serviced on success */
2719#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
2720
2721#define is_page_aligned(S)\
2722 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2723#define is_granularity_aligned(S)\
2724 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2725
2726/* True if segment S holds address A */
2727#define segment_holds(S, A)\
2728 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2729
2730/* Return segment holding given address */
2731static msegmentptr segment_holding(mstate m, char* addr) {
2732 msegmentptr sp = &m->seg;
2733 for (;;) {
2734 if (addr >= sp->base && addr < sp->base + sp->size)
2735 return sp;
2736 if ((sp = sp->next) == 0)
2737 return 0;
2738 }
2739}
2740
2741/* Return true if segment contains a segment link */
2742static int has_segment_link(mstate m, msegmentptr ss) {
2743 msegmentptr sp = &m->seg;
2744 for (;;) {
2745 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2746 return 1;
2747 if ((sp = sp->next) == 0)
2748 return 0;
2749 }
2750}
2751
2752#ifndef MORECORE_CANNOT_TRIM
2753#define should_trim(M,s) ((s) > (M)->trim_check)
2754#else /* MORECORE_CANNOT_TRIM */
2755#define should_trim(M,s) (0)
2756#endif /* MORECORE_CANNOT_TRIM */
2757
2758/*
2759 TOP_FOOT_SIZE is padding at the end of a segment, including space
2760 that may be needed to place segment records and fenceposts when new
2761 noncontiguous segments are added.
2762*/
2763#define TOP_FOOT_SIZE\
2764 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2765
2766
2767/* ------------------------------- Hooks -------------------------------- */
2768
2769/*
2770 PREACTION should be defined to return 0 on success, and nonzero on
2771 failure. If you are not using locking, you can redefine these to do
2772 anything you like.
2773*/
2774
2775#if USE_LOCKS
2776#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2777#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2778#else /* USE_LOCKS */
2779
2780#ifndef PREACTION
2781#define PREACTION(M) (0)
2782#endif /* PREACTION */
2783
2784#ifndef POSTACTION
2785#define POSTACTION(M)
2786#endif /* POSTACTION */
2787
2788#endif /* USE_LOCKS */
2789
2790/*
2791 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2792 USAGE_ERROR_ACTION is triggered on detected bad frees and
2793 reallocs. The argument p is an address that might have triggered the
2794 fault. It is ignored by the two predefined actions, but might be
2795 useful in custom actions that try to help diagnose errors.
2796*/
2797
2798#if PROCEED_ON_ERROR
2799
2800/* A count of the number of corruption errors causing resets */
2801int malloc_corruption_error_count;
2802
2803/* default corruption action */
2804static void reset_on_error(mstate m);
2805
2806#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2807#define USAGE_ERROR_ACTION(m, p)
2808
2809#else /* PROCEED_ON_ERROR */
2810
2811#ifndef CORRUPTION_ERROR_ACTION
2812#define CORRUPTION_ERROR_ACTION(m) ABORT
2813#endif /* CORRUPTION_ERROR_ACTION */
2814
2815#ifndef USAGE_ERROR_ACTION
2816#define USAGE_ERROR_ACTION(m,p) ABORT
2817#endif /* USAGE_ERROR_ACTION */
2818
2819#endif /* PROCEED_ON_ERROR */
2820
2821
2822/* -------------------------- Debugging setup ---------------------------- */
2823
2824#if ! DEBUG
2825
2826#define check_free_chunk(M,P)
2827#define check_inuse_chunk(M,P)
2828#define check_malloced_chunk(M,P,N)
2829#define check_mmapped_chunk(M,P)
2830#define check_malloc_state(M)
2831#define check_top_chunk(M,P)
2832
2833#else /* DEBUG */
2834#define check_free_chunk(M,P) do_check_free_chunk(M,P)
2835#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2836#define check_top_chunk(M,P) do_check_top_chunk(M,P)
2837#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2838#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2839#define check_malloc_state(M) do_check_malloc_state(M)
2840
2841static void do_check_any_chunk(mstate m, mchunkptr p);
2842static void do_check_top_chunk(mstate m, mchunkptr p);
2843static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2844static void do_check_inuse_chunk(mstate m, mchunkptr p);
2845static void do_check_free_chunk(mstate m, mchunkptr p);
2846static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
2847static void do_check_tree(mstate m, tchunkptr t);
2848static void do_check_treebin(mstate m, bindex_t i);
2849static void do_check_smallbin(mstate m, bindex_t i);
2850static void do_check_malloc_state(mstate m);
2851static int bin_find(mstate m, mchunkptr x);
2852static size_t traverse_and_check(mstate m);
2853#endif /* DEBUG */
2854
2855/* ---------------------------- Indexing Bins ---------------------------- */
2856
2857#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2858#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
2859#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2860#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2861
2862/* addressing by index. See above about smallbin repositioning */
2863#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2864#define treebin_at(M,i) (&((M)->treebins[i]))
2865
2866/* assign tree index for size S to variable I. Use x86 asm if possible */
2867#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2868#define compute_tree_index(S, I)\
2869{\
2870 unsigned int X = S >> TREEBIN_SHIFT;\
2871 if (X == 0)\
2872 I = 0;\
2873 else if (X > 0xFFFF)\
2874 I = NTREEBINS-1;\
2875 else {\
2876 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
2877 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2878 }\
2879}
2880
2881#elif defined (__INTEL_COMPILER)
2882#define compute_tree_index(S, I)\
2883{\
2884 size_t X = S >> TREEBIN_SHIFT;\
2885 if (X == 0)\
2886 I = 0;\
2887 else if (X > 0xFFFF)\
2888 I = NTREEBINS-1;\
2889 else {\
2890 unsigned int K = _bit_scan_reverse (X); \
2891 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2892 }\
2893}
2894
2895#elif defined(_MSC_VER) && _MSC_VER>=1300
2896#define compute_tree_index(S, I)\
2897{\
2898 size_t X = S >> TREEBIN_SHIFT;\
2899 if (X == 0)\
2900 I = 0;\
2901 else if (X > 0xFFFF)\
2902 I = NTREEBINS-1;\
2903 else {\
2904 unsigned int K;\
2905 _BitScanReverse((DWORD *) &K, (DWORD) X);\
2906 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2907 }\
2908}
2909
2910#else /* GNUC */
2911#define compute_tree_index(S, I)\
2912{\
2913 size_t X = S >> TREEBIN_SHIFT;\
2914 if (X == 0)\
2915 I = 0;\
2916 else if (X > 0xFFFF)\
2917 I = NTREEBINS-1;\
2918 else {\
2919 unsigned int Y = (unsigned int)X;\
2920 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2921 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2922 N += K;\
2923 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2924 K = 14 - N + ((Y <<= K) >> 15);\
2925 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2926 }\
2927}
2928#endif /* GNUC */
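
/*
  Worked example (illustrative only, assuming the default TREEBIN_SHIFT
  of 8): for S == 1536, X == S >> 8 == 6, whose highest set bit is bit
  K == 2, so

    I == (K << 1) + ((S >> (K + TREEBIN_SHIFT - 1)) & 1)
      == 4 + ((1536 >> 9) & 1)
      == 5

  Each power-of-two size range thus maps to a pair of bins, with the
  next-lower bit of S selecting the odd or even bin of the pair.
*/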
2929
2930/* Bit representing maximum resolved size in a treebin at i */
2931#define bit_for_tree_index(i) \
2932 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2933
2934/* Shift placing maximum resolved bit in a treebin at i as sign bit */
2935#define leftshift_for_tree_index(i) \
2936 ((i == NTREEBINS-1)? 0 : \
2937 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2938
2939/* The size of the smallest chunk held in bin with index i */
2940#define minsize_for_tree_index(i) \
2941 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2942 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
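
/*
  Consistency check (illustrative, assuming the default TREEBIN_SHIFT of
  8): minsize_for_tree_index(5) == (1 << 10) | (1 << 9) == 1536, which
  is exactly the smallest size that compute_tree_index above maps to
  bin 5.
*/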
2943
2944
2945/* ------------------------ Operations on bin maps ----------------------- */
2946
2947/* bit corresponding to given index */
2948#define idx2bit(i) ((binmap_t)(1) << (i))
2949
2950/* Mark/Clear bits with given index */
2951#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2952#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2953#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2954
2955#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2956#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2957#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2958
2959/* isolate the least set bit of a bitmap */
2960#define least_bit(x) ((x) & -(x))
2961
2962/* mask with all bits to left of least bit of x on */
2963#define left_bits(x) ((x<<1) | -(x<<1))
2964
2965/* mask with all bits to left of or equal to least bit of x on */
2966#define same_or_left_bits(x) ((x) | -(x))
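
/*
  Illustrative sketch (not part of the allocator), using an 8-bit map
  for brevity:

    x                     == 0b00101000
    least_bit(x)          == 0b00001000   (lowest set bit, isolated)
    left_bits(0b00001000) == 0b11110000   (strictly higher positions)
    same_or_left_bits(x)  == 0b11111000   (that bit and everything above)

  These masks let the allocator scan the bin maps for the next nonempty
  bin.
*/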
2967
2968/* index corresponding to given bit. Use x86 asm if possible */
2969
2970#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2971#define compute_bit2idx(X, I)\
2972{\
2973 unsigned int J;\
2974 J = __builtin_ctz(X); \
2975 I = (bindex_t)J;\
2976}
2977
2978#elif defined (__INTEL_COMPILER)
2979#define compute_bit2idx(X, I)\
2980{\
2981 unsigned int J;\
2982 J = _bit_scan_forward (X); \
2983 I = (bindex_t)J;\
2984}
2985
2986#elif defined(_MSC_VER) && _MSC_VER>=1300
2987#define compute_bit2idx(X, I)\
2988{\
2989 unsigned int J;\
2990 _BitScanForward((DWORD *) &J, X);\
2991 I = (bindex_t)J;\
2992}
2993
2994#elif USE_BUILTIN_FFS
2995#define compute_bit2idx(X, I) I = ffs(X)-1
2996
2997#else
2998#define compute_bit2idx(X, I)\
2999{\
3000 unsigned int Y = X - 1;\
3001 unsigned int K = Y >> (16-4) & 16;\
3002 unsigned int N = K; Y >>= K;\
3003 N += K = Y >> (8-3) & 8; Y >>= K;\
3004 N += K = Y >> (4-2) & 4; Y >>= K;\
3005 N += K = Y >> (2-1) & 2; Y >>= K;\
3006 N += K = Y >> (1-0) & 1; Y >>= K;\
3007 I = (bindex_t)(N + Y);\
3008}
3009#endif /* GNUC */
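
/*
  Illustrative sketch (not part of the allocator): compute_bit2idx maps
  an isolated bit back to its bin index. For example, if least_bit(map)
  yields 0b00001000, then compute_bit2idx(0b00001000, i) leaves i == 3.
  In the portable fallback above, Y starts as 0b00000111 and the
  shift/mask cascade accumulates the bit position, matching the result
  of the hardware bit-scan variants.
*/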
3010
3011
3012/* ----------------------- Runtime Check Support ------------------------- */
3013
3014/*
3015 For security, the main invariant is that malloc/free/etc never
3016 writes to a static address other than malloc_state, unless static
3017 malloc_state itself has been corrupted, which cannot occur via
3018 malloc (because of these checks). In essence this means that we
3019 believe all pointers, sizes, maps etc held in malloc_state, but
3020 check all of those linked or offsetted from other embedded data
3021 structures. These checks are interspersed with main code in a way
3022 that tends to minimize their run-time cost.
3023
3024 When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
3026 that the mstate controlling malloc/free is intact. This is a
3027 streamlined version of the approach described by William Robertson
3028 et al in "Run-time Detection of Heap-based Overflows" LISA'03
3029 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
3030 of an inuse chunk holds the xor of its mstate and a random seed,
3031 that is checked upon calls to free() and realloc(). This is
  (probabilistically) unguessable from outside the program, but can be
3033 computed by any code successfully malloc'ing any chunk, so does not
3034 itself provide protection against code that has already broken
3035 security through some other means. Unlike Robertson et al, we
3036 always dynamically check addresses of all offset chunks (previous,
3037 next, etc). This turns out to be cheaper than relying on hashes.
3038*/
3039
3040#if !INSECURE
3041/* Check if address a is at least as high as any from MORECORE or MMAP */
3042#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
3043/* Check if address of next chunk n is higher than base chunk p */
3044#define ok_next(p, n) ((char*)(p) < (char*)(n))
3045/* Check if p has inuse status */
3046#define ok_inuse(p) is_inuse(p)
3047/* Check if p has its pinuse bit on */
3048#define ok_pinuse(p) pinuse(p)
3049
3050#else /* !INSECURE */
3051#define ok_address(M, a) (1)
#define ok_next(p, n) (1)
3053#define ok_inuse(p) (1)
3054#define ok_pinuse(p) (1)
3055#endif /* !INSECURE */
3056
3057#if (FOOTERS && !INSECURE)
3058/* Check if (alleged) mstate m has expected magic field */
3059#define ok_magic(M) ((M)->magic == mparams.magic)
3060#else /* (FOOTERS && !INSECURE) */
3061#define ok_magic(M) (1)
3062#endif /* (FOOTERS && !INSECURE) */
3063
3064/* In gcc, use __builtin_expect to minimize impact of checks */
3065#if !INSECURE
3066#if defined(__GNUC__) && __GNUC__ >= 3
3067#define RTCHECK(e) __builtin_expect(e, 1)
3068#else /* GNUC */
3069#define RTCHECK(e) (e)
3070#endif /* GNUC */
3071#else /* !INSECURE */
3072#define RTCHECK(e) (1)
3073#endif /* !INSECURE */
3074
3075/* macros to set up inuse chunks with or without footers */
3076
3077#if !FOOTERS
3078
3079#define mark_inuse_foot(M,p,s)
3080
3081/* Macros for setting head/foot of non-mmapped chunks */
3082
3083/* Set cinuse bit and pinuse bit of next chunk */
3084#define set_inuse(M,p,s)\
3085 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3086 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3087
3088/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
3089#define set_inuse_and_pinuse(M,p,s)\
3090 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3091 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3092
3093/* Set size, cinuse and pinuse bit of this chunk */
3094#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3095 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
3096
3097#else /* FOOTERS */
3098
3099/* Set foot of inuse chunk to be xor of mstate and seed */
3100#define mark_inuse_foot(M,p,s)\
3101 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
3102
3103#define get_mstate_for(p)\
3104 ((mstate)(((mchunkptr)((char*)(p) +\
3105 (chunksize(p))))->prev_foot ^ mparams.magic))
3106
3107#define set_inuse(M,p,s)\
3108 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3109 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
3110 mark_inuse_foot(M,p,s))
3111
3112#define set_inuse_and_pinuse(M,p,s)\
3113 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3114 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
3115 mark_inuse_foot(M,p,s))
3116
3117#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3118 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3119 mark_inuse_foot(M, p, s))
3120
3121#endif /* !FOOTERS */
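
/*
  Illustrative sketch (not part of the allocator) of the FOOTERS check:
  mark_inuse_foot stores ((size_t)m ^ mparams.magic) just past each
  inuse chunk, so free/realloc can do

    mchunkptr p = mem2chunk(mem);
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) { USAGE_ERROR_ACTION(fm, p); }

  A forged or clobbered footer xors back to a bogus mstate whose magic
  field (probabilistically) fails the comparison against mparams.magic.
*/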
3122
3123/* ---------------------------- setting mparams -------------------------- */
3124
3125#if LOCK_AT_FORK
3126static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
3127static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
3128static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
3129#endif /* LOCK_AT_FORK */
3130
3131/* Initialize mparams */
3132static int init_mparams(void) {
3133#ifdef NEED_GLOBAL_LOCK_INIT
3134 if (malloc_global_mutex_status <= 0)
3135 init_malloc_global_mutex();
3136#endif
3137
3138 ACQUIRE_MALLOC_GLOBAL_LOCK();
3139 if (mparams.magic == 0) {
3140 size_t magic;
3141 size_t psize;
3142 size_t gsize;
3143
3144#ifndef WIN32
3145 psize = malloc_getpagesize;
3146 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
3147#else /* WIN32 */
3148 {
3149 SYSTEM_INFO system_info;
3150 GetSystemInfo(&system_info);
3151 psize = system_info.dwPageSize;
3152 gsize = ((DEFAULT_GRANULARITY != 0)?
3153 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
3154 }
3155#endif /* WIN32 */
3156
3157 /* Sanity-check configuration:
3158 size_t must be unsigned and as wide as pointer type.
3159 ints must be at least 4 bytes.
3160 alignment must be at least 8.
3161 Alignment, min chunk size, and page size must all be powers of 2.
3162 */
3163 if ((sizeof(size_t) != sizeof(char*)) ||
3164 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
3165 (sizeof(int) < 4) ||
3166 (MALLOC_ALIGNMENT < (size_t)8U) ||
3167 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
3168 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
3169 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
3170 ((psize & (psize-SIZE_T_ONE)) != 0))
3171 ABORT;
3172 mparams.granularity = gsize;
3173 mparams.page_size = psize;
3174 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
3175 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
3176#if MORECORE_CONTIGUOUS
3177 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
3178#else /* MORECORE_CONTIGUOUS */
3179 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
3180#endif /* MORECORE_CONTIGUOUS */
3181
3182#if !ONLY_MSPACES
3183 /* Set up lock for main malloc area */
3184 gm->mflags = mparams.default_mflags;
3185 (void)INITIAL_LOCK(&gm->mutex);
3186#endif
3187#if LOCK_AT_FORK
3188 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
3189#endif
3190
3191 {
3192#if USE_DEV_RANDOM
3193 int fd;
3194 unsigned char buf[sizeof(size_t)];
3195 /* Try to use /dev/urandom, else fall back on using time */
3196 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
3197 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
3198 magic = *((size_t *) buf);
3199 close(fd);
3200 }
3201 else
3202#endif /* USE_DEV_RANDOM */
3203#ifdef WIN32
3204 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
3205#elif defined(LACKS_TIME_H)
3206 magic = (size_t)&magic ^ (size_t)0x55555555U;
3207#else
3208 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
3209#endif
3210 magic |= (size_t)8U; /* ensure nonzero */
3211 magic &= ~(size_t)7U; /* improve chances of fault for bad values */
      /* Until memory-ordering primitives are commonly available, use a volatile write */
3213 (*(volatile size_t *)(&(mparams.magic))) = magic;
3214 }
3215 }
3216
3217 RELEASE_MALLOC_GLOBAL_LOCK();
3218 return 1;
3219}
3220
3221/* support for mallopt */
3222static int change_mparam(int param_number, int value) {
3223 size_t val;
3224 ensure_initialization();
3225 val = (value == -1)? MAX_SIZE_T : (size_t)value;
3226 switch(param_number) {
3227 case M_TRIM_THRESHOLD:
3228 mparams.trim_threshold = val;
3229 return 1;
3230 case M_GRANULARITY:
3231 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
3232 mparams.granularity = val;
3233 return 1;
3234 }
3235 else
3236 return 0;
3237 case M_MMAP_THRESHOLD:
3238 mparams.mmap_threshold = val;
3239 return 1;
3240 default:
3241 return 0;
3242 }
3243}
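
/*
  Usage sketch (illustrative): the exported mallopt-style entry point
  relays to change_mparam, so, for example,

    dlmallopt(M_GRANULARITY, 128 * 1024);
    dlmallopt(M_TRIM_THRESHOLD, -1);

  The first succeeds only if the value is a power of two and at least a
  page; in the second, -1 maps to MAX_SIZE_T, effectively disabling
  automatic trimming.
*/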
3244
3245#if DEBUG
3246/* ------------------------- Debugging Support --------------------------- */
3247
3248/* Check properties of any chunk, whether free, inuse, mmapped etc */
3249static void do_check_any_chunk(mstate m, mchunkptr p) {
3250 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3251 assert(ok_address(m, p));
3252}
3253
3254/* Check properties of top chunk */
3255static void do_check_top_chunk(mstate m, mchunkptr p) {
3256 msegmentptr sp = segment_holding(m, (char*)p);
3257 size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
3258 assert(sp != 0);
3259 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3260 assert(ok_address(m, p));
3261 assert(sz == m->topsize);
3262 assert(sz > 0);
3263 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
3264 assert(pinuse(p));
3265 assert(!pinuse(chunk_plus_offset(p, sz)));
3266}
3267
3268/* Check properties of (inuse) mmapped chunks */
3269static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
3270 size_t sz = chunksize(p);
3271 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
3272 assert(is_mmapped(p));
3273 assert(use_mmap(m));
3274 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
3275 assert(ok_address(m, p));
3276 assert(!is_small(sz));
3277 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
3278 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
3279 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
3280}
3281
3282/* Check properties of inuse chunks */
3283static void do_check_inuse_chunk(mstate m, mchunkptr p) {
3284 do_check_any_chunk(m, p);
3285 assert(is_inuse(p));
3286 assert(next_pinuse(p));
3287 /* If not pinuse and not mmapped, previous chunk has OK offset */
3288 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
3289 if (is_mmapped(p))
3290 do_check_mmapped_chunk(m, p);
3291}
3292
3293/* Check properties of free chunks */
3294static void do_check_free_chunk(mstate m, mchunkptr p) {
3295 size_t sz = chunksize(p);
3296 mchunkptr next = chunk_plus_offset(p, sz);
3297 do_check_any_chunk(m, p);
3298 assert(!is_inuse(p));
3299 assert(!next_pinuse(p));
3300 assert (!is_mmapped(p));
3301 if (p != m->dv && p != m->top) {
3302 if (sz >= MIN_CHUNK_SIZE) {
3303 assert((sz & CHUNK_ALIGN_MASK) == 0);
3304 assert(is_aligned(chunk2mem(p)));
3305 assert(next->prev_foot == sz);
3306 assert(pinuse(p));
3307 assert (next == m->top || is_inuse(next));
3308 assert(p->fd->bk == p);
3309 assert(p->bk->fd == p);
3310 }
3311 else /* markers are always of size SIZE_T_SIZE */
3312 assert(sz == SIZE_T_SIZE);
3313 }
3314}
3315
3316/* Check properties of malloced chunks at the point they are malloced */
3317static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
3318 if (mem != 0) {
3319 mchunkptr p = mem2chunk(mem);
3320 size_t sz = p->head & ~INUSE_BITS;
3321 do_check_inuse_chunk(m, p);
3322 assert((sz & CHUNK_ALIGN_MASK) == 0);
3323 assert(sz >= MIN_CHUNK_SIZE);
3324 assert(sz >= s);
3325 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
3326 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
3327 }
3328}
3329
3330/* Check a tree and its subtrees. */
3331static void do_check_tree(mstate m, tchunkptr t) {
3332 tchunkptr head = 0;
3333 tchunkptr u = t;
3334 bindex_t tindex = t->index;
3335 size_t tsize = chunksize(t);
3336 bindex_t idx;
3337 compute_tree_index(tsize, idx);
3338 assert(tindex == idx);
3339 assert(tsize >= MIN_LARGE_SIZE);
3340 assert(tsize >= minsize_for_tree_index(idx));
3341 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
3342
3343 do { /* traverse through chain of same-sized nodes */
3344 do_check_any_chunk(m, ((mchunkptr)u));
3345 assert(u->index == tindex);
3346 assert(chunksize(u) == tsize);
3347 assert(!is_inuse(u));
3348 assert(!next_pinuse(u));
3349 assert(u->fd->bk == u);
3350 assert(u->bk->fd == u);
3351 if (u->parent == 0) {
3352 assert(u->child[0] == 0);
3353 assert(u->child[1] == 0);
3354 }
3355 else {
3356 assert(head == 0); /* only one node on chain has parent */
3357 head = u;
3358 assert(u->parent != u);
3359 assert (u->parent->child[0] == u ||
3360 u->parent->child[1] == u ||
3361 *((tbinptr*)(u->parent)) == u);
3362 if (u->child[0] != 0) {
3363 assert(u->child[0]->parent == u);
3364 assert(u->child[0] != u);
3365 do_check_tree(m, u->child[0]);
3366 }
3367 if (u->child[1] != 0) {
3368 assert(u->child[1]->parent == u);
3369 assert(u->child[1] != u);
3370 do_check_tree(m, u->child[1]);
3371 }
3372 if (u->child[0] != 0 && u->child[1] != 0) {
3373 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
3374 }
3375 }
3376 u = u->fd;
3377 } while (u != t);
3378 assert(head != 0);
3379}
3380
3381/* Check all the chunks in a treebin. */
3382static void do_check_treebin(mstate m, bindex_t i) {
3383 tbinptr* tb = treebin_at(m, i);
3384 tchunkptr t = *tb;
3385 int empty = (m->treemap & (1U << i)) == 0;
3386 if (t == 0)
3387 assert(empty);
3388 if (!empty)
3389 do_check_tree(m, t);
3390}
3391
3392/* Check all the chunks in a smallbin. */
3393static void do_check_smallbin(mstate m, bindex_t i) {
3394 sbinptr b = smallbin_at(m, i);
3395 mchunkptr p = b->bk;
3396 unsigned int empty = (m->smallmap & (1U << i)) == 0;
3397 if (p == b)
3398 assert(empty);
3399 if (!empty) {
3400 for (; p != b; p = p->bk) {
3401 size_t size = chunksize(p);
3402 mchunkptr q;
3403 /* each chunk claims to be free */
3404 do_check_free_chunk(m, p);
3405 /* chunk belongs in bin */
3406 assert(small_index(size) == i);
3407 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
3408 /* chunk is followed by an inuse chunk */
3409 q = next_chunk(p);
3410 if (q->head != FENCEPOST_HEAD)
3411 do_check_inuse_chunk(m, q);
3412 }
3413 }
3414}
3415
3416/* Find x in a bin. Used in other check functions. */
3417static int bin_find(mstate m, mchunkptr x) {
3418 size_t size = chunksize(x);
3419 if (is_small(size)) {
3420 bindex_t sidx = small_index(size);
3421 sbinptr b = smallbin_at(m, sidx);
3422 if (smallmap_is_marked(m, sidx)) {
3423 mchunkptr p = b;
3424 do {
3425 if (p == x)
3426 return 1;
3427 } while ((p = p->fd) != b);
3428 }
3429 }
3430 else {
3431 bindex_t tidx;
3432 compute_tree_index(size, tidx);
3433 if (treemap_is_marked(m, tidx)) {
3434 tchunkptr t = *treebin_at(m, tidx);
3435 size_t sizebits = size << leftshift_for_tree_index(tidx);
3436 while (t != 0 && chunksize(t) != size) {
3437 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3438 sizebits <<= 1;
3439 }
3440 if (t != 0) {
3441 tchunkptr u = t;
3442 do {
3443 if (u == (tchunkptr)x)
3444 return 1;
3445 } while ((u = u->fd) != t);
3446 }
3447 }
3448 }
3449 return 0;
3450}
3451
3452/* Traverse each chunk and check it; return total */
3453static size_t traverse_and_check(mstate m) {
3454 size_t sum = 0;
3455 if (is_initialized(m)) {
3456 msegmentptr s = &m->seg;
3457 sum += m->topsize + TOP_FOOT_SIZE;
3458 while (s != 0) {
3459 mchunkptr q = align_as_chunk(s->base);
3460 mchunkptr lastq = 0;
3461 assert(pinuse(q));
3462 while (segment_holds(s, q) &&
3463 q != m->top && q->head != FENCEPOST_HEAD) {
3464 sum += chunksize(q);
3465 if (is_inuse(q)) {
3466 assert(!bin_find(m, q));
3467 do_check_inuse_chunk(m, q);
3468 }
3469 else {
3470 assert(q == m->dv || bin_find(m, q));
3471 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
3472 do_check_free_chunk(m, q);
3473 }
3474 lastq = q;
3475 q = next_chunk(q);
3476 }
3477 s = s->next;
3478 }
3479 }
3480 return sum;
3481}
3482
3483
3484/* Check all properties of malloc_state. */
3485static void do_check_malloc_state(mstate m) {
3486 bindex_t i;
3487 size_t total;
3488 /* check bins */
3489 for (i = 0; i < NSMALLBINS; ++i)
3490 do_check_smallbin(m, i);
3491 for (i = 0; i < NTREEBINS; ++i)
3492 do_check_treebin(m, i);
3493
3494 if (m->dvsize != 0) { /* check dv chunk */
3495 do_check_any_chunk(m, m->dv);
3496 assert(m->dvsize == chunksize(m->dv));
3497 assert(m->dvsize >= MIN_CHUNK_SIZE);
3498 assert(bin_find(m, m->dv) == 0);
3499 }
3500
3501 if (m->top != 0) { /* check top chunk */
3502 do_check_top_chunk(m, m->top);
3503 /*assert(m->topsize == chunksize(m->top)); redundant */
3504 assert(m->topsize > 0);
3505 assert(bin_find(m, m->top) == 0);
3506 }
3507
3508 total = traverse_and_check(m);
3509 assert(total <= m->footprint);
3510 assert(m->footprint <= m->max_footprint);
3511}
3512#endif /* DEBUG */
3513
3514/* ----------------------------- statistics ------------------------------ */
3515
3516#if !NO_MALLINFO
3517static struct mallinfo internal_mallinfo(mstate m) {
3518 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3519 ensure_initialization();
3520 if (!PREACTION(m)) {
3521 check_malloc_state(m);
3522 if (is_initialized(m)) {
3523 size_t nfree = SIZE_T_ONE; /* top always free */
3524 size_t mfree = m->topsize + TOP_FOOT_SIZE;
3525 size_t sum = mfree;
3526 msegmentptr s = &m->seg;
3527 while (s != 0) {
3528 mchunkptr q = align_as_chunk(s->base);
3529 while (segment_holds(s, q) &&
3530 q != m->top && q->head != FENCEPOST_HEAD) {
3531 size_t sz = chunksize(q);
3532 sum += sz;
3533 if (!is_inuse(q)) {
3534 mfree += sz;
3535 ++nfree;
3536 }
3537 q = next_chunk(q);
3538 }
3539 s = s->next;
3540 }
3541
3542 nm.arena = sum;
3543 nm.ordblks = nfree;
3544 nm.hblkhd = m->footprint - sum;
3545 nm.usmblks = m->max_footprint;
3546 nm.uordblks = m->footprint - mfree;
3547 nm.fordblks = mfree;
3548 nm.keepcost = m->topsize;
3549 }
3550
3551 POSTACTION(m);
3552 }
3553 return nm;
3554}
3555#endif /* !NO_MALLINFO */
3556
3557#if !NO_MALLOC_STATS
3558static void internal_malloc_stats(mstate m) {
3559 ensure_initialization();
3560 if (!PREACTION(m)) {
3561 size_t maxfp = 0;
3562 size_t fp = 0;
3563 size_t used = 0;
3564 check_malloc_state(m);
3565 if (is_initialized(m)) {
3566 msegmentptr s = &m->seg;
3567 maxfp = m->max_footprint;
3568 fp = m->footprint;
3569 used = fp - (m->topsize + TOP_FOOT_SIZE);
3570
3571 while (s != 0) {
3572 mchunkptr q = align_as_chunk(s->base);
3573 while (segment_holds(s, q) &&
3574 q != m->top && q->head != FENCEPOST_HEAD) {
3575 if (!is_inuse(q))
3576 used -= chunksize(q);
3577 q = next_chunk(q);
3578 }
3579 s = s->next;
3580 }
3581 }
3582 POSTACTION(m); /* drop lock */
3583 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
3584 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
3585 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
3586 }
3587}
3588#endif /* NO_MALLOC_STATS */
3589
3590/* ----------------------- Operations on smallbins ----------------------- */
3591
3592/*
  Various forms of linking and unlinking are defined as macros, even
  the ones for trees, which are very long but have very short typical
  paths. This is ugly, but it reduces reliance on the inlining support
  of compilers.
3597*/
3598
3599/* Link a free chunk into a smallbin */
3600#define insert_small_chunk(M, P, S) {\
3601 bindex_t I = small_index(S);\
3602 mchunkptr B = smallbin_at(M, I);\
3603 mchunkptr F = B;\
3604 assert(S >= MIN_CHUNK_SIZE);\
3605 if (!smallmap_is_marked(M, I))\
3606 mark_smallmap(M, I);\
3607 else if (RTCHECK(ok_address(M, B->fd)))\
3608 F = B->fd;\
3609 else {\
3610 CORRUPTION_ERROR_ACTION(M);\
3611 }\
3612 B->fd = P;\
3613 F->bk = P;\
3614 P->fd = F;\
3615 P->bk = B;\
3616}
3617
3618/* Unlink a chunk from a smallbin */
3619#define unlink_small_chunk(M, P, S) {\
3620 mchunkptr F = P->fd;\
3621 mchunkptr B = P->bk;\
3622 bindex_t I = small_index(S);\
3623 assert(P != B);\
3624 assert(P != F);\
3625 assert(chunksize(P) == small_index2size(I));\
3626 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
3627 if (B == F) {\
3628 clear_smallmap(M, I);\
3629 }\
3630 else if (RTCHECK(B == smallbin_at(M,I) ||\
3631 (ok_address(M, B) && B->fd == P))) {\
3632 F->bk = B;\
3633 B->fd = F;\
3634 }\
3635 else {\
3636 CORRUPTION_ERROR_ACTION(M);\
3637 }\
3638 }\
3639 else {\
3640 CORRUPTION_ERROR_ACTION(M);\
3641 }\
3642}
3643
3644/* Unlink the first chunk from a smallbin */
3645#define unlink_first_small_chunk(M, B, P, I) {\
3646 mchunkptr F = P->fd;\
3647 assert(P != B);\
3648 assert(P != F);\
3649 assert(chunksize(P) == small_index2size(I));\
3650 if (B == F) {\
3651 clear_smallmap(M, I);\
3652 }\
3653 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
3654 F->bk = B;\
3655 B->fd = F;\
3656 }\
3657 else {\
3658 CORRUPTION_ERROR_ACTION(M);\
3659 }\
3660}
3661
3662/* Replace dv node, binning the old one */
3663/* Used only when dvsize known to be small */
3664#define replace_dv(M, P, S) {\
3665 size_t DVS = M->dvsize;\
3666 assert(is_small(DVS));\
3667 if (DVS != 0) {\
3668 mchunkptr DV = M->dv;\
3669 insert_small_chunk(M, DV, DVS);\
3670 }\
3671 M->dvsize = S;\
3672 M->dv = P;\
3673}
3674
3675/* ------------------------- Operations on trees ------------------------- */
3676
3677/* Insert chunk into tree */
3678#define insert_large_chunk(M, X, S) {\
3679 tbinptr* H;\
3680 bindex_t I;\
3681 compute_tree_index(S, I);\
3682 H = treebin_at(M, I);\
3683 X->index = I;\
3684 X->child[0] = X->child[1] = 0;\
3685 if (!treemap_is_marked(M, I)) {\
3686 mark_treemap(M, I);\
3687 *H = X;\
3688 X->parent = (tchunkptr)H;\
3689 X->fd = X->bk = X;\
3690 }\
3691 else {\
3692 tchunkptr T = *H;\
3693 size_t K = S << leftshift_for_tree_index(I);\
3694 for (;;) {\
3695 if (chunksize(T) != S) {\
3696 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3697 K <<= 1;\
3698 if (*C != 0)\
3699 T = *C;\
3700 else if (RTCHECK(ok_address(M, C))) {\
3701 *C = X;\
3702 X->parent = T;\
3703 X->fd = X->bk = X;\
3704 break;\
3705 }\
3706 else {\
3707 CORRUPTION_ERROR_ACTION(M);\
3708 break;\
3709 }\
3710 }\
3711 else {\
3712 tchunkptr F = T->fd;\
3713 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3714 T->fd = F->bk = X;\
3715 X->fd = F;\
3716 X->bk = T;\
3717 X->parent = 0;\
3718 break;\
3719 }\
3720 else {\
3721 CORRUPTION_ERROR_ACTION(M);\
3722 break;\
3723 }\
3724 }\
3725 }\
3726 }\
3727}
3728
3729/*
3730 Unlink steps:
3731
3732 1. If x is a chained node, unlink it from its same-sized fd/bk links
3733 and choose its bk node as its replacement.
3734 2. If x was the last node of its size, but not a leaf node, it must
3735 be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks. We use the rightmost descendant
3738 of x. We could use any other leaf, but this is easy to locate and
3739 tends to counteract removal of leftmosts elsewhere, and so keeps
3740 paths shorter than minimally guaranteed. This doesn't loop much
3741 because on average a node in a tree is near the bottom.
3742 3. If x is the base of a chain (i.e., has parent links) relink
3743 x's parent and children to x's replacement (or null if none).
3744*/
3745
3746#define unlink_large_chunk(M, X) {\
3747 tchunkptr XP = X->parent;\
3748 tchunkptr R;\
3749 if (X->bk != X) {\
3750 tchunkptr F = X->fd;\
3751 R = X->bk;\
3752 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
3753 F->bk = R;\
3754 R->fd = F;\
3755 }\
3756 else {\
3757 CORRUPTION_ERROR_ACTION(M);\
3758 }\
3759 }\
3760 else {\
3761 tchunkptr* RP;\
3762 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3763 ((R = *(RP = &(X->child[0]))) != 0)) {\
3764 tchunkptr* CP;\
3765 while ((*(CP = &(R->child[1])) != 0) ||\
3766 (*(CP = &(R->child[0])) != 0)) {\
3767 R = *(RP = CP);\
3768 }\
3769 if (RTCHECK(ok_address(M, RP)))\
3770 *RP = 0;\
3771 else {\
3772 CORRUPTION_ERROR_ACTION(M);\
3773 }\
3774 }\
3775 }\
3776 if (XP != 0) {\
3777 tbinptr* H = treebin_at(M, X->index);\
3778 if (X == *H) {\
3779 if ((*H = R) == 0) \
3780 clear_treemap(M, X->index);\
3781 }\
3782 else if (RTCHECK(ok_address(M, XP))) {\
3783 if (XP->child[0] == X) \
3784 XP->child[0] = R;\
3785 else \
3786 XP->child[1] = R;\
3787 }\
3788 else\
3789 CORRUPTION_ERROR_ACTION(M);\
3790 if (R != 0) {\
3791 if (RTCHECK(ok_address(M, R))) {\
3792 tchunkptr C0, C1;\
3793 R->parent = XP;\
3794 if ((C0 = X->child[0]) != 0) {\
3795 if (RTCHECK(ok_address(M, C0))) {\
3796 R->child[0] = C0;\
3797 C0->parent = R;\
3798 }\
3799 else\
3800 CORRUPTION_ERROR_ACTION(M);\
3801 }\
3802 if ((C1 = X->child[1]) != 0) {\
3803 if (RTCHECK(ok_address(M, C1))) {\
3804 R->child[1] = C1;\
3805 C1->parent = R;\
3806 }\
3807 else\
3808 CORRUPTION_ERROR_ACTION(M);\
3809 }\
3810 }\
3811 else\
3812 CORRUPTION_ERROR_ACTION(M);\
3813 }\
3814 }\
3815}
3816
3817/* Relays to large vs small bin operations */
3818
3819#define insert_chunk(M, P, S)\
3820 if (is_small(S)) insert_small_chunk(M, P, S)\
3821 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3822
3823#define unlink_chunk(M, P, S)\
3824 if (is_small(S)) unlink_small_chunk(M, P, S)\
3825 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3826
3827
3828/* Relays to internal calls to malloc/free from realloc, memalign etc */
3829
3830#if ONLY_MSPACES
3831#define internal_malloc(m, b) mspace_malloc(m, b)
3832#define internal_free(m, mem) mspace_free(m,mem);
3833#else /* ONLY_MSPACES */
3834#if MSPACES
3835#define internal_malloc(m, b)\
3836 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
3837#define internal_free(m, mem)\
3838 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3839#else /* MSPACES */
3840#define internal_malloc(m, b) dlmalloc(b)
3841#define internal_free(m, mem) dlfree(mem)
3842#endif /* MSPACES */
3843#endif /* ONLY_MSPACES */
3844
3845/* ----------------------- Direct-mmapping chunks ----------------------- */
3846
3847/*
3848 Directly mmapped chunks are set up with an offset to the start of
3849 the mmapped region stored in the prev_foot field of the chunk. This
3850 allows reconstruction of the required argument to MUNMAP when freed,
3851 and also allows adjustment of the returned chunk to meet alignment
3852 requirements (especially in memalign).
3853*/
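
/*
  Illustrative sketch (not part of the allocator): if CALL_DIRECT_MMAP
  returns a region mm and the first aligned chunk sits at mm + offset,
  mmap_alloc below records

    p            == (mchunkptr)(mm + offset)
    p->prev_foot == offset
    p->head      == mmsize - offset - MMAP_FOOT_PAD

  so a later free can recover the original mapping start as
  (char*)p - p->prev_foot and its length as
  chunksize(p) + p->prev_foot + MMAP_FOOT_PAD, exactly the arguments
  MUNMAP needs.
*/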
3854
3855/* Malloc using mmap */
3856static void* mmap_alloc(mstate m, size_t nb) {
3857 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3858 if (m->footprint_limit != 0) {
3859 size_t fp = m->footprint + mmsize;
3860 if (fp <= m->footprint || fp > m->footprint_limit)
3861 return 0;
3862 }
3863 if (mmsize > nb) { /* Check for wrap around 0 */
3864 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
3865 if (mm != CMFAIL) {
3866 size_t offset = align_offset(chunk2mem(mm));
3867 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3868 mchunkptr p = (mchunkptr)(mm + offset);
3869 p->prev_foot = offset;
3870 p->head = psize;
3871 mark_inuse_foot(m, p, psize);
3872 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3873 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3874
3875 if (m->least_addr == 0 || mm < m->least_addr)
3876 m->least_addr = mm;
3877 if ((m->footprint += mmsize) > m->max_footprint)
3878 m->max_footprint = m->footprint;
3879 assert(is_aligned(chunk2mem(p)));
3880 check_mmapped_chunk(m, p);
3881 return chunk2mem(p);
3882 }
3883 }
3884 return 0;
3885}
3886
3887/* Realloc using mmap */
3888static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
3889 size_t oldsize = chunksize(oldp);
3890 (void)flags; /* placate people compiling -Wunused */
3891 if (is_small(nb)) /* Can't shrink mmap regions below small size */
3892 return 0;
3893 /* Keep old chunk if big enough but not too big */
3894 if (oldsize >= nb + SIZE_T_SIZE &&
3895 (oldsize - nb) <= (mparams.granularity << 1))
3896 return oldp;
3897 else {
3898 size_t offset = oldp->prev_foot;
3899 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3900 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3901 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3902 oldmmsize, newmmsize, flags);
3903 if (cp != CMFAIL) {
3904 mchunkptr newp = (mchunkptr)(cp + offset);
3905 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3906 newp->head = psize;
3907 mark_inuse_foot(m, newp, psize);
3908 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3909 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
3910
3911 if (cp < m->least_addr)
3912 m->least_addr = cp;
3913 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3914 m->max_footprint = m->footprint;
3915 check_mmapped_chunk(m, newp);
3916 return newp;
3917 }
3918 }
3919 return 0;
3920}
3921
3922
3923/* -------------------------- mspace management -------------------------- */
3924
3925/* Initialize top chunk and its size */
3926static void init_top(mstate m, mchunkptr p, size_t psize) {
3927 /* Ensure alignment */
3928 size_t offset = align_offset(chunk2mem(p));
3929 p = (mchunkptr)((char*)p + offset);
3930 psize -= offset;
3931
3932 m->top = p;
3933 m->topsize = psize;
3934 p->head = psize | PINUSE_BIT;
3935 /* set size of fake trailing chunk holding overhead space only once */
3936 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3937 m->trim_check = mparams.trim_threshold; /* reset on each update */
3938}
3939
3940/* Initialize bins for a new mstate that is otherwise zeroed out */
3941static void init_bins(mstate m) {
3942 /* Establish circular links for smallbins */
3943 bindex_t i;
3944 for (i = 0; i < NSMALLBINS; ++i) {
3945 sbinptr bin = smallbin_at(m,i);
3946 bin->fd = bin->bk = bin;
3947 }
3948}
3949
3950#if PROCEED_ON_ERROR
3951
3952/* default corruption action */
3953static void reset_on_error(mstate m) {
3954 int i;
3955 ++malloc_corruption_error_count;
3956 /* Reinitialize fields to forget about all memory */
3957 m->smallmap = m->treemap = 0;
3958 m->dvsize = m->topsize = 0;
3959 m->seg.base = 0;
3960 m->seg.size = 0;
3961 m->seg.next = 0;
3962 m->top = m->dv = 0;
3963 for (i = 0; i < NTREEBINS; ++i)
3964 *treebin_at(m, i) = 0;
3965 init_bins(m);
3966}
3967#endif /* PROCEED_ON_ERROR */
3968
/* Allocate a chunk from newbase and prepend the remainder to the chunk at the successor (old) base. */
3970static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
3971 size_t nb) {
3972 mchunkptr p = align_as_chunk(newbase);
3973 mchunkptr oldfirst = align_as_chunk(oldbase);
3974 size_t psize = (char*)oldfirst - (char*)p;
3975 mchunkptr q = chunk_plus_offset(p, nb);
3976 size_t qsize = psize - nb;
3977 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3978
3979 assert((char*)oldfirst > (char*)q);
3980 assert(pinuse(oldfirst));
3981 assert(qsize >= MIN_CHUNK_SIZE);
3982
3983 /* consolidate remainder with first chunk of old base */
3984 if (oldfirst == m->top) {
3985 size_t tsize = m->topsize += qsize;
3986 m->top = q;
3987 q->head = tsize | PINUSE_BIT;
3988 check_top_chunk(m, q);
3989 }
3990 else if (oldfirst == m->dv) {
3991 size_t dsize = m->dvsize += qsize;
3992 m->dv = q;
3993 set_size_and_pinuse_of_free_chunk(q, dsize);
3994 }
3995 else {
3996 if (!is_inuse(oldfirst)) {
3997 size_t nsize = chunksize(oldfirst);
3998 unlink_chunk(m, oldfirst, nsize);
3999 oldfirst = chunk_plus_offset(oldfirst, nsize);
4000 qsize += nsize;
4001 }
4002 set_free_with_pinuse(q, qsize, oldfirst);
4003 insert_chunk(m, q, qsize);
4004 check_free_chunk(m, q);
4005 }
4006
4007 check_malloced_chunk(m, chunk2mem(p), nb);
4008 return chunk2mem(p);
4009}
4010
4011/* Add a segment to hold a new noncontiguous region */
4012static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
4013 /* Determine locations and sizes of segment, fenceposts, old top */
4014 char* old_top = (char*)m->top;
4015 msegmentptr oldsp = segment_holding(m, old_top);
4016 char* old_end = oldsp->base + oldsp->size;
4017 size_t ssize = pad_request(sizeof(struct malloc_segment));
4018 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
4019 size_t offset = align_offset(chunk2mem(rawsp));
4020 char* asp = rawsp + offset;
4021 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
4022 mchunkptr sp = (mchunkptr)csp;
4023 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
4024 mchunkptr tnext = chunk_plus_offset(sp, ssize);
4025 mchunkptr p = tnext;
4026 int nfences = 0;
4027
4028 /* reset top to new space */
4029 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
4030
4031 /* Set up segment record */
4032 assert(is_aligned(ss));
4033 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
4034 *ss = m->seg; /* Push current record */
4035 m->seg.base = tbase;
4036 m->seg.size = tsize;
4037 m->seg.sflags = mmapped;
4038 m->seg.next = ss;
4039
4040 /* Insert trailing fenceposts */
4041 for (;;) {
4042 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
4043 p->head = FENCEPOST_HEAD;
4044 ++nfences;
4045 if ((char*)(&(nextp->head)) < old_end)
4046 p = nextp;
4047 else
4048 break;
4049 }
4050 (void)nfences;
4051 assert(nfences >= 2);
4052
4053 /* Insert the rest of old top into a bin as an ordinary free chunk */
4054 if (csp != old_top) {
4055 mchunkptr q = (mchunkptr)old_top;
4056 size_t psize = csp - old_top;
4057 mchunkptr tn = chunk_plus_offset(q, psize);
4058 set_free_with_pinuse(q, psize, tn);
4059 insert_chunk(m, q, psize);
4060 }
4061
4062 check_top_chunk(m, m->top);
4063}
4064
4065/* -------------------------- System allocation -------------------------- */
4066
4067/* Get memory from system using MORECORE or MMAP */
4068static void* sys_alloc(mstate m, size_t nb) {
4069 char* tbase = CMFAIL;
4070 size_t tsize = 0;
4071 flag_t mmap_flag = 0;
4072 size_t asize; /* allocation size */
4073
4074 ensure_initialization();
4075
4076 /* Directly map large chunks, but only if already initialized */
4077 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
4078 void* mem = mmap_alloc(m, nb);
4079 if (mem != 0)
4080 return mem;
4081 }
4082
4083 asize = granularity_align(nb + SYS_ALLOC_PADDING);
4084 if (asize <= nb)
4085 return 0; /* wraparound */
4086 if (m->footprint_limit != 0) {
4087 size_t fp = m->footprint + asize;
4088 if (fp <= m->footprint || fp > m->footprint_limit)
4089 return 0;
4090 }
4091
4092 /*
4093 Try getting memory in any of three ways (in most-preferred to
4094 least-preferred order):
4095 1. A call to MORECORE that can normally contiguously extend memory.
     (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
     main space is mmapped or a previous contiguous call failed)
4098 2. A call to MMAP new space (disabled if not HAVE_MMAP).
4099 Note that under the default settings, if MORECORE is unable to
4100 fulfill a request, and HAVE_MMAP is true, then mmap is
4101 used as a noncontiguous system allocator. This is a useful backup
4102 strategy for systems with holes in address spaces -- in this case
4103 sbrk cannot contiguously expand the heap, but mmap may be able to
4104 find space.
4105 3. A call to MORECORE that cannot usually contiguously extend memory.
4106 (disabled if not HAVE_MORECORE)
4107
4108 In all cases, we need to request enough bytes from system to ensure
4109 we can malloc nb bytes upon success, so pad with enough space for
4110 top_foot, plus alignment-pad to make sure we don't lose bytes if
4111 not on boundary, and round this up to a granularity unit.
4112 */
4113
4114 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
4115 char* br = CMFAIL;
4116 size_t ssize = asize; /* sbrk call size */
4117 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
4118 ACQUIRE_MALLOC_GLOBAL_LOCK();
4119
4120 if (ss == 0) { /* First time through or recovery */
4121 char* base = (char*)CALL_MORECORE(0);
4122 if (base != CMFAIL) {
4123 size_t fp;
4124 /* Adjust to end on a page boundary */
4125 if (!is_page_aligned(base))
4126 ssize += (page_align((size_t)base) - (size_t)base);
4127 fp = m->footprint + ssize; /* recheck limits */
4128 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
4129 (m->footprint_limit == 0 ||
4130 (fp > m->footprint && fp <= m->footprint_limit)) &&
4131 (br = (char*)(CALL_MORECORE(ssize))) == base) {
4132 tbase = base;
4133 tsize = ssize;
4134 }
4135 }
4136 }
4137 else {
4138 /* Subtract out existing available top space from MORECORE request. */
4139 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
4140 /* Use mem here only if it did continuously extend old space */
4141 if (ssize < HALF_MAX_SIZE_T &&
4142 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
4143 tbase = br;
4144 tsize = ssize;
4145 }
4146 }
4147
4148 if (tbase == CMFAIL) { /* Cope with partial failure */
4149 if (br != CMFAIL) { /* Try to use/extend the space we did get */
4150 if (ssize < HALF_MAX_SIZE_T &&
4151 ssize < nb + SYS_ALLOC_PADDING) {
4152 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
4153 if (esize < HALF_MAX_SIZE_T) {
4154 char* end = (char*)CALL_MORECORE(esize);
4155 if (end != CMFAIL)
4156 ssize += esize;
4157 else { /* Can't use; try to release */
4158 (void) CALL_MORECORE(-ssize);
4159 br = CMFAIL;
4160 }
4161 }
4162 }
4163 }
4164 if (br != CMFAIL) { /* Use the space we did get */
4165 tbase = br;
4166 tsize = ssize;
4167 }
4168 else
4169 disable_contiguous(m); /* Don't try contiguous path in the future */
4170 }
4171
4172 RELEASE_MALLOC_GLOBAL_LOCK();
4173 }
4174
4175 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
4176 char* mp = (char*)(CALL_MMAP(asize));
4177 if (mp != CMFAIL) {
4178 tbase = mp;
4179 tsize = asize;
4180 mmap_flag = USE_MMAP_BIT;
4181 }
4182 }
4183
4184 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
4185 if (asize < HALF_MAX_SIZE_T) {
4186 char* br = CMFAIL;
4187 char* end = CMFAIL;
4188 ACQUIRE_MALLOC_GLOBAL_LOCK();
4189 br = (char*)(CALL_MORECORE(asize));
4190 end = (char*)(CALL_MORECORE(0));
4191 RELEASE_MALLOC_GLOBAL_LOCK();
4192 if (br != CMFAIL && end != CMFAIL && br < end) {
4193 size_t ssize = end - br;
4194 if (ssize > nb + TOP_FOOT_SIZE) {
4195 tbase = br;
4196 tsize = ssize;
4197 }
4198 }
4199 }
4200 }
4201
4202 if (tbase != CMFAIL) {
4203
4204 if ((m->footprint += tsize) > m->max_footprint)
4205 m->max_footprint = m->footprint;
4206
4207 if (!is_initialized(m)) { /* first-time initialization */
4208 if (m->least_addr == 0 || tbase < m->least_addr)
4209 m->least_addr = tbase;
4210 m->seg.base = tbase;
4211 m->seg.size = tsize;
4212 m->seg.sflags = mmap_flag;
4213 m->magic = mparams.magic;
4214 m->release_checks = MAX_RELEASE_CHECK_RATE;
4215 init_bins(m);
4216#if !ONLY_MSPACES
4217 if (is_global(m))
4218 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
4219 else
4220#endif
4221 {
4222 /* Offset top by embedded malloc_state */
4223 mchunkptr mn = next_chunk(mem2chunk(m));
4224 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
4225 }
4226 }
4227
4228 else {
4229 /* Try to merge with an existing segment */
4230 msegmentptr sp = &m->seg;
4231 /* Only consider most recent segment if traversal suppressed */
4232 while (sp != 0 && tbase != sp->base + sp->size)
4233 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4234 if (sp != 0 &&
4235 !is_extern_segment(sp) &&
4236 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
4237 segment_holds(sp, m->top)) { /* append */
4238 sp->size += tsize;
4239 init_top(m, m->top, m->topsize + tsize);
4240 }
4241 else {
4242 if (tbase < m->least_addr)
4243 m->least_addr = tbase;
4244 sp = &m->seg;
4245 while (sp != 0 && sp->base != tbase + tsize)
4246 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
4247 if (sp != 0 &&
4248 !is_extern_segment(sp) &&
4249 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
4250 char* oldbase = sp->base;
4251 sp->base = tbase;
4252 sp->size += tsize;
4253 return prepend_alloc(m, tbase, oldbase, nb);
4254 }
4255 else
4256 add_segment(m, tbase, tsize, mmap_flag);
4257 }
4258 }
4259
4260 if (nb < m->topsize) { /* Allocate from new or extended top space */
4261 size_t rsize = m->topsize -= nb;
4262 mchunkptr p = m->top;
4263 mchunkptr r = m->top = chunk_plus_offset(p, nb);
4264 r->head = rsize | PINUSE_BIT;
4265 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
4266 check_top_chunk(m, m->top);
4267 check_malloced_chunk(m, chunk2mem(p), nb);
4268 return chunk2mem(p);
4269 }
4270 }
4271
4272 MALLOC_FAILURE_ACTION;
4273 return 0;
4274}
4275
4276/* ----------------------- system deallocation -------------------------- */
4277
4278/* Unmap and unlink any mmapped segments that don't contain used chunks */
4279static size_t release_unused_segments(mstate m) {
4280 size_t released = 0;
4281 int nsegs = 0;
4282 msegmentptr pred = &m->seg;
4283 msegmentptr sp = pred->next;
4284 while (sp != 0) {
4285 char* base = sp->base;
4286 size_t size = sp->size;
4287 msegmentptr next = sp->next;
4288 ++nsegs;
4289 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
4290 mchunkptr p = align_as_chunk(base);
4291 size_t psize = chunksize(p);
4292 /* Can unmap if first chunk holds entire segment and not pinned */
4293 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
4294 tchunkptr tp = (tchunkptr)p;
4295 assert(segment_holds(sp, (char*)sp));
4296 if (p == m->dv) {
4297 m->dv = 0;
4298 m->dvsize = 0;
4299 }
4300 else {
4301 unlink_large_chunk(m, tp);
4302 }
4303 if (CALL_MUNMAP(base, size) == 0) {
4304 released += size;
4305 m->footprint -= size;
4306 /* unlink obsoleted record */
4307 sp = pred;
4308 sp->next = next;
4309 }
4310 else { /* back out if cannot unmap */
4311 insert_large_chunk(m, tp, psize);
4312 }
4313 }
4314 }
4315 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
4316 break;
4317 pred = sp;
4318 sp = next;
4319 }
4320 /* Reset check counter */
4321 m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
4322 (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
4323 return released;
4324}
4325
4326static int sys_trim(mstate m, size_t pad) {
4327 size_t released = 0;
4328 ensure_initialization();
4329 if (pad < MAX_REQUEST && is_initialized(m)) {
4330 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
4331
4332 if (m->topsize > pad) {
4333 /* Shrink top space in granularity-size units, keeping at least one */
4334 size_t unit = mparams.granularity;
4335 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
4336 SIZE_T_ONE) * unit;
4337 msegmentptr sp = segment_holding(m, (char*)m->top);
4338
4339 if (!is_extern_segment(sp)) {
4340 if (is_mmapped_segment(sp)) {
4341 if (HAVE_MMAP &&
4342 sp->size >= extra &&
4343 !has_segment_link(m, sp)) { /* can't shrink if pinned */
4344 size_t newsize = sp->size - extra;
4345 (void)newsize; /* placate people compiling -Wunused-variable */
4346 /* Prefer mremap, fall back to munmap */
4347 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
4348 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
4349 released = extra;
4350 }
4351 }
4352 }
4353 else if (HAVE_MORECORE) {
4354 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
4355 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
4356 ACQUIRE_MALLOC_GLOBAL_LOCK();
4357 {
4358 /* Make sure end of memory is where we last set it. */
4359 char* old_br = (char*)(CALL_MORECORE(0));
4360 if (old_br == sp->base + sp->size) {
4361 char* rel_br = (char*)(CALL_MORECORE(-extra));
4362 char* new_br = (char*)(CALL_MORECORE(0));
4363 if (rel_br != CMFAIL && new_br < old_br)
4364 released = old_br - new_br;
4365 }
4366 }
4367 RELEASE_MALLOC_GLOBAL_LOCK();
4368 }
4369 }
4370
4371 if (released != 0) {
4372 sp->size -= released;
4373 m->footprint -= released;
4374 init_top(m, m->top, m->topsize - released);
4375 check_top_chunk(m, m->top);
4376 }
4377 }
4378
4379 /* Unmap any unused mmapped segments */
4380 if (HAVE_MMAP)
4381 released += release_unused_segments(m);
4382
4383 /* On failure, disable autotrim to avoid repeated failed future calls */
4384 if (released == 0 && m->topsize > m->trim_check)
4385 m->trim_check = MAX_SIZE_T;
4386 }
4387
4388 return (released != 0)? 1 : 0;
4389}
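
/*
  Usage sketch (illustrative): the exported trim entry point relays
  here, e.g. dlmalloc_trim(0) asks sys_trim(gm, 0) to give back as much
  top space as the granularity allows, while a nonzero pad keeps that
  many bytes cached for future allocations.
*/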
4390
4391/* Consolidate and bin a chunk. Differs from exported versions
4392 of free mainly in that the chunk need not be marked as inuse.
4393*/
4394static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
4395 mchunkptr next = chunk_plus_offset(p, psize);
4396 if (!pinuse(p)) {
4397 mchunkptr prev;
4398 size_t prevsize = p->prev_foot;
4399 if (is_mmapped(p)) {
4400 psize += prevsize + MMAP_FOOT_PAD;
4401 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4402 m->footprint -= psize;
4403 return;
4404 }
4405 prev = chunk_minus_offset(p, prevsize);
4406 psize += prevsize;
4407 p = prev;
4408 if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
4409 if (p != m->dv) {
4410 unlink_chunk(m, p, prevsize);
4411 }
4412 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4413 m->dvsize = psize;
4414 set_free_with_pinuse(p, psize, next);
4415 return;
4416 }
4417 }
4418 else {
4419 CORRUPTION_ERROR_ACTION(m);
4420 return;
4421 }
4422 }
4423 if (RTCHECK(ok_address(m, next))) {
4424 if (!cinuse(next)) { /* consolidate forward */
4425 if (next == m->top) {
4426 size_t tsize = m->topsize += psize;
4427 m->top = p;
4428 p->head = tsize | PINUSE_BIT;
4429 if (p == m->dv) {
4430 m->dv = 0;
4431 m->dvsize = 0;
4432 }
4433 return;
4434 }
4435 else if (next == m->dv) {
4436 size_t dsize = m->dvsize += psize;
4437 m->dv = p;
4438 set_size_and_pinuse_of_free_chunk(p, dsize);
4439 return;
4440 }
4441 else {
4442 size_t nsize = chunksize(next);
4443 psize += nsize;
4444 unlink_chunk(m, next, nsize);
4445 set_size_and_pinuse_of_free_chunk(p, psize);
4446 if (p == m->dv) {
4447 m->dvsize = psize;
4448 return;
4449 }
4450 }
4451 }
4452 else {
4453 set_free_with_pinuse(p, psize, next);
4454 }
4455 insert_chunk(m, p, psize);
4456 }
4457 else {
4458 CORRUPTION_ERROR_ACTION(m);
4459 }
4460}
4461
4462/* ---------------------------- malloc --------------------------- */
4463
4464/* allocate a large request from the best fitting chunk in a treebin */
4465static void* tmalloc_large(mstate m, size_t nb) {
4466 tchunkptr v = 0;
4467 size_t rsize = -nb; /* Unsigned negation */
4468 tchunkptr t;
4469 bindex_t idx;
4470 compute_tree_index(nb, idx);
4471 if ((t = *treebin_at(m, idx)) != 0) {
4472 /* Traverse tree for this bin looking for node with size == nb */
4473 size_t sizebits = nb << leftshift_for_tree_index(idx);
4474 tchunkptr rst = 0; /* The deepest untaken right subtree */
4475 for (;;) {
4476 tchunkptr rt;
4477 size_t trem = chunksize(t) - nb;
4478 if (trem < rsize) {
4479 v = t;
4480 if ((rsize = trem) == 0)
4481 break;
4482 }
4483 rt = t->child[1];
4484 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
4485 if (rt != 0 && rt != t)
4486 rst = rt;
4487 if (t == 0) {
4488 t = rst; /* set t to least subtree holding sizes > nb */
4489 break;
4490 }
4491 sizebits <<= 1;
4492 }
4493 }
4494 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
4495 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
4496 if (leftbits != 0) {
4497 bindex_t i;
4498 binmap_t leastbit = least_bit(leftbits);
4499 compute_bit2idx(leastbit, i);
4500 t = *treebin_at(m, i);
4501 }
4502 }
4503
4504 while (t != 0) { /* find smallest of tree or subtree */
4505 size_t trem = chunksize(t) - nb;
4506 if (trem < rsize) {
4507 rsize = trem;
4508 v = t;
4509 }
4510 t = leftmost_child(t);
4511 }
4512
4513 /* If dv is a better fit, return 0 so malloc will use it */
4514 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
4515 if (RTCHECK(ok_address(m, v))) { /* split */
4516 mchunkptr r = chunk_plus_offset(v, nb);
4517 assert(chunksize(v) == rsize + nb);
4518 if (RTCHECK(ok_next(v, r))) {
4519 unlink_large_chunk(m, v);
4520 if (rsize < MIN_CHUNK_SIZE)
4521 set_inuse_and_pinuse(m, v, (rsize + nb));
4522 else {
4523 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4524 set_size_and_pinuse_of_free_chunk(r, rsize);
4525 insert_chunk(m, r, rsize);
4526 }
4527 return chunk2mem(v);
4528 }
4529 }
4530 CORRUPTION_ERROR_ACTION(m);
4531 }
4532 return 0;
4533}
4534
4535/* allocate a small request from the best fitting chunk in a treebin */
4536static void* tmalloc_small(mstate m, size_t nb) {
4537 tchunkptr t, v;
4538 size_t rsize;
4539 bindex_t i;
4540 binmap_t leastbit = least_bit(m->treemap);
4541 compute_bit2idx(leastbit, i);
4542 v = t = *treebin_at(m, i);
4543 rsize = chunksize(t) - nb;
4544
4545 while ((t = leftmost_child(t)) != 0) {
4546 size_t trem = chunksize(t) - nb;
4547 if (trem < rsize) {
4548 rsize = trem;
4549 v = t;
4550 }
4551 }
4552
4553 if (RTCHECK(ok_address(m, v))) {
4554 mchunkptr r = chunk_plus_offset(v, nb);
4555 assert(chunksize(v) == rsize + nb);
4556 if (RTCHECK(ok_next(v, r))) {
4557 unlink_large_chunk(m, v);
4558 if (rsize < MIN_CHUNK_SIZE)
4559 set_inuse_and_pinuse(m, v, (rsize + nb));
4560 else {
4561 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4562 set_size_and_pinuse_of_free_chunk(r, rsize);
4563 replace_dv(m, r, rsize);
4564 }
4565 return chunk2mem(v);
4566 }
4567 }
4568
4569 CORRUPTION_ERROR_ACTION(m);
4570 return 0;
4571}
4572
4573#if !ONLY_MSPACES
4574
4575void* dlmalloc(size_t bytes) {
4576 /*
4577 Basic algorithm:
4578 If a small request (< 256 bytes minus per-chunk overhead):
4579 1. If one exists, use a remainderless chunk in associated smallbin.
4580 (Remainderless means that there are too few excess bytes to
4581 represent as a chunk.)
4582 2. If it is big enough, use the dv chunk, which is normally the
4583 chunk adjacent to the one used for the most recent small request.
4584 3. If one exists, split the smallest available chunk in a bin,
4585 saving remainder in dv.
4586 4. If it is big enough, use the top chunk.
4587 5. If available, get memory from system and use it
4588 Otherwise, for a large request:
4589 1. Find the smallest available binned chunk that fits, and use it
4590 if it is better fitting than dv chunk, splitting if necessary.
4591 2. If better fitting than any binned chunk, use the dv chunk.
4592 3. If it is big enough, use the top chunk.
4593 4. If request size >= mmap threshold, try to directly mmap this chunk.
4594 5. If available, get memory from system and use it
4595
    The ugly gotos here ensure that postaction occurs along all paths.
4597 */
4598
4599#if USE_LOCKS
4600 ensure_initialization(); /* initialize in sys_alloc if not using locks */
4601#endif
4602
4603 if (!PREACTION(gm)) {
4604 void* mem;
4605 size_t nb;
4606 if (bytes <= MAX_SMALL_REQUEST) {
4607 bindex_t idx;
4608 binmap_t smallbits;
4609 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4610 idx = small_index(nb);
4611 smallbits = gm->smallmap >> idx;
4612
4613 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4614 mchunkptr b, p;
4615 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4616 b = smallbin_at(gm, idx);
4617 p = b->fd;
4618 assert(chunksize(p) == small_index2size(idx));
4619 unlink_first_small_chunk(gm, b, p, idx);
4620 set_inuse_and_pinuse(gm, p, small_index2size(idx));
4621 mem = chunk2mem(p);
4622 check_malloced_chunk(gm, mem, nb);
4623 goto postaction;
4624 }
4625
4626 else if (nb > gm->dvsize) {
4627 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4628 mchunkptr b, p, r;
4629 size_t rsize;
4630 bindex_t i;
4631 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4632 binmap_t leastbit = least_bit(leftbits);
4633 compute_bit2idx(leastbit, i);
4634 b = smallbin_at(gm, i);
4635 p = b->fd;
4636 assert(chunksize(p) == small_index2size(i));
4637 unlink_first_small_chunk(gm, b, p, i);
4638 rsize = small_index2size(i) - nb;
4639 /* Fit here cannot be remainderless if 4byte sizes */
4640 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4641 set_inuse_and_pinuse(gm, p, small_index2size(i));
4642 else {
4643 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4644 r = chunk_plus_offset(p, nb);
4645 set_size_and_pinuse_of_free_chunk(r, rsize);
4646 replace_dv(gm, r, rsize);
4647 }
4648 mem = chunk2mem(p);
4649 check_malloced_chunk(gm, mem, nb);
4650 goto postaction;
4651 }
4652
4653 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4654 check_malloced_chunk(gm, mem, nb);
4655 goto postaction;
4656 }
4657 }
4658 }
4659 else if (bytes >= MAX_REQUEST)
4660 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4661 else {
4662 nb = pad_request(bytes);
4663 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4664 check_malloced_chunk(gm, mem, nb);
4665 goto postaction;
4666 }
4667 }
4668
4669 if (nb <= gm->dvsize) {
4670 size_t rsize = gm->dvsize - nb;
4671 mchunkptr p = gm->dv;
4672 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4673 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4674 gm->dvsize = rsize;
4675 set_size_and_pinuse_of_free_chunk(r, rsize);
4676 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4677 }
4678 else { /* exhaust dv */
4679 size_t dvs = gm->dvsize;
4680 gm->dvsize = 0;
4681 gm->dv = 0;
4682 set_inuse_and_pinuse(gm, p, dvs);
4683 }
4684 mem = chunk2mem(p);
4685 check_malloced_chunk(gm, mem, nb);
4686 goto postaction;
4687 }
4688
4689 else if (nb < gm->topsize) { /* Split top */
4690 size_t rsize = gm->topsize -= nb;
4691 mchunkptr p = gm->top;
4692 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4693 r->head = rsize | PINUSE_BIT;
4694 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4695 mem = chunk2mem(p);
4696 check_top_chunk(gm, gm->top);
4697 check_malloced_chunk(gm, mem, nb);
4698 goto postaction;
4699 }
4700
4701 mem = sys_alloc(gm, nb);
4702
4703 postaction:
4704 POSTACTION(gm);
4705 return mem;
4706 }
4707
4708 return 0;
4709}
4710
4711/* ---------------------------- free --------------------------- */
4712
4713void dlfree(void* mem) {
4714 /*
    Consolidate freed chunks with preceding or succeeding bordering
4716 free chunks, if they exist, and then place in a bin. Intermixed
4717 with special cases for top, dv, mmapped chunks, and usage errors.
4718 */
4719
4720 if (mem != 0) {
4721 mchunkptr p = mem2chunk(mem);
4722#if FOOTERS
4723 mstate fm = get_mstate_for(p);
4724 if (!ok_magic(fm)) {
4725 USAGE_ERROR_ACTION(fm, p);
4726 return;
4727 }
4728#else /* FOOTERS */
4729#define fm gm
4730#endif /* FOOTERS */
4731 if (!PREACTION(fm)) {
4732 check_inuse_chunk(fm, p);
4733 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4734 size_t psize = chunksize(p);
4735 mchunkptr next = chunk_plus_offset(p, psize);
4736 if (!pinuse(p)) {
4737 size_t prevsize = p->prev_foot;
4738 if (is_mmapped(p)) {
4739 psize += prevsize + MMAP_FOOT_PAD;
4740 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4741 fm->footprint -= psize;
4742 goto postaction;
4743 }
4744 else {
4745 mchunkptr prev = chunk_minus_offset(p, prevsize);
4746 psize += prevsize;
4747 p = prev;
4748 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4749 if (p != fm->dv) {
4750 unlink_chunk(fm, p, prevsize);
4751 }
4752 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4753 fm->dvsize = psize;
4754 set_free_with_pinuse(p, psize, next);
4755 goto postaction;
4756 }
4757 }
4758 else
4759 goto erroraction;
4760 }
4761 }
4762
4763 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4764 if (!cinuse(next)) { /* consolidate forward */
4765 if (next == fm->top) {
4766 size_t tsize = fm->topsize += psize;
4767 fm->top = p;
4768 p->head = tsize | PINUSE_BIT;
4769 if (p == fm->dv) {
4770 fm->dv = 0;
4771 fm->dvsize = 0;
4772 }
4773 if (should_trim(fm, tsize))
4774 sys_trim(fm, 0);
4775 goto postaction;
4776 }
4777 else if (next == fm->dv) {
4778 size_t dsize = fm->dvsize += psize;
4779 fm->dv = p;
4780 set_size_and_pinuse_of_free_chunk(p, dsize);
4781 goto postaction;
4782 }
4783 else {
4784 size_t nsize = chunksize(next);
4785 psize += nsize;
4786 unlink_chunk(fm, next, nsize);
4787 set_size_and_pinuse_of_free_chunk(p, psize);
4788 if (p == fm->dv) {
4789 fm->dvsize = psize;
4790 goto postaction;
4791 }
4792 }
4793 }
4794 else
4795 set_free_with_pinuse(p, psize, next);
4796
4797 if (is_small(psize)) {
4798 insert_small_chunk(fm, p, psize);
4799 check_free_chunk(fm, p);
4800 }
4801 else {
4802 tchunkptr tp = (tchunkptr)p;
4803 insert_large_chunk(fm, tp, psize);
4804 check_free_chunk(fm, p);
4805 if (--fm->release_checks == 0)
4806 release_unused_segments(fm);
4807 }
4808 goto postaction;
4809 }
4810 }
4811 erroraction:
4812 USAGE_ERROR_ACTION(fm, p);
4813 postaction:
4814 POSTACTION(fm);
4815 }
4816 }
4817#if !FOOTERS
4818#undef fm
4819#endif /* FOOTERS */
4820}
4821
4822void* dlcalloc(size_t n_elements, size_t elem_size) {
4823 void* mem;
4824 size_t req = 0;
4825 if (n_elements != 0) {
4826 req = n_elements * elem_size;
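    /* Cheap overflow check: the division below runs only when either operand
       needs more than 16 bits; if both fit in 16 bits their product cannot
       overflow a 32-bit (or wider) size_t, so the division is skipped. */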
4827 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4828 (req / n_elements != elem_size))
4829 req = MAX_SIZE_T; /* force downstream failure on overflow */
4830 }
4831 mem = dlmalloc(req);
4832 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4833 memset(mem, 0, req);
4834 return mem;
4835}
4836
4837#endif /* !ONLY_MSPACES */
4838
4839/* ------------ Internal support for realloc, memalign, etc -------------- */
4840
4841/* Try to realloc; only in-place unless can_move true */
4842static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
4843 int can_move) {
4844 mchunkptr newp = 0;
4845 size_t oldsize = chunksize(p);
4846 mchunkptr next = chunk_plus_offset(p, oldsize);
4847 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
4848 ok_next(p, next) && ok_pinuse(next))) {
4849 if (is_mmapped(p)) {
4850 newp = mmap_resize(m, p, nb, can_move);
4851 }
4852 else if (oldsize >= nb) { /* already big enough */
4853 size_t rsize = oldsize - nb;
4854 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
4855 mchunkptr r = chunk_plus_offset(p, nb);
4856 set_inuse(m, p, nb);
4857 set_inuse(m, r, rsize);
4858 dispose_chunk(m, r, rsize);
4859 }
4860 newp = p;
4861 }
4862 else if (next == m->top) { /* extend into top */
4863 if (oldsize + m->topsize > nb) {
4864 size_t newsize = oldsize + m->topsize;
4865 size_t newtopsize = newsize - nb;
4866 mchunkptr newtop = chunk_plus_offset(p, nb);
4867 set_inuse(m, p, nb);
4868 newtop->head = newtopsize |PINUSE_BIT;
4869 m->top = newtop;
4870 m->topsize = newtopsize;
4871 newp = p;
4872 }
4873 }
4874 else if (next == m->dv) { /* extend into dv */
4875 size_t dvs = m->dvsize;
4876 if (oldsize + dvs >= nb) {
4877 size_t dsize = oldsize + dvs - nb;
4878 if (dsize >= MIN_CHUNK_SIZE) {
4879 mchunkptr r = chunk_plus_offset(p, nb);
4880 mchunkptr n = chunk_plus_offset(r, dsize);
4881 set_inuse(m, p, nb);
4882 set_size_and_pinuse_of_free_chunk(r, dsize);
4883 clear_pinuse(n);
4884 m->dvsize = dsize;
4885 m->dv = r;
4886 }
4887 else { /* exhaust dv */
4888 size_t newsize = oldsize + dvs;
4889 set_inuse(m, p, newsize);
4890 m->dvsize = 0;
4891 m->dv = 0;
4892 }
4893 newp = p;
4894 }
4895 }
4896 else if (!cinuse(next)) { /* extend into next free chunk */
4897 size_t nextsize = chunksize(next);
4898 if (oldsize + nextsize >= nb) {
4899 size_t rsize = oldsize + nextsize - nb;
4900 unlink_chunk(m, next, nextsize);
4901 if (rsize < MIN_CHUNK_SIZE) {
4902 size_t newsize = oldsize + nextsize;
4903 set_inuse(m, p, newsize);
4904 }
4905 else {
4906 mchunkptr r = chunk_plus_offset(p, nb);
4907 set_inuse(m, p, nb);
4908 set_inuse(m, r, rsize);
4909 dispose_chunk(m, r, rsize);
4910 }
4911 newp = p;
4912 }
4913 }
4914 }
4915 else {
4916 USAGE_ERROR_ACTION(m, chunk2mem(p));
4917 }
4918 return newp;
4919}
4920
4921static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4922 void* mem = 0;
4923 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4924 alignment = MIN_CHUNK_SIZE;
4925 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
4926 size_t a = MALLOC_ALIGNMENT << 1;
4927 while (a < alignment) a <<= 1;
4928 alignment = a;
4929 }
4930 if (bytes >= MAX_REQUEST - alignment) {
4931 if (m != 0) { /* Test isn't needed but avoids compiler warning */
4932 MALLOC_FAILURE_ACTION;
4933 }
4934 }
4935 else {
4936 size_t nb = request2size(bytes);
4937 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4938 mem = internal_malloc(m, req);
4939 if (mem != 0) {
4940 mchunkptr p = mem2chunk(mem);
4941 if (PREACTION(m))
4942 return 0;
4943 if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
4944 /*
4945 Find an aligned spot inside chunk. Since we need to give
4946 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4947 the first calculation places us at a spot with less than
4948 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4949 We've allocated enough total room so that this is always
4950 possible.
4951 */
4952 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
4953 SIZE_T_ONE)) &
4954 -alignment));
4955 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
4956 br : br+alignment;
4957 mchunkptr newp = (mchunkptr)pos;
4958 size_t leadsize = pos - (char*)(p);
4959 size_t newsize = chunksize(p) - leadsize;
4960
4961 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
4962 newp->prev_foot = p->prev_foot + leadsize;
4963 newp->head = newsize;
4964 }
4965 else { /* Otherwise, give back leader, use the rest */
4966 set_inuse(m, newp, newsize);
4967 set_inuse(m, p, leadsize);
4968 dispose_chunk(m, p, leadsize);
4969 }
4970 p = newp;
4971 }
4972
4973 /* Give back spare room at the end */
4974 if (!is_mmapped(p)) {
4975 size_t size = chunksize(p);
4976 if (size > nb + MIN_CHUNK_SIZE) {
4977 size_t remainder_size = size - nb;
4978 mchunkptr remainder = chunk_plus_offset(p, nb);
4979 set_inuse(m, p, nb);
4980 set_inuse(m, remainder, remainder_size);
4981 dispose_chunk(m, remainder, remainder_size);
4982 }
4983 }
4984
4985 mem = chunk2mem(p);
4986 assert (chunksize(p) >= nb);
4987 assert(((size_t)mem & (alignment - 1)) == 0);
4988 check_inuse_chunk(m, p);
4989 POSTACTION(m);
4990 }
4991 }
4992 return mem;
4993}
4994
4995/*
4996 Common support for independent_X routines, handling
4997 all of the combinations that can result.
4998 The opts arg has:
4999 bit 0 set if all elements are same size (using sizes[0])
5000 bit 1 set if elements should be zeroed
5001*/
5002static void** ialloc(mstate m,
5003 size_t n_elements,
5004 size_t* sizes,
5005 int opts,
5006 void* chunks[]) {
5007
5008 size_t element_size; /* chunksize of each element, if all same */
5009 size_t contents_size; /* total size of elements */
5010 size_t array_size; /* request size of pointer array */
5011 void* mem; /* malloced aggregate space */
5012 mchunkptr p; /* corresponding chunk */
5013 size_t remainder_size; /* remaining bytes while splitting */
5014 void** marray; /* either "chunks" or malloced ptr array */
5015 mchunkptr array_chunk; /* chunk for malloced ptr array */
5016 flag_t was_enabled; /* to disable mmap */
5017 size_t size;
5018 size_t i;
5019
5020 ensure_initialization();
5021 /* compute array length, if needed */
5022 if (chunks != 0) {
5023 if (n_elements == 0)
5024 return chunks; /* nothing to do */
5025 marray = chunks;
5026 array_size = 0;
5027 }
5028 else {
5029 /* if empty req, must still return chunk representing empty array */
5030 if (n_elements == 0)
5031 return (void**)internal_malloc(m, 0);
5032 marray = 0;
5033 array_size = request2size(n_elements * (sizeof(void*)));
5034 }
5035
5036 /* compute total element size */
5037 if (opts & 0x1) { /* all-same-size */
5038 element_size = request2size(*sizes);
5039 contents_size = n_elements * element_size;
5040 }
5041 else { /* add up all the sizes */
5042 element_size = 0;
5043 contents_size = 0;
5044 for (i = 0; i != n_elements; ++i)
5045 contents_size += request2size(sizes[i]);
5046 }
5047
5048 size = contents_size + array_size;
5049
5050 /*
5051 Allocate the aggregate chunk. First disable direct-mmapping so
5052 malloc won't use it, since we would not be able to later
5053 free/realloc space internal to a segregated mmap region.
5054 */
5055 was_enabled = use_mmap(m);
5056 disable_mmap(m);
5057 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
5058 if (was_enabled)
5059 enable_mmap(m);
5060 if (mem == 0)
5061 return 0;
5062
5063 if (PREACTION(m)) return 0;
5064 p = mem2chunk(mem);
5065 remainder_size = chunksize(p);
5066
5067 assert(!is_mmapped(p));
5068
5069 if (opts & 0x2) { /* optionally clear the elements */
5070 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
5071 }
5072
5073 /* If not provided, allocate the pointer array as final part of chunk */
5074 if (marray == 0) {
5075 size_t array_chunk_size;
5076 array_chunk = chunk_plus_offset(p, contents_size);
5077 array_chunk_size = remainder_size - contents_size;
5078 marray = (void**) (chunk2mem(array_chunk));
5079 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
5080 remainder_size = contents_size;
5081 }
5082
5083 /* split out elements */
5084 for (i = 0; ; ++i) {
5085 marray[i] = chunk2mem(p);
5086 if (i != n_elements-1) {
5087 if (element_size != 0)
5088 size = element_size;
5089 else
5090 size = request2size(sizes[i]);
5091 remainder_size -= size;
5092 set_size_and_pinuse_of_inuse_chunk(m, p, size);
5093 p = chunk_plus_offset(p, size);
5094 }
5095 else { /* the final element absorbs any overallocation slop */
5096 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
5097 break;
5098 }
5099 }
5100
5101#if DEBUG
5102 if (marray != chunks) {
5103 /* final element must have exactly exhausted chunk */
5104 if (element_size != 0) {
5105 assert(remainder_size == element_size);
5106 }
5107 else {
5108 assert(remainder_size == request2size(sizes[i]));
5109 }
5110 check_inuse_chunk(m, mem2chunk(marray));
5111 }
5112 for (i = 0; i != n_elements; ++i)
5113 check_inuse_chunk(m, mem2chunk(marray[i]));
5114
5115#endif /* DEBUG */
5116
5117 POSTACTION(m);
5118 return marray;
5119}
5120
5121/* Try to free all pointers in the given array.
   Note: this could be made faster by delaying consolidation, at the price
   of disabling some user integrity checks. We still optimize some
   consolidations by combining adjacent chunks before freeing, which will
   occur often if allocated with ialloc or the array is sorted.
5127*/
5128static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
5129 size_t unfreed = 0;
5130 if (!PREACTION(m)) {
5131 void** a;
5132 void** fence = &(array[nelem]);
5133 for (a = array; a != fence; ++a) {
5134 void* mem = *a;
5135 if (mem != 0) {
5136 mchunkptr p = mem2chunk(mem);
5137 size_t psize = chunksize(p);
5138#if FOOTERS
5139 if (get_mstate_for(p) != m) {
5140 ++unfreed;
5141 continue;
5142 }
5143#endif
5144 check_inuse_chunk(m, p);
5145 *a = 0;
5146 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
5147 void ** b = a + 1; /* try to merge with next chunk */
5148 mchunkptr next = next_chunk(p);
5149 if (b != fence && *b == chunk2mem(next)) {
5150 size_t newsize = chunksize(next) + psize;
5151 set_inuse(m, p, newsize);
5152 *b = chunk2mem(p);
5153 }
5154 else
5155 dispose_chunk(m, p, psize);
5156 }
5157 else {
5158 CORRUPTION_ERROR_ACTION(m);
5159 break;
5160 }
5161 }
5162 }
5163 if (should_trim(m, m->topsize))
5164 sys_trim(m, 0);
5165 POSTACTION(m);
5166 }
5167 return unfreed;
5168}
5169
5170/* Traversal */
5171#if MALLOC_INSPECT_ALL
5172static void internal_inspect_all(mstate m,
5173 void(*handler)(void *start,
5174 void *end,
5175 size_t used_bytes,
5176 void* callback_arg),
5177 void* arg) {
5178 if (is_initialized(m)) {
5179 mchunkptr top = m->top;
5180 msegmentptr s;
5181 for (s = &m->seg; s != 0; s = s->next) {
5182 mchunkptr q = align_as_chunk(s->base);
5183 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
5184 mchunkptr next = next_chunk(q);
5185 size_t sz = chunksize(q);
5186 size_t used;
5187 void* start;
5188 if (is_inuse(q)) {
5189 used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
5190 start = chunk2mem(q);
5191 }
5192 else {
5193 used = 0;
5194 if (is_small(sz)) { /* offset by possible bookkeeping */
5195 start = (void*)((char*)q + sizeof(struct malloc_chunk));
5196 }
5197 else {
5198 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
5199 }
5200 }
5201 if (start < (void*)next) /* skip if all space is bookkeeping */
5202 handler(start, next, used, arg);
5203 if (q == top)
5204 break;
5205 q = next;
5206 }
5207 }
5208 }
5209}
5210#endif /* MALLOC_INSPECT_ALL */
5211
5212/* ------------------ Exported realloc, memalign, etc -------------------- */
5213
5214#if !ONLY_MSPACES
5215
5216void* dlrealloc(void* oldmem, size_t bytes) {
5217 void* mem = 0;
5218 if (oldmem == 0) {
5219 mem = dlmalloc(bytes);
5220 }
5221 else if (bytes >= MAX_REQUEST) {
5222 MALLOC_FAILURE_ACTION;
5223 }
5224#ifdef REALLOC_ZERO_BYTES_FREES
5225 else if (bytes == 0) {
5226 dlfree(oldmem);
5227 }
5228#endif /* REALLOC_ZERO_BYTES_FREES */
5229 else {
5230 size_t nb = request2size(bytes);
5231 mchunkptr oldp = mem2chunk(oldmem);
5232#if ! FOOTERS
5233 mstate m = gm;
5234#else /* FOOTERS */
5235 mstate m = get_mstate_for(oldp);
5236 if (!ok_magic(m)) {
5237 USAGE_ERROR_ACTION(m, oldmem);
5238 return 0;
5239 }
5240#endif /* FOOTERS */
5241 if (!PREACTION(m)) {
5242 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5243 POSTACTION(m);
5244 if (newp != 0) {
5245 check_inuse_chunk(m, newp);
5246 mem = chunk2mem(newp);
5247 }
5248 else {
5249 mem = internal_malloc(m, bytes);
5250 if (mem != 0) {
5251 size_t oc = chunksize(oldp) - overhead_for(oldp);
5252 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5253 internal_free(m, oldmem);
5254 }
5255 }
5256 }
5257 }
5258 return mem;
5259}
5260
5261void* dlrealloc_in_place(void* oldmem, size_t bytes) {
5262 void* mem = 0;
5263 if (oldmem != 0) {
5264 if (bytes >= MAX_REQUEST) {
5265 MALLOC_FAILURE_ACTION;
5266 }
5267 else {
5268 size_t nb = request2size(bytes);
5269 mchunkptr oldp = mem2chunk(oldmem);
5270#if ! FOOTERS
5271 mstate m = gm;
5272#else /* FOOTERS */
5273 mstate m = get_mstate_for(oldp);
5274 if (!ok_magic(m)) {
5275 USAGE_ERROR_ACTION(m, oldmem);
5276 return 0;
5277 }
5278#endif /* FOOTERS */
5279 if (!PREACTION(m)) {
5280 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5281 POSTACTION(m);
5282 if (newp == oldp) {
5283 check_inuse_chunk(m, newp);
5284 mem = oldmem;
5285 }
5286 }
5287 }
5288 }
5289 return mem;
5290}
5291
5292void* dlmemalign(size_t alignment, size_t bytes) {
5293 if (alignment <= MALLOC_ALIGNMENT) {
5294 return dlmalloc(bytes);
5295 }
5296 return internal_memalign(gm, alignment, bytes);
5297}
5298
5299int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
5300 void* mem = 0;
5301 if (alignment == MALLOC_ALIGNMENT)
5302 mem = dlmalloc(bytes);
5303 else {
5304 size_t d = alignment / sizeof(void*);
5305 size_t r = alignment % sizeof(void*);
5306 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
5307 return EINVAL;
5308 else if (bytes <= MAX_REQUEST - alignment) {
5309 if (alignment < MIN_CHUNK_SIZE)
5310 alignment = MIN_CHUNK_SIZE;
5311 mem = internal_memalign(gm, alignment, bytes);
5312 }
5313 }
5314 if (mem == 0)
5315 return ENOMEM;
5316 else {
5317 *pp = mem;
5318 return 0;
5319 }
5320}
5321
5322void* dlvalloc(size_t bytes) {
5323 size_t pagesz;
5324 ensure_initialization();
5325 pagesz = mparams.page_size;
5326 return dlmemalign(pagesz, bytes);
5327}
5328
5329void* dlpvalloc(size_t bytes) {
5330 size_t pagesz;
5331 ensure_initialization();
5332 pagesz = mparams.page_size;
5333 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
5334}
5335
5336void** dlindependent_calloc(size_t n_elements, size_t elem_size,
5337 void* chunks[]) {
5338 size_t sz = elem_size; /* serves as 1-element array */
5339 return ialloc(gm, n_elements, &sz, 3, chunks);
5340}
5341
5342void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
5343 void* chunks[]) {
5344 return ialloc(gm, n_elements, sizes, 0, chunks);
5345}
5346
5347size_t dlbulk_free(void* array[], size_t nelem) {
5348 return internal_bulk_free(gm, array, nelem);
5349}
5350
5351#if MALLOC_INSPECT_ALL
5352void dlmalloc_inspect_all(void(*handler)(void *start,
5353 void *end,
5354 size_t used_bytes,
5355 void* callback_arg),
5356 void* arg) {
5357 ensure_initialization();
5358 if (!PREACTION(gm)) {
5359 internal_inspect_all(gm, handler, arg);
5360 POSTACTION(gm);
5361 }
5362}
5363#endif /* MALLOC_INSPECT_ALL */
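
/*
  Illustrative sketch (only meaningful when MALLOC_INSPECT_ALL is enabled;
  the names below are examples, not part of this file): a handler that
  tallies payload bytes currently in use.

    static void count_used(void* start, void* end, size_t used_bytes, void* arg)
    {
      (void)start; (void)end;
      *(size_t*)arg += used_bytes;
    }
    ...
    size_t total = 0;
    dlmalloc_inspect_all(count_used, &total);
*/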
5364
5365int dlmalloc_trim(size_t pad) {
5366 int result = 0;
5367 ensure_initialization();
5368 if (!PREACTION(gm)) {
5369 result = sys_trim(gm, pad);
5370 POSTACTION(gm);
5371 }
5372 return result;
5373}
5374
5375size_t dlmalloc_footprint(void) {
5376 return gm->footprint;
5377}
5378
5379size_t dlmalloc_max_footprint(void) {
5380 return gm->max_footprint;
5381}
5382
5383size_t dlmalloc_footprint_limit(void) {
5384 size_t maf = gm->footprint_limit;
5385 return maf == 0 ? MAX_SIZE_T : maf;
5386}
5387
5388size_t dlmalloc_set_footprint_limit(size_t bytes) {
5389 size_t result; /* invert sense of 0 */
  if (bytes == 0)
    result = granularity_align(1); /* Use minimal size */
  else if (bytes == MAX_SIZE_T)
5393 result = 0; /* disable */
5394 else
5395 result = granularity_align(bytes);
5396 return gm->footprint_limit = result;
5397}
5398
5399#if !NO_MALLINFO
5400struct mallinfo dlmallinfo(void) {
5401 return internal_mallinfo(gm);
5402}
5403#endif /* NO_MALLINFO */
5404
5405#if !NO_MALLOC_STATS
5406void dlmalloc_stats() {
5407 internal_malloc_stats(gm);
5408}
5409#endif /* NO_MALLOC_STATS */
5410
5411int dlmallopt(int param_number, int value) {
5412 return change_mparam(param_number, value);
5413}
5414
5415size_t dlmalloc_usable_size(void* mem) {
5416 if (mem != 0) {
5417 mchunkptr p = mem2chunk(mem);
5418 if (is_inuse(p))
5419 return chunksize(p) - overhead_for(p);
5420 }
5421 return 0;
5422}
5423
5424#endif /* !ONLY_MSPACES */
5425
5426/* ----------------------------- user mspaces ---------------------------- */
5427
5428#if MSPACES
5429
5430static mstate init_user_mstate(char* tbase, size_t tsize) {
5431 size_t msize = pad_request(sizeof(struct malloc_state));
5432 mchunkptr mn;
5433 mchunkptr msp = align_as_chunk(tbase);
5434 mstate m = (mstate)(chunk2mem(msp));
5435 memset(m, 0, msize);
5436 (void)INITIAL_LOCK(&m->mutex);
5437 msp->head = (msize|INUSE_BITS);
5438 m->seg.base = m->least_addr = tbase;
5439 m->seg.size = m->footprint = m->max_footprint = tsize;
5440 m->magic = mparams.magic;
5441 m->release_checks = MAX_RELEASE_CHECK_RATE;
5442 m->mflags = mparams.default_mflags;
5443 m->extp = 0;
5444 m->exts = 0;
5445 disable_contiguous(m);
5446 init_bins(m);
5447 mn = next_chunk(mem2chunk(m));
5448 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
5449 check_top_chunk(m, m->top);
5450 return m;
5451}
5452
5453mspace create_mspace(size_t capacity, int locked) {
5454 mstate m = 0;
5455 size_t msize;
5456 ensure_initialization();
5457 msize = pad_request(sizeof(struct malloc_state));
5458 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5459 size_t rs = ((capacity == 0)? mparams.granularity :
5460 (capacity + TOP_FOOT_SIZE + msize));
5461 size_t tsize = granularity_align(rs);
5462 char* tbase = (char*)(CALL_MMAP(tsize));
5463 if (tbase != CMFAIL) {
5464 m = init_user_mstate(tbase, tsize);
5465 m->seg.sflags = USE_MMAP_BIT;
5466 set_lock(m, locked);
5467 }
5468 }
5469 return (mspace)m;
5470}
5471
5472mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
5473 mstate m = 0;
5474 size_t msize;
5475 ensure_initialization();
5476 msize = pad_request(sizeof(struct malloc_state));
5477 if (capacity > msize + TOP_FOOT_SIZE &&
5478 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
5479 m = init_user_mstate((char*)base, capacity);
5480 m->seg.sflags = EXTERN_BIT;
5481 set_lock(m, locked);
5482 }
5483 return (mspace)m;
5484}
5485
5486int mspace_track_large_chunks(mspace msp, int enable) {
5487 int ret = 0;
5488 mstate ms = (mstate)msp;
5489 if (!PREACTION(ms)) {
5490 if (!use_mmap(ms)) {
5491 ret = 1;
5492 }
5493 if (!enable) {
5494 enable_mmap(ms);
5495 } else {
5496 disable_mmap(ms);
5497 }
5498 POSTACTION(ms);
5499 }
5500 return ret;
5501}
5502
5503size_t destroy_mspace(mspace msp) {
5504 size_t freed = 0;
5505 mstate ms = (mstate)msp;
5506 if (ok_magic(ms)) {
5507 msegmentptr sp = &ms->seg;
5508 (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
5509 while (sp != 0) {
5510 char* base = sp->base;
5511 size_t size = sp->size;
5512 flag_t flag = sp->sflags;
5513 (void)base; /* placate people compiling -Wunused-variable */
5514 sp = sp->next;
5515 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
5516 CALL_MUNMAP(base, size) == 0)
5517 freed += size;
5518 }
5519 }
5520 else {
5521 USAGE_ERROR_ACTION(ms,ms);
5522 }
5523 return freed;
5524}
5525
5526/*
5527 mspace versions of routines are near-clones of the global
5528 versions. This is not so nice but better than the alternatives.
5529*/
5530
5531void* mspace_malloc(mspace msp, size_t bytes) {
5532 mstate ms = (mstate)msp;
5533 if (!ok_magic(ms)) {
5534 USAGE_ERROR_ACTION(ms,ms);
5535 return 0;
5536 }
5537 if (!PREACTION(ms)) {
5538 void* mem;
5539 size_t nb;
5540 if (bytes <= MAX_SMALL_REQUEST) {
5541 bindex_t idx;
5542 binmap_t smallbits;
5543 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
5544 idx = small_index(nb);
5545 smallbits = ms->smallmap >> idx;
5546
5547 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
5548 mchunkptr b, p;
5549 idx += ~smallbits & 1; /* Uses next bin if idx empty */
5550 b = smallbin_at(ms, idx);
5551 p = b->fd;
5552 assert(chunksize(p) == small_index2size(idx));
5553 unlink_first_small_chunk(ms, b, p, idx);
5554 set_inuse_and_pinuse(ms, p, small_index2size(idx));
5555 mem = chunk2mem(p);
5556 check_malloced_chunk(ms, mem, nb);
5557 goto postaction;
5558 }
5559
5560 else if (nb > ms->dvsize) {
5561 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
5562 mchunkptr b, p, r;
5563 size_t rsize;
5564 bindex_t i;
5565 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
5566 binmap_t leastbit = least_bit(leftbits);
5567 compute_bit2idx(leastbit, i);
5568 b = smallbin_at(ms, i);
5569 p = b->fd;
5570 assert(chunksize(p) == small_index2size(i));
5571 unlink_first_small_chunk(ms, b, p, i);
5572 rsize = small_index2size(i) - nb;
5573 /* Fit here cannot be remainderless if 4byte sizes */
5574 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
5575 set_inuse_and_pinuse(ms, p, small_index2size(i));
5576 else {
5577 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5578 r = chunk_plus_offset(p, nb);
5579 set_size_and_pinuse_of_free_chunk(r, rsize);
5580 replace_dv(ms, r, rsize);
5581 }
5582 mem = chunk2mem(p);
5583 check_malloced_chunk(ms, mem, nb);
5584 goto postaction;
5585 }
5586
5587 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
5588 check_malloced_chunk(ms, mem, nb);
5589 goto postaction;
5590 }
5591 }
5592 }
5593 else if (bytes >= MAX_REQUEST)
5594 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
5595 else {
5596 nb = pad_request(bytes);
5597 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
5598 check_malloced_chunk(ms, mem, nb);
5599 goto postaction;
5600 }
5601 }
5602
5603 if (nb <= ms->dvsize) {
5604 size_t rsize = ms->dvsize - nb;
5605 mchunkptr p = ms->dv;
5606 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
5607 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
5608 ms->dvsize = rsize;
5609 set_size_and_pinuse_of_free_chunk(r, rsize);
5610 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5611 }
5612 else { /* exhaust dv */
5613 size_t dvs = ms->dvsize;
5614 ms->dvsize = 0;
5615 ms->dv = 0;
5616 set_inuse_and_pinuse(ms, p, dvs);
5617 }
5618 mem = chunk2mem(p);
5619 check_malloced_chunk(ms, mem, nb);
5620 goto postaction;
5621 }
5622
5623 else if (nb < ms->topsize) { /* Split top */
5624 size_t rsize = ms->topsize -= nb;
5625 mchunkptr p = ms->top;
5626 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
5627 r->head = rsize | PINUSE_BIT;
5628 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
5629 mem = chunk2mem(p);
5630 check_top_chunk(ms, ms->top);
5631 check_malloced_chunk(ms, mem, nb);
5632 goto postaction;
5633 }
5634
5635 mem = sys_alloc(ms, nb);
5636
5637 postaction:
5638 POSTACTION(ms);
5639 return mem;
5640 }
5641
5642 return 0;
5643}
5644
5645void mspace_free(mspace msp, void* mem) {
5646 if (mem != 0) {
5647 mchunkptr p = mem2chunk(mem);
5648#if FOOTERS
5649 mstate fm = get_mstate_for(p);
5650 (void)msp; /* placate people compiling -Wunused */
5651#else /* FOOTERS */
5652 mstate fm = (mstate)msp;
5653#endif /* FOOTERS */
5654 if (!ok_magic(fm)) {
5655 USAGE_ERROR_ACTION(fm, p);
5656 return;
5657 }
5658 if (!PREACTION(fm)) {
5659 check_inuse_chunk(fm, p);
5660 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
5661 size_t psize = chunksize(p);
5662 mchunkptr next = chunk_plus_offset(p, psize);
5663 if (!pinuse(p)) {
5664 size_t prevsize = p->prev_foot;
5665 if (is_mmapped(p)) {
5666 psize += prevsize + MMAP_FOOT_PAD;
5667 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
5668 fm->footprint -= psize;
5669 goto postaction;
5670 }
5671 else {
5672 mchunkptr prev = chunk_minus_offset(p, prevsize);
5673 psize += prevsize;
5674 p = prev;
5675 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
5676 if (p != fm->dv) {
5677 unlink_chunk(fm, p, prevsize);
5678 }
5679 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
5680 fm->dvsize = psize;
5681 set_free_with_pinuse(p, psize, next);
5682 goto postaction;
5683 }
5684 }
5685 else
5686 goto erroraction;
5687 }
5688 }
5689
5690 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
5691 if (!cinuse(next)) { /* consolidate forward */
5692 if (next == fm->top) {
5693 size_t tsize = fm->topsize += psize;
5694 fm->top = p;
5695 p->head = tsize | PINUSE_BIT;
5696 if (p == fm->dv) {
5697 fm->dv = 0;
5698 fm->dvsize = 0;
5699 }
5700 if (should_trim(fm, tsize))
5701 sys_trim(fm, 0);
5702 goto postaction;
5703 }
5704 else if (next == fm->dv) {
5705 size_t dsize = fm->dvsize += psize;
5706 fm->dv = p;
5707 set_size_and_pinuse_of_free_chunk(p, dsize);
5708 goto postaction;
5709 }
5710 else {
5711 size_t nsize = chunksize(next);
5712 psize += nsize;
5713 unlink_chunk(fm, next, nsize);
5714 set_size_and_pinuse_of_free_chunk(p, psize);
5715 if (p == fm->dv) {
5716 fm->dvsize = psize;
5717 goto postaction;
5718 }
5719 }
5720 }
5721 else
5722 set_free_with_pinuse(p, psize, next);
5723
5724 if (is_small(psize)) {
5725 insert_small_chunk(fm, p, psize);
5726 check_free_chunk(fm, p);
5727 }
5728 else {
5729 tchunkptr tp = (tchunkptr)p;
5730 insert_large_chunk(fm, tp, psize);
5731 check_free_chunk(fm, p);
5732 if (--fm->release_checks == 0)
5733 release_unused_segments(fm);
5734 }
5735 goto postaction;
5736 }
5737 }
5738 erroraction:
5739 USAGE_ERROR_ACTION(fm, p);
5740 postaction:
5741 POSTACTION(fm);
5742 }
5743 }
5744}
5745
5746void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5747 void* mem;
5748 size_t req = 0;
5749 mstate ms = (mstate)msp;
5750 if (!ok_magic(ms)) {
5751 USAGE_ERROR_ACTION(ms,ms);
5752 return 0;
5753 }
5754 if (n_elements != 0) {
5755 req = n_elements * elem_size;
5756 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5757 (req / n_elements != elem_size))
5758 req = MAX_SIZE_T; /* force downstream failure on overflow */
5759 }
5760 mem = internal_malloc(ms, req);
5761 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
5762 memset(mem, 0, req);
5763 return mem;
5764}
5765
5766void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5767 void* mem = 0;
5768 if (oldmem == 0) {
5769 mem = mspace_malloc(msp, bytes);
5770 }
5771 else if (bytes >= MAX_REQUEST) {
5772 MALLOC_FAILURE_ACTION;
5773 }
5774#ifdef REALLOC_ZERO_BYTES_FREES
5775 else if (bytes == 0) {
5776 mspace_free(msp, oldmem);
5777 }
5778#endif /* REALLOC_ZERO_BYTES_FREES */
5779 else {
5780 size_t nb = request2size(bytes);
5781 mchunkptr oldp = mem2chunk(oldmem);
5782#if ! FOOTERS
5783 mstate m = (mstate)msp;
5784#else /* FOOTERS */
5785 mstate m = get_mstate_for(oldp);
5786 if (!ok_magic(m)) {
5787 USAGE_ERROR_ACTION(m, oldmem);
5788 return 0;
5789 }
5790#endif /* FOOTERS */
5791 if (!PREACTION(m)) {
5792 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5793 POSTACTION(m);
5794 if (newp != 0) {
5795 check_inuse_chunk(m, newp);
5796 mem = chunk2mem(newp);
5797 }
5798 else {
5799 mem = mspace_malloc(m, bytes);
5800 if (mem != 0) {
5801 size_t oc = chunksize(oldp) - overhead_for(oldp);
5802 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5803 mspace_free(m, oldmem);
5804 }
5805 }
5806 }
5807 }
5808 return mem;
5809}
5810
5811void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
5812 void* mem = 0;
5813 if (oldmem != 0) {
5814 if (bytes >= MAX_REQUEST) {
5815 MALLOC_FAILURE_ACTION;
5816 }
5817 else {
5818 size_t nb = request2size(bytes);
5819 mchunkptr oldp = mem2chunk(oldmem);
5820#if ! FOOTERS
5821 mstate m = (mstate)msp;
5822#else /* FOOTERS */
5823 mstate m = get_mstate_for(oldp);
5824 (void)msp; /* placate people compiling -Wunused */
5825 if (!ok_magic(m)) {
5826 USAGE_ERROR_ACTION(m, oldmem);
5827 return 0;
5828 }
5829#endif /* FOOTERS */
5830 if (!PREACTION(m)) {
5831 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5832 POSTACTION(m);
5833 if (newp == oldp) {
5834 check_inuse_chunk(m, newp);
5835 mem = oldmem;
5836 }
5837 }
5838 }
5839 }
5840 return mem;
5841}
5842
5843void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5844 mstate ms = (mstate)msp;
5845 if (!ok_magic(ms)) {
5846 USAGE_ERROR_ACTION(ms,ms);
5847 return 0;
5848 }
5849 if (alignment <= MALLOC_ALIGNMENT)
5850 return mspace_malloc(msp, bytes);
5851 return internal_memalign(ms, alignment, bytes);
5852}
5853
5854void** mspace_independent_calloc(mspace msp, size_t n_elements,
5855 size_t elem_size, void* chunks[]) {
5856 size_t sz = elem_size; /* serves as 1-element array */
5857 mstate ms = (mstate)msp;
5858 if (!ok_magic(ms)) {
5859 USAGE_ERROR_ACTION(ms,ms);
5860 return 0;
5861 }
5862 return ialloc(ms, n_elements, &sz, 3, chunks);
5863}
5864
5865void** mspace_independent_comalloc(mspace msp, size_t n_elements,
5866 size_t sizes[], void* chunks[]) {
5867 mstate ms = (mstate)msp;
5868 if (!ok_magic(ms)) {
5869 USAGE_ERROR_ACTION(ms,ms);
5870 return 0;
5871 }
5872 return ialloc(ms, n_elements, sizes, 0, chunks);
5873}
5874
5875size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
5876 return internal_bulk_free((mstate)msp, array, nelem);
5877}
5878
5879#if MALLOC_INSPECT_ALL
5880void mspace_inspect_all(mspace msp,
5881 void(*handler)(void *start,
5882 void *end,
5883 size_t used_bytes,
5884 void* callback_arg),
5885 void* arg) {
5886 mstate ms = (mstate)msp;
5887 if (ok_magic(ms)) {
5888 if (!PREACTION(ms)) {
5889 internal_inspect_all(ms, handler, arg);
5890 POSTACTION(ms);
5891 }
5892 }
5893 else {
5894 USAGE_ERROR_ACTION(ms,ms);
5895 }
5896}
5897#endif /* MALLOC_INSPECT_ALL */
5898
5899int mspace_trim(mspace msp, size_t pad) {
5900 int result = 0;
5901 mstate ms = (mstate)msp;
5902 if (ok_magic(ms)) {
5903 if (!PREACTION(ms)) {
5904 result = sys_trim(ms, pad);
5905 POSTACTION(ms);
5906 }
5907 }
5908 else {
5909 USAGE_ERROR_ACTION(ms,ms);
5910 }
5911 return result;
5912}
5913
5914#if !NO_MALLOC_STATS
5915void mspace_malloc_stats(mspace msp) {
5916 mstate ms = (mstate)msp;
5917 if (ok_magic(ms)) {
5918 internal_malloc_stats(ms);
5919 }
5920 else {
5921 USAGE_ERROR_ACTION(ms,ms);
5922 }
5923}
5924#endif /* NO_MALLOC_STATS */
5925
5926size_t mspace_footprint(mspace msp) {
5927 size_t result = 0;
5928 mstate ms = (mstate)msp;
5929 if (ok_magic(ms)) {
5930 result = ms->footprint;
5931 }
5932 else {
5933 USAGE_ERROR_ACTION(ms,ms);
5934 }
5935 return result;
5936}
5937
5938size_t mspace_max_footprint(mspace msp) {
5939 size_t result = 0;
5940 mstate ms = (mstate)msp;
5941 if (ok_magic(ms)) {
5942 result = ms->max_footprint;
5943 }
5944 else {
5945 USAGE_ERROR_ACTION(ms,ms);
5946 }
5947 return result;
5948}
5949
5950size_t mspace_footprint_limit(mspace msp) {
5951 size_t result = 0;
5952 mstate ms = (mstate)msp;
5953 if (ok_magic(ms)) {
5954 size_t maf = ms->footprint_limit;
5955 result = (maf == 0) ? MAX_SIZE_T : maf;
5956 }
5957 else {
5958 USAGE_ERROR_ACTION(ms,ms);
5959 }
5960 return result;
5961}
5962
5963size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
5964 size_t result = 0;
5965 mstate ms = (mstate)msp;
5966 if (ok_magic(ms)) {
    if (bytes == 0)
      result = granularity_align(1); /* Use minimal size */
    else if (bytes == MAX_SIZE_T)
5970 result = 0; /* disable */
5971 else
5972 result = granularity_align(bytes);
5973 ms->footprint_limit = result;
5974 }
5975 else {
5976 USAGE_ERROR_ACTION(ms,ms);
5977 }
5978 return result;
5979}
5980
5981#if !NO_MALLINFO
5982struct mallinfo mspace_mallinfo(mspace msp) {
5983 mstate ms = (mstate)msp;
5984 if (!ok_magic(ms)) {
5985 USAGE_ERROR_ACTION(ms,ms);
5986 }
5987 return internal_mallinfo(ms);
5988}
5989#endif /* NO_MALLINFO */
5990
5991size_t mspace_usable_size(const void* mem) {
5992 if (mem != 0) {
5993 mchunkptr p = mem2chunk(mem);
5994 if (is_inuse(p))
5995 return chunksize(p) - overhead_for(p);
5996 }
5997 return 0;
5998}
5999
6000int mspace_mallopt(int param_number, int value) {
6001 return change_mparam(param_number, value);
6002}
6003
6004#endif /* MSPACES */
6005
6006
6007/* -------------------- Alternative MORECORE functions ------------------- */
6008
6009/*
6010 Guidelines for creating a custom version of MORECORE:
6011
6012 * For best performance, MORECORE should allocate in multiples of pagesize.
6013 * MORECORE may allocate more memory than requested. (Or even less,
6014 but this will usually result in a malloc failure.)
6015 * MORECORE must not allocate memory when given argument zero, but
6016 instead return one past the end address of memory from previous
6017 nonzero call.
6018 * For best performance, consecutive calls to MORECORE with positive
6019 arguments should return increasing addresses, indicating that
6020 space has been contiguously extended.
6021 * Even though consecutive calls to MORECORE need not return contiguous
6022 addresses, it must be OK for malloc'ed chunks to span multiple
6023 regions in those cases where they do happen to be contiguous.
6024 * MORECORE need not handle negative arguments -- it may instead
6025 just return MFAIL when given negative arguments.
6026 Negative arguments are always multiples of pagesize. MORECORE
6027 must not misinterpret negative args as large positive unsigned
6028 args. You can suppress all such calls from even occurring by defining
  MORECORE_CANNOT_TRIM.
6030
6031 As an example alternative MORECORE, here is a custom allocator
6032 kindly contributed for pre-OSX macOS. It uses virtually but not
6033 necessarily physically contiguous non-paged memory (locked in,
6034 present and won't get swapped out). You can use it by uncommenting
6035 this section, adding some #includes, and setting up the appropriate
6036 defines above:
6037
6038 #define MORECORE osMoreCore
6039
6040 There is also a shutdown routine that should somehow be called for
6041 cleanup upon program exit.
6042
6043 #define MAX_POOL_ENTRIES 100
6044 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
6045 static int next_os_pool;
6046 void *our_os_pools[MAX_POOL_ENTRIES];
6047
6048 void *osMoreCore(int size)
6049 {
6050 void *ptr = 0;
6051 static void *sbrk_top = 0;
6052
6053 if (size > 0)
6054 {
6055 if (size < MINIMUM_MORECORE_SIZE)
6056 size = MINIMUM_MORECORE_SIZE;
6057 if (CurrentExecutionLevel() == kTaskLevel)
6058 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
6059 if (ptr == 0)
6060 {
6061 return (void *) MFAIL;
6062 }
6063 // save ptrs so they can be freed during cleanup
6064 our_os_pools[next_os_pool] = ptr;
6065 next_os_pool++;
6066 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
6067 sbrk_top = (char *) ptr + size;
6068 return ptr;
6069 }
6070 else if (size < 0)
6071 {
6072 // we don't currently support shrink behavior
6073 return (void *) MFAIL;
6074 }
6075 else
6076 {
6077 return sbrk_top;
6078 }
6079 }
6080
6081 // cleanup any allocated memory pools
6082 // called as last thing before shutting down driver
6083
6084 void osCleanupMem(void)
6085 {
6086 void **ptr;
6087
6088 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
6089 if (*ptr)
6090 {
6091 PoolDeallocate(*ptr);
6092 *ptr = 0;
6093 }
6094 }
6095
6096*/
6097
6098
6099/* -----------------------------------------------------------------------
6100History:
6101 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
6102 * fix bad comparison in dlposix_memalign
6103 * don't reuse adjusted asize in sys_alloc
6104 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
6105 * reduce compiler warnings -- thanks to all who reported/suggested these
6106
6107 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
6108 * Always perform unlink checks unless INSECURE
6109 * Add posix_memalign.
6110 * Improve realloc to expand in more cases; expose realloc_in_place.
6111 Thanks to Peter Buhr for the suggestion.
6112 * Add footprint_limit, inspect_all, bulk_free. Thanks
6113 to Barry Hayes and others for the suggestions.
6114 * Internal refactorings to avoid calls while holding locks
6115 * Use non-reentrant locks by default. Thanks to Roland McGrath
6116 for the suggestion.
6117 * Small fixes to mspace_destroy, reset_on_error.
6118 * Various configuration extensions/changes. Thanks
6119 to all who contributed these.
6120
6121 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
6122 * Update Creative Commons URL
6123
6124 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
6125 * Use zeros instead of prev foot for is_mmapped
6126 * Add mspace_track_large_chunks; thanks to Jean Brouwers
6127 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
6128 * Fix insufficient sys_alloc padding when using 16byte alignment
6129 * Fix bad error check in mspace_footprint
6130 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
6131 * Reentrant spin locks; thanks to Earl Chew and others
6132 * Win32 improvements; thanks to Niall Douglas and Earl Chew
6133 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
6134 * Extension hook in malloc_state
6135 * Various small adjustments to reduce warnings on some compilers
6136 * Various configuration extensions/changes for more platforms. Thanks
6137 to all who contributed these.
6138
6139 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
6140 * Add max_footprint functions
6141 * Ensure all appropriate literals are size_t
6142 * Fix conditional compilation problem for some #define settings
6143 * Avoid concatenating segments with the one provided
6144 in create_mspace_with_base
6145 * Rename some variables to avoid compiler shadowing warnings
6146 * Use explicit lock initialization.
6147 * Better handling of sbrk interference.
6148 * Simplify and fix segment insertion, trimming and mspace_destroy
6149 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
6150 * Thanks especially to Dennis Flanagan for help on these.
6151
6152 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
6153 * Fix memalign brace error.
6154
6155 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
6156 * Fix improper #endif nesting in C++
6157 * Add explicit casts needed for C++
6158
6159 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
6160 * Use trees for large bins
6161 * Support mspaces
6162 * Use segments to unify sbrk-based and mmap-based system allocation,
6163 removing need for emulation on most platforms without sbrk.
6164 * Default safety checks
6165 * Optional footer checks. Thanks to William Robertson for the idea.
6166 * Internal code refactoring
6167 * Incorporate suggestions and platform-specific changes.
6168 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
6169 Aaron Bachmann, Emery Berger, and others.
6170 * Speed up non-fastbin processing enough to remove fastbins.
6171 * Remove useless cfree() to avoid conflicts with other apps.
6172 * Remove internal memcpy, memset. Compilers handle builtins better.
6173 * Remove some options that no one ever used and rename others.
6174
6175 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
6176 * Fix malloc_state bitmap array misdeclaration
6177
6178 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
6179 * Allow tuning of FIRST_SORTED_BIN_SIZE
6180 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
6181 * Better detection and support for non-contiguousness of MORECORE.
6182 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees. Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
6185 * Raised default trim and map thresholds to 256K.
6186 * Fix mmap-related #defines. Thanks to Lubos Lunak.
6187 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
6188 * Branch-free bin calculation
6189 * Default trim and mmap thresholds now 256K.
6190
6191 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
6192 * Introduce independent_comalloc and independent_calloc.
6193 Thanks to Michael Pachos for motivation and help.
6194 * Make optional .h file available
6195 * Allow > 2GB requests on 32bit systems.
6196 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
6197 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
6198 and Anonymous.
6199 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
6200 helping test this.)
6201 * memalign: check alignment arg
6202 * realloc: don't try to shift chunks backwards, since this
6203 leads to more fragmentation in some programs and doesn't
6204 seem to help in any others.
6205 * Collect all cases in malloc requiring system memory into sysmalloc
6206 * Use mmap as backup to sbrk
6207 * Place all internal state in malloc_state
6208 * Introduce fastbins (although similar to 2.5.1)
6209 * Many minor tunings and cosmetic improvements
6210 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
6211 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
6212 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
6213 * Include errno.h to support default failure action.
6214
6215 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
6216 * return null for negative arguments
6217 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
6218 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
6219 (e.g. WIN32 platforms)
6220 * Cleanup header file inclusion for WIN32 platforms
6221 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
6222 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
6223 memory allocation routines
6224 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
6225 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
6226 usage of 'assert' in non-WIN32 code
6227 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
6228 avoid infinite loop
6229 * Always call 'fREe()' rather than 'free()'
6230
6231 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
6232 * Fixed ordering problem with boundary-stamping
6233
6234 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
6235 * Added pvalloc, as recommended by H.J. Liu
6236 * Added 64bit pointer support mainly from Wolfram Gloger
6237 * Added anonymously donated WIN32 sbrk emulation
6238 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
6239 * malloc_extend_top: fix mask error that caused wastage after
6240 foreign sbrks
6241 * Add linux mremap support code from HJ Liu
6242
6243 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
6244 * Integrated most documentation with the code.
6245 * Add support for mmap, with help from
6246 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
6247 * Use last_remainder in more cases.
6248 * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
6250 * Eliminate block-local decls to simplify tracing and debugging.
6251 * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
6253 * Rely on page size for units instead of SBRK_UNIT to
6254 avoid surprises about sbrk alignment conventions.
6255 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
6256 (raymond@es.ele.tue.nl) for the suggestion.
6257 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
6258 * More precautions for cases where other routines call sbrk,
6259 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
6260 * Added macros etc., allowing use in linux libc from
6261 H.J. Lu (hjl@gnu.ai.mit.edu)
6262 * Inverted this history list
6263
6264 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
6265 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
6266 * Removed all preallocation code since under current scheme
6267 the work required to undo bad preallocations exceeds
6268 the work saved in good cases for most test programs.
6269 * No longer use return list or unconsolidated bins since
6270 no scheme using them consistently outperforms those that don't
6271 given above changes.
6272 * Use best fit for very large chunks to prevent some worst-cases.
6273 * Added some support for debugging
6274
6275 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
6276 * Removed footers when chunks are in use. Thanks to
6277 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
6278
6279 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
6280 * Added malloc_trim, with help from Wolfram Gloger
6281 (wmglo@Dent.MED.Uni-Muenchen.DE).
6282
6283 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
6284
6285 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
6286 * realloc: try to expand in both directions
6287 * malloc: swap order of clean-bin strategy;
6288 * realloc: only conditionally expand backwards
6289 * Try not to scavenge used bins
6290 * Use bin counts as a guide to preallocation
6291 * Occasionally bin return list chunks in first scan
6292 * Add a few optimizations from colin@nyx10.cs.du.edu
6293
6294 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
6295 * faster bin computation & slightly different binning
6296 * merged all consolidations to one part of malloc proper
6297 (eliminating old malloc_find_space & malloc_clean_bin)
6298 * Scan 2 returns chunks (not just 1)
6299 * Propagate failure in realloc if malloc returns 0
6300 * Add stuff to allow compilation on non-ANSI compilers
6301 from kpv@research.att.com
6302
6303 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
6304 * removed potential for odd address access in prev_chunk
6305 * removed dependency on getpagesize.h
6306 * misc cosmetics and a bit more internal documentation
6307 * anticosmetics: mangled names in macros to evade debugger strangeness
6308 * tested on sparc, hp-700, dec-mips, rs6000
6309 with gcc & native cc (hp, dec only) allowing
6310 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
6311
6312 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
6313 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
6314 structure of old version, but most details differ.)
6315
6316*/
6317
6318#endif /* !HAVE_MALLOC */
6319
6320#ifdef HAVE_MALLOC
6321static void* SDLCALL real_malloc(size_t s) { return malloc(s); }
6322static void* SDLCALL real_calloc(size_t n, size_t s) { return calloc(n, s); }
6323static void* SDLCALL real_realloc(void *p, size_t s) { return realloc(p,s); }
6324static void SDLCALL real_free(void *p) { free(p); }
6325#else
6326#define real_malloc dlmalloc
6327#define real_calloc dlcalloc
6328#define real_realloc dlrealloc
6329#define real_free dlfree
6330#endif
6331
6332// mark the allocator entry points as KEEPALIVE so we can call these from JavaScript.
// otherwise they could get so aggressively inlined that their symbols
6334// don't exist at all in the final binary!
6335#ifdef SDL_PLATFORM_EMSCRIPTEN
6336#include <emscripten/emscripten.h>
6337extern SDL_DECLSPEC SDL_MALLOC EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_malloc(size_t size);
6338extern SDL_DECLSPEC SDL_MALLOC SDL_ALLOC_SIZE2(1, 2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_calloc(size_t nmemb, size_t size);
6339extern SDL_DECLSPEC SDL_ALLOC_SIZE(2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_realloc(void *mem, size_t size);
6340extern SDL_DECLSPEC EMSCRIPTEN_KEEPALIVE void SDLCALL SDL_free(void *mem);
6341#endif
6342
6343/* Memory functions used by SDL that can be replaced by the application */
6344static struct
6345{
6346 SDL_malloc_func malloc_func;
6347 SDL_calloc_func calloc_func;
6348 SDL_realloc_func realloc_func;
6349 SDL_free_func free_func;
6350 SDL_AtomicInt num_allocations;
6351} s_mem = {
6352 real_malloc, real_calloc, real_realloc, real_free, { 0 }
6353};
6354
6355// Define this if you want to track the number of allocations active
6356// #define SDL_TRACK_ALLOCATION_COUNT
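// (For example, building with -DSDL_TRACK_ALLOCATION_COUNT, or uncommenting the
// define above, makes SDL_GetNumAllocations() below return the live allocation
// count instead of -1.)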
6357#ifdef SDL_TRACK_ALLOCATION_COUNT
6358#define INCREMENT_ALLOCATION_COUNT() (void)SDL_AtomicIncRef(&s_mem.num_allocations)
6359#define DECREMENT_ALLOCATION_COUNT() (void)SDL_AtomicDecRef(&s_mem.num_allocations)
6360#else
6361#define INCREMENT_ALLOCATION_COUNT()
6362#define DECREMENT_ALLOCATION_COUNT()
6363#endif
6364
6365
6366void SDL_GetOriginalMemoryFunctions(SDL_malloc_func *malloc_func,
6367 SDL_calloc_func *calloc_func,
6368 SDL_realloc_func *realloc_func,
6369 SDL_free_func *free_func)
6370{
6371 if (malloc_func) {
6372 *malloc_func = real_malloc;
6373 }
6374 if (calloc_func) {
6375 *calloc_func = real_calloc;
6376 }
6377 if (realloc_func) {
6378 *realloc_func = real_realloc;
6379 }
6380 if (free_func) {
6381 *free_func = real_free;
6382 }
6383}
6384
6385void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
6386 SDL_calloc_func *calloc_func,
6387 SDL_realloc_func *realloc_func,
6388 SDL_free_func *free_func)
6389{
6390 if (malloc_func) {
6391 *malloc_func = s_mem.malloc_func;
6392 }
6393 if (calloc_func) {
6394 *calloc_func = s_mem.calloc_func;
6395 }
6396 if (realloc_func) {
6397 *realloc_func = s_mem.realloc_func;
6398 }
6399 if (free_func) {
6400 *free_func = s_mem.free_func;
6401 }
6402}
6403
6404bool SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
6405 SDL_calloc_func calloc_func,
6406 SDL_realloc_func realloc_func,
6407 SDL_free_func free_func)
6408{
6409 if (!malloc_func) {
6410 return SDL_InvalidParamError("malloc_func");
6411 }
6412 if (!calloc_func) {
6413 return SDL_InvalidParamError("calloc_func");
6414 }
6415 if (!realloc_func) {
6416 return SDL_InvalidParamError("realloc_func");
6417 }
6418 if (!free_func) {
6419 return SDL_InvalidParamError("free_func");
6420 }
6421
6422 s_mem.malloc_func = malloc_func;
6423 s_mem.calloc_func = calloc_func;
6424 s_mem.realloc_func = realloc_func;
6425 s_mem.free_func = free_func;
6426 return true;
6427}
6428
6429int SDL_GetNumAllocations(void)
6430{
6431#ifdef SDL_TRACK_ALLOCATION_COUNT
6432 return SDL_GetAtomicInt(&s_mem.num_allocations);
6433#else
6434 return -1;
6435#endif
6436}
6437
6438void *SDL_malloc(size_t size)
6439{
6440 void *mem;
6441
6442 if (!size) {
6443 size = 1;
6444 }
6445
6446 mem = s_mem.malloc_func(size);
6447 if (mem) {
6448 INCREMENT_ALLOCATION_COUNT();
6449 } else {
6450 SDL_OutOfMemory();
6451 }
6452
6453 return mem;
6454}
6455
6456void *SDL_calloc(size_t nmemb, size_t size)
6457{
6458 void *mem;
6459
6460 if (!nmemb || !size) {
6461 nmemb = 1;
6462 size = 1;
6463 }
6464
6465 mem = s_mem.calloc_func(nmemb, size);
6466 if (mem) {
6467 INCREMENT_ALLOCATION_COUNT();
6468 } else {
6469 SDL_OutOfMemory();
6470 }
6471
6472 return mem;
6473}
6474
6475void *SDL_realloc(void *ptr, size_t size)
6476{
6477 void *mem;
6478
6479 if (!size) {
6480 size = 1;
6481 }
6482
6483 mem = s_mem.realloc_func(ptr, size);
6484 if (mem && !ptr) {
6485 INCREMENT_ALLOCATION_COUNT();
6486 } else if (!mem) {
6487 SDL_OutOfMemory();
6488 }
6489
6490 return mem;
6491}
6492
6493void SDL_free(void *ptr)
6494{
6495 if (!ptr) {
6496 return;
6497 }
6498
6499 s_mem.free_func(ptr);
6500 DECREMENT_ALLOCATION_COUNT();
6501}