SDL_malloc.c
1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2019 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21
22#if defined(__clang_analyzer__) && !defined(SDL_DISABLE_ANALYZE_MACROS)
23#define SDL_DISABLE_ANALYZE_MACROS 1
24#endif
25
26#include "../SDL_internal.h"
27
28/* This file contains portable memory management functions for SDL */
29#include "SDL_stdinc.h"
30#include "SDL_atomic.h"
31#include "SDL_error.h"
32
33#ifndef HAVE_MALLOC
34#define LACKS_SYS_TYPES_H
35#define LACKS_STDIO_H
36#define LACKS_STRINGS_H
37#define LACKS_STRING_H
38#define LACKS_STDLIB_H
39#define ABORT
40#define USE_LOCKS 1
41#define USE_DL_PREFIX
42
43/*
44 This is a version (aka dlmalloc) of malloc/free/realloc written by
45 Doug Lea and released to the public domain, as explained at
46 http://creativecommons.org/licenses/publicdomain. Send questions,
47 comments, complaints, performance data, etc to dl@cs.oswego.edu
48
49* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
50
51 Note: There may be an updated version of this malloc obtainable at
52 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
53 Check before installing!
54
55* Quickstart
56
57 This library is all in one file to simplify the most common usage:
58 ftp it, compile it (-O3), and link it into another program. All of
59 the compile-time options default to reasonable values for use on
60 most platforms. You might later want to step through various
61 compile-time and dynamic tuning options.
62
63 For convenience, an include file for code using this malloc is at:
64 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
65 You don't really need this .h file unless you call functions not
66 defined in your system include files. The .h file contains only the
67 excerpts from this file needed for using this malloc on ANSI C/C++
68 systems, so long as you haven't changed compile-time options about
69 naming and tuning parameters. If you do, then you can create your
70 own malloc.h that does include all settings by cutting at the point
71 indicated below. Note that you may already by default be using a C
72 library containing a malloc that is based on some version of this
73 malloc (for example in linux). You might still want to use the one
74 in this file to customize settings or to avoid overheads associated
75 with library versions.
76
77* Vital statistics:
78
79 Supported pointer/size_t representation: 4 or 8 bytes
80 size_t MUST be an unsigned type of the same width as
81 pointers. (If you are using an ancient system that declares
82 size_t as a signed type, or need it to be a different width
83 than pointers, you can use a previous release of this malloc
84 (e.g. 2.7.2) supporting these.)
85
86 Alignment: 8 bytes (default)
87 This suffices for nearly all current machines and C compilers.
88 However, you can define MALLOC_ALIGNMENT to be wider than this
89 if necessary (up to 128 bytes), at the expense of using more space.
90
91 Minimum overhead per allocated chunk: 4 or 8 bytes (if 4-byte sizes)
92 8 or 16 bytes (if 8-byte sizes)
93 Each malloced chunk has a hidden word of overhead holding size
94 and status information, and additional cross-check word
95 if FOOTERS is defined.
96
97 Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
98 8-byte ptrs: 32 bytes (including overhead)
99
100 Even a request for zero bytes (i.e., malloc(0)) returns a
101 pointer to something of the minimum allocatable size.
102 The maximum overhead wastage (i.e., the number of extra bytes
103 allocated beyond what was requested in malloc) is less than or equal
104 to the minimum size, except for requests >= mmap_threshold that
105 are serviced via mmap(), where the worst case wastage is about
106 32 bytes plus the remainder from a system page (the minimal
107 mmap unit); typically 4096 or 8192 bytes.
108
109 Security: static-safe; optionally more or less
110 The "security" of malloc refers to the ability of malicious
111 code to accentuate the effects of errors (for example, freeing
112 space that is not currently malloc'ed or overwriting past the
113 ends of chunks) in code that calls malloc. This malloc
114 guarantees not to modify any memory locations below the base of
115 heap, i.e., static variables, even in the presence of usage
116 errors. The routines additionally detect most improper frees
117 and reallocs. All this holds as long as the static bookkeeping
118 for malloc itself is not corrupted by some other means. This
119 is only one aspect of security -- these checks do not, and
120 cannot, detect all possible programming errors.
121
122 If FOOTERS is defined nonzero, then each allocated chunk
123 carries an additional check word to verify that it was malloced
124 from its space. These check words are the same within each
125 execution of a program using malloc, but differ across
126 executions, so externally crafted fake chunks cannot be
127 freed. This improves security by rejecting frees/reallocs that
128 could corrupt heap memory, in addition to the checks preventing
129 writes to statics that are always on. This may further improve
130 security at the expense of time and space overhead. (Note that
131 FOOTERS may also be worth using with MSPACES.)
132
133 By default detected errors cause the program to abort (calling
134 "abort()"). You can override this to instead proceed past
135 errors by defining PROCEED_ON_ERROR. In this case, a bad free
136 has no effect, and a malloc that encounters a bad address
137 caused by user overwrites will ignore the bad address by
138 dropping pointers and indices to all known memory. This may
139 be appropriate for programs that should continue if at all
140 possible in the face of programming errors, although they may
141 run out of memory because dropped memory is never reclaimed.
142
143 If you don't like either of these options, you can define
144 CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
145 else. And if you are sure that your program using malloc has
146 no errors or vulnerabilities, you can define INSECURE to 1,
147 which might (or might not) provide a small performance improvement.
148
149 Thread-safety: NOT thread-safe unless USE_LOCKS defined
150 When USE_LOCKS is defined, each public call to malloc, free,
151 etc is surrounded with either a pthread mutex or a win32
152 spinlock (depending on WIN32). This is not especially fast, and
153 can be a major bottleneck. It is designed only to provide
154 minimal protection in concurrent environments, and to provide a
155 basis for extensions. If you are using malloc in a concurrent
156 program, consider instead using ptmalloc, which is derived from
157 a version of this malloc. (See http://www.malloc.de).
158
159 System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
160 This malloc can use unix sbrk or any emulation (invoked using
161 the CALL_MORECORE macro) and/or mmap/munmap or any emulation
162 (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
163 memory. On most unix systems, it tends to work best if both
164 MORECORE and MMAP are enabled. On Win32, it uses emulations
165 based on VirtualAlloc. It also uses common C library functions
166 like memset.
167
168 Compliance: I believe it is compliant with the Single Unix Specification
169 (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
170 others as well.
171
172* Overview of algorithms
173
174 This is not the fastest, most space-conserving, most portable, or
175 most tunable malloc ever written. However it is among the fastest
176 while also being among the most space-conserving, portable and
177 tunable. Consistent balance across these factors results in a good
178 general-purpose allocator for malloc-intensive programs.
179
180 In most ways, this malloc is a best-fit allocator. Generally, it
181 chooses the best-fitting existing chunk for a request, with ties
182 broken in approximately least-recently-used order. (This strategy
183 normally maintains low fragmentation.) However, for requests less
184 than 256 bytes, it deviates from best-fit when there is not an
185 exactly fitting available chunk by preferring to use space adjacent
186 to that used for the previous small request, as well as by breaking
187 ties in approximately most-recently-used order. (These enhance
188 locality of series of small allocations.) And for very large requests
189 (>= 256KB by default), it relies on system memory mapping
190 facilities, if supported. (This helps avoid carrying around and
191 possibly fragmenting memory used only for large chunks.)
192
193 All operations (except malloc_stats and mallinfo) have execution
194 times that are bounded by a constant factor of the number of bits in
195 a size_t, not counting any clearing in calloc or copying in realloc,
196 or actions surrounding MORECORE and MMAP that have times
197 proportional to the number of non-contiguous regions returned by
198 system allocation routines, which is often just 1.
199
200 The implementation is not very modular and seriously overuses
201 macros. Perhaps someday all C compilers will do as good a job
202 inlining modular code as can now be done by brute-force expansion,
203 but now, enough of them seem not to.
204
205 Some compilers issue a lot of warnings about code that is
206 dead/unreachable only on some platforms, and also about intentional
207 uses of negation on unsigned types. All known cases of each can be
208 ignored.
209
210 For a longer but out of date high-level description, see
211 http://gee.cs.oswego.edu/dl/html/malloc.html
212
213* MSPACES
214 If MSPACES is defined, then in addition to malloc, free, etc.,
215 this file also defines mspace_malloc, mspace_free, etc. These
216 are versions of malloc routines that take an "mspace" argument
217 obtained using create_mspace, to control all internal bookkeeping.
218 If ONLY_MSPACES is defined, only these versions are compiled.
219 So if you would like to use this allocator for only some allocations,
220 and your system malloc for others, you can compile with
221 ONLY_MSPACES and then do something like...
222 static mspace mymspace = create_mspace(0,0); // for example
223 #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
224
225 (Note: If you only need one instance of an mspace, you can instead
226 use "USE_DL_PREFIX" to relabel the global malloc.)
227
228 You can similarly create thread-local allocators by storing
229 mspaces as thread-locals. For example:
230 static __thread mspace tlms = 0;
231 void* tlmalloc(size_t bytes) {
232 if (tlms == 0) tlms = create_mspace(0, 0);
233 return mspace_malloc(tlms, bytes);
234 }
235 void tlfree(void* mem) { mspace_free(tlms, mem); }
236
237 Unless FOOTERS is defined, each mspace is completely independent.
238 You cannot allocate from one and free to another (although
239 conformance is only weakly checked, so usage errors are not always
240 caught). If FOOTERS is defined, then each chunk carries around a tag
241 indicating its originating mspace, and frees are directed to their
242 originating spaces.
243
244 ------------------------- Compile-time options ---------------------------
245
246Be careful in setting #define values for numerical constants of type
247size_t. On some systems, literal values are not automatically extended
248to size_t precision unless they are explicitly cast.
249
250WIN32 default: defined if _WIN32 defined
251 Defining WIN32 sets up defaults for MS environment and compilers.
252 Otherwise defaults are for unix.
253
254MALLOC_ALIGNMENT default: (size_t)8
255 Controls the minimum alignment for malloc'ed chunks. It must be a
256 power of two and at least 8, even on machines for which smaller
257 alignments would suffice. It may be defined as larger than this
258 though. Note however that code and data structures are optimized for
259 the case of 8-byte alignment.
260
261MSPACES default: 0 (false)
262 If true, compile in support for independent allocation spaces.
263 This is only supported if HAVE_MMAP is true.
264
265ONLY_MSPACES default: 0 (false)
266 If true, only compile in mspace versions, not regular versions.
267
268USE_LOCKS default: 0 (false)
269 Causes each call to each public routine to be surrounded with
270 pthread or WIN32 mutex lock/unlock. (If set true, this can be
271 overridden on a per-mspace basis for mspace versions.)
272
273FOOTERS default: 0
274 If true, provide extra checking and dispatching by placing
275 information in the footers of allocated chunks. This adds
276 space and time overhead.
277
278INSECURE default: 0
279 If true, omit checks for usage errors and heap space overwrites.
280
281USE_DL_PREFIX default: NOT defined
282 Causes compiler to prefix all public routines with the string 'dl'.
283 This can be useful when you only want to use this malloc in one part
284 of a program, using your regular system malloc elsewhere.
285
286ABORT default: defined as abort()
287 Defines how to abort on failed checks. On most systems, a failed
288 check cannot die with an "assert" or even print an informative
289 message, because the underlying print routines in turn call malloc,
290 which will fail again. Generally, the best policy is to simply call
291 abort(). It's not very useful to do more than this because many
292 errors due to overwriting will show up as address faults (null, odd
293 addresses etc) rather than malloc-triggered checks, so will also
294 abort. Also, most compilers know that abort() does not return, so
295 can better optimize code conditionally calling it.
296
297PROCEED_ON_ERROR default: defined as 0 (false)
298 Controls whether detected bad addresses cause them to be bypassed
299 rather than aborting. If set, detected bad arguments to free and
300 realloc are ignored. And all bookkeeping information is zeroed out
301 upon a detected overwrite of freed heap space, thus losing the
302 ability to ever return it from malloc again, but enabling the
303 application to proceed. If PROCEED_ON_ERROR is defined, the
304 static variable malloc_corruption_error_count is compiled in
305 and can be examined to see if errors have occurred. This option
306 generates slower code than the default abort policy.
307
308DEBUG default: NOT defined
309 The DEBUG setting is mainly intended for people trying to modify
310 this code or diagnose problems when porting to new platforms.
311 However, it may also be able to better isolate user errors than just
312 using runtime checks. The assertions in the check routines spell
313 out in more detail the assumptions and invariants underlying the
314 algorithms. The checking is fairly extensive, and will slow down
315 execution noticeably. Calling malloc_stats or mallinfo with DEBUG
316 set will attempt to check every non-mmapped allocated and free chunk
317 in the course of computing the summaries.
318
319ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
320 Debugging assertion failures can be nearly impossible if your
321 version of the assert macro causes malloc to be called, which will
322 lead to a cascade of further failures, blowing the runtime stack.
323 ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
324 which will usually make debugging easier.
325
326MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
327 The action to take before "return 0" when malloc is unable to
328 return memory because none is available.
329
330HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
331 True if this system supports sbrk or an emulation of it.
332
333MORECORE default: sbrk
334 The name of the sbrk-style system routine to call to obtain more
335 memory. See below for guidance on writing custom MORECORE
336 functions. The type of the argument to sbrk/MORECORE varies across
337 systems. It cannot be size_t, because it supports negative
338 arguments, so it is normally the signed type of the same width as
339 size_t (sometimes declared as "intptr_t"). It doesn't much matter
340 though. Internally, we only call it with arguments less than half
341 the max value of a size_t, which should work across all reasonable
342 possibilities, although sometimes generating compiler warnings. See
343 near the end of this file for guidelines for creating a custom
344 version of MORECORE.
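 As an editor's sketch (not part of this file's configuration), a
 hand-crafted MORECORE over a fixed static arena could look like the
 following. The names my_morecore, my_arena and my_used are
 illustrative; MFAIL is the failure value defined later in this file,
 and MORECORE_CANNOT_TRIM is set because this sketch never releases
 space:

 #define MORECORE my_morecore
 #define MORECORE_CANNOT_TRIM // this sketch cannot handle negative args
 static char my_arena[1 << 20]; // hypothetical fixed 1MB arena
 static size_t my_used = 0;
 void *my_morecore(intptr_t increment) {
 char *old_brk = my_arena + my_used;
 if (increment < 0 || (size_t)increment > sizeof(my_arena) - my_used)
 return MFAIL; // MORECORE must return MFAIL on failure
 my_used += (size_t)increment;
 return old_brk; // like sbrk, returns the previous break
 }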
345
346MORECORE_CONTIGUOUS default: 1 (true)
347 If true, take advantage of fact that consecutive calls to MORECORE
348 with positive arguments always return contiguous increasing
349 addresses. This is true of unix sbrk. It does not hurt too much to
350 set it true anyway, since malloc copes with non-contiguities.
351 Setting it false when definitely non-contiguous saves time
352 and possibly wasted space it would take to discover this though.
353
354MORECORE_CANNOT_TRIM default: NOT defined
355 True if MORECORE cannot release space back to the system when given
356 negative arguments. This is generally necessary only if you are
357 using a hand-crafted MORECORE function that cannot handle negative
358 arguments.
359
360HAVE_MMAP default: 1 (true)
361 True if this system supports mmap or an emulation of it. If so, and
362 HAVE_MORECORE is not true, MMAP is used for all system
363 allocation. If set and HAVE_MORECORE is true as well, MMAP is
364 primarily used to directly allocate very large blocks. It is also
365 used as a backup strategy in cases where MORECORE fails to provide
366 space from system. Note: A single call to MUNMAP is assumed to be
367 able to unmap memory that may have been allocated using multiple calls
368 to MMAP, so long as they are adjacent.
369
370HAVE_MREMAP default: 1 on linux, else 0
371 If true realloc() uses mremap() to re-allocate large blocks and
372 extend or shrink allocation spaces.
373
374MMAP_CLEARS default: 1 on unix
375 True if mmap clears memory so calloc doesn't need to. This is true
376 for standard unix mmap using /dev/zero.
377
378USE_BUILTIN_FFS default: 0 (i.e., not used)
379 Causes malloc to use the builtin ffs() function to compute indices.
380 Some compilers may recognize and intrinsify ffs to be faster than the
381 supplied C version. Also, the case of x86 using gcc is special-cased
382 to an asm instruction, so is already as fast as it can be, and so
383 this setting has no effect. (On most x86s, the asm version is only
384 slightly faster than the C version.)
385
386malloc_getpagesize default: derive from system includes, or 4096.
387 The system page size. To the extent possible, this malloc manages
388 memory from the system in page-size units. This may be (and
389 usually is) a function rather than a constant. This is ignored
390 if WIN32, where page size is determined using GetSystemInfo during
391 initialization.
392
393USE_DEV_RANDOM default: 0 (i.e., not used)
394 Causes malloc to use /dev/random to initialize secure magic seed for
395 stamping footers. Otherwise, the current time is used.
396
397NO_MALLINFO default: 0
398 If defined, don't compile "mallinfo". This can be a simple way
399 of dealing with mismatches between system declarations and
400 those in this file.
401
402MALLINFO_FIELD_TYPE default: size_t
403 The type of the fields in the mallinfo struct. This was originally
404 defined as "int" in SVID etc, but is more usefully defined as
405 size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
406
407REALLOC_ZERO_BYTES_FREES default: not defined
408 This should be set if a call to realloc with zero bytes should
409 be the same as a call to free. Some people think it should. Otherwise,
410 since this malloc returns a unique pointer for malloc(0), realloc(p, 0)
411 does too.
412
413LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
414LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
415LACKS_STDLIB_H default: NOT defined unless on WIN32
416 Define these if your system does not have these header files.
417 You might need to manually insert some of the declarations they provide.
418
419DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
420 system_info.dwAllocationGranularity in WIN32,
421 otherwise 64K.
422 Also settable using mallopt(M_GRANULARITY, x)
423 The unit for allocating and deallocating memory from the system. On
424 most systems with contiguous MORECORE, there is no reason to
425 make this more than a page. However, systems with MMAP tend to
426 either require or encourage larger granularities. You can increase
427 this value to prevent system allocation functions from being called so
428 often, especially if they are slow. The value must be at least one
429 page and must be a power of two. Setting to 0 causes initialization
430 to either page size or win32 region size. (Note: In previous
431 versions of malloc, the equivalent of this option was called
432 "TOP_PAD")
433
434DEFAULT_TRIM_THRESHOLD default: 2MB
435 Also settable using mallopt(M_TRIM_THRESHOLD, x)
436 The maximum amount of unused top-most memory to keep before
437 releasing via malloc_trim in free(). Automatic trimming is mainly
438 useful in long-lived programs using contiguous MORECORE. Because
439 trimming via sbrk can be slow on some systems, and can sometimes be
440 wasteful (in cases where programs immediately afterward allocate
441 more large chunks) the value should be high enough so that your
442 overall system performance would improve by releasing this much
443 memory. As a rough guide, you might set to a value close to the
444 average size of a process (program) running on your system.
445 Releasing this much memory would allow such a process to run in
446 memory. Generally, it is worth tuning trim thresholds when a
447 program undergoes phases where several large chunks are allocated
448 and released in ways that can reuse each other's storage, perhaps
449 mixed with phases where there are no such chunks at all. The trim
450 value must be greater than page size to have any useful effect. To
451 disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
452 some people use of mallocing a huge space and then freeing it at
453 program startup, in an attempt to reserve system memory, doesn't
454 have the intended effect under automatic trimming, since that memory
455 will immediately be returned to the system.
456
457DEFAULT_MMAP_THRESHOLD default: 256K
458 Also settable using mallopt(M_MMAP_THRESHOLD, x)
459 The request size threshold for using MMAP to directly service a
460 request. Requests of at least this size that cannot be allocated
461 using already-existing space will be serviced via mmap. (If enough
462 normal freed space already exists it is used instead.) Using mmap
463 segregates relatively large chunks of memory so that they can be
464 individually obtained and released from the host system. A request
465 serviced through mmap is never reused by any other request (at least
466 not directly; the system may just so happen to remap successive
467 requests to the same locations). Segregating space in this way has
468 the benefits that: Mmapped space can always be individually released
469 back to the system, which helps keep the system level memory demands
470 of a long-lived program low. Also, mapped memory doesn't become
471 `locked' between other chunks, as can happen with normally allocated
472 chunks, which means that even trimming via malloc_trim would not
473 release them. However, it has the disadvantage that the space
474 cannot be reclaimed, consolidated, and then used to service later
475 requests, as happens with normal chunks. The advantages of mmap
476 nearly always outweigh disadvantages for "large" chunks, but the
477 value of "large" may vary across systems. The default is an
478 empirically derived value that works well in most systems. You can
479 disable mmap by setting to MAX_SIZE_T.
480
481*/
482
483#ifndef WIN32
484#ifdef _WIN32
485#define WIN32 1
486#endif /* _WIN32 */
487#endif /* WIN32 */
488#ifdef WIN32
489#define WIN32_LEAN_AND_MEAN
490#include <windows.h>
491#define HAVE_MMAP 1
492#define HAVE_MORECORE 0
493#define LACKS_UNISTD_H
494#define LACKS_SYS_PARAM_H
495#define LACKS_SYS_MMAN_H
496#define LACKS_STRING_H
497#define LACKS_STRINGS_H
498#define LACKS_SYS_TYPES_H
499#define LACKS_ERRNO_H
500#define LACKS_FCNTL_H
501#define MALLOC_FAILURE_ACTION
502#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
503#endif /* WIN32 */
504
505#ifdef __OS2__
506#define INCL_DOS
507#include <os2.h>
508#define HAVE_MMAP 1
509#define HAVE_MORECORE 0
510#define LACKS_SYS_MMAN_H
511#endif /* __OS2__ */
512
513#if defined(DARWIN) || defined(_DARWIN)
514/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
515#ifndef HAVE_MORECORE
516#define HAVE_MORECORE 0
517#define HAVE_MMAP 1
518#endif /* HAVE_MORECORE */
519#endif /* DARWIN */
520
521#ifndef LACKS_SYS_TYPES_H
522#include <sys/types.h> /* For size_t */
523#endif /* LACKS_SYS_TYPES_H */
524
525/* The maximum possible size_t value has all bits set */
526#define MAX_SIZE_T (~(size_t)0)
527
528#ifndef ONLY_MSPACES
529#define ONLY_MSPACES 0
530#endif /* ONLY_MSPACES */
531#ifndef MSPACES
532#if ONLY_MSPACES
533#define MSPACES 1
534#else /* ONLY_MSPACES */
535#define MSPACES 0
536#endif /* ONLY_MSPACES */
537#endif /* MSPACES */
538#ifndef MALLOC_ALIGNMENT
539#define MALLOC_ALIGNMENT ((size_t)8U)
540#endif /* MALLOC_ALIGNMENT */
541#ifndef FOOTERS
542#define FOOTERS 0
543#endif /* FOOTERS */
544#ifndef ABORT
545#define ABORT abort()
546#endif /* ABORT */
547#ifndef ABORT_ON_ASSERT_FAILURE
548#define ABORT_ON_ASSERT_FAILURE 1
549#endif /* ABORT_ON_ASSERT_FAILURE */
550#ifndef PROCEED_ON_ERROR
551#define PROCEED_ON_ERROR 0
552#endif /* PROCEED_ON_ERROR */
553#ifndef USE_LOCKS
554#define USE_LOCKS 0
555#endif /* USE_LOCKS */
556#ifndef INSECURE
557#define INSECURE 0
558#endif /* INSECURE */
559#ifndef HAVE_MMAP
560#define HAVE_MMAP 1
561#endif /* HAVE_MMAP */
562#ifndef MMAP_CLEARS
563#define MMAP_CLEARS 1
564#endif /* MMAP_CLEARS */
565#ifndef HAVE_MREMAP
566#ifdef linux
567#define HAVE_MREMAP 1
568#else /* linux */
569#define HAVE_MREMAP 0
570#endif /* linux */
571#endif /* HAVE_MREMAP */
572#ifndef MALLOC_FAILURE_ACTION
573#define MALLOC_FAILURE_ACTION errno = ENOMEM;
574#endif /* MALLOC_FAILURE_ACTION */
575#ifndef HAVE_MORECORE
576#if ONLY_MSPACES
577#define HAVE_MORECORE 0
578#else /* ONLY_MSPACES */
579#define HAVE_MORECORE 1
580#endif /* ONLY_MSPACES */
581#endif /* HAVE_MORECORE */
582#if !HAVE_MORECORE
583#define MORECORE_CONTIGUOUS 0
584#else /* !HAVE_MORECORE */
585#ifndef MORECORE
586#define MORECORE sbrk
587#endif /* MORECORE */
588#ifndef MORECORE_CONTIGUOUS
589#define MORECORE_CONTIGUOUS 1
590#endif /* MORECORE_CONTIGUOUS */
591#endif /* HAVE_MORECORE */
592#ifndef DEFAULT_GRANULARITY
593#if MORECORE_CONTIGUOUS
594#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
595#else /* MORECORE_CONTIGUOUS */
596#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
597#endif /* MORECORE_CONTIGUOUS */
598#endif /* DEFAULT_GRANULARITY */
599#ifndef DEFAULT_TRIM_THRESHOLD
600#ifndef MORECORE_CANNOT_TRIM
601#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
602#else /* MORECORE_CANNOT_TRIM */
603#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
604#endif /* MORECORE_CANNOT_TRIM */
605#endif /* DEFAULT_TRIM_THRESHOLD */
606#ifndef DEFAULT_MMAP_THRESHOLD
607#if HAVE_MMAP
608#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
609#else /* HAVE_MMAP */
610#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
611#endif /* HAVE_MMAP */
612#endif /* DEFAULT_MMAP_THRESHOLD */
613#ifndef USE_BUILTIN_FFS
614#define USE_BUILTIN_FFS 0
615#endif /* USE_BUILTIN_FFS */
616#ifndef USE_DEV_RANDOM
617#define USE_DEV_RANDOM 0
618#endif /* USE_DEV_RANDOM */
619#ifndef NO_MALLINFO
620#define NO_MALLINFO 0
621#endif /* NO_MALLINFO */
622#ifndef MALLINFO_FIELD_TYPE
623#define MALLINFO_FIELD_TYPE size_t
624#endif /* MALLINFO_FIELD_TYPE */
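
/*
 Editor's note: every option above is guarded by #ifndef, so a build can
 override any of them before this point; this is exactly how the
 SDL-specific block near the top of this file sets ABORT, USE_LOCKS and
 USE_DL_PREFIX. An illustrative override (values are examples only):

 #define USE_LOCKS 1
 #define FOOTERS 1
 #define DEFAULT_MMAP_THRESHOLD ((size_t)1024U * (size_t)1024U)
*/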
625
626#ifndef memset
627#define memset SDL_memset
628#endif
629#ifndef memcpy
630#define memcpy SDL_memcpy
631#endif
632
633/*
634 mallopt tuning options. SVID/XPG defines four standard parameter
635 numbers for mallopt, normally defined in malloc.h. None of these
636 are used in this malloc, so setting them has no effect. But this
637 malloc does support the following options.
638*/
639
640#define M_TRIM_THRESHOLD (-1)
641#define M_GRANULARITY (-2)
642#define M_MMAP_THRESHOLD (-3)
643
644/* ------------------------ Mallinfo declarations ------------------------ */
645
646#if !NO_MALLINFO
647/*
648 This version of malloc supports the standard SVID/XPG mallinfo
649 routine that returns a struct containing usage properties and
650 statistics. It should work on any system that has a
651 /usr/include/malloc.h defining struct mallinfo. The main
652 declaration needed is the mallinfo struct that is returned (by-copy)
653 by mallinfo(). The mallinfo struct contains a bunch of fields that
654 are not even meaningful in this version of malloc. These fields
655 are instead filled by mallinfo() with other numbers that might be of
656 interest.
657
658 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
659 /usr/include/malloc.h file that includes a declaration of struct
660 mallinfo. If so, it is included; else a compliant version is
661 declared below. These must be precisely the same for mallinfo() to
662 work. The original SVID version of this struct, defined on most
663 systems with mallinfo, declares all fields as ints. But some others
664 define as unsigned long. If your system defines the fields using a
665 type of different width than listed here, you MUST #include your
666 system version and #define HAVE_USR_INCLUDE_MALLOC_H.
667*/
668
669/* #define HAVE_USR_INCLUDE_MALLOC_H */
670
671#ifdef HAVE_USR_INCLUDE_MALLOC_H
672#include "/usr/include/malloc.h"
673#else /* HAVE_USR_INCLUDE_MALLOC_H */
674
675struct mallinfo
676{
677 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
678 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
679 MALLINFO_FIELD_TYPE smblks; /* always 0 */
680 MALLINFO_FIELD_TYPE hblks; /* always 0 */
681 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
682 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
683 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
684 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
685 MALLINFO_FIELD_TYPE fordblks; /* total free space */
686 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
687};
688
689#endif /* HAVE_USR_INCLUDE_MALLOC_H */
690#endif /* NO_MALLINFO */
691
692#ifdef __cplusplus
693extern "C"
694{
695#endif /* __cplusplus */
696
697#if !ONLY_MSPACES
698
699/* ------------------- Declarations of public routines ------------------- */
700
701#ifndef USE_DL_PREFIX
702#define dlcalloc calloc
703#define dlfree free
704#define dlmalloc malloc
705#define dlmemalign memalign
706#define dlrealloc realloc
707#define dlvalloc valloc
708#define dlpvalloc pvalloc
709#define dlmallinfo mallinfo
710#define dlmallopt mallopt
711#define dlmalloc_trim malloc_trim
712#define dlmalloc_stats malloc_stats
713#define dlmalloc_usable_size malloc_usable_size
714#define dlmalloc_footprint malloc_footprint
715#define dlmalloc_max_footprint malloc_max_footprint
716#define dlindependent_calloc independent_calloc
717#define dlindependent_comalloc independent_comalloc
718#endif /* USE_DL_PREFIX */
719
720
721/*
722 malloc(size_t n)
723 Returns a pointer to a newly allocated chunk of at least n bytes, or
724 null if no space is available, in which case errno is set to ENOMEM
725 on ANSI C systems.
726
727 If n is zero, malloc returns a minimum-sized chunk. (The minimum
728 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
729 systems.) Note that size_t is an unsigned type, so calls with
730 arguments that would be negative if signed are interpreted as
731 requests for huge amounts of space, which will often fail. The
732 maximum supported value of n differs across systems, but is in all
733 cases less than the maximum representable value of a size_t.
734*/
735 void *dlmalloc(size_t);
736
737/*
738 free(void* p)
739 Releases the chunk of memory pointed to by p, that had been previously
740 allocated using malloc or a related routine such as realloc.
741 It has no effect if p is null. If p was not malloced or already
742 freed, free(p) will by default cause the current program to abort.
743*/
744 void dlfree(void *);
745
746/*
747 calloc(size_t n_elements, size_t element_size);
748 Returns a pointer to n_elements * element_size bytes, with all locations
749 set to zero.
750*/
751 void *dlcalloc(size_t, size_t);
752
753/*
754 realloc(void* p, size_t n)
755 Returns a pointer to a chunk of size n that contains the same data
756 as does chunk p up to the minimum of (n, p's size) bytes, or null
757 if no space is available.
758
759 The returned pointer may or may not be the same as p. The algorithm
760 prefers extending p in most cases when possible, otherwise it
761 employs the equivalent of a malloc-copy-free sequence.
762
763 If p is null, realloc is equivalent to malloc.
764
765 If space is not available, realloc returns null, errno is set (if on
766 ANSI) and p is NOT freed.
767
768 if n is for fewer bytes than already held by p, the newly unused
769 space is lopped off and freed if possible. realloc with a size
770 argument of zero (re)allocates a minimum-sized chunk.
771
772 The old unix realloc convention of allowing the last-free'd chunk
773 to be used as an argument to realloc is not supported.
774*/
775
776 void *dlrealloc(void *, size_t);
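
/*
 Editor's illustration of the failure semantics above: because realloc
 returns null WITHOUT freeing p, assigning the result straight back to
 p would leak the old block on failure. The usual safe pattern:

 void *tmp = realloc(p, n); // dlrealloc when USE_DL_PREFIX is defined
 if (tmp != 0)
 p = tmp; // success; note p may have moved
 else
 handle_oom(); // hypothetical handler; p is still valid here
*/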
777
778/*
779 memalign(size_t alignment, size_t n);
780 Returns a pointer to a newly allocated chunk of n bytes, aligned
781 in accord with the alignment argument.
782
783 The alignment argument should be a power of two. If the argument is
784 not a power of two, the nearest greater power is used.
785 8-byte alignment is guaranteed by normal malloc calls, so don't
786 bother calling memalign with an argument of 8 or less.
787
788 Overreliance on memalign is a sure way to fragment space.
789*/
790 void *dlmemalign(size_t, size_t);
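
/*
 Editor's illustration: requesting cache-line alignment (64 here is an
 example value; the alignment must be a power of two, and values of 8
 or less are pointless since ordinary malloc already guarantees 8-byte
 alignment):

 void *p = memalign(64, 1000); // dlmemalign when USE_DL_PREFIX
*/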
791
792/*
793 valloc(size_t n);
794 Equivalent to memalign(pagesize, n), where pagesize is the page
795 size of the system. If the pagesize is unknown, 4096 is used.
796*/
797 void *dlvalloc(size_t);
798
799/*
800 mallopt(int parameter_number, int parameter_value)
801 Sets tunable parameters. The format is to provide a
802 (parameter-number, parameter-value) pair. mallopt then sets the
803 corresponding parameter to the argument value if it can (i.e., so
804 long as the value is meaningful), and returns 1 if successful else
805 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
806 normally defined in malloc.h. None of these are used in this malloc,
807 so setting them has no effect. But this malloc also supports other
808 options in mallopt. See below for details. Briefly, supported
809 parameters are as follows (listed defaults are for "typical"
810 configurations).
811
812 Symbol param # default allowed param values
813 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
814 M_GRANULARITY -2 page size any power of 2 >= page size
815 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
816*/
817 int dlmallopt(int, int);
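
/*
 Editor's illustration of the table above (values are examples, not
 recommendations); each call returns 1 on success and 0 otherwise:

 mallopt(M_TRIM_THRESHOLD, 1024 * 1024); // trim once ~1MB of slack accrues
 mallopt(M_MMAP_THRESHOLD, 512 * 1024); // mmap requests of >= 512KB
 mallopt(M_GRANULARITY, 64 * 1024); // must be a power of 2 >= page size
*/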
818
819/*
820 malloc_footprint();
821 Returns the number of bytes obtained from the system. The total
822 number of bytes allocated by malloc, realloc etc., is less than this
823 value. Unlike mallinfo, this function returns only a precomputed
824 result, so can be called frequently to monitor memory consumption.
825 Even if locks are otherwise defined, this function does not use them,
826 so results might not be up to date.
827*/
828 size_t dlmalloc_footprint(void);
829
830/*
831 malloc_max_footprint();
832 Returns the maximum number of bytes obtained from the system. This
833 value will be greater than current footprint if deallocated space
834 has been reclaimed by the system. The peak number of bytes allocated
835 by malloc, realloc etc., is less than this value. Unlike mallinfo,
836 this function returns only a precomputed result, so can be called
837 frequently to monitor memory consumption. Even if locks are
838 otherwise defined, this function does not use them, so results might
839 not be up to date.
840*/
841 size_t dlmalloc_max_footprint(void);
842
843#if !NO_MALLINFO
844/*
845 mallinfo()
846 Returns (by copy) a struct containing various summary statistics:
847
848 arena: current total non-mmapped bytes allocated from system
849 ordblks: the number of free chunks
850 smblks: always zero.
851 hblks: current number of mmapped regions
852 hblkhd: total bytes held in mmapped regions
853 usmblks: the maximum total allocated space. This will be greater
854 than current total if trimming has occurred.
855 fsmblks: always zero
856 uordblks: current total allocated space (normal or mmapped)
857 fordblks: total free space
858 keepcost: the maximum number of bytes that could ideally be released
859 back to system via malloc_trim. ("ideally" means that
860 it ignores page restrictions etc.)
861
862 Because these fields are ints, but internal bookkeeping may
863 be kept as longs, the reported values may wrap around zero and
864 thus be inaccurate.
865*/
866 struct mallinfo dlmallinfo(void);
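
/*
 Editor's illustration: reading a few of the fields described above
 (printf assumes a build where LACKS_STDIO_H is not defined):

 struct mallinfo mi = mallinfo(); // dlmallinfo when USE_DL_PREFIX
 printf("from system: %lu\n", (unsigned long)(mi.arena + mi.hblkhd));
 printf("in use: %lu\n", (unsigned long)mi.uordblks);
 printf("trimmable: %lu\n", (unsigned long)mi.keepcost);
*/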
867#endif /* NO_MALLINFO */
868
869/*
870 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
871
872 independent_calloc is similar to calloc, but instead of returning a
873 single cleared space, it returns an array of pointers to n_elements
874 independent elements that can hold contents of size elem_size, each
875 of which starts out cleared, and can be independently freed,
876 realloc'ed etc. The elements are guaranteed to be adjacently
877 allocated (this is not guaranteed to occur with multiple callocs or
878 mallocs), which may also improve cache locality in some
879 applications.
880
881 The "chunks" argument is optional (i.e., may be null, which is
882 probably the most typical usage). If it is null, the returned array
883 is itself dynamically allocated and should also be freed when it is
884 no longer needed. Otherwise, the chunks array must be of at least
885 n_elements in length. It is filled in with the pointers to the
886 chunks.
887
888 In either case, independent_calloc returns this pointer array, or
889 null if the allocation failed. If n_elements is zero and "chunks"
890 is null, it returns a chunk representing an array with zero elements
891 (which should be freed if not wanted).
892
893 Each element must be individually freed when it is no longer
894 needed. If you'd like to instead be able to free all at once, you
895 should instead use regular calloc and assign pointers into this
896 space to represent elements. (In this case though, you cannot
897 independently free elements.)
898
899 independent_calloc simplifies and speeds up implementations of many
900 kinds of pools. It may also be useful when constructing large data
901 structures that initially have a fixed number of fixed-sized nodes,
902 but the number is not known at compile time, and some of the nodes
903 may later need to be freed. For example:
904
905 struct Node { int item; struct Node* next; };
906
907 struct Node* build_list() {
908 struct Node** pool;
909 int n = read_number_of_nodes_needed();
910 if (n <= 0) return 0;
911 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
912 if (pool == 0) die();
913 // organize into a linked list...
914 struct Node* first = pool[0];
915 for (int i = 0; i < n-1; ++i)
916 pool[i]->next = pool[i+1];
917 free(pool); // Can now free the array (or not, if it is needed later)
918 return first;
919 }
920*/
921 void **dlindependent_calloc(size_t, size_t, void **);
922
923/*
924 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
925
926 independent_comalloc allocates, all at once, a set of n_elements
927 chunks with sizes indicated in the "sizes" array. It returns
928 an array of pointers to these elements, each of which can be
929 independently freed, realloc'ed etc. The elements are guaranteed to
930 be adjacently allocated (this is not guaranteed to occur with
931 multiple callocs or mallocs), which may also improve cache locality
932 in some applications.
933
934 The "chunks" argument is optional (i.e., may be null). If it is null
935 the returned array is itself dynamically allocated and should also
936 be freed when it is no longer needed. Otherwise, the chunks array
937 must be of at least n_elements in length. It is filled in with the
938 pointers to the chunks.
939
940 In either case, independent_comalloc returns this pointer array, or
941 null if the allocation failed. If n_elements is zero and chunks is
942 null, it returns a chunk representing an array with zero elements
943 (which should be freed if not wanted).
944
945 Each element must be individually freed when it is no longer
946 needed. If you'd like to instead be able to free all at once, you
947 should instead use a single regular malloc, and assign pointers at
948 particular offsets in the aggregate space. (In this case though, you
949 cannot independently free elements.)
950
951 independent_comalloc differs from independent_calloc in that each
952 element may have a different size, and also that it does not
953 automatically clear elements.
954
955 independent_comalloc can be used to speed up allocation in cases
956 where several structs or objects must always be allocated at the
957 same time. For example:
958
959 struct Head { ... };
960 struct Foot { ... };
961
962 void send_message(char* msg) {
963 int msglen = strlen(msg);
964 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
965 void* chunks[3];
966 if (independent_comalloc(3, sizes, chunks) == 0)
967 die();
968 struct Head* head = (struct Head*)(chunks[0]);
969 char* body = (char*)(chunks[1]);
970 struct Foot* foot = (struct Foot*)(chunks[2]);
971 // ...
972 }
973
974 In general though, independent_comalloc is worth using only for
975 larger values of n_elements. For small values, you probably won't
976 detect enough difference from series of malloc calls to bother.
977
978 Overuse of independent_comalloc can increase overall memory usage,
979 since it cannot reuse existing noncontiguous small chunks that
980 might be available for some of the elements.
981*/
982 void **dlindependent_comalloc(size_t, size_t *, void **);
983
984
985/*
986 pvalloc(size_t n);
987 Equivalent to valloc(minimum-page-that-holds(n)), that is,
988 round up n to nearest pagesize.
989 */
990 void *dlpvalloc(size_t);
991
992/*
993 malloc_trim(size_t pad);
994
995 If possible, gives memory back to the system (via negative arguments
996 to sbrk) if there is unused memory at the `high' end of the malloc
997 pool or in unused MMAP segments. You can call this after freeing
998 large blocks of memory to potentially reduce the system-level memory
999 requirements of a program. However, it cannot guarantee to reduce
1000 memory. Under some allocation patterns, some large free blocks of
1001 memory will be locked between two used chunks, so they cannot be
1002 given back to the system.
1003
1004 The `pad' argument to malloc_trim represents the amount of free
1005 trailing space to leave untrimmed. If this argument is zero, only
1006 the minimum amount of memory to maintain internal data structures
1007 will be left. Non-zero arguments can be supplied to maintain enough
1008 trailing space to service future expected allocations without having
1009 to re-obtain memory from the system.
1010
1011 Malloc_trim returns 1 if it actually released any memory, else 0.
1012*/
1013 int dlmalloc_trim(size_t);
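
/*
 Editor's illustration of the pad argument above: after dropping a
 large working set, a program might return slack to the system while
 keeping some headroom for expected future allocations:

 free(big_buffer); // big_buffer is a hypothetical large allocation
 malloc_trim(64 * 1024); // trim, but leave 64KB of trailing space
*/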
1014
1015/*
1016 malloc_usable_size(void* p);
1017
1018 Returns the number of bytes you can actually use in
1019 an allocated chunk, which may be more than you requested (although
1020 often not) due to alignment and minimum size constraints.
1021 You can use this many bytes without worrying about
1022 overwriting other allocated objects. This is not a particularly great
1023 programming practice. malloc_usable_size can be more useful in
1024 debugging and assertions, for example:
1025
1026 p = malloc(n);
1027 assert(malloc_usable_size(p) >= 256);
1028*/
1029 size_t dlmalloc_usable_size(void *);
1030
1031/*
1032 malloc_stats();
1033 Prints on stderr the amount of space obtained from the system (both
1034 via sbrk and mmap), the maximum amount (which may be more than
1035 current if malloc_trim and/or munmap got called), and the current
1036 number of bytes allocated via malloc (or realloc, etc) but not yet
1037 freed. Note that this is the number of bytes allocated, not the
1038 number requested. It will be larger than the number requested
1039 because of alignment and bookkeeping overhead. Because it includes
1040 alignment wastage as being in use, this figure may be greater than
1041 zero even when no user-level chunks are allocated.
1042
1043 The reported current and maximum system memory can be inaccurate if
1044 a program makes other calls to system memory allocation functions
1045 (normally sbrk) outside of malloc.
1046
1047 malloc_stats prints only the most commonly interesting statistics.
1048 More information can be obtained by calling mallinfo.
1049*/
1050 void dlmalloc_stats(void);
1051
1052#endif /* ONLY_MSPACES */
1053
1054#if MSPACES
1055
1056/*
1057 mspace is an opaque type representing an independent
1058 region of space that supports mspace_malloc, etc.
1059*/
1060 typedef void *mspace;
1061
1062/*
1063 create_mspace creates and returns a new independent space with the
1064 given initial capacity, or, if 0, the default granularity size. It
1065 returns null if there is no system memory available to create the
1066 space. If argument locked is non-zero, the space uses a separate
1067 lock to control access. The capacity of the space will grow
1068 dynamically as needed to service mspace_malloc requests. You can
1069 control the sizes of incremental increases of this space by
1070 compiling with a different DEFAULT_GRANULARITY or dynamically
1071 setting with mallopt(M_GRANULARITY, value).
1072*/
1073 mspace create_mspace(size_t capacity, int locked);
1074
1075/*
1076 destroy_mspace destroys the given space, and attempts to return all
1077 of its memory back to the system, returning the total number of
1078 bytes freed. After destruction, the results of access to all memory
1079 used by the space become undefined.
1080*/
1081 size_t destroy_mspace(mspace msp);
1082
1083/*
1084 create_mspace_with_base uses the memory supplied as the initial base
1085 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1086 space is used for bookkeeping, so the capacity must be at least this
1087 large. (Otherwise 0 is returned.) When this initial space is
1088 exhausted, additional memory will be obtained from the system.
1089 Destroying this space will deallocate all additionally allocated
1090 space (if possible) but not the initial base.
1091*/
1092 mspace create_mspace_with_base(void *base, size_t capacity, int locked);
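
/*
 Editor's sketch of carving an allocator out of caller-supplied storage;
 the name 'arena' is illustrative. Per the comment above, part of the
 buffer is consumed by bookkeeping, so it must comfortably exceed
 128*sizeof(size_t) bytes:

 static char arena[64 * 1024];
 mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
 if (ms != 0) {
 void *p = mspace_malloc(ms, 100);
 mspace_free(ms, p);
 destroy_mspace(ms); // releases extension memory, never 'arena' itself
 }
*/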
1093
1094/*
1095 mspace_malloc behaves as malloc, but operates within
1096 the given space.
1097*/
1098 void *mspace_malloc(mspace msp, size_t bytes);
1099
1100/*
1101 mspace_free behaves as free, but operates within
1102 the given space.
1103
1104 If compiled with FOOTERS==1, mspace_free is not actually needed.
1105 free may be called instead of mspace_free because freed chunks from
1106 any space are handled by their originating spaces.
1107*/
1108 void mspace_free(mspace msp, void *mem);
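
/*
 Editor's illustration of a complete mspace lifecycle using the
 routines above; destroy_mspace releases everything in the space at
 once, so per-chunk mspace_free calls are optional during teardown:

 mspace pool = create_mspace(0, 0); // default capacity, no lock
 if (pool != 0) {
 char *buf = (char *)mspace_malloc(pool, 256);
 if (buf != 0)
 mspace_free(pool, buf);
 destroy_mspace(pool);
 }
*/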
1109
1110/*
1111 mspace_realloc behaves as realloc, but operates within
1112 the given space.
1113
1114 If compiled with FOOTERS==1, mspace_realloc is not actually
1115 needed. realloc may be called instead of mspace_realloc because
1116 realloced chunks from any space are handled by their originating
1117 spaces.
1118*/
1119 void *mspace_realloc(mspace msp, void *mem, size_t newsize);
1120
1121/*
1122 mspace_calloc behaves as calloc, but operates within
1123 the given space.
1124*/
1125 void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1126
1127/*
1128 mspace_memalign behaves as memalign, but operates within
1129 the given space.
1130*/
1131 void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1132
1133/*
1134 mspace_independent_calloc behaves as independent_calloc, but
1135 operates within the given space.
1136*/
1137 void **mspace_independent_calloc(mspace msp, size_t n_elements,
1138 size_t elem_size, void *chunks[]);
1139
1140/*
1141 mspace_independent_comalloc behaves as independent_comalloc, but
1142 operates within the given space.
1143*/
1144 void **mspace_independent_comalloc(mspace msp, size_t n_elements,
1145 size_t sizes[], void *chunks[]);
1146
1147/*
1148 mspace_footprint() returns the number of bytes obtained from the
1149 system for this space.
1150*/
1151 size_t mspace_footprint(mspace msp);
1152
1153/*
1154 mspace_max_footprint() returns the peak number of bytes obtained from the
1155 system for this space.
1156*/
1157 size_t mspace_max_footprint(mspace msp);
1158
1159
1160#if !NO_MALLINFO
1161/*
1162 mspace_mallinfo behaves as mallinfo, but reports properties of
1163 the given space.
1164*/
1165 struct mallinfo mspace_mallinfo(mspace msp);
1166#endif /* NO_MALLINFO */
1167
1168/*
1169 mspace_malloc_stats behaves as malloc_stats, but reports
1170 properties of the given space.
1171*/
1172 void mspace_malloc_stats(mspace msp);
1173
1174/*
1175 mspace_trim behaves as malloc_trim, but
1176 operates within the given space.
1177*/
1178 int mspace_trim(mspace msp, size_t pad);
1179
1180/*
1181 An alias for mallopt.
1182*/
1183 int mspace_mallopt(int, int);
1184
1185#endif /* MSPACES */
1186
1187#ifdef __cplusplus
1188}; /* end of extern "C" */
1189#endif /* __cplusplus */
1190
1191/*
1192 ========================================================================
1193 To make a fully customizable malloc.h header file, cut everything
1194 above this line, put into file malloc.h, edit to suit, and #include it
1195 on the next line, as well as in programs that use this malloc.
1196 ========================================================================
1197*/
1198
1199/* #include "malloc.h" */
1200
1201/*------------------------------ internal #includes ---------------------- */
1202
1203#ifdef _MSC_VER
1204#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1205#endif /* _MSC_VER */
1206
1207#ifndef LACKS_STDIO_H
1208#include <stdio.h> /* for printing in malloc_stats */
1209#endif
1210
1211#ifndef LACKS_ERRNO_H
1212#include <errno.h> /* for MALLOC_FAILURE_ACTION */
1213#endif /* LACKS_ERRNO_H */
1214#if FOOTERS
1215#include <time.h> /* for magic initialization */
1216#endif /* FOOTERS */
1217#ifndef LACKS_STDLIB_H
1218#include <stdlib.h> /* for abort() */
1219#endif /* LACKS_STDLIB_H */
1220#ifdef DEBUG
1221#if ABORT_ON_ASSERT_FAILURE
1222#define assert(x) if(!(x)) ABORT
1223#else /* ABORT_ON_ASSERT_FAILURE */
1224#include <assert.h>
1225#endif /* ABORT_ON_ASSERT_FAILURE */
1226#else /* DEBUG */
1227#define assert(x)
1228#endif /* DEBUG */
1229#ifndef LACKS_STRING_H
1230#include <string.h> /* for memset etc */
1231#endif /* LACKS_STRING_H */
1232#if USE_BUILTIN_FFS
1233#ifndef LACKS_STRINGS_H
1234#include <strings.h> /* for ffs */
1235#endif /* LACKS_STRINGS_H */
1236#endif /* USE_BUILTIN_FFS */
1237#if HAVE_MMAP
1238#ifndef LACKS_SYS_MMAN_H
1239#include <sys/mman.h> /* for mmap */
1240#endif /* LACKS_SYS_MMAN_H */
1241#ifndef LACKS_FCNTL_H
1242#include <fcntl.h>
1243#endif /* LACKS_FCNTL_H */
1244#endif /* HAVE_MMAP */
1245#if HAVE_MORECORE
1246#ifndef LACKS_UNISTD_H
1247#include <unistd.h> /* for sbrk */
1248#else /* LACKS_UNISTD_H */
1249#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1250extern void *sbrk(ptrdiff_t);
1251#endif /* FreeBSD etc */
1252#endif /* LACKS_UNISTD_H */
1253#endif /* HAVE_MORECORE */
1254
1255#ifndef WIN32
1256#ifndef malloc_getpagesize
1257# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1258# ifndef _SC_PAGE_SIZE
1259# define _SC_PAGE_SIZE _SC_PAGESIZE
1260# endif
1261# endif
1262# ifdef _SC_PAGE_SIZE
1263# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1264# else
1265# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1266extern size_t getpagesize();
1267# define malloc_getpagesize getpagesize()
1268# else
1269# ifdef WIN32 /* use supplied emulation of getpagesize */
1270# define malloc_getpagesize getpagesize()
1271# else
1272# ifndef LACKS_SYS_PARAM_H
1273# include <sys/param.h>
1274# endif
1275# ifdef EXEC_PAGESIZE
1276# define malloc_getpagesize EXEC_PAGESIZE
1277# else
1278# ifdef NBPG
1279# ifndef CLSIZE
1280# define malloc_getpagesize NBPG
1281# else
1282# define malloc_getpagesize (NBPG * CLSIZE)
1283# endif
1284# else
1285# ifdef NBPC
1286# define malloc_getpagesize NBPC
1287# else
1288# ifdef PAGESIZE
1289# define malloc_getpagesize PAGESIZE
1290# else /* just guess */
1291# define malloc_getpagesize ((size_t)4096U)
1292# endif
1293# endif
1294# endif
1295# endif
1296# endif
1297# endif
1298# endif
1299#endif
1300#endif
1301
1302/* ------------------- size_t and alignment properties -------------------- */
1303
1304/* The byte and bit size of a size_t */
1305#define SIZE_T_SIZE (sizeof(size_t))
1306#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1307
1308/* Some constants coerced to size_t */
1309/* Annoying but necessary to avoid errors on some platforms */
1310#define SIZE_T_ZERO ((size_t)0)
1311#define SIZE_T_ONE ((size_t)1)
1312#define SIZE_T_TWO ((size_t)2)
1313#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1314#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1315#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1316#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1317
1318/* The bit mask value corresponding to MALLOC_ALIGNMENT */
1319#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1320
1321/* True if address a has acceptable alignment */
1322#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1323
1324/* the number of bytes to offset an address to align it */
1325#define align_offset(A)\
1326 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1327 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
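
/* Editor's worked example of the two macros above, assuming the default
 MALLOC_ALIGNMENT of 8 (so CHUNK_ALIGN_MASK == 7): for an address A with
 (size_t)A == 0x1004, (size_t)A & 7 == 4, so align_offset(A) is
 (8 - 4) & 7 == 4, and A + 4 == 0x1008 is aligned; for A == 0x1008 the
 masked value is already 0 and align_offset(A) is 0. */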
1328
1329/* -------------------------- MMAP preliminaries ------------------------- */
1330
1331/*
1332 If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1333 checks to fail so compiler optimizer can delete code rather than
1334 using so many "#if"s.
1335*/
1336
1337
1338/* MORECORE and MMAP must return MFAIL on failure */
1339#define MFAIL ((void*)(MAX_SIZE_T))
1340#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1341
1342#if !HAVE_MMAP
1343#define IS_MMAPPED_BIT (SIZE_T_ZERO)
1344#define USE_MMAP_BIT (SIZE_T_ZERO)
1345#define CALL_MMAP(s) MFAIL
1346#define CALL_MUNMAP(a, s) (-1)
1347#define DIRECT_MMAP(s) MFAIL
1348
1349#else /* HAVE_MMAP */
1350#define IS_MMAPPED_BIT (SIZE_T_ONE)
1351#define USE_MMAP_BIT (SIZE_T_ONE)
1352
1353#if !defined(WIN32) && !defined (__OS2__)
1354#define CALL_MUNMAP(a, s) munmap((a), (s))
1355#define MMAP_PROT (PROT_READ|PROT_WRITE)
1356#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1357#define MAP_ANONYMOUS MAP_ANON
1358#endif /* MAP_ANON */
1359#ifdef MAP_ANONYMOUS
1360#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1361#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1362#else /* MAP_ANONYMOUS */
1363/*
1364 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1365 is unlikely to be needed, but is supplied just in case.
1366*/
1367#define MMAP_FLAGS (MAP_PRIVATE)
1368static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1369#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1370 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1371 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1372 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1373#endif /* MAP_ANONYMOUS */
1374
1375#define DIRECT_MMAP(s) CALL_MMAP(s)
1376
1377#elif defined(__OS2__)
1378
1379/* OS/2 MMAP via DosAllocMem */
1380static void* os2mmap(size_t size) {
1381 void* ptr;
1382 if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
1383 DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
1384 return MFAIL;
1385 return ptr;
1386}
1387
1388#define os2direct_mmap(n) os2mmap(n)
1389
1390/* This function supports releasing coalesced segments */
1391static int os2munmap(void* ptr, size_t size) {
1392 while (size) {
1393 ULONG ulSize = size;
1394 ULONG ulFlags = 0;
1395 if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
1396 return -1;
1397 if ((ulFlags & PAG_BASE) == 0 || (ulFlags & PAG_COMMIT) == 0 ||
1398 ulSize > size)
1399 return -1;
1400 if (DosFreeMem(ptr) != 0)
1401 return -1;
1402 ptr = (void *) ((char *) ptr + ulSize);
1403 size -= ulSize;
1404 }
1405 return 0;
1406}
1407
1408#define CALL_MMAP(s) os2mmap(s)
1409#define CALL_MUNMAP(a, s) os2munmap((a), (s))
1410#define DIRECT_MMAP(s) os2direct_mmap(s)
1411
1412#else /* WIN32 */
1413
1414/* Win32 MMAP via VirtualAlloc */
1415static void *
1416win32mmap(size_t size)
1417{
1418 void *ptr =
1419 VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1420 return (ptr != 0) ? ptr : MFAIL;
1421}
1422
1423/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1424static void *
1425win32direct_mmap(size_t size)
1426{
1427 void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1428 PAGE_READWRITE);
1429 return (ptr != 0) ? ptr : MFAIL;
1430}
1431
1432/* This function supports releasing coalesced segments */
1433static int
1434win32munmap(void *ptr, size_t size)
1435{
1436 MEMORY_BASIC_INFORMATION minfo;
1437 char *cptr = ptr;
1438 while (size) {
1439 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1440 return -1;
1441 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1442 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1443 return -1;
1444 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1445 return -1;
1446 cptr += minfo.RegionSize;
1447 size -= minfo.RegionSize;
1448 }
1449 return 0;
1450}
1451
1452#define CALL_MMAP(s) win32mmap(s)
1453#define CALL_MUNMAP(a, s) win32munmap((a), (s))
1454#define DIRECT_MMAP(s) win32direct_mmap(s)
1455#endif /* WIN32 */
1456#endif /* HAVE_MMAP */
1457
1458#if HAVE_MMAP && HAVE_MREMAP
1459#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1460#else /* HAVE_MMAP && HAVE_MREMAP */
1461#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1462#endif /* HAVE_MMAP && HAVE_MREMAP */
1463
1464#if HAVE_MORECORE
1465#define CALL_MORECORE(S) MORECORE(S)
1466#else /* HAVE_MORECORE */
1467#define CALL_MORECORE(S) MFAIL
1468#endif /* HAVE_MORECORE */
1469
1470/* mstate bit set if contiguous morecore disabled or failed */
1471#define USE_NONCONTIGUOUS_BIT (4U)
1472
1473/* segment bit set in create_mspace_with_base */
1474#define EXTERN_BIT (8U)
1475
1476
1477/* --------------------------- Lock preliminaries ------------------------ */
1478
1479#if USE_LOCKS
1480
1481/*
1482 When locks are defined, there are up to two global locks:
1483
1484 * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1485 MORECORE. In many cases sys_alloc requires two calls, that should
1486 not be interleaved with calls by other threads. This does not
1487 protect against direct calls to MORECORE by other threads not
1488 using this lock, so there is still code to cope as best we can with
1489 interference.
1490
1491 * magic_init_mutex ensures that mparams.magic and other
1492 unique mparams values are initialized only once.
1493*/
1494
1495#if !defined(WIN32) && !defined(__OS2__)
1496/* By default use posix locks */
1497#include <pthread.h>
1498#define MLOCK_T pthread_mutex_t
1499#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1500#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1501#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1502
1503#if HAVE_MORECORE
1504static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1505#endif /* HAVE_MORECORE */
1506
1507static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1508
1509#elif defined(__OS2__)
1510#define MLOCK_T HMTX
1511#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
1512#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
1513#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
1514#if HAVE_MORECORE
1515static MLOCK_T morecore_mutex;
1516#endif /* HAVE_MORECORE */
1517static MLOCK_T magic_init_mutex;
1518
1519#else /* WIN32 */
1520/*
1521 Because lock-protected regions have bounded times, and there
1522 are no recursive lock calls, we can use simple spinlocks.
1523*/
1524
1525#define MLOCK_T long
1526static int
1527win32_acquire_lock(MLOCK_T * sl)
1528{
1529 for (;;) {
1530#ifdef InterlockedCompareExchangePointer
1531 if (!InterlockedCompareExchange(sl, 1, 0))
1532 return 0;
1533#else /* Use older void* version */
1534 if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
1535 return 0;
1536#endif /* InterlockedCompareExchangePointer */
1537 Sleep(0);
1538 }
1539}
1540
1541static void
1542win32_release_lock(MLOCK_T * sl)
1543{
1544 InterlockedExchange(sl, 0);
1545}
1546
1547#define INITIAL_LOCK(l) *(l)=0
1548#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1549#define RELEASE_LOCK(l) win32_release_lock(l)
1550#if HAVE_MORECORE
1551static MLOCK_T morecore_mutex;
1552#endif /* HAVE_MORECORE */
1553static MLOCK_T magic_init_mutex;
1554#endif /* WIN32 */
1555
1556#define USE_LOCK_BIT (2U)
1557#else /* USE_LOCKS */
1558#define USE_LOCK_BIT (0U)
1559#define INITIAL_LOCK(l)
1560#endif /* USE_LOCKS */
1561
1562#if USE_LOCKS && HAVE_MORECORE
1563#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1564#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1565#else /* USE_LOCKS && HAVE_MORECORE */
1566#define ACQUIRE_MORECORE_LOCK()
1567#define RELEASE_MORECORE_LOCK()
1568#endif /* USE_LOCKS && HAVE_MORECORE */
1569
1570#if USE_LOCKS
1571#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1572#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1573#else /* USE_LOCKS */
1574#define ACQUIRE_MAGIC_INIT_LOCK()
1575#define RELEASE_MAGIC_INIT_LOCK()
1576#endif /* USE_LOCKS */
1577
1578
1579/* ----------------------- Chunk representations ------------------------ */
1580
1581/*
1582 (The following includes lightly edited explanations by Colin Plumb.)
1583
1584 The malloc_chunk declaration below is misleading (but accurate and
1585 necessary). It declares a "view" into memory allowing access to
1586 necessary fields at known offsets from a given base.
1587
1588 Chunks of memory are maintained using a `boundary tag' method as
1589 originally described by Knuth. (See the paper by Paul Wilson
1590 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1591 techniques.) Sizes of free chunks are stored both in the front of
1592 each chunk and at the end. This makes consolidating fragmented
1593 chunks into bigger chunks fast. The head fields also hold bits
1594 representing whether chunks are free or in use.
1595
1596 Here are some pictures to make it clearer. They are "exploded" to
1597 show that the state of a chunk can be thought of as extending from
1598 the high 31 bits of the head field of its header through the
1599 prev_foot and PINUSE_BIT bit of the following chunk header.
1600
1601 A chunk that's in use looks like:
1602
1603 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1604 | Size of previous chunk (if P = 1) |
1605 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1606 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1607 | Size of this chunk 1| +-+
1608 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1609 | |
1610 +- -+
1611 | |
1612 +- -+
1613 | :
1614 +- size - sizeof(size_t) available payload bytes -+
1615 : |
1616 chunk-> +- -+
1617 | |
1618 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1619 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1620 | Size of next chunk (may or may not be in use) | +-+
1621 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1622
1623 And if it's free, it looks like this:
1624
1625 chunk-> +- -+
1626 | User payload (must be in use, or we would have merged!) |
1627 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1628 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1629 | Size of this chunk 0| +-+
1630 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1631 | Next pointer |
1632 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1633 | Prev pointer |
1634 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1635 | :
1636 +- size - sizeof(struct chunk) unused bytes -+
1637 : |
1638 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1639 | Size of this chunk |
1640 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1641 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1642 | Size of next chunk (must be in use, or we would have merged)| +-+
1643 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1644 | :
1645 +- User payload -+
1646 : |
1647 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1648 |0|
1649 +-+
1650 Note that since we always merge adjacent free chunks, the chunks
1651 adjacent to a free chunk must be in use.
1652
1653 Given a pointer to a chunk (which can be derived trivially from the
1654 payload pointer) we can, in O(1) time, find out whether the adjacent
1655 chunks are free, and if so, unlink them from the lists that they
1656 are on and merge them with the current chunk.
1657
1658 Chunks always begin on even word boundaries, so the mem portion
1659 (which is returned to the user) is also on an even word boundary, and
1660 thus at least double-word aligned.
1661
1662 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1663 chunk size (which is always a multiple of two words), is an in-use
1664 bit for the *previous* chunk. If that bit is *clear*, then the
1665 word before the current chunk size contains the previous chunk
1666 size, and can be used to find the front of the previous chunk.
1667 The very first chunk allocated always has this bit set, preventing
1668 access to non-existent (or non-owned) memory. If pinuse is set for
1669 any given chunk, then you CANNOT determine the size of the
1670 previous chunk, and might even get a memory addressing fault when
1671 trying to do so.
1672
1673 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1674 the chunk size redundantly records whether the current chunk is
1675 inuse. This redundancy enables usage checks within free and realloc,
1676 and reduces indirection when freeing and consolidating chunks.
1677
1678 Each freshly allocated chunk must have both cinuse and pinuse set.
1679 That is, each allocated chunk borders either a previously allocated
1680 and still in-use chunk, or the base of its memory arena. This is
1681 ensured by making all allocations from the `lowest' part of any
1682 found chunk. Further, no free chunk physically borders another one,
1683 so each free chunk is known to be preceded and followed by either
1684 inuse chunks or the ends of memory.
1685
1686 Note that the `foot' of the current chunk is actually represented
1687 as the prev_foot of the NEXT chunk. This makes it easier to
1688 deal with alignments etc but can be very confusing when trying
1689 to extend or adapt this code.
1690
1691 The exceptions to all this are
1692
1693 1. The special chunk `top' is the top-most available chunk (i.e.,
1694 the one bordering the end of available memory). It is treated
1695 specially. Top is never included in any bin, is used only if
1696 no other chunk is available, and is released back to the
1697 system if it is very large (see M_TRIM_THRESHOLD). In effect,
1698 the top chunk is treated as larger (and thus less well
1699 fitting) than any other available chunk. The top chunk
1700 doesn't update its trailing size field since there is no next
1701 contiguous chunk that would have to index off it. However,
1702 space is still allocated for it (TOP_FOOT_SIZE) to enable
1703 separation or merging when space is extended.
1704
1705 2. Chunks allocated via mmap, which have the lowest-order bit
1706 (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1707 PINUSE_BIT in their head fields. Because they are allocated
1708 one-by-one, each must carry its own prev_foot field, which is
1709 also used to hold the offset this chunk has within its mmapped
1710 region, which is needed to preserve alignment. Each mmapped
1711 chunk is trailed by the first two fields of a fake next-chunk
1712 for sake of usage checks.
1713
1714*/
1715
1716struct malloc_chunk
1717{
1718 size_t prev_foot; /* Size of previous chunk (if free). */
1719 size_t head; /* Size and inuse bits. */
1720 struct malloc_chunk *fd; /* double links -- used only if free. */
1721 struct malloc_chunk *bk;
1722};
1723
1724typedef struct malloc_chunk mchunk;
1725typedef struct malloc_chunk *mchunkptr;
1726typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1727typedef size_t bindex_t; /* Described below */
1728typedef unsigned int binmap_t; /* Described below */
1729typedef unsigned int flag_t; /* The type of various bit flag sets */
1730
1731/* ------------------- Chunks sizes and alignments ----------------------- */
1732
1733#define MCHUNK_SIZE (sizeof(mchunk))
1734
1735#if FOOTERS
1736#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1737#else /* FOOTERS */
1738#define CHUNK_OVERHEAD (SIZE_T_SIZE)
1739#endif /* FOOTERS */
1740
1741/* MMapped chunks need a second word of overhead ... */
1742#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1743/* ... and additional padding for fake next-chunk at foot */
1744#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1745
1746/* The smallest size we can malloc is an aligned minimal chunk */
1747#define MIN_CHUNK_SIZE\
1748 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1749
1750/* conversion from malloc headers to user pointers, and back */
1751#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1752#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1753/* chunk associated with aligned address A */
1754#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1755
1756/* Bounds on request (not chunk) sizes. */
1757#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1758#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1759
1760/* pad request bytes into a usable size */
1761#define pad_request(req) \
1762 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1763
1764/* pad request, checking for minimum (but not maximum) */
1765#define request2size(req) \
1766 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
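
/*
  For example, on a 32-bit build without FOOTERS (CHUNK_OVERHEAD == 4,
  CHUNK_ALIGN_MASK == 7, MIN_CHUNK_SIZE == 16): request2size(1) ==
  MIN_CHUNK_SIZE == 16, while request2size(20) == (20 + 4 + 7) & ~7 == 24.
*/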
1767
1768
1769/* ------------------ Operations on head and foot fields ----------------- */
1770
1771/*
1772 The head field of a chunk is or'ed with PINUSE_BIT when previous
1773 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
1774 use. If the chunk was obtained with mmap, the prev_foot field has
1775 IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
1776 mmapped region to the base of the chunk.
1777*/
1778
1779#define PINUSE_BIT (SIZE_T_ONE)
1780#define CINUSE_BIT (SIZE_T_TWO)
1781#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1782
1783/* Head value for fenceposts */
1784#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1785
1786/* extraction of fields from head words */
1787#define cinuse(p) ((p)->head & CINUSE_BIT)
1788#define pinuse(p) ((p)->head & PINUSE_BIT)
1789#define chunksize(p) ((p)->head & ~(INUSE_BITS))
1790
1791#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1792#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
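
/*
  Example of the head encoding: a 48-byte free chunk whose previous
  neighbor is in use has head == 48 | PINUSE_BIT == 0x31, so
  chunksize() recovers 48 while pinuse() is true and cinuse() is false.
*/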
1793
1794/* Treat space at ptr +/- offset as a chunk */
1795#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1796#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
1797
1798/* Ptr to next or previous physical malloc_chunk. */
1799#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1800#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
1801
1802/* extract next chunk's pinuse bit */
1803#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
1804
1805/* Get/set size at footer */
1806#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1807#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
1808
1809/* Set size, pinuse bit, and foot */
1810#define set_size_and_pinuse_of_free_chunk(p, s)\
1811 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
1812
1813/* Set size, pinuse bit, foot, and clear next pinuse */
1814#define set_free_with_pinuse(p, s, n)\
1815 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
1816
1817#define is_mmapped(p)\
1818 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
1819
1820/* Get the internal overhead associated with chunk p */
1821#define overhead_for(p)\
1822 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
1823
1824/* Return true if malloced space is not necessarily cleared */
1825#if MMAP_CLEARS
1826#define calloc_must_clear(p) (!is_mmapped(p))
1827#else /* MMAP_CLEARS */
1828#define calloc_must_clear(p) (1)
1829#endif /* MMAP_CLEARS */
1830
1831/* ---------------------- Overlaid data structures ----------------------- */
1832
1833/*
1834 When chunks are not in use, they are treated as nodes of either
1835 lists or trees.
1836
1837 "Small" chunks are stored in circular doubly-linked lists, and look
1838 like this:
1839
1840 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1841 | Size of previous chunk |
1842 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1843 `head:' | Size of chunk, in bytes |P|
1844 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1845 | Forward pointer to next chunk in list |
1846 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1847 | Back pointer to previous chunk in list |
1848 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1849 | Unused space (may be 0 bytes long) .
1850 . .
1851 . |
1852nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1853 `foot:' | Size of chunk, in bytes |
1854 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1855
1856 Larger chunks are kept in a form of bitwise digital trees (aka
1857 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
1858 free chunks greater than 256 bytes, their size doesn't impose any
1859 constraints on user chunk sizes. Each node looks like:
1860
1861 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1862 | Size of previous chunk |
1863 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1864 `head:' | Size of chunk, in bytes |P|
1865 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1866 | Forward pointer to next chunk of same size |
1867 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1868 | Back pointer to previous chunk of same size |
1869 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1870 | Pointer to left child (child[0]) |
1871 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1872 | Pointer to right child (child[1]) |
1873 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1874 | Pointer to parent |
1875 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1876 | bin index of this chunk |
1877 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1878 | Unused space .
1879 . |
1880nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1881 `foot:' | Size of chunk, in bytes |
1882 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1883
1884 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
1885 of the same size are arranged in a circularly-linked list, with only
1886 the oldest chunk (the next to be used, in our FIFO ordering)
1887 actually in the tree. (Tree members are distinguished by a non-null
1888 parent pointer.) If a chunk with the same size as an existing node
1889 is inserted, it is linked off the existing node using pointers that
1890 work in the same way as fd/bk pointers of small chunks.
1891
1892 Each tree contains a power of 2 sized range of chunk sizes (the
1893 smallest is 0x100 <= x < 0x180), which is divided in half at each
1894 tree level, with the chunks in the smaller half of the range (0x100
1895 <= x < 0x140 for the top node) in the left subtree and the larger
1896 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
1897 done by inspecting individual bits.
1898
1899 Using these rules, each node's left subtree contains all smaller
1900 sizes than its right subtree. However, the node at the root of each
1901 subtree has no particular ordering relationship to either. (The
1902 dividing line between the subtree sizes is based on trie relation.)
1903 If we remove the last chunk of a given size from the interior of the
1904 tree, we need to replace it with a leaf node. The tree ordering
1905 rules permit a node to be replaced by any leaf below it.
1906
1907 The smallest chunk in a tree (a common operation in a best-fit
1908 allocator) can be found by walking a path to the leftmost leaf in
1909 the tree. Unlike a usual binary tree, where we follow left child
1910 pointers until we reach a null, here we follow the right child
1911 pointer any time the left one is null, until we reach a leaf with
1912 both child pointers null. The smallest chunk in the tree will be
1913 somewhere along that path.
1914
1915 The worst case number of steps to add, find, or remove a node is
1916 bounded by the number of bits differentiating chunks within
1917 bins. Under current bin calculations, this ranges from 6 up to 21
1918 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1919 is of course much better.
1920*/
1921
1922struct malloc_tree_chunk
1923{
1924 /* The first four fields must be compatible with malloc_chunk */
1925 size_t prev_foot;
1926 size_t head;
1927 struct malloc_tree_chunk *fd;
1928 struct malloc_tree_chunk *bk;
1929
1930 struct malloc_tree_chunk *child[2];
1931 struct malloc_tree_chunk *parent;
1932 bindex_t index;
1933};
1934
1935typedef struct malloc_tree_chunk tchunk;
1936typedef struct malloc_tree_chunk *tchunkptr;
1937typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1938
1939/* A little helper macro for trees */
1940#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
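
/*
  A minimal sketch of the leftmost walk described above (illustration
  only -- smallest_candidate is not a function in this file): tracking
  the best size seen along the path of leftmost_child steps reaches
  the smallest chunk held in a tree, much as the tree-search code
  later in this file does.
*/
#if 0
static tchunkptr
smallest_candidate(tchunkptr t)
{
    tchunkptr v = t;
    while ((t = leftmost_child(t)) != 0) {
        if (chunksize(t) < chunksize(v))
            v = t; /* smaller size found further down the path */
    }
    return v;
}
#endif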
1941
1942/* ----------------------------- Segments -------------------------------- */
1943
1944/*
1945 Each malloc space may include non-contiguous segments, held in a
1946 list headed by an embedded malloc_segment record representing the
1947 top-most space. Segments also include flags holding properties of
1948 the space. Large chunks that are directly allocated by mmap are not
1949 included in this list. They are instead independently created and
1950 destroyed without otherwise keeping track of them.
1951
1952 Segment management mainly comes into play for spaces allocated by
1953 MMAP. Any call to MMAP might or might not return memory that is
1954 adjacent to an existing segment. MORECORE normally contiguously
1955 extends the current space, so this space is almost always adjacent,
1956 which is simpler and faster to deal with. (This is why MORECORE is
1957 used preferentially to MMAP when both are available -- see
1958 sys_alloc.) When allocating using MMAP, we don't use any of the
1959 hinting mechanisms (inconsistently) supported in various
1960 implementations of unix mmap, or distinguish reserving from
1961 committing memory. Instead, we just ask for space, and exploit
1962 contiguity when we get it. It is probably possible to do
1963 better than this on some systems, but no general scheme seems
1964 to be significantly better.
1965
1966 Management entails a simpler variant of the consolidation scheme
1967 used for chunks to reduce fragmentation -- new adjacent memory is
1968 normally prepended or appended to an existing segment. However,
1969 there are limitations compared to chunk consolidation that mostly
1970 reflect the fact that segment processing is relatively infrequent
1971 (occurring only when getting memory from system) and that we
1972 don't expect to have huge numbers of segments:
1973
1974 * Segments are not indexed, so traversal requires linear scans. (It
1975 would be possible to index these, but is not worth the extra
1976 overhead and complexity for most programs on most platforms.)
1977 * New segments are only appended to old ones when holding top-most
1978 memory; if they cannot be prepended to others, they are held in
1979 different segments.
1980
1981 Except for the top-most segment of an mstate, each segment record
1982 is kept at the tail of its segment. Segments are added by pushing
1983 segment records onto the list headed by &mstate.seg for the
1984 containing mstate.
1985
1986 Segment flags control allocation/merge/deallocation policies:
1987 * If EXTERN_BIT set, then we did not allocate this segment,
1988 and so should not try to deallocate or merge with others.
1989 (This currently holds only for the initial segment passed
1990 into create_mspace_with_base.)
1991 * If IS_MMAPPED_BIT set, the segment may be merged with
1992 other surrounding mmapped segments and trimmed/de-allocated
1993 using munmap.
1994 * If neither bit is set, then the segment was obtained using
1995 MORECORE so can be merged with surrounding MORECORE'd segments
1996 and deallocated/trimmed using MORECORE with negative arguments.
1997*/
1998
1999struct malloc_segment
2000{
2001 char *base; /* base address */
2002 size_t size; /* allocated size */
2003 struct malloc_segment *next; /* ptr to next segment */
2004 flag_t sflags; /* mmap and extern flag */
2005};
2006
2007#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
2008#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2009
2010typedef struct malloc_segment msegment;
2011typedef struct malloc_segment *msegmentptr;
2012
2013/* ---------------------------- malloc_state ----------------------------- */
2014
2015/*
2016 A malloc_state holds all of the bookkeeping for a space.
2017 The main fields are:
2018
2019 Top
2020 The topmost chunk of the currently active segment. Its size is
2021 cached in topsize. The actual size of topmost space is
2022 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2023 fenceposts and segment records if necessary when getting more
2024 space from the system. The size at which to autotrim top is
2025 cached from mparams in trim_check, except that it is disabled if
2026 an autotrim fails.
2027
2028 Designated victim (dv)
2029 This is the preferred chunk for servicing small requests that
2030 don't have exact fits. It is normally the chunk split off most
2031 recently to service another small request. Its size is cached in
2032 dvsize. The link fields of this chunk are not maintained since it
2033 is not kept in a bin.
2034
2035 SmallBins
2036 An array of bin headers for free chunks. These bins hold chunks
2037 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2038 chunks of all the same size, spaced 8 bytes apart. To simplify
2039 use in double-linked lists, each bin header acts as a malloc_chunk
2040 pointing to the real first node, if it exists (else pointing to
2041 itself). This avoids special-casing for headers. But to avoid
2042 waste, we allocate only the fd/bk pointers of bins, and then use
2043 repositioning tricks to treat these as the fields of a chunk.
2044
2045 TreeBins
2046 Treebins are pointers to the roots of trees holding a range of
2047 sizes. There are 2 equally spaced treebins for each power of two
2048 from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2049 larger.
2050
2051 Bin maps
2052 There is one bit map for small bins ("smallmap") and one for
2053 treebins ("treemap"). Each bin sets its bit when non-empty, and
2054 clears the bit when empty. Bit operations are then used to avoid
2055 bin-by-bin searching -- nearly all "search" is done without ever
2056 looking at bins that won't be selected. The bit maps
2057 conservatively use 32 bits per map word, even on a 64-bit system.
2058 For a good description of some of the bit-based techniques used
2059 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2060 supplement at http://hackersdelight.org/). Many of these are
2061 intended to reduce the branchiness of paths through malloc etc, as
2062 well as to reduce the number of memory locations read or written.
2063
2064 Segments
2065 A list of segments headed by an embedded malloc_segment record
2066 representing the initial space.
2067
2068 Address check support
2069 The least_addr field is the least address ever obtained from
2070 MORECORE or MMAP. Attempted frees and reallocs of any address less
2071 than this are trapped (unless INSECURE is defined).
2072
2073 Magic tag
2074 A cross-check field that should always hold same value as mparams.magic.
2075
2076 Flags
2077 Bits recording whether to use MMAP, locks, or contiguous MORECORE
2078
2079 Statistics
2080 Each space keeps track of current and maximum system memory
2081 obtained via MORECORE or MMAP.
2082
2083 Locking
2084 If USE_LOCKS is defined, the "mutex" lock is acquired and released
2085 around every public call using this mspace.
2086*/
2087
2088/* Bin types, widths and sizes */
2089#define NSMALLBINS (32U)
2090#define NTREEBINS (32U)
2091#define SMALLBIN_SHIFT (3U)
2092#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2093#define TREEBIN_SHIFT (8U)
2094#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2095#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2096#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2097
2098struct malloc_state
2099{
2100 binmap_t smallmap;
2101 binmap_t treemap;
2102 size_t dvsize;
2103 size_t topsize;
2104 char *least_addr;
2105 mchunkptr dv;
2106 mchunkptr top;
2107 size_t trim_check;
2108 size_t magic;
2109 mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2110 tbinptr treebins[NTREEBINS];
2111 size_t footprint;
2112 size_t max_footprint;
2113 flag_t mflags;
2114#if USE_LOCKS
2115 MLOCK_T mutex; /* locate lock among fields that rarely change */
2116#endif /* USE_LOCKS */
2117 msegment seg;
2118};
2119
2120typedef struct malloc_state *mstate;
2121
2122/* ------------- Global malloc_state and malloc_params ------------------- */
2123
2124/*
2125 malloc_params holds global properties, including those that can be
2126 dynamically set using mallopt. There is a single instance, mparams,
2127 initialized in init_mparams.
2128*/
2129
2130struct malloc_params
2131{
2132 size_t magic;
2133 size_t page_size;
2134 size_t granularity;
2135 size_t mmap_threshold;
2136 size_t trim_threshold;
2137 flag_t default_mflags;
2138};
2139
2140static struct malloc_params mparams;
2141
2142/* The global malloc_state used for all non-"mspace" calls */
2143static struct malloc_state _gm_;
2144#define gm (&_gm_)
2145#define is_global(M) ((M) == &_gm_)
2146#define is_initialized(M) ((M)->top != 0)
2147
2148/* -------------------------- system alloc setup ------------------------- */
2149
2150/* Operations on mflags */
2151
2152#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2153#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2154#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2155
2156#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2157#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2158#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2159
2160#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2161#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2162
2163#define set_lock(M,L)\
2164 ((M)->mflags = (L)?\
2165 ((M)->mflags | USE_LOCK_BIT) :\
2166 ((M)->mflags & ~USE_LOCK_BIT))
2167
2168/* page-align a size */
2169#define page_align(S)\
2170 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2171
2172/* granularity-align a size */
2173#define granularity_align(S)\
2174 (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
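
/*
  Note the rounding behavior: with a 4096-byte page, page_align(1) ==
  4096 while page_align(4096) == 8192 -- an exact multiple is bumped
  up by one extra page, since the macro adds a full page_size before
  masking.
*/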
2175
2176#define is_page_aligned(S)\
2177 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2178#define is_granularity_aligned(S)\
2179 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2180
2181/* True if segment S holds address A */
2182#define segment_holds(S, A)\
2183 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2184
2185/* Return segment holding given address */
2186static msegmentptr
2187segment_holding(mstate m, char *addr)
2188{
2189 msegmentptr sp = &m->seg;
2190 for (;;) {
2191 if (addr >= sp->base && addr < sp->base + sp->size)
2192 return sp;
2193 if ((sp = sp->next) == 0)
2194 return 0;
2195 }
2196}
2197
2198/* Return true if segment contains a segment link */
2199static int
2200has_segment_link(mstate m, msegmentptr ss)
2201{
2202 msegmentptr sp = &m->seg;
2203 for (;;) {
2204 if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2205 return 1;
2206 if ((sp = sp->next) == 0)
2207 return 0;
2208 }
2209}
2210
2211#ifndef MORECORE_CANNOT_TRIM
2212#define should_trim(M,s) ((s) > (M)->trim_check)
2213#else /* MORECORE_CANNOT_TRIM */
2214#define should_trim(M,s) (0)
2215#endif /* MORECORE_CANNOT_TRIM */
2216
2217/*
2218 TOP_FOOT_SIZE is padding at the end of a segment, including space
2219 that may be needed to place segment records and fenceposts when new
2220 noncontiguous segments are added.
2221*/
2222#define TOP_FOOT_SIZE\
2223 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2224
2225
2226/* ------------------------------- Hooks -------------------------------- */
2227
2228/*
2229 PREACTION should be defined to return 0 on success, and nonzero on
2230 failure. If you are not using locking, you can redefine these to do
2231 anything you like.
2232*/
2233
2234#if USE_LOCKS
2235
2236/* Ensure locks are initialized */
2237#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2238
2239#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2240#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2241#else /* USE_LOCKS */
2242
2243#ifndef PREACTION
2244#define PREACTION(M) (0)
2245#endif /* PREACTION */
2246
2247#ifndef POSTACTION
2248#define POSTACTION(M)
2249#endif /* POSTACTION */
2250
2251#endif /* USE_LOCKS */
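
/*
  The usage pattern throughout the rest of this file (a sketch; "work"
  stands in for the body of a public entry point):

      if (!PREACTION(m)) {
          ... work on m ...
          POSTACTION(m);
      }

  The lock is taken only when PREACTION succeeds, and every exit path
  must pass through POSTACTION to release it.
*/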
2252
2253/*
2254 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2255 USAGE_ERROR_ACTION is triggered on detected bad frees and
2256 reallocs. The argument p is an address that might have triggered the
2257 fault. It is ignored by the two predefined actions, but might be
2258 useful in custom actions that try to help diagnose errors.
2259*/
2260
2261#if PROCEED_ON_ERROR
2262
2263/* A count of the number of corruption errors causing resets */
2264int malloc_corruption_error_count;
2265
2266/* default corruption action */
2267static void reset_on_error(mstate m);
2268
2269#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2270#define USAGE_ERROR_ACTION(m, p)
2271
2272#else /* PROCEED_ON_ERROR */
2273
2274#ifndef CORRUPTION_ERROR_ACTION
2275#define CORRUPTION_ERROR_ACTION(m) ABORT
2276#endif /* CORRUPTION_ERROR_ACTION */
2277
2278#ifndef USAGE_ERROR_ACTION
2279#define USAGE_ERROR_ACTION(m,p) ABORT
2280#endif /* USAGE_ERROR_ACTION */
2281
2282#endif /* PROCEED_ON_ERROR */
2283
2284/* -------------------------- Debugging setup ---------------------------- */
2285
2286#if ! DEBUG
2287
2288#define check_free_chunk(M,P)
2289#define check_inuse_chunk(M,P)
2290#define check_malloced_chunk(M,P,N)
2291#define check_mmapped_chunk(M,P)
2292#define check_malloc_state(M)
2293#define check_top_chunk(M,P)
2294
2295#else /* DEBUG */
2296#define check_free_chunk(M,P) do_check_free_chunk(M,P)
2297#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2298#define check_top_chunk(M,P) do_check_top_chunk(M,P)
2299#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2300#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2301#define check_malloc_state(M) do_check_malloc_state(M)
2302
2303static void do_check_any_chunk(mstate m, mchunkptr p);
2304static void do_check_top_chunk(mstate m, mchunkptr p);
2305static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2306static void do_check_inuse_chunk(mstate m, mchunkptr p);
2307static void do_check_free_chunk(mstate m, mchunkptr p);
2308static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
2309static void do_check_tree(mstate m, tchunkptr t);
2310static void do_check_treebin(mstate m, bindex_t i);
2311static void do_check_smallbin(mstate m, bindex_t i);
2312static void do_check_malloc_state(mstate m);
2313static int bin_find(mstate m, mchunkptr x);
2314static size_t traverse_and_check(mstate m);
2315#endif /* DEBUG */
2316
2317/* ---------------------------- Indexing Bins ---------------------------- */
2318
2319#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2320#define small_index(s) ((s) >> SMALLBIN_SHIFT)
2321#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2322#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2323
2324/* addressing by index. See above about smallbin repositioning */
2325#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2326#define treebin_at(M,i) (&((M)->treebins[i]))
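
/*
  Worked example with SMALLBIN_SHIFT == 3: a 40-byte chunk maps to
  small_index(40) == 5 and small_index2size(5) == 40, so bin 5 holds
  exactly the 40-byte free chunks.
*/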
2327
2328/* assign tree index for size S to variable I */
2329#if defined(__GNUC__) && defined(i386)
2330#define compute_tree_index(S, I)\
2331{\
2332 size_t X = S >> TREEBIN_SHIFT;\
2333 if (X == 0)\
2334 I = 0;\
2335 else if (X > 0xFFFF)\
2336 I = NTREEBINS-1;\
2337 else {\
2338 unsigned int K;\
2339 __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2340 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2341 }\
2342}
2343#else /* GNUC */
2344#define compute_tree_index(S, I)\
2345{\
2346 size_t X = S >> TREEBIN_SHIFT;\
2347 if (X == 0)\
2348 I = 0;\
2349 else if (X > 0xFFFF)\
2350 I = NTREEBINS-1;\
2351 else {\
2352 unsigned int Y = (unsigned int)X;\
2353 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2354 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2355 N += K;\
2356 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2357 K = 14 - N + ((Y <<= K) >> 15);\
2358 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2359 }\
2360}
2361#endif /* GNUC */
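
/*
  Worked example (TREEBIN_SHIFT == 8): for S == 0x180 (384), X == 1,
  giving K == 0 and I == (0 << 1) + ((384 >> 7) & 1) == 1; for S ==
  0x200 (512), X == 2, giving K == 1 and I == (1 << 1) +
  ((512 >> 8) & 1) == 2. Each power-of-two range thus splits into two
  equally spaced bins.
*/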
2362
2363/* Bit representing maximum resolved size in a treebin at i */
2364#define bit_for_tree_index(i) \
2365 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2366
2367/* Shift placing maximum resolved bit in a treebin at i as sign bit */
2368#define leftshift_for_tree_index(i) \
2369 ((i == NTREEBINS-1)? 0 : \
2370 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2371
2372/* The size of the smallest chunk held in bin with index i */
2373#define minsize_for_tree_index(i) \
2374 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2375 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2376
2377
2378/* ------------------------ Operations on bin maps ----------------------- */
2379
2380/* bit corresponding to given index */
2381#define idx2bit(i) ((binmap_t)(1) << (i))
2382
2383/* Mark/Clear bits with given index */
2384#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2385#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2386#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2387
2388#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2389#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2390#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2391
2392/* index corresponding to given bit */
2393
2394#if defined(__GNUC__) && defined(i386)
2395#define compute_bit2idx(X, I)\
2396{\
2397 unsigned int J;\
2398 __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2399 I = (bindex_t)J;\
2400}
2401
2402#else /* GNUC */
2403#if USE_BUILTIN_FFS
2404#define compute_bit2idx(X, I) I = ffs(X)-1
2405
2406#else /* USE_BUILTIN_FFS */
2407#define compute_bit2idx(X, I)\
2408{\
2409 unsigned int Y = X - 1;\
2410 unsigned int K = Y >> (16-4) & 16;\
2411 unsigned int N = K; Y >>= K;\
2412 N += K = Y >> (8-3) & 8; Y >>= K;\
2413 N += K = Y >> (4-2) & 4; Y >>= K;\
2414 N += K = Y >> (2-1) & 2; Y >>= K;\
2415 N += K = Y >> (1-0) & 1; Y >>= K;\
2416 I = (bindex_t)(N + Y);\
2417}
2418#endif /* USE_BUILTIN_FFS */
2419#endif /* GNUC */
2420
2421/* isolate the least set bit of a bitmap */
2422#define least_bit(x) ((x) & -(x))
2423
2424/* mask with all bits to left of least bit of x on */
2425#define left_bits(x) ((x<<1) | -(x<<1))
2426
2427/* mask with all bits to left of or equal to least bit of x on */
2428#define same_or_left_bits(x) ((x) | -(x))
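
/*
  Bit-trick examples: least_bit(0x14) == 0x04 (the lowest set bit),
  and left_bits(0x04) == ~(binmap_t)7, the mask of all higher bins.
  Together they let the bin scans below find the first non-empty bin
  at or above a target index without looping.
*/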
2429
2430
2431/* ----------------------- Runtime Check Support ------------------------- */
2432
2433/*
2434 For security, the main invariant is that malloc/free/etc never
2435 writes to a static address other than malloc_state, unless static
2436 malloc_state itself has been corrupted, which cannot occur via
2437 malloc (because of these checks). In essence this means that we
2438 believe all pointers, sizes, maps etc held in malloc_state, but
2439 check all of those linked or offsetted from other embedded data
2440 structures. These checks are interspersed with main code in a way
2441 that tends to minimize their run-time cost.
2442
2443 When FOOTERS is defined, in addition to range checking, we also
2444 verify footer fields of inuse chunks, which can be used to guarantee
2445 that the mstate controlling malloc/free is intact. This is a
2446 streamlined version of the approach described by William Robertson
2447 et al in "Run-time Detection of Heap-based Overflows" LISA'03
2448 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2449 of an inuse chunk holds the xor of its mstate and a random seed,
2450 that is checked upon calls to free() and realloc(). This is
2451 (probabilistically) unguessable from outside the program, but can be
2452 computed by any code successfully malloc'ing any chunk, so does not
2453 itself provide protection against code that has already broken
2454 security through some other means. Unlike Robertson et al, we
2455 always dynamically check addresses of all offset chunks (previous,
2456 next, etc). This turns out to be cheaper than relying on hashes.
2457*/
2458
2459#if !INSECURE
2460/* Check if address a is at least as high as any from MORECORE or MMAP */
2461#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2462/* Check if address of next chunk n is higher than base chunk p */
2463#define ok_next(p, n) ((char*)(p) < (char*)(n))
2464/* Check if p has its cinuse bit on */
2465#define ok_cinuse(p) cinuse(p)
2466/* Check if p has its pinuse bit on */
2467#define ok_pinuse(p) pinuse(p)
2468
2469#else /* !INSECURE */
2470#define ok_address(M, a) (1)
2471#define ok_next(b, n) (1)
2472#define ok_cinuse(p) (1)
2473#define ok_pinuse(p) (1)
2474#endif /* !INSECURE */
2475
2476#if (FOOTERS && !INSECURE)
2477/* Check if (alleged) mstate m has expected magic field */
2478#define ok_magic(M) ((M)->magic == mparams.magic)
2479#else /* (FOOTERS && !INSECURE) */
2480#define ok_magic(M) (1)
2481#endif /* (FOOTERS && !INSECURE) */
2482
2483
2484/* In gcc, use __builtin_expect to minimize impact of checks */
2485#if !INSECURE
2486#if defined(__GNUC__) && __GNUC__ >= 3
2487#define RTCHECK(e) __builtin_expect(e, 1)
2488#else /* GNUC */
2489#define RTCHECK(e) (e)
2490#endif /* GNUC */
2491#else /* !INSECURE */
2492#define RTCHECK(e) (1)
2493#endif /* !INSECURE */
2494
2495/* macros to set up inuse chunks with or without footers */
2496
2497#if !FOOTERS
2498
2499#define mark_inuse_foot(M,p,s)
2500
2501/* Set cinuse bit and pinuse bit of next chunk */
2502#define set_inuse(M,p,s)\
2503 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2504 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2505
2506/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2507#define set_inuse_and_pinuse(M,p,s)\
2508 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2509 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2510
2511/* Set size, cinuse and pinuse bit of this chunk */
2512#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2513 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2514
2515#else /* FOOTERS */
2516
2517/* Set foot of inuse chunk to be xor of mstate and seed */
2518#define mark_inuse_foot(M,p,s)\
2519 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2520
2521#define get_mstate_for(p)\
2522 ((mstate)(((mchunkptr)((char*)(p) +\
2523 (chunksize(p))))->prev_foot ^ mparams.magic))
2524
2525#define set_inuse(M,p,s)\
2526 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2527 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2528 mark_inuse_foot(M,p,s))
2529
2530#define set_inuse_and_pinuse(M,p,s)\
2531 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2532 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2533 mark_inuse_foot(M,p,s))
2534
2535#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2536 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2537 mark_inuse_foot(M, p, s))
2538
2539#endif /* !FOOTERS */
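
/*
  A small illustration of the footer scheme above: after
  mark_inuse_foot(M, p, s), the word just past the chunk holds
  (size_t)M ^ mparams.magic, so get_mstate_for(p) xors the magic back
  out and recovers M. A corrupted footer instead yields a bogus
  pointer that fails the ok_magic() check.
*/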
2540
2541/* ---------------------------- setting mparams -------------------------- */
2542
2543/* Initialize mparams */
2544static int
2545init_mparams(void)
2546{
2547 if (mparams.page_size == 0) {
2548 size_t s;
2549
2550 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2551 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2552#if MORECORE_CONTIGUOUS
2553 mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
2554#else /* MORECORE_CONTIGUOUS */
2555 mparams.default_mflags =
2556 USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
2557#endif /* MORECORE_CONTIGUOUS */
2558
2559#if (FOOTERS && !INSECURE)
2560 {
2561#if USE_DEV_RANDOM
2562 int fd;
2563 unsigned char buf[sizeof(size_t)];
2564 /* Try to use /dev/urandom, else fall back on using time */
2565 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2566 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2567 s = *((size_t *) buf);
2568 close(fd);
2569 } else
2570#endif /* USE_DEV_RANDOM */
2571 s = (size_t) (time(0) ^ (size_t) 0x55555555U);
2572
2573 s |= (size_t) 8U; /* ensure nonzero */
2574 s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2575
2576 }
2577#else /* (FOOTERS && !INSECURE) */
2578 s = (size_t) 0x58585858U;
2579#endif /* (FOOTERS && !INSECURE) */
2580 ACQUIRE_MAGIC_INIT_LOCK();
2581 if (mparams.magic == 0) {
2582 mparams.magic = s;
2583 /* Set up lock for main malloc area */
2584 INITIAL_LOCK(&gm->mutex);
2585 gm->mflags = mparams.default_mflags;
2586 }
2587 RELEASE_MAGIC_INIT_LOCK();
2588
2589#if !defined(WIN32) && !defined(__OS2__)
2590 mparams.page_size = malloc_getpagesize;
2591 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2592 DEFAULT_GRANULARITY : mparams.page_size);
2593#elif defined (__OS2__)
2594 /* if low-memory is used, os2munmap() would break
2595 if it were anything other than 64k */
2596 mparams.page_size = 4096u;
2597 mparams.granularity = 65536u;
2598#else /* WIN32 */
2599 {
2600 SYSTEM_INFO system_info;
2601 GetSystemInfo(&system_info);
2602 mparams.page_size = system_info.dwPageSize;
2603 mparams.granularity = system_info.dwAllocationGranularity;
2604 }
2605#endif /* WIN32 */
2606
2607 /* Sanity-check configuration:
2608 size_t must be unsigned and as wide as pointer type.
2609 ints must be at least 4 bytes.
2610 alignment must be at least 8.
2611 Alignment, min chunk size, and page size must all be powers of 2.
2612 */
2613 if ((sizeof(size_t) != sizeof(char *)) ||
2614 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2615 (sizeof(int) < 4) ||
2616 (MALLOC_ALIGNMENT < (size_t) 8U) ||
2617 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2618 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2619 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2620 || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2621 ABORT;
2622 }
2623 return 0;
2624}
2625
2626/* support for mallopt */
2627static int
2628change_mparam(int param_number, int value)
2629{
2630 size_t val = (size_t) value;
2631 init_mparams();
2632 switch (param_number) {
2633 case M_TRIM_THRESHOLD:
2634 mparams.trim_threshold = val;
2635 return 1;
2636 case M_GRANULARITY:
2637 if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
2638 mparams.granularity = val;
2639 return 1;
2640 } else
2641 return 0;
2642 case M_MMAP_THRESHOLD:
2643 mparams.mmap_threshold = val;
2644 return 1;
2645 default:
2646 return 0;
2647 }
2648}
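
/*
  For example, a mallopt(M_GRANULARITY, 65536) call lands here and is
  accepted because 65536 is a power of two no smaller than the page
  size, whereas mallopt(M_GRANULARITY, 3000) would return 0.
*/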
2649
2650#if DEBUG
2651/* ------------------------- Debugging Support --------------------------- */
2652
2653/* Check properties of any chunk, whether free, inuse, mmapped etc */
2654static void
2655do_check_any_chunk(mstate m, mchunkptr p)
2656{
2657 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2658 assert(ok_address(m, p));
2659}
2660
2661/* Check properties of top chunk */
2662static void
2663do_check_top_chunk(mstate m, mchunkptr p)
2664{
2665 msegmentptr sp = segment_holding(m, (char *) p);
2666 size_t sz = chunksize(p);
2667 assert(sp != 0);
2668 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2669 assert(ok_address(m, p));
2670 assert(sz == m->topsize);
2671 assert(sz > 0);
2672 assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2673 assert(pinuse(p));
2674 assert(!next_pinuse(p));
2675}
2676
2677/* Check properties of (inuse) mmapped chunks */
2678static void
2679do_check_mmapped_chunk(mstate m, mchunkptr p)
2680{
2681 size_t sz = chunksize(p);
2682 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2683 assert(is_mmapped(p));
2684 assert(use_mmap(m));
2685 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2686 assert(ok_address(m, p));
2687 assert(!is_small(sz));
2688 assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2689 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2690 assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
2691}
2692
2693/* Check properties of inuse chunks */
2694static void
2695do_check_inuse_chunk(mstate m, mchunkptr p)
2696{
2697 do_check_any_chunk(m, p);
2698 assert(cinuse(p));
2699 assert(next_pinuse(p));
2700 /* If not pinuse and not mmapped, previous chunk has OK offset */
2701 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2702 if (is_mmapped(p))
2703 do_check_mmapped_chunk(m, p);
2704}
2705
2706/* Check properties of free chunks */
2707static void
2708do_check_free_chunk(mstate m, mchunkptr p)
2709{
2710 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2711 mchunkptr next = chunk_plus_offset(p, sz);
2712 do_check_any_chunk(m, p);
2713 assert(!cinuse(p));
2714 assert(!next_pinuse(p));
2715 assert(!is_mmapped(p));
2716 if (p != m->dv && p != m->top) {
2717 if (sz >= MIN_CHUNK_SIZE) {
2718 assert((sz & CHUNK_ALIGN_MASK) == 0);
2719 assert(is_aligned(chunk2mem(p)));
2720 assert(next->prev_foot == sz);
2721 assert(pinuse(p));
2722 assert(next == m->top || cinuse(next));
2723 assert(p->fd->bk == p);
2724 assert(p->bk->fd == p);
2725 } else /* markers are always of size SIZE_T_SIZE */
2726 assert(sz == SIZE_T_SIZE);
2727 }
2728}
2729
2730/* Check properties of malloced chunks at the point they are malloced */
2731static void
2732do_check_malloced_chunk(mstate m, void *mem, size_t s)
2733{
2734 if (mem != 0) {
2735 mchunkptr p = mem2chunk(mem);
2736 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2737 do_check_inuse_chunk(m, p);
2738 assert((sz & CHUNK_ALIGN_MASK) == 0);
2739 assert(sz >= MIN_CHUNK_SIZE);
2740 assert(sz >= s);
2741 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2742 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2743 }
2744}
2745
2746/* Check a tree and its subtrees. */
2747static void
2748do_check_tree(mstate m, tchunkptr t)
2749{
2750 tchunkptr head = 0;
2751 tchunkptr u = t;
2752 bindex_t tindex = t->index;
2753 size_t tsize = chunksize(t);
2754 bindex_t idx;
2755 compute_tree_index(tsize, idx);
2756 assert(tindex == idx);
2757 assert(tsize >= MIN_LARGE_SIZE);
2758 assert(tsize >= minsize_for_tree_index(idx));
2759 assert((idx == NTREEBINS - 1)
2760 || (tsize < minsize_for_tree_index((idx + 1))));
2761
2762 do { /* traverse through chain of same-sized nodes */
2763 do_check_any_chunk(m, ((mchunkptr) u));
2764 assert(u->index == tindex);
2765 assert(chunksize(u) == tsize);
2766 assert(!cinuse(u));
2767 assert(!next_pinuse(u));
2768 assert(u->fd->bk == u);
2769 assert(u->bk->fd == u);
2770 if (u->parent == 0) {
2771 assert(u->child[0] == 0);
2772 assert(u->child[1] == 0);
2773 } else {
2774 assert(head == 0); /* only one node on chain has parent */
2775 head = u;
2776 assert(u->parent != u);
2777 assert(u->parent->child[0] == u ||
2778 u->parent->child[1] == u ||
2779 *((tbinptr *) (u->parent)) == u);
2780 if (u->child[0] != 0) {
2781 assert(u->child[0]->parent == u);
2782 assert(u->child[0] != u);
2783 do_check_tree(m, u->child[0]);
2784 }
2785 if (u->child[1] != 0) {
2786 assert(u->child[1]->parent == u);
2787 assert(u->child[1] != u);
2788 do_check_tree(m, u->child[1]);
2789 }
2790 if (u->child[0] != 0 && u->child[1] != 0) {
2791 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
2792 }
2793 }
2794 u = u->fd;
2795 } while (u != t);
2796 assert(head != 0);
2797}
2798
2799/* Check all the chunks in a treebin. */
2800static void
2801do_check_treebin(mstate m, bindex_t i)
2802{
2803 tbinptr *tb = treebin_at(m, i);
2804 tchunkptr t = *tb;
2805 int empty = (m->treemap & (1U << i)) == 0;
2806 if (t == 0)
2807 assert(empty);
2808 if (!empty)
2809 do_check_tree(m, t);
2810}
2811
2812/* Check all the chunks in a smallbin. */
2813static void
2814do_check_smallbin(mstate m, bindex_t i)
2815{
2816 sbinptr b = smallbin_at(m, i);
2817 mchunkptr p = b->bk;
2818 unsigned int empty = (m->smallmap & (1U << i)) == 0;
2819 if (p == b)
2820 assert(empty);
2821 if (!empty) {
2822 for (; p != b; p = p->bk) {
2823 size_t size = chunksize(p);
2824 mchunkptr q;
2825 /* each chunk claims to be free */
2826 do_check_free_chunk(m, p);
2827 /* chunk belongs in bin */
2828 assert(small_index(size) == i);
2829 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
2830 /* chunk is followed by an inuse chunk */
2831 q = next_chunk(p);
2832 if (q->head != FENCEPOST_HEAD)
2833 do_check_inuse_chunk(m, q);
2834 }
2835 }
2836}
2837
2838/* Find x in a bin. Used in other check functions. */
2839static int
2840bin_find(mstate m, mchunkptr x)
2841{
2842 size_t size = chunksize(x);
2843 if (is_small(size)) {
2844 bindex_t sidx = small_index(size);
2845 sbinptr b = smallbin_at(m, sidx);
2846 if (smallmap_is_marked(m, sidx)) {
2847 mchunkptr p = b;
2848 do {
2849 if (p == x)
2850 return 1;
2851 } while ((p = p->fd) != b);
2852 }
2853 } else {
2854 bindex_t tidx;
2855 compute_tree_index(size, tidx);
2856 if (treemap_is_marked(m, tidx)) {
2857 tchunkptr t = *treebin_at(m, tidx);
2858 size_t sizebits = size << leftshift_for_tree_index(tidx);
2859 while (t != 0 && chunksize(t) != size) {
2860 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2861 sizebits <<= 1;
2862 }
2863 if (t != 0) {
2864 tchunkptr u = t;
2865 do {
2866 if (u == (tchunkptr) x)
2867 return 1;
2868 } while ((u = u->fd) != t);
2869 }
2870 }
2871 }
2872 return 0;
2873}
2874
2875/* Traverse each chunk and check it; return total */
2876static size_t
2877traverse_and_check(mstate m)
2878{
2879 size_t sum = 0;
2880 if (is_initialized(m)) {
2881 msegmentptr s = &m->seg;
2882 sum += m->topsize + TOP_FOOT_SIZE;
2883 while (s != 0) {
2884 mchunkptr q = align_as_chunk(s->base);
2885 mchunkptr lastq = 0;
2886 assert(pinuse(q));
2887 while (segment_holds(s, q) &&
2888 q != m->top && q->head != FENCEPOST_HEAD) {
2889 sum += chunksize(q);
2890 if (cinuse(q)) {
2891 assert(!bin_find(m, q));
2892 do_check_inuse_chunk(m, q);
2893 } else {
2894 assert(q == m->dv || bin_find(m, q));
2895 assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
2896 do_check_free_chunk(m, q);
2897 }
2898 lastq = q;
2899 q = next_chunk(q);
2900 }
2901 s = s->next;
2902 }
2903 }
2904 return sum;
2905}
2906
2907/* Check all properties of malloc_state. */
2908static void
2909do_check_malloc_state(mstate m)
2910{
2911 bindex_t i;
2912 size_t total;
2913 /* check bins */
2914 for (i = 0; i < NSMALLBINS; ++i)
2915 do_check_smallbin(m, i);
2916 for (i = 0; i < NTREEBINS; ++i)
2917 do_check_treebin(m, i);
2918
2919 if (m->dvsize != 0) { /* check dv chunk */
2920 do_check_any_chunk(m, m->dv);
2921 assert(m->dvsize == chunksize(m->dv));
2922 assert(m->dvsize >= MIN_CHUNK_SIZE);
2923 assert(bin_find(m, m->dv) == 0);
2924 }
2925
2926 if (m->top != 0) { /* check top chunk */
2927 do_check_top_chunk(m, m->top);
2928 assert(m->topsize == chunksize(m->top));
2929 assert(m->topsize > 0);
2930 assert(bin_find(m, m->top) == 0);
2931 }
2932
2933 total = traverse_and_check(m);
2934 assert(total <= m->footprint);
2935 assert(m->footprint <= m->max_footprint);
2936}
2937#endif /* DEBUG */
2938
2939/* ----------------------------- statistics ------------------------------ */
2940
2941#if !NO_MALLINFO
2942static struct mallinfo
2943internal_mallinfo(mstate m)
2944{
2945 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2946 if (!PREACTION(m)) {
2947 check_malloc_state(m);
2948 if (is_initialized(m)) {
2949 size_t nfree = SIZE_T_ONE; /* top always free */
2950 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2951 size_t sum = mfree;
2952 msegmentptr s = &m->seg;
2953 while (s != 0) {
2954 mchunkptr q = align_as_chunk(s->base);
2955 while (segment_holds(s, q) &&
2956 q != m->top && q->head != FENCEPOST_HEAD) {
2957 size_t sz = chunksize(q);
2958 sum += sz;
2959 if (!cinuse(q)) {
2960 mfree += sz;
2961 ++nfree;
2962 }
2963 q = next_chunk(q);
2964 }
2965 s = s->next;
2966 }
2967
2968 nm.arena = sum;
2969 nm.ordblks = nfree;
2970 nm.hblkhd = m->footprint - sum;
2971 nm.usmblks = m->max_footprint;
2972 nm.uordblks = m->footprint - mfree;
2973 nm.fordblks = mfree;
2974 nm.keepcost = m->topsize;
2975 }
2976
2977 POSTACTION(m);
2978 }
2979 return nm;
2980}
2981#endif /* !NO_MALLINFO */
2982
2983static void
2984internal_malloc_stats(mstate m)
2985{
2986 if (!PREACTION(m)) {
2987#ifndef LACKS_STDIO_H
2988 size_t maxfp = 0;
2989#endif
2990 size_t fp = 0;
2991 size_t used = 0;
2992 check_malloc_state(m);
2993 if (is_initialized(m)) {
2994 msegmentptr s = &m->seg;
2995#ifndef LACKS_STDIO_H
2996 maxfp = m->max_footprint;
2997#endif
2998 fp = m->footprint;
2999 used = fp - (m->topsize + TOP_FOOT_SIZE);
3000
3001 while (s != 0) {
3002 mchunkptr q = align_as_chunk(s->base);
3003 while (segment_holds(s, q) &&
3004 q != m->top && q->head != FENCEPOST_HEAD) {
3005 if (!cinuse(q))
3006 used -= chunksize(q);
3007 q = next_chunk(q);
3008 }
3009 s = s->next;
3010 }
3011 }
3012#ifndef LACKS_STDIO_H
3013 fprintf(stderr, "max system bytes = %10lu\n",
3014 (unsigned long) (maxfp));
3015 fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
3016 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
3017#endif
3018
3019 POSTACTION(m);
3020 }
3021}
3022
3023/* ----------------------- Operations on smallbins ----------------------- */
3024
3025/*
3026 Various forms of linking and unlinking are defined as macros. Even
3027 the ones for trees, which are very long but have very short typical
3028 paths. This is ugly but reduces reliance on inlining support of
3029 compilers.
3030*/
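/*
  A minimal sketch of the list shape these macros maintain (error
  checks and bitmap updates omitted): each smallbin header B is a
  circular doubly-linked list, and insert_small_chunk below is
  front-insertion into it:

    F = B->fd;            // old first chunk (B itself if bin is empty)
    B->fd = P;  F->bk = P;
    P->fd = F;  P->bk = B;
*/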
3031
3032/* Link a free chunk into a smallbin */
3033#define insert_small_chunk(M, P, S) {\
3034 bindex_t I = small_index(S);\
3035 mchunkptr B = smallbin_at(M, I);\
3036 mchunkptr F = B;\
3037 assert(S >= MIN_CHUNK_SIZE);\
3038 if (!smallmap_is_marked(M, I))\
3039 mark_smallmap(M, I);\
3040 else if (RTCHECK(ok_address(M, B->fd)))\
3041 F = B->fd;\
3042 else {\
3043 CORRUPTION_ERROR_ACTION(M);\
3044 }\
3045 B->fd = P;\
3046 F->bk = P;\
3047 P->fd = F;\
3048 P->bk = B;\
3049}
3050
3051/* Unlink a chunk from a smallbin */
3052#define unlink_small_chunk(M, P, S) {\
3053 mchunkptr F = P->fd;\
3054 mchunkptr B = P->bk;\
3055 bindex_t I = small_index(S);\
3056 assert(P != B);\
3057 assert(P != F);\
3058 assert(chunksize(P) == small_index2size(I));\
3059 if (F == B)\
3060 clear_smallmap(M, I);\
3061 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3062 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3063 F->bk = B;\
3064 B->fd = F;\
3065 }\
3066 else {\
3067 CORRUPTION_ERROR_ACTION(M);\
3068 }\
3069}
3070
3071/* Unlink the first chunk from a smallbin */
3072#define unlink_first_small_chunk(M, B, P, I) {\
3073 mchunkptr F = P->fd;\
3074 assert(P != B);\
3075 assert(P != F);\
3076 assert(chunksize(P) == small_index2size(I));\
3077 if (B == F)\
3078 clear_smallmap(M, I);\
3079 else if (RTCHECK(ok_address(M, F))) {\
3080 B->fd = F;\
3081 F->bk = B;\
3082 }\
3083 else {\
3084 CORRUPTION_ERROR_ACTION(M);\
3085 }\
3086}
3087
3088/* Replace dv node, binning the old one */
3089/* Used only when dvsize known to be small */
3090#define replace_dv(M, P, S) {\
3091 size_t DVS = M->dvsize;\
3092 if (DVS != 0) {\
3093 mchunkptr DV = M->dv;\
3094 assert(is_small(DVS));\
3095 insert_small_chunk(M, DV, DVS);\
3096 }\
3097 M->dvsize = S;\
3098 M->dv = P;\
3099}
3100
3101/* ------------------------- Operations on trees ------------------------- */
3102
3103/* Insert chunk into tree */
3104#define insert_large_chunk(M, X, S) {\
3105 tbinptr* H;\
3106 bindex_t I;\
3107 compute_tree_index(S, I);\
3108 H = treebin_at(M, I);\
3109 X->index = I;\
3110 X->child[0] = X->child[1] = 0;\
3111 if (!treemap_is_marked(M, I)) {\
3112 mark_treemap(M, I);\
3113 *H = X;\
3114 X->parent = (tchunkptr)H;\
3115 X->fd = X->bk = X;\
3116 }\
3117 else {\
3118 tchunkptr T = *H;\
3119 size_t K = S << leftshift_for_tree_index(I);\
3120 for (;;) {\
3121 if (chunksize(T) != S) {\
3122 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3123 K <<= 1;\
3124 if (*C != 0)\
3125 T = *C;\
3126 else if (RTCHECK(ok_address(M, C))) {\
3127 *C = X;\
3128 X->parent = T;\
3129 X->fd = X->bk = X;\
3130 break;\
3131 }\
3132 else {\
3133 CORRUPTION_ERROR_ACTION(M);\
3134 break;\
3135 }\
3136 }\
3137 else {\
3138 tchunkptr F = T->fd;\
3139 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3140 T->fd = F->bk = X;\
3141 X->fd = F;\
3142 X->bk = T;\
3143 X->parent = 0;\
3144 break;\
3145 }\
3146 else {\
3147 CORRUPTION_ERROR_ACTION(M);\
3148 break;\
3149 }\
3150 }\
3151 }\
3152 }\
3153}
3154
3155/*
3156 Unlink steps:
3157
3158 1. If x is a chained node, unlink it from its same-sized fd/bk links
3159 and choose its bk node as its replacement.
3160 2. If x was the last node of its size, but not a leaf node, it must
3161 be replaced with a leaf node (not merely one with an open left or
3162      right), to make sure that lefts and rights of descendants
3163      correspond properly to bit masks. We use the rightmost descendant
3164      of x. We could use any other leaf, but this is easy to locate and
3165      tends to counteract removal of leftmost nodes elsewhere, and so keeps
3166 paths shorter than minimally guaranteed. This doesn't loop much
3167 because on average a node in a tree is near the bottom.
3168 3. If x is the base of a chain (i.e., has parent links) relink
3169 x's parent and children to x's replacement (or null if none).
3170*/
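/*
  For step 2, the macro below locates the rightmost descendant by
  repeatedly preferring child[1] over child[0] until reaching a leaf,
  remembering RP, the parent link that must be cleared:

    while ((*(CP = &(R->child[1])) != 0) ||
           (*(CP = &(R->child[0])) != 0)) {
        R = *(RP = CP);
    }
    *RP = 0;              // detach the leaf so it can replace x
*/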
3171
3172#define unlink_large_chunk(M, X) {\
3173 tchunkptr XP = X->parent;\
3174 tchunkptr R;\
3175 if (X->bk != X) {\
3176 tchunkptr F = X->fd;\
3177 R = X->bk;\
3178 if (RTCHECK(ok_address(M, F))) {\
3179 F->bk = R;\
3180 R->fd = F;\
3181 }\
3182 else {\
3183 CORRUPTION_ERROR_ACTION(M);\
3184 }\
3185 }\
3186 else {\
3187 tchunkptr* RP;\
3188 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3189 ((R = *(RP = &(X->child[0]))) != 0)) {\
3190 tchunkptr* CP;\
3191 while ((*(CP = &(R->child[1])) != 0) ||\
3192 (*(CP = &(R->child[0])) != 0)) {\
3193 R = *(RP = CP);\
3194 }\
3195 if (RTCHECK(ok_address(M, RP)))\
3196 *RP = 0;\
3197 else {\
3198 CORRUPTION_ERROR_ACTION(M);\
3199 }\
3200 }\
3201 }\
3202 if (XP != 0) {\
3203 tbinptr* H = treebin_at(M, X->index);\
3204 if (X == *H) {\
3205 if ((*H = R) == 0) \
3206 clear_treemap(M, X->index);\
3207 }\
3208 else if (RTCHECK(ok_address(M, XP))) {\
3209 if (XP->child[0] == X) \
3210 XP->child[0] = R;\
3211 else \
3212 XP->child[1] = R;\
3213 }\
3214 else\
3215 CORRUPTION_ERROR_ACTION(M);\
3216 if (R != 0) {\
3217 if (RTCHECK(ok_address(M, R))) {\
3218 tchunkptr C0, C1;\
3219 R->parent = XP;\
3220 if ((C0 = X->child[0]) != 0) {\
3221 if (RTCHECK(ok_address(M, C0))) {\
3222 R->child[0] = C0;\
3223 C0->parent = R;\
3224 }\
3225 else\
3226 CORRUPTION_ERROR_ACTION(M);\
3227 }\
3228 if ((C1 = X->child[1]) != 0) {\
3229 if (RTCHECK(ok_address(M, C1))) {\
3230 R->child[1] = C1;\
3231 C1->parent = R;\
3232 }\
3233 else\
3234 CORRUPTION_ERROR_ACTION(M);\
3235 }\
3236 }\
3237 else\
3238 CORRUPTION_ERROR_ACTION(M);\
3239 }\
3240 }\
3241}
3242
3243/* Relays to large vs small bin operations */
3244
3245#define insert_chunk(M, P, S)\
3246 if (is_small(S)) insert_small_chunk(M, P, S)\
3247 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3248
3249#define unlink_chunk(M, P, S)\
3250 if (is_small(S)) unlink_small_chunk(M, P, S)\
3251 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3252
3253
3254/* Relays to internal calls to malloc/free from realloc, memalign etc */
3255
3256#if ONLY_MSPACES
3257#define internal_malloc(m, b) mspace_malloc(m, b)
3258#define internal_free(m, mem) mspace_free(m,mem);
3259#else /* ONLY_MSPACES */
3260#if MSPACES
3261#define internal_malloc(m, b)\
3262 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3263#define internal_free(m, mem)\
3264 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3265#else /* MSPACES */
3266#define internal_malloc(m, b) dlmalloc(b)
3267#define internal_free(m, mem) dlfree(mem)
3268#endif /* MSPACES */
3269#endif /* ONLY_MSPACES */
3270
3271/* ----------------------- Direct-mmapping chunks ----------------------- */
3272
3273/*
3274 Directly mmapped chunks are set up with an offset to the start of
3275 the mmapped region stored in the prev_foot field of the chunk. This
3276 allows reconstruction of the required argument to MUNMAP when freed,
3277 and also allows adjustment of the returned chunk to meet alignment
3278 requirements (especially in memalign). There is also enough space
3279 allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3280 the PINUSE bit so frees can be checked.
3281*/
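/*
  A worked example of this layout (illustrative numbers only; assumes
  8-byte size_t, 16-byte alignment, 64KB granularity):

    nb     = 200000                                  padded request
    mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK)
           = granularity_align(200063) = 262144      whole mmap region
    offset = align_offset(chunk2mem(mm))             alignment skip
    psize  = mmsize - offset - MMAP_FOOT_PAD         usable chunk size

  Storing offset | IS_MMAPPED_BIT in prev_foot lets free recover the
  region base as (char *) p - offset and pass mmsize back to MUNMAP.
*/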
3282
3283/* Malloc using mmap */
3284static void *
3285mmap_alloc(mstate m, size_t nb)
3286{
3287     size_t mmsize =
3288         granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3289     if (mmsize > nb) { /* Check for wrap around 0 */
3290 char *mm = (char *) (DIRECT_MMAP(mmsize));
3291 if (mm != CMFAIL) {
3292 size_t offset = align_offset(chunk2mem(mm));
3293 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3294             mchunkptr p = (mchunkptr) (mm + offset);
3295             (p)->prev_foot = offset | IS_MMAPPED_BIT;
3296             (p)->head = (psize | CINUSE_BIT);
3297 mark_inuse_foot(m, p, psize);
3298 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3299 chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
3300
3301 if (mm < m->least_addr)
3302 m->least_addr = mm;
3303 if ((m->footprint += mmsize) > m->max_footprint)
3304                 m->max_footprint = m->footprint;
3305             assert(is_aligned(chunk2mem(p)));
3306             check_mmapped_chunk(m, p);
3307             return chunk2mem(p);
3308 }
3309 }
3310 return 0;
3311}
3312
3313/* Realloc using mmap */
3314static mchunkptr
3315mmap_resize(mstate m, mchunkptr oldp, size_t nb)
3316{
3317 size_t oldsize = chunksize(oldp);
3318 if (is_small(nb)) /* Can't shrink mmap regions below small size */
3319 return 0;
3320 /* Keep old chunk if big enough but not too big */
3321 if (oldsize >= nb + SIZE_T_SIZE &&
3322 (oldsize - nb) <= (mparams.granularity << 1))
3323 return oldp;
3324 else {
3325 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3326 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3327         size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3328                                              CHUNK_ALIGN_MASK);
3329         char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
3330 oldmmsize, newmmsize, 1);
3331 if (cp != CMFAIL) {
3332 mchunkptr newp = (mchunkptr) (cp + offset);
3333 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3334 newp->head = (psize | CINUSE_BIT);
3335 mark_inuse_foot(m, newp, psize);
3336 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3337 chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
3338
3339 if (cp < m->least_addr)
3340 m->least_addr = cp;
3341 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3342 m->max_footprint = m->footprint;
3343 check_mmapped_chunk(m, newp);
3344 return newp;
3345 }
3346 }
3347 return 0;
3348}
3349
3350/* -------------------------- mspace management -------------------------- */
3351
3352/* Initialize top chunk and its size */
3353static void
3354init_top(mstate m, mchunkptr p, size_t psize)
3355{
3356 /* Ensure alignment */
3357 size_t offset = align_offset(chunk2mem(p));
3358 p = (mchunkptr) ((char *) p + offset);
3359 psize -= offset;
3360
3361 m->top = p;
3362 m->topsize = psize;
3363 p->head = psize | PINUSE_BIT;
3364 /* set size of fake trailing chunk holding overhead space only once */
3365 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3366 m->trim_check = mparams.trim_threshold; /* reset on each update */
3367}
3368
3369/* Initialize bins for a new mstate that is otherwise zeroed out */
3370 static void
3371 init_bins(mstate m)
3372 {
3373 /* Establish circular links for smallbins */
3374 bindex_t i;
3375 for (i = 0; i < NSMALLBINS; ++i) {
3376 sbinptr bin = smallbin_at(m, i);
3377 bin->fd = bin->bk = bin;
3378 }
3379}
3380
3381#if PROCEED_ON_ERROR
3382
3383/* default corruption action */
3384static void
3385reset_on_error(mstate m)
3386{
3387 int i;
3388 ++malloc_corruption_error_count;
3389 /* Reinitialize fields to forget about all memory */
3390 m->smallbins = m->treebins = 0;
3391 m->dvsize = m->topsize = 0;
3392 m->seg.base = 0;
3393 m->seg.size = 0;
3394 m->seg.next = 0;
3395 m->top = m->dv = 0;
3396 for (i = 0; i < NTREEBINS; ++i)
3397 *treebin_at(m, i) = 0;
3398 init_bins(m);
3399}
3400#endif /* PROCEED_ON_ERROR */
3401
3402/* Allocate chunk and prepend remainder with chunk in successor base. */
3403static void *
3404prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
3405{
3406 mchunkptr p = align_as_chunk(newbase);
3407 mchunkptr oldfirst = align_as_chunk(oldbase);
3408 size_t psize = (char *) oldfirst - (char *) p;
3409 mchunkptr q = chunk_plus_offset(p, nb);
3410     size_t qsize = psize - nb;
3411     set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3412 
3413 assert((char *) oldfirst > (char *) q);
3414 assert(pinuse(oldfirst));
3415 assert(qsize >= MIN_CHUNK_SIZE);
3416
3417 /* consolidate remainder with first chunk of old base */
3418 if (oldfirst == m->top) {
3419 size_t tsize = m->topsize += qsize;
3420 m->top = q;
3421         q->head = tsize | PINUSE_BIT;
3422         check_top_chunk(m, q);
3423     } else if (oldfirst == m->dv) {
3424         size_t dsize = m->dvsize += qsize;
3425         m->dv = q;
3426         set_size_and_pinuse_of_free_chunk(q, dsize);
3427     } else {
3428 if (!cinuse(oldfirst)) {
3429 size_t nsize = chunksize(oldfirst);
3430 unlink_chunk(m, oldfirst, nsize);
3431 oldfirst = chunk_plus_offset(oldfirst, nsize);
3432 qsize += nsize;
3433 }
3434 set_free_with_pinuse(q, qsize, oldfirst);
3435         insert_chunk(m, q, qsize);
3436         check_free_chunk(m, q);
3437     }
3438 
3439     check_malloced_chunk(m, chunk2mem(p), nb);
3440     return chunk2mem(p);
3441}
3442
3443
3444/* Add a segment to hold a new noncontiguous region */
3445static void
3446add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
3447{
3448 /* Determine locations and sizes of segment, fenceposts, old top */
3449 char *old_top = (char *) m->top;
3450 msegmentptr oldsp = segment_holding(m, old_top);
3451 char *old_end = oldsp->base + oldsp->size;
3452 size_t ssize = pad_request(sizeof(struct malloc_segment));
3453 char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3454 size_t offset = align_offset(chunk2mem(rawsp));
3455 char *asp = rawsp + offset;
3456 char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3457 mchunkptr sp = (mchunkptr) csp;
3458 msegmentptr ss = (msegmentptr) (chunk2mem(sp));
3459 mchunkptr tnext = chunk_plus_offset(sp, ssize);
3460 mchunkptr p = tnext;
3461 int nfences = 0;
3462
3463 /* reset top to new space */
3464 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3465
3466 /* Set up segment record */
3467     assert(is_aligned(ss));
3468     set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3469     *ss = m->seg; /* Push current record */
3470 m->seg.base = tbase;
3471 m->seg.size = tsize;
3472 m->seg.sflags = mmapped;
3473 m->seg.next = ss;
3474
3475 /* Insert trailing fenceposts */
3476 for (;;) {
3477 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3478 p->head = FENCEPOST_HEAD;
3479 ++nfences;
3480 if ((char *) (&(nextp->head)) < old_end)
3481 p = nextp;
3482 else
3483 break;
3484 }
3485 assert(nfences >= 2);
3486
3487 /* Insert the rest of old top into a bin as an ordinary free chunk */
3488 if (csp != old_top) {
3489 mchunkptr q = (mchunkptr) old_top;
3490 size_t psize = csp - old_top;
3491 mchunkptr tn = chunk_plus_offset(q, psize);
3492 set_free_with_pinuse(q, psize, tn);
3493 insert_chunk(m, q, psize);
3494 }
3495
3496 check_top_chunk(m, m->top);
3497}
3498
3499/* -------------------------- System allocation -------------------------- */
3500
3501/* Get memory from system using MORECORE or MMAP */
3502static void *
3503sys_alloc(mstate m, size_t nb)
3504{
3505 char *tbase = CMFAIL;
3506 size_t tsize = 0;
3507 flag_t mmap_flag = 0;
3508
3509 init_mparams();
3510
3511 /* Directly map large chunks */
3512 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3513 void *mem = mmap_alloc(m, nb);
3514 if (mem != 0)
3515 return mem;
3516 }
3517
3518 /*
3519 Try getting memory in any of three ways (in most-preferred to
3520 least-preferred order):
3521 1. A call to MORECORE that can normally contiguously extend memory.
3522 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3523         main space is mmapped or a previous contiguous call failed)
3524 2. A call to MMAP new space (disabled if not HAVE_MMAP).
3525 Note that under the default settings, if MORECORE is unable to
3526 fulfill a request, and HAVE_MMAP is true, then mmap is
3527 used as a noncontiguous system allocator. This is a useful backup
3528 strategy for systems with holes in address spaces -- in this case
3529 sbrk cannot contiguously expand the heap, but mmap may be able to
3530 find space.
3531 3. A call to MORECORE that cannot usually contiguously extend memory.
3532 (disabled if not HAVE_MORECORE)
3533 */
3534 
3535     if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3536         char *br = CMFAIL;
3537         msegmentptr ss =
3538             (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
3539         size_t asize = 0;
3540         ACQUIRE_MORECORE_LOCK();
3541 
3542 if (ss == 0) { /* First time through or recovery */
3543 char *base = (char *) CALL_MORECORE(0);
3544 if (base != CMFAIL) {
3545                 asize =
3546                     granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3547                                       SIZE_T_ONE);
3548 /* Adjust to end on a page boundary */
3549 if (!is_page_aligned(base))
3550 asize += (page_align((size_t) base) - (size_t) base);
3551 /* Can't call MORECORE if size is negative when treated as signed */
3552 if (asize < HALF_MAX_SIZE_T &&
3553 (br = (char *) (CALL_MORECORE(asize))) == base) {
3554 tbase = base;
3555 tsize = asize;
3556 }
3557 }
3558 } else {
3559 /* Subtract out existing available top space from MORECORE request. */
3560 asize =
3561                 granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
3562                                   SIZE_T_ONE);
3563             /* Use mem here only if it did continuously extend old space */
3564 if (asize < HALF_MAX_SIZE_T &&
3565 (br =
3566 (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
3567 tbase = br;
3568 tsize = asize;
3569 }
3570 }
3571
3572 if (tbase == CMFAIL) { /* Cope with partial failure */
3573 if (br != CMFAIL) { /* Try to use/extend the space we did get */
3574 if (asize < HALF_MAX_SIZE_T &&
3575 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3576                     size_t esize =
3577                         granularity_align(nb + TOP_FOOT_SIZE +
3578                                           MALLOC_ALIGNMENT + SIZE_T_ONE -
3579                                           asize);
3580 if (esize < HALF_MAX_SIZE_T) {
3581 char *end = (char *) CALL_MORECORE(esize);
3582 if (end != CMFAIL)
3583 asize += esize;
3584 else { /* Can't use; try to release */
3585 end = (char *) CALL_MORECORE(-asize);
3586 br = CMFAIL;
3587 }
3588 }
3589 }
3590 }
3591 if (br != CMFAIL) { /* Use the space we did get */
3592 tbase = br;
3593 tsize = asize;
3594 } else
3595 disable_contiguous(m); /* Don't try contiguous path in the future */
3596 }
3597 
3598         RELEASE_MORECORE_LOCK();
3599     }
3600
3601 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3602 size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
3603 size_t rsize = granularity_align(req);
3604 if (rsize > nb) { /* Fail if wraps around zero */
3605 char *mp = (char *) (CALL_MMAP(rsize));
3606 if (mp != CMFAIL) {
3607 tbase = mp;
3608 tsize = rsize;
3609 mmap_flag = IS_MMAPPED_BIT;
3610 }
3611 }
3612 }
3613
3614 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3615         size_t asize =
3616             granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3617                               SIZE_T_ONE);
3618 if (asize < HALF_MAX_SIZE_T) {
3619 char *br = CMFAIL;
3620             char *end = CMFAIL;
3621             ACQUIRE_MORECORE_LOCK();
3622             br = (char *) (CALL_MORECORE(asize));
3623             end = (char *) (CALL_MORECORE(0));
3624             RELEASE_MORECORE_LOCK();
3625             if (br != CMFAIL && end != CMFAIL && br < end) {
3626 size_t ssize = end - br;
3627 if (ssize > nb + TOP_FOOT_SIZE) {
3628 tbase = br;
3629 tsize = ssize;
3630 }
3631 }
3632 }
3633 }
3634
3635 if (tbase != CMFAIL) {
3636
3637 if ((m->footprint += tsize) > m->max_footprint)
3638 m->max_footprint = m->footprint;
3639
3640 if (!is_initialized(m)) { /* first-time initialization */
3641 m->seg.base = m->least_addr = tbase;
3642 m->seg.size = tsize;
3643 m->seg.sflags = mmap_flag;
3644 m->magic = mparams.magic;
3645 init_bins(m);
3646 if (is_global(m))
3647 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3648 else {
3649 /* Offset top by embedded malloc_state */
3650 mchunkptr mn = next_chunk(mem2chunk(m));
3651 init_top(m, mn,
3652                          (size_t) ((tbase + tsize) - (char *) mn) -
3653                          TOP_FOOT_SIZE);
3654             }
3655 }
3656
3657 else {
3658 /* Try to merge with an existing segment */
3659 msegmentptr sp = &m->seg;
3660 while (sp != 0 && tbase != sp->base + sp->size)
3661 sp = sp->next;
3662 if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
3663 sp->size += tsize;
3664 init_top(m, m->top, m->topsize + tsize);
3665 } else {
3666 if (tbase < m->least_addr)
3667 m->least_addr = tbase;
3668 sp = &m->seg;
3669 while (sp != 0 && sp->base != tbase + tsize)
3670 sp = sp->next;
3671 if (sp != 0 &&
3672 !is_extern_segment(sp) &&
3673 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3674 char *oldbase = sp->base;
3675 sp->base = tbase;
3676 sp->size += tsize;
3677 return prepend_alloc(m, tbase, oldbase, nb);
3678 } else
3679 add_segment(m, tbase, tsize, mmap_flag);
3680 }
3681 }
3682
3683 if (nb < m->topsize) { /* Allocate from new or extended top space */
3684 size_t rsize = m->topsize -= nb;
3685 mchunkptr p = m->top;
3686 mchunkptr r = m->top = chunk_plus_offset(p, nb);
3687             r->head = rsize | PINUSE_BIT;
3688             set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3689             check_top_chunk(m, m->top);
3690             check_malloced_chunk(m, chunk2mem(p), nb);
3691             return chunk2mem(p);
3692 }
3693 }
3694 
3695     MALLOC_FAILURE_ACTION;
3696     return 0;
3697}
3698
3699/* ----------------------- system deallocation -------------------------- */
3700
3701/* Unmap and unlink any mmapped segments that don't contain used chunks */
3702 static size_t
3703 release_unused_segments(mstate m)
3704 {
3705 size_t released = 0;
3706 msegmentptr pred = &m->seg;
3707 msegmentptr sp = pred->next;
3708 while (sp != 0) {
3709 char *base = sp->base;
3710 size_t size = sp->size;
3711 msegmentptr next = sp->next;
3712 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3713 mchunkptr p = align_as_chunk(base);
3714 size_t psize = chunksize(p);
3715 /* Can unmap if first chunk holds entire segment and not pinned */
3716 if (!cinuse(p)
3717 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3718 tchunkptr tp = (tchunkptr) p;
3719 assert(segment_holds(sp, (char *) sp));
3720 if (p == m->dv) {
3721 m->dv = 0;
3722 m->dvsize = 0;
3723 } else {
3724 unlink_large_chunk(m, tp);
3725 }
3726 if (CALL_MUNMAP(base, size) == 0) {
3727 released += size;
3728 m->footprint -= size;
3729 /* unlink obsoleted record */
3730 sp = pred;
3731 sp->next = next;
3732 } else { /* back out if cannot unmap */
3733 insert_large_chunk(m, tp, psize);
3734 }
3735 }
3736 }
3737 pred = sp;
3738 sp = next;
3739 }
3740 return released;
3741}
3742
3743static int
3744sys_trim(mstate m, size_t pad)
3745{
3746 size_t released = 0;
3747 if (pad < MAX_REQUEST && is_initialized(m)) {
3748 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3749
3750 if (m->topsize > pad) {
3751 /* Shrink top space in granularity-size units, keeping at least one */
3752 size_t unit = mparams.granularity;
3753 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3754 SIZE_T_ONE) * unit;
3755 msegmentptr sp = segment_holding(m, (char *) m->top);
3756
3757 if (!is_extern_segment(sp)) {
3758 if (is_mmapped_segment(sp)) {
3759 if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
3760 size_t newsize = sp->size - extra;
3761 /* Prefer mremap, fall back to munmap */
3762 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
3763 MFAIL)
3764 || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3765 released = extra;
3766 }
3767 }
3768 } else if (HAVE_MORECORE) {
3769 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3770                     extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3771                 ACQUIRE_MORECORE_LOCK();
3772                 {
3773 /* Make sure end of memory is where we last set it. */
3774 char *old_br = (char *) (CALL_MORECORE(0));
3775 if (old_br == sp->base + sp->size) {
3776 char *rel_br = (char *) (CALL_MORECORE(-extra));
3777 char *new_br = (char *) (CALL_MORECORE(0));
3778 if (rel_br != CMFAIL && new_br < old_br)
3779 released = old_br - new_br;
3780 }
3781                 }
3782                 RELEASE_MORECORE_LOCK();
3783             }
3784 }
3785
3786 if (released != 0) {
3787 sp->size -= released;
3788 m->footprint -= released;
3789 init_top(m, m->top, m->topsize - released);
3790 check_top_chunk(m, m->top);
3791 }
3792 }
3793
3794 /* Unmap any unused mmapped segments */
3795 if (HAVE_MMAP)
3796 released += release_unused_segments(m);
3797
3798 /* On failure, disable autotrim to avoid repeated failed future calls */
3799 if (released == 0)
3800 m->trim_check = MAX_SIZE_T;
3801 }
3802
3803 return (released != 0) ? 1 : 0;
3804}
3805
3806/* ---------------------------- malloc support --------------------------- */
3807
3808/* allocate a large request from the best fitting chunk in a treebin */
3809static void *
3810tmalloc_large(mstate m, size_t nb)
3811{
3812 tchunkptr v = 0;
3813 size_t rsize = -nb; /* Unsigned negation */
3814 tchunkptr t;
3815 bindex_t idx;
3816 compute_tree_index(nb, idx);
3817
3818 if ((t = *treebin_at(m, idx)) != 0) {
3819 /* Traverse tree for this bin looking for node with size == nb */
3820 size_t sizebits = nb << leftshift_for_tree_index(idx);
3821 tchunkptr rst = 0; /* The deepest untaken right subtree */
3822 for (;;) {
3823 tchunkptr rt;
3824 size_t trem = chunksize(t) - nb;
3825 if (trem < rsize) {
3826 v = t;
3827 if ((rsize = trem) == 0)
3828 break;
3829 }
3830 rt = t->child[1];
3831 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3832 if (rt != 0 && rt != t)
3833 rst = rt;
3834 if (t == 0) {
3835 t = rst; /* set t to least subtree holding sizes > nb */
3836 break;
3837 }
3838 sizebits <<= 1;
3839 }
3840 }
3841
3842 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3843 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3844 if (leftbits != 0) {
3845 bindex_t i;
3846 binmap_t leastbit = least_bit(leftbits);
3847 compute_bit2idx(leastbit, i);
3848 t = *treebin_at(m, i);
3849 }
3850 }
3851
3852 while (t != 0) { /* find smallest of tree or subtree */
3853 size_t trem = chunksize(t) - nb;
3854 if (trem < rsize) {
3855 rsize = trem;
3856 v = t;
3857 }
3858 t = leftmost_child(t);
3859 }
3860
3861 /* If dv is a better fit, return 0 so malloc will use it */
3862 if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3863 if (RTCHECK(ok_address(m, v))) { /* split */
3864 mchunkptr r = chunk_plus_offset(v, nb);
3865 assert(chunksize(v) == rsize + nb);
3866             if (RTCHECK(ok_next(v, r))) {
3867                 unlink_large_chunk(m, v);
3868                 if (rsize < MIN_CHUNK_SIZE)
3869                     set_inuse_and_pinuse(m, v, (rsize + nb));
3870                 else {
3871                     set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3872                     set_size_and_pinuse_of_free_chunk(r, rsize);
3873                     insert_chunk(m, r, rsize);
3874                 }
3875 return chunk2mem(v);
3876 }
3877             }
3878         CORRUPTION_ERROR_ACTION(m);
3879     }
3880 return 0;
3881}
3882
3883/* allocate a small request from the best fitting chunk in a treebin */
3884static void *
3885tmalloc_small(mstate m, size_t nb)
3886{
3887 tchunkptr t, v;
3888 size_t rsize;
3889 bindex_t i;
3890 binmap_t leastbit = least_bit(m->treemap);
3891 compute_bit2idx(leastbit, i);
3892
3893 v = t = *treebin_at(m, i);
3894 rsize = chunksize(t) - nb;
3895
3896 while ((t = leftmost_child(t)) != 0) {
3897 size_t trem = chunksize(t) - nb;
3898 if (trem < rsize) {
3899 rsize = trem;
3900 v = t;
3901 }
3902 }
3903
3904 if (RTCHECK(ok_address(m, v))) {
3905 mchunkptr r = chunk_plus_offset(v, nb);
3906 assert(chunksize(v) == rsize + nb);
3907         if (RTCHECK(ok_next(v, r))) {
3908             unlink_large_chunk(m, v);
3909             if (rsize < MIN_CHUNK_SIZE)
3910                 set_inuse_and_pinuse(m, v, (rsize + nb));
3911             else {
3912                 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3913                 set_size_and_pinuse_of_free_chunk(r, rsize);
3914                 replace_dv(m, r, rsize);
3915             }
3916 return chunk2mem(v);
3917 }
3918 }
3919 
3920     CORRUPTION_ERROR_ACTION(m);
3921     return 0;
3922}
3923
3924/* --------------------------- realloc support --------------------------- */
3925
3926static void *
3927internal_realloc(mstate m, void *oldmem, size_t bytes)
3928{
3929     if (bytes >= MAX_REQUEST) {
3930         MALLOC_FAILURE_ACTION;
3931         return 0;
3932 }
3933 if (!PREACTION(m)) {
3934 mchunkptr oldp = mem2chunk(oldmem);
3935 size_t oldsize = chunksize(oldp);
3936 mchunkptr next = chunk_plus_offset(oldp, oldsize);
3937 mchunkptr newp = 0;
3938 void *extra = 0;
3939
3940 /* Try to either shrink or extend into top. Else malloc-copy-free */
3941
3942 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
3943 ok_next(oldp, next) && ok_pinuse(next))) {
3944 size_t nb = request2size(bytes);
3945 if (is_mmapped(oldp))
3946 newp = mmap_resize(m, oldp, nb);
3947 else if (oldsize >= nb) { /* already big enough */
3948 size_t rsize = oldsize - nb;
3949 newp = oldp;
3950 if (rsize >= MIN_CHUNK_SIZE) {
3951 mchunkptr remainder = chunk_plus_offset(newp, nb);
3952 set_inuse(m, newp, nb);
3953 set_inuse(m, remainder, rsize);
3954 extra = chunk2mem(remainder);
3955 }
3956 } else if (next == m->top && oldsize + m->topsize > nb) {
3957 /* Expand into top */
3958 size_t newsize = oldsize + m->topsize;
3959 size_t newtopsize = newsize - nb;
3960 mchunkptr newtop = chunk_plus_offset(oldp, nb);
3961 set_inuse(m, oldp, nb);
3962 newtop->head = newtopsize | PINUSE_BIT;
3963 m->top = newtop;
3964 m->topsize = newtopsize;
3965 newp = oldp;
3966 }
3967 } else {
3968 USAGE_ERROR_ACTION(m, oldmem);
3969 POSTACTION(m);
3970 return 0;
3971 }
3972
3973 POSTACTION(m);
3974
3975 if (newp != 0) {
3976 if (extra != 0) {
3977 internal_free(m, extra);
3978 }
3979 check_inuse_chunk(m, newp);
3980 return chunk2mem(newp);
3981 } else {
3982 void *newmem = internal_malloc(m, bytes);
3983 if (newmem != 0) {
3984 size_t oc = oldsize - overhead_for(oldp);
3985 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3986 internal_free(m, oldmem);
3987 }
3988 return newmem;
3989 }
3990 }
3991 return 0;
3992}
3993
3994/* --------------------------- memalign support -------------------------- */
3995
3996static void *
3997internal_memalign(mstate m, size_t alignment, size_t bytes)
3998{
3999 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
4000 return internal_malloc(m, bytes);
4001 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4002 alignment = MIN_CHUNK_SIZE;
4003 if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
4004 size_t a = MALLOC_ALIGNMENT << 1;
4005 while (a < alignment)
4006 a <<= 1;
4007 alignment = a;
4008 }
4009
4010 if (bytes >= MAX_REQUEST - alignment) {
4011         if (m != 0) { /* Test isn't needed but avoids compiler warning */
4012             MALLOC_FAILURE_ACTION;
4013         }
4014 } else {
4015 size_t nb = request2size(bytes);
4016 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4017 char *mem = (char *) internal_malloc(m, req);
4018 if (mem != 0) {
4019 void *leader = 0;
4020 void *trailer = 0;
4021 mchunkptr p = mem2chunk(mem);
4022
4023 if (PREACTION(m))
4024 return 0;
4025 if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
4026 /*
4027 Find an aligned spot inside chunk. Since we need to give
4028 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4029 the first calculation places us at a spot with less than
4030 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4031 We've allocated enough total room so that this is always
4032 possible.
4033 */
4034 char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
4035 alignment -
4036 SIZE_T_ONE))
4037 & -alignment));
4038 char *pos =
4039 ((size_t) (br - (char *) (p)) >=
4040 MIN_CHUNK_SIZE) ? br : br + alignment;
4041 mchunkptr newp = (mchunkptr) pos;
4042 size_t leadsize = pos - (char *) (p);
4043 size_t newsize = chunksize(p) - leadsize;
4044
4045 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
4046 newp->prev_foot = p->prev_foot + leadsize;
4047 newp->head = (newsize | CINUSE_BIT);
4048 } else { /* Otherwise, give back leader, use the rest */
4049 set_inuse(m, newp, newsize);
4050 set_inuse(m, p, leadsize);
4051 leader = chunk2mem(p);
4052 }
4053 p = newp;
4054 }
4055
4056 /* Give back spare room at the end */
4057 if (!is_mmapped(p)) {
4058 size_t size = chunksize(p);
4059 if (size > nb + MIN_CHUNK_SIZE) {
4060 size_t remainder_size = size - nb;
4061 mchunkptr remainder = chunk_plus_offset(p, nb);
4062 set_inuse(m, p, nb);
4063 set_inuse(m, remainder, remainder_size);
4064 trailer = chunk2mem(remainder);
4065 }
4066 }
4067
4068 assert(chunksize(p) >= nb);
4069             assert((((size_t) (chunk2mem(p))) % alignment) == 0);
4070             check_inuse_chunk(m, p);
4071             POSTACTION(m);
4072 if (leader != 0) {
4073 internal_free(m, leader);
4074 }
4075 if (trailer != 0) {
4076 internal_free(m, trailer);
4077 }
4078 return chunk2mem(p);
4079 }
4080 }
4081 return 0;
4082}
4083
4084/* ------------------------ comalloc/coalloc support --------------------- */
4085
4086static void **
4087ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4088{
4089 /*
4090 This provides common support for independent_X routines, handling
4091 all of the combinations that can result.
4092
4093 The opts arg has:
4094 bit 0 set if all elements are same size (using sizes[0])
4095 bit 1 set if elements should be zeroed
4096 */
4097
4098 size_t element_size; /* chunksize of each element, if all same */
4099 size_t contents_size; /* total size of elements */
4100 size_t array_size; /* request size of pointer array */
4101 void *mem; /* malloced aggregate space */
4102 mchunkptr p; /* corresponding chunk */
4103 size_t remainder_size; /* remaining bytes while splitting */
4104 void **marray; /* either "chunks" or malloced ptr array */
4105 mchunkptr array_chunk; /* chunk for malloced ptr array */
4106 flag_t was_enabled; /* to disable mmap */
4107 size_t size;
4108 size_t i;
4109
4110 /* compute array length, if needed */
4111 if (chunks != 0) {
4112 if (n_elements == 0)
4113 return chunks; /* nothing to do */
4114 marray = chunks;
4115 array_size = 0;
4116 } else {
4117 /* if empty req, must still return chunk representing empty array */
4118 if (n_elements == 0)
4119 return (void **) internal_malloc(m, 0);
4120 marray = 0;
4121 array_size = request2size(n_elements * (sizeof(void *)));
4122 }
4123
4124 /* compute total element size */
4125 if (opts & 0x1) { /* all-same-size */
4126 element_size = request2size(*sizes);
4127 contents_size = n_elements * element_size;
4128 } else { /* add up all the sizes */
4129 element_size = 0;
4130 contents_size = 0;
4131 for (i = 0; i != n_elements; ++i)
4132 contents_size += request2size(sizes[i]);
4133 }
4134
4135 size = contents_size + array_size;
4136
4137 /*
4138 Allocate the aggregate chunk. First disable direct-mmapping so
4139 malloc won't use it, since we would not be able to later
4140 free/realloc space internal to a segregated mmap region.
4141 */
4142 was_enabled = use_mmap(m);
4143     disable_mmap(m);
4144     mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4145     if (was_enabled)
4146 enable_mmap(m);
4147 if (mem == 0)
4148 return 0;
4149
4150 if (PREACTION(m))
4151 return 0;
4152 p = mem2chunk(mem);
4153 remainder_size = chunksize(p);
4154
4155 assert(!is_mmapped(p));
4156
4157 if (opts & 0x2) { /* optionally clear the elements */
4158 memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4159 }
4160
4161 /* If not provided, allocate the pointer array as final part of chunk */
4162 if (marray == 0) {
4163 size_t array_chunk_size;
4164 array_chunk = chunk_plus_offset(p, contents_size);
4165 array_chunk_size = remainder_size - contents_size;
4166 marray = (void **) (chunk2mem(array_chunk));
4167 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4168 remainder_size = contents_size;
4169 }
4170
4171 /* split out elements */
4172 for (i = 0;; ++i) {
4173 marray[i] = chunk2mem(p);
4174 if (i != n_elements - 1) {
4175 if (element_size != 0)
4176 size = element_size;
4177             else
4178                 size = request2size(sizes[i]);
4179             remainder_size -= size;
4180             set_size_and_pinuse_of_inuse_chunk(m, p, size);
4181             p = chunk_plus_offset(p, size);
4182 } else { /* the final element absorbs any overallocation slop */
4183 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4184 break;
4185 }
4186 }
4187
4188#if DEBUG
4189 if (marray != chunks) {
4190 /* final element must have exactly exhausted chunk */
4191 if (element_size != 0) {
4192 assert(remainder_size == element_size);
4193 } else {
4194 assert(remainder_size == request2size(sizes[i]));
4195 }
4196 check_inuse_chunk(m, mem2chunk(marray));
4197 }
4198 for (i = 0; i != n_elements; ++i)
4199 check_inuse_chunk(m, mem2chunk(marray[i]));
4200
4201#endif /* DEBUG */
4202
4203 POSTACTION(m);
4204 return marray;
4205}
4206
4207
4208/* -------------------------- public routines ---------------------------- */
4209
4210#if !ONLY_MSPACES
4211
4212void *
4213dlmalloc(size_t bytes)
4214{
4215 /*
4216 Basic algorithm:
4217 If a small request (< 256 bytes minus per-chunk overhead):
4218 1. If one exists, use a remainderless chunk in associated smallbin.
4219 (Remainderless means that there are too few excess bytes to
4220 represent as a chunk.)
4221 2. If it is big enough, use the dv chunk, which is normally the
4222 chunk adjacent to the one used for the most recent small request.
4223 3. If one exists, split the smallest available chunk in a bin,
4224 saving remainder in dv.
4225 4. If it is big enough, use the top chunk.
4226 5. If available, get memory from system and use it
4227 Otherwise, for a large request:
4228 1. Find the smallest available binned chunk that fits, and use it
4229 if it is better fitting than dv chunk, splitting if necessary.
4230 2. If better fitting than any binned chunk, use the dv chunk.
4231 3. If it is big enough, use the top chunk.
4232 4. If request size >= mmap threshold, try to directly mmap this chunk.
4233 5. If available, get memory from system and use it
4234
4235 The ugly goto's here ensure that postaction occurs along all paths.
4236 */
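    /*
      For example (assuming 8-byte size_t and 16-byte alignment, so
      MIN_CHUNK_SIZE is 32): a 20-byte request pads to nb = 32 via
      pad_request, lands in the corresponding smallbin, and step 1
      applies whenever that bin or the next one is non-empty -- the
      remainderless branch just below.
    */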
4237
4238 if (!PREACTION(gm)) {
4239 void *mem;
4240 size_t nb;
4241 if (bytes <= MAX_SMALL_REQUEST) {
4242 bindex_t idx;
4243 binmap_t smallbits;
4244 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4245 idx = small_index(nb);
4246 smallbits = gm->smallmap >> idx;
4247
4248 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4249 mchunkptr b, p;
4250 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4251 b = smallbin_at(gm, idx);
4252             p = b->fd;
4253             assert(chunksize(p) == small_index2size(idx));
4254             unlink_first_small_chunk(gm, b, p, idx);
4255             set_inuse_and_pinuse(gm, p, small_index2size(idx));
4256             mem = chunk2mem(p);
4257 check_malloced_chunk(gm, mem, nb);
4258 goto postaction;
4259 }
4260
4261 else if (nb > gm->dvsize) {
4262 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4263 mchunkptr b, p, r;
4264 size_t rsize;
4265 bindex_t i;
4266 binmap_t leftbits =
4267 (smallbits << idx) & left_bits(idx2bit(idx));
4268 binmap_t leastbit = least_bit(leftbits);
4269 compute_bit2idx(leastbit, i);
4270 b = smallbin_at(gm, i);
4271                 p = b->fd;
4272                 assert(chunksize(p) == small_index2size(i));
4273                 unlink_first_small_chunk(gm, b, p, i);
4274                 rsize = small_index2size(i) - nb;
4275 /* Fit here cannot be remainderless if 4byte sizes */
4276                 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4277                     set_inuse_and_pinuse(gm, p, small_index2size(i));
4278                 else {
4279                     set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4280                     r = chunk_plus_offset(p, nb);
4281                     set_size_and_pinuse_of_free_chunk(r, rsize);
4282                     replace_dv(gm, r, rsize);
4283 }
4284 mem = chunk2mem(p);
4285 check_malloced_chunk(gm, mem, nb);
4286 goto postaction;
4287 }
4288
4289 else if (gm->treemap != 0
4290 && (mem = tmalloc_small(gm, nb)) != 0) {
4291 check_malloced_chunk(gm, mem, nb);
4292 goto postaction;
4293 }
4294 }
4295 } else if (bytes >= MAX_REQUEST)
4296 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4297 else {
4298 nb = pad_request(bytes);
4299 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4300 check_malloced_chunk(gm, mem, nb);
4301 goto postaction;
4302 }
4303 }
4304
4305 if (nb <= gm->dvsize) {
4306 size_t rsize = gm->dvsize - nb;
4307 mchunkptr p = gm->dv;
4308 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4309 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4310                 gm->dvsize = rsize;
4311                 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4312                 set_size_and_pinuse_of_free_chunk(r, rsize);
4313             } else { /* exhaust dv */
4314 size_t dvs = gm->dvsize;
4315 gm->dvsize = 0;
4316 gm->dv = 0;
4317 set_inuse_and_pinuse(gm, p, dvs);
4318 }
4319 mem = chunk2mem(p);
4320 check_malloced_chunk(gm, mem, nb);
4321 goto postaction;
4322 }
4323
4324 else if (nb < gm->topsize) { /* Split top */
4325 size_t rsize = gm->topsize -= nb;
4326 mchunkptr p = gm->top;
4327 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4328             r->head = rsize | PINUSE_BIT;
4329             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4330             mem = chunk2mem(p);
4331 check_top_chunk(gm, gm->top);
4332 check_malloced_chunk(gm, mem, nb);
4333 goto postaction;
4334 }
4335
4336 mem = sys_alloc(gm, nb);
4337
4338 postaction:
4339 POSTACTION(gm);
4340 return mem;
4341 }
4342
4343 return 0;
4344}
4345
4346void
4347dlfree(void *mem)
4348{
4349 /*
4350      Consolidate freed chunks with preceding or succeeding bordering
4351 free chunks, if they exist, and then place in a bin. Intermixed
4352 with special cases for top, dv, mmapped chunks, and usage errors.
4353 */
4354
4355 if (mem != 0) {
4356 mchunkptr p = mem2chunk(mem);
4357#if FOOTERS
4358 mstate fm = get_mstate_for(p);
4359         if (!ok_magic(fm)) {
4360             USAGE_ERROR_ACTION(fm, p);
4361             return;
4362 }
4363#else /* FOOTERS */
4364#define fm gm
4365#endif /* FOOTERS */
4366         if (!PREACTION(fm)) {
4367             check_inuse_chunk(fm, p);
4368             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4369 size_t psize = chunksize(p);
4370 mchunkptr next = chunk_plus_offset(p, psize);
4371 if (!pinuse(p)) {
4372 size_t prevsize = p->prev_foot;
4373 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4374 prevsize &= ~IS_MMAPPED_BIT;
4375 psize += prevsize + MMAP_FOOT_PAD;
4376 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4377 fm->footprint -= psize;
4378 goto postaction;
4379 } else {
4380 mchunkptr prev = chunk_minus_offset(p, prevsize);
4381 psize += prevsize;
4382 p = prev;
4383 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4384 if (p != fm->dv) {
4385 unlink_chunk(fm, p, prevsize);
4386 } else if ((next->head & INUSE_BITS) ==
4387 INUSE_BITS) {
4388 fm->dvsize = psize;
4389 set_free_with_pinuse(p, psize, next);
4390 goto postaction;
4391 }
4392 } else
4393 goto erroraction;
4394 }
4395 }
4396
4397 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4398 if (!cinuse(next)) { /* consolidate forward */
4399 if (next == fm->top) {
4400 size_t tsize = fm->topsize += psize;
4401 fm->top = p;
4402 p->head = tsize | PINUSE_BIT;
4403 if (p == fm->dv) {
4404 fm->dv = 0;
4405 fm->dvsize = 0;
4406 }
4407 if (should_trim(fm, tsize))
4408 sys_trim(fm, 0);
4409 goto postaction;
4410 } else if (next == fm->dv) {
4411 size_t dsize = fm->dvsize += psize;
4412                         fm->dv = p;
4413                         set_size_and_pinuse_of_free_chunk(p, dsize);
4414                         goto postaction;
4415 } else {
4416 size_t nsize = chunksize(next);
4417 psize += nsize;
4418                         unlink_chunk(fm, next, nsize);
4419                         set_size_and_pinuse_of_free_chunk(p, psize);
4420                         if (p == fm->dv) {
4421 fm->dvsize = psize;
4422 goto postaction;
4423 }
4424 }
4425 } else
4426 set_free_with_pinuse(p, psize, next);
4427                 insert_chunk(fm, p, psize);
4428                 check_free_chunk(fm, p);
4429                 goto postaction;
4430 }
4431 }
4432       erroraction:
4433         USAGE_ERROR_ACTION(fm, p);
4434       postaction:
4435 POSTACTION(fm);
4436 }
4437 }
4438#if !FOOTERS
4439#undef fm
4440#endif /* FOOTERS */
4441}
4442
4443void *
4444dlcalloc(size_t n_elements, size_t elem_size)
4445{
4446 void *mem;
4447 size_t req = 0;
4448 if (n_elements != 0) {
4449 req = n_elements * elem_size;
4450 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4451 (req / n_elements != elem_size))
4452 req = MAX_SIZE_T; /* force downstream failure on overflow */
4453 }
4454 mem = dlmalloc(req);
4455 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4456 memset(mem, 0, req);
4457 return mem;
4458}
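/*
  Note on the overflow screen above (sketch of the arithmetic): the
  division is only paid when either operand has bits above 0xffff,
  since two factors below 2^16 cannot overflow even a 32-bit size_t.
  With 32-bit size_t, n_elements = 0x10000 and elem_size = 0x10000
  wrap to req = 0; the check catches this because req / n_elements
  is 0 rather than elem_size.
*/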
4459
4460void *
4461dlrealloc(void *oldmem, size_t bytes)
4462{
4463 if (oldmem == 0)
4464 return dlmalloc(bytes);
4465#ifdef REALLOC_ZERO_BYTES_FREES
4466 if (bytes == 0) {
4467 dlfree(oldmem);
4468 return 0;
4469 }
4470#endif /* REALLOC_ZERO_BYTES_FREES */
4471 else {
4472#if ! FOOTERS
4473 mstate m = gm;
4474#else /* FOOTERS */
4475 mstate m = get_mstate_for(mem2chunk(oldmem));
4476 if (!ok_magic(m)) {
4477 USAGE_ERROR_ACTION(m, oldmem);
4478 return 0;
4479 }
4480#endif /* FOOTERS */
4481 return internal_realloc(m, oldmem, bytes);
4482 }
4483}
4484
4485void *
4486dlmemalign(size_t alignment, size_t bytes)
4487{
4488 return internal_memalign(gm, alignment, bytes);
4489}
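/*
  A minimal usage sketch (illustrative only). Any power of two at
  least MALLOC_ALIGNMENT is honored; smaller or non-power-of-two
  values are rounded up by internal_memalign above:

    void *p = dlmemalign(64, 1000);
    // p is 64-byte aligned; release it with dlfree as usual
    dlfree(p);
*/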
4490
4491void **
4492dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
4493{
4494 size_t sz = elem_size; /* serves as 1-element array */
4495 return ialloc(gm, n_elements, &sz, 3, chunks);
4496}
4497
4498void **
4499dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
4500{
4501 return ialloc(gm, n_elements, sizes, 0, chunks);
4502}
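/*
  A minimal usage sketch for comalloc (illustrative sizes only). One
  aggregate chunk is split into independently freeable pieces, so the
  pieces are adjacent in memory and carry no extra per-call overhead:

    size_t sizes[3] = { 40, 480, 64 };
    void *ptrs[3];
    if (dlindependent_comalloc(3, sizes, ptrs) != 0) {
        // ptrs[0..2] are usable now and may be passed to dlfree
        // individually, in any order.
        dlfree(ptrs[1]);
    }
*/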
4503
4504void *
4505dlvalloc(size_t bytes)
4506{
4507 size_t pagesz;
4508 init_mparams();
4509 pagesz = mparams.page_size;
4510 return dlmemalign(pagesz, bytes);
4511}
4512
4513void *
4514dlpvalloc(size_t bytes)
4515{
4516 size_t pagesz;
4517 init_mparams();
4518 pagesz = mparams.page_size;
4519 return dlmemalign(pagesz,
4520 (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4521}
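/*
  The rounding in dlpvalloc is the usual power-of-two round-up: with a
  4096-byte page, (bytes + 4095) & ~4095 maps 1..4096 to 4096 and 4097
  to 8192, so dlpvalloc returns whole pages while dlvalloc only aligns
  the starting address.
*/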
4522
4523int
4524dlmalloc_trim(size_t pad)
4525{
4526 int result = 0;
4527 if (!PREACTION(gm)) {
4528 result = sys_trim(gm, pad);
4529 POSTACTION(gm);
4530 }
4531 return result;
4532}
4533
4534 size_t
4535 dlmalloc_footprint(void)
4536 {
4537 return gm->footprint;
4538}
4539
4540 size_t
4541 dlmalloc_max_footprint(void)
4542 {
4543 return gm->max_footprint;
4544}
4545
4546#if !NO_MALLINFO
4547 struct mallinfo
4548 dlmallinfo(void)
4549 {
4550 return internal_mallinfo(gm);
4551}
4552#endif /* NO_MALLINFO */
4553
4554 void
4555 dlmalloc_stats(void)
4556 {
4557     internal_malloc_stats(gm);
4558 }
4559
4560 size_t
4561 dlmalloc_usable_size(void *mem)
4562 {
4563 if (mem != 0) {
4564 mchunkptr p = mem2chunk(mem);
4565 if (cinuse(p))
4566 return chunksize(p) - overhead_for(p);
4567 }
4568 return 0;
4569}
4570
4571int
4572dlmallopt(int param_number, int value)
4573{
4574 return change_mparam(param_number, value);
4575}
4576
4577#endif /* !ONLY_MSPACES */
4578
4579/* ----------------------------- user mspaces ---------------------------- */
4580
4581#if MSPACES
4582
4583static mstate
4584init_user_mstate(char *tbase, size_t tsize)
4585{
4586 size_t msize = pad_request(sizeof(struct malloc_state));
4587 mchunkptr mn;
4588 mchunkptr msp = align_as_chunk(tbase);
4589 mstate m = (mstate) (chunk2mem(msp));
4590 memset(m, 0, msize);
4591 INITIAL_LOCK(&m->mutex);
4592 msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4593 m->seg.base = m->least_addr = tbase;
4594 m->seg.size = m->footprint = m->max_footprint = tsize;
4595 m->magic = mparams.magic;
4596     m->mflags = mparams.default_mflags;
4597     disable_contiguous(m);
4598     init_bins(m);
4599 mn = next_chunk(mem2chunk(m));
4600 init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4601 check_top_chunk(m, m->top);
4602 return m;
4603}
4604
4605mspace
4606create_mspace(size_t capacity, int locked)
4607{
4608 mstate m = 0;
4609 size_t msize = pad_request(sizeof(struct malloc_state));
4610 init_mparams(); /* Ensure pagesize etc initialized */
4611
4612 if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4613 size_t rs = ((capacity == 0) ? mparams.granularity :
4614 (capacity + TOP_FOOT_SIZE + msize));
4615 size_t tsize = granularity_align(rs);
4616 char *tbase = (char *) (CALL_MMAP(tsize));
4617 if (tbase != CMFAIL) {
4618 m = init_user_mstate(tbase, tsize);
4619 m->seg.sflags = IS_MMAPPED_BIT;
4620 set_lock(m, locked);
4621 }
4622 }
4623 return (mspace) m;
4624}
4625
4626mspace
4627create_mspace_with_base(void *base, size_t capacity, int locked)
4628{
4629 mstate m = 0;
4630 size_t msize = pad_request(sizeof(struct malloc_state));
4631 init_mparams(); /* Ensure pagesize etc initialized */
4632
4633 if (capacity > msize + TOP_FOOT_SIZE &&
4634 capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4635 m = init_user_mstate((char *) base, capacity);
4636 m->seg.sflags = EXTERN_BIT;
4637 set_lock(m, locked);
4638 }
4639 return (mspace) m;
4640}
4641
4642size_t
4643destroy_mspace(mspace msp)
4644{
4645 size_t freed = 0;
4646 mstate ms = (mstate) msp;
4647 if (ok_magic(ms)) {
4648 msegmentptr sp = &ms->seg;
4649 while (sp != 0) {
4650 char *base = sp->base;
4651 size_t size = sp->size;
4652 flag_t flag = sp->sflags;
4653 sp = sp->next;
4654 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4655 CALL_MUNMAP(base, size) == 0)
4656 freed += size;
4657 }
4658 } else {
4659 USAGE_ERROR_ACTION(ms, ms);
4660 }
4661 return freed;
4662}
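/*
  A minimal usage sketch (illustrative only): an mspace is an opaque
  handle to an independent malloc_state, letting a subsystem allocate
  from its own arena and tear the whole arena down at once:

    mspace ms = create_mspace(0, 1);       // default capacity, locked
    void *a = mspace_malloc(ms, 128);
    void *b = mspace_calloc(ms, 16, 64);
    mspace_free(ms, a);
    size_t released = destroy_mspace(ms);  // reclaims b as well
*/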
4663
4664/*
4665 mspace versions of routines are near-clones of the global
4666 versions. This is not so nice but better than the alternatives.
4667*/
4668
4669
4670void *
4671mspace_malloc(mspace msp, size_t bytes)
4672{
4673 mstate ms = (mstate) msp;
4674 if (!ok_magic(ms)) {
4675 USAGE_ERROR_ACTION(ms, ms);
4676 return 0;
4677 }
4678 if (!PREACTION(ms)) {
4679 void *mem;
4680 size_t nb;
4681 if (bytes <= MAX_SMALL_REQUEST) {
4682 bindex_t idx;
4683 binmap_t smallbits;
4684 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4685 idx = small_index(nb);
4686 smallbits = ms->smallmap >> idx;
4687
4688 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4689 mchunkptr b, p;
4690 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4691 b = smallbin_at(ms, idx);
4692                 p = b->fd;
4693                 assert(chunksize(p) == small_index2size(idx));
4694                 unlink_first_small_chunk(ms, b, p, idx);
4695                 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4696                 mem = chunk2mem(p);
4697 check_malloced_chunk(ms, mem, nb);
4698 goto postaction;
4699 }
4700
4701 else if (nb > ms->dvsize) {
4702 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4703 mchunkptr b, p, r;
4704 size_t rsize;
4705 bindex_t i;
4706 binmap_t leftbits =
4707 (smallbits << idx) & left_bits(idx2bit(idx));
4708 binmap_t leastbit = least_bit(leftbits);
4709 compute_bit2idx(leastbit, i);
4710 b = smallbin_at(ms, i);
4711                     p = b->fd;
4712                     assert(chunksize(p) == small_index2size(i));
4713                     unlink_first_small_chunk(ms, b, p, i);
4714                     rsize = small_index2size(i) - nb;
4715 /* Fit here cannot be remainderless if 4byte sizes */
4716                     if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4717                         set_inuse_and_pinuse(ms, p, small_index2size(i));
4718                     else {
4719                         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4720                         r = chunk_plus_offset(p, nb);
4721                         set_size_and_pinuse_of_free_chunk(r, rsize);
4722                         replace_dv(ms, r, rsize);
4723 }
4724 mem = chunk2mem(p);
4725 check_malloced_chunk(ms, mem, nb);
4726 goto postaction;
4727 }
4728
4729 else if (ms->treemap != 0
4730 && (mem = tmalloc_small(ms, nb)) != 0) {
4731 check_malloced_chunk(ms, mem, nb);
4732 goto postaction;
4733 }
4734 }
4735 } else if (bytes >= MAX_REQUEST)
4736 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4737 else {
4738 nb = pad_request(bytes);
4739 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4740 check_malloced_chunk(ms, mem, nb);
4741 goto postaction;
4742 }
4743 }
4744
4745 if (nb <= ms->dvsize) {
4746 size_t rsize = ms->dvsize - nb;
4747 mchunkptr p = ms->dv;
4748 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4749 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4750                 ms->dvsize = rsize;
4751                 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4752                 set_size_and_pinuse_of_free_chunk(r, rsize);
4753             } else { /* exhaust dv */
4754 size_t dvs = ms->dvsize;
4755 ms->dvsize = 0;
4756 ms->dv = 0;
4757 set_inuse_and_pinuse(ms, p, dvs);
4758 }
4759 mem = chunk2mem(p);
4760 check_malloced_chunk(ms, mem, nb);
4761 goto postaction;
4762 }
4763
4764 else if (nb < ms->topsize) { /* Split top */
4765 size_t rsize = ms->topsize -= nb;
4766 mchunkptr p = ms->top;
4767 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4768             r->head = rsize | PINUSE_BIT;
4769             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4770             mem = chunk2mem(p);
4771 check_top_chunk(ms, ms->top);
4772 check_malloced_chunk(ms, mem, nb);
4773 goto postaction;
4774 }
4775
4776 mem = sys_alloc(ms, nb);
4777
4778 postaction:
4779 POSTACTION(ms);
4780 return mem;
4781 }
4782
4783 return 0;
4784}
4785
4786void
4787mspace_free(mspace msp, void *mem)
4788{
4789 if (mem != 0) {
4790 mchunkptr p = mem2chunk(mem);
4791#if FOOTERS
4792 mstate fm = get_mstate_for(p);
4793#else /* FOOTERS */
4794 mstate fm = (mstate) msp;
4795#endif /* FOOTERS */
4796         if (!ok_magic(fm)) {
4797             USAGE_ERROR_ACTION(fm, p);
4798             return;
4799 }
4800         if (!PREACTION(fm)) {
4801             check_inuse_chunk(fm, p);
4802             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4803 size_t psize = chunksize(p);
4804 mchunkptr next = chunk_plus_offset(p, psize);
4805 if (!pinuse(p)) {
4806 size_t prevsize = p->prev_foot;
4807 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4808 prevsize &= ~IS_MMAPPED_BIT;
4809 psize += prevsize + MMAP_FOOT_PAD;
4810 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4811 fm->footprint -= psize;
4812 goto postaction;
4813 } else {
4814 mchunkptr prev = chunk_minus_offset(p, prevsize);
4815 psize += prevsize;
4816 p = prev;
4817 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4818 if (p != fm->dv) {
4819 unlink_chunk(fm, p, prevsize);
4820 } else if ((next->head & INUSE_BITS) ==
4821 INUSE_BITS) {
4822 fm->dvsize = psize;
4823 set_free_with_pinuse(p, psize, next);
4824 goto postaction;
4825 }
4826 } else
4827 goto erroraction;
4828 }
4829 }
4830
4831 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4832 if (!cinuse(next)) { /* consolidate forward */
4833 if (next == fm->top) {
4834 size_t tsize = fm->topsize += psize;
4835 fm->top = p;
4836 p->head = tsize | PINUSE_BIT;
4837 if (p == fm->dv) {
4838 fm->dv = 0;
4839 fm->dvsize = 0;
4840 }
4841 if (should_trim(fm, tsize))
4842 sys_trim(fm, 0);
4843 goto postaction;
4844 } else if (next == fm->dv) {
4845 size_t dsize = fm->dvsize += psize;
4846                         fm->dv = p;
4847                         set_size_and_pinuse_of_free_chunk(p, dsize);
4848                         goto postaction;
4849 } else {
4850 size_t nsize = chunksize(next);
4851 psize += nsize;
4852                         unlink_chunk(fm, next, nsize);
4853                         set_size_and_pinuse_of_free_chunk(p, psize);
4854                         if (p == fm->dv) {
4855 fm->dvsize = psize;
4856 goto postaction;
4857 }
4858 }
4859 } else
4860 set_free_with_pinuse(p, psize, next);
4861                 insert_chunk(fm, p, psize);
4862                 check_free_chunk(fm, p);
4863                 goto postaction;
4864 }
4865 }
4866       erroraction:
4867         USAGE_ERROR_ACTION(fm, p);
4868       postaction:
4869 POSTACTION(fm);
4870 }
4871 }
4872}
4873
4874void *
4875mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
4876{
4877 void *mem;
4878 size_t req = 0;
4879 mstate ms = (mstate) msp;
4880 if (!ok_magic(ms)) {
4881 USAGE_ERROR_ACTION(ms, ms);
4882 return 0;
4883 }
4884 if (n_elements != 0) {
4885 req = n_elements * elem_size;
4886 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4887 (req / n_elements != elem_size))
4888 req = MAX_SIZE_T; /* force downstream failure on overflow */
4889 }
4890 mem = internal_malloc(ms, req);
4891 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4892 memset(mem, 0, req);
4893 return mem;
4894}
4895
4896void *
4897mspace_realloc(mspace msp, void *oldmem, size_t bytes)
4898{
4899 if (oldmem == 0)
4900 return mspace_malloc(msp, bytes);
4901#ifdef REALLOC_ZERO_BYTES_FREES
4902 if (bytes == 0) {
4903 mspace_free(msp, oldmem);
4904 return 0;
4905 }
4906#endif /* REALLOC_ZERO_BYTES_FREES */
4907 else {
4908#if FOOTERS
4909 mchunkptr p = mem2chunk(oldmem);
4910 mstate ms = get_mstate_for(p);
4911#else /* FOOTERS */
4912 mstate ms = (mstate) msp;
4913#endif /* FOOTERS */
4914 if (!ok_magic(ms)) {
4915 USAGE_ERROR_ACTION(ms, ms);
4916 return 0;
4917 }
4918 return internal_realloc(ms, oldmem, bytes);
4919 }
4920}
4921
4922void *
4923mspace_memalign(mspace msp, size_t alignment, size_t bytes)
4924{
4925 mstate ms = (mstate) msp;
4926 if (!ok_magic(ms)) {
4927 USAGE_ERROR_ACTION(ms, ms);
4928 return 0;
4929 }
4930 return internal_memalign(ms, alignment, bytes);
4931}
4932
4933void **
4934mspace_independent_calloc(mspace msp, size_t n_elements,
4935 size_t elem_size, void *chunks[])
4936{
4937 size_t sz = elem_size; /* serves as 1-element array */
4938 mstate ms = (mstate) msp;
4939 if (!ok_magic(ms)) {
4940 USAGE_ERROR_ACTION(ms, ms);
4941 return 0;
4942 }
4943 return ialloc(ms, n_elements, &sz, 3, chunks);
4944}
4945
4946void **
4947mspace_independent_comalloc(mspace msp, size_t n_elements,
4948 size_t sizes[], void *chunks[])
4949{
4950 mstate ms = (mstate) msp;
4951 if (!ok_magic(ms)) {
4952 USAGE_ERROR_ACTION(ms, ms);
4953 return 0;
4954 }
4955 return ialloc(ms, n_elements, sizes, 0, chunks);
4956}
4957
4958int
4959mspace_trim(mspace msp, size_t pad)
4960{
4961 int result = 0;
4962 mstate ms = (mstate) msp;
4963 if (ok_magic(ms)) {
4964 if (!PREACTION(ms)) {
4965 result = sys_trim(ms, pad);
4966 POSTACTION(ms);
4967 }
4968 } else {
4969 USAGE_ERROR_ACTION(ms, ms);
4970 }
4971 return result;
4972}
4973
4974void
4975mspace_malloc_stats(mspace msp)
4976{
4977 mstate ms = (mstate) msp;
4978     if (ok_magic(ms)) {
4979         internal_malloc_stats(ms);
4980     } else {
4981 USAGE_ERROR_ACTION(ms, ms);
4982 }
4983}
4984
4985size_t
4986mspace_footprint(mspace msp)
4987{
4988     size_t result = 0;
4989     mstate ms = (mstate) msp;
4990     if (ok_magic(ms)) {
4991         result = ms->footprint;
4992     } else
4993         USAGE_ERROR_ACTION(ms, ms);
4994     return result;
4995}
4996
4997
4998size_t
4999mspace_max_footprint(mspace msp)
5000{
5001     size_t result = 0;
5002     mstate ms = (mstate) msp;
5003     if (ok_magic(ms)) {
5004         result = ms->max_footprint;
5005     } else
5006         USAGE_ERROR_ACTION(ms, ms);
5007     return result;
5008}
5009
5010
5011#if !NO_MALLINFO
5012struct mallinfo
5013mspace_mallinfo(mspace msp)
5014{
5015 mstate ms = (mstate) msp;
5016 if (!ok_magic(ms)) {
5017 USAGE_ERROR_ACTION(ms, ms);
5018 }
5019 return internal_mallinfo(ms);
5020}
5021#endif /* NO_MALLINFO */
5022
5023int
5024mspace_mallopt(int param_number, int value)
5025{
5026 return change_mparam(param_number, value);
5027}
5028
5029#endif /* MSPACES */
5030
5031/* -------------------- Alternative MORECORE functions ------------------- */
5032
5033/*
5034 Guidelines for creating a custom version of MORECORE:
5035
5036 * For best performance, MORECORE should allocate in multiples of pagesize.
5037 * MORECORE may allocate more memory than requested. (Or even less,
5038 but this will usually result in a malloc failure.)
5039 * MORECORE must not allocate memory when given argument zero, but
5040 instead return one past the end address of memory from previous
5041 nonzero call.
5042 * For best performance, consecutive calls to MORECORE with positive
5043 arguments should return increasing addresses, indicating that
5044 space has been contiguously extended.
5045 * Even though consecutive calls to MORECORE need not return contiguous
5046 addresses, it must be OK for malloc'ed chunks to span multiple
5047 regions in those cases where they do happen to be contiguous.
5048 * MORECORE need not handle negative arguments -- it may instead
5049 just return MFAIL when given negative arguments.
5050 Negative arguments are always multiples of pagesize. MORECORE
5051 must not misinterpret negative args as large positive unsigned
5052 args. You can suppress all such calls from even occurring by defining
5053  MORECORE_CANNOT_TRIM.
5054
5055 As an example alternative MORECORE, here is a custom allocator
5056 kindly contributed for pre-OSX macOS. It uses virtually but not
5057 necessarily physically contiguous non-paged memory (locked in,
5058 present and won't get swapped out). You can use it by uncommenting
5059 this section, adding some #includes, and setting up the appropriate
5060 defines above:
5061
5062 #define MORECORE osMoreCore
5063
5064 There is also a shutdown routine that should somehow be called for
5065 cleanup upon program exit.
5066
5067 #define MAX_POOL_ENTRIES 100
5068 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
5069 static int next_os_pool;
5070 void *our_os_pools[MAX_POOL_ENTRIES];
5071
5072 void *osMoreCore(int size)
5073 {
5074 void *ptr = 0;
5075 static void *sbrk_top = 0;
5076
5077 if (size > 0)
5078 {
5079 if (size < MINIMUM_MORECORE_SIZE)
5080 size = MINIMUM_MORECORE_SIZE;
5081 if (CurrentExecutionLevel() == kTaskLevel)
5082 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5083 if (ptr == 0)
5084 {
5085 return (void *) MFAIL;
5086 }
5087 // save ptrs so they can be freed during cleanup
5088 our_os_pools[next_os_pool] = ptr;
5089 next_os_pool++;
5090 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5091 sbrk_top = (char *) ptr + size;
5092 return ptr;
5093 }
5094 else if (size < 0)
5095 {
5096 // we don't currently support shrink behavior
5097 return (void *) MFAIL;
5098 }
5099 else
5100 {
5101 return sbrk_top;
5102 }
5103 }
5104
5105 // cleanup any allocated memory pools
5106 // called as last thing before shutting down driver
5107
5108 void osCleanupMem(void)
5109 {
5110 void **ptr;
5111
5112 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5113 if (*ptr)
5114 {
5115 PoolDeallocate(*ptr);
5116 *ptr = 0;
5117 }
5118 }
5119
5120*/
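For contrast with the pool-based example above, a minimal conforming MORECORE can be built over a single static arena. The sketch below is illustrative only (the names example_arena/example_morecore and the 1 MB size are assumptions), and it is commented out just like the example above:

/*
  #define MORECORE example_morecore
  #define MORECORE_CONTIGUOUS 1
  #define MORECORE_CANNOT_TRIM

  static char example_arena[1024 * 1024];
  static size_t example_brk = 0;

  void *example_morecore(int size)
  {
      if (size == 0) {
          // zero means "report the current break": one past the end of
          // the memory returned by the previous nonzero call
          return (void *) (example_arena + example_brk);
      }
      if (size < 0 || (size_t) size > sizeof(example_arena) - example_brk) {
          // negative calls are suppressed by MORECORE_CANNOT_TRIM, and
          // growth past the arena fails; dlmalloc can then fall back to
          // mmap (see "Use mmap as backup to sbrk" in the history below)
          return (void *) MFAIL;
      }
      {
          void *ptr = example_arena + example_brk;
          example_brk += (size_t) size;
          return ptr;   // consecutive grants are contiguous, as preferred
      }
  }
*/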
5121
5122
5123/* -----------------------------------------------------------------------
5124History:
5125 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
5126 * Add max_footprint functions
5127 * Ensure all appropriate literals are size_t
5128 * Fix conditional compilation problem for some #define settings
5129 * Avoid concatenating segments with the one provided
5130 in create_mspace_with_base
5131 * Rename some variables to avoid compiler shadowing warnings
5132 * Use explicit lock initialization.
5133 * Better handling of sbrk interference.
5134 * Simplify and fix segment insertion, trimming and mspace_destroy
5135 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5136 * Thanks especially to Dennis Flanagan for help on these.
5137
5138 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
5139 * Fix memalign brace error.
5140
5141 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
5142 * Fix improper #endif nesting in C++
5143 * Add explicit casts needed for C++
5144
5145 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
5146 * Use trees for large bins
5147 * Support mspaces
5148 * Use segments to unify sbrk-based and mmap-based system allocation,
5149 removing need for emulation on most platforms without sbrk.
5150 * Default safety checks
5151 * Optional footer checks. Thanks to William Robertson for the idea.
5152 * Internal code refactoring
5153 * Incorporate suggestions and platform-specific changes.
5154 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5155 Aaron Bachmann, Emery Berger, and others.
5156 * Speed up non-fastbin processing enough to remove fastbins.
5157 * Remove useless cfree() to avoid conflicts with other apps.
5158 * Remove internal memcpy, memset. Compilers handle builtins better.
5159 * Remove some options that no one ever used and rename others.
5160
5161 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5162 * Fix malloc_state bitmap array misdeclaration
5163
5164 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5165 * Allow tuning of FIRST_SORTED_BIN_SIZE
5166 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5167 * Better detection and support for non-contiguousness of MORECORE.
5168 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5169 * Bypass most of malloc if no frees. Thanks to Emery Berger.
5170 * Fix freeing of old top non-contiguous chunk in sysmalloc.
5171 * Raised default trim and map thresholds to 256K.
5172 * Fix mmap-related #defines. Thanks to Lubos Lunak.
5173 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5174 * Branch-free bin calculation
5175 * Default trim and mmap thresholds now 256K.
5176
5177 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5178 * Introduce independent_comalloc and independent_calloc.
5179 Thanks to Michael Pachos for motivation and help.
5180 * Make optional .h file available
5181 * Allow > 2GB requests on 32bit systems.
5182 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5183 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5184 and Anonymous.
5185 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5186 helping test this.)
5187 * memalign: check alignment arg
5188 * realloc: don't try to shift chunks backwards, since this
5189 leads to more fragmentation in some programs and doesn't
5190 seem to help in any others.
5191 * Collect all cases in malloc requiring system memory into sysmalloc
5192 * Use mmap as backup to sbrk
5193 * Place all internal state in malloc_state
5194 * Introduce fastbins (although similar to 2.5.1)
5195 * Many minor tunings and cosmetic improvements
5196 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5197 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5198 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5199 * Include errno.h to support default failure action.
5200
5201 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5202 * return null for negative arguments
5203 * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5204 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5205 (e.g. WIN32 platforms)
5206 * Cleanup header file inclusion for WIN32 platforms
5207 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5208 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5209 memory allocation routines
5210 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5211 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5212 usage of 'assert' in non-WIN32 code
5213 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5214 avoid infinite loop
5215 * Always call 'fREe()' rather than 'free()'
5216
5217 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5218 * Fixed ordering problem with boundary-stamping
5219
5220 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5221 * Added pvalloc, as recommended by H.J. Liu
5222 * Added 64bit pointer support mainly from Wolfram Gloger
5223 * Added anonymously donated WIN32 sbrk emulation
5224 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5225 * malloc_extend_top: fix mask error that caused wastage after
5226 foreign sbrks
5227 * Add linux mremap support code from HJ Liu
5228
5229 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5230 * Integrated most documentation with the code.
5231 * Add support for mmap, with help from
5232 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5233 * Use last_remainder in more cases.
5234 * Pack bins using idea from colin@nyx10.cs.du.edu
5235 * Use ordered bins instead of best-fit threshold
5236 * Eliminate block-local decls to simplify tracing and debugging.
5237 * Support another case of realloc via move into top
5238 * Fix error occurring when initial sbrk_base not word-aligned.
5239 * Rely on page size for units instead of SBRK_UNIT to
5240 avoid surprises about sbrk alignment conventions.
5241 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5242 (raymond@es.ele.tue.nl) for the suggestion.
5243 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5244 * More precautions for cases where other routines call sbrk,
5245 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5246 * Added macros etc., allowing use in linux libc from
5247 H.J. Lu (hjl@gnu.ai.mit.edu)
5248 * Inverted this history list
5249
5250 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5251 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5252 * Removed all preallocation code since under current scheme
5253 the work required to undo bad preallocations exceeds
5254 the work saved in good cases for most test programs.
5255 * No longer use return list or unconsolidated bins since
5256 no scheme using them consistently outperforms those that don't
5257 given above changes.
5258 * Use best fit for very large chunks to prevent some worst-cases.
5259 * Added some support for debugging
5260
5261 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5262 * Removed footers when chunks are in use. Thanks to
5263 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5264
5265 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5266 * Added malloc_trim, with help from Wolfram Gloger
5267 (wmglo@Dent.MED.Uni-Muenchen.DE).
5268
5269 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5270
5271 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5272 * realloc: try to expand in both directions
5273 * malloc: swap order of clean-bin strategy;
5274 * realloc: only conditionally expand backwards
5275 * Try not to scavenge used bins
5276 * Use bin counts as a guide to preallocation
5277 * Occasionally bin return list chunks in first scan
5278 * Add a few optimizations from colin@nyx10.cs.du.edu
5279
5280 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5281 * faster bin computation & slightly different binning
5282 * merged all consolidations to one part of malloc proper
5283 (eliminating old malloc_find_space & malloc_clean_bin)
5284 * Scan 2 returns chunks (not just 1)
5285 * Propagate failure in realloc if malloc returns 0
5286 * Add stuff to allow compilation on non-ANSI compilers
5287 from kpv@research.att.com
5288
5289 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5290 * removed potential for odd address access in prev_chunk
5291 * removed dependency on getpagesize.h
5292 * misc cosmetics and a bit more internal documentation
5293 * anticosmetics: mangled names in macros to evade debugger strangeness
5294 * tested on sparc, hp-700, dec-mips, rs6000
5295 with gcc & native cc (hp, dec only) allowing
5296 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5297
5298 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5299 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5300 structure of old version, but most details differ.)
5301
5302*/
5303
5304#endif /* !HAVE_MALLOC */
5305
5306#ifdef HAVE_MALLOC
5307#define real_malloc malloc
5308#define real_calloc calloc
5309#define real_realloc realloc
5310#define real_free free
5311#else
5312#define real_malloc dlmalloc
5313#define real_calloc dlcalloc
5314#define real_realloc dlrealloc
5315#define real_free dlfree
5316#endif
5317
5318/* Memory functions used by SDL that can be replaced by the application */
5319 static struct
5320 {
5321     SDL_malloc_func malloc_func;
5322     SDL_calloc_func calloc_func;
5323     SDL_realloc_func realloc_func;
5324     SDL_free_func free_func;
5325     SDL_atomic_t num_allocations;
5326 } s_mem = {
5327     real_malloc, real_calloc, real_realloc, real_free, { 0 }
5328 };
5329
5330 void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
5331                             SDL_calloc_func *calloc_func,
5332                             SDL_realloc_func *realloc_func,
5333                             SDL_free_func *free_func)
5334{
5335 if (malloc_func) {
5336 *malloc_func = s_mem.malloc_func;
5337 }
5338 if (calloc_func) {
5339 *calloc_func = s_mem.calloc_func;
5340 }
5341 if (realloc_func) {
5342 *realloc_func = s_mem.realloc_func;
5343 }
5344 if (free_func) {
5345 *free_func = s_mem.free_func;
5346 }
5347}
5348
5349 int SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
5350                            SDL_calloc_func calloc_func,
5351                            SDL_realloc_func realloc_func,
5352                            SDL_free_func free_func)
5353{
5354 if (!malloc_func) {
5355 return SDL_InvalidParamError("malloc_func");
5356 }
5357 if (!calloc_func) {
5358 return SDL_InvalidParamError("calloc_func");
5359 }
5360 if (!realloc_func) {
5361 return SDL_InvalidParamError("realloc_func");
5362 }
5363 if (!free_func) {
5364 return SDL_InvalidParamError("free_func");
5365 }
5366
5367 s_mem.malloc_func = malloc_func;
5368 s_mem.calloc_func = calloc_func;
5369 s_mem.realloc_func = realloc_func;
5370 s_mem.free_func = free_func;
5371 return 0;
5372}
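As an application-side sketch of the two entry points above (illustrative only; the shim names are assumptions and error handling is elided): save the current set with SDL_GetMemoryFunctions(), delegate to it from custom shims, and install the shims with SDL_SetMemoryFunctions().

/* Application code, illustrative only -- not part of this file. */
#include "SDL_stdinc.h"

static SDL_malloc_func prev_malloc;
static SDL_free_func prev_free;

static void *shim_malloc(size_t size)
{
    /* a real shim might record the size here before delegating */
    return prev_malloc(size);
}

static void shim_free(void *mem)
{
    prev_free(mem);
}

static int install_shims(void)
{
    SDL_calloc_func prev_calloc;
    SDL_realloc_func prev_realloc;

    /* save the current set so the shims can delegate to it */
    SDL_GetMemoryFunctions(&prev_malloc, &prev_calloc, &prev_realloc, &prev_free);

    /* keep calloc/realloc unchanged; returns 0 on success */
    return SDL_SetMemoryFunctions(shim_malloc, prev_calloc,
                                  prev_realloc, shim_free);
}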
5373
5374 int SDL_GetNumAllocations(void)
5375{
5376 return SDL_AtomicGet(&s_mem.num_allocations);
5377}
5378
5379void *SDL_malloc(size_t size)
5380{
5381 void *mem;
5382
5383 if (!size) {
5384 size = 1;
5385 }
5386
5387 mem = s_mem.malloc_func(size);
5388 if (mem) {
5389 SDL_AtomicIncRef(&s_mem.num_allocations);
5390 }
5391 return mem;
5392}
5393
5394void *SDL_calloc(size_t nmemb, size_t size)
5395{
5396 void *mem;
5397
5398 if (!nmemb || !size) {
5399 nmemb = 1;
5400 size = 1;
5401 }
5402
5403 mem = s_mem.calloc_func(nmemb, size);
5404 if (mem) {
5405 SDL_AtomicIncRef(&s_mem.num_allocations);
5406 }
5407 return mem;
5408}
5409
5410void *SDL_realloc(void *ptr, size_t size)
5411{
5412 void *mem;
5413
5414 if (!ptr && !size) {
5415 size = 1;
5416 }
5417
5418 mem = s_mem.realloc_func(ptr, size);
5419 if (mem && !ptr) {
5420 SDL_AtomicIncRef(&s_mem.num_allocations);
5421 }
5422 return mem;
5423}
5424
5425void SDL_free(void *ptr)
5426{
5427 if (!ptr) {
5428 return;
5429 }
5430
5431 s_mem.free_func(ptr);
5432 (void)SDL_AtomicDecRef(&s_mem.num_allocations);
5433}
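Taken together, the wrappers above keep a count of outstanding allocations, and SDL_realloc() only increments it when given a NULL pointer (i.e. when it acts as a fresh allocation). A minimal single-threaded sketch of observing this with SDL_GetNumAllocations() (illustrative only):

/* Illustrative only: the counter rises on a successful fresh allocation,
   is unchanged by resizing, and falls on SDL_free(). */
static void example_count_allocations(void)
{
    int before = SDL_GetNumAllocations();

    void *p = SDL_malloc(16);   /* +1 on success */
    p = SDL_realloc(p, 32);     /* unchanged: ptr was non-NULL */
    SDL_free(p);                /* -1: back to `before` */

    (void) before;
}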
5434
5435/* vi: set ts=4 sw=4 expandtab: */