
modules/javafx.media/src/main/native/gstreamer/3rd_party/libffi/src/dlmalloc.c


 421   segregates relatively large chunks of memory so that they can be
 422   individually obtained and released from the host system. A request
 423   serviced through mmap is never reused by any other request (at least
 424   not directly; the system may just so happen to remap successive
 425   requests to the same locations).  Segregating space in this way has
 426   the benefits that: Mmapped space can always be individually released
 427   back to the system, which helps keep the system level memory demands
 428   of a long-lived program low.  Also, mapped memory doesn't become
 429   `locked' between other chunks, as can happen with normally allocated
 430   chunks, which means that even trimming via malloc_trim would not
 431   release them.  However, it has the disadvantage that the space
 432   cannot be reclaimed, consolidated, and then used to service later
 433   requests, as happens with normal chunks.  The advantages of mmap
 434   nearly always outweigh disadvantages for "large" chunks, but the
 435   value of "large" may vary across systems.  The default is an
 436   empirically derived value that works well in most systems. You can
 437   disable mmap by setting to MAX_SIZE_T.
 438 
 439 */
 440 
 441 #ifndef WIN32
 442 #ifdef _WIN32
 443 #define WIN32 1
 444 #endif  /* _WIN32 */
 445 #endif  /* WIN32 */
 446 #ifdef WIN32
 447 #define WIN32_LEAN_AND_MEAN
 448 #include <windows.h>
 449 #define HAVE_MMAP 1
 450 #define HAVE_MORECORE 0
 451 #define LACKS_UNISTD_H
 452 #define LACKS_SYS_PARAM_H
 453 #define LACKS_SYS_MMAN_H
 454 #define LACKS_STRING_H
 455 #define LACKS_STRINGS_H
 456 #define LACKS_SYS_TYPES_H
 457 #define LACKS_ERRNO_H
 458 #define MALLOC_FAILURE_ACTION
 459 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
 460 #endif  /* WIN32 */

2274 static void   do_check_tree(mstate m, tchunkptr t);
2275 static void   do_check_treebin(mstate m, bindex_t i);
2276 static void   do_check_smallbin(mstate m, bindex_t i);
2277 static void   do_check_malloc_state(mstate m);
2278 static int    bin_find(mstate m, mchunkptr x);
2279 static size_t traverse_and_check(mstate m);
2280 #endif /* DEBUG */
2281 
2282 /* ---------------------------- Indexing Bins ---------------------------- */
2283 
2284 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2285 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
2286 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
2287 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
2288 
2289 /* addressing by index. See above about smallbin repositioning */
2290 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2291 #define treebin_at(M,i)     (&((M)->treebins[i]))
2292 
2293 /* assign tree index for size S to variable I */
2294 #if defined(__GNUC__) && defined(i386)
2295 #define compute_tree_index(S, I)\
2296 {\
2297   size_t X = S >> TREEBIN_SHIFT;\
2298   if (X == 0)\
2299     I = 0;\
2300   else if (X > 0xFFFF)\
2301     I = NTREEBINS-1;\
2302   else {\
2303     unsigned int K;\
2304     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
2305     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2306   }\
2307 }
2308 #else /* GNUC */
2309 #define compute_tree_index(S, I)\
2310 {\
2311   size_t X = S >> TREEBIN_SHIFT;\
2312   if (X == 0)\
2313     I = 0;\
2314   else if (X > 0xFFFF)\

2339    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
2340    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2341 
2342 
2343 /* ------------------------ Operations on bin maps ----------------------- */
2344 
2345 /* bit corresponding to given index */
2346 #define idx2bit(i)              ((binmap_t)(1) << (i))
2347 
2348 /* Mark/Clear bits with given index */
2349 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
2350 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
2351 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
2352 
2353 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
2354 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
2355 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
2356 
2357 /* index corresponding to given bit */
2358 
2359 #if defined(__GNUC__) && defined(i386)
2360 #define compute_bit2idx(X, I)\
2361 {\
2362   unsigned int J;\
2363   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2364   I = (bindex_t)J;\
2365 }
2366 
2367 #else /* GNUC */
2368 #if  USE_BUILTIN_FFS
2369 #define compute_bit2idx(X, I) I = ffs(X)-1
2370 
2371 #else /* USE_BUILTIN_FFS */
2372 #define compute_bit2idx(X, I)\
2373 {\
2374   unsigned int Y = X - 1;\
2375   unsigned int K = Y >> (16-4) & 16;\
2376   unsigned int N = K;        Y >>= K;\
2377   N += K = Y >> (8-3) &  8;  Y >>= K;\
2378   N += K = Y >> (4-2) &  4;  Y >>= K;\
2379   N += K = Y >> (2-1) &  2;  Y >>= K;\

 421   segregates relatively large chunks of memory so that they can be
 422   individually obtained and released from the host system. A request
 423   serviced through mmap is never reused by any other request (at least
 424   not directly; the system may just so happen to remap successive
 425   requests to the same locations).  Segregating space in this way has
 426   the benefits that: Mmapped space can always be individually released
 427   back to the system, which helps keep the system level memory demands
 428   of a long-lived program low.  Also, mapped memory doesn't become
 429   `locked' between other chunks, as can happen with normally allocated
 430   chunks, which means that even trimming via malloc_trim would not
 431   release them.  However, it has the disadvantage that the space
 432   cannot be reclaimed, consolidated, and then used to service later
 433   requests, as happens with normal chunks.  The advantages of mmap
 434   nearly always outweigh disadvantages for "large" chunks, but the
 435   value of "large" may vary across systems.  The default is an
 436   empirically derived value that works well in most systems. You can
 437   disable mmap by setting to MAX_SIZE_T.
 438 
 439 */
 440 
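The "large" cutoff described above is dlmalloc's mmap threshold. As a hedged sketch only (the entry point may be renamed or hidden by the embedding build, e.g. via USE_DL_PREFIX, so the symbol and the M_MMAP_THRESHOLD value are taken from dlmalloc's own documentation rather than from this hunk), raising it at run time looks roughly like this; dlmalloc treats a value of -1 as MAX_SIZE_T, which is the "disable mmap" setting mentioned above:

/* Illustrative sketch, not code from this file. */
extern int dlmallopt(int param_number, int value);   /* dlmalloc's mallopt  */
#define M_MMAP_THRESHOLD (-3)                         /* per dlmalloc's docs */

static void disable_mmap_for_requests(void)
{
    /* -1 is dlmalloc's shorthand for MAX_SIZE_T, so no request size can
       ever reach the mmap path after this call. */
    (void)dlmallopt(M_MMAP_THRESHOLD, -1);
}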
 441 #if defined __linux__ && !defined _GNU_SOURCE
 442 /* mremap() on Linux requires this via sys/mman.h */
 443 #define _GNU_SOURCE 1
 444 #endif
 445 
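For illustration of what the new _GNU_SOURCE guard buys (a standalone sketch, not code from this file): with the macro defined before <sys/mman.h>, glibc declares mremap(), which allows an existing mapping to be resized, and possibly relocated by the kernel, without an intermediate copy.

#define _GNU_SOURCE 1
#include <sys/mman.h>
#include <stddef.h>

/* Grow (or, if needed, move) an anonymous mapping.  Returns MAP_FAILED on
   error, just as mmap() does. */
static void *grow_mapping(void *base, size_t old_size, size_t new_size)
{
    return mremap(base, old_size, new_size, MREMAP_MAYMOVE);
}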
 446 #ifndef WIN32
 447 #ifdef _WIN32
 448 #define WIN32 1
 449 #endif  /* _WIN32 */
 450 #endif  /* WIN32 */
 451 #ifdef WIN32
 452 #define WIN32_LEAN_AND_MEAN
 453 #include <windows.h>
 454 #define HAVE_MMAP 1
 455 #define HAVE_MORECORE 0
 456 #define LACKS_UNISTD_H
 457 #define LACKS_SYS_PARAM_H
 458 #define LACKS_SYS_MMAN_H
 459 #define LACKS_STRING_H
 460 #define LACKS_STRINGS_H
 461 #define LACKS_SYS_TYPES_H
 462 #define LACKS_ERRNO_H
 463 #define MALLOC_FAILURE_ACTION
 464 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
 465 #endif  /* WIN32 */
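The block above is dlmalloc's stock Windows configuration: every byte of system memory comes through the mmap-style path (HAVE_MMAP 1, implemented with VirtualAlloc elsewhere in the file), none through sbrk (HAVE_MORECORE 0), and the LACKS_*_H macros suppress Unix-only headers. As a hedged illustration only, a Unix embedder would typically express the equivalent knobs on the compile line rather than in-file, for example:

    cc -DHAVE_MMAP=1 -DHAVE_MORECORE=1 -DMMAP_CLEARS=1 -c dlmalloc.c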

2279 static void   do_check_tree(mstate m, tchunkptr t);
2280 static void   do_check_treebin(mstate m, bindex_t i);
2281 static void   do_check_smallbin(mstate m, bindex_t i);
2282 static void   do_check_malloc_state(mstate m);
2283 static int    bin_find(mstate m, mchunkptr x);
2284 static size_t traverse_and_check(mstate m);
2285 #endif /* DEBUG */
2286 
2287 /* ---------------------------- Indexing Bins ---------------------------- */
2288 
2289 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2290 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
2291 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
2292 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
2293 
2294 /* addressing by index. See above about smallbin repositioning */
2295 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2296 #define treebin_at(M,i)     (&((M)->treebins[i]))
2297 
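A hedged, standalone worked example of the small-bin arithmetic above, assuming dlmalloc's usual SMALLBIN_SHIFT of 3 and NSMALLBINS of 32 (so "small" means below 256 bytes); the concrete sizes are illustrative:

#include <assert.h>
#include <stddef.h>

int main(void)
{
    const size_t shift = 3;               /* SMALLBIN_SHIFT              */
    const size_t nbins = 32;              /* NSMALLBINS                  */
    size_t s = 200;                       /* a small chunk size          */

    assert((s >> shift) < nbins);         /* is_small(200) holds         */
    assert((s >> shift) == 25);           /* small_index(200) == 25      */
    assert(((size_t)25 << shift) == 200); /* small_index2size(25) == 200 */
    return 0;
}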
2298 /* assign tree index for size S to variable I */
2299 #if defined(__GNUC__) && defined(__i386__)
2300 #define compute_tree_index(S, I)\
2301 {\
2302   size_t X = S >> TREEBIN_SHIFT;\
2303   if (X == 0)\
2304     I = 0;\
2305   else if (X > 0xFFFF)\
2306     I = NTREEBINS-1;\
2307   else {\
2308     unsigned int K;\
2309     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
2310     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2311   }\
2312 }
2313 #else /* GNUC */
2314 #define compute_tree_index(S, I)\
2315 {\
2316   size_t X = S >> TREEBIN_SHIFT;\
2317   if (X == 0)\
2318     I = 0;\
2319   else if (X > 0xFFFF)\

2344    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
2345    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
2346 
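The compute_tree_index macros above (the x86 bsrl path is shown in full; the portable path is cut off by the hunk boundary) reduce to the following, sketched here with GCC's __builtin_clz purely for illustration; treebin_shift and ntreebins stand in for TREEBIN_SHIFT and NTREEBINS, and a 32-bit unsigned int is assumed:

#include <stddef.h>

static unsigned tree_index_for(size_t S, unsigned treebin_shift,
                               unsigned ntreebins)
{
    size_t X = S >> treebin_shift;
    if (X == 0)
        return 0;
    if (X > 0xFFFF)
        return ntreebins - 1;
    {
        /* K = position of the highest set bit of X, i.e. what bsrl yields. */
        unsigned K = 31u - (unsigned)__builtin_clz((unsigned)X);
        /* Interleave K with the next lower bit of S, as the macro does. */
        return (K << 1) + (unsigned)((S >> (K + treebin_shift - 1)) & 1);
    }
}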
2347 
2348 /* ------------------------ Operations on bin maps ----------------------- */
2349 
2350 /* bit corresponding to given index */
2351 #define idx2bit(i)              ((binmap_t)(1) << (i))
2352 
2353 /* Mark/Clear bits with given index */
2354 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
2355 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
2356 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
2357 
2358 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
2359 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
2360 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
2361 
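A hedged, standalone sketch of how these maps are consumed (the helper name is ad hoc): each bit records "bin i is non-empty", so searching for a usable bin at index i or above is a mask plus a lowest-set-bit lookup, which is what compute_bit2idx below supplies.

#include <stdint.h>

typedef uint32_t binmap_t;

/* Valid for bin indices 0..31; bits below index i are cleared, so any
   surviving bit identifies a non-empty candidate bin. */
static binmap_t bins_at_or_above(binmap_t map, unsigned i)
{
    return map & ~((((binmap_t)1) << i) - (binmap_t)1);
}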
2362 /* index corresponding to given bit */
2363 
2364 #if defined(__GNUC__) && defined(__i386__)
2365 #define compute_bit2idx(X, I)\
2366 {\
2367   unsigned int J;\
2368   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2369   I = (bindex_t)J;\
2370 }
2371 
2372 #else /* GNUC */
2373 #if  USE_BUILTIN_FFS
2374 #define compute_bit2idx(X, I) I = ffs(X)-1
2375 
2376 #else /* USE_BUILTIN_FFS */
2377 #define compute_bit2idx(X, I)\
2378 {\
2379   unsigned int Y = X - 1;\
2380   unsigned int K = Y >> (16-4) & 16;\
2381   unsigned int N = K;        Y >>= K;\
2382   N += K = Y >> (8-3) &  8;  Y >>= K;\
2383   N += K = Y >> (4-2) &  4;  Y >>= K;\
2384   N += K = Y >> (2-1) &  2;  Y >>= K;\
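A hedged postscript (the portable fallback above is cut off by the hunk boundary): for the single-bit values dlmalloc feeds in, every variant of compute_bit2idx, whether bsfl asm, ffs(), or the shift-and-accumulate fallback, amounts to "index of the lowest set bit". A GCC/Clang-only equivalent, for illustration:

#include <stdint.h>

static unsigned bit_to_index(uint32_t x)
{
    /* x must be nonzero, exactly as the macro's callers guarantee. */
    return (unsigned)__builtin_ctz(x);
}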