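/*
 * Memory allocator based on Doug Lea's public-domain dlmalloc (v2.8.x),
 * configured for this environment: no mmap (HAVE_MMAP 0), sbrk() as
 * MORECORE, and futex-based locking.  The block comments below are
 * condensed summaries of the upstream dlmalloc documentation.
 */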
00450 #include <sys/types.h>
00451
00453 #define LACKS_FCNTL_H
00454 #define LACKS_SYS_MMAN_H
00455 #define LACKS_SYS_PARAM_H
00456 #undef HAVE_MMAP
00457 #define HAVE_MMAP 0
00458 #define LACKS_ERRNO_H
00459
00460 #undef MALLOC_FAILURE_ACTION
00461 #define MALLOC_FAILURE_ACTION
00462
00463
00464 #define MAX_SIZE_T (~(size_t)0)
00465
00466 #define ONLY_MSPACES 0
00467 #define MSPACES 0
00468
00469 #ifdef MALLOC_ALIGNMENT_16
00470 #define MALLOC_ALIGNMENT ((size_t)16U)  /* return 16-byte aligned blocks */
00471 #else
00472 #define MALLOC_ALIGNMENT ((size_t)8U)   /* default: 8-byte alignment */
00473 #endif
00474
00475 #define FOOTERS 0
00476 #define ABORT abort()
00477 #define ABORT_ON_ASSERT_FAILURE 1
00478 #define PROCEED_ON_ERROR 0
00479 #define USE_LOCKS 1
00480 #define INSECURE 0
00481 #define HAVE_MMAP 0
00482
00483 #define MMAP_CLEARS 1
00484
00485 #define HAVE_MORECORE 1
00486 #define MORECORE_CONTIGUOUS 1
00487 #define MORECORE sbrk
00488 #define DEFAULT_GRANULARITY (0)
00489
00490 #ifndef DEFAULT_TRIM_THRESHOLD
00491 #ifndef MORECORE_CANNOT_TRIM
00492 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
00493 #else
00494 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
00495 #endif
00496 #endif
00497 #ifndef DEFAULT_MMAP_THRESHOLD
00498 #if HAVE_MMAP
00499 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
00500 #else
00501 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
00502 #endif
00503 #endif
00504 #ifndef USE_BUILTIN_FFS
00505 #define USE_BUILTIN_FFS 0
00506 #endif
00507 #ifndef USE_DEV_RANDOM
00508 #define USE_DEV_RANDOM 0
00509 #endif
00510 #ifndef NO_MALLINFO
00511 #define NO_MALLINFO 0
00512 #endif
00513 #ifndef MALLINFO_FIELD_TYPE
00514 #define MALLINFO_FIELD_TYPE size_t
00515 #endif
00516
00517
00518
00519
00520
00521
00522
00523
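/* Tuning parameter numbers accepted by mallopt()/change_mparam().
   Negative values are used so they cannot collide with the standard
   SVID/XPG mallopt option numbers. */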
00524 #define M_TRIM_THRESHOLD (-1)
00525 #define M_GRANULARITY (-2)
00526 #define M_MMAP_THRESHOLD (-3)
00527
00528
00529
00530
00531
00532
00533
00534
00535
00536 #include "malloc.h"
00537
00538
00539
00540 #include <stdio.h>
00541 #include <string.h>
00542
00543 #ifndef LACKS_ERRNO_H
00544 #include <errno.h>
00545 #endif
00546 #if FOOTERS
00547 #include <time.h>
00548 #endif
00549 #ifndef LACKS_STDLIB_H
00550 #include <stdlib.h>
00551 #endif
00552 #ifdef DEBUG
00553 #if ABORT_ON_ASSERT_FAILURE
00554 #define assert(x) do { if (!(x)) { printf("assertion failed: %s\n", #x); ABORT; } } while (0)
00555 #else
00556 #include <assert.h>
00557 #endif
00558 #else
00559 #define assert(x)
00560 #endif
00561 #if USE_BUILTIN_FFS
00562 #ifndef LACKS_STRINGS_H
00563 #include <strings.h>
00564 #endif
00565 #endif
00566 #if HAVE_MMAP
00567 #ifndef LACKS_SYS_MMAN_H
00568 #include <sys/mman.h>
00569 #endif
00570 #ifndef LACKS_FCNTL_H
00571 #include <fcntl.h>
00572 #endif
00573 #endif
00574 #if HAVE_MORECORE
00575 #ifndef LACKS_UNISTD_H
00576 #include <unistd.h>
00577 #else
00578 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
00579 extern void* sbrk(ptrdiff_t);
00580 #endif
00581 #endif
00582 #endif
00583
00584 #ifndef WIN32
00585 #ifndef malloc_getpagesize
00586 # ifdef _SC_PAGESIZE
00587 # ifndef _SC_PAGE_SIZE
00588 # define _SC_PAGE_SIZE _SC_PAGESIZE
00589 # endif
00590 # endif
00591 # ifdef _SC_PAGE_SIZE
00592 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
00593 # else
00594 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
00595 extern size_t getpagesize();
00596 # define malloc_getpagesize getpagesize()
00597 # else
00598 # ifdef WIN32
00599 # define malloc_getpagesize getpagesize()
00600 # else
00601 # ifndef LACKS_SYS_PARAM_H
00602 # include <sys/param.h>
00603 # endif
00604 # ifdef EXEC_PAGESIZE
00605 # define malloc_getpagesize EXEC_PAGESIZE
00606 # else
00607 # ifdef NBPG
00608 # ifndef CLSIZE
00609 # define malloc_getpagesize NBPG
00610 # else
00611 # define malloc_getpagesize (NBPG * CLSIZE)
00612 # endif
00613 # else
00614 # ifdef NBPC
00615 # define malloc_getpagesize NBPC
00616 # else
00617 # ifdef PAGESIZE
00618 # define malloc_getpagesize PAGESIZE
00619 # else
00620 # define malloc_getpagesize ((size_t)4096U)
00621 # endif
00622 # endif
00623 # endif
00624 # endif
00625 # endif
00626 # endif
00627 # endif
00628 #endif
00629 #endif
00630
00631
00632
00633
00634 #define SIZE_T_SIZE (sizeof(size_t))
00635 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
00636
00637
00638
00639 #define SIZE_T_ZERO ((size_t)0)
00640 #define SIZE_T_ONE ((size_t)1)
00641 #define SIZE_T_TWO ((size_t)2)
00642 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
00643 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
00644 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
00645 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
00646
00647
00648 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
00649
00650
00651 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
00652
00653
00654 #define align_offset(A)\
00655 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
00656 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
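/* MORECORE and MMAP must return MFAIL on failure. */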
00668 #define MFAIL ((void*)(MAX_SIZE_T))
00669 #define CMFAIL ((char*)(MFAIL))
00670
00671 #if !HAVE_MMAP
00672 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
00673 #define USE_MMAP_BIT (SIZE_T_ZERO)
00674 #define CALL_MMAP(s) MFAIL
00675 #define CALL_MUNMAP(a, s) (-1)
00676 #define DIRECT_MMAP(s) MFAIL
00677
00678 #else
00679 #define IS_MMAPPED_BIT (SIZE_T_ONE)
00680 #define USE_MMAP_BIT (SIZE_T_ONE)
00681
00682 #ifndef WIN32
00683 #define CALL_MUNMAP(a, s) munmap((a), (s))
00684 #define MMAP_PROT (PROT_READ|PROT_WRITE)
00685 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
00686 #define MAP_ANONYMOUS MAP_ANON
00687 #endif
00688 #ifdef MAP_ANONYMOUS
00689 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
00690 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
00691 #else
00692
00693
00694
00695
00696 #define MMAP_FLAGS (MAP_PRIVATE)
00697 static int dev_zero_fd = -1;
00698 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
00699 (dev_zero_fd = open("/dev/zero", O_RDWR), \
00700 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
00701 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
00702 #endif
00703
00704 #define DIRECT_MMAP(s) CALL_MMAP(s)
00705 #else
00706
00707
00708 static void* win32mmap(size_t size) {
00709 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
00710 return (ptr != 0)? ptr: MFAIL;
00711 }
00712
00713
00714 static void* win32direct_mmap(size_t size) {
00715 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
00716 PAGE_READWRITE);
00717 return (ptr != 0)? ptr: MFAIL;
00718 }
00719
00720
00721 static int win32munmap(void* ptr, size_t size) {
00722 MEMORY_BASIC_INFORMATION minfo;
00723 char* cptr = ptr;
00724 while (size) {
00725 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
00726 return -1;
00727 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
00728 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
00729 return -1;
00730 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
00731 return -1;
00732 cptr += minfo.RegionSize;
00733 size -= minfo.RegionSize;
00734 }
00735 return 0;
00736 }
00737
00738 #define CALL_MMAP(s) win32mmap(s)
00739 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
00740 #define DIRECT_MMAP(s) win32direct_mmap(s)
00741 #endif
00742 #endif
00743
00744 #if HAVE_MMAP && HAVE_MREMAP
00745 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
00746 #else
00747 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
00748 #endif
00749
00750 #if HAVE_MORECORE
00751 #define CALL_MORECORE(S) MORECORE(S)
00752 #else
00753 #define CALL_MORECORE(S) MFAIL
00754 #endif
00755
00756
00757 #define USE_NONCONTIGUOUS_BIT (4U)
00758
00759
00760 #define EXTERN_BIT (8U)
00761
00762
00763
00764
00765 #if USE_LOCKS
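/*
 * Locking: when USE_LOCKS is set, a futex protects each malloc_state
 * (gm->mutex for the global state), morecore_mutex serializes calls
 * to MORECORE/sbrk, and magic_init_mutex guards the one-time
 * initialization of mparams.
 */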
00782 #include <futex.h>
00783 #define MLOCK_T atomic_t
00784 #define INITIAL_LOCK(l) futex_initialize(l, 1)
00785
00786
00787
00788 #define ACQUIRE_LOCK(l) ({futex_down(l);0;})
00789 #define RELEASE_LOCK(l) futex_up(l)
00790
00791 #if HAVE_MORECORE
00792 static MLOCK_T morecore_mutex = FUTEX_INITIALIZER;
00793 #endif
00794
00795 static MLOCK_T magic_init_mutex = FUTEX_INITIALIZER;
00796
00797
00798 #define USE_LOCK_BIT (2U)
00799 #else
00800 #define USE_LOCK_BIT (0U)
00801 #define INITIAL_LOCK(l)
00802 #endif
00803
00804 #if USE_LOCKS && HAVE_MORECORE
00805 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
00806 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
00807 #else
00808 #define ACQUIRE_MORECORE_LOCK()
00809 #define RELEASE_MORECORE_LOCK()
00810 #endif
00811
00812 #if USE_LOCKS
00813 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
00814 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
00815 #else
00816 #define ACQUIRE_MAGIC_INIT_LOCK()
00817 #define RELEASE_MAGIC_INIT_LOCK()
00818 #endif
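/*
 * Chunk representation (boundary tags).  Every chunk records its size
 * and status in 'head': the low bits hold PINUSE_BIT (previous chunk
 * in use) and CINUSE_BIT (this chunk in use).  'prev_foot' replicates
 * the size of the preceding chunk while that chunk is free, so
 * adjacent free chunks can be coalesced backwards.  The fd/bk links
 * are used only while a chunk is free, to chain it into its bin.
 */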
00958 struct malloc_chunk {
00959 size_t prev_foot;
00960 size_t head;
00961 struct malloc_chunk* fd;
00962 struct malloc_chunk* bk;
00963 };
00964
00965 typedef struct malloc_chunk mchunk;
00966 typedef struct malloc_chunk* mchunkptr;
00967 typedef struct malloc_chunk* sbinptr;
00968 typedef unsigned int bindex_t;
00969 typedef unsigned int binmap_t;
00970 typedef unsigned int flag_t;
00971
00972
00973
00974 #define MCHUNK_SIZE (sizeof(mchunk))
00975
00976 #if FOOTERS
00977 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
00978 #else
00979 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
00980 #endif
00981
00982
00983 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
00984
00985 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
00986
00987
00988 #define MIN_CHUNK_SIZE\
00989 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
00990
00991
00992 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
00993 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
00994
00995 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
00996
00997
00998 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
00999 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
01000
01001
01002 #define pad_request(req) \
01003 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
01004
01005
01006 #define request2size(req) \
01007 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
01020 #define PINUSE_BIT (SIZE_T_ONE)
01021 #define CINUSE_BIT (SIZE_T_TWO)
01022 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
01023
01024
01025 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
01026
01027
01028 #define cinuse(p) ((p)->head & CINUSE_BIT)
01029 #define pinuse(p) ((p)->head & PINUSE_BIT)
01030 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
01031
01032 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
01033 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
01034
01035
01036 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
01037 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
01038
01039
01040 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
01041 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
01042
01043
01044 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
01045
01046
01047 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
01048 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
01049
01050
01051 #define set_size_and_pinuse_of_free_chunk(p, s)\
01052 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
01053
01054
01055 #define set_free_with_pinuse(p, s, n)\
01056 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
01057
01058 #define is_mmapped(p)\
01059 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
01060
01061
01062 #define overhead_for(p)\
01063 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
01064
01065
01066 #if MMAP_CLEARS
01067 #define calloc_must_clear(p) (!is_mmapped(p))
01068 #else
01069 #define calloc_must_clear(p) (1)
01070 #endif
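/*
 * Free chunks too large for the smallbins are kept in "treebins":
 * bitwise digital trees keyed on size.  A tree chunk extends the
 * basic chunk with two children, a parent pointer and the index of
 * its treebin; chunks of equal size hang off one node in a ring
 * linked through fd/bk.
 */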
01163 struct malloc_tree_chunk {
01164
01165 size_t prev_foot;
01166 size_t head;
01167 struct malloc_tree_chunk* fd;
01168 struct malloc_tree_chunk* bk;
01169
01170 struct malloc_tree_chunk* child[2];
01171 struct malloc_tree_chunk* parent;
01172 bindex_t index;
01173 };
01174
01175 typedef struct malloc_tree_chunk tchunk;
01176 typedef struct malloc_tree_chunk* tchunkptr;
01177 typedef struct malloc_tree_chunk* tbinptr;
01178
01179
01180 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
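/*
 * Each contiguous region obtained from the system (MORECORE or mmap)
 * is described by a malloc_segment.  Segments form a singly linked
 * list headed by the 'seg' field embedded in malloc_state.
 */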
01239 struct malloc_segment {
01240 char* base;
01241 size_t size;
01242 struct malloc_segment* next;
01243 flag_t sflags;
01244 };
01245
01246 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
01247 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
01248
01249 typedef struct malloc_segment msegment;
01250 typedef struct malloc_segment* msegmentptr;
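/*
 * Bin geometry and allocator state.  Free chunks smaller than
 * MIN_LARGE_SIZE live in one of NSMALLBINS doubly linked smallbins
 * (one per 8-byte size class); larger chunks go into NTREEBINS
 * treebins.  'smallmap'/'treemap' are bitmaps of non-empty bins,
 * 'dv' is the "designated victim" (the most recently split remainder,
 * preferred for small requests), and 'top' is the topmost free chunk
 * bordering unallocated space.
 */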
01328 #define NSMALLBINS (32U)
01329 #define NTREEBINS (32U)
01330 #define SMALLBIN_SHIFT (3U)
01331 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
01332 #define TREEBIN_SHIFT (8U)
01333 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
01334 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
01335 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
01336
01337 struct malloc_state {
01338 binmap_t smallmap;
01339 binmap_t treemap;
01340 size_t dvsize;
01341 size_t topsize;
01342 char* least_addr;
01343 mchunkptr dv;
01344 mchunkptr top;
01345 size_t trim_check;
01346 size_t magic;
01347 mchunkptr smallbins[(NSMALLBINS+1)*2];
01348 tbinptr treebins[NTREEBINS];
01349 size_t footprint;
01350 size_t max_footprint;
01351 flag_t mflags;
01352 #if USE_LOCKS
01353 MLOCK_T mutex;
01354 #endif
01355 msegment seg;
01356 };
01357
01358 typedef struct malloc_state* mstate;
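/* Run-time parameters, established once by init_mparams(). */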
01368 struct malloc_params {
01369 size_t magic;
01370 size_t page_size;
01371 size_t granularity;
01372 size_t mmap_threshold;
01373 size_t trim_threshold;
01374 flag_t default_mflags;
01375 };
01376
01377 static struct malloc_params mparams;
01378
01379
01380 static struct malloc_state _gm_;
01381 #define gm (&_gm_)
01382 #define is_global(M) ((M) == &_gm_)
01383 #define is_initialized(M) ((M)->top != 0)
01384
01385
01386
01387
01388
01389 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
01390 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
01391 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
01392
01393 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
01394 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
01395 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
01396
01397 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
01398 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
01399
01400 #define set_lock(M,L)\
01401 ((M)->mflags = (L)?\
01402 ((M)->mflags | USE_LOCK_BIT) :\
01403 ((M)->mflags & ~USE_LOCK_BIT))
01404
01405
01406 #define page_align(S)\
01407 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
01408
01409
01410 #define granularity_align(S)\
01411 (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
01412
01413 #define is_page_aligned(S)\
01414 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
01415 #define is_granularity_aligned(S)\
01416 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
01417
01418
01419 #define segment_holds(S, A)\
01420 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
01421
01422
01423 static msegmentptr segment_holding(mstate m, char* addr) {
01424 msegmentptr sp = &m->seg;
01425 for (;;) {
01426 if (addr >= sp->base && addr < sp->base + sp->size)
01427 return sp;
01428 if ((sp = sp->next) == 0)
01429 return 0;
01430 }
01431 }
01432
01433
01434 static int has_segment_link(mstate m, msegmentptr ss) {
01435 msegmentptr sp = &m->seg;
01436 for (;;) {
01437 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
01438 return 1;
01439 if ((sp = sp->next) == 0)
01440 return 0;
01441 }
01442 }
01443
01444 #ifndef MORECORE_CANNOT_TRIM
01445 #define should_trim(M,s) ((s) > (M)->trim_check)
01446 #else
01447 #define should_trim(M,s) (0)
01448 #endif
01449
01450
01451
01452
01453
01454
01455 #define TOP_FOOT_SIZE\
01456 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
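/*
 * PREACTION/POSTACTION bracket the public entry points: they make
 * sure mparams has been initialized and acquire/release the state
 * lock when locking is enabled.
 */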
01467 #if USE_LOCKS
01468
01469
01470 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
01471
01472 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
01473 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
01474 #else
01475
01476 #ifndef PREACTION
01477 #define PREACTION(M) (0)
01478 #endif
01479
01480 #ifndef POSTACTION
01481 #define POSTACTION(M)
01482 #endif
01483
01484 #endif
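/*
 * CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION run when the runtime
 * checks detect corrupted bookkeeping or API misuse (e.g. freeing a
 * bad pointer).  By default they ABORT; with PROCEED_ON_ERROR the
 * allocator instead attempts a best-effort reset.
 */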
01494 #if PROCEED_ON_ERROR
01495
01496
01497 int malloc_corruption_error_count;
01498
01499
01500 static void reset_on_error(mstate m);
01501
01502 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
01503 #define USAGE_ERROR_ACTION(m, p)
01504
01505 #else
01506
01507 #ifndef CORRUPTION_ERROR_ACTION
01508 #define CORRUPTION_ERROR_ACTION(m) ABORT
01509 #endif
01510
01511 #ifndef USAGE_ERROR_ACTION
01512 #define USAGE_ERROR_ACTION(m,p) ABORT
01513 #endif
01514
01515 #endif
01516
01517
01518
01519 #if ! DEBUG
01520
01521 #define check_free_chunk(M,P)
01522 #define check_inuse_chunk(M,P)
01523 #define check_malloced_chunk(M,P,N)
01524 #define check_mmapped_chunk(M,P)
01525 #define check_malloc_state(M)
01526 #define check_top_chunk(M,P)
01527
01528 #else
01529 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
01530 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
01531 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
01532 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
01533 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
01534 #define check_malloc_state(M) do_check_malloc_state(M)
01535
01536 static void do_check_any_chunk(mstate m, mchunkptr p);
01537 static void do_check_top_chunk(mstate m, mchunkptr p);
01538 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
01539 static void do_check_inuse_chunk(mstate m, mchunkptr p);
01540 static void do_check_free_chunk(mstate m, mchunkptr p);
01541 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
01542 static void do_check_tree(mstate m, tchunkptr t);
01543 static void do_check_treebin(mstate m, bindex_t i);
01544 static void do_check_smallbin(mstate m, bindex_t i);
01545 static void do_check_malloc_state(mstate m);
01546 static int bin_find(mstate m, mchunkptr x);
01547 static size_t traverse_and_check(mstate m);
01548 #endif
01549
01550
01551
01552 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
01553 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
01554 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
01555 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
01556
01557
01558 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
01559 #define treebin_at(M,i) (&((M)->treebins[i]))
01560
01561
01562 #if defined(__GNUC__) && defined(i386)
01563 #define compute_tree_index(S, I)\
01564 {\
01565 size_t X = S >> TREEBIN_SHIFT;\
01566 if (X == 0)\
01567 I = 0;\
01568 else if (X > 0xFFFF)\
01569 I = NTREEBINS-1;\
01570 else {\
01571 unsigned int K;\
01572 __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
01573 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01574 }\
01575 }
01576 #else
01577 #define compute_tree_index(S, I)\
01578 {\
01579 size_t X = S >> TREEBIN_SHIFT;\
01580 if (X == 0)\
01581 I = 0;\
01582 else if (X > 0xFFFF)\
01583 I = NTREEBINS-1;\
01584 else {\
01585 unsigned int Y = (unsigned int)X;\
01586 unsigned int N = ((Y - 0x100) >> 16) & 8;\
01587 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
01588 N += K;\
01589 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
01590 K = 14 - N + ((Y <<= K) >> 15);\
01591 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
01592 }\
01593 }
01594 #endif
01595
01596
01597 #define bit_for_tree_index(i) \
01598 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
01599
01600
01601 #define leftshift_for_tree_index(i) \
01602 ((i == NTREEBINS-1)? 0 : \
01603 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
01604
01605
01606 #define minsize_for_tree_index(i) \
01607 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
01608 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
01609
01610
01611
01612
01613
01614 #define idx2bit(i) ((binmap_t)(1) << (i))
01615
01616
01617 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
01618 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
01619 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
01620
01621 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
01622 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
01623 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
01624
01625
01626
01627 #if defined(__GNUC__) && defined(i386)
01628 #define compute_bit2idx(X, I)\
01629 {\
01630 unsigned int J;\
01631 __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
01632 I = (bindex_t)J;\
01633 }
01634
01635 #else
01636 #if USE_BUILTIN_FFS
01637 #define compute_bit2idx(X, I) I = ffs(X)-1
01638
01639 #else
01640 #define compute_bit2idx(X, I)\
01641 {\
01642 unsigned int Y = X - 1;\
01643 unsigned int K = Y >> (16-4) & 16;\
01644 unsigned int N = K; Y >>= K;\
01645 N += K = Y >> (8-3) & 8; Y >>= K;\
01646 N += K = Y >> (4-2) & 4; Y >>= K;\
01647 N += K = Y >> (2-1) & 2; Y >>= K;\
01648 N += K = Y >> (1-0) & 1; Y >>= K;\
01649 I = (bindex_t)(N + Y);\
01650 }
01651 #endif
01652 #endif
01653
01654
01655 #define least_bit(x) ((x) & -(x))
01656
01657
01658 #define left_bits(x) ((x<<1) | -(x<<1))
01659
01660
01661 #define same_or_left_bits(x) ((x) | -(x))
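/*
 * Lightweight sanity checks applied to addresses and chunk fields
 * read from user-reachable memory.  Unless INSECURE is set, they
 * cheaply reject many corrupted or hostile values before they are
 * dereferenced.
 */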
01692 #if !INSECURE
01693
01694 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
01695
01696 #define ok_next(p, n) ((char*)(p) < (char*)(n))
01697
01698 #define ok_cinuse(p) cinuse(p)
01699
01700 #define ok_pinuse(p) pinuse(p)
01701
01702 #else
01703 #define ok_address(M, a) (1)
01704 #define ok_next(b, n) (1)
01705 #define ok_cinuse(p) (1)
01706 #define ok_pinuse(p) (1)
01707 #endif
01708
01709 #if (FOOTERS && !INSECURE)
01710
01711 #define ok_magic(M) ((M)->magic == mparams.magic)
01712 #else
01713 #define ok_magic(M) (1)
01714 #endif
01715
01716
01717
01718 #if !INSECURE
01719 #if defined(__GNUC__) && __GNUC__ >= 3
01720 #define RTCHECK(e) __builtin_expect(e, 1)
01721 #else
01722 #define RTCHECK(e) (e)
01723 #endif
01724 #else
01725 #define RTCHECK(e) (1)
01726 #endif
01727
01728
01729
01730 #if !FOOTERS
01731
01732 #define mark_inuse_foot(M,p,s)
01733
01734
01735 #define set_inuse(M,p,s)\
01736 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01737 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01738
01739
01740 #define set_inuse_and_pinuse(M,p,s)\
01741 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01742 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01743
01744
01745 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01746 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
01747
01748 #else
01749
01750
01751 #define mark_inuse_foot(M,p,s)\
01752 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
01753
01754 #define get_mstate_for(p)\
01755 ((mstate)(((mchunkptr)((char*)(p) +\
01756 (chunksize(p))))->prev_foot ^ mparams.magic))
01757
01758 #define set_inuse(M,p,s)\
01759 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01760 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
01761 mark_inuse_foot(M,p,s))
01762
01763 #define set_inuse_and_pinuse(M,p,s)\
01764 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01765 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
01766 mark_inuse_foot(M,p,s))
01767
01768 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01769 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01770 mark_inuse_foot(M, p, s))
01771
01772 #endif
01773
01774
01775
01776
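/* ---------------------------- setting mparams -------------------------- */
/* One-time initialization: choose the magic value, determine the page
   size and allocation granularity, set default flags, and verify the
   compile-time size/alignment assumptions. */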
01777 static int init_mparams(void) {
01778 if (mparams.page_size == 0) {
01779 size_t s;
01780
01781 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
01782 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
01783 #if MORECORE_CONTIGUOUS
01784 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
01785 #else
01786 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
01787 #endif
01788
01789 #if (FOOTERS && !INSECURE)
01790 {
01791 #if USE_DEV_RANDOM
01792 int fd;
01793 unsigned char buf[sizeof(size_t)];
01794
01795 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
01796 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
01797 s = *((size_t *) buf);
01798 close(fd);
01799 }
01800 else
01801 #endif
01802 s = (size_t)(time(0) ^ (size_t)0x55555555U);
01803
01804 s |= (size_t)8U;
01805 s &= ~(size_t)7U;
01806
01807 }
01808 #else
01809 s = (size_t)0x58585858U;
01810 #endif
01811 ACQUIRE_MAGIC_INIT_LOCK();
01812 if (mparams.magic == 0) {
01813 mparams.magic = s;
01814
01815 INITIAL_LOCK(&gm->mutex);
01816 gm->mflags = mparams.default_mflags;
01817 }
01818 RELEASE_MAGIC_INIT_LOCK();
01819
01820 #ifndef WIN32
01821 mparams.page_size = malloc_getpagesize;
01822 mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
01823 DEFAULT_GRANULARITY : mparams.page_size);
01824 #else
01825 {
01826 SYSTEM_INFO system_info;
01827 GetSystemInfo(&system_info);
01828 mparams.page_size = system_info.dwPageSize;
01829 mparams.granularity = system_info.dwAllocationGranularity;
01830 }
01831 #endif
01832
01833
01834
01835
01836
01837
01838
01839 if ((sizeof(size_t) != sizeof(char*)) ||
01840 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
01841 (sizeof(int) < 4) ||
01842 (MALLOC_ALIGNMENT < (size_t)8U) ||
01843 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
01844 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
01845 ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
01846 ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
01847 ABORT;
01848 }
01849 return 0;
01850 }
01851
01852
01853 static int change_mparam(int param_number, int value) {
01854 size_t val = (size_t)value;
01855 init_mparams();
01856 switch(param_number) {
01857 case M_TRIM_THRESHOLD:
01858 mparams.trim_threshold = val;
01859 return 1;
01860 case M_GRANULARITY:
01861 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
01862 mparams.granularity = val;
01863 return 1;
01864 }
01865 else
01866 return 0;
01867 case M_MMAP_THRESHOLD:
01868 mparams.mmap_threshold = val;
01869 return 1;
01870 default:
01871 return 0;
01872 }
01873 }
01874
01875 #if DEBUG
01876
01877
01878
01879 static void do_check_any_chunk(mstate m, mchunkptr p) {
01880 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01881 assert(ok_address(m, p));
01882 }
01883
01884
01885 static void do_check_top_chunk(mstate m, mchunkptr p) {
01886 msegmentptr sp = segment_holding(m, (char*)p);
01887 size_t sz = chunksize(p);
01888 assert(sp != 0);
01889 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01890 assert(ok_address(m, p));
01891 assert(sz == m->topsize);
01892 assert(sz > 0);
01893 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
01894 assert(pinuse(p));
01895 assert(!next_pinuse(p));
01896 }
01897
01898
01899 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
01900 size_t sz = chunksize(p);
01901 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
01902 assert(is_mmapped(p));
01903 assert(use_mmap(m));
01904 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01905 assert(ok_address(m, p));
01906 assert(!is_small(sz));
01907 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
01908 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
01909 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
01910 }
01911
01912
01913 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
01914 do_check_any_chunk(m, p);
01915 assert(cinuse(p));
01916 assert(next_pinuse(p));
01917
01918 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
01919 if (is_mmapped(p))
01920 do_check_mmapped_chunk(m, p);
01921 }
01922
01923
01924 static void do_check_free_chunk(mstate m, mchunkptr p) {
01925 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
01926 mchunkptr next = chunk_plus_offset(p, sz);
01927 do_check_any_chunk(m, p);
01928 assert(!cinuse(p));
01929 assert(!next_pinuse(p));
01930 assert (!is_mmapped(p));
01931 if (p != m->dv && p != m->top) {
01932 if (sz >= MIN_CHUNK_SIZE) {
01933 assert((sz & CHUNK_ALIGN_MASK) == 0);
01934 assert(is_aligned(chunk2mem(p)));
01935 assert(next->prev_foot == sz);
01936 assert(pinuse(p));
01937 assert (next == m->top || cinuse(next));
01938 assert(p->fd->bk == p);
01939 assert(p->bk->fd == p);
01940 }
01941 else
01942 assert(sz == SIZE_T_SIZE);
01943 }
01944 }
01945
01946
01947 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
01948 if (mem != 0) {
01949 mchunkptr p = mem2chunk(mem);
01950 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
01951 do_check_inuse_chunk(m, p);
01952 assert((sz & CHUNK_ALIGN_MASK) == 0);
01953 assert(sz >= MIN_CHUNK_SIZE);
01954 assert(sz >= s);
01955
01956 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
01957 }
01958 }
01959
01960
01961 static void do_check_tree(mstate m, tchunkptr t) {
01962 tchunkptr head = 0;
01963 tchunkptr u = t;
01964 bindex_t tindex = t->index;
01965 size_t tsize = chunksize(t);
01966 bindex_t idx;
01967 compute_tree_index(tsize, idx);
01968 assert(tindex == idx);
01969 assert(tsize >= MIN_LARGE_SIZE);
01970 assert(tsize >= minsize_for_tree_index(idx));
01971 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
01972
01973 do {
01974 do_check_any_chunk(m, ((mchunkptr)u));
01975 assert(u->index == tindex);
01976 assert(chunksize(u) == tsize);
01977 assert(!cinuse(u));
01978 assert(!next_pinuse(u));
01979 assert(u->fd->bk == u);
01980 assert(u->bk->fd == u);
01981 if (u->parent == 0) {
01982 assert(u->child[0] == 0);
01983 assert(u->child[1] == 0);
01984 }
01985 else {
01986 assert(head == 0);
01987 head = u;
01988 assert(u->parent != u);
01989 assert (u->parent->child[0] == u ||
01990 u->parent->child[1] == u ||
01991 *((tbinptr*)(u->parent)) == u);
01992 if (u->child[0] != 0) {
01993 assert(u->child[0]->parent == u);
01994 assert(u->child[0] != u);
01995 do_check_tree(m, u->child[0]);
01996 }
01997 if (u->child[1] != 0) {
01998 assert(u->child[1]->parent == u);
01999 assert(u->child[1] != u);
02000 do_check_tree(m, u->child[1]);
02001 }
02002 if (u->child[0] != 0 && u->child[1] != 0) {
02003 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
02004 }
02005 }
02006 u = u->fd;
02007 } while (u != t);
02008 assert(head != 0);
02009 }
02010
02011
02012 static void do_check_treebin(mstate m, bindex_t i) {
02013 tbinptr* tb = treebin_at(m, i);
02014 tchunkptr t = *tb;
02015 int empty = (m->treemap & (1U << i)) == 0;
02016 if (t == 0)
02017 assert(empty);
02018 if (!empty)
02019 do_check_tree(m, t);
02020 }
02021
02022
02023 static void do_check_smallbin(mstate m, bindex_t i) {
02024 sbinptr b = smallbin_at(m, i);
02025 mchunkptr p = b->bk;
02026 unsigned int empty = (m->smallmap & (1U << i)) == 0;
02027 if (p == b)
02028 assert(empty);
02029 if (!empty) {
02030 for (; p != b; p = p->bk) {
02031 size_t size = chunksize(p);
02032 mchunkptr q;
02033
02034 do_check_free_chunk(m, p);
02035
02036 assert(small_index(size) == i);
02037 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
02038
02039 q = next_chunk(p);
02040 if (q->head != FENCEPOST_HEAD)
02041 do_check_inuse_chunk(m, q);
02042 }
02043 }
02044 }
02045
02046
02047 static int bin_find(mstate m, mchunkptr x) {
02048 size_t size = chunksize(x);
02049 if (is_small(size)) {
02050 bindex_t sidx = small_index(size);
02051 sbinptr b = smallbin_at(m, sidx);
02052 if (smallmap_is_marked(m, sidx)) {
02053 mchunkptr p = b;
02054 do {
02055 if (p == x)
02056 return 1;
02057 } while ((p = p->fd) != b);
02058 }
02059 }
02060 else {
02061 bindex_t tidx;
02062 compute_tree_index(size, tidx);
02063 if (treemap_is_marked(m, tidx)) {
02064 tchunkptr t = *treebin_at(m, tidx);
02065 size_t sizebits = size << leftshift_for_tree_index(tidx);
02066 while (t != 0 && chunksize(t) != size) {
02067 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
02068 sizebits <<= 1;
02069 }
02070 if (t != 0) {
02071 tchunkptr u = t;
02072 do {
02073 if (u == (tchunkptr)x)
02074 return 1;
02075 } while ((u = u->fd) != t);
02076 }
02077 }
02078 }
02079 return 0;
02080 }
02081
02082
02083 static size_t traverse_and_check(mstate m) {
02084 size_t sum = 0;
02085 if (is_initialized(m)) {
02086 msegmentptr s = &m->seg;
02087 sum += m->topsize + TOP_FOOT_SIZE;
02088 while (s != 0) {
02089 mchunkptr q = align_as_chunk(s->base);
02090 mchunkptr lastq = 0;
02091 assert(pinuse(q));
02092 while (segment_holds(s, q) &&
02093 q != m->top && q->head != FENCEPOST_HEAD) {
02094 sum += chunksize(q);
02095 if (cinuse(q)) {
02096 assert(!bin_find(m, q));
02097 do_check_inuse_chunk(m, q);
02098 }
02099 else {
02100 assert(q == m->dv || bin_find(m, q));
02101 assert(lastq == 0 || cinuse(lastq));
02102 do_check_free_chunk(m, q);
02103 }
02104 lastq = q;
02105 q = next_chunk(q);
02106 }
02107 s = s->next;
02108 }
02109 }
02110 return sum;
02111 }
02112
02113
02114 static void do_check_malloc_state(mstate m) {
02115 bindex_t i;
02116 size_t total;
02117
02118 for (i = 0; i < NSMALLBINS; ++i)
02119 do_check_smallbin(m, i);
02120 for (i = 0; i < NTREEBINS; ++i)
02121 do_check_treebin(m, i);
02122
02123 if (m->dvsize != 0) {
02124 do_check_any_chunk(m, m->dv);
02125 assert(m->dvsize == chunksize(m->dv));
02126 assert(m->dvsize >= MIN_CHUNK_SIZE);
02127 assert(bin_find(m, m->dv) == 0);
02128 }
02129
02130 if (m->top != 0) {
02131 do_check_top_chunk(m, m->top);
02132 assert(m->topsize == chunksize(m->top));
02133 assert(m->topsize > 0);
02134 assert(bin_find(m, m->top) == 0);
02135 }
02136
02137 total = traverse_and_check(m);
02138 assert(total <= m->footprint);
02139 assert(m->footprint <= m->max_footprint);
02140 }
02141 #endif
02142
02143
02144
02145 #if !NO_MALLINFO
02146 static struct mallinfo internal_mallinfo(mstate m) {
02147 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
02148 if (!PREACTION(m)) {
02149 check_malloc_state(m);
02150 if (is_initialized(m)) {
02151 size_t nfree = SIZE_T_ONE; /* top chunk always counts as free */
02152 size_t mfree = m->topsize + TOP_FOOT_SIZE;
02153 size_t sum = mfree;
02154 msegmentptr s = &m->seg;
02155 while (s != 0) {
02156 mchunkptr q = align_as_chunk(s->base);
02157 while (segment_holds(s, q) &&
02158 q != m->top && q->head != FENCEPOST_HEAD) {
02159 size_t sz = chunksize(q);
02160 sum += sz;
02161 if (!cinuse(q)) {
02162 mfree += sz;
02163 ++nfree;
02164 }
02165 q = next_chunk(q);
02166 }
02167 s = s->next;
02168 }
02169
02170 nm.arena = sum;
02171 nm.ordblks = nfree;
02172 nm.hblkhd = m->footprint - sum;
02173 nm.usmblks = m->max_footprint;
02174 nm.uordblks = m->footprint - mfree;
02175 nm.fordblks = mfree;
02176 nm.keepcost = m->topsize;
02177 }
02178
02179 POSTACTION(m);
02180 }
02181 return nm;
02182 }
02183 #endif
02184
02185 static void internal_malloc_stats(mstate m) {
02186 if (!PREACTION(m)) {
02187 size_t maxfp = 0;
02188 size_t fp = 0;
02189 size_t used = 0;
02190 check_malloc_state(m);
02191 if (is_initialized(m)) {
02192 msegmentptr s = &m->seg;
02193 maxfp = m->max_footprint;
02194 fp = m->footprint;
02195 used = fp - (m->topsize + TOP_FOOT_SIZE);
02196
02197 while (s != 0) {
02198 mchunkptr q = align_as_chunk(s->base);
02199 while (segment_holds(s, q) &&
02200 q != m->top && q->head != FENCEPOST_HEAD) {
02201 if (!cinuse(q))
02202 used -= chunksize(q);
02203 q = next_chunk(q);
02204 }
02205 s = s->next;
02206 }
02207 }
02208
02209 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
02210 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
02211 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
02212
02213 POSTACTION(m);
02214 }
02215 }
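/* ----------------------- Operations on smallbins ----------------------- */
/* Link free chunk P of size S into the front of its smallbin,
   validating the bin's current head pointer unless INSECURE. */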
02227 #define insert_small_chunk(M, P, S) {\
02228 bindex_t I = small_index(S);\
02229 mchunkptr B = smallbin_at(M, I);\
02230 mchunkptr F = B;\
02231 assert(S >= MIN_CHUNK_SIZE);\
02232 if (!smallmap_is_marked(M, I))\
02233 mark_smallmap(M, I);\
02234 else if (RTCHECK(ok_address(M, B->fd)))\
02235 F = B->fd;\
02236 else {\
02237 CORRUPTION_ERROR_ACTION(M);\
02238 }\
02239 B->fd = P;\
02240 F->bk = P;\
02241 P->fd = F;\
02242 P->bk = B;\
02243 }
02244
02245
02246 #define unlink_small_chunk(M, P, S) {\
02247 mchunkptr F = P->fd;\
02248 mchunkptr B = P->bk;\
02249 bindex_t I = small_index(S);\
02250 assert(P != B);\
02251 assert(P != F);\
02252 assert(chunksize(P) == small_index2size(I));\
02253 if (F == B)\
02254 clear_smallmap(M, I);\
02255 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
02256 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
02257 F->bk = B;\
02258 B->fd = F;\
02259 }\
02260 else {\
02261 CORRUPTION_ERROR_ACTION(M);\
02262 }\
02263 }
02264
02265
02266 #define unlink_first_small_chunk(M, B, P, I) {\
02267 mchunkptr F = P->fd;\
02268 assert(P != B);\
02269 assert(P != F);\
02270 assert(chunksize(P) == small_index2size(I));\
02271 if (B == F)\
02272 clear_smallmap(M, I);\
02273 else if (RTCHECK(ok_address(M, F))) {\
02274 B->fd = F;\
02275 F->bk = B;\
02276 }\
02277 else {\
02278 CORRUPTION_ERROR_ACTION(M);\
02279 }\
02280 }
02281
02282
02283
02284 #define replace_dv(M, P, S) {\
02285 size_t DVS = M->dvsize;\
02286 if (DVS != 0) {\
02287 mchunkptr DV = M->dv;\
02288 assert(is_small(DVS));\
02289 insert_small_chunk(M, DV, DVS);\
02290 }\
02291 M->dvsize = S;\
02292 M->dv = P;\
02293 }
02294
02295
02296
02297
02298 #define insert_large_chunk(M, X, S) {\
02299 tbinptr* H;\
02300 bindex_t I;\
02301 compute_tree_index(S, I);\
02302 H = treebin_at(M, I);\
02303 X->index = I;\
02304 X->child[0] = X->child[1] = 0;\
02305 if (!treemap_is_marked(M, I)) {\
02306 mark_treemap(M, I);\
02307 *H = X;\
02308 X->parent = (tchunkptr)H;\
02309 X->fd = X->bk = X;\
02310 }\
02311 else {\
02312 tchunkptr T = *H;\
02313 size_t K = S << leftshift_for_tree_index(I);\
02314 for (;;) {\
02315 if (chunksize(T) != S) {\
02316 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
02317 K <<= 1;\
02318 if (*C != 0)\
02319 T = *C;\
02320 else if (RTCHECK(ok_address(M, C))) {\
02321 *C = X;\
02322 X->parent = T;\
02323 X->fd = X->bk = X;\
02324 break;\
02325 }\
02326 else {\
02327 CORRUPTION_ERROR_ACTION(M);\
02328 break;\
02329 }\
02330 }\
02331 else {\
02332 tchunkptr F = T->fd;\
02333 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
02334 T->fd = F->bk = X;\
02335 X->fd = F;\
02336 X->bk = T;\
02337 X->parent = 0;\
02338 break;\
02339 }\
02340 else {\
02341 CORRUPTION_ERROR_ACTION(M);\
02342 break;\
02343 }\
02344 }\
02345 }\
02346 }\
02347 }
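/*
 * Unlink tree chunk X: if X has same-size siblings, splice it out of
 * their fd/bk ring; otherwise promote a chunk found by descending
 * X's rightmost path and repair the parent/child links (clearing the
 * treebin bit if the bin becomes empty).
 */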
02366 #define unlink_large_chunk(M, X) {\
02367 tchunkptr XP = X->parent;\
02368 tchunkptr R;\
02369 if (X->bk != X) {\
02370 tchunkptr F = X->fd;\
02371 R = X->bk;\
02372 if (RTCHECK(ok_address(M, F))) {\
02373 F->bk = R;\
02374 R->fd = F;\
02375 }\
02376 else {\
02377 CORRUPTION_ERROR_ACTION(M);\
02378 }\
02379 }\
02380 else {\
02381 tchunkptr* RP;\
02382 if (((R = *(RP = &(X->child[1]))) != 0) ||\
02383 ((R = *(RP = &(X->child[0]))) != 0)) {\
02384 tchunkptr* CP;\
02385 while ((*(CP = &(R->child[1])) != 0) ||\
02386 (*(CP = &(R->child[0])) != 0)) {\
02387 R = *(RP = CP);\
02388 }\
02389 if (RTCHECK(ok_address(M, RP)))\
02390 *RP = 0;\
02391 else {\
02392 CORRUPTION_ERROR_ACTION(M);\
02393 }\
02394 }\
02395 }\
02396 if (XP != 0) {\
02397 tbinptr* H = treebin_at(M, X->index);\
02398 if (X == *H) {\
02399 if ((*H = R) == 0) \
02400 clear_treemap(M, X->index);\
02401 }\
02402 else if (RTCHECK(ok_address(M, XP))) {\
02403 if (XP->child[0] == X) \
02404 XP->child[0] = R;\
02405 else \
02406 XP->child[1] = R;\
02407 }\
02408 else\
02409 CORRUPTION_ERROR_ACTION(M);\
02410 if (R != 0) {\
02411 if (RTCHECK(ok_address(M, R))) {\
02412 tchunkptr C0, C1;\
02413 R->parent = XP;\
02414 if ((C0 = X->child[0]) != 0) {\
02415 if (RTCHECK(ok_address(M, C0))) {\
02416 R->child[0] = C0;\
02417 C0->parent = R;\
02418 }\
02419 else\
02420 CORRUPTION_ERROR_ACTION(M);\
02421 }\
02422 if ((C1 = X->child[1]) != 0) {\
02423 if (RTCHECK(ok_address(M, C1))) {\
02424 R->child[1] = C1;\
02425 C1->parent = R;\
02426 }\
02427 else\
02428 CORRUPTION_ERROR_ACTION(M);\
02429 }\
02430 }\
02431 else\
02432 CORRUPTION_ERROR_ACTION(M);\
02433 }\
02434 }\
02435 }
02436
02437
02438
02439 #define insert_chunk(M, P, S)\
02440 if (is_small(S)) insert_small_chunk(M, P, S)\
02441 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
02442
02443 #define unlink_chunk(M, P, S)\
02444 if (is_small(S)) unlink_small_chunk(M, P, S)\
02445 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
02446
02447
02448
02449
02450 #if ONLY_MSPACES
02451 #define internal_malloc(m, b) mspace_malloc(m, b)
02452 #define internal_free(m, mem) mspace_free(m,mem);
02453 #else
02454 #if MSPACES
02455 #define internal_malloc(m, b)\
02456 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
02457 #define internal_free(m, mem)\
02458 if (m == gm) dlfree(mem); else mspace_free(m,mem);
02459 #else
02460 #define internal_malloc(m, b) dlmalloc(b)
02461 #define internal_free(m, mem) dlfree(mem)
02462 #endif
02463 #endif
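/* -----------------------  Direct-mmapping chunks ----------------------- */
/* Requests at or above mmap_threshold get their own mmapped region;
   the chunk stores its offset and IS_MMAPPED_BIT in prev_foot so the
   whole region can be munmapped on free.  With HAVE_MMAP set to 0 in
   this configuration these paths are effectively disabled. */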
02478 static void* mmap_alloc(mstate m, size_t nb) {
02479 size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02480 if (mmsize > nb) {
02481 char* mm = (char*)(DIRECT_MMAP(mmsize));
02482 if (mm != CMFAIL) {
02483 size_t offset = align_offset(chunk2mem(mm));
02484 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
02485 mchunkptr p = (mchunkptr)(mm + offset);
02486 p->prev_foot = offset | IS_MMAPPED_BIT;
02487 (p)->head = (psize|CINUSE_BIT);
02488 mark_inuse_foot(m, p, psize);
02489 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
02490 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
02491
02492 if (mm < m->least_addr)
02493 m->least_addr = mm;
02494 if ((m->footprint += mmsize) > m->max_footprint)
02495 m->max_footprint = m->footprint;
02496 assert(is_aligned(chunk2mem(p)));
02497 check_mmapped_chunk(m, p);
02498 return chunk2mem(p);
02499 }
02500 }
02501 return 0;
02502 }
02503
02504
02505 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
02506 size_t oldsize = chunksize(oldp);
02507 if (is_small(nb))
02508 return 0;
02509
02510 if (oldsize >= nb + SIZE_T_SIZE &&
02511 (oldsize - nb) <= (mparams.granularity << 1))
02512 return oldp;
02513 else {
02514 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
02515 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
02516 size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
02517 CHUNK_ALIGN_MASK);
02518 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
02519 oldmmsize, newmmsize, 1);
02520 if (cp != CMFAIL) {
02521 mchunkptr newp = (mchunkptr)(cp + offset);
02522 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
02523 newp->head = (psize|CINUSE_BIT);
02524 mark_inuse_foot(m, newp, psize);
02525 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
02526 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
02527
02528 if (cp < m->least_addr)
02529 m->least_addr = cp;
02530 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
02531 m->max_footprint = m->footprint;
02532 check_mmapped_chunk(m, newp);
02533 return newp;
02534 }
02535 }
02536 return 0;
02537 }
02538
02539
02540
02541
02542 static void init_top(mstate m, mchunkptr p, size_t psize) {
02543
02544 size_t offset = align_offset(chunk2mem(p));
02545 p = (mchunkptr)((char*)p + offset);
02546 psize -= offset;
02547
02548 m->top = p;
02549 m->topsize = psize;
02550 p->head = psize | PINUSE_BIT;
02551
02552 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
02553 m->trim_check = mparams.trim_threshold;
02554 }
02555
02556
02557 static void init_bins(mstate m) {
02558
02559 bindex_t i;
02560 for (i = 0; i < NSMALLBINS; ++i) {
02561 sbinptr bin = smallbin_at(m,i);
02562 bin->fd = bin->bk = bin;
02563 }
02564 }
02565
02566 #if PROCEED_ON_ERROR
02567
02568
02569 static void reset_on_error(mstate m) {
02570 int i;
02571 ++malloc_corruption_error_count;
02572
02573 m->smallbins = m->treebins = 0;
02574 m->dvsize = m->topsize = 0;
02575 m->seg.base = 0;
02576 m->seg.size = 0;
02577 m->seg.next = 0;
02578 m->top = m->dv = 0;
02579 for (i = 0; i < NTREEBINS; ++i)
02580 *treebin_at(m, i) = 0;
02581 init_bins(m);
02582 }
02583 #endif
02584
02585
02586 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
02587 size_t nb) {
02588 mchunkptr p = align_as_chunk(newbase);
02589 mchunkptr oldfirst = align_as_chunk(oldbase);
02590 size_t psize = (char*)oldfirst - (char*)p;
02591 mchunkptr q = chunk_plus_offset(p, nb);
02592 size_t qsize = psize - nb;
02593 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02594
02595 assert((char*)oldfirst > (char*)q);
02596 assert(pinuse(oldfirst));
02597 assert(qsize >= MIN_CHUNK_SIZE);
02598
02599
02600 if (oldfirst == m->top) {
02601 size_t tsize = m->topsize += qsize;
02602 m->top = q;
02603 q->head = tsize | PINUSE_BIT;
02604 check_top_chunk(m, q);
02605 }
02606 else if (oldfirst == m->dv) {
02607 size_t dsize = m->dvsize += qsize;
02608 m->dv = q;
02609 set_size_and_pinuse_of_free_chunk(q, dsize);
02610 }
02611 else {
02612 if (!cinuse(oldfirst)) {
02613 size_t nsize = chunksize(oldfirst);
02614 unlink_chunk(m, oldfirst, nsize);
02615 oldfirst = chunk_plus_offset(oldfirst, nsize);
02616 qsize += nsize;
02617 }
02618 set_free_with_pinuse(q, qsize, oldfirst);
02619 insert_chunk(m, q, qsize);
02620 check_free_chunk(m, q);
02621 }
02622
02623 check_malloced_chunk(m, chunk2mem(p), nb);
02624 return chunk2mem(p);
02625 }
02626
02627
02628
02629 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
02630
02631 char* old_top = (char*)m->top;
02632 msegmentptr oldsp = segment_holding(m, old_top);
02633 char* old_end = oldsp->base + oldsp->size;
02634 size_t ssize = pad_request(sizeof(struct malloc_segment));
02635 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02636 size_t offset = align_offset(chunk2mem(rawsp));
02637 char* asp = rawsp + offset;
02638 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
02639 mchunkptr sp = (mchunkptr)csp;
02640 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
02641 mchunkptr tnext = chunk_plus_offset(sp, ssize);
02642 mchunkptr p = tnext;
02643 int nfences = 0;
02644
02645
02646 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02647
02648
02649 assert(is_aligned(ss));
02650 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
02651 *ss = m->seg;
02652 m->seg.base = tbase;
02653 m->seg.size = tsize;
02654 m->seg.sflags = mmapped;
02655 m->seg.next = ss;
02656
02657
02658 for (;;) {
02659 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
02660 p->head = FENCEPOST_HEAD;
02661 ++nfences;
02662 if ((char*)(&(nextp->head)) < old_end)
02663 p = nextp;
02664 else
02665 break;
02666 }
02667 assert(nfences >= 2);
02668
02669
02670 if (csp != old_top) {
02671 mchunkptr q = (mchunkptr)old_top;
02672 size_t psize = csp - old_top;
02673 mchunkptr tn = chunk_plus_offset(q, psize);
02674 set_free_with_pinuse(q, psize, tn);
02675 insert_chunk(m, q, psize);
02676 }
02677
02678 check_top_chunk(m, m->top);
02679 }
02680
02681
02682
02683
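/* Get memory from the system (MORECORE and/or mmap) and install it in
   the malloc_state as a new segment or as an extension of an existing
   one. */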
02684 static void* sys_alloc(mstate m, size_t nb) {
02685 char* tbase = CMFAIL;
02686 size_t tsize = 0;
02687 flag_t mmap_flag = 0;
02688
02689 init_mparams();
02690
02691
02692 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
02693 void* mem = mmap_alloc(m, nb);
02694 if (mem != 0)
02695 return mem;
02696 }
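/* Try in order: (1) extend the heap contiguously with MORECORE,
   (2) fall back to mmap, (3) make a non-contiguous MORECORE call.
   Whatever space is obtained becomes, or is merged into, the top
   chunk below. */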
02715 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
02716 char* br = CMFAIL;
02717 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
02718 size_t asize = 0;
02719 ACQUIRE_MORECORE_LOCK();
02720
02721 if (ss == 0) {
02722 char* base = (char*)CALL_MORECORE(0);
02723 if (base != CMFAIL) {
02724 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
02725
02726 if (!is_page_aligned(base))
02727 asize += (page_align((size_t)base) - (size_t)base);
02728
02729 if (asize < HALF_MAX_SIZE_T &&
02730 (br = (char*)(CALL_MORECORE(asize))) == base) {
02731 tbase = base;
02732 tsize = asize;
02733 }
02734 }
02735 }
02736 else {
02737
02738 asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
02739
02740 if (asize < HALF_MAX_SIZE_T &&
02741 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
02742 tbase = br;
02743 tsize = asize;
02744 }
02745 }
02746
02747 if (tbase == CMFAIL) {
02748 if (br != CMFAIL) {
02749 if (asize < HALF_MAX_SIZE_T &&
02750 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
02751 size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
02752 if (esize < HALF_MAX_SIZE_T) {
02753 char* end = (char*)CALL_MORECORE(esize);
02754 if (end != CMFAIL)
02755 asize += esize;
02756 else {
02757 CALL_MORECORE(-asize);
02758 br = CMFAIL;
02759 }
02760 }
02761 }
02762 }
02763 if (br != CMFAIL) {
02764 tbase = br;
02765 tsize = asize;
02766 }
02767 else
02768 disable_contiguous(m);
02769 }
02770
02771 RELEASE_MORECORE_LOCK();
02772 }
02773
02774 if (HAVE_MMAP && tbase == CMFAIL) {
02775 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
02776 size_t rsize = granularity_align(req);
02777 if (rsize > nb) {
02778 char* mp = (char*)(CALL_MMAP(rsize));
02779 if (mp != CMFAIL) {
02780 tbase = mp;
02781 tsize = rsize;
02782 mmap_flag = IS_MMAPPED_BIT;
02783 }
02784 }
02785 }
02786
02787 if (HAVE_MORECORE && tbase == CMFAIL) {
02788 size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
02789 if (asize < HALF_MAX_SIZE_T) {
02790 char* br = CMFAIL;
02791 char* end = CMFAIL;
02792 ACQUIRE_MORECORE_LOCK();
02793 br = (char*)(CALL_MORECORE(asize));
02794 end = (char*)(CALL_MORECORE(0));
02795 RELEASE_MORECORE_LOCK();
02796 if (br != CMFAIL && end != CMFAIL && br < end) {
02797 size_t ssize = end - br;
02798 if (ssize > nb + TOP_FOOT_SIZE) {
02799 tbase = br;
02800 tsize = ssize;
02801 }
02802 }
02803 }
02804 }
02805
02806 if (tbase != CMFAIL) {
02807
02808 if ((m->footprint += tsize) > m->max_footprint)
02809 m->max_footprint = m->footprint;
02810
02811 if (!is_initialized(m)) {
02812 m->seg.base = m->least_addr = tbase;
02813 m->seg.size = tsize;
02814 m->seg.sflags = mmap_flag;
02815 m->magic = mparams.magic;
02816 init_bins(m);
02817 if (is_global(m))
02818 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02819 else {
02820
02821 mchunkptr mn = next_chunk(mem2chunk(m));
02822 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
02823 }
02824 }
02825
02826 else {
02827
02828 msegmentptr sp = &m->seg;
02829 while (sp != 0 && tbase != sp->base + sp->size)
02830 sp = sp->next;
02831 if (sp != 0 &&
02832 !is_extern_segment(sp) &&
02833 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
02834 segment_holds(sp, m->top)) {
02835 sp->size += tsize;
02836 init_top(m, m->top, m->topsize + tsize);
02837 }
02838 else {
02839 if (tbase < m->least_addr)
02840 m->least_addr = tbase;
02841 sp = &m->seg;
02842 while (sp != 0 && sp->base != tbase + tsize)
02843 sp = sp->next;
02844 if (sp != 0 &&
02845 !is_extern_segment(sp) &&
02846 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
02847 char* oldbase = sp->base;
02848 sp->base = tbase;
02849 sp->size += tsize;
02850 return prepend_alloc(m, tbase, oldbase, nb);
02851 }
02852 else
02853 add_segment(m, tbase, tsize, mmap_flag);
02854 }
02855 }
02856
02857 if (nb < m->topsize) { /* Allocate from new or extended top space */
02858 size_t rsize = m->topsize -= nb;
02859 mchunkptr p = m->top;
02860 mchunkptr r = m->top = chunk_plus_offset(p, nb);
02861 r->head = rsize | PINUSE_BIT;
02862 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02863 check_top_chunk(m, m->top);
02864 check_malloced_chunk(m, chunk2mem(p), nb);
02865 return chunk2mem(p);
02866 }
02867 }
02868
02869 MALLOC_FAILURE_ACTION;
02870 return 0;
02871 }
02872
02873
02874 /* -----------------------  system deallocation -------------------------- */
02875 /* Unmap and unlink any mmapped segments that don't contain used chunks */
02876 static size_t release_unused_segments(mstate m) {
02877 size_t released = 0;
02878 msegmentptr pred = &m->seg;
02879 msegmentptr sp = pred->next;
02880 while (sp != 0) {
02881 char* base = sp->base;
02882 size_t size = sp->size;
02883 msegmentptr next = sp->next;
02884 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
02885 mchunkptr p = align_as_chunk(base);
02886 size_t psize = chunksize(p);
02887 /* Can unmap if first chunk holds entire segment and is not pinned */
02888 if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
02889 tchunkptr tp = (tchunkptr)p;
02890 assert(segment_holds(sp, (char*)sp));
02891 if (p == m->dv) {
02892 m->dv = 0;
02893 m->dvsize = 0;
02894 }
02895 else {
02896 unlink_large_chunk(m, tp);
02897 }
02898 if (CALL_MUNMAP(base, size) == 0) {
02899 released += size;
02900 m->footprint -= size;
02901 /* unlink obsoleted record */
02902 sp = pred;
02903 sp->next = next;
02904 }
02905 else { /* back out if cannot unmap */
02906 insert_large_chunk(m, tp, psize);
02907 }
02908 }
02909 }
02910 pred = sp;
02911 sp = next;
02912 }
02913 return released;
02914 }
02915
02916 static int sys_trim(mstate m, size_t pad) {
02917 size_t released = 0;
02918 if (pad < MAX_REQUEST && is_initialized(m)) {
02919 pad += TOP_FOOT_SIZE;
02920
02921 if (m->topsize > pad) {
02922 /* Shrink top space in granularity-size units, keeping at least one unit */
02923 size_t unit = mparams.granularity;
02924 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
02925 SIZE_T_ONE) * unit;
02926 msegmentptr sp = segment_holding(m, (char*)m->top);
02927
02928 if (!is_extern_segment(sp)) {
02929 if (is_mmapped_segment(sp)) {
02930 if (HAVE_MMAP &&
02931 sp->size >= extra &&
02932 !has_segment_link(m, sp)) {
02933 /* Prefer mremap to shrink in place, fall back to unmapping the tail */
02934 if ((CALL_MREMAP(sp->base, sp->size, sp->size - extra, 0) != MFAIL) ||
02935 (CALL_MUNMAP(sp->base + sp->size - extra, extra) == 0)) {
02936 released = extra;
02937 }
02938 }
02939 }
02940 else if (HAVE_MORECORE) {
02941 if (extra >= HALF_MAX_SIZE_T)
02942 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
02943 ACQUIRE_MORECORE_LOCK();
02944 {
02945 /* Make sure end of memory is where we last set it */
02946 char* old_br = (char*)(CALL_MORECORE(0));
02947 if (old_br == sp->base + sp->size) {
02948 char* rel_br = (char*)(CALL_MORECORE(-extra));
02949 char* new_br = (char*)(CALL_MORECORE(0));
02950 if (rel_br != CMFAIL && new_br < old_br)
02951 released = old_br - new_br;
02952 }
02953 }
02954 RELEASE_MORECORE_LOCK();
02955 }
02956 }
02957
02958 if (released != 0) {
02959 sp->size -= released;
02960 m->footprint -= released;
02961 init_top(m, m->top, m->topsize - released);
02962 check_top_chunk(m, m->top);
02963 }
02964 }
02965
02966 /* Unmap any unused mmapped segments */
02967 if (HAVE_MMAP)
02968 released += release_unused_segments(m);
02969
02970 /* On failure, disable autotrim to avoid repeated failed future calls */
02971 if (released == 0)
02972 m->trim_check = MAX_SIZE_T;
02973 }
02974
02975 return (released != 0)? 1 : 0;
02976 }
02977
02978
02979 /* ---------------------------- malloc support --------------------------- */
02980 /* allocate a large request from the best fitting chunk in a treebin */
02981 static void* tmalloc_large(mstate m, size_t nb) {
02982 tchunkptr v = 0;
02983 size_t rsize = -nb; /* Unsigned negation */
02984 tchunkptr t;
02985 bindex_t idx;
02986 compute_tree_index(nb, idx);
02987
02988 if ((t = *treebin_at(m, idx)) != 0) {
02989 /* Traverse tree for this bin looking for node with size == nb */
02990 size_t sizebits = nb << leftshift_for_tree_index(idx);
02991 tchunkptr rst = 0; /* The deepest untaken right subtree */
02992 for (;;) {
02993 tchunkptr rt;
02994 size_t trem = chunksize(t) - nb;
02995 if (trem < rsize) {
02996 v = t;
02997 if ((rsize = trem) == 0)
02998 break;
02999 }
03000 rt = t->child[1];
03001 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
03002 if (rt != 0 && rt != t)
03003 rst = rt;
03004 if (t == 0) {
03005 t = rst;
03006 break;
03007 }
03008 sizebits <<= 1;
03009 }
03010 }
03011 /* set t to root of next non-empty treebin */
03012 if (t == 0 && v == 0) {
03013 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
03014 if (leftbits != 0) {
03015 bindex_t i;
03016 binmap_t leastbit = least_bit(leftbits);
03017 compute_bit2idx(leastbit, i);
03018 t = *treebin_at(m, i);
03019 }
03020 }
03021 /* find smallest of tree or subtree */
03022 while (t != 0) {
03023 size_t trem = chunksize(t) - nb;
03024 if (trem < rsize) {
03025 rsize = trem;
03026 v = t;
03027 }
03028 t = leftmost_child(t);
03029 }
03030
03031 /* If dv is a better fit, return 0 so malloc will use it */
03032 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
03033 if (RTCHECK(ok_address(m, v))) {
03034 mchunkptr r = chunk_plus_offset(v, nb);
03035 assert(chunksize(v) == rsize + nb);
03036 if (RTCHECK(ok_next(v, r))) {
03037 unlink_large_chunk(m, v);
03038 if (rsize < MIN_CHUNK_SIZE)
03039 set_inuse_and_pinuse(m, v, (rsize + nb));
03040 else {
03041 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
03042 set_size_and_pinuse_of_free_chunk(r, rsize);
03043 insert_chunk(m, r, rsize);
03044 }
03045 return chunk2mem(v);
03046 }
03047 }
03048 CORRUPTION_ERROR_ACTION(m);
03049 }
03050 return 0;
03051 }
03052
03053 /* allocate a small request from the best fitting chunk in a treebin */
03054 static void* tmalloc_small(mstate m, size_t nb) {
03055 tchunkptr t, v;
03056 size_t rsize;
03057 bindex_t i;
03058 binmap_t leastbit = least_bit(m->treemap);
03059 compute_bit2idx(leastbit, i);
03060
03061 v = t = *treebin_at(m, i);
03062 rsize = chunksize(t) - nb;
03063
03064 while ((t = leftmost_child(t)) != 0) {
03065 size_t trem = chunksize(t) - nb;
03066 if (trem < rsize) {
03067 rsize = trem;
03068 v = t;
03069 }
03070 }
03071
03072 if (RTCHECK(ok_address(m, v))) {
03073 mchunkptr r = chunk_plus_offset(v, nb);
03074 assert(chunksize(v) == rsize + nb);
03075 if (RTCHECK(ok_next(v, r))) {
03076 unlink_large_chunk(m, v);
03077 if (rsize < MIN_CHUNK_SIZE)
03078 set_inuse_and_pinuse(m, v, (rsize + nb));
03079 else {
03080 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
03081 set_size_and_pinuse_of_free_chunk(r, rsize);
03082 replace_dv(m, r, rsize);
03083 }
03084 return chunk2mem(v);
03085 }
03086 }
03087
03088 CORRUPTION_ERROR_ACTION(m);
03089 return 0;
03090 }
03091
03092
03093 /* --------------------------- realloc support --------------------------- */
03094 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
03095 if (bytes >= MAX_REQUEST) {
03096 MALLOC_FAILURE_ACTION;
03097 return 0;
03098 }
03099 if (!PREACTION(m)) {
03100 mchunkptr oldp = mem2chunk(oldmem);
03101 size_t oldsize = chunksize(oldp);
03102 mchunkptr next = chunk_plus_offset(oldp, oldsize);
03103 mchunkptr newp = 0;
03104 void* extra = 0;
03105
03106 /* Try to either shrink or extend into top. Else malloc-copy-free */
03107
03108 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
03109 ok_next(oldp, next) && ok_pinuse(next))) {
03110 size_t nb = request2size(bytes);
03111 if (is_mmapped(oldp))
03112 newp = mmap_resize(m, oldp, nb);
03113 else if (oldsize >= nb) {
03114 size_t rsize = oldsize - nb;
03115 newp = oldp;
03116 if (rsize >= MIN_CHUNK_SIZE) {
03117 mchunkptr remainder = chunk_plus_offset(newp, nb);
03118 set_inuse(m, newp, nb);
03119 set_inuse(m, remainder, rsize);
03120 extra = chunk2mem(remainder);
03121 }
03122 }
03123 else if (next == m->top && oldsize + m->topsize > nb) {
03124
03125 size_t newsize = oldsize + m->topsize;
03126 size_t newtopsize = newsize - nb;
03127 mchunkptr newtop = chunk_plus_offset(oldp, nb);
03128 set_inuse(m, oldp, nb);
03129 newtop->head = newtopsize |PINUSE_BIT;
03130 m->top = newtop;
03131 m->topsize = newtopsize;
03132 newp = oldp;
03133 }
03134 }
03135 else {
03136 USAGE_ERROR_ACTION(m, oldmem);
03137 POSTACTION(m);
03138 return 0;
03139 }
03140
03141 POSTACTION(m);
03142
03143 if (newp != 0) {
03144 if (extra != 0) {
03145 internal_free(m, extra);
03146 }
03147 check_inuse_chunk(m, newp);
03148 return chunk2mem(newp);
03149 }
03150 else {
03151 void* newmem = internal_malloc(m, bytes);
03152 if (newmem != 0) {
03153 size_t oc = oldsize - overhead_for(oldp);
03154 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
03155 internal_free(m, oldmem);
03156 }
03157 return newmem;
03158 }
03159 }
03160 return 0;
03161 }
03162
03163 /* --------------------------- memalign support -------------------------- */
03164
03165 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
03166 if (alignment <= MALLOC_ALIGNMENT)
03167 return internal_malloc(m, bytes);
03168 if (alignment < MIN_CHUNK_SIZE)
03169 alignment = MIN_CHUNK_SIZE;
03170 if ((alignment & (alignment-SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
03171 size_t a = MALLOC_ALIGNMENT << 1;
03172 while (a < alignment) a <<= 1;
03173 alignment = a;
03174 }
03175
03176 if (bytes >= MAX_REQUEST - alignment) {
03177 if (m != 0) {
03178 MALLOC_FAILURE_ACTION;
03179 }
03180 }
03181 else {
03182 size_t nb = request2size(bytes);
03183 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
03184 char* mem = (char*)internal_malloc(m, req);
03185 if (mem != 0) {
03186 void* leader = 0;
03187 void* trailer = 0;
03188 mchunkptr p = mem2chunk(mem);
03189
03190 if (PREACTION(m)) return 0;
03191 if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
03192 /*
03193 Find an aligned spot inside the chunk.  Since we need to give back
03194 leading space in a chunk of at least MIN_CHUNK_SIZE, if the first
03195 calculation places us at a spot with less than MIN_CHUNK_SIZE
03196 leader, we can move to the next aligned spot: we've allocated
03197 enough total room so that this is always possible.
03198 */
03199
03200 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
03201 alignment -
03202 SIZE_T_ONE)) &
03203 -alignment));
03204 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
03205 br : br+alignment;
03206 mchunkptr newp = (mchunkptr)pos;
03207 size_t leadsize = pos - (char*)(p);
03208 size_t newsize = chunksize(p) - leadsize;
03209 /* For mmapped chunks, just adjust the offset */
03210 if (is_mmapped(p)) {
03211 newp->prev_foot = p->prev_foot + leadsize;
03212 newp->head = (newsize|CINUSE_BIT);
03213 }
03214 else { /* Otherwise, give back leader, use the rest */
03215 set_inuse(m, newp, newsize);
03216 set_inuse(m, p, leadsize);
03217 leader = chunk2mem(p);
03218 }
03219 p = newp;
03220 }
03221
03222 /* Give back spare room at the end */
03223 if (!is_mmapped(p)) {
03224 size_t size = chunksize(p);
03225 if (size > nb + MIN_CHUNK_SIZE) {
03226 size_t remainder_size = size - nb;
03227 mchunkptr remainder = chunk_plus_offset(p, nb);
03228 set_inuse(m, p, nb);
03229 set_inuse(m, remainder, remainder_size);
03230 trailer = chunk2mem(remainder);
03231 }
03232 }
03233
03234 assert (chunksize(p) >= nb);
03235 assert((((size_t)(chunk2mem(p))) % alignment) == 0);
03236 check_inuse_chunk(m, p);
03237 POSTACTION(m);
03238 if (leader != 0) {
03239 internal_free(m, leader);
03240 }
03241 if (trailer != 0) {
03242 internal_free(m, trailer);
03243 }
03244 return chunk2mem(p);
03245 }
03246 }
03247 return 0;
03248 }
03249
03250
03251 /* ------------------------ comalloc/coalloc support --------------------- */
03252 static void** ialloc(mstate m,
03253 size_t n_elements,
03254 size_t* sizes,
03255 int opts,
03256 void* chunks[]) {
03257 /*
03258 This provides common support for the independent_X routines, handling
03259 all of the combinations that can result.
03260
03261 The opts arg has:
03262 bit 0 set if all elements are the same size (using sizes[0])
03263 bit 1 set if elements should be zeroed
03264 */
03265
03266 size_t element_size;
03267 size_t contents_size;
03268 size_t array_size;
03269 void* mem;
03270 mchunkptr p;
03271 size_t remainder_size;
03272 void** marray;
03273 mchunkptr array_chunk;
03274 flag_t was_enabled;
03275 size_t size;
03276 size_t i;
03277
03278 /* compute array length, if needed */
03279 if (chunks != 0) {
03280 if (n_elements == 0)
03281 return chunks;
03282 marray = chunks;
03283 array_size = 0;
03284 }
03285 else {
03286 /* if empty req, must still return chunk representing empty array */
03287 if (n_elements == 0)
03288 return (void**)internal_malloc(m, 0);
03289 marray = 0;
03290 array_size = request2size(n_elements * (sizeof(void*)));
03291 }
03292
03293 /* compute total element size */
03294 if (opts & 0x1) { /* all elements are the same size */
03295 element_size = request2size(*sizes);
03296 contents_size = n_elements * element_size;
03297 }
03298 else { /* add up all the sizes */
03299 element_size = 0;
03300 contents_size = 0;
03301 for (i = 0; i != n_elements; ++i)
03302 contents_size += request2size(sizes[i]);
03303 }
03304
03305 size = contents_size + array_size;
03306
03307 /*
03308 Allocate the aggregate chunk.  First disable direct-mmapping so
03309 malloc won't use it, since we would not be able to later
03310 free/realloc space internal to a segregated mmap region.
03311 */
03312 was_enabled = use_mmap(m);
03313 disable_mmap(m);
03314 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
03315 if (was_enabled)
03316 enable_mmap(m);
03317 if (mem == 0)
03318 return 0;
03319
03320 if (PREACTION(m)) return 0;
03321 p = mem2chunk(mem);
03322 remainder_size = chunksize(p);
03323
03324 assert(!is_mmapped(p));
03325
03326 if (opts & 0x2) { /* optionally clear the elements */
03327 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
03328 }
03329
03330 /* If not provided, allocate the pointer array as final part of chunk */
03331 if (marray == 0) {
03332 size_t array_chunk_size;
03333 array_chunk = chunk_plus_offset(p, contents_size);
03334 array_chunk_size = remainder_size - contents_size;
03335 marray = (void**) (chunk2mem(array_chunk));
03336 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
03337 remainder_size = contents_size;
03338 }
03339
03340 /* split out elements */
03341 for (i = 0; ; ++i) {
03342 marray[i] = chunk2mem(p);
03343 if (i != n_elements-1) {
03344 if (element_size != 0)
03345 size = element_size;
03346 else
03347 size = request2size(sizes[i]);
03348 remainder_size -= size;
03349 set_size_and_pinuse_of_inuse_chunk(m, p, size);
03350 p = chunk_plus_offset(p, size);
03351 }
03352 else {
03353 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
03354 break;
03355 }
03356 }
03357
03358 #if DEBUG
03359 if (marray != chunks) {
03360 /* final element must have exactly exhausted chunk */
03361 if (element_size != 0) {
03362 assert(remainder_size == element_size);
03363 }
03364 else {
03365 assert(remainder_size == request2size(sizes[i]));
03366 }
03367 check_inuse_chunk(m, mem2chunk(marray));
03368 }
03369 for (i = 0; i != n_elements; ++i)
03370 check_inuse_chunk(m, mem2chunk(marray[i]));
03371
03372 #endif
03373
03374 POSTACTION(m);
03375 return marray;
03376 }
03377
03378
03379 /* -------------------------- public routines ---------------------------- */
03380
03381 #if !ONLY_MSPACES
03382
03383 void* dlmalloc(size_t bytes) {
03384
03385 /*
03386 Basic algorithm:
03387 If a small request (< 256 bytes minus per-chunk overhead):
03388 1. If one exists, use a remainderless chunk in the associated smallbin.
03389 (Remainderless means that there are too few excess bytes to
03390 represent as a chunk.)
03391 2. If it is big enough, use the dv chunk, which is normally the
03392 chunk adjacent to the one used for the most recent small request.
03393 3. If one exists, split the smallest available chunk in a bin,
03394 saving the remainder in dv.
03395 4. If it is big enough, use the top chunk.
03396 5. If available, get memory from the system and use it.
03397 Otherwise, for a large request:
03398 1. Find the smallest available binned chunk that fits, and use it
03399 if it is better fitting than the dv chunk, splitting if necessary.
03400 2. If better fitting than any binned chunk, use the dv chunk.
03401 3. If it is big enough, use the top chunk.
03402 4. If the request size >= mmap threshold, try to directly mmap this chunk.
03403 5. If available, get memory from the system and use it.
03404
03405 The ugly goto's here ensure that postaction occurs along all paths.
03406 */
03407 if (!PREACTION(gm)) {
03408 void* mem;
03409 size_t nb;
03410 if (bytes <= MAX_SMALL_REQUEST) {
03411 bindex_t idx;
03412 binmap_t smallbits;
03413 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03414 idx = small_index(nb);
03415 smallbits = gm->smallmap >> idx;
03416
03417 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
03418 mchunkptr b, p;
03419 idx += ~smallbits & 1; /* Uses next bin if idx empty */
03420 b = smallbin_at(gm, idx);
03421 p = b->fd;
03422 assert(chunksize(p) == small_index2size(idx));
03423 unlink_first_small_chunk(gm, b, p, idx);
03424 set_inuse_and_pinuse(gm, p, small_index2size(idx));
03425 mem = chunk2mem(p);
03426 check_malloced_chunk(gm, mem, nb);
03427 goto postaction;
03428 }
03429
03430 else if (nb > gm->dvsize) {
03431 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
03432 mchunkptr b, p, r;
03433 size_t rsize;
03434 bindex_t i;
03435 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03436 binmap_t leastbit = least_bit(leftbits);
03437 compute_bit2idx(leastbit, i);
03438 b = smallbin_at(gm, i);
03439 p = b->fd;
03440 assert(chunksize(p) == small_index2size(i));
03441 unlink_first_small_chunk(gm, b, p, i);
03442 rsize = small_index2size(i) - nb;
03443
03444 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) /* Fit here cannot be remainderless if 4byte sizes */
03445 set_inuse_and_pinuse(gm, p, small_index2size(i));
03446 else {
03447 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03448 r = chunk_plus_offset(p, nb);
03449 set_size_and_pinuse_of_free_chunk(r, rsize);
03450 replace_dv(gm, r, rsize);
03451 }
03452 mem = chunk2mem(p);
03453 check_malloced_chunk(gm, mem, nb);
03454 goto postaction;
03455 }
03456
03457 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
03458 check_malloced_chunk(gm, mem, nb);
03459 goto postaction;
03460 }
03461 }
03462 }
03463 else if (bytes >= MAX_REQUEST)
03464 nb = MAX_SIZE_T;
03465 else {
03466 nb = pad_request(bytes);
03467 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
03468 check_malloced_chunk(gm, mem, nb);
03469 goto postaction;
03470 }
03471 }
03472
03473 if (nb <= gm->dvsize) {
03474 size_t rsize = gm->dvsize - nb;
03475 mchunkptr p = gm->dv;
03476 if (rsize >= MIN_CHUNK_SIZE) {
03477 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
03478 gm->dvsize = rsize;
03479 set_size_and_pinuse_of_free_chunk(r, rsize);
03480 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03481 }
03482 else {
03483 size_t dvs = gm->dvsize;
03484 gm->dvsize = 0;
03485 gm->dv = 0;
03486 set_inuse_and_pinuse(gm, p, dvs);
03487 }
03488 mem = chunk2mem(p);
03489 check_malloced_chunk(gm, mem, nb);
03490 goto postaction;
03491 }
03492
03493 else if (nb < gm->topsize) { /* Split top */
03494 size_t rsize = gm->topsize -= nb;
03495 mchunkptr p = gm->top;
03496 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
03497 r->head = rsize | PINUSE_BIT;
03498 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03499 mem = chunk2mem(p);
03500 check_top_chunk(gm, gm->top);
03501 check_malloced_chunk(gm, mem, nb);
03502 goto postaction;
03503 }
03504
03505 mem = sys_alloc(gm, nb);
03506
03507 postaction:
03508 POSTACTION(gm);
03509 return mem;
03510 }
03511
03512 return 0;
03513 }
03514
03515 void dlfree(void* mem) {
03516
03517 /*
03518 Consolidate freed chunks with preceding or succeeding bordering
03519 free chunks, if they exist, and then place in a bin.  Intermixed
03520 with special cases for top, dv, mmapped chunks, and usage errors.
03521 */
03522 if (mem != 0) {
03523 mchunkptr p = mem2chunk(mem);
03524 #if FOOTERS
03525 mstate fm = get_mstate_for(p);
03526 if (!ok_magic(fm)) {
03527 USAGE_ERROR_ACTION(fm, p);
03528 return;
03529 }
03530 #else
03531 #define fm gm
03532 #endif
03533 if (!PREACTION(fm)) {
03534 check_inuse_chunk(fm, p);
03535 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
03536 size_t psize = chunksize(p);
03537 mchunkptr next = chunk_plus_offset(p, psize);
03538 if (!pinuse(p)) { /* consolidate backward */
03539 size_t prevsize = p->prev_foot;
03540 if ((prevsize & IS_MMAPPED_BIT) != 0) {
03541 prevsize &= ~IS_MMAPPED_BIT;
03542 psize += prevsize + MMAP_FOOT_PAD;
03543 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03544 fm->footprint -= psize;
03545 goto postaction;
03546 }
03547 else {
03548 mchunkptr prev = chunk_minus_offset(p, prevsize);
03549 psize += prevsize;
03550 p = prev;
03551 if (RTCHECK(ok_address(fm, prev))) {
03552 if (p != fm->dv) {
03553 unlink_chunk(fm, p, prevsize);
03554 }
03555 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03556 fm->dvsize = psize;
03557 set_free_with_pinuse(p, psize, next);
03558 goto postaction;
03559 }
03560 }
03561 else
03562 goto erroraction;
03563 }
03564 }
03565
03566 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03567 if (!cinuse(next)) { /* consolidate forward */
03568 if (next == fm->top) {
03569 size_t tsize = fm->topsize += psize;
03570 fm->top = p;
03571 p->head = tsize | PINUSE_BIT;
03572 if (p == fm->dv) {
03573 fm->dv = 0;
03574 fm->dvsize = 0;
03575 }
03576 if (should_trim(fm, tsize))
03577 sys_trim(fm, 0);
03578 goto postaction;
03579 }
03580 else if (next == fm->dv) {
03581 size_t dsize = fm->dvsize += psize;
03582 fm->dv = p;
03583 set_size_and_pinuse_of_free_chunk(p, dsize);
03584 goto postaction;
03585 }
03586 else {
03587 size_t nsize = chunksize(next);
03588 psize += nsize;
03589 unlink_chunk(fm, next, nsize);
03590 set_size_and_pinuse_of_free_chunk(p, psize);
03591 if (p == fm->dv) {
03592 fm->dvsize = psize;
03593 goto postaction;
03594 }
03595 }
03596 }
03597 else
03598 set_free_with_pinuse(p, psize, next);
03599 insert_chunk(fm, p, psize);
03600 check_free_chunk(fm, p);
03601 goto postaction;
03602 }
03603 }
03604 erroraction:
03605 USAGE_ERROR_ACTION(fm, p);
03606 postaction:
03607 POSTACTION(fm);
03608 }
03609 }
03610 #if !FOOTERS
03611 #undef fm
03612 #endif
03613 }
03614
03615 void* dlcalloc(size_t n_elements, size_t elem_size) {
03616 void* mem;
03617 size_t req = 0;
03618 if (n_elements != 0) {
03619 req = n_elements * elem_size;
03620 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
03621 (req / n_elements != elem_size))
03622 req = MAX_SIZE_T; /* force downstream failure on overflow */
03623 }
03624 mem = dlmalloc(req);
03625 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
03626 memset(mem, 0, req);
03627 return mem;
03628 }
03629
03630 void* dlrealloc(void* oldmem, size_t bytes) {
03631 if (oldmem == 0)
03632 return dlmalloc(bytes);
03633 #ifdef REALLOC_ZERO_BYTES_FREES
03634 if (bytes == 0) {
03635 dlfree(oldmem);
03636 return 0;
03637 }
03638 #endif
03639 else {
03640 #if ! FOOTERS
03641 mstate m = gm;
03642 #else
03643 mstate m = get_mstate_for(mem2chunk(oldmem));
03644 if (!ok_magic(m)) {
03645 USAGE_ERROR_ACTION(m, oldmem);
03646 return 0;
03647 }
03648 #endif
03649 return internal_realloc(m, oldmem, bytes);
03650 }
03651 }
03652
03653 void* dlmemalign(size_t alignment, size_t bytes) {
03654 return internal_memalign(gm, alignment, bytes);
03655 }
03656
03657 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
03658 void* chunks[]) {
03659 size_t sz = elem_size;
03660 return ialloc(gm, n_elements, &sz, 3, chunks);
03661 }
03662
03663 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
03664 void* chunks[]) {
03665 return ialloc(gm, n_elements, sizes, 0, chunks);
03666 }
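/*
  Illustrative usage sketch (editorial addition, not part of the original
  source; the element sizes and array names below are hypothetical).
  dlindependent_comalloc carves several separately freeable regions out of
  one underlying allocation request:

      size_t sizes[3] = { 16, 128, 256 };
      void*  ptrs[3];
      if (dlindependent_comalloc(3, sizes, ptrs) != 0) {
        use ptrs[0], ptrs[1], ptrs[2]; each may later be passed to dlfree
      }
*/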
03667
03668 void* dlvalloc(size_t bytes) {
03669 size_t pagesz;
03670 init_mparams();
03671 pagesz = mparams.page_size;
03672 return dlmemalign(pagesz, bytes);
03673 }
03674
03675 void* dlpvalloc(size_t bytes) {
03676 size_t pagesz;
03677 init_mparams();
03678 pagesz = mparams.page_size;
03679 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
03680 }
03681
03682 int dlmalloc_trim(size_t pad) {
03683 int result = 0;
03684 if (!PREACTION(gm)) {
03685 result = sys_trim(gm, pad);
03686 POSTACTION(gm);
03687 }
03688 return result;
03689 }
03690
03691 size_t dlmalloc_footprint(void) {
03692 return gm->footprint;
03693 }
03694
03695 size_t dlmalloc_max_footprint(void) {
03696 return gm->max_footprint;
03697 }
03698
03699 #if !NO_MALLINFO
03700 struct mallinfo dlmallinfo(void) {
03701 return internal_mallinfo(gm);
03702 }
03703 #endif
03704
03705 void dlmalloc_stats() {
03706 internal_malloc_stats(gm);
03707 }
03708
03709 size_t dlmalloc_usable_size(void* mem) {
03710 if (mem != 0) {
03711 mchunkptr p = mem2chunk(mem);
03712 if (cinuse(p))
03713 return chunksize(p) - overhead_for(p);
03714 }
03715 return 0;
03716 }
03717
03718 int dlmallopt(int param_number, int value) {
03719 return change_mparam(param_number, value);
03720 }
03721
03722 #endif
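
/*
  Illustrative usage sketch (editorial addition, not part of the original
  source; the variable names are arbitrary).  dlmemalign returns memory
  whose address is a multiple of the requested power-of-two alignment, and
  dlmalloc_usable_size reports how many bytes the chunk actually provides,
  which may exceed the requested size:

      void*  p = dlmemalign((size_t)64, (size_t)100);
      size_t n = dlmalloc_usable_size(p);   n >= 100 whenever p != 0
      dlfree(p);
*/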
03723
03724
03725 /* ----------------------------- user mspaces ---------------------------- */
03726 #if MSPACES
03727
03728 static mstate init_user_mstate(char* tbase, size_t tsize) {
03729 size_t msize = pad_request(sizeof(struct malloc_state));
03730 mchunkptr mn;
03731 mchunkptr msp = align_as_chunk(tbase);
03732 mstate m = (mstate)(chunk2mem(msp));
03733 memset(m, 0, msize);
03734 INITIAL_LOCK(&m->mutex);
03735 msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
03736 m->seg.base = m->least_addr = tbase;
03737 m->seg.size = m->footprint = m->max_footprint = tsize;
03738 m->magic = mparams.magic;
03739 m->mflags = mparams.default_mflags;
03740 disable_contiguous(m);
03741 init_bins(m);
03742 mn = next_chunk(mem2chunk(m));
03743 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
03744 check_top_chunk(m, m->top);
03745 return m;
03746 }
03747
03748 mspace create_mspace(size_t capacity, int locked) {
03749 mstate m = 0;
03750 size_t msize = pad_request(sizeof(struct malloc_state));
03751 init_mparams();
03752
03753 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03754 size_t rs = ((capacity == 0)? mparams.granularity :
03755 (capacity + TOP_FOOT_SIZE + msize));
03756 size_t tsize = granularity_align(rs);
03757 char* tbase = (char*)(CALL_MMAP(tsize));
03758 if (tbase != CMFAIL) {
03759 m = init_user_mstate(tbase, tsize);
03760 m->seg.sflags = IS_MMAPPED_BIT;
03761 set_lock(m, locked);
03762 }
03763 }
03764 return (mspace)m;
03765 }
03766
03767 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
03768 mstate m = 0;
03769 size_t msize = pad_request(sizeof(struct malloc_state));
03770 init_mparams();
03771
03772 if (capacity > msize + TOP_FOOT_SIZE &&
03773 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03774 m = init_user_mstate((char*)base, capacity);
03775 m->seg.sflags = EXTERN_BIT;
03776 set_lock(m, locked);
03777 }
03778 return (mspace)m;
03779 }
03780
03781 size_t destroy_mspace(mspace msp) {
03782 size_t freed = 0;
03783 mstate ms = (mstate)msp;
03784 if (ok_magic(ms)) {
03785 msegmentptr sp = &ms->seg;
03786 while (sp != 0) {
03787 char* base = sp->base;
03788 size_t size = sp->size;
03789 flag_t flag = sp->sflags;
03790 sp = sp->next;
03791 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
03792 CALL_MUNMAP(base, size) == 0)
03793 freed += size;
03794 }
03795 }
03796 else {
03797 USAGE_ERROR_ACTION(ms,ms);
03798 }
03799 return freed;
03800 }
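
/*
  Illustrative usage sketch (editorial addition, not part of the original
  source): carve a private mspace out of a caller-supplied buffer and
  allocate from it.  The buffer "buf" and its size are hypothetical;
  create_mspace_with_base requires the capacity to exceed the embedded
  bookkeeping overhead:

      static char buf[1 << 20];
      mspace ms = create_mspace_with_base(buf, sizeof(buf), 0);
      if (ms != 0) {
        void* p = mspace_malloc(ms, 128);
        mspace_free(ms, p);
        destroy_mspace(ms);
      }
*/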
03801
03802
03803 /*
03804 mspace versions of routines are near-clones of the global
03805 versions. This is not so nice but better than the alternatives.
03806 */
03807
03808 void* mspace_malloc(mspace msp, size_t bytes) {
03809 mstate ms = (mstate)msp;
03810 if (!ok_magic(ms)) {
03811 USAGE_ERROR_ACTION(ms,ms);
03812 return 0;
03813 }
03814 if (!PREACTION(ms)) {
03815 void* mem;
03816 size_t nb;
03817 if (bytes <= MAX_SMALL_REQUEST) {
03818 bindex_t idx;
03819 binmap_t smallbits;
03820 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03821 idx = small_index(nb);
03822 smallbits = ms->smallmap >> idx;
03823
03824 if ((smallbits & 0x3U) != 0) {
03825 mchunkptr b, p;
03826 idx += ~smallbits & 1;
03827 b = smallbin_at(ms, idx);
03828 p = b->fd;
03829 assert(chunksize(p) == small_index2size(idx));
03830 unlink_first_small_chunk(ms, b, p, idx);
03831 set_inuse_and_pinuse(ms, p, small_index2size(idx));
03832 mem = chunk2mem(p);
03833 check_malloced_chunk(ms, mem, nb);
03834 goto postaction;
03835 }
03836
03837 else if (nb > ms->dvsize) {
03838 if (smallbits != 0) {
03839 mchunkptr b, p, r;
03840 size_t rsize;
03841 bindex_t i;
03842 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03843 binmap_t leastbit = least_bit(leftbits);
03844 compute_bit2idx(leastbit, i);
03845 b = smallbin_at(ms, i);
03846 p = b->fd;
03847 assert(chunksize(p) == small_index2size(i));
03848 unlink_first_small_chunk(ms, b, p, i);
03849 rsize = small_index2size(i) - nb;
03850
03851 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
03852 set_inuse_and_pinuse(ms, p, small_index2size(i));
03853 else {
03854 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03855 r = chunk_plus_offset(p, nb);
03856 set_size_and_pinuse_of_free_chunk(r, rsize);
03857 replace_dv(ms, r, rsize);
03858 }
03859 mem = chunk2mem(p);
03860 check_malloced_chunk(ms, mem, nb);
03861 goto postaction;
03862 }
03863
03864 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
03865 check_malloced_chunk(ms, mem, nb);
03866 goto postaction;
03867 }
03868 }
03869 }
03870 else if (bytes >= MAX_REQUEST)
03871 nb = MAX_SIZE_T;
03872 else {
03873 nb = pad_request(bytes);
03874 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
03875 check_malloced_chunk(ms, mem, nb);
03876 goto postaction;
03877 }
03878 }
03879
03880 if (nb <= ms->dvsize) {
03881 size_t rsize = ms->dvsize - nb;
03882 mchunkptr p = ms->dv;
03883 if (rsize >= MIN_CHUNK_SIZE) {
03884 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
03885 ms->dvsize = rsize;
03886 set_size_and_pinuse_of_free_chunk(r, rsize);
03887 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03888 }
03889 else {
03890 size_t dvs = ms->dvsize;
03891 ms->dvsize = 0;
03892 ms->dv = 0;
03893 set_inuse_and_pinuse(ms, p, dvs);
03894 }
03895 mem = chunk2mem(p);
03896 check_malloced_chunk(ms, mem, nb);
03897 goto postaction;
03898 }
03899
03900 else if (nb < ms->topsize) {
03901 size_t rsize = ms->topsize -= nb;
03902 mchunkptr p = ms->top;
03903 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
03904 r->head = rsize | PINUSE_BIT;
03905 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03906 mem = chunk2mem(p);
03907 check_top_chunk(ms, ms->top);
03908 check_malloced_chunk(ms, mem, nb);
03909 goto postaction;
03910 }
03911
03912 mem = sys_alloc(ms, nb);
03913
03914 postaction:
03915 POSTACTION(ms);
03916 return mem;
03917 }
03918
03919 return 0;
03920 }
03921
03922 void mspace_free(mspace msp, void* mem) {
03923 if (mem != 0) {
03924 mchunkptr p = mem2chunk(mem);
03925 #if FOOTERS
03926 mstate fm = get_mstate_for(p);
03927 #else
03928 mstate fm = (mstate)msp;
03929 #endif
03930 if (!ok_magic(fm)) {
03931 USAGE_ERROR_ACTION(fm, p);
03932 return;
03933 }
03934 if (!PREACTION(fm)) {
03935 check_inuse_chunk(fm, p);
03936 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
03937 size_t psize = chunksize(p);
03938 mchunkptr next = chunk_plus_offset(p, psize);
03939 if (!pinuse(p)) {
03940 size_t prevsize = p->prev_foot;
03941 if ((prevsize & IS_MMAPPED_BIT) != 0) {
03942 prevsize &= ~IS_MMAPPED_BIT;
03943 psize += prevsize + MMAP_FOOT_PAD;
03944 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03945 fm->footprint -= psize;
03946 goto postaction;
03947 }
03948 else {
03949 mchunkptr prev = chunk_minus_offset(p, prevsize);
03950 psize += prevsize;
03951 p = prev;
03952 if (RTCHECK(ok_address(fm, prev))) {
03953 if (p != fm->dv) {
03954 unlink_chunk(fm, p, prevsize);
03955 }
03956 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03957 fm->dvsize = psize;
03958 set_free_with_pinuse(p, psize, next);
03959 goto postaction;
03960 }
03961 }
03962 else
03963 goto erroraction;
03964 }
03965 }
03966
03967 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03968 if (!cinuse(next)) {
03969 if (next == fm->top) {
03970 size_t tsize = fm->topsize += psize;
03971 fm->top = p;
03972 p->head = tsize | PINUSE_BIT;
03973 if (p == fm->dv) {
03974 fm->dv = 0;
03975 fm->dvsize = 0;
03976 }
03977 if (should_trim(fm, tsize))
03978 sys_trim(fm, 0);
03979 goto postaction;
03980 }
03981 else if (next == fm->dv) {
03982 size_t dsize = fm->dvsize += psize;
03983 fm->dv = p;
03984 set_size_and_pinuse_of_free_chunk(p, dsize);
03985 goto postaction;
03986 }
03987 else {
03988 size_t nsize = chunksize(next);
03989 psize += nsize;
03990 unlink_chunk(fm, next, nsize);
03991 set_size_and_pinuse_of_free_chunk(p, psize);
03992 if (p == fm->dv) {
03993 fm->dvsize = psize;
03994 goto postaction;
03995 }
03996 }
03997 }
03998 else
03999 set_free_with_pinuse(p, psize, next);
04000 insert_chunk(fm, p, psize);
04001 check_free_chunk(fm, p);
04002 goto postaction;
04003 }
04004 }
04005 erroraction:
04006 USAGE_ERROR_ACTION(fm, p);
04007 postaction:
04008 POSTACTION(fm);
04009 }
04010 }
04011 }
04012
04013 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
04014 void* mem;
04015 size_t req = 0;
04016 mstate ms = (mstate)msp;
04017 if (!ok_magic(ms)) {
04018 USAGE_ERROR_ACTION(ms,ms);
04019 return 0;
04020 }
04021 if (n_elements != 0) {
04022 req = n_elements * elem_size;
04023 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
04024 (req / n_elements != elem_size))
04025 req = MAX_SIZE_T;
04026 }
04027 mem = internal_malloc(ms, req);
04028 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
04029 memset(mem, 0, req);
04030 return mem;
04031 }
04032
04033 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
04034 if (oldmem == 0)
04035 return mspace_malloc(msp, bytes);
04036 #ifdef REALLOC_ZERO_BYTES_FREES
04037 if (bytes == 0) {
04038 mspace_free(msp, oldmem);
04039 return 0;
04040 }
04041 #endif
04042 else {
04043 #if FOOTERS
04044 mchunkptr p = mem2chunk(oldmem);
04045 mstate ms = get_mstate_for(p);
04046 #else
04047 mstate ms = (mstate)msp;
04048 #endif
04049 if (!ok_magic(ms)) {
04050 USAGE_ERROR_ACTION(ms,ms);
04051 return 0;
04052 }
04053 return internal_realloc(ms, oldmem, bytes);
04054 }
04055 }
04056
04057 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
04058 mstate ms = (mstate)msp;
04059 if (!ok_magic(ms)) {
04060 USAGE_ERROR_ACTION(ms,ms);
04061 return 0;
04062 }
04063 return internal_memalign(ms, alignment, bytes);
04064 }
04065
04066 void** mspace_independent_calloc(mspace msp, size_t n_elements,
04067 size_t elem_size, void* chunks[]) {
04068 size_t sz = elem_size;
04069 mstate ms = (mstate)msp;
04070 if (!ok_magic(ms)) {
04071 USAGE_ERROR_ACTION(ms,ms);
04072 return 0;
04073 }
04074 return ialloc(ms, n_elements, &sz, 3, chunks);
04075 }
04076
04077 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
04078 size_t sizes[], void* chunks[]) {
04079 mstate ms = (mstate)msp;
04080 if (!ok_magic(ms)) {
04081 USAGE_ERROR_ACTION(ms,ms);
04082 return 0;
04083 }
04084 return ialloc(ms, n_elements, sizes, 0, chunks);
04085 }
04086
04087 int mspace_trim(mspace msp, size_t pad) {
04088 int result = 0;
04089 mstate ms = (mstate)msp;
04090 if (ok_magic(ms)) {
04091 if (!PREACTION(ms)) {
04092 result = sys_trim(ms, pad);
04093 POSTACTION(ms);
04094 }
04095 }
04096 else {
04097 USAGE_ERROR_ACTION(ms,ms);
04098 }
04099 return result;
04100 }
04101
04102 void mspace_malloc_stats(mspace msp) {
04103 mstate ms = (mstate)msp;
04104 if (ok_magic(ms)) {
04105 internal_malloc_stats(ms);
04106 }
04107 else {
04108 USAGE_ERROR_ACTION(ms,ms);
04109 }
04110 }
04111
04112 size_t mspace_footprint(mspace msp) {
04113 size_t result = 0;
04114 mstate ms = (mstate)msp;
04115 if (ok_magic(ms)) {
04116 result = ms->footprint;
04117 }
04118 else USAGE_ERROR_ACTION(ms,ms);
04119 return result;
04120 }
04121
04122
04123 size_t mspace_max_footprint(mspace msp) {
04124 size_t result = 0;
04125 mstate ms = (mstate)msp;
04126 if (ok_magic(ms)) {
04127 result = ms->max_footprint;
04128 }
04129 else USAGE_ERROR_ACTION(ms,ms);
04130 return result;
04131 }
04132
04133
04134 #if !NO_MALLINFO
04135 struct mallinfo mspace_mallinfo(mspace msp) {
04136 mstate ms = (mstate)msp;
04137 if (!ok_magic(ms)) {
04138 USAGE_ERROR_ACTION(ms,ms);
04139 }
04140 return internal_mallinfo(ms);
04141 }
04142 #endif
04143
04144 int mspace_mallopt(int param_number, int value) {
04145 return change_mparam(param_number, value);
04146 }
04147
04148 #endif
04149