_Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2, "PTRDIFF_MAX is not more than half of SIZE_MAX");
if (!__malloc_initialized)
  ptmalloc_init ();
#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes;
  if (!checked_request2size (bytes, &tbytes))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  size_t tc_idx = csize2tidx (tbytes);
victim = _int_malloc (ar_ptr, bytes);
/* Retry with another arena only if we were able to find a usable arena
   before.  */
if (!victim && ar_ptr != NULL)
  {
    LIBC_PROBE (memory_malloc_retry, 1, bytes);
    ar_ptr = arena_get_retry (ar_ptr, bytes);
    victim = _int_malloc (ar_ptr, bytes);
  }
if (ar_ptr != NULL) __libc_lock_unlock (ar_ptr->mutex);
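The tcache lookup at the top of __libc_malloc hinges on csize2tidx. As a rough illustration of how a chunk size maps to a tcache bin, here is a small stand-alone sketch (it assumes a 64-bit build with 16-byte alignment; csize2tidx_demo is a hypothetical helper, not the glibc macro):

#include <stdio.h>

/* Roughly (csize - MINSIZE) / MALLOC_ALIGNMENT on a typical 64-bit build. */
static size_t csize2tidx_demo(size_t csize)
{
    const size_t MINSIZE_ = 0x20, ALIGN = 0x10;
    return (csize - MINSIZE_ + ALIGN - 1) / ALIGN;
}

int main(void)
{
    /* 0x20 -> bin 0, 0x30 -> bin 1, 0x410 -> bin 63 (the last tcache bin) */
    printf("%zu %zu %zu\n", csize2tidx_demo(0x20), csize2tidx_demo(0x30),
           csize2tidx_demo(0x410));
    return 0;
}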
if (SINGLE_THREAD_P) av = &main_arena; else arena_get (av, sz);
if (av)
  {
    /* Check if we hand out the top chunk, in which case there may be no
       need to clear. */
#if MORECORE_CLEARS
    oldtop = top (av);
    oldtopsize = chunksize (top (av));
# if MORECORE_CLEARS < 2
    /* Only newly allocated memory is guaranteed to be cleared.  */
    if (av == &main_arena &&
        oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
      oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
# endif
    if (av != &main_arena)
      {
        heap_info *heap = heap_for_ptr (oldtop);
        if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
          oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
      }
#endif
  }
else
  {
    /* No usable arenas.  */
    oldtop = 0;
    oldtopsize = 0;
  }
mem = _int_malloc (av, sz);

if (!SINGLE_THREAD_P)
  {
    if (mem == 0 && av != NULL)
      {
        LIBC_PROBE (memory_calloc_retry, 1, sz);
        av = arena_get_retry (av, sz);
        mem = _int_malloc (av, sz);
      }

    if (av != NULL)
      __libc_lock_unlock (av->mutex);
  }

/* Allocation failed even after a retry.  */
if (mem == 0)
  return 0;
mchunkptr p = mem2chunk (mem);
/* If we are using memory tagging, then we need to set the tags
   regardless of MORECORE_CLEARS, so we zero the whole block while
   doing so.  */
if (__glibc_unlikely (mtag_enabled))
  return tag_new_zero_region (mem, memsize (p));
INTERNAL_SIZE_T csz = chunksize (p);
/* Two optional cases in which clearing not necessary */
if (chunk_is_mmapped (p))
  {
    if (__builtin_expect (perturb_byte, 0))
      return memset (mem, 0, sz);
return mem; }
#if MORECORE_CLEARS
if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
  {
    /* clear only the bytes from non-freshly-sbrked memory */
    csz = oldtopsize;
  }
#endif

/* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
   contents have an odd number of INTERNAL_SIZE_T-sized words;
   minimally 3.  */
d = (INTERNAL_SIZE_T *) mem;
clearsize = csz - SIZE_SZ;
nclears = clearsize / sizeof (INTERNAL_SIZE_T);
assert (nclears >= 3);
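As a quick sanity check of what this clearing code guarantees, the sketch below verifies that calloc hands back zeroed memory (malloc makes no such promise):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned char *p = calloc(1, 0x100);
    if (!p)
        return 1;
    int all_zero = 1;
    for (size_t i = 0; i < 0x100; i++)
        all_zero &= (p[i] == 0);
    printf("calloc zeroed: %s\n", all_zero ? "yes" : "no");
    free(p);
    return 0;
}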
/* While we're here, if we see other chunks of the same size,
   stash them in the tcache.  */
size_t tc_idx = csize2tidx (nb);
if (tcache && tc_idx < mp_.tcache_bins)
  {
    mchunkptr tc_victim;

    /* While bin not empty and tcache not full, copy chunks.  */
    while (tcache->counts[tc_idx] < mp_.tcache_count
           && (tc_victim = *fb) != NULL)
      {
        if (__glibc_unlikely (misaligned_chunk (tc_victim)))
          malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
        if (SINGLE_THREAD_P)
          *fb = REVEAL_PTR (tc_victim->fd);
        else
          {
            REMOVE_FB (fb, pp, tc_victim);
            if (__glibc_unlikely (tc_victim == NULL))
              break;
          }
        tcache_put (tc_victim, tc_idx);
      }
  }
  v7 = __readfsqword(0x28u);
  sub_40091D(a1, a2, a3);
  puts("Happy to see you darling!");
  puts("Give me your name:");
  read(0, buf, 0x10uLL);
  puts("Give me your key:");
  read(0, v4, 0x20uLL);
  puts("Now start the game!");
  do
  {
    puts("Input your password!:");
    read(0, v6, 0x2CuLL);
    result = sub_400DB8(v6);
  }
  while ( (_DWORD)result != 1 );
  return result;
}
Y7n05h could not find any vulnerability in this pseudocode.
Could this be a stack challenge, with the vulnerability hiding inside sub_400BFD()?
The IDA plugin Findcrypt flagged TEA_DELTA_400C64, and a quick web search confirmed that this constant belongs to the TEA encryption algorithm.
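For reference, a minimal sketch of textbook TEA decryption (the round count, delta and key layout below follow the reference algorithm, not the binary, which may tweak them; the ciphertext and key are made up):

#include <stdint.h>
#include <stdio.h>

/* Reference TEA decryption: 32 rounds, delta = 0x9E3779B9. */
static void tea_decrypt(uint32_t v[2], const uint32_t k[4])
{
    uint32_t v0 = v[0], v1 = v[1];
    uint32_t delta = 0x9E3779B9, sum = delta * 32;
    for (int i = 0; i < 32; i++) {
        v1 -= ((v0 << 4) + k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + k[3]);
        v0 -= ((v1 << 4) + k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + k[1]);
        sum -= delta;
    }
    v[0] = v0;
    v[1] = v1;
}

int main(void)
{
    uint32_t block[2] = { 0xdeadbeef, 0xcafebabe };   /* made-up ciphertext */
    uint32_t key[4]   = { 1, 2, 3, 4 };               /* made-up key */
    tea_decrypt(block, key);
    printf("%08x %08x\n", block[0], block[1]);
    return 0;
}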
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    setvbuf(stderr, NULL, _IONBF, 0);
    setvbuf(stdin, NULL, _IONBF, 0);
    setvbuf(stdout, NULL, _IONBF, 0);
    char buf[0x50];
    puts("Happy to see you darling!");
    puts("Give me your name:");
    read(0, buf, 0x10uLL);
    puts("Give me your key:");
    read(0, buf, 0x20uLL);
    puts("Now start the game!");
    do {
        puts("Input your password!:");
        read(0, buf, 0x2CuLL);
    } while (1);
}
if (SINGLE_THREAD_P)   // single-threaded
  {
    return _int_malloc (&main_arena, bytes);
  }
Entering _int_malloc()
/* Convert request size to internal form by adding SIZE_SZ bytes overhead plus possibly more to obtain necessary alignment and/or to obtain a size of at least MINSIZE, the smallest allocatable size. Also, checked_request2size returns false for request sizes that are so large that they wrap around zero when padded and aligned. */
if (!checked_request2size (bytes, &nb))   // compute nb from bytes, the metadata overhead and the alignment requirement
  {
    __set_errno (ENOMEM);
    return NULL;
  }
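To make the rounding concrete, here is a small stand-alone sketch of the padding and alignment that request2size performs (the constants assume a typical 64-bit glibc; request2size_demo is a hypothetical helper, not the real macro):

#include <stdio.h>

#define SIZE_SZ            8UL
#define MALLOC_ALIGNMENT   16UL
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE            32UL

/* Pad the request by one size word, round up to the alignment,
   and enforce the MINSIZE floor. */
static unsigned long request2size_demo(unsigned long req)
{
    unsigned long sz = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
    return sz < MINSIZE ? MINSIZE : sz;
}

int main(void)
{
    /* 0x18 -> 0x20, 0x19 -> 0x30, 0x40 -> 0x50 */
    printf("%#lx %#lx %#lx\n",
           request2size_demo(0x18), request2size_demo(0x19),
           request2size_demo(0x40));
    return 0;
}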
/* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
   mmap.  */
if (__glibc_unlikely (av == NULL))
  {
    void *p = sysmalloc (nb, av);   // no arena available: ask the system for more memory
    if (p != NULL)
      alloc_perturb (p, bytes);
    return p;
  }

if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))   // get_max_fast() defaults to DEFAULT_MXFAST
  {
    // Because nb is aligned, any nb <= get_max_fast() is a valid fastbin size:
    // there is no nb within get_max_fast() that lacks a matching fastbin_index.
    idx = fastbin_index (nb);
    mfastbinptr *fb = &fastbin (av, idx);
    mchunkptr pp;
    victim = *fb;

    if (victim != NULL)
      {   // the corresponding fastbin is not empty
        if (__glibc_unlikely (misaligned_chunk (victim)))
          malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");

        if (SINGLE_THREAD_P)   // single-threaded
          *fb = REVEAL_PTR (victim->fd);   // unlink the first node; REVEAL_PTR hardens the list against attacks
        else   // multi-threaded
          REMOVE_FB (fb, pp, victim);
        if (__glibc_likely (victim != NULL))
          {
            size_t victim_idx = fastbin_index (chunksize (victim));   // computed only for the next check
            if (__builtin_expect (victim_idx != idx, 0))
              malloc_printerr ("malloc(): memory corruption (fast)");
            check_remalloced_chunk (av, victim, nb);   // assert only under malloc debugging
#if USE_TCACHE
            /* While we're here, if we see other chunks of the same size,
               stash them in the tcache.  */
            size_t tc_idx = csize2tidx (nb);
            if (tcache && tc_idx < mp_.tcache_bins)
              {
                mchunkptr tc_victim;

                /* While bin not empty and tcache not full, copy chunks.  */
                while (tcache->counts[tc_idx] < mp_.tcache_count
                       && (tc_victim = *fb) != NULL)
                  {
                    if (__glibc_unlikely (misaligned_chunk (tc_victim)))
                      malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
                    if (SINGLE_THREAD_P)
                      *fb = REVEAL_PTR (tc_victim->fd);
                    else
                      {
                        REMOVE_FB (fb, pp, tc_victim);
                        if (__glibc_unlikely (tc_victim == NULL))
                          break;
                      }
                    tcache_put (tc_victim, tc_idx);
                  }
              }
#endif
            void *p = chunk2mem (victim);
            alloc_perturb (p, bytes);
            return p;
          }
      }   // the corresponding fastbin is empty
  }
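REVEAL_PTR and PROTECT_PTR implement glibc's "safe-linking" mitigation: the stored fd pointer is XORed with the address of the slot it lives in, shifted right by 12 bits, so forging list pointers requires a heap address leak. A minimal sketch of the idea (protect_ptr below is a stand-in modeled on the glibc macros, and the addresses are made up):

#include <stdint.h>
#include <stdio.h>

/* Safe-linking sketch: mangle a next pointer with the address where it
   is stored, shifted by the page-size bits. */
static void *protect_ptr(void *pos, void *ptr)
{
    return (void *) (((uintptr_t) pos >> 12) ^ (uintptr_t) ptr);
}

int main(void)
{
    void *slot_addr = (void *) 0x555555559010;        /* hypothetical &chunk->fd */
    void *next      = (void *) 0x5555555590a0;        /* hypothetical next chunk */
    void *stored    = protect_ptr(slot_addr, next);
    void *revealed  = protect_ptr(slot_addr, stored);  /* XOR is its own inverse */
    printf("stored=%p revealed=%p\n", stored, revealed);
    return 0;
}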
/* If a small request, check regular bin. Since these "smallbins" hold one size each, no searching within bins is necessary. (For a large request, we need to wait until unsorted chunks are processed to find best fit. But for small ones, fits are exact anyway, so we can check now, which is faster.) */
if (in_smallbin_range (nb))
  {   // more precisely: nb does not reach the minimum size served from the LargeBins

    // Note that execution also reaches here when the fastbin path failed to allocate.
    idx = smallbin_index (nb);
    bin = bin_at (av, idx);

        if (av != &main_arena)
          set_non_main_arena (victim);
        check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
        /* While we're here, if we see other chunks of the same size,
           stash them in the tcache.  */
        size_t tc_idx = csize2tidx (nb);
        if (tcache && tc_idx < mp_.tcache_bins)
          {
            mchunkptr tc_victim;

            /* While bin not empty and tcache not full, copy chunks over.  */
            while (tcache->counts[tc_idx] < mp_.tcache_count
                   && (tc_victim = last (bin)) != bin)
              {
                if (tc_victim != 0)
                  {
                    bck = tc_victim->bk;
                    set_inuse_bit_at_offset (tc_victim, nb);
                    if (av != &main_arena)
                      set_non_main_arena (tc_victim);
                    bin->bk = bck;
                    bck->fd = bin;
/* If a small request, try to use last remainder if it is the only chunk in unsorted bin. This helps promote locality for runs of consecutive small requests. This is the only exception to best-fit, and applies only when there is no exact fit for a small chunk. */
/* If a large request, scan through the chunks of current bin in sorted order to find smallest that fits. Use the skip list for this. */
// Large request
if (!in_smallbin_range (nb))
  {
    bin = bin_at (av, idx);

    /* skip scan if empty or largest chunk is too small */
    if ((victim = first (bin)) != bin
        && (unsigned long) chunksize_nomask (victim) >= (unsigned long) (nb))
      {
        victim = victim->bk_nextsize;   // victim now points at the smallest chunk
        while (((unsigned long) (size = chunksize (victim)) < (unsigned long) (nb)))
          victim = victim->bk_nextsize;

        // Two chunks of the same size: use the second one (the comment below explains why).
        /* Avoid removing the first entry for a size so that the skip
           list does not have to be rerouted.  */
        if (victim != last (bin)
            && chunksize_nomask (victim) == chunksize_nomask (victim->fd))
          victim = victim->fd;

        // The UnsortedBin is scanned at most MAX_ITERS times, so it cannot
        // be assumed empty here.
        /* We cannot assume the unsorted list is empty and therefore
           have to perform a complete insert here.  */
        bck = unsorted_chunks (av);
        fwd = bck->fd;
        if (__glibc_unlikely (fwd->bk != bck))
          malloc_printerr ("malloc(): corrupted unsorted chunks");
        remainder->bk = bck;   // insert the remainder chunk right after the UnsortedBin head
        remainder->fd = fwd;
        bck->fd = remainder;
        fwd->bk = remainder;
        if (!in_smallbin_range (remainder_size))
          {   // if the remainder is not a small chunk, clear fd_nextsize/bk_nextsize
            remainder->fd_nextsize = NULL;
            remainder->bk_nextsize = NULL;
          }
        set_head (victim, nb | PREV_INUSE
                  | (av != &main_arena ? NON_MAIN_ARENA : 0));
        set_head (remainder, remainder_size | PREV_INUSE);
        set_foot (remainder, remainder_size);
      }
    check_malloced_chunk (av, victim, nb);
    void *p = chunk2mem (victim);
    alloc_perturb (p, bytes);
    return p;
  }
}

If nb is not in the SmallBin range, the LargeBin is searched for the best-fitting chunk, i.e. the smallest chunk that is at least nb. If that chunk is at least nb + MINSIZE, it is split and the remainder is placed into the UnsortedBin; the pointer to the carved-out chunk is then returned and the allocation is finished.
Other cases
/* Search for a chunk by scanning bins, starting with next largest bin. This search is strictly by best-fit; i.e., the smallest (with ties going to approximately the least recently used) chunk that fits is selected. The bitmap avoids needing to check that most blocks are nonempty. The particular case of skipping all bins during warm-up phases when no chunks have been returned yet is faster than it might look. */
++idx;
bin = bin_at (av, idx);
block = idx2block (idx);
map = av->binmap[block];
bit = idx2bit (idx);

if (in_smallbin_range (nb))
  {   // more precisely: nb does not reach the minimum size served from the LargeBins

    // Execution reaches here when the fastbin path failed to allocate.
    idx = smallbin_index (nb);
    bin = bin_at (av, idx);   // bin is a pointer to the list head

    /* elided */
  }
/* If this is a large request, consolidate fastbins before continuing. While it might look excessive to kill all fastbins before even seeing if there is space available, this avoids fragmentation problems normally associated with fastbins. Also, in practice, programs tend to have runs of either small or large requests, but less often mixtures, so consolidation is not invoked all that often in most programs. And the programs that it is called frequently in otherwise tend to fragment. */
/*
   Conservatively use 32 bits per map word, even if on 64bit system
 */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)
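For orientation, idx2block and idx2bit (used in the loop below) select the 32-bit map word and the bit within it for a given bin index. A runnable sketch modeled on those macros (verify the exact definitions against your malloc.c):

#include <stdio.h>

#define BINMAPSHIFT 5
/* Modeled on glibc's idx2block/idx2bit; treat as an assumption. */
#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i)   (1U << ((i) & ((1U << BINMAPSHIFT) - 1)))

int main(void)
{
    unsigned idx = 70;   /* an arbitrary largebin index */
    printf("idx %u -> block %u, bit 0x%x\n", idx, idx2block(idx), idx2bit(idx));
    return 0;
}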
for (;;)
  {
    /* Skip rest of block if there are no more set bits in this block.  */
    if (bit > map || bit == 0)
      {   // bit > map: every set bit in this map word is below the current index, so skip to the next block
        do
          {
            if (++block >= BINMAPSIZE) /* out of bins */
              goto use_top;
          }
        while ((map = av->binmap[block]) == 0);

        bin = bin_at (av, (block << BINMAPSHIFT));
        bit = 1;
      }

    /* Advance to bin with set bit. There must be one. */
    while ((bit & map) == 0)
      {
        bin = next_bin (bin);
        bit <<= 1;
        assert (bit != 0);
      }
/* Inspect the bin. It is likely to be non-empty */ victim = last(bin);
    /* If a false alarm (empty bin), clear the bit. */
    if (victim == bin)
      {
        av->binmap[block] = map &= ~bit; /* Write through */
        bin = next_bin (bin);
        bit <<= 1;
      }
else { size = chunksize(victim);
        /* We know the first chunk in this bin is big enough to use. */
        assert ((unsigned long) (size) >= (unsigned long) (nb));
remainder_size = size - nb;
/* unlink */ unlink_chunk(av, victim);
        /* Exhaust */
        if (remainder_size < MINSIZE)
          {
            set_inuse_bit_at_offset (victim, size);
            if (av != &main_arena)
              set_non_main_arena (victim);
          }

use_top:
  /*
     If large enough, split off the chunk bordering the end of memory
     (held in av->top). Note that this is in accord with the best-fit
     search rule.  In effect, av->top is treated as larger (and thus
     less well fitting) than any other available chunk since it can be
     extended to be as large as necessary (up to system limitations).

     We require that av->top always exists (i.e., has size >= MINSIZE)
     after initialization, so if it would otherwise be exhausted by
     current request, it is replenished. (The main reason for ensuring
     it exists is that we may need MINSIZE space to put in fenceposts
     in sysmalloc.)
   */
victim = av->top; size = chunksize(victim);
if (__glibc_unlikely(size > av->system_mem)) malloc_printerr("malloc(): corrupted top size");
/*
   When we are using atomic ops to free fast chunks we can get
   here for all block sizes.
 */
else if (atomic_load_relaxed (&av->have_fastchunks))
  {
    malloc_consolidate (av);
    /* restore original bin index */
    if (in_smallbin_range (nb))
      idx = smallbin_index (nb);
    else
      idx = largebin_index (nb);
  }

#ifdef USE_MTAG
  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  *(volatile char *) mem;
#endif
int err = errno;
p = mem2chunk(mem);
/* Mark the chunk as belonging to the library again. */ (void) TAG_REGION(chunk2rawmem(p), CHUNK_AVAILABLE_SIZE(p) - CHUNK_HDR_SZ);
if (chunk_is_mmapped (p))                       /* release mmapped memory. */
  {
    /* See if the dynamic brk/mmap threshold needs adjusting.
       Dumped fake mmapped chunks do not affect the threshold.  */
    if (!mp_.no_dyn_threshold
        && chunksize_nomask (p) > mp_.mmap_threshold
        && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
        && !DUMPED_MAIN_ARENA_CHUNK (p))
      {
        mp_.mmap_threshold = chunksize (p);
        mp_.trim_threshold = 2 * mp_.mmap_threshold;
        LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                    mp_.mmap_threshold, mp_.trim_threshold);
      }
    munmap_chunk (p);
  }
else
  {
    MAYBE_INIT_TCACHE ();
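The threshold being tuned here is also user-visible: mallopt's documented M_MMAP_THRESHOLD knob pins it manually, as the short sketch below shows (the sizes are arbitrary):

#include <malloc.h>
#include <stdlib.h>

int main(void)
{
    mallopt(M_MMAP_THRESHOLD, 1 << 20);   /* only requests >= 1 MiB use mmap */
    void *p = malloc(2 << 20);            /* served by mmap */
    free(p);                              /* munmap'd immediately, not cached in a bin */
    return 0;
}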
/* Little security check which won't hurt performance: the
   allocator never wrapps around at the end of the address space.
   Therefore we can exclude some size values which might appear
   here by accident or by "design" from some intruder.  */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
    || __builtin_expect (misaligned_chunk (p), 0))
  malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size or a
   multiple of MALLOC_ALIGNMENT.  */
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
  malloc_printerr ("free(): invalid size");

if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())

#if TRIM_FASTBINS
    /*
       If TRIM_FASTBINS set, don't place chunks
       bordering top into fastbins
     */
    && (chunk_at_offset (p, size) != av->top)
#endif
    )
  {

    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size)) >= av->system_mem, 0))
      {
        bool fail = true;
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might result in a false positive.  Redo the test after
           getting the lock.  */
        if (!have_lock)
          {
            __libc_lock_lock (av->mutex);
            fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
            __libc_lock_unlock (av->mutex);
          }

        if (fail)
          malloc_printerr ("free(): invalid next size (fast)");
      }
/* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */ mchunkptr old = *fb, old2;
    if (SINGLE_THREAD_P)
      {   // single-threaded
        /* Check that the top of the bin is not the record we are going to
           add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))   // security check
          malloc_printerr ("double free or corruption (fasttop)");
        /* push p at the head of the FastBin list */
        p->fd = PROTECT_PTR (&p->fd, old);   // store old mangled with &p->fd, to hinder attacks
        *fb = p;
      }
    else   // multi-threaded
      do
        {
          /* Check that the top of the bin is not the record we are going to
             add (i.e., double free).  */
          if (__builtin_expect (old == p, 0))
            malloc_printerr ("double free or corruption (fasttop)");
          old2 = old;
          p->fd = PROTECT_PTR (&p->fd, old);
        }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);

    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       allocated again.  */
    if (have_lock && old != NULL
        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
      malloc_printerr ("invalid fastbin entry (free)");
  }
victim = _int_malloc (ar_ptr, bytes);
/* Retry with another arena only if we were able to find a usable arena
   before.  */
if (!victim && ar_ptr != NULL)
  {
    LIBC_PROBE (memory_malloc_retry, 1, bytes);
    ar_ptr = arena_get_retry (ar_ptr, bytes);
    victim = _int_malloc (ar_ptr, bytes);
  }
if (ar_ptr != NULL) __libc_lock_unlock(ar_ptr->mutex);
#if USE_TCACHE
  /* While we're here, if we see other chunks of the same size,
     stash them in the tcache.  */
  size_t tc_idx = csize2tidx (nb);
  if (tcache && tc_idx < mp_.tcache_bins)
    {
      mchunkptr tc_victim;

      /* While bin not empty and tcache not full, copy chunks.  */
      while (tcache->counts[tc_idx] < mp_.tcache_count
             && (tc_victim = *fb) != NULL)
        {
          if (__glibc_unlikely (misaligned_chunk (tc_victim)))
            malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
          if (SINGLE_THREAD_P)
            *fb = REVEAL_PTR (tc_victim->fd);
          else
            {
              REMOVE_FB (fb, pp, tc_victim);
              if (__glibc_unlikely (tc_victim == NULL))
                break;
            }
          tcache_put (tc_victim, tc_idx);
        }
    }
#endif
/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
/* Mark this chunk as "in the tcache" so the test in _int_free will detect a double free. */ e->key = tcache;
#if USE_TCACHE
      /* While we're here, if we see other chunks of the same size,
         stash them in the tcache.  */
      size_t tc_idx = csize2tidx (nb);
      if (tcache && tc_idx < mp_.tcache_bins)
        {
          mchunkptr tc_victim;

          /* While bin not empty and tcache not full, copy chunks over.  */
          while (tcache->counts[tc_idx] < mp_.tcache_count
                 && (tc_victim = last (bin)) != bin)
            {
              if (tc_victim != 0)
                {
                  bck = tc_victim->bk;
                  set_inuse_bit_at_offset (tc_victim, nb);
                  if (av != &main_arena)
                    set_non_main_arena (tc_victim);
                  bin->bk = bck;
                  bck->fd = bin;

#if USE_TCACHE
      /* If we've processed as many chunks as we're allowed while
         filling the cache, return one of the cached ones.  */
      ++tcache_unsorted_count;
      if (return_cached
          && mp_.tcache_unsorted_limit > 0
          && tcache_unsorted_count > mp_.tcache_unsorted_limit)
        {
          return tcache_get (tc_idx);
        }
#endif
#if USE_TCACHE
  /* If all the small chunks we found ended up cached, return one now.  */
  if (return_cached)
    {
      return tcache_get (tc_idx);
    }
#endif

These two snippets are strikingly similar: both first check return_cached (the first one checks a few extra conditions as well), and when the check passes they take the chunk cached for nb out of the tcache and return it.
#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);

        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache))
          {
            tcache_entry *tmp;
            size_t cnt = 0;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx];
                 tmp;
                 tmp = REVEAL_PTR (tmp->next), ++cnt)
              {
                if (cnt >= mp_.tcache_count)
                  malloc_printerr ("free(): too many chunks detected in tcache");
                if (__glibc_unlikely (!aligned_OK (tmp)))
                  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
                if (tmp == e)
                  malloc_printerr ("free(): double free detected in tcache 2");
                /* If we get here, it was a coincidence.  We've wasted a
                   few cycles, but don't abort.  */
              }
          }
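This check is easy to observe: the sketch below (assuming glibc 2.29 or later with tcache enabled) is expected to abort with "free(): double free detected in tcache 2".

#include <stdlib.h>

int main(void)
{
    void *p = malloc(0x40);
    free(p);
    free(p);   /* second free of the same chunk straight into the same tcache bin */
    return 0;
}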
graph TD
BEGIN-->__libc_malloc[\__libc_malloc\]
__libc_malloc-->A
A[allocate x bytes] --> B[compute required chunk size tbytes];
B --> C[compute tcache index tc_idx];
C --> D[tcache list for tc_idx is non-empty];
D -- True --> tcache_get[\tcache_get\]
tcache_get --> E[return pointer];
E --> END;
D -- False --> F{single-threaded?};
F -- True --> G1[\_int_malloc\]
F -- False --> H[arena_get locks an arena]
H --> G2[\_int_malloc\]
G2 --> L{allocation succeeded?}
L -- True -->I[unlock arena]
L -- False -->M[switch arena]
M --> G3[\_int_malloc\]
G3 -->I
I --> E
G1 -->E;
Overall flow
_int_malloc
Init: compute the size of the chunk to allocate
sysmalloc: if no usable arena exists, call sysmalloc to allocate via mmap
Fast Bin: one chunk size per bin, LIFO order, take the head node
tcache: if the allocation succeeded from a Fast Bin, head-insert the remaining chunks of that Fast Bin into the tcache
Small Bin: take the tail node
tcache: if the allocation succeeded from a Small Bin, head-insert the remaining chunks of that Small Bin into the tcache
Unsorted Bin:
Special rule: for a request in the Small Bin range, when the Unsorted Bin holds exactly one chunk and that chunk came from the last remainder, allocate from it (splitting when possible)
Chunks of exactly the requested size are head-inserted into the tcache until the tcache is full
Chunks of other sizes are sorted into the Large Bins (by size; on a size tie the current chunk becomes the second entry) or the Small Bins (head insertion)
Once the tcache is full, a chunk of the requested size taken from the Unsorted Bin is returned directly
If any chunk was cached during the Unsorted Bin scan, return one of them
Large Bin: pick the smallest chunk that satisfies the request, walking from the list head; on a size tie use the second chunk; split it when possible
Search larger bins via the binmap
Split from the top chunk
_int_free
tcache: if the corresponding bin is not full, put the chunk into the tcache (see the reuse-order sketch after this list)
Fast Bin: head insertion
If not allocated by mmap, try to coalesce with neighbouring chunks
If allocated by mmap, munmap it
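A minimal sketch of the tcache reuse order described above (assuming a tcache-enabled glibc, 2.26 or later, with default tunables):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x40);
    void *b = malloc(0x40);
    free(a);
    free(b);                      /* tcache bin now: b -> a (LIFO) */
    void *c = malloc(0x40);       /* expected: c == b */
    void *d = malloc(0x40);       /* expected: d == a */
    printf("a=%p b=%p\nc=%p d=%p\n", a, b, c, d);
    return 0;
}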
ptmalloc2 changelog
2.23
2.24
2.25
New
/* Same, except also perform an argument and result check.  First, we check
   that the padding done by request2size didn't result in an integer
   overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
   size isn't so large that a later alignment would lead to another integer
   overflow.  */
#define checked_request2size(req, sz) \
({                                    \
  (sz) = request2size (req);          \
  if (((sz) < (req))                  \
      || REQUEST_OUT_OF_RANGE (sz))   \
    {                                 \
      __set_errno (ENOMEM);           \
      return 0;                       \
    }                                 \
})

/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  struct tcache_perthread_struct *key;
} tcache_entry;

/* elided */

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
/* Mark this chunk as "in the tcache" so the test in _int_free will detect a double free. */ e->key = tcache;
/* Little security check which won't hurt performance: the
   allocator never wrapps around at the end of the address space.
   Therefore we can exclude some size values which might appear
   here by accident or by "design" from some intruder.  */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
    || __builtin_expect (misaligned_chunk (p), 0))
  malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size or a
   multiple of MALLOC_ALIGNMENT.  */
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
  malloc_printerr ("free(): invalid size");
check_inuse_chunk(av, p);
#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);

        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache))
          {
            tcache_entry *tmp;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx]; tmp; tmp = tmp->next)
              if (tmp == e)
                malloc_printerr ("free(): double free detected in tcache 2");
            /* If we get here, it was a coincidence.  We've wasted a
               few cycles, but don't abort.  */
          }
/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
} tcache_entry;

/* elided */

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}

/* elided */

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  /* elided */
  size = chunksize (p);

/* Little security check which won't hurt performance: the
   allocator never wrapps around at the end of the address space.
   Therefore we can exclude some size values which might appear
   here by accident or by "design" from some intruder.  */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
    || __builtin_expect (misaligned_chunk (p), 0))
  malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size or a
   multiple of MALLOC_ALIGNMENT.  */
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
  malloc_printerr ("free(): invalid size");
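Because this older tcache_entry has no key field, _int_free here cannot notice that a chunk is already sitting in the tcache. A sketch of the well-known consequence on glibc 2.26–2.28 (an assumption about those versions with default tunables; do not expect this on newer glibc):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *p = malloc(0x40);
    free(p);
    free(p);                    /* not detected on glibc 2.26-2.28 */
    void *a = malloc(0x40);
    void *b = malloc(0x40);
    printf("a=%p b=%p (expected equal)\n", a, b);
    return 0;
}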
/* If a small request, try to use last remainder if it is the only chunk in unsorted bin. This helps promote locality for runs of consecutive small requests. This is the only exception to best-fit, and applies only when there is no exact fit for a small chunk. */
          /* remove from unsorted list */
          if (__glibc_unlikely (bck->fd != victim))   // changed here
            malloc_printerr ("malloc(): corrupted unsorted chunks 3");
          unsorted_chunks (av)->bk = bck;
          bck->fd = unsorted_chunks (av);
/* Take now instead of binning if exact fit */
          if (size == nb)
            {
              set_inuse_bit_at_offset (victim, size);
              if (av != &main_arena)
                set_non_main_arena (victim);
#if USE_TCACHE
              /* Fill cache first, return to user only if cache fills.
                 We may return one of these chunks later.  */
              if (tcache_nb
                  && tcache->counts[tc_idx] < mp_.tcache_count)
                {
                  tcache_put (victim, tc_idx);
                  return_cached = 1;
                  continue;
                }
              else
                {
#endif
                  check_malloced_chunk (av, victim, nb);
                  void *p = chunk2mem (victim);
                  alloc_perturb (p, bytes);
                  return p;
#if USE_TCACHE
                }
#endif
            }

          /* place chunk in bin */

              /* maintain large bins in sorted order */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert (chunk_main_arena (bck->bk));
                  if ((unsigned long) (size)
                      < (unsigned long) chunksize_nomask (bck->bk))
                    {
                      fwd = bck;
                      bck = bck->bk;
#if USE_TCACHE
      /* If we've processed as many chunks as we're allowed while
         filling the cache, return one of the cached ones.  */
      ++tcache_unsorted_count;
      if (return_cached
          && mp_.tcache_unsorted_limit > 0
          && tcache_unsorted_count > mp_.tcache_unsorted_limit)
        {
          return tcache_get (tc_idx);
        }
#endif

#define MAX_ITERS       10000
      if (++iters >= MAX_ITERS)
        break;
    }

if (__glibc_unlikely (e->key == tcache))
  {
    tcache_entry *tmp;
    size_t cnt = 0;
    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
    for (tmp = tcache->entries[tc_idx];
         tmp;
         tmp = REVEAL_PTR (tmp->next), ++cnt)
      {
        if (cnt >= mp_.tcache_count)
          malloc_printerr ("free(): too many chunks detected in tcache");
        if (__glibc_unlikely (!aligned_OK (tmp)))
          malloc_printerr ("free(): unaligned chunk detected in tcache 2");
        if (tmp == e)
          malloc_printerr ("free(): double free detected in tcache 2");
        /* If we get here, it was a coincidence.  We've wasted a
           few cycles, but don't abort.  */
      }
  }
New:
if (__glibc_unlikely (e->key == tcache_key))
  {
    tcache_entry *tmp;
    size_t cnt = 0;
    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
    for (tmp = tcache->entries[tc_idx];
         tmp;
         tmp = REVEAL_PTR (tmp->next), ++cnt)
      {
        if (cnt >= mp_.tcache_count)
          malloc_printerr ("free(): too many chunks detected in tcache");
        if (__glibc_unlikely (!aligned_OK (tmp)))
          malloc_printerr ("free(): unaligned chunk detected in tcache 2");
        if (tmp == e)
          malloc_printerr ("free(): double free detected in tcache 2");
        /* If we get here, it was a coincidence.  We've wasted a
           few cycles, but don't abort.  */
      }
  }