+    /* Check to see if it's already in the tcache.  */
+    tcache_entry *e = (tcache_entry *) chunk2mem (p);
+
+    /* This test succeeds on double free.  However, we don't 100%
+       trust it (it also matches random payload data at a 1 in
+       2^<size_t> chance), so verify it's not an unlikely coincidence
+       before aborting.  */
+    if (__glibc_unlikely (e->key == tcache && tcache))  /* if the chunk being freed carries the tcache key, walk the bin */
+      {
+	tcache_entry *tmp;
+	LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
+	for (tmp = tcache->entries[tc_idx];
+	     tmp;
+	     tmp = tmp->next)
+	  if (tmp == e)  /* found a chunk at the same address -> genuine double free */
+	    malloc_printerr ("free(): double free detected in tcache 2");
+	/* If we get here, it was a coincidence.  We've wasted a few
+	   cycles, but don't abort.  */
+      }
+    if (tcache && tc_idx < mp_.tcache_bins && tcache->counts[tc_idx] < mp_.tcache_count)
@@ -327,6 +327,18 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 # define MAX_TCACHE_COUNT UINT16_MAX
 #endif
 
+/* Safe-Linking:
+   Use randomness from ASLR (mmap_base) to protect single-linked lists
+   of Fast-Bins and TCache.  That is, mask the "next" pointers of the
+   lists' chunks, and also perform allocation alignment checks on them.
+   This mechanism reduces the risk of pointer hijacking, as was done with
+   Safe-Unlinking in the double-linked lists of Small-Bins.
+   It assumes a minimum page size of 4096 bytes (12 bits).  Systems with
+   larger pages provide less entropy, although the pointer mangling
+   still works.  */
+#define PROTECT_PTR(pos, ptr) \
+  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))  /* mangle the next-chunk pointer being stored in a chunk with the storage address itself */
+#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)  /* demangle a stored pointer using the address it is stored at */
@@ -3605,8 +3631,10 @@ _int_malloc (mstate av, size_t bytes)
 	      /* While bin not empty and tcache not full, copy chunks.  */
 	      while (tcache->counts[tc_idx] < mp_.tcache_count
 		     && (tc_victim = *fb) != NULL)
 		{
+		  if (__glibc_unlikely (!aligned_OK (tc_victim)))
+		    malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
 		  if (SINGLE_THREAD_P)
-		    *fb = tc_victim->fd;
+		    *fb = REVEAL_PTR (tc_victim->fd);
 		  else
 		    {
 		      REMOVE_FB (fb, pp, tc_victim);
@@ -4196,11 +4224,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
 	for (tmp = tcache->entries[tc_idx];
 	     tmp;
-	     tmp = tmp->next)
+	     tmp = REVEAL_PTR (tmp->next))
+	  {
+	    if (__glibc_unlikely (!aligned_OK (tmp)))
+	      malloc_printerr ("free(): unaligned chunk detected in tcache 2");
 	    if (tmp == e)
 	      malloc_printerr ("free(): double free detected in tcache 2");
 	    /* If we get here, it was a coincidence.  We've wasted a few
 	       cycles, but don't abort.  */
+	  }
       }
 
     if (tcache->counts[tc_idx] < mp_.tcache_count)