Beyond the more familiar kernel panics and soft lockups, an ordinary kernel deadlock may have no fatal effect on the operating system at all. The case analyzed below is one example: an ops engineer found that running top on an ECS instance and pressing c caused top to hang, unresponsive to any further input.
Some observation first: pressing c in top toggles the display of each process's full startup command line. Only the top process hung, so a new shell could still be opened, and from it the top process was seen in the UN (uninterruptible sleep) state. Its kernel stack showed it had been scheduled away inside rwsem_down_read_failed, the function called when an attempt to take a rw_semaphore for reading fails. The key questions are therefore: which semaphore is this, exactly? And who is holding it?
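For context on where top is parked, the slow path it entered looks roughly like the sketch below. This is a heavily abridged rendering of rwsem_down_read_failed from lib/rwsem.c of this kernel generation (count adjustment, list removal, and wakeup logic omitted), not a literal quote:

/* Heavily abridged shape of rwsem_down_read_failed (lib/rwsem.c,
 * 3.10-era kernels); count handling and wakeups omitted. */
struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        list_add_tail(&waiter.list, &sem->wait_list);   /* join the FIFO wait queue */

        /* sleep until a waker grants us the lock and clears waiter.task */
        while (true) {
                set_task_state(current, TASK_UNINTERRUPTIBLE);  /* the UN state */
                if (!waiter.task)
                        break;
                schedule();                                     /* top sits here */
        }
        return sem;
}

With that picture in mind, let's go straight to the core dump.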
Deriving the semaphore address
The core dump captured several top processes in the UN state. Picking one at random, it was reading /proc/4424/cmdline from the proc filesystem:
crash> bt
PID: 28968 TASK: ffff88041a820fb0 CPU: 3 COMMAND: "top"
#0 [ffff880387b8bd28] __schedule at ffffffff8168c1a5
#1 [ffff880387b8bd90] schedule at ffffffff8168c7f9
#2 [ffff880387b8bda0] rwsem_down_read_failed at ffffffff8168e1a5
#3 [ffff880387b8be08] call_rwsem_down_read_failed at ffffffff81327618
#4 [ffff880387b8be58] down_read at ffffffff8168b980
#5 [ffff880387b8be70] proc_pid_cmdline_read at ffffffff8126f712
#6 [ffff880387b8bf00] vfs_read at ffffffff811fe86e
#7 [ffff880387b8bf38] sys_read at ffffffff811ff43f
#8 [ffff880387b8bf80] system_call_fastpath at ffffffff81697809
RIP: 00007f83249077e0 RSP: 00007fff1f5c99e8 RFLAGS: 00000246
RAX: 0000000000000000 RBX: ffffffff81697809 RCX: ffffffffffffffff
RDX: 0000000000020000 RSI: 0000000000c07700 RDI: 0000000000000009
RBP: 0000000000020000 R8: 00007f8324866988 R9: 0000000000000012
R10: 0000000000000007 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000c07700 R14: 0000000000000000 R15: 0000000000c07700
ORIG_RAX: 0000000000000000 CS: 0033 SS: 002b
crash> files
PID: 28968 TASK: ffff88041a820fb0 CPU: 3 COMMAND: "top"
ROOT: / CWD: /root
FD FILE DENTRY INODE TYPE PATH
0 ffff8804c0f47900 ffff88017f80ad80 ffff8807e05a7028 CHR /dev/tty1
1 ffff8804c0f47900 ffff88017f80ad80 ffff8807e05a7028 CHR /dev/tty1
2 ffff8804bfadbc00 ffff88017f80a240 ffff8807e05a4850 CHR /dev/null
3 ffff8804c0f47900 ffff88017f80ad80 ffff8807e05a7028 CHR /dev/tty1
4 ffff8804bfadb400 ffff880449bc18c0 ffff8802e1bad750 REG /proc/stat
5 ffff8804bfadb000 ffff8807dc5bf980 ffff88048fbfdf00 REG /proc/uptime
6 ffff8803d3217200 ffff8807dc5befc0 ffff88048fbfd750 REG /proc/meminfo
7 ffff8800686c5200 ffff8802e290c240 ffff8802e290ae60 REG /proc/loadavg
8 ffff8800686c5000 ffff88017f808240 ffff88017f80c040 DIR /proc/
9 ffff8804bf16c400 ffff8806afc70900 ffff8805366f1f00 REG /proc/4424/cmdline
So proc_pid_cmdline_read failed to take the lock in down_read. The relevant code is around line 238 (the return address saved on the stack points at line 238, the instruction right after the down_read call at line 237):
203 static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
204 size_t _count, loff_t *pos)
205 {
206 struct task_struct *tsk;
207 struct mm_struct *mm;
208 char *page;
209 unsigned long count = _count;
210 unsigned long arg_start, arg_end, env_start, env_end;
211 unsigned long len1, len2, len;
212 unsigned long p;
213 char c;
214 ssize_t rv;
215
216 BUG_ON(*pos < 0);
217
218 tsk = get_proc_task(file_inode(file));
219 if (!tsk)
220 return -ESRCH;
221 mm = get_task_mm(tsk);
222 put_task_struct(tsk);
223 if (!mm)
224 return 0;
225 /* Check if process spawned far enough to have cmdline. */
226 if (!mm->env_end) {
227 rv = 0;
228 goto out_mmput;
229 }
230
231 page = (char *)__get_free_page(GFP_TEMPORARY);
232 if (!page) {
233 rv = -ENOMEM;
234 goto out_mmput;
235 }
236
237 down_read(&mm->mmap_sem);
238 arg_start = mm->arg_start;
239 arg_end = mm->arg_end;
240 env_start = mm->env_start;
241 env_end = mm->env_end;
242 up_read(&mm->mmap_sem);
......
There are several ways to find &mm->mmap_sem here; this walkthrough derives it from the disassembly and the data left on the stack. Around the call site you can see that proc_pid_cmdline_read computes &mm->mmap_sem as rbx+0x78 (rbx holds the mm_struct pointer, so mmap_sem sits at offset 0x78 inside mm_struct) and, before calling down_read, spills that address to [rbp-0x60]:
0xffffffff8126f6eb <proc_pid_cmdline_read+139>: mov edi,0x800d0
0xffffffff8126f6f0 <proc_pid_cmdline_read+144>: call 0xffffffff81185f70 <__get_free_pages>
0xffffffff8126f6f5 <proc_pid_cmdline_read+149>: test rax,rax
0xffffffff8126f6f8 <proc_pid_cmdline_read+152>: mov QWORD PTR [rbp-0x40],rax
0xffffffff8126f6fc <proc_pid_cmdline_read+156>: je 0xffffffff8126f9f0 <proc_pid_cmdline_read+912>
0xffffffff8126f702 <proc_pid_cmdline_read+162>: lea rax,[rbx+0x78]
0xffffffff8126f706 <proc_pid_cmdline_read+166>: mov rdi,rax
0xffffffff8126f709 <proc_pid_cmdline_read+169>: mov QWORD PTR [rbp-0x60],rax
0xffffffff8126f70d <proc_pid_cmdline_read+173>: call 0xffffffff8168b960 <down_read>
0xffffffff8126f712 <proc_pid_cmdline_read+178>: mov rax,QWORD PTR [rbx+0x128]
Because the calls made afterwards do not disturb proc_pid_cmdline_read's stack frame, the spilled value is still sitting there. From the dump below, the frame's rbp is ffff880387b8bef8 (the saved-rbp slot just before the return into vfs_read), so [rbp-0x60] is ffff880387b8bef8 - 0x60 = ffff880387b8be98, and the value stored there, ffff8801f7b151b8, is the address of mmap_sem.
#5 [ffff880387b8be70] proc_pid_cmdline_read at ffffffff8126f712
ffff880387b8be78: ffff8804bf16c400 0000000000020000
ffff880387b8be88: ffff8805366f1f00 ffff8804bf16c410
ffff880387b8be98: ffff8801f7b151b8 ffff880387b8bed0
ffff880387b8bea8: ffffffff812a9504 0000000000020000
ffff880387b8beb8: ffff8804897de000 0000000000000000
ffff880387b8bec8: 00000000f38e5979 ffff8804bf16c400
ffff880387b8bed8: 0000000000c07700 ffff880387b8bf48
ffff880387b8bee8: 0000000000020000 0000000000000009
ffff880387b8bef8: ffff880387b8bf30 ffffffff811fe86e
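Putting the numbers together, here is a trivial userspace sketch just to make the arithmetic explicit; the constants are the values read off the disassembly and the stack dump above:

#include <stdio.h>

int main(void)
{
        unsigned long rbp      = 0xffff880387b8bef8UL; /* frame base of proc_pid_cmdline_read */
        unsigned long slot     = rbp - 0x60;           /* where &mm->mmap_sem was spilled */
        unsigned long mmap_sem = 0xffff8801f7b151b8UL; /* content of that slot in the dump */
        unsigned long mm       = mmap_sem - 0x78;      /* lea rax,[rbx+0x78] => mmap_sem = mm + 0x78 */

        printf("stack slot [rbp-0x60]: %#lx\n", slot);     /* ffff880387b8be98 */
        printf("&mm->mmap_sem:         %#lx\n", mmap_sem);
        printf("mm_struct:             %#lx\n", mm);       /* ffff8801f7b15140 */
        return 0;
}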
The semaphore's wait queue
The previous section located the semaphore top is waiting on: mmap_sem, at ffff8801f7b151b8. It is a rw_semaphore, a primitive the kernel typically uses in read-mostly, write-rarely scenarios.
crash> rw_semaphore ffff8801f7b151b8
struct rw_semaphore {
count = -4294967295,
wait_lock = {
raw_lock = {
{
head_tail = 195300260,
tickets = {
head = 2980,
tail = 2980
}
}
}
},
wait_list = {
next = 0xffff8807d9b03dd0,
prev = 0xffff8804d1f6bdb0
}
}
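The count field is worth decoding before moving on. On x86-64 kernels of this generation, rwsem packs the active-holder count into the low 32 bits and applies a one-time RWSEM_WAITING_BIAS of -0x100000000 when the wait list becomes non-empty. The sketch below decodes the dumped value under that assumption (bias constants as in the 3.10-era asm/rwsem.h); since wait_list above is clearly non-empty, stripping the bias leaves the number of active holders:

#include <stdio.h>

/* Bias constants assumed from the 3.10-era x86-64 asm/rwsem.h. */
#define RWSEM_ACTIVE_BIAS   1L
#define RWSEM_ACTIVE_MASK   0xffffffffL
#define RWSEM_WAITING_BIAS  (-RWSEM_ACTIVE_MASK - 1)    /* -0x100000000 */

int main(void)
{
        long count = -4294967295L;              /* value dumped from the core */

        /* wait_list is non-empty, so WAITING_BIAS was applied exactly once;
         * removing it leaves the number of active holders. */
        long active = count - RWSEM_WAITING_BIAS;

        printf("count  = %#lx\n", (unsigned long)count);  /* 0xffffffff00000001 */
        printf("active = %ld holder(s)\n", active);       /* 1 */
        return 0;
}

So before even walking the wait list, count already says that exactly one task holds the semaphore; the hunt below is for that single task.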
Looking at the rw_semaphore implementation, each waiting task is described by a struct rwsem_waiter whose task member points to the waiting process, and rw_semaphore.wait_list is linked through rwsem_waiter.list. Walking rw_semaphore.wait_list as a list of rwsem_waiter therefore yields the queue of waiting processes.
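For reference, the waiter descriptor (abridged from lib/rwsem.c of this kernel generation) looks like:

struct rwsem_waiter {
        struct list_head list;          /* linked into rw_semaphore.wait_list */
        struct task_struct *task;       /* the sleeping process */
        enum rwsem_waiter_type type;    /* RWSEM_WAITING_FOR_READ / _WRITE */
};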
crash> list rwsem_waiter.list -s rwsem_waiter.task,type -h 0xffff8807d9b03dd0
ffff8807d9b03dd0
task = 0xffff880426cbaf10
type = RWSEM_WAITING_FOR_WRITE
ffff8802d3c17db0
task = 0xffff8802b3bd4e70
type = RWSEM_WAITING_FOR_READ
ffff8807de05fdb0
task = 0xffff8802a1e03ec0
type = RWSEM_WAITING_FOR_READ
ffff88018dbe3db0
task = 0xffff88018da3de20
type = RWSEM_WAITING_FOR_READ
ffff88011032bdb0
task = 0xffff8807c1fd3ec0
type = RWSEM_WAITING_FOR_READ
ffff8804fd3fbdb0
task = 0xffff8800ba3f4e70
type = RWSEM_WAITING_FOR_READ
ffff8807ffd87db0
task = 0xffff880012dd8fb0
type = RWSEM_WAITING_FOR_READ
ffff8801bc5ebdb0
task = 0xffff88046094de20
type = RWSEM_WAITING_FOR_READ
ffff8805c11b7db0
task = 0xffff8807bcf8edd0
type = RWSEM_WAITING_FOR_READ
......
Who took the semaphore?
The wait queue for mmap_sem recovered in the previous section is very long: over a thousand entries, meaning over a thousand processes are stuck in the UN state waiting for it. How do we find out who actually holds the semaphore? Turn the problem around: whether a process is still waiting for mmap_sem or has already taken it, it must have gone through down_read/down_write just as top did, passing the semaphore's address through registers and spilling it onto the stack along the way, so its kernel stack should still contain the address of mmap_sem. Comparing the set of processes whose stacks contain that address (crash's search -t scans every task's kernel stack) against the processes in the wait queue exposes the one squatting on the lock. Comparing counts first: the number of processes with the address on their stack is exactly one more than the number of queued waiters.
crash> search -t ffff8801f7b151b8 | grep TASK | wc -l
1470
crash> list rwsem_waiter.list -s rwsem_waiter.task -h 0xffff8807d9b03dd0 | grep task | wc -l
1469
......
Following that thread, the extra process is easy to identify: PID 4442.
crash> bt 4442
PID: 4442 TASK: ffff880426cbbec0 CPU: 2 COMMAND: "filebeat"
#0 [ffff8807a6643690] __schedule at ffffffff8168c1a5
#1 [ffff8807a66436f8] schedule at ffffffff8168c7f9
#2 [ffff8807a6643708] schedule_timeout at ffffffff8168a239
#3 [ffff8807a66437b0] io_schedule_timeout at ffffffff8168bd9e
#4 [ffff8807a66437e0] io_schedule at ffffffff8168be38
#5 [ffff8807a66437f0] bt_get at ffffffff812fb915
#6 [ffff8807a6643860] blk_mq_get_tag at ffffffff812fbe7f
#7 [ffff8807a6643888] __blk_mq_alloc_request at ffffffff812f725b
#8 [ffff8807a66438b8] blk_mq_map_request at ffffffff812f96d1
#9 [ffff8807a6643928] blk_sq_make_request at ffffffff812fa430
#10 [ffff8807a66439b0] generic_make_request at ffffffff812eee69
#11 [ffff8807a66439f8] submit_bio at ffffffff812eefb1
#12 [ffff8807a6643a50] do_mpage_readpage at ffffffff8123ffed
#13 [ffff8807a6643b28] mpage_readpages at ffffffff8124058b
#14 [ffff8807a6643bf8] ext4_readpages at ffffffffa01df23c [ext4]
#15 [ffff8807a6643c08] __do_page_cache_readahead at ffffffff8118dd2c
#16 [ffff8807a6643cc8] ra_submit at ffffffff8118e3c1
#17 [ffff8807a6643cd8] filemap_fault at ffffffff811836f5
#18 [ffff8807a6643d38] ext4_filemap_fault at ffffffffa01e8016 [ext4]
#19 [ffff8807a6643d60] __do_fault at ffffffff811ac83c
#20 [ffff8807a6643db0] do_read_fault at ffffffff811accd3
#21 [ffff8807a6643e00] handle_mm_fault at ffffffff811b1461
#22 [ffff8807a6643e98] __do_page_fault at ffffffff81692cc4
#23 [ffff8807a6643ef8] trace_do_page_fault at ffffffff816930a6
#24 [ffff8807a6643f38] do_async_page_fault at ffffffff8169274b
#25 [ffff8807a6643f50] async_page_fault at ffffffff8168f238
RIP: 0000000000adf1f9 RSP: 00007fcefbe06860 RFLAGS: 00010297
RAX: 0000000000000004 RBX: 0000000000000000 RCX: 0000000000ad1100
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 00007fcefbe06b28 R8: 000000c420066080 R9: 000000007fffffff
R10: 0000000001a14630 R11: 0000000001e89ee0 R12: 000000c42239cd70
R13: 0000000001a14630 R14: 0000000000aea430 R15: 0000000000000000
ORIG_RAX: ffffffffffffffff CS: 0033 SS: 002b
This process appears to be hung on IO. Tracing back up its call chain shows that __do_page_fault took the mmap_sem semaphore on the way down:
1122 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1123 if ((error_code & PF_USER) == 0 &&
1124 !search_exception_tables(regs->ip)) {
1125 bad_area_nosemaphore(regs, error_code, address);
1126 return;
1127 }
1128 retry:
1129 down_read(&mm->mmap_sem);
1130 } else {
1131 /*
1132 * The above down_read_trylock() might have succeeded in
1133 * which case we'll have missed the might_sleep() from
1134 * down_read():
1135 */
1136 might_sleep();
1137 }
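Further down the same function, the semaphore is only dropped again once the fault has been handled in full (abridged excerpt; exact line numbers vary by kernel):

	fault = handle_mm_fault(mm, vma, address, flags);
	...
	up_read(&mm->mmap_sem);

handle_mm_fault is what eventually issues the readahead IO via filemap_fault in the stack above, so 4442 is sleeping on IO with mmap_sem still held for read.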
As for why process 4442 never released mmap_sem: after some digging, it appears to have hit a kernel bug in bt_get and hung in that function indefinitely: https://lore.kernel.org/lkml/5485BBD2.4040103@acm.org/#Z30::20block:blk-mq-tag.c . Coming back to top: it hung while reading /proc/4424/cmdline, and 4442 belongs to the same thread group as 4424, so they share one mm_struct and hence the same mmap_sem. After 4442 took the lock with down_read(&mm->mmap_sem) in the page-fault path, the first process in the wait queue (task 0xffff880426cbaf10) attempted down_write. Since down_read and down_write are mutually exclusive, and later readers queue behind an already-queued writer rather than sharing the lock, every subsequent process requesting mmap_sem of this mm_struct for reading joined the wait queue as well, producing exactly the hang seen when pressing c in top.
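The queueing behavior that turns one stuck reader plus one waiting writer into a thousand-task pileup can be modeled with a toy, single-threaded sketch (illustrative names only, not kernel code): once any waiter is queued, later readers line up behind it instead of sharing the read lock.

#include <stdio.h>

enum req { READ, WRITE };

struct sem {
        int active_readers;
        int active_writer;
        int waiters;            /* queued tasks, FIFO */
};

static const char *try_acquire(struct sem *s, enum req r)
{
        if (s->active_writer) {
                s->waiters++;
                return "queued (writer active)";
        }
        if (r == WRITE) {
                if (s->active_readers || s->waiters) {
                        s->waiters++;
                        return "queued (lock busy or waiters ahead)";
                }
                s->active_writer = 1;
                return "acquired for write";
        }
        if (s->waiters) {       /* fairness: readers do not overtake queued waiters */
                s->waiters++;
                return "queued (behind an earlier waiter)";
        }
        s->active_readers++;
        return "acquired for read";
}

int main(void)
{
        struct sem mmap_sem = {0, 0, 0};

        printf("filebeat  down_read : %s\n", try_acquire(&mmap_sem, READ));
        printf("writer    down_write: %s\n", try_acquire(&mmap_sem, WRITE));
        printf("top       down_read : %s\n", try_acquire(&mmap_sem, READ));
        printf("reader N  down_read : %s\n", try_acquire(&mmap_sem, READ));
        return 0;
}

That is exactly the picture in the core: filebeat holds the lock for read and never returns from its IO, task 0xffff880426cbaf10 waits for write at the head of the queue, and every later reader, top included, piles up behind it.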