BCC, a Linux Tracing Tool (Networking)
Following on from the previous article, this section uses ICMP and TCP as examples to cover the networking-related parts of BCC.
Probing ICMP
Let's start with traceicmpsoftirq.py, the program from the article that prompted me to learn bcc. Its purpose is to find out which CPU core the process responding to ping runs on, and then to profile that core with perf to find the cause of the network latency. The source is as follows:
#!/usr/bin/python
bpf_text = """
#include <linux/ptrace.h>
#include <linux/sched.h> /* For TASK_COMM_LEN */
#include <linux/icmp.h>
#include <linux/netdevice.h>
struct probe_icmp_data_t
{
u64 timestamp_ns;
u32 tgid;
u32 pid;
char comm[TASK_COMM_LEN];
int v0;
};
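// perf event channel used to push probe_icmp_data_t records to user space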
BPF_PERF_OUTPUT(probe_icmp_events);
static inline unsigned char *my_skb_transport_header(const struct sk_buff *skb)
{
return skb->head + skb->transport_header;
}
static inline struct icmphdr *my_icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)my_skb_transport_header(skb);
}
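// kprobe handler: attached to icmp_echo(), so skb is the probed function's first argument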
int probe_icmp(struct pt_regs *ctx, struct sk_buff *skb)
{
u64 __pid_tgid = bpf_get_current_pid_tgid();
u32 __tgid = __pid_tgid >> 32;
u32 __pid = __pid_tgid; // implicit cast to u32 for bottom half
struct probe_icmp_data_t __data = {0};
__data.timestamp_ns = bpf_ktime_get_ns();
__data.tgid = __tgid;
__data.pid = __pid;
bpf_get_current_comm(&__data.comm, sizeof(__data.comm));
__be16 seq;
bpf_probe_read_kernel(&seq, sizeof(seq), &my_icmp_hdr(skb)->un.echo.sequence);
__data.v0 = (int)seq;
probe_icmp_events.perf_submit(ctx, &__data, sizeof(__data));
return 0;
}
"""
from bcc import BPF
import ctypes as ct
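# ctypes mirror of struct probe_icmp_data_t defined in the BPF program above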
class Data_icmp(ct.Structure):
_fields_ = [
("timestamp_ns", ct.c_ulonglong),
("tgid", ct.c_uint),
("pid", ct.c_uint),
("comm", ct.c_char * 16), # TASK_COMM_LEN
("v0", ct.c_uint),
]
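# compile the embedded C program and load it into the kernel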
b = BPF(text=bpf_text)
def print_icmp_event(cpu, data, size):
#event = b["probe_icmp_events"].event(data)
event = ct.cast(data, ct.POINTER(Data_icmp)).contents
print("%-7d %-7d %-15s %s" %
(event.tgid, event.pid,
event.comm.decode('utf-8', 'replace'),
event.v0))
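# attach probe_icmp to the entry of the kernel function icmp_echo and register the Python callback on the perf buffer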
b.attach_kprobe(event="icmp_echo", fn_name="probe_icmp")
b["probe_icmp_events"].open_perf_buffer(print_icmp_event)
while 1:
try:
b.kprobe_poll()
except KeyboardInterrupt:
exit()
The program above attaches a kprobe to the kernel function icmp_echo; whenever the kernel runs that function, the custom handler probe_icmp is executed and records the current tgid, pid and the sequence number of the ICMP packet (a small extension that also prints the CPU handling the echo is sketched after the list below). The pieces are as follows:
- my_skb_transport_header: obtains the transport-layer header address by offsetting into the sk_buff, and is used afterwards to read the sequence number from the ICMP header. This can be lifted straight from the kernel source of static bool icmp_echo(struct sk_buff *skb), which reaches the ICMP header as follows:
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
return skb->head + skb->transport_header;
}
As you can see, skb_transport_header does exactly what this program does, so its implementation can simply be ported over. Note that the kernel function skb_transport_header cannot be called directly to obtain the address of transport_header.
- bpf_get_current_pid_tgid(): returns the current PID. Note that this is the ID of the process currently running on the CPU, not of one particular process. Its kernel source is:
BPF_CALL_0(bpf_get_current_pid_tgid)
{
struct task_struct *task = current;
if (unlikely(!task))
return -EINVAL;
return (u64) task->tgid << 32 | task->pid;
}
Here current is defined as follows and yields the task_struct pointer of the currently executing process (see the referenced article for more details):
#define current get_current()
So, taking this program as an example, if a context switch happens while icmp_echo is being probed, bpf_get_current_pid_tgid may return the process that was switched in. The original article relied on exactly this mechanism to discover that switching to cadvisor was causing the network latency.
- bpf_probe_read_kernel: reads a member of a kernel structure. The original article used bpf_probe_read; see the related issue for details.
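Since the whole point of the exercise is to learn which core handles the echo, note that the perf-buffer callback already receives the CPU number as its first argument. Below is a minimal sketch (not part of the original script; it reuses the Data_icmp class and the ctypes import from above) of a callback that also prints the core:

def print_icmp_event(cpu, data, size):
    # 'cpu' is the core that submitted the perf event, i.e. the core that ran icmp_echo
    event = ct.cast(data, ct.POINTER(Data_icmp)).contents
    print("cpu=%-3d tgid=%-7d pid=%-7d comm=%-15s seq=%d" %
          (cpu, event.tgid, event.pid,
           event.comm.decode('utf-8', 'replace'), event.v0))

With the core identified, perf can then be pointed at that CPU, as described in the original article.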
The rest works the same way as the observability examples covered earlier.
Probing TCP
Now let's look at TCP probing, which traces the kernel functions tcp_v4_connect and tcp_v6_connect. The code comes from tools/tcpconnect in the official repository:
#!/usr/bin/python
from __future__ import print_function
from bcc import BPF
from bcc.containers import filter_by_containers
from bcc.utils import printb
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
from time import sleep
# arguments
examples = """examples:
./tcpconnect # trace all TCP connect()s
./tcpconnect -t # include timestamps
./tcpconnect -p 181 # only trace PID 181
./tcpconnect -P 80 # only trace port 80
./tcpconnect -P 80,81 # only trace port 80 and 81
./tcpconnect -U # include UID
./tcpconnect -u 1000 # only trace UID 1000
./tcpconnect -c # count connects per src ip and dest ip/port
./tcpconnect --cgroupmap mappath # only trace cgroups in this BPF map
./tcpconnect --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-P", "--port",
help="comma-separated list of destination ports to trace.")
parser.add_argument("-U", "--print-uid", action="store_true",
help="include UID on output")
parser.add_argument("-u", "--uid",
help="trace this UID only")
parser.add_argument("-c", "--count", action="store_true",
help="count connects per src ip and dest ip/port")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args() # parse command-line arguments
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(currsock, u32, struct sock *); // hash map that stashes the socket pointer across entry/return, keyed by thread id
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 uid;
u32 saddr;
u32 daddr;
u64 ip;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events); // perf output channel for IPv4 events
struct ipv6_data_t {
u64 ts_us;
u32 pid;
u32 uid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events); // perf output channel for IPv6 events
// separate flow keys per address family
struct ipv4_flow_key_t { // flow key for counting tcp_v4_connect calls per src/dst, used with "-c"/"--count"
u32 saddr;
u32 daddr;
u16 dport;
};
BPF_HASH(ipv4_count, struct ipv4_flow_key_t); // per-flow count of tcp_v4_connect calls
struct ipv6_flow_key_t { // flow key for counting tcp_v6_connect calls per src/dst, used with "-c"/"--count"
unsigned __int128 saddr;
unsigned __int128 daddr;
u16 dport;
};
BPF_HASH(ipv6_count, struct ipv6_flow_key_t); // per-flow count of tcp_v6_connect calls
int trace_connect_entry(struct pt_regs *ctx, struct sock *sk) // runs on entry to tcp_v4_connect/tcp_v6_connect
{
if (container_should_be_filtered()) {
return 0;
}
u64 pid_tgid = bpf_get_current_pid_tgid(); // 64-bit value: tgid in the upper half, tid in the lower half
u32 pid = pid_tgid >> 32; // the tgid lives in the upper 32 bits
u32 tid = pid_tgid; // the thread id is unique per thread
FILTER_PID // to Python the BPF program is just a string; this token is a placeholder later substituted via str.replace(). Here: filter on a specific PID
u32 uid = bpf_get_current_uid_gid();
FILTER_UID // filter on a specific UID
// stash the sock ptr for lookup on return
currsock.update(&tid, &sk); // stash the sk pointer, keyed by tid
return 0;
};
static int trace_connect_return(struct pt_regs *ctx, short ipver) // runs when tcp_v4_connect/tcp_v6_connect returns
{
int ret = PT_REGS_RC(ctx); // return value of tcp_v{4,6}_connect
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
struct sock **skpp;
skpp = currsock.lookup(&tid); // check whether trace_connect_entry recorded this thread on entry
if (skpp == 0) {
return 0; // missed entry
}
if (ret != 0) { // a non-zero return value means the SYN packet could not be sent
// failed to send SYN packet, may not have populated
// socket __sk_common.{skc_rcv_saddr, ...}
currsock.delete(&tid); // nothing useful was recorded for this attempt, drop the entry
return 0;
}
// pull in details
struct sock *skp = *skpp;
u16 dport = skp->__sk_common.skc_dport;
FILTER_PORT // filter on specific destination ports
if (ipver == 4) {
IPV4_CODE // placeholder replaced with the IPv4 handling selected by the command-line options
} else /* 6 */ {
IPV6_CODE // placeholder replaced with the IPv6 handling
}
currsock.delete(&tid);
return 0;
}
int trace_connect_v4_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 4);
}
int trace_connect_v6_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 6);
}
"""
struct_init = { 'ipv4':
{ 'count' : # count tcp_v4_connect invocations per flow
"""
struct ipv4_flow_key_t flow_key = {};
flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
flow_key.daddr = skp->__sk_common.skc_daddr;
flow_key.dport = ntohs(dport);
ipv4_count.increment(flow_key);""",
'trace' : # default tcp_v4_connect tracing: record addresses, port, etc.
"""
struct ipv4_data_t data4 = {.pid = pid, .ip = ipver};
data4.uid = bpf_get_current_uid_gid();
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = skp->__sk_common.skc_rcv_saddr;
data4.daddr = skp->__sk_common.skc_daddr;
data4.dport = ntohs(dport);
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));"""
},
'ipv6':
{ 'count' : # count tcp_v6_connect invocations per flow
"""
struct ipv6_flow_key_t flow_key = {};
bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
flow_key.dport = ntohs(dport);
ipv6_count.increment(flow_key);""",
'trace' : # default tcp_v6_connect tracing: record addresses, port, etc.
"""
struct ipv6_data_t data6 = {.pid = pid, .ip = ipver};
data6.uid = bpf_get_current_uid_gid();
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.dport = ntohs(dport);
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));"""
}
}
# code substitutions
if args.count: # "-c"/"--count" given: count connects instead of tracing each one
bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['count'])
bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['count'])
else: # otherwise trace every connect
bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['trace'])
bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['trace'])
if args.pid: # "-p"/"--pid" given: filter on that PID
bpf_text = bpf_text.replace('FILTER_PID',
'if (pid != %s) { return 0; }' % args.pid)
if args.port: # "-P"/"--port" given: filter on destination ports
dports = [int(dport) for dport in args.port.split(',')]
dports_if = ' && '.join(['dport != %d' % ntohs(dport) for dport in dports])
bpf_text = bpf_text.replace('FILTER_PORT',
'if (%s) { currsock.delete(&tid); return 0; }' % dports_if)
if args.uid: # "-u"/"--uid" given: filter on that UID
bpf_text = bpf_text.replace('FILTER_UID',
'if (uid != %s) { return 0; }' % args.uid)
bpf_text = filter_by_containers(args) + bpf_text
# strip any placeholder markers that were not replaced above
bpf_text = bpf_text.replace('FILTER_PID', '')
bpf_text = bpf_text.replace('FILTER_PORT', '')
bpf_text = bpf_text.replace('FILTER_UID', '')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# process event
def print_ipv4_event(cpu, data, size): # print callback for IPv4 trace events
event = b["ipv4_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.saddr)).encode(), #转换为主机序地址
inet_ntop(AF_INET, pack("I", event.daddr)).encode(), event.dport)) #转换为主机序地址和端口
def print_ipv6_event(cpu, data, size): # print callback for IPv6 trace events
event = b["ipv6_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.saddr).encode(), inet_ntop(AF_INET6, event.daddr).encode(),
event.dport))
def depict_cnt(counts_tab, l3prot='ipv4'): # print per-flow connect counts, sorted in descending order
for k, v in sorted(counts_tab.items(), key=lambda counts: counts[1].value, reverse=True):
depict_key = ""
if l3prot == 'ipv4':
depict_key = "%-25s %-25s %-20s" % ((inet_ntop(AF_INET, pack('I', k.saddr))),
inet_ntop(AF_INET, pack('I', k.daddr)), k.dport)
else:
depict_key = "%-25s %-25s %-20s" % ((inet_ntop(AF_INET6, k.saddr)),
inet_ntop(AF_INET6, k.daddr), k.dport)
print ("%s %-10d" % (depict_key, v.value))
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect_entry")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect_entry")
b.attach_kretprobe(event="tcp_v4_connect", fn_name="trace_connect_v4_return")
b.attach_kretprobe(event="tcp_v6_connect", fn_name="trace_connect_v6_return")
print("Tracing connect ... Hit Ctrl-C to end")
if args.count:
try:
while 1:
sleep(99999999)
except KeyboardInterrupt:
pass
# header
print("\n%-25s %-25s %-20s %-10s" % (
"LADDR", "RADDR", "RPORT", "CONNECTS"))
depict_cnt(b["ipv4_count"])
depict_cnt(b["ipv6_count"], l3prot=‘ipv6‘)
# read events
else:
# header
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
if args.print_uid:
print("%-6s" % ("UID"), end="")
print("%-6s %-12s %-2s %-16s %-16s %-4s" % ("PID", "COMM", "IP", "SADDR",
"DADDR", "DPORT"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
The C program above reads the kernel fields skp->__sk_common.skc_dport, skp->__sk_common.skc_rcv_saddr and skp->__sk_common.skc_daddr. As in the first example, the way to find these fields is to consult the tcp_v4_connect kernel source, which accesses the source/destination address and port through struct inet_sock *inet = inet_sk(sk);. The inet_sock structure is defined as follows; it is easy to see that inet_daddr, inet_rcv_saddr and inet_dport correspond exactly to the fields read by the code above, which shows how these members can be reached.
struct inet_sock {
/* sk and pinet6 has to be the first two members of inet_sock */
struct sock sk;
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6_pinfo *pinet6;
#endif
/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
#define inet_dport sk.__sk_common.skc_dport
#define inet_num sk.__sk_common.skc_num
...
The comments on the inet_sock structure also spell this out very clearly:
* @inet_daddr - Foreign IPv4 addr
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
So the IPv4 snippets can equally well follow the tcp_v4_connect source and fetch the address and port through inet_sock; the result is the same:
struct_init = { 'ipv4':
{ 'count' :
"""
struct ipv4_flow_key_t flow_key = {};
struct inet_sock *inet = inet_sk(skp);
flow_key.saddr = inet->inet_rcv_saddr;
flow_key.daddr = inet->inet_daddr;
u16 dport = inet->inet_dport;
flow_key.dport = ntohs(dport);
ipv4_count.increment(flow_key);""",
'trace' :
"""
struct ipv4_data_t data4 = {.pid = pid, .ip = ipver};
data4.uid = bpf_get_current_uid_gid();
data4.ts_us = bpf_ktime_get_ns() / 1000;
struct inet_sock *inet = inet_sk(skp);
data4.saddr = inet->inet_rcv_saddr;
data4.daddr = inet->inet_daddr;
u16 dport = inet->inet_dport;
data4.dport = ntohs(dport);
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));"""
},
'ipv6':
{ 'count' :
"""
struct ipv6_flow_key_t flow_key = {};
bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
flow_key.dport = ntohs(dport);
ipv6_count.increment(flow_key);""",
'trace' :
"""
struct ipv6_data_t data6 = {.pid = pid, .ip = ipver};
data6.uid = bpf_get_current_uid_gid();
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.dport = ntohs(dport);
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));"""
}
}
Overall, the code above just uses Python to substitute and splice together pieces of the C program, and most of it is no different from the observability examples. The key point, as always, is understanding the kernel's processing flow and choosing the right kernel function to probe.
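Since choosing the right function is the crux, it is worth confirming that the symbols you plan to probe exist and are traceable on the running kernel before writing any BPF code. Below is a minimal sketch of my own (not part of the original tools); it assumes tracefs is mounted under /sys/kernel/debug/tracing, which may be /sys/kernel/tracing on newer systems:

#!/usr/bin/python
# Check that the kernel functions used in this article can be kprobed by
# looking them up in the tracefs list of probe-able functions.
wanted = ["icmp_echo", "tcp_v4_connect", "tcp_v6_connect"]
with open("/sys/kernel/debug/tracing/available_filter_functions") as f:
    available = {line.split()[0] for line in f}
for fn in wanted:
    print("%-16s %s" % (fn, "traceable" if fn in available else "NOT found"))

Recent bcc versions also expose a similar lookup through BPF.get_kprobe_functions(), which matches a regular expression against the same list.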