This post focuses on analyzing the transmit function at the network protocol interface layer.
#### Analysis of int dev_queue_xmit(struct sk_buff *skb)
- Declaration:

```c
/* include/linux/netdevice.h */
int dev_queue_xmit(struct sk_buff *skb);
```
- Definition:

```c
/* net/core/dev.c */
int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);
```
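Before diving into __dev_queue_xmit(), here is a minimal sketch of how a caller is expected to use dev_queue_xmit(), following the contract spelled out in the comment block below: the device and priority must be set and the frame fully built before the call, and the skb is consumed whether or not the send succeeds. The helper `example_xmit` is my own illustration, not kernel code:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: transmit a prebuilt L2 frame on @dev. */
static int example_xmit(struct net_device *dev, const void *frame, size_t len)
{
	struct sk_buff *skb;
	int rc;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);	/* copy the prebuilt frame */

	skb->dev = dev;		/* must be set before dev_queue_xmit() */
	skb->priority = 0;	/* likewise the priority */

	rc = dev_queue_xmit(skb);	/* consumes the skb on every path */

	/* Failure can be a negative errno *or* a positive NET_XMIT_* code
	 * such as NET_XMIT_DROP (see the comment block below). */
	if (rc != NET_XMIT_SUCCESS)
		pr_debug("example_xmit: send failed, rc=%d\n", rc);
	return rc;
}
```

A real sender would usually also set skb->protocol and make sure the link-layer header is complete; the point here is only the calling contract.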
```c
/* __dev_queue_xmit() */
/**
 * __dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 * @accel_priv: private data used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
*
* -----------------------------------------------------------------------------------
* I notice this method can also return errors from the queue disciplines,
* including NET_XMIT_DROP, which is a positive value. So, errors can also
* be positive.
*
* Regardless of the return value, the skb is consumed, so it is currently
* difficult to retry a send to this method. (You can bump the ref count
* before sending to hold a reference for retry if you are careful.)
*
* When calling this method, interrupts MUST be enabled. This is because
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev; // the outgoing network device
struct netdev_queue *txq; // the device transmit queue
struct Qdisc *q; // the queueing discipline (Qdisc) attached to it
int rc = -ENOMEM;
skb_reset_mac_header(skb); // record the current skb->data offset as the MAC header
// unlikely() is just a branch-prediction hint to the compiler: a software
// timestamp at scheduling time (SKBTX_SCHED_TSTAMP) is rarely requested
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rcu_read_lock_bh();
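/* if skb->priority is not already set, inherit it from the socket's
 * net_prio cgroup mapping for this device */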
skb_update_prio(skb);
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
*/
/* Check the netdevice flags to see whether the skb's dst entry should be
 * dropped. IFF_XMIT_DST_RELEASE is normally set: it is the default applied
 * in alloc_netdev_mqs().
 */
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
else
skb_dst_force(skb);
#ifdef CONFIG_NET_SWITCHDEV
/* Don't forward if offload device already forwarded */
if (skb->offload_fwd_mark &&
skb->offload_fwd_mark == dev->offload_fwd_mark) {
consume_skb(skb);
rc = NET_XMIT_SUCCESS;
goto out;
}
#endif
/* Fetch this netdevice's txq and the txq's Qdisc. The Qdisc is what handles
 * congestion: normally the packet is handed straight to the driver, but when
 * the device is busy the packet has to be queued, and that is the Qdisc's job.
 */
txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
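/* note in tc_verd that the skb is now at the egress hook, so tc
 * classifier actions can tell egress from ingress */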
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
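/* tracepoint net:net_dev_queue; observable with perf or ftrace */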
trace_net_dev_queue(skb);
/* Check whether the Qdisc has an enqueue hook. If it does, call
 * __dev_xmit_skb() and enter the flow-controlled path. Note that taking this
 * path does not necessarily mean the skb is enqueued: only when the device is
 * busy does the Qdisc's enqueue operation actually run.
 */
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
}
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
Really, it is unlikely that netif_tx_lock protection is necessary
here. (f.e. loopback and IP tunnels are clean ignoring statistics
counters.)
However, it is possible, that they rely on protection
made by us here.
Check this and shot the lock. It is not prone from deadlocks.
Either shot noqueue qdisc, it is even simpler 8)
*/
/* If the Qdisc has no enqueue hook we end up here, which is common for
 * loopback/tunnel interfaces. Check whether the device is in the UP state.
 */
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
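/* xmit_recursion is a per-CPU counter: it stops stacked virtual
 * devices from recursing into the xmit path without bound */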
if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
goto recursion_alert;
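/* validate_xmit_skb() handles VLAN insertion, checksum fallback and GSO
 * segmentation; it may return a list of segments, or NULL on failure */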
skb = validate_xmit_skb(skb, dev);
if (!skb)
goto drop;
HARD_TX_LOCK(dev, txq, cpu);
/* if the txq is not in the stopped state, send the data via dev_hard_start_xmit() */
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
}
}
HARD_TX_UNLOCK(dev, txq);
net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
dev->name);
}
}
rc = -ENETDOWN;
drop:
rcu_read_unlock_bh();
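/* account the drop; kfree_skb_list() also frees any segment list
 * produced by validate_xmit_skb() */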
atomic_long_inc(&dev->tx_dropped);
kfree_skb_list(skb);
return rc;
out:
rcu_read_unlock_bh();
return rc;
}
```
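One detail from the comment block above deserves a concrete illustration: since the skb is consumed regardless of the return value, a retry requires bumping the reference count before sending. Below is a minimal sketch of that pattern with a hypothetical helper that retries exactly once; the kernel comment only hints at this and warns that you must be careful, so treat it as a sketch, not a recipe:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: send @skb, retrying once on failure. */
static int xmit_with_one_retry(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);			/* hold an extra reference across the send */
	rc = dev_queue_xmit(skb);	/* consumes one reference, success or not */
	if (rc == NET_XMIT_SUCCESS) {
		consume_skb(skb);	/* drop the extra reference we took */
		return rc;
	}

	/* Our extra reference kept the skb alive, so one more attempt is
	 * possible. This call consumes the remaining reference. */
	return dev_queue_xmit(skb);
}
```

An immediate retry will often fail for the same reason (congestion, a full queue), so real code would typically defer the retry rather than resend back-to-back.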