根據Linux的協定棧,在發送tcp資料時,最終會調用如下函數:
檔案 net/ipv4/tcp.c
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size)
在此函數中,要根據mss來將大資料拆分成小封包發送到網絡上。
mss是通過如下函數擷取的:
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Return the connection's current MSS and compute the per-skb transmit
 * size goal for this socket.
 *
 * @sk:        the TCP socket being sent on
 * @size_goal: out-param; receives the xmit size goal from
 *             tcp_xmit_size_goal() (large goals are disallowed for
 *             MSG_OOB data)
 * @flags:     sendmsg flags; only MSG_OOB is consulted here
 *
 * Returns the value of tcp_current_mss(sk).
 */
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int cur_mss = tcp_current_mss(sk);

	*size_goal = tcp_xmit_size_goal(sk, cur_mss, !(flags & MSG_OOB));
	return cur_mss;
}
tcp_current_mss傳回TCP連結建立時協商的MSS,考慮協商的TCP選項。
tcp_xmit_size_goal考慮網卡是否支援gso,如果網卡支援gso,則以網卡的gso max size(扣除各層標頭長度)作為發送的size goal。
/* Compute the transmit size goal for one skb (excerpt; parts of the
 * body are elided with "…" in this note).
 *
 * When the caller permits large segments (large_allowed) and the socket
 * can use GSO, the goal is derived from the device's gso_max_size minus
 * the network-header, extension-header and TCP-header lengths.  The
 * returned goal is never smaller than mss_now.
 */
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
int large_allowed)
{
…
if (large_allowed && sk_can_gso(sk)) {
/* Start from the device GSO limit and subtract header overhead. */
xmit_size_goal = ((sk->sk_gso_max_size - 1) -
inet_csk(sk)->icsk_af_ops->net_header_len -
inet_csk(sk)->icsk_ext_hdr_len -
tp->tcp_header_len);
…
/* Fall back to the plain MSS when the computed goal is smaller. */
return max(xmit_size_goal, mss_now);
}
socket建立時對gso的設定:
/* Cache the route on the socket and derive the socket's offload
 * capabilities (GSO / SG / checksum) from the output device's features.
 *
 * @sk:  socket being configured
 * @dst: routing entry whose device supplies the feature flags
 */
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;

	if (!sk_can_gso(sk))
		return;

	if (dst->header_len) {
		/* Extra encapsulation headers: GSO cannot be used. */
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		return;
	}

	sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	sk->sk_gso_max_size = dst->dev->gso_max_size;
}
//完整的看一下gso對tcp發包過程的影響
/* TCP sendmsg path (excerpt; "..." marks elided code).  Shows where the
 * GSO-aware MSS / size goal is obtained and how user data pages are
 * attached to the skb's paged fragments.
 */
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
mss_now = tcp_send_mss(sk, &size_goal, flags); /* MSS with GSO taken into account */
sg = sk->sk_route_caps & NETIF_F_SG;
...
if (merge) {
/* Data is contiguous with the previous fragment: just grow it. */
skb_shinfo(skb)->frags[i - 1].size += copy;
} else {
skb_fill_page_desc(skb, i, page, off, copy); /* record where the skb's data is stored */
if (TCP_PAGE(sk)) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
/* Page still has room: keep a cached reference on the socket. */
get_page(page);
TCP_PAGE(sk) = page;
}
}
......
}
//填充tcp資料分片結構:
/* Fill one paged-fragment descriptor of the skb (pseudo-code excerpt;
 * the original signature takes skb, i, page, off, size).
 * Records which page holds the data, at what offset and length, and
 * bumps the fragment count to i + 1.
 */
skb_fill_page_desc()
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag->page = page;
frag->page_offset = off;
frag->size = size;
skb_shinfo(skb)->nr_frags = i + 1;
}