2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/core/tso.c
@@ -6,18 +6,17 @@
 #include <asm/unaligned.h>
 
 /* Calculate expected number of TX descriptors */
-int tso_count_descs(struct sk_buff *skb)
+int tso_count_descs(const struct sk_buff *skb)
 {
 	/* The Marvell Way */
 	return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
 }
 EXPORT_SYMBOL(tso_count_descs);
 
-void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
 		   int size, bool is_last)
 {
-	struct tcphdr *tcph;
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int hdr_len = skb_transport_offset(skb) + tso->tlen;
 	int mac_hdr_len = skb_network_offset(skb);
 
 	memcpy(hdr, skb->data, hdr_len);
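
Note: tso_build_hdr() now takes the transport header length from the new tso->tlen field instead of assuming TCP via tcp_hdrlen(). The matching include/net/tso.h change is not shown in this diff; under that assumption the updated structure and prototypes presumably look roughly like the sketch below (field order and types are illustrative, not copied from the tree):

/* Sketch of the expected include/net/tso.h counterpart (illustrative only). */
struct tso_t {
	int	next_frag_idx;
	int	size;
	void	*data;
	u16	ip_id;
	u8	tlen;		/* transport header length: TCP header
				 * (incl. options) or sizeof(struct udphdr) */
	bool	ipv6;
	u32	tcp_seq;
};

int tso_count_descs(const struct sk_buff *skb);
void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last);
void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
int tso_start(struct sk_buff *skb, struct tso_t *tso);
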
@@ -30,23 +29,31 @@
 	} else {
 		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
 
-		iph->payload_len = htons(size + tcp_hdrlen(skb));
+		iph->payload_len = htons(size + tso->tlen);
 	}
-	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
-	put_unaligned_be32(tso->tcp_seq, &tcph->seq);
+	hdr += skb_transport_offset(skb);
+	if (tso->tlen != sizeof(struct udphdr)) {
+		struct tcphdr *tcph = (struct tcphdr *)hdr;
 
-	if (!is_last) {
-		/* Clear all special flags for not last packet */
-		tcph->psh = 0;
-		tcph->fin = 0;
-		tcph->rst = 0;
+		put_unaligned_be32(tso->tcp_seq, &tcph->seq);
+
+		if (!is_last) {
+			/* Clear all special flags for not last packet */
+			tcph->psh = 0;
+			tcph->fin = 0;
+			tcph->rst = 0;
+		}
+	} else {
+		struct udphdr *uh = (struct udphdr *)hdr;
+
+		uh->len = htons(sizeof(*uh) + size);
 	}
 }
 EXPORT_SYMBOL(tso_build_hdr);
 
-void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size)
 {
-	tso->tcp_seq += size;
+	tso->tcp_seq += size; /* not worth avoiding this operation for UDP */
 	tso->size -= size;
 	tso->data += size;
 
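
Note: the new else branch in tso_build_hdr() only ever sees UDP segments (tso->tlen == sizeof(struct udphdr)). Such skbs normally carry SKB_GSO_UDP_L4 and reach a driver built on these helpers only if it advertises NETIF_F_GSO_UDP_L4. A minimal userspace trigger, assuming the standard UDP_SEGMENT socket option, might look like:

/* Illustrative only: a userspace sender that requests UDP GSO.  Assumes a
 * kernel with UDP_SEGMENT support; the fallback define mirrors the uapi value. */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103
#endif

static int send_udp_gso(int fd, const void *buf, size_t len)
{
	int gso_size = 1400;	/* payload bytes per on-wire datagram */

	/* Each send() is split into gso_size chunks; with NETIF_F_GSO_UDP_L4
	 * the split happens in the driver, i.e. via tso_build_hdr() above. */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)))
		return -1;
	return send(fd, buf, len, 0) < 0 ? -1 : 0;
}
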
@@ -55,19 +62,21 @@
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
 
 		/* Move to next segment */
-		tso->size = frag->size;
-		tso->data = page_address(frag->page.p) + frag->page_offset;
+		tso->size = skb_frag_size(frag);
+		tso->data = skb_frag_address(frag);
 		tso->next_frag_idx++;
 	}
 }
 EXPORT_SYMBOL(tso_build_data);
 
-void tso_start(struct sk_buff *skb, struct tso_t *tso)
+int tso_start(struct sk_buff *skb, struct tso_t *tso)
 {
-	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+	int hdr_len = skb_transport_offset(skb) + tlen;
 
+	tso->tlen = tlen;
 	tso->ip_id = ntohs(ip_hdr(skb)->id);
-	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
+	tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0;
 	tso->next_frag_idx = 0;
 	tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
 
@@ -79,9 +88,10 @@
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
 
 		/* Move to next segment */
-		tso->size = frag->size;
-		tso->data = page_address(frag->page.p) + frag->page_offset;
+		tso->size = skb_frag_size(frag);
+		tso->data = skb_frag_address(frag);
 		tso->next_frag_idx++;
 	}
+	return hdr_len;
 }
 EXPORT_SYMBOL(tso_start);
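
Note: tso_start() now returns the full header length, so a driver no longer needs to compute skb_transport_offset() + tcp_hdrlen() itself (which would be wrong for a UDP GSO skb). A minimal sketch of the intended calling sequence follows; struct my_tx_queue, my_tx_put_hdr(), my_tx_put_data() and MY_HDR_BUF_SIZE are hypothetical stand-ins for real driver descriptor handling, not part of this change:

/* Sketch of a driver TX path using the updated helpers (illustrative only). */
static void my_xmit_tso(struct my_tx_queue *q, struct sk_buff *skb)
{
	int total = skb->len, hdr_len, seg_len, data_left;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);	/* L2 + L3 + L4 header bytes */
	total -= hdr_len;

	while (total > 0) {
		char hdr[MY_HDR_BUF_SIZE];

		/* Payload carried by this segment. */
		seg_len = min_t(int, skb_shinfo(skb)->gso_size, total);
		total -= seg_len;

		/* Build the per-segment headers (TCP or UDP) and queue them. */
		tso_build_hdr(skb, hdr, &tso, seg_len, total == 0);
		my_tx_put_hdr(q, hdr, hdr_len);

		/* Queue the payload, possibly spread over several frags. */
		data_left = seg_len;
		while (data_left > 0) {
			int chunk = min_t(int, tso.size, data_left);

			my_tx_put_data(q, tso.data, chunk);
			data_left -= chunk;
			tso_build_data(skb, &tso, chunk);
		}
	}
}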