2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/xen-netback/netback.c
@@ -334,6 +334,7 @@
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };

 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;

@@ -393,14 +396,16 @@
 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 	struct xen_netif_tx_request *txp = first;

-	nr_slots = shinfo->nr_frags + 1;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;

 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;

 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;

 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 					       - data_len);

+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;

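Note: the hunk above caps each grant copy at the destination page boundary and records the split in a per-slot bitmask, so the remainder of the slot is emitted as a second copy op on the next loop iteration. A minimal standalone sketch of that arithmetic (clamp_copy_to_page is a hypothetical helper, assuming a 4 KiB XEN_PAGE_SIZE):

	#include <stdint.h>

	#define XEN_PAGE_SIZE 4096u

	/* Clamp a copy of `amount` bytes landing at `dest_offset` within a
	 * page so it never crosses the page boundary; flag slot `slot` in
	 * `*split_mask` when a second copy op is needed for the rest.
	 */
	static unsigned int clamp_copy_to_page(unsigned int dest_offset,
					       unsigned int amount,
					       unsigned int slot,
					       uint32_t *split_mask)
	{
		if (dest_offset + amount > XEN_PAGE_SIZE) {
			*split_mask |= 1u << slot;	/* hypothetical bookkeeping */
			return XEN_PAGE_SIZE - dest_offset;
		}
		return amount;
	}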
@@ -420,7 +432,8 @@
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;

 		cop++;
 		data_len -= amount;
@@ -441,15 +454,16 @@
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
 		}
 	}

-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, gop++) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
@@ -462,12 +476,12 @@
 		txp++;
 	}

-	if (frag_overflow) {
+	if (nr_slots > 0) {

 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;

-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
@@ -478,6 +492,11 @@
 		}

 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}

 	(*copy_ops) = cop - queue->tx_copy_ops;
@@ -539,6 +558,13 @@
 		pending_idx = copy_pending_idx(skb, i);

 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)
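Note: a split slot contributes two grant-copy ops but only one pending index, so the status check above consumes both results and fails the slot if either half failed. A standalone sketch of that aggregation (slot_copy_status is a hypothetical helper; status 0 stands for GNTST_okay):

	#include <stdint.h>

	/* Combine the statuses of the one or two copy ops serving slot `i`:
	 * advance past the second op when the slot was split, and report
	 * the first non-zero (failing) status, if any.
	 */
	static int16_t slot_copy_status(const int16_t **status_iter,
					uint32_t split_mask, unsigned int i)
	{
		int16_t err = **status_iter;

		if (split_mask & (1u << i)) {
			(*status_iter)++;	/* second half of the split copy */
			if (!err)
				err = **status_iter;
		}
		return err;
	}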
@@ -975,10 +1001,8 @@

 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-			netdev_err(queue->vif->dev,
-				   "txreq.offset: %u, size: %u, end: %lu\n",
-				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+				   txreq.offset, txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}