@@ -334,6 +334,7 @@
 struct xenvif_tx_cb {
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
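The new split_mask field records, one bit per copy op, whether that op had to be split at a page boundary; a u32 can describe up to 32 head copy ops. A minimal standalone sketch of the bookkeeping (the struct layout and SLOTS_MAX value are illustrative stand-ins, not the driver's definitions):

	/* Standalone sketch of the split_mask bookkeeping added above; the
	 * struct layout and SLOTS_MAX are illustrative stand-ins, not the
	 * driver's definitions.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SLOTS_MAX 18	/* stand-in for XEN_NETBK_LEGACY_SLOTS_MAX */

	struct tx_cb_sketch {
		uint16_t copy_pending_idx[SLOTS_MAX + 1];
		uint8_t copy_count;
		uint32_t split_mask;	/* bit i set: copy op i was split in two */
	};

	int main(void)
	{
		struct tx_cb_sketch cb = { .copy_count = 3, .split_mask = 0 };

		cb.split_mask |= 1U << 1;	/* op 1 crossed a page boundary */

		for (unsigned int i = 0; i < cb.copy_count; i++)
			printf("op %u split: %u\n", i, (cb.split_mask >> i) & 1U);
		return 0;
	}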
@@ -361,6 +362,8 @@ static struct sk_buff *xenvif_alloc_skb(unsigned int size)
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
	if (unlikely(skb == NULL))
		return NULL;
 
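BUILD_BUG_ON fires at compile time, and sizeof never evaluates its operand, so placing the check before the NULL test is safe. A rough user-space equivalent with C11 _Static_assert; the 48-byte skb->cb size and the 18-slot maximum are assumptions of this sketch:

	/* User-space approximation of the BUILD_BUG_ON above using C11
	 * _Static_assert. The 48-byte skb->cb size and the 18-slot maximum
	 * are assumptions made for this sketch.
	 */
	struct cb_sketch {
		unsigned short copy_pending_idx[18 + 1];
		unsigned char copy_count;
		unsigned int split_mask;
	};

	_Static_assert(sizeof(struct cb_sketch) <= 48,
		       "control block must fit in skb->cb");

	int main(void) { return 0; }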
@@ -393,14 +396,16 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags + 1;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
	/* Create copy ops for exactly data_len bytes into the skb head. */
	__skb_put(skb, data_len);
	while (data_len > 0) {
		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
		cop->source.u.ref = txp->gref;
		cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
					       - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
		cop->len = amount;
		cop->flags = GNTCOPY_source_gref;
 
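The clamp keeps each grant copy inside one destination page. For example, with a 4096-byte page, a copy aimed at offset 3000 for 2000 bytes is cut to 1096 bytes, the op's bit is set in split_mask, and the remaining 904 bytes are emitted as a second op on the next loop iteration. A hedged standalone sketch of just that arithmetic:

	/* Worked example of the clamp above, assuming a 4096-byte
	 * XEN_PAGE_SIZE; all numbers are made up for illustration.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SZ 4096u

	int main(void)
	{
		unsigned int dest_offset = 3000, amount = 2000;
		bool split = false;

		if (dest_offset + amount > PAGE_SZ) {
			amount = PAGE_SZ - dest_offset;	/* only 1096 bytes fit */
			split = true;	/* remaining 904 go in a second op */
		}
		printf("first copy op: %u bytes, split=%d\n", amount, split);
		return 0;
	}

Note that one bit per op suffices: the remainder starts at the next page boundary (destination offset 0), so it can never need a second split, and the unchanged check further down still rejects any tx request whose source crosses a page.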
@@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		pending_idx = queue->pending_ring[index];
		callback_param(queue, pending_idx).ctx = NULL;
		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;
 
		cop++;
		data_len -= amount;
@@ -441,15 +454,16 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
			nr_slots--;
		} else {
			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
			 */
			txp->offset += amount;
			txp->size -= amount;
		}
	}
 
-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, gop++) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp,
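Bounding the loop by both nr_slots and MAX_SKB_FRAGS means the frag array fills only from slots not already consumed by head copy ops, and anything beyond MAX_SKB_FRAGS spills into the frag_list skb. A toy calculation with made-up numbers (MAX_FRAGS stands in for MAX_SKB_FRAGS):

	/* Toy slot accounting for the rewritten frag loop; MAX_FRAGS stands
	 * in for MAX_SKB_FRAGS and the slot count is made up.
	 */
	#include <stdio.h>

	#define MAX_FRAGS 17

	int main(void)
	{
		unsigned int nr_slots = 20;	/* slots left after the head copy ops */
		unsigned int in_skb = nr_slots < MAX_FRAGS ? nr_slots : MAX_FRAGS;
		unsigned int overflow = nr_slots - in_skb;

		/* overflow > 0 means the preallocated frag_list skb is used;
		 * overflow == 0 means it is freed (see kfree_skb below).
		 */
		printf("%u frags in skb, %u in frag_list skb\n", in_skb, overflow);
		return 0;
	}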
@@ -462,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		txp++;
	}
 
-	if (frag_overflow) {
+	if (nr_slots > 0) {
 
		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
@@ -478,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		}
 
		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
	}
 
	(*copy_ops) = cop - queue->tx_copy_ops;
@@ -539,6 +558,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
		pending_idx = copy_pending_idx(skb, i);
 
		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
		if (likely(!newerr)) {
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
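Because a split op queued two gnttab copy entries under a single logical slot, the status check must consume two results for any index whose split_mask bit is set. A standalone sketch of that pairing, with stand-in types and data:

	/* Sketch of consuming two consecutive grant-copy results for a split
	 * op; the result struct and loop are stand-ins for the driver code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct copy_result { int16_t status; };	/* 0 == success, like GNTST_okay */

	int main(void)
	{
		struct copy_result results[] = { {0}, {0}, {-1} };	/* op 1 was split */
		uint32_t split_mask = 1U << 1;
		const struct copy_result *gop = results;
		int err = 0;

		for (unsigned int i = 0; i < 2; i++, gop++) {
			int newerr = gop->status;

			/* A split op produced two results; a failure in
			 * either half fails the whole logical copy.
			 */
			if (split_mask & (1U << i)) {
				gop++;
				if (!newerr)
					newerr = gop->status;
			}
			if (newerr)
				err = newerr;
		}
		printf("overall status: %d\n", err);
		return 0;
	}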
@@ -975,10 +1001,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-			netdev_err(queue->vif->dev,
-				   "txreq.offset: %u, size: %u, end: %lu\n",
-				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+				   txreq.offset, txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}
---|