```diff
@@ -96,6 +96,13 @@
 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
+/* The module parameter tells that we have to put data
+ * for xen-netfront with the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
 
```
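The new `provides_xdp_headroom` parameter follows the standard writable module-parameter pattern: `module_param(..., 0644)` makes it readable and root-writable under `/sys/module/xen_netback/parameters/`. A minimal, self-contained sketch of the same pattern, with illustrative names that are not part of the patch:

```c
/* Minimal sketch of the writable-bool module-parameter pattern used
 * above. All "demo" names are illustrative, not from the patch. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_headroom = true;
module_param(demo_headroom, bool, 0644); /* root-writable via sysfs */
MODULE_PARM_DESC(demo_headroom, "Example writable bool parameter");

static int __init demo_init(void)
{
	pr_info("demo_headroom=%d\n", demo_headroom);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```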
```diff
@@ -104,6 +111,8 @@
 			      unsigned int extra_count,
 			      s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
+
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
```
```diff
@@ -136,12 +145,12 @@
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
-	return (u16)frag->page_offset;
+	return (u16)skb_frag_off(frag);
 }
 
 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 {
-	frag->page_offset = pending_idx;
+	skb_frag_off_set(frag, pending_idx);
 }
 
 static inline pending_ring_idx_t pending_index(unsigned i)
```
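The direct `frag->page_offset` accesses go away because the skb_frag_t layout changed; netback's trick of stashing a 16-bit pending index in the fragment offset now goes through the `skb_frag_off()`/`skb_frag_off_set()` accessors (the same accessors reappear in the frag_list handling further down). A userspace sketch of the accessor pattern, with an illustrative stand-in type:

```c
/* Userspace sketch of the accessor pattern: call sites use get/set
 * helpers so the underlying struct layout can change without touching
 * every caller. The frag_t type and its field are illustrative. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t bv_offset; } frag_t;

static inline uint32_t frag_off(const frag_t *f)
{
	return f->bv_offset;
}

static inline void frag_off_set(frag_t *f, uint32_t off)
{
	f->bv_offset = off;
}

int main(void)
{
	frag_t f;

	frag_off_set(&f, 42);          /* like skb_frag_off_set() */
	printf("%u\n", frag_off(&f));  /* like skb_frag_off() */
	return 0;
}
```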
```diff
@@ -323,10 +332,14 @@
 
 
 struct xenvif_tx_cb {
-	u16 pending_idx;
+	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
```
```diff
@@ -349,6 +362,8 @@
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
```
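The enlarged control block still has to fit in the 48-byte `skb->cb[]`, and the `BUILD_BUG_ON()` added above enforces that at compile time (it only evaluates `sizeof`, so its placement before the NULL check never dereferences `skb`). A userspace sketch of the same size constraint, assuming `XEN_NETBK_LEGACY_SLOTS_MAX` is 18 as in the Xen netif headers:

```c
/* Userspace sketch of the "control block must fit" check. SLOTS_MAX
 * mirrors XEN_NETBK_LEGACY_SLOTS_MAX (18 in the Xen headers); treat
 * the exact numbers as an assumption of this sketch. */
#include <assert.h>
#include <stdint.h>

#define SLOTS_MAX 18

struct tx_cb {
	uint16_t copy_pending_idx[SLOTS_MAX + 1]; /* 38 bytes */
	uint8_t  copy_count;
	uint32_t split_mask;                      /* one bit per copy op */
};

/* skb->cb is 48 bytes; the struct must never outgrow it. */
static_assert(sizeof(struct tx_cb) <= 48, "control block too large");
/* split_mask is 32 bits wide, so it must cover every possible copy op. */
static_assert(SLOTS_MAX + 1 <= 32, "split_mask too narrow");

int main(void)
{
	return 0;
}
```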
```diff
@@ -361,39 +376,112 @@
 	return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-							struct sk_buff *skb,
-							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop,
-							unsigned int frag_overflow,
-							struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+				struct sk_buff *skb,
+				struct xen_netif_tx_request *first,
+				struct xen_netif_tx_request *txfrags,
+				unsigned *copy_ops,
+				unsigned *map_ops,
+				unsigned int frag_overflow,
+				struct sk_buff *nskb,
+				unsigned int extra_count,
+				unsigned int data_len)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	int start;
+	u16 pending_idx;
 	pending_ring_idx_t index;
 	unsigned int nr_slots;
+	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
-	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
-	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, txp++, gop++) {
-		index = pending_index(queue->pending_cons++);
+	/* Create copy ops for exactly data_len bytes into the skb head. */
+	__skb_put(skb, data_len);
+	while (data_len > 0) {
+		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
+
+		cop->source.u.ref = txp->gref;
+		cop->source.domid = queue->vif->domid;
+		cop->source.offset = txp->offset;
+
+		cop->dest.domid = DOMID_SELF;
+		cop->dest.offset = (offset_in_page(skb->data +
+						   skb_headlen(skb) -
+						   data_len)) & ~XEN_PAGE_MASK;
+		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+					       - data_len);
+
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
+		cop->len = amount;
+		cop->flags = GNTCOPY_source_gref;
+
+		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
-		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		callback_param(queue, pending_idx).ctx = NULL;
+		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		if (!split)
+			copy_count(skb)++;
+
+		cop++;
+		data_len -= amount;
+
+		if (amount == txp->size) {
+			/* The copy op covered the full tx_request */
+
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       txp, sizeof(*txp));
+			queue->pending_tx_info[pending_idx].extra_count =
+				(txp == first) ? extra_count : 0;
+
+			if (txp == first)
+				txp = txfrags;
+			else
+				txp++;
+			queue->pending_cons++;
+			nr_slots--;
+		} else {
+			/* The copy op partially covered the tx_request.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
+			 */
+			txp->offset += amount;
+			txp->size -= amount;
+		}
 	}
 
-	if (frag_overflow) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
+		index = pending_index(queue->pending_cons++);
+		pending_idx = queue->pending_ring[index];
+		xenvif_tx_create_map_op(queue, pending_idx, txp,
+					txp == first ? extra_count : 0, gop);
+		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+		if (txp == first)
+			txp = txfrags;
+		else
+			txp++;
+	}
+
+	if (nr_slots > 0) {
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
```
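This hunk is the core of the rework: the packet header is now filled by grant *copy* ops built here, and a copy whose destination would straddle a local 4 KiB page is split in two, with a bit in `split_mask` recording that both ops belong to one pending slot (`copy_count` only advances once a slot is fully covered). A standalone sketch of that splitting logic, with illustrative values:

```c
/* Userspace sketch of the destination-page splitting above: no single
 * copy may cross a 4 KiB page, and a split's two halves share one
 * pending slot, marked in split_mask. Values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int dst_off = 4000, len = 300; /* copy crosses a page */
	unsigned int nops = 0, count = 0;
	uint32_t split_mask = 0;

	while (len > 0) {
		unsigned int amount = len;
		int split = 0;

		if (dst_off + amount > PAGE_SIZE) { /* would cross the page */
			amount = PAGE_SIZE - dst_off;
			split_mask |= 1u << count;  /* halves share a slot */
			split = 1;
		}
		printf("op %u: %u bytes at page offset %u%s\n",
		       nops++, amount, dst_off, split ? " (split)" : "");
		if (!split)
			count++; /* slot complete, like copy_count(skb)++ */
		dst_off = (dst_off + amount) & (PAGE_SIZE - 1);
		len -= amount;
	}
	printf("%u ops covering %u slot(s), split_mask=%#x\n",
	       nops, count, split_mask);
	return 0;
}
```

The second half of a split always starts at destination offset 0, so it can never split again; the kernel loop relies on the same invariant.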
```diff
@@ -404,9 +492,15 @@
 		}
 
 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}
 
-	return gop;
+	(*copy_ops) = cop - queue->tx_copy_ops;
+	(*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
```
```diff
@@ -442,7 +536,7 @@
 				       struct gnttab_copy **gopp_copy)
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	u16 pending_idx;
 	/* This always points to the shinfo of the skb being checked, which
 	 * could be either the first or the one on the frag_list
 	 */
```
```diff
@@ -453,24 +547,44 @@
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
 	const bool sharedslot = nr_frags &&
-				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
-	int i, err;
+				frag_get_pending_idx(&shinfo->frags[0]) ==
+				    copy_pending_idx(skb, copy_count(skb) - 1);
+	int i, err = 0;
 
-	/* Check status of header. */
-	err = (*gopp_copy)->status;
-	if (unlikely(err)) {
-		if (net_ratelimit())
-			netdev_dbg(queue->vif->dev,
-				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-				   (*gopp_copy)->status,
-				   pending_idx,
-				   (*gopp_copy)->source.u.ref);
-		/* The first frag might still have this slot mapped */
-		if (!sharedslot)
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_ERROR);
+	for (i = 0; i < copy_count(skb); i++) {
+		int newerr;
+
+		/* Check status of header. */
+		pending_idx = copy_pending_idx(skb, i);
+
+		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
+		if (likely(!newerr)) {
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+		} else {
+			err = newerr;
+			if (net_ratelimit())
+				netdev_dbg(queue->vif->dev,
+					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+					   (*gopp_copy)->status,
+					   pending_idx,
+					   (*gopp_copy)->source.u.ref);
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_ERROR);
+		}
+		(*gopp_copy)++;
 	}
-	(*gopp_copy)++;
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
```
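On completion, the two ops of a split copy are checked as a pair: the second half's status is consulted only if the first half succeeded, and a failure of either marks the whole slot bad. A tiny sketch of that status merge:

```c
/* Sketch of merging the two grant-copy statuses of a split header
 * copy, mirroring the `if (!newerr) newerr = (*gopp_copy)->status;`
 * above: the pair fails if either half fails, and the first failure
 * wins. The status values are illustrative. */
#include <stdio.h>

static int merged_status(int first, int second, int was_split)
{
	int err = first;

	if (was_split && !err)
		err = second; /* second half matters only on success */
	return err;
}

int main(void)
{
	printf("%d\n", merged_status(0, 0, 1));  /* 0: both halves OK */
	printf("%d\n", merged_status(0, -1, 1)); /* -1: second half failed */
	printf("%d\n", merged_status(-1, 0, 1)); /* -1: first failure wins */
	return 0;
}
```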
```diff
@@ -516,14 +630,6 @@
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-
-		/* First error: if the header haven't shared a slot with the
-		 * first frag, release it as well.
-		 */
-		if (!sharedslot)
-			xenvif_idx_release(queue,
-					   XENVIF_TX_CB(skb)->pending_idx,
-					   XEN_NETIF_RSP_OKAY);
 
 		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
```
```diff
@@ -794,7 +900,6 @@
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
 	struct sk_buff *skb, *nskb;
 	int ret;
 	unsigned int frag_overflow;
```
```diff
@@ -876,8 +981,12 @@
 			continue;
 		}
 
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
 		ret = xenvif_count_requests(queue, &txreq, extra_count,
 					    txfrags, work_to_do);
+
 		if (unlikely(ret < 0))
 			break;
 
```
```diff
@@ -892,10 +1001,8 @@
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-			netdev_err(queue->vif->dev,
-				   "txreq.offset: %u, size: %u, end: %lu\n",
-				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+				   txreq.offset, txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}
```
```diff
@@ -903,9 +1010,8 @@
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+			data_len = txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
```
```diff
@@ -916,8 +1022,6 @@
 		}
 
 		skb_shinfo(skb)->nr_frags = ret;
-		if (data_len < txreq.size)
-			skb_shinfo(skb)->nr_frags++;
 		/* At this point shinfo->nr_frags is in fact the number of
 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 		 */
```
```diff
@@ -979,54 +1083,19 @@
 					     type);
 		}
 
-		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-		__skb_put(skb, data_len);
-		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_gfn(skb->data);
-		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-		queue->tx_copy_ops[*copy_ops].len = data_len;
-		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-		(*copy_ops)++;
-
-		if (data_len < txreq.size) {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-						extra_count, gop);
-			gop++;
-		} else {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req,
-			       &txreq, sizeof(txreq));
-			queue->pending_tx_info[pending_idx].extra_count =
-				extra_count;
-		}
-
-		queue->pending_cons++;
-
-		gop = xenvif_get_requests(queue, skb, txfrags, gop,
-					  frag_overflow, nskb);
+		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+				    map_ops, frag_overflow, nskb, extra_count,
+				    data_len);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
 
-		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
 			break;
 	}
 
-	(*map_ops) = gop - queue->tx_map_ops;
 	return;
 }
 
```
```diff
@@ -1061,7 +1130,7 @@
 		int j;
 		skb->truesize += skb->data_len;
 		for (j = 0; j < i; j++)
-			put_page(frags[j].page.p);
+			put_page(skb_frag_page(&frags[j]));
 		return -ENOMEM;
 	}
 
```
```diff
@@ -1073,8 +1142,8 @@
 			BUG();
 
 		offset += len;
-		frags[i].page.p = page;
-		frags[i].page_offset = 0;
+		__skb_frag_set_page(&frags[i], page);
+		skb_frag_off_set(&frags[i], 0);
 		skb_frag_size_set(&frags[i], len);
 	}
 
```
```diff
@@ -1105,9 +1174,8 @@
 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
-		unsigned data_len;
 
-		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		pending_idx = copy_pending_idx(skb, 0);
 		txp = &queue->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
```
```diff
@@ -1124,18 +1192,6 @@
 			}
 			kfree_skb(skb);
 			continue;
-		}
-
-		data_len = skb->len;
-		callback_param(queue, pending_idx).ctx = NULL;
-		if (data_len < txp->size) {
-			/* Append the packet payload as a fragment. */
-			txp->offset += data_len;
-			txp->size -= data_len;
-		} else {
-			/* Schedule a response immediately. */
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
```
```diff
@@ -1175,15 +1231,24 @@
 			continue;
 		}
 
-		skb_probe_transport_header(skb, 0);
+		skb_probe_transport_header(skb);
 
 		/* If the packet is GSO then we will have just set up the
 		 * transport header offset in checksum_setup so it's now
 		 * straightforward to calculate gso_segs.
 		 */
 		if (skb_is_gso(skb)) {
-			int mss = skb_shinfo(skb)->gso_size;
-			int hdrlen = skb_transport_header(skb) -
+			int mss, hdrlen;
+
+			/* GSO implies having the L4 header. */
+			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
+			if (unlikely(!skb_transport_header_was_set(skb))) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			mss = skb_shinfo(skb)->gso_size;
+			hdrlen = skb_transport_header(skb) -
 				skb_mac_header(skb) +
 				tcp_hdrlen(skb);
 
```
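With the transport header now guaranteed to be set, `hdrlen` is the full MAC + IP + TCP header length, and the gso_segs calculation the comment refers to (it lies just past this hunk) is payload bytes divided by the MSS, rounded up. The arithmetic as a standalone sketch, with illustrative numbers and assuming the round-up-division form used throughout the kernel:

```c
/* Sketch of the gso_segs arithmetic this block feeds (the hunk ends
 * before the actual computation). All numbers are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int len = 66000;  /* total skb length */
	unsigned int hdrlen = 66;  /* MAC + IP + TCP headers */
	unsigned int mss = 1448;
	unsigned int gso_segs = (len - hdrlen + mss - 1) / mss;

	printf("gso_segs = %u\n", gso_segs); /* 46 */
	return 0;
}
```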
```diff
@@ -1314,7 +1379,7 @@
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-	unsigned nr_mops, nr_cops = 0;
+	unsigned nr_mops = 0, nr_cops = 0;
 	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(queue)))
```
```diff
@@ -1401,7 +1466,7 @@
 		notify_remote_via_irq(queue->tx_irq);
 }
 
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 {
 	int ret;
 	struct gnttab_unmap_grant_ref tx_unmap_op;
```
```diff
@@ -1456,7 +1521,7 @@
 	void *addr;
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
-
+	RING_IDX rsp_prod, req_prod;
 	int err = -ENOMEM;
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
```
```diff
@@ -1465,7 +1530,14 @@
 		goto err;
 
 	txs = (struct xen_netif_tx_sring *)addr;
-	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
+	rsp_prod = READ_ONCE(txs->rsp_prod);
+	req_prod = READ_ONCE(txs->req_prod);
+
+	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
+
+	err = -EIO;
+	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
+		goto err;
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
 				     &rx_ring_ref, 1, &addr);
```
```diff
@@ -1473,7 +1545,14 @@
 		goto err;
 
 	rxs = (struct xen_netif_rx_sring *)addr;
-	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+	rsp_prod = READ_ONCE(rxs->rsp_prod);
+	req_prod = READ_ONCE(rxs->req_prod);
+
+	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
+
+	err = -EIO;
+	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
+		goto err;
 
 	return 0;
 
```
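Both rings are now attached with `BACK_RING_ATTACH()` at the producer index the frontend published, instead of being re-initialised from zero, and the indices read from shared memory are sanity-checked first. Ring indices are free-running unsigned counters, so `req_prod - rsp_prod` counts outstanding requests correctly even across wraparound; a result beyond the ring size means the (untrusted) frontend published inconsistent values. A userspace sketch of that check:

```c
/* Userspace sketch of the ring-index sanity check above: with
 * free-running unsigned counters, req_prod - rsp_prod is the number
 * of outstanding requests even across wraparound. RING_SIZE and the
 * sample values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u

static int indices_sane(uint32_t req_prod, uint32_t rsp_prod)
{
	return req_prod - rsp_prod <= RING_SIZE;
}

int main(void)
{
	printf("%d\n", indices_sane(10, 5));          /* 1: 5 outstanding */
	printf("%d\n", indices_sane(4, 0xfffffffau)); /* 1: wrapped, 10 outstanding */
	printf("%d\n", indices_sane(1000, 4));        /* 0: bogus frontend state */
	return 0;
}
```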
```diff
@@ -1663,9 +1742,6 @@
 
 #ifdef CONFIG_DEBUG_FS
 	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
-	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
-		pr_warn("Init of debugfs returned %ld!\n",
-			PTR_ERR(xen_netback_dbg_root));
 #endif /* CONFIG_DEBUG_FS */
 
 	return 0;
```
```diff
@@ -1679,8 +1755,7 @@
 static void __exit netback_fini(void)
 {
 #ifdef CONFIG_DEBUG_FS
-	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
-		debugfs_remove_recursive(xen_netback_dbg_root);
+	debugfs_remove_recursive(xen_netback_dbg_root);
 #endif /* CONFIG_DEBUG_FS */
 	xenvif_xenbus_fini();
 }
```
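The debugfs checks could be dropped because modern debugfs is designed to be called blindly: `debugfs_create_dir()` returns an error pointer that subsequent debugfs calls accept and ignore, and `debugfs_remove_recursive()` is a no-op on NULL or error pointers. A sketch of the resulting check-free pattern, with illustrative names:

```c
/* Sketch of the check-free debugfs pattern the patch adopts: debugfs
 * calls tolerate error pointers from earlier failures, so no return
 * value needs testing. The "demo" names are illustrative. */
#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *demo_dbg_root;

static int __init demo_init(void)
{
	/* No IS_ERR_OR_NULL() check needed: failure here is harmless. */
	demo_dbg_root = debugfs_create_dir("demo", NULL);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Safe even if demo_dbg_root is NULL or an ERR_PTR. */
	debugfs_remove_recursive(demo_dbg_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```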