2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/xen-netback/netback.c
@@ -96,6 +96,13 @@
 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
+/* The module parameter tells that we have to put data
+ * for xen-netfront with the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
 
@@ -104,6 +111,8 @@
 				     unsigned int extra_count,
 				     s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
+
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -136,12 +145,12 @@
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
-	return (u16)frag->page_offset;
+	return (u16)skb_frag_off(frag);
 }
 
 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 {
-	frag->page_offset = pending_idx;
+	skb_frag_off_set(frag, pending_idx);
 }
 
 static inline pending_ring_idx_t pending_index(unsigned i)
@@ -323,10 +332,14 @@
 
 
 struct xenvif_tx_cb {
-	u16 pending_idx;
+	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
@@ -349,6 +362,8 @@
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -361,39 +376,112 @@
 	return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-							struct sk_buff *skb,
-							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop,
-							unsigned int frag_overflow,
-							struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+				struct sk_buff *skb,
+				struct xen_netif_tx_request *first,
+				struct xen_netif_tx_request *txfrags,
+				unsigned *copy_ops,
+				unsigned *map_ops,
+				unsigned int frag_overflow,
+				struct sk_buff *nskb,
+				unsigned int extra_count,
+				unsigned int data_len)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	int start;
+	u16 pending_idx;
 	pending_ring_idx_t index;
 	unsigned int nr_slots;
+	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
-	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
-	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, txp++, gop++) {
-		index = pending_index(queue->pending_cons++);
+	/* Create copy ops for exactly data_len bytes into the skb head. */
+	__skb_put(skb, data_len);
+	while (data_len > 0) {
+		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
+
+		cop->source.u.ref = txp->gref;
+		cop->source.domid = queue->vif->domid;
+		cop->source.offset = txp->offset;
+
+		cop->dest.domid = DOMID_SELF;
+		cop->dest.offset = (offset_in_page(skb->data +
+						   skb_headlen(skb) -
+						   data_len)) & ~XEN_PAGE_MASK;
+		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+					       - data_len);
+
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
+		cop->len = amount;
+		cop->flags = GNTCOPY_source_gref;
+
+		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
-		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		callback_param(queue, pending_idx).ctx = NULL;
+		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		if (!split)
+			copy_count(skb)++;
+
+		cop++;
+		data_len -= amount;
+
+		if (amount == txp->size) {
+			/* The copy op covered the full tx_request */
+
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       txp, sizeof(*txp));
+			queue->pending_tx_info[pending_idx].extra_count =
+				(txp == first) ? extra_count : 0;
+
+			if (txp == first)
+				txp = txfrags;
+			else
+				txp++;
+			queue->pending_cons++;
+			nr_slots--;
+		} else {
+			/* The copy op partially covered the tx_request.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
+			 */
+			txp->offset += amount;
+			txp->size -= amount;
+		}
 	}
 
-	if (frag_overflow) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
+		index = pending_index(queue->pending_cons++);
+		pending_idx = queue->pending_ring[index];
+		xenvif_tx_create_map_op(queue, pending_idx, txp,
+					txp == first ? extra_count : 0, gop);
+		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+		if (txp == first)
+			txp = txfrags;
+		else
+			txp++;
+	}
+
+	if (nr_slots > 0) {
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
@@ -404,9 +492,15 @@
 		}
 
 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}
 
-	return gop;
+	(*copy_ops) = cop - queue->tx_copy_ops;
+	(*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -442,7 +536,7 @@
 			       struct gnttab_copy **gopp_copy)
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	u16 pending_idx;
 	/* This always points to the shinfo of the skb being checked, which
 	 * could be either the first or the one on the frag_list
 	 */
@@ -453,24 +547,44 @@
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
 	const bool sharedslot = nr_frags &&
-				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
-	int i, err;
+				frag_get_pending_idx(&shinfo->frags[0]) ==
+				 copy_pending_idx(skb, copy_count(skb) - 1);
+	int i, err = 0;
 
-	/* Check status of header. */
-	err = (*gopp_copy)->status;
-	if (unlikely(err)) {
-		if (net_ratelimit())
-			netdev_dbg(queue->vif->dev,
-				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-				   (*gopp_copy)->status,
-				   pending_idx,
-				   (*gopp_copy)->source.u.ref);
-		/* The first frag might still have this slot mapped */
-		if (!sharedslot)
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_ERROR);
+	for (i = 0; i < copy_count(skb); i++) {
+		int newerr;
+
+		/* Check status of header. */
+		pending_idx = copy_pending_idx(skb, i);
+
+		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
+		if (likely(!newerr)) {
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+		} else {
+			err = newerr;
+			if (net_ratelimit())
+				netdev_dbg(queue->vif->dev,
+					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+					   (*gopp_copy)->status,
+					   pending_idx,
+					   (*gopp_copy)->source.u.ref);
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_ERROR);
+		}
+		(*gopp_copy)++;
 	}
-	(*gopp_copy)++;
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -516,14 +630,6 @@
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-
-		/* First error: if the header haven't shared a slot with the
-		 * first frag, release it as well.
-		 */
-		if (!sharedslot)
-			xenvif_idx_release(queue,
-					   XENVIF_TX_CB(skb)->pending_idx,
-					   XEN_NETIF_RSP_OKAY);
 
 		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
@@ -794,7 +900,6 @@
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
 	struct sk_buff *skb, *nskb;
 	int ret;
 	unsigned int frag_overflow;
@@ -876,8 +981,12 @@
 			continue;
 		}
 
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
 		ret = xenvif_count_requests(queue, &txreq, extra_count,
 					    txfrags, work_to_do);
+
 		if (unlikely(ret < 0))
 			break;
 
@@ -892,10 +1001,8 @@
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-			netdev_err(queue->vif->dev,
-				   "txreq.offset: %u, size: %u, end: %lu\n",
-				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+				   txreq.offset, txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}
@@ -903,9 +1010,8 @@
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+			data_len = txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -916,8 +1022,6 @@
 		}
 
 		skb_shinfo(skb)->nr_frags = ret;
-		if (data_len < txreq.size)
-			skb_shinfo(skb)->nr_frags++;
 		/* At this point shinfo->nr_frags is in fact the number of
 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 		 */
@@ -979,54 +1083,19 @@
 					     type);
 		}
 
-		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-		__skb_put(skb, data_len);
-		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_gfn(skb->data);
-		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-		queue->tx_copy_ops[*copy_ops].len = data_len;
-		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-		(*copy_ops)++;
-
-		if (data_len < txreq.size) {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-						extra_count, gop);
-			gop++;
-		} else {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req,
-			       &txreq, sizeof(txreq));
-			queue->pending_tx_info[pending_idx].extra_count =
-				extra_count;
-		}
-
-		queue->pending_cons++;
-
-		gop = xenvif_get_requests(queue, skb, txfrags, gop,
-					  frag_overflow, nskb);
+		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+				    map_ops, frag_overflow, nskb, extra_count,
+				    data_len);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
 
-		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
 			break;
 	}
 
-	(*map_ops) = gop - queue->tx_map_ops;
 	return;
 }
 
@@ -1061,7 +1130,7 @@
 		int j;
 		skb->truesize += skb->data_len;
 		for (j = 0; j < i; j++)
-			put_page(frags[j].page.p);
+			put_page(skb_frag_page(&frags[j]));
 		return -ENOMEM;
 	}
 
@@ -1073,8 +1142,8 @@
 			BUG();
 
 		offset += len;
-		frags[i].page.p = page;
-		frags[i].page_offset = 0;
+		__skb_frag_set_page(&frags[i], page);
+		skb_frag_off_set(&frags[i], 0);
 		skb_frag_size_set(&frags[i], len);
 	}
 
@@ -1105,9 +1174,8 @@
 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
-		unsigned data_len;
 
-		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		pending_idx = copy_pending_idx(skb, 0);
 		txp = &queue->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
@@ -1124,18 +1192,6 @@
 			}
 			kfree_skb(skb);
 			continue;
-		}
-
-		data_len = skb->len;
-		callback_param(queue, pending_idx).ctx = NULL;
-		if (data_len < txp->size) {
-			/* Append the packet payload as a fragment. */
-			txp->offset += data_len;
-			txp->size -= data_len;
-		} else {
-			/* Schedule a response immediately. */
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1175,15 +1231,24 @@
 			continue;
 		}
 
-		skb_probe_transport_header(skb, 0);
+		skb_probe_transport_header(skb);
 
 		/* If the packet is GSO then we will have just set up the
 		 * transport header offset in checksum_setup so it's now
 		 * straightforward to calculate gso_segs.
 		 */
 		if (skb_is_gso(skb)) {
-			int mss = skb_shinfo(skb)->gso_size;
-			int hdrlen = skb_transport_header(skb) -
+			int mss, hdrlen;
+
+			/* GSO implies having the L4 header. */
+			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
+			if (unlikely(!skb_transport_header_was_set(skb))) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			mss = skb_shinfo(skb)->gso_size;
+			hdrlen = skb_transport_header(skb) -
 				skb_mac_header(skb) +
 				tcp_hdrlen(skb);
 
@@ -1314,7 +1379,7 @@
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-	unsigned nr_mops, nr_cops = 0;
+	unsigned nr_mops = 0, nr_cops = 0;
 	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(queue)))
@@ -1401,7 +1466,7 @@
 		notify_remote_via_irq(queue->tx_irq);
 }
 
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 {
 	int ret;
 	struct gnttab_unmap_grant_ref tx_unmap_op;
@@ -1456,7 +1521,7 @@
 	void *addr;
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
-
+	RING_IDX rsp_prod, req_prod;
 	int err = -ENOMEM;
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
@@ -1465,7 +1530,14 @@
 		goto err;
 
 	txs = (struct xen_netif_tx_sring *)addr;
-	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
+	rsp_prod = READ_ONCE(txs->rsp_prod);
+	req_prod = READ_ONCE(txs->req_prod);
+
+	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
+
+	err = -EIO;
+	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
+		goto err;
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
 				     &rx_ring_ref, 1, &addr);
@@ -1473,7 +1545,14 @@
 		goto err;
 
 	rxs = (struct xen_netif_rx_sring *)addr;
-	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+	rsp_prod = READ_ONCE(rxs->rsp_prod);
+	req_prod = READ_ONCE(rxs->req_prod);
+
+	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
+
+	err = -EIO;
+	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
+		goto err;
 
 	return 0;
 
@@ -1663,9 +1742,6 @@
 
 #ifdef CONFIG_DEBUG_FS
 	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
-	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
-		pr_warn("Init of debugfs returned %ld!\n",
-			PTR_ERR(xen_netback_dbg_root));
 #endif /* CONFIG_DEBUG_FS */
 
 	return 0;
@@ -1679,8 +1755,7 @@
 static void __exit netback_fini(void)
 {
 #ifdef CONFIG_DEBUG_FS
-	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
-		debugfs_remove_recursive(xen_netback_dbg_root);
+	debugfs_remove_recursive(xen_netback_dbg_root);
 #endif /* CONFIG_DEBUG_FS */
 	xenvif_xenbus_fini();
 }