.. | ..
1 | 1 | /*
2 | | - * Copyright(c) 2015 - 2018 Intel Corporation.
| 2 | + * Copyright(c) 2015 - 2020 Intel Corporation.
3 | 3 | *
4 | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or
5 | 5 | * redistributing this file, you may do so under either license.
.. | ..
49 | 49 | #include <linux/netdevice.h>
50 | 50 | #include <linux/vmalloc.h>
51 | 51 | #include <linux/delay.h>
52 | | -#include <linux/idr.h>
| 52 | +#include <linux/xarray.h>
53 | 53 | #include <linux/module.h>
54 | 54 | #include <linux/printk.h>
55 | 55 | #include <linux/hrtimer.h>
56 | 56 | #include <linux/bitmap.h>
| 57 | +#include <linux/numa.h>
57 | 58 | #include <rdma/rdma_vt.h>
58 | 59 |
59 | 60 | #include "hfi.h"
.. | ..
68 | 69 | #include "affinity.h"
69 | 70 | #include "vnic.h"
70 | 71 | #include "exp_rcv.h"
| 72 | +#include "netdev.h"
71 | 73 |
72 | 74 | #undef pr_fmt
73 | 75 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt
74 | 76 |
75 | | -#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
76 | 77 | /*
77 | 78 | * min buffers we want to have per context, after driver
78 | 79 | */
79 | 80 | #define HFI1_MIN_USER_CTXT_BUFCNT 7
80 | 81 |
81 | | -#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
82 | | -#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
83 | 82 | #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
84 | 83 | #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
| 84 | +
| 85 | +#define NUM_IB_PORTS 1
85 | 86 |
86 | 87 | /*
87 | 88 | * Number of user receive contexts we are configured to use (to allow for more
.. | ..
120 | 121 | module_param(user_credit_return_threshold, uint, S_IRUGO);
121 | 122 | MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
122 | 123 |
123 | | -static inline u64 encode_rcv_header_entry_size(u16 size);
124 | | -
125 | | -static struct idr hfi1_unit_table;
| 124 | +DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
126 | 125 |
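The idr-to-xarray conversion started here (and the xa_alloc_irq(), xa_load() and __xa_erase() calls later in this patch) follows the kernel's standard ID-allocating xarray pattern. Below is a minimal, self-contained sketch of that pattern for reference; the example_* names are illustrative only and are not part of the driver.

#include <linux/xarray.h>

/* IRQ-safe, ID-allocating array, analogous to hfi1_dev_table above. */
static DEFINE_XARRAY_FLAGS(example_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int example_register(void *obj, u32 *unit)
{
        /* Stores obj at the lowest free index in the 32-bit range. */
        return xa_alloc_irq(&example_table, unit, obj, xa_limit_32b, GFP_KERNEL);
}

static void *example_lookup(u32 unit)
{
        /* Lookup needs no external lock. */
        return xa_load(&example_table, unit);
}

static void example_unregister(u32 unit)
{
        unsigned long flags;

        /*
         * When other state must be updated atomically with the removal,
         * take the xarray's own lock and use the __xa_erase() variant,
         * as hfi1_free_devdata() does later in this patch; otherwise a
         * plain xa_erase() is enough.
         */
        xa_lock_irqsave(&example_table, flags);
        __xa_erase(&example_table, unit);
        xa_unlock_irqrestore(&example_table, flags);
}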
127 | 126 | static int hfi1_create_kctxt(struct hfi1_devdata *dd,
128 | 127 | struct hfi1_pportdata *ppd)
.. | ..
152 | 151 | /* Control context must use DMA_RTAIL */
153 | 152 | if (rcd->ctxt == HFI1_CTRL_CTXT)
154 | 153 | rcd->flags |= HFI1_CAP_DMA_RTAIL;
155 | | - rcd->seq_cnt = 1;
| 154 | + rcd->fast_handler = get_dma_rtail_setting(rcd) ?
| 155 | + handle_receive_interrupt_dma_rtail :
| 156 | + handle_receive_interrupt_nodma_rtail;
| 157 | +
| 158 | + hfi1_set_seq_cnt(rcd, 1);
156 | 159 |
157 | 160 | rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
158 | 161 | if (!rcd->sc) {
.. | ..
371 | 374 | rcd->numa_id = numa;
372 | 375 | rcd->rcv_array_groups = dd->rcv_entries.ngroups;
373 | 376 | rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
| 377 | + rcd->slow_handler = handle_receive_interrupt;
| 378 | + rcd->do_interrupt = rcd->slow_handler;
| 379 | + rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
374 | 380 |
375 | 381 | mutex_init(&rcd->exp_mutex);
| 382 | + spin_lock_init(&rcd->exp_lock);
| 383 | + INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
| 384 | + INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
376 | 385 |
377 | 386 | hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
378 | 387 |
.. | ..
464 | 473 | if (rcd->egrbufs.size < hfi1_max_mtu) {
465 | 474 | rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
466 | 475 | hfi1_cdbg(PROC,
467 | | - "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
| 476 | + "ctxt%u: eager bufs size too small. Adjusting to %u\n",
468 | 477 | rcd->ctxt, rcd->egrbufs.size);
469 | 478 | }
470 | 479 | rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
.. | ..
475 | 484 | GFP_KERNEL, numa);
476 | 485 | if (!rcd->opstats)
477 | 486 | goto bail;
| 487 | +
| 488 | + /* Initialize TID flow generations for the context */
| 489 | + hfi1_kern_init_ctxt_generations(rcd);
478 | 490 | }
479 | 491 |
480 | 492 | *context = rcd;
.. | ..
503 | 515 | }
504 | 516 |
505 | 517 | /*
506 | | - * Convert a receive header entry size that to the encoding used in the CSR.
507 | | - *
508 | | - * Return a zero if the given size is invalid.
509 | | - */
510 | | -static inline u64 encode_rcv_header_entry_size(u16 size)
511 | | -{
512 | | - /* there are only 3 valid receive header entry sizes */
513 | | - if (size == 2)
514 | | - return 1;
515 | | - if (size == 16)
516 | | - return 2;
517 | | - else if (size == 32)
518 | | - return 4;
519 | | - return 0; /* invalid */
520 | | -}
521 | | -
522 | | -/*
523 | 518 | * Select the largest ccti value over all SLs to determine the intra-
524 | 519 | * packet gap for the link.
525 | 520 | *
.. | ..
535 | 530 | u16 shift, mult;
536 | 531 | u64 src;
537 | 532 | u32 current_egress_rate; /* Mbits /sec */
538 | | - u32 max_pkt_time;
| 533 | + u64 max_pkt_time;
539 | 534 | /*
540 | 535 | * max_pkt_time is the maximum packet egress time in units
541 | 536 | * of the fabric clock period 1/(805 MHz).
.. | ..
656 | 651 |
657 | 652 | ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
658 | 653 | ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
659 | | -
660 | | - if (loopback) {
661 | | - hfi1_early_err(&pdev->dev,
662 | | - "Faking data partition 0x8001 in idx %u\n",
663 | | - !default_pkey_idx);
664 | | - ppd->pkeys[!default_pkey_idx] = 0x8001;
665 | | - }
| 654 | + ppd->pkeys[0] = 0x8001;
666 | 655 |
667 | 656 | INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
668 | 657 | INIT_WORK(&ppd->link_up_work, handle_link_up);
.. | ..
706 | 695 | return;
707 | 696 |
708 | 697 | bail:
709 | | -
710 | | - hfi1_early_err(&pdev->dev,
711 | | - "Congestion Control Agent disabled for port %d\n", port);
| 698 | + dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
712 | 699 | }
713 | 700 |
714 | 701 | /*
.. | ..
777 | 764 | rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
778 | 765 | if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
779 | 766 | rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
| 767 | + if (HFI1_CAP_IS_KSET(TID_RDMA))
| 768 | + rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
780 | 769 | hfi1_rcvctrl(dd, rcvmask, rcd);
781 | 770 | sc_enable(rcd->sc);
782 | 771 | hfi1_rcd_put(rcd);
.. | ..
838 | 827 | }
839 | 828 |
840 | 829 | /**
| 830 | + * destroy_workqueues - destroy per port workqueues
| 831 | + * @dd: the hfi1_ib device
| 832 | + */
| 833 | +static void destroy_workqueues(struct hfi1_devdata *dd)
| 834 | +{
| 835 | + int pidx;
| 836 | + struct hfi1_pportdata *ppd;
| 837 | +
| 838 | + for (pidx = 0; pidx < dd->num_pports; ++pidx) {
| 839 | + ppd = dd->pport + pidx;
| 840 | +
| 841 | + if (ppd->hfi1_wq) {
| 842 | + destroy_workqueue(ppd->hfi1_wq);
| 843 | + ppd->hfi1_wq = NULL;
| 844 | + }
| 845 | + if (ppd->link_wq) {
| 846 | + destroy_workqueue(ppd->link_wq);
| 847 | + ppd->link_wq = NULL;
| 848 | + }
| 849 | + }
| 850 | +}
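A brief aside on the teardown split this patch introduces: the shutdown path now only flushes ppd->hfi1_wq and ppd->link_wq (see the flush_workqueue() change further down), and the actual destroy_workqueue() calls are deferred to device removal via the destroy_workqueues() helper added above, once nothing can queue new work. A minimal sketch of that ordering, assuming the helpers named in this patch (illustrative only, not driver code):

static void example_remove_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        shutdown_device(dd);    /* quiesce; per-port workqueues are only flushed */
        destroy_workqueues(dd); /* safe now: no new work can be queued */
}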
| 851 | +
| 852 | +/**
| 853 | + * enable_general_intr() - Enable the IRQs that will be handled by the
| 854 | + * general interrupt handler.
| 855 | + * @dd: valid devdata
| 856 | + *
| 857 | + */
| 858 | +static void enable_general_intr(struct hfi1_devdata *dd)
| 859 | +{
| 860 | + set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
| 861 | + set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
| 862 | + set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
| 863 | + set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
| 864 | + set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
| 865 | + set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
| 866 | + set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
| 867 | +}
| 868 | +
| 869 | +/**
841 | 870 | * hfi1_init - do the actual initialization sequence on the chip
842 | 871 | * @dd: the hfi1_ib device
843 | 872 | * @reinit: re-initializing, so don't allocate new memory
.. | ..
868 | 897 |
869 | 898 | if (is_ax(dd)) {
870 | 899 | atomic_set(&dd->drop_packet, DROP_PACKET_ON);
871 | | - dd->do_drop = 1;
| 900 | + dd->do_drop = true;
872 | 901 | } else {
873 | 902 | atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
874 | | - dd->do_drop = 0;
| 903 | + dd->do_drop = false;
875 | 904 | }
876 | 905 |
877 | 906 | /* make sure the link is not "up" */
.. | ..
887 | 916 | if (ret)
888 | 917 | goto done;
889 | 918 |
890 | | - /* allocate dummy tail memory for all receive contexts */
891 | | - dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
892 | | - &dd->pcidev->dev, sizeof(u64),
893 | | - &dd->rcvhdrtail_dummy_dma,
894 | | - GFP_KERNEL);
895 | | -
896 | | - if (!dd->rcvhdrtail_dummy_kvaddr) {
897 | | - dd_dev_err(dd, "cannot allocate dummy tail memory\n");
898 | | - ret = -ENOMEM;
899 | | - goto done;
900 | | - }
901 | | -
902 | 919 | /* dd->rcd can be NULL if early initialization failed */
903 | 920 | for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
904 | 921 | /*
.. | ..
911 | 928 | if (!rcd)
912 | 929 | continue;
913 | 930 |
914 | | - rcd->do_interrupt = &handle_receive_interrupt;
915 | | -
916 | 931 | lastfail = hfi1_create_rcvhdrq(dd, rcd);
917 | 932 | if (!lastfail)
918 | 933 | lastfail = hfi1_setup_eagerbufs(rcd);
| 934 | + if (!lastfail)
| 935 | + lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
919 | 936 | if (lastfail) {
920 | 937 | dd_dev_err(dd,
921 | 938 | "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
922 | 939 | ret = lastfail;
923 | 940 | }
| 941 | + /* enable IRQ */
924 | 942 | hfi1_rcd_put(rcd);
925 | 943 | }
926 | 944 |
.. | ..
959 | 977 | HFI1_STATUS_INITTED;
960 | 978 | if (!ret) {
961 | 979 | /* enable all interrupts from the chip */
962 | | - set_intr_state(dd, 1);
| 980 | + enable_general_intr(dd);
| 981 | + init_qsfp_int(dd);
963 | 982 |
964 | 983 | /* chip is OK for user apps; mark it as initialized */
965 | 984 | for (pidx = 0; pidx < dd->num_pports; ++pidx) {
.. | ..
991 | 1010 | return ret;
992 | 1011 | }
993 | 1012 |
994 | | -static inline struct hfi1_devdata *__hfi1_lookup(int unit)
995 | | -{
996 | | - return idr_find(&hfi1_unit_table, unit);
997 | | -}
998 | | -
999 | 1013 | struct hfi1_devdata *hfi1_lookup(int unit)
1000 | 1014 | {
1001 | | - struct hfi1_devdata *dd;
1002 | | - unsigned long flags;
1003 | | -
1004 | | - spin_lock_irqsave(&hfi1_devs_lock, flags);
1005 | | - dd = __hfi1_lookup(unit);
1006 | | - spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1007 | | -
1008 | | - return dd;
| 1015 | + return xa_load(&hfi1_dev_table, unit);
1009 | 1016 | }
1010 | 1017 |
1011 | 1018 | /*
.. | ..
1056 | 1063 | }
1057 | 1064 | dd->flags &= ~HFI1_INITTED;
1058 | 1065 |
1059 | | - /* mask and clean up interrupts, but not errors */
1060 | | - set_intr_state(dd, 0);
1061 | | - hfi1_clean_up_interrupts(dd);
| 1066 | + /* mask and clean up interrupts */
| 1067 | + set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
| 1068 | + msix_clean_up_interrupts(dd);
1062 | 1069 |
1063 | 1070 | for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1064 | 1071 | ppd = dd->pport + pidx;
.. | ..
1101 | 1108 | * We can't count on interrupts since we are stopping.
1102 | 1109 | */
1103 | 1110 | hfi1_quiet_serdes(ppd);
1104 | | -
1105 | | - if (ppd->hfi1_wq) {
1106 | | - destroy_workqueue(ppd->hfi1_wq);
1107 | | - ppd->hfi1_wq = NULL;
1108 | | - }
1109 | | - if (ppd->link_wq) {
1110 | | - destroy_workqueue(ppd->link_wq);
1111 | | - ppd->link_wq = NULL;
1112 | | - }
| 1111 | + if (ppd->hfi1_wq)
| 1112 | + flush_workqueue(ppd->hfi1_wq);
| 1113 | + if (ppd->link_wq)
| 1114 | + flush_workqueue(ppd->link_wq);
1113 | 1115 | }
1114 | 1116 | sdma_exit(dd);
1115 | 1117 | }
.. | ..
1133 | 1135 | dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
1134 | 1136 | rcd->rcvhdrq, rcd->rcvhdrq_dma);
1135 | 1137 | rcd->rcvhdrq = NULL;
1136 | | - if (rcd->rcvhdrtail_kvaddr) {
| 1138 | + if (hfi1_rcvhdrtail_kvaddr(rcd)) {
1137 | 1139 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1138 | | - (void *)rcd->rcvhdrtail_kvaddr,
| 1140 | + (void *)hfi1_rcvhdrtail_kvaddr(rcd),
1139 | 1141 | rcd->rcvhdrqtailaddr_dma);
1140 | 1142 | rcd->rcvhdrtail_kvaddr = NULL;
1141 | 1143 | }
.. | ..
1173 | 1175 | /*
1174 | 1176 | * Release our hold on the shared asic data. If we are the last one,
1175 | 1177 | * return the structure to be finalized outside the lock. Must be
1176 | | - * holding hfi1_devs_lock.
| 1178 | + * holding hfi1_dev_table lock.
1177 | 1179 | */
1178 | 1180 | static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
1179 | 1181 | {
.. | ..
1198 | 1200 | }
1199 | 1201 |
1200 | 1202 | /**
1201 | | - * hfi1_clean_devdata - cleans up per-unit data structure
| 1203 | + * hfi1_free_devdata - cleans up and frees per-unit data structure
1202 | 1204 | * @dd: pointer to a valid devdata structure
1203 | 1205 | *
1204 | | - * It cleans up all data structures set up by
| 1206 | + * It cleans up and frees all data structures set up by
1205 | 1207 | * by hfi1_alloc_devdata().
1206 | 1208 | */
1207 | | -static void hfi1_clean_devdata(struct hfi1_devdata *dd)
| 1209 | +void hfi1_free_devdata(struct hfi1_devdata *dd)
1208 | 1210 | {
1209 | 1211 | struct hfi1_asic_data *ad;
1210 | 1212 | unsigned long flags;
1211 | 1213 |
1212 | | - spin_lock_irqsave(&hfi1_devs_lock, flags);
1213 | | - if (!list_empty(&dd->list)) {
1214 | | - idr_remove(&hfi1_unit_table, dd->unit);
1215 | | - list_del_init(&dd->list);
1216 | | - }
| 1214 | + xa_lock_irqsave(&hfi1_dev_table, flags);
| 1215 | + __xa_erase(&hfi1_dev_table, dd->unit);
1217 | 1216 | ad = release_asic_data(dd);
1218 | | - spin_unlock_irqrestore(&hfi1_devs_lock, flags);
| 1217 | + xa_unlock_irqrestore(&hfi1_dev_table, flags);
1219 | 1218 |
1220 | 1219 | finalize_asic_data(dd, ad);
1221 | 1220 | free_platform_config(dd);
.. | ..
1230 | 1229 | dd->tx_opstats = NULL;
1231 | 1230 | kfree(dd->comp_vect);
1232 | 1231 | dd->comp_vect = NULL;
| 1232 | + if (dd->rcvhdrtail_dummy_kvaddr)
| 1233 | + dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
| 1234 | + (void *)dd->rcvhdrtail_dummy_kvaddr,
| 1235 | + dd->rcvhdrtail_dummy_dma);
| 1236 | + dd->rcvhdrtail_dummy_kvaddr = NULL;
1233 | 1237 | sdma_clean(dd, dd->num_sdma);
1234 | 1238 | rvt_dealloc_device(&dd->verbs_dev.rdi);
1235 | 1239 | }
1236 | 1240 |
1237 | | -static void __hfi1_free_devdata(struct kobject *kobj)
1238 | | -{
1239 | | - struct hfi1_devdata *dd =
1240 | | - container_of(kobj, struct hfi1_devdata, kobj);
1241 | | -
1242 | | - hfi1_clean_devdata(dd);
1243 | | -}
1244 | | -
1245 | | -static struct kobj_type hfi1_devdata_type = {
1246 | | - .release = __hfi1_free_devdata,
1247 | | -};
1248 | | -
1249 | | -void hfi1_free_devdata(struct hfi1_devdata *dd)
1250 | | -{
1251 | | - kobject_put(&dd->kobj);
1252 | | -}
1253 | | -
1254 | | -/*
1255 | | - * Allocate our primary per-unit data structure. Must be done via verbs
1256 | | - * allocator, because the verbs cleanup process both does cleanup and
1257 | | - * free of the data structure.
1258 | | - * "extra" is for chip-specific data.
| 1241 | +/**
| 1242 | + * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
| 1243 | + * @pdev: Valid PCI device
| 1244 | + * @extra: How many bytes to alloc past the default
1259 | 1245 | *
1260 | | - * Use the idr mechanism to get a unit number for this unit.
| 1246 | + * Must be done via verbs allocator, because the verbs cleanup process
| 1247 | + * both does cleanup and free of the data structure.
| 1248 | + * "extra" is for chip-specific data.
1261 | 1249 | */
1262 | | -struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
| 1250 | +static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
| 1251 | + size_t extra)
1263 | 1252 | {
1264 | | - unsigned long flags;
1265 | 1253 | struct hfi1_devdata *dd;
1266 | 1254 | int ret, nports;
1267 | 1255 |
.. | ..
1277 | 1265 | dd->pcidev = pdev;
1278 | 1266 | pci_set_drvdata(pdev, dd);
1279 | 1267 |
1280 | | - INIT_LIST_HEAD(&dd->list);
1281 | | - idr_preload(GFP_KERNEL);
1282 | | - spin_lock_irqsave(&hfi1_devs_lock, flags);
1283 | | -
1284 | | - ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
1285 | | - if (ret >= 0) {
1286 | | - dd->unit = ret;
1287 | | - list_add(&dd->list, &hfi1_dev_list);
1288 | | - }
1289 | | - dd->node = -1;
1290 | | -
1291 | | - spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1292 | | - idr_preload_end();
1293 | | -
| 1268 | + ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
| 1269 | + GFP_KERNEL);
1294 | 1270 | if (ret < 0) {
1295 | | - hfi1_early_err(&pdev->dev,
1296 | | - "Could not allocate unit ID: error %d\n", -ret);
| 1271 | + dev_err(&pdev->dev,
| 1272 | + "Could not allocate unit ID: error %d\n", -ret);
1297 | 1273 | goto bail;
1298 | 1274 | }
1299 | 1275 | rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
| 1276 | + /*
| 1277 | + * If the BIOS does not have the NUMA node information set, select
| 1278 | + * NUMA 0 so we get consistent performance.
| 1279 | + */
| 1280 | + dd->node = pcibus_to_node(pdev->bus);
| 1281 | + if (dd->node == NUMA_NO_NODE) {
| 1282 | + dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
| 1283 | + dd->node = 0;
| 1284 | + }
1300 | 1285 |
1301 | 1286 | /*
1302 | 1287 | * Initialize all locks for the device. This needs to be as early as
.. | ..
1314 | 1299 | spin_lock_init(&dd->pio_map_lock);
1315 | 1300 | mutex_init(&dd->dc8051_lock);
1316 | 1301 | init_waitqueue_head(&dd->event_queue);
| 1302 | + spin_lock_init(&dd->irq_src_lock);
1317 | 1303 |
1318 | 1304 | dd->int_counter = alloc_percpu(u64);
1319 | 1305 | if (!dd->int_counter) {
.. | ..
1345 | 1331 | goto bail;
1346 | 1332 | }
1347 | 1333 |
1348 | | - kobject_init(&dd->kobj, &hfi1_devdata_type);
| 1334 | + /* allocate dummy tail memory for all receive contexts */
| 1335 | + dd->rcvhdrtail_dummy_kvaddr =
| 1336 | + dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
| 1337 | + &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
| 1338 | + if (!dd->rcvhdrtail_dummy_kvaddr) {
| 1339 | + ret = -ENOMEM;
| 1340 | + goto bail;
| 1341 | + }
| 1342 | +
| 1343 | + atomic_set(&dd->ipoib_rsm_usr_num, 0);
1349 | 1344 | return dd;
1350 | 1345 |
1351 | 1346 | bail:
1352 | | - hfi1_clean_devdata(dd);
| 1347 | + hfi1_free_devdata(dd);
1353 | 1348 | return ERR_PTR(ret);
1354 | 1349 | }
1355 | 1350 |
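The dma_zalloc_coherent() calls converted throughout this patch (the dummy tail buffer above, and the rcvhdrq, rcvhdrtail and eager buffers below) do not lose their zeroing: dma_alloc_coherent() returns zeroed memory, which is why the zalloc variant was removed from the kernel. A minimal sketch of the allocate/free pairing used for a small coherent buffer like the dummy tail (illustrative names, not driver code):

#include <linux/dma-mapping.h>

/* Allocate a small coherent buffer; the returned memory is already zeroed. */
static void *example_alloc_dummy_tail(struct device *dev, dma_addr_t *dma)
{
        return dma_alloc_coherent(dev, sizeof(u64), dma, GFP_KERNEL);
}

static void example_free_dummy_tail(struct device *dev, void *cpu_addr,
                                    dma_addr_t dma)
{
        dma_free_coherent(dev, sizeof(u64), cpu_addr, dma);
}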
.. | ..
1479 | 1474 | /* sanitize link CRC options */
1480 | 1475 | link_crc_mask &= SUPPORTED_CRCS;
1481 | 1476 |
| 1477 | + ret = opfn_init();
| 1478 | + if (ret < 0) {
| 1479 | + pr_err("Failed to allocate opfn_wq");
| 1480 | + goto bail_dev;
| 1481 | + }
| 1482 | +
1482 | 1483 | /*
1483 | 1484 | * These must be called before the driver is registered with
1484 | 1485 | * the PCI subsystem.
1485 | 1486 | */
1486 | | - idr_init(&hfi1_unit_table);
1487 | | -
1488 | 1487 | hfi1_dbg_init();
1489 | | - ret = hfi1_wss_init();
1490 | | - if (ret < 0)
1491 | | - goto bail_wss;
1492 | 1488 | ret = pci_register_driver(&hfi1_pci_driver);
1493 | 1489 | if (ret < 0) {
1494 | 1490 | pr_err("Unable to register driver: error %d\n", -ret);
.. | ..
1497 | 1493 | goto bail; /* all OK */
1498 | 1494 |
1499 | 1495 | bail_dev:
1500 | | - hfi1_wss_exit();
1501 | | -bail_wss:
1502 | 1496 | hfi1_dbg_exit();
1503 | | - idr_destroy(&hfi1_unit_table);
1504 | 1497 | dev_cleanup();
1505 | 1498 | bail:
1506 | 1499 | return ret;
.. | ..
1514 | 1507 | static void __exit hfi1_mod_cleanup(void)
1515 | 1508 | {
1516 | 1509 | pci_unregister_driver(&hfi1_pci_driver);
| 1510 | + opfn_exit();
1517 | 1511 | node_affinity_destroy_all();
1518 | | - hfi1_wss_exit();
1519 | 1512 | hfi1_dbg_exit();
1520 | 1513 |
1521 | | - idr_destroy(&hfi1_unit_table);
| 1514 | + WARN_ON(!xa_empty(&hfi1_dev_table));
1522 | 1515 | dispose_firmware(); /* asymmetric with obtain_firmware() */
1523 | 1516 | dev_cleanup();
1524 | 1517 | }
.. | ..
1554 | 1547 |
1555 | 1548 | free_credit_return(dd);
1556 | 1549 |
1557 | | - if (dd->rcvhdrtail_dummy_kvaddr) {
1558 | | - dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1559 | | - (void *)dd->rcvhdrtail_dummy_kvaddr,
1560 | | - dd->rcvhdrtail_dummy_dma);
1561 | | - dd->rcvhdrtail_dummy_kvaddr = NULL;
1562 | | - }
1563 | | -
1564 | 1550 | /*
1565 | 1551 | * Free any resources still in use (usually just kernel contexts)
1566 | 1552 | * at unload; we do for ctxtcnt, because that's what we allocate.
.. | ..
1569 | 1555 | struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1570 | 1556 |
1571 | 1557 | if (rcd) {
1572 | | - hfi1_clear_tids(rcd);
| 1558 | + hfi1_free_ctxt_rcv_groups(rcd);
1573 | 1559 | hfi1_free_ctxt(rcd);
1574 | 1560 | }
1575 | 1561 | }
.. | ..
1609 | 1595 | hfi1_free_devdata(dd);
1610 | 1596 | }
1611 | 1597 |
1612 | | -static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
1613 | | -{
1614 | | - if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
1615 | | - hfi1_early_err(dev, "Receive header queue count too small\n");
1616 | | - return -EINVAL;
1617 | | - }
1618 | | -
1619 | | - if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
1620 | | - hfi1_early_err(dev,
1621 | | - "Receive header queue count cannot be greater than %u\n",
1622 | | - HFI1_MAX_HDRQ_EGRBUF_CNT);
1623 | | - return -EINVAL;
1624 | | - }
1625 | | -
1626 | | - if (thecnt % HDRQ_INCREMENT) {
1627 | | - hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
1628 | | - thecnt, HDRQ_INCREMENT);
1629 | | - return -EINVAL;
1630 | | - }
1631 | | -
1632 | | - return 0;
1633 | | -}
1634 | | -
1635 | 1598 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1636 | 1599 | {
1637 | 1600 | int ret = 0, j, pidx, initfail;
.. | ..
1644 | 1607 | /* Validate dev ids */
1645 | 1608 | if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1646 | 1609 | ent->device == PCI_DEVICE_ID_INTEL1)) {
1647 | | - hfi1_early_err(&pdev->dev,
1648 | | - "Failing on unknown Intel deviceid 0x%x\n",
1649 | | - ent->device);
| 1610 | + dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
| 1611 | + ent->device);
1650 | 1612 | ret = -ENODEV;
1651 | 1613 | goto bail;
1652 | 1614 | }
1653 | 1615 |
| 1616 | + /* Allocate the dd so we can get to work */
| 1617 | + dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
| 1618 | + sizeof(struct hfi1_pportdata));
| 1619 | + if (IS_ERR(dd)) {
| 1620 | + ret = PTR_ERR(dd);
| 1621 | + goto bail;
| 1622 | + }
| 1623 | +
1654 | 1624 | /* Validate some global module parameters */
1655 | | - ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
| 1625 | + ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
1656 | 1626 | if (ret)
1657 | 1627 | goto bail;
1658 | 1628 |
1659 | 1629 | /* use the encoding function as a sanitization check */
1660 | 1630 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1661 | | - hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
1662 | | - hfi1_hdrq_entsize);
| 1631 | + dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
| 1632 | + hfi1_hdrq_entsize);
1663 | 1633 | ret = -EINVAL;
1664 | 1634 | goto bail;
1665 | 1635 | }
.. | ..
1681 | 1651 | clamp_val(eager_buffer_size,
1682 | 1652 | MIN_EAGER_BUFFER * 8,
1683 | 1653 | MAX_EAGER_BUFFER_TOTAL);
1684 | | - hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
1685 | | - eager_buffer_size);
| 1654 | + dd_dev_info(dd, "Eager buffer size %u\n",
| 1655 | + eager_buffer_size);
1686 | 1656 | } else {
1687 | | - hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
| 1657 | + dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
1688 | 1658 | ret = -EINVAL;
1689 | 1659 | goto bail;
1690 | 1660 | }
.. | ..
1692 | 1662 | /* restrict value of hfi1_rcvarr_split */
1693 | 1663 | hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1694 | 1664 |
1695 | | - ret = hfi1_pcie_init(pdev, ent);
| 1665 | + ret = hfi1_pcie_init(dd);
1696 | 1666 | if (ret)
1697 | 1667 | goto bail;
1698 | 1668 | |
---|
.. | .. |
---|
1700 | 1670 | * Do device-specific initialization, function table setup, dd |
---|
1701 | 1671 | * allocation, etc. |
---|
1702 | 1672 | */ |
---|
1703 | | - dd = hfi1_init_dd(pdev, ent); |
---|
1704 | | - |
---|
1705 | | - if (IS_ERR(dd)) { |
---|
1706 | | - ret = PTR_ERR(dd); |
---|
| 1673 | + ret = hfi1_init_dd(dd); |
---|
| 1674 | + if (ret) |
---|
1707 | 1675 | goto clean_bail; /* error already printed */ |
---|
1708 | | - } |
---|
1709 | 1676 | |
---|
1710 | 1677 | ret = create_workqueues(dd); |
---|
1711 | 1678 | if (ret) |
---|
.. | .. |
---|
1713 | 1680 | |
---|
1714 | 1681 | /* do the generic initialization */ |
---|
1715 | 1682 | initfail = hfi1_init(dd, 0); |
---|
1716 | | - |
---|
1717 | | - /* setup vnic */ |
---|
1718 | | - hfi1_vnic_setup(dd); |
---|
1719 | 1683 | |
---|
1720 | 1684 | ret = hfi1_register_ib_device(dd); |
---|
1721 | 1685 | |
---|
.. | .. |
---|
1736 | 1700 | dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); |
---|
1737 | 1701 | |
---|
1738 | 1702 | if (initfail || ret) { |
---|
1739 | | - hfi1_clean_up_interrupts(dd); |
---|
| 1703 | + msix_clean_up_interrupts(dd); |
---|
1740 | 1704 | stop_timers(dd); |
---|
1741 | 1705 | flush_workqueue(ib_wq); |
---|
1742 | 1706 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { |
---|
.. | .. |
---|
1755 | 1719 | hfi1_device_remove(dd); |
---|
1756 | 1720 | if (!ret) |
---|
1757 | 1721 | hfi1_unregister_ib_device(dd); |
---|
1758 | | - hfi1_vnic_cleanup(dd); |
---|
1759 | 1722 | postinit_cleanup(dd); |
---|
1760 | 1723 | if (initfail) |
---|
1761 | 1724 | ret = initfail; |
---|
.. | .. |
---|
1800 | 1763 | /* unregister from IB core */ |
---|
1801 | 1764 | hfi1_unregister_ib_device(dd); |
---|
1802 | 1765 | |
---|
1803 | | - /* cleanup vnic */ |
---|
1804 | | - hfi1_vnic_cleanup(dd); |
---|
| 1766 | + /* free netdev data */ |
---|
| 1767 | + hfi1_netdev_free(dd); |
---|
1805 | 1768 | |
---|
1806 | 1769 | /* |
---|
1807 | 1770 | * Disable the IB link, disable interrupts on the device, |
---|
1808 | 1771 | * clear dma engines, etc. |
---|
1809 | 1772 | */ |
---|
1810 | 1773 | shutdown_device(dd); |
---|
| 1774 | + destroy_workqueues(dd); |
---|
1811 | 1775 | |
---|
1812 | 1776 | stop_timers(dd); |
---|
1813 | 1777 | |
---|
.. | .. |
---|
1836 | 1800 | int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) |
---|
1837 | 1801 | { |
---|
1838 | 1802 | unsigned amt; |
---|
1839 | | - u64 reg; |
---|
1840 | 1803 | |
---|
1841 | 1804 | if (!rcd->rcvhdrq) { |
---|
1842 | 1805 | gfp_t gfp_flags; |
---|
.. | .. |
---|
1847 | 1810 | gfp_flags = GFP_KERNEL; |
---|
1848 | 1811 | else |
---|
1849 | 1812 | gfp_flags = GFP_USER; |
---|
1850 | | - rcd->rcvhdrq = dma_zalloc_coherent( |
---|
1851 | | - &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, |
---|
1852 | | - gfp_flags | __GFP_COMP); |
---|
| 1813 | + rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, |
---|
| 1814 | + &rcd->rcvhdrq_dma, |
---|
| 1815 | + gfp_flags | __GFP_COMP); |
---|
1853 | 1816 | |
---|
1854 | 1817 | if (!rcd->rcvhdrq) { |
---|
1855 | 1818 | dd_dev_err(dd, |
---|
.. | .. |
---|
1860 | 1823 | |
---|
1861 | 1824 | if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || |
---|
1862 | 1825 | HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { |
---|
1863 | | - rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( |
---|
1864 | | - &dd->pcidev->dev, PAGE_SIZE, |
---|
1865 | | - &rcd->rcvhdrqtailaddr_dma, gfp_flags); |
---|
| 1826 | + rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, |
---|
| 1827 | + PAGE_SIZE, |
---|
| 1828 | + &rcd->rcvhdrqtailaddr_dma, |
---|
| 1829 | + gfp_flags); |
---|
1866 | 1830 | if (!rcd->rcvhdrtail_kvaddr) |
---|
1867 | 1831 | goto bail_free; |
---|
1868 | 1832 | } |
---|
1869 | 1833 | } |
---|
1870 | | - /* |
---|
1871 | | - * These values are per-context: |
---|
1872 | | - * RcvHdrCnt |
---|
1873 | | - * RcvHdrEntSize |
---|
1874 | | - * RcvHdrSize |
---|
1875 | | - */ |
---|
1876 | | - reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT) |
---|
1877 | | - & RCV_HDR_CNT_CNT_MASK) |
---|
1878 | | - << RCV_HDR_CNT_CNT_SHIFT; |
---|
1879 | | - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); |
---|
1880 | | - reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize) |
---|
1881 | | - & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) |
---|
1882 | | - << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; |
---|
1883 | | - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); |
---|
1884 | | - reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) |
---|
1885 | | - << RCV_HDR_SIZE_HDR_SIZE_SHIFT; |
---|
1886 | | - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); |
---|
1887 | 1834 | |
---|
1888 | | - /* |
---|
1889 | | - * Program dummy tail address for every receive context |
---|
1890 | | - * before enabling any receive context |
---|
1891 | | - */ |
---|
1892 | | - write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, |
---|
1893 | | - dd->rcvhdrtail_dummy_dma); |
---|
| 1835 | + set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, |
---|
| 1836 | + rcd->rcvhdrq_cnt); |
---|
1894 | 1837 | |
---|
1895 | 1838 | return 0; |
---|
1896 | 1839 | |
---|
.. | .. |
---|
1958 | 1901 | while (alloced_bytes < rcd->egrbufs.size && |
---|
1959 | 1902 | rcd->egrbufs.alloced < rcd->egrbufs.count) { |
---|
1960 | 1903 | rcd->egrbufs.buffers[idx].addr = |
---|
1961 | | - dma_zalloc_coherent(&dd->pcidev->dev, |
---|
1962 | | - rcd->egrbufs.rcvtid_size, |
---|
1963 | | - &rcd->egrbufs.buffers[idx].dma, |
---|
1964 | | - gfp_flags); |
---|
| 1904 | + dma_alloc_coherent(&dd->pcidev->dev, |
---|
| 1905 | + rcd->egrbufs.rcvtid_size, |
---|
| 1906 | + &rcd->egrbufs.buffers[idx].dma, |
---|
| 1907 | + gfp_flags); |
---|
1965 | 1908 | if (rcd->egrbufs.buffers[idx].addr) { |
---|
1966 | 1909 | rcd->egrbufs.buffers[idx].len = |
---|
1967 | 1910 | rcd->egrbufs.rcvtid_size; |
---|
.. | .. |
---|
2032 | 1975 | rcd->egrbufs.size = alloced_bytes; |
---|
2033 | 1976 | |
---|
2034 | 1977 | hfi1_cdbg(PROC, |
---|
2035 | | - "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n", |
---|
| 1978 | + "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n", |
---|
2036 | 1979 | rcd->ctxt, rcd->egrbufs.alloced, |
---|
2037 | 1980 | rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024); |
---|
2038 | 1981 |