@@ -1,7 +1,8 @@
 #ifndef _HFI1_KERNEL_H
 #define _HFI1_KERNEL_H
 /*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2015-2020 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -54,7 +55,6 @@
 #include <linux/list.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/fs.h>
 #include <linux/completion.h>
@@ -65,6 +65,7 @@
 #include <linux/kthread.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/xarray.h>
 #include <rdma/ib_hdrs.h>
 #include <rdma/opa_addr.h>
 #include <linux/rhashtable.h>
@@ -73,6 +74,7 @@
 
 #include "chip_registers.h"
 #include "common.h"
+#include "opfn.h"
 #include "verbs.h"
 #include "pio.h"
 #include "chip.h"
@@ -80,6 +82,7 @@
 #include "qsfp.h"
 #include "platform.h"
 #include "affinity.h"
+#include "msix.h"
 
 /* bumped 1 from s/w major version of TrueScale */
 #define HFI1_CHIP_VERS_MAJ 3U
@@ -96,6 +99,8 @@
 
 #define NEIGHBOR_TYPE_HFI 0
 #define NEIGHBOR_TYPE_SWITCH 1
+
+#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
 
 extern unsigned long hfi1_cap_mask;
 #define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
@@ -193,7 +198,17 @@
 	u32 count;
 };
 
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+struct hfi1_ctxtdata;
+typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
+typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+
+struct tid_queue {
+	struct list_head queue_head;
+			/* queue head for QP TID resource waiters */
+	u32 enqueue;	/* count of tid enqueues */
+	u32 dequeue;	/* count of tid dequeues */
+};
+
 struct hfi1_ctxtdata {
 	/* rcvhdrq base, needs mmap before useful */
 	void *rcvhdrq;
@@ -214,7 +229,13 @@
 	 * be valid. Worst case is we process an extra interrupt and up to 64
 	 * packets with the wrong interrupt handler.
 	 */
-	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
+	intr_handler do_interrupt;
+	/** fast handler after autoactive */
+	intr_handler fast_handler;
+	/** slow handler */
+	intr_handler slow_handler;
+	/* napi pointer associated with netdev */
+	struct napi_struct *napi;
 	/* verbs rx_stats per rcd */
 	struct hfi1_opcode_stats_perctx *opstats;
 	/* clear interrupt mask */
@@ -287,6 +308,12 @@
 	/* PSM Specific fields */
 	/* lock protecting all Expected TID data */
 	struct mutex exp_mutex;
+	/* lock protecting all Expected TID data of kernel contexts */
+	spinlock_t exp_lock;
+	/* Queue for QP's waiting for HW TID flows */
+	struct tid_queue flow_queue;
+	/* Queue for QP's waiting for HW receive array entries */
+	struct tid_queue rarr_queue;
 	/* when waiting for rcv or pioavail */
 	wait_queue_head_t wait;
 	/* uuid from PSM */
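
The two tid_queue counters only ever increase; the number of QPs still waiting on a context's flow_queue or rarr_queue is simply enqueue minus dequeue. A minimal sketch under that assumption (the helper names below are illustrative, not part of this patch):

	/*
	 * Illustrative helpers, not from hfi1: show how the monotonic
	 * enqueue/dequeue counters yield the current queue depth.
	 */
	static inline void tid_queue_init(struct tid_queue *q)
	{
		INIT_LIST_HEAD(&q->queue_head);
		q->enqueue = 0;
		q->dequeue = 0;
	}

	static inline u32 tid_queue_depth(const struct tid_queue *q)
	{
		/* entries added minus entries removed = QPs still waiting */
		return q->enqueue - q->dequeue;
	}
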
@@ -319,6 +346,9 @@
 	 */
 	u8 subctxt_cnt;
 
+	/* Bit mask to track free TID RDMA HW flows */
+	unsigned long flow_mask;
+	struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
 };
 
 /**
@@ -356,11 +386,11 @@
 	u32 rhqoff;
 	u32 dlid;
 	u32 slid;
+	int numpkt;
 	u16 tlen;
 	s16 etail;
 	u16 pkey;
 	u8 hlen;
-	u8 numpkt;
 	u8 rsize;
 	u8 updegr;
 	u8 etype;
@@ -518,6 +548,37 @@
 	mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
 }
 
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah: the opa header
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+	struct ib_other_headers *ohdr;
+	struct ib_header *hdr = NULL;
+	struct hfi1_16b_header *hdr_16b = NULL;
+
+	/* Find out where the BTH is */
+	if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+		hdr = &opah->ibh;
+		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else
+			ohdr = &hdr->u.l.oth;
+	} else {
+		u8 l4;
+
+		hdr_16b = &opah->opah;
+		l4 = hfi1_16B_get_l4(hdr_16b);
+		if (l4 == OPA_16B_L4_IB_LOCAL)
+			ohdr = &hdr_16b->u.oth;
+		else
+			ohdr = &hdr_16b->u.l.oth;
+	}
+	return ohdr;
+}
+
 struct rvt_sge_state;
 
 /*
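
hfi1_get_rc_ohdr() hides the 9B versus 16B framing difference: either way the caller gets the ib_other_headers that holds the BTH. A hedged usage sketch (the wrapper below is hypothetical; ib_bth_get_opcode() is assumed from <rdma/ib_hdrs.h>):

	/* Sketch: fetch the BTH opcode regardless of packet format. */
	static inline u8 example_opcode_from_opa_header(struct hfi1_opa_header *opah)
	{
		struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);

		/* byte 0 of the BTH carries the opcode in both formats */
		return ib_bth_get_opcode(ohdr);
	}
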
@@ -622,6 +683,8 @@
 #define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
 #define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
 #define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
 
 /* partition enforcement flags */
 #define HFI1_PART_ENFORCE_IN 0x1
@@ -667,6 +730,14 @@
 	void *arg;
 	cpumask_t mask;
 	struct irq_affinity_notify notify;
+};
+
+struct hfi1_msix_info {
+	/* lock to synchronize in_use_msix access */
+	spinlock_t msix_lock;
+	DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+	struct hfi1_msix_entry *msix_entries;
+	u16 max_requested;
 };
 
 /* per-SL CCA information */
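
hfi1_msix_info centralizes the MSI-X bookkeeping: in_use_msix marks which of the max_requested vectors have been handed out, and msix_lock serializes updates. A sketch of how a free vector index could be reserved from the bitmap (illustrative only; the real allocator lives in msix.c):

	/* Sketch: reserve one MSI-X vector index, or -ENOSPC if none is free. */
	static int example_msix_reserve(struct hfi1_msix_info *msix)
	{
		int nr;

		spin_lock(&msix->msix_lock);
		nr = find_first_zero_bit(msix->in_use_msix, msix->max_requested);
		if (nr < msix->max_requested)
			__set_bit(nr, msix->in_use_msix);
		else
			nr = -ENOSPC;
		spin_unlock(&msix->msix_lock);
		return nr;
	}
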
@@ -917,7 +988,7 @@
 			       struct hfi1_pkt_state *ps,
 			       struct rvt_swqe *wqe);
 extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
-
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
 
 /* return values for the RHF receive functions */
 #define RHF_RCV_CONTINUE 0 /* keep going */
@@ -977,24 +1048,10 @@
 #define NUM_MAP_ENTRIES 256
 #define NUM_MAP_REGS 32
 
-/*
- * Number of VNIC contexts used. Ensure it is less than or equal to
- * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
- */
-#define HFI1_NUM_VNIC_CTXT 8
-
-/* Number of VNIC RSM entries */
-#define NUM_VNIC_MAP_ENTRIES 8
-
 /* Virtual NIC information */
 struct hfi1_vnic_data {
-	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
 	struct kmem_cache *txreq_cache;
 	u8 num_vports;
-	struct idr vesw_idr;
-	u8 rmt_start;
-	u8 num_ctxt;
-	u32 msix_idx;
 };
 
 struct hfi1_vnic_vport_info;
@@ -1011,7 +1068,6 @@
 typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
 struct hfi1_devdata {
 	struct hfi1_ibdev verbs_dev;     /* must be first */
-	struct list_head list;
 	/* pointers to related structs for this device */
 	/* pci access data structure */
 	struct pci_dev *pcidev;
@@ -1101,8 +1157,8 @@
 	u64 z_send_schedule;
 
 	u64 __percpu *send_schedule;
-	/* number of reserved contexts for VNIC usage */
-	u16 num_vnic_contexts;
+	/* number of reserved contexts for netdev usage */
+	u16 num_netdev_contexts;
 	/* number of receive contexts in use by the driver */
 	u32 num_rcv_contexts;
 	/* number of pio send contexts in use by the driver */
@@ -1209,11 +1265,6 @@
 
 	struct diag_client *diag_client;
 
-	/* MSI-X information */
-	struct hfi1_msix_entry *msix_entries;
-	u32 num_msix_entries;
-	u32 first_dyn_msix_idx;
-
 	/* general interrupt: mask of handled interrupts */
 	u64 gi_mask[CCE_NUM_INT_CSRS];
 
@@ -1226,6 +1277,9 @@
 	 * 64 bit synthetic counters
 	 */
 	struct timer_list synth_stats_timer;
+
+	/* MSI-X information */
+	struct hfi1_msix_info msix_info;
 
 	/*
 	 * device counters
@@ -1254,7 +1308,7 @@
 	struct err_info_constraint err_info_xmit_constraint;
 
 	atomic_t drop_packet;
-	u8 do_drop;
+	bool do_drop;
 	u8 err_info_uncorrectable;
 	u8 err_info_fmconfig;
 
@@ -1349,16 +1403,17 @@
 	bool aspm_enabled;	/* ASPM state: enabled/disabled */
 	struct rhashtable *sdma_rht;
 
-	struct kobject kobj;
-
 	/* vnic data */
 	struct hfi1_vnic_data vnic;
-};
+	/* Lock to protect IRQ SRC register access */
+	spinlock_t irq_src_lock;
+	int vnic_num_vports;
+	struct net_device *dummy_netdev;
+	struct hfi1_affinity_node *affinity_entry;
 
-static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
-{
-	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
-}
+	/* Keeps track of IPoIB RSM rule users */
+	atomic_t ipoib_rsm_usr_num;
+};
 
 /* 8051 firmware version helper */
 #define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
@@ -1389,7 +1444,7 @@
 	/* for cpu affinity; -1 if none */
 	int rec_cpu_num;
 	u32 tid_n_pinned;
-	struct mmu_rb_handler *handler;
+	bool use_mn;
 	struct tid_rb_node **entry_to_rb;
 	spinlock_t tid_lock; /* protect tid_[limit,used] counters */
 	u32 tid_limit;
@@ -1398,11 +1453,9 @@
 	u32 invalid_tid_idx;
 	/* protect invalid_tids array and invalid_tid_idx */
 	spinlock_t invalid_lock;
-	struct mm_struct *mm;
 };
 
-extern struct list_head hfi1_dev_list;
-extern spinlock_t hfi1_devs_lock;
+extern struct xarray hfi1_dev_table;
 struct hfi1_devdata *hfi1_lookup(int unit);
 
 static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
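
With the per-unit device list replaced by an xarray indexed by unit number, a lookup no longer needs hfi1_devs_lock; xa_load() suffices under RCU. A sketch of the likely shape of hfi1_lookup() (the real body lives in init.c and may differ):

	/* Sketch: unit-number lookup against the xarray device table. */
	static inline struct hfi1_devdata *example_hfi1_lookup(int unit)
	{
		if (unit < 0)
			return NULL;
		return xa_load(&hfi1_dev_table, unit);
	}
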
@@ -1437,10 +1490,9 @@
 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
 void set_all_slowpath(struct hfi1_devdata *dd);
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
 
 extern const struct pci_device_id hfi1_pci_tbl[];
 void hfi1_make_ud_req_9B(struct rvt_qp *qp,
@@ -1456,10 +1508,146 @@
 #define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
 #define RCV_PKT_DONE 0x2 /* stop, no more packets detected */
 
+/**
+ * hfi1_rcd_head - return the rcd head index
+ * @rcd: the context
+ */
+static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->head;
+}
+
+/**
+ * hfi1_set_rcd_head - set the rcd head index
+ * @rcd: the context
+ * @head: the new head
+ */
+static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
+{
+	rcd->head = head;
+}
+
 /* calculate the current RHF address */
 static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
 {
 	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
+}
+
+/* return DMA_RTAIL configuration */
+static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
+{
+	return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
+}
+
+/**
+ * hfi1_seq_incr_wrap - wrapping increment for sequence
+ * @seq: the current sequence number
+ *
+ * Returns: the incremented seq
+ */
+static inline u8 hfi1_seq_incr_wrap(u8 seq)
+{
+	if (++seq > RHF_MAX_SEQ)
+		seq = 1;
+	return seq;
+}
+
+/**
+ * hfi1_seq_cnt - return seq_cnt member
+ * @rcd: the receive context
+ *
+ * Return seq_cnt member
+ */
+static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->seq_cnt;
+}
+
+/**
+ * hfi1_set_seq_cnt - set seq_cnt member
+ * @rcd: the receive context
+ *
+ * Set seq_cnt member
+ */
+static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
+{
+	rcd->seq_cnt = cnt;
+}
+
+/**
+ * last_rcv_seq - check for last packet
+ * @rcd: the receive context
+ * @seq: the RHF sequence number
+ *
+ * Return true if this was the last packet
+ */
+static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+	return seq != rcd->seq_cnt;
+}
+
+/**
+ * hfi1_seq_incr - increment context sequence number
+ * @rcd: the receive context
+ * @seq: the current sequence number
+ *
+ * Returns: true if this was the last packet
+ */
+static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+	rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
+	return last_rcv_seq(rcd, seq);
+}
+
+/**
+ * get_hdrqentsize - return hdrq entry size
+ * @rcd: the receive context
+ */
+static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->rcvhdrqentsize;
+}
+
+/**
+ * get_hdrq_cnt - return hdrq count
+ * @rcd: the receive context
+ */
+static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->rcvhdrq_cnt;
+}
+
+/**
+ * hfi1_is_slowpath - check if this context is slow path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
+{
+	return rcd->do_interrupt == rcd->slow_handler;
+}
+
+/**
+ * hfi1_is_fastpath - check if this context is fast path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
+{
+	if (rcd->ctxt == HFI1_CTRL_CTXT)
+		return false;
+
+	return rcd->do_interrupt == rcd->fast_handler;
+}
+
+/**
+ * hfi1_set_fast - change to the fast handler
+ * @rcd: the receive context
+ */
+static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
+{
+	if (unlikely(!rcd))
+		return;
+	if (unlikely(!hfi1_is_fastpath(rcd)))
+		rcd->do_interrupt = rcd->fast_handler;
 }
 
 int hfi1_reset_device(int);
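
A context's do_interrupt pointer now simply selects between its own fast_handler and slow_handler, and hfi1_set_fast() flips a context onto the fast path. Forcing everything back onto the slow path is the mirror operation; a hedged sketch of what set_all_slowpath() (declared earlier, defined in driver.c) plausibly does, assuming the driver's hfi1_rcd_get_by_index()/hfi1_rcd_put() refcounting helpers:

	/* Sketch only: switch every non-control context to its slow handler. */
	static void example_all_slowpath(struct hfi1_devdata *dd)
	{
		u16 i;

		for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, i);

			if (!rcd)
				continue;
			rcd->do_interrupt = rcd->slow_handler;
			hfi1_rcd_put(rcd);
		}
	}
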
@@ -1909,10 +2097,8 @@
 #define HFI1_CTXT_WAITING_URG 4
 
 /* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
-				  const struct pci_device_id *ent);
+int hfi1_init_dd(struct hfi1_devdata *dd);
 void hfi1_free_devdata(struct hfi1_devdata *dd);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
 
 /* LED beaconing functions */
 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
@@ -1966,9 +2152,21 @@
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty);
 
+/**
+ * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
+ * @rcd: the receive context
+ */
+static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
+{
+	return (__le64 *)rcd->rcvhdrtail_kvaddr;
+}
+
 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
 {
-	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
+	u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
+
+	if (kv)
+		*kv = 0ULL;
 }
 
 static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
@@ -1977,7 +2175,17 @@
 	 * volatile because it's a DMA target from the chip, routine is
 	 * inlined, and don't want register caching or reordering.
 	 */
-	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
+	return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
+}
+
+static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
+{
+	if (likely(!rcd->rcvhdrtail_kvaddr)) {
+		u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
+
+		return !last_rcv_seq(rcd, seq);
+	}
+	return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
 }
 
 /*
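
hfi1_packet_present() answers "is there more work" both ways the hardware can be configured: without a DMA'd tail (no rcvhdrtail_kvaddr) it peeks at the RHF sequence number, otherwise it compares the software head against the DMA'd tail. A hedged sketch of a NAPI poll callback built on the budget-based handlers declared above (the rx-queue wrapper struct is illustrative; the real netdev glue differs):

	struct example_rxq {
		struct napi_struct napi;
		struct hfi1_ctxtdata *rcd;
	};

	/* Sketch: drain up to @budget packets, then leave NAPI polling mode. */
	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_rxq *rxq = container_of(napi, struct example_rxq, napi);
		int work_done;

		/* rcd->do_interrupt points at the napi fast-path handler here */
		work_done = handle_receive_interrupt_napi_fp(rxq->rcd, budget);
		if (work_done < budget)
			napi_complete_done(napi, work_done);
		return work_done;
	}
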
@@ -1985,6 +2193,7 @@
  */
 
 extern const char ib_hfi1_version[];
+extern const struct attribute_group ib_hfi1_attr_group;
 
 int hfi1_device_create(struct hfi1_devdata *dd);
 void hfi1_device_remove(struct hfi1_devdata *dd);
@@ -1996,16 +2205,15 @@
 /* Hook for sysfs read of QSFP */
 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
 
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
+int hfi1_pcie_init(struct hfi1_devdata *dd);
 void hfi1_pcie_cleanup(struct pci_dev *pdev);
 int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
 int pcie_speeds(struct hfi1_devdata *dd);
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
 int restore_pci_variables(struct hfi1_devdata *dd);
 int save_pci_variables(struct hfi1_devdata *dd);
 int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+void tune_pcie_caps(struct hfi1_devdata *dd);
 int parse_platform_config(struct hfi1_devdata *dd);
 int get_platform_config_field(struct hfi1_devdata *dd,
 			      enum platform_config_table_type_encoding
@@ -2034,7 +2242,6 @@
 extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
-extern uint kdeth_qp;
 extern uint loopback;
 extern uint quick_linkup;
 extern uint rcv_intr_timeout;
@@ -2100,7 +2307,7 @@
 			SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
 #endif
 			HFI1_PKT_USER_SC_INTEGRITY;
-	else
+	else if (ctxt_type != SC_KERNEL)
 		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
 
 	/* turn on send-side job key checks if !A0 */
@@ -2145,19 +2352,6 @@
 
 	return base_sdma_integrity;
 }
-
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening..
- */
-#define hfi1_early_err(dev, fmt, ...) \
-	dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
-	dev_info(dev, fmt, ##__VA_ARGS__)
 
 #define dd_dev_emerg(dd, fmt, ...) \
 	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
@@ -2262,6 +2456,25 @@
 	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
 }
 
+/**
+ * hfi1_need_drop - detect need for drop
+ * @dd: the device
+ *
+ * In some cases, the first packet needs to be dropped.
+ *
+ * Return true if the current packet needs to be dropped and false otherwise.
+ */
+static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
+{
+	if (unlikely(dd->do_drop &&
+		     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
+		     DROP_PACKET_ON)) {
+		dd->do_drop = false;
+		return true;
+	}
+	return false;
+}
+
 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
 
 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
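
With do_drop now a bool, hfi1_need_drop() is a one-shot test: it fires only when do_drop is set and the drop_packet atomic is still armed, then disarms both. A minimal sketch of per-packet use (only hfi1_need_drop() itself comes from this header):

	/* Sketch: consume the armed one-shot drop before doing real work. */
	static inline bool example_should_process(struct hfi1_devdata *dd)
	{
		if (hfi1_need_drop(dd))
			return false;	/* silently discard this packet */
		return true;
	}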