hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/infiniband/hw/hfi1/hfi.h
....@@ -1,7 +1,8 @@
11 #ifndef _HFI1_KERNEL_H
22 #define _HFI1_KERNEL_H
33 /*
4
- * Copyright(c) 2015-2018 Intel Corporation.
4
+ * Copyright(c) 2020 Cornelis Networks, Inc.
5
+ * Copyright(c) 2015-2020 Intel Corporation.
56 *
67 * This file is provided under a dual BSD/GPLv2 license. When using or
78 * redistributing this file, you may do so under either license.
....@@ -54,7 +55,6 @@
5455 #include <linux/list.h>
5556 #include <linux/scatterlist.h>
5657 #include <linux/slab.h>
57
-#include <linux/idr.h>
5858 #include <linux/io.h>
5959 #include <linux/fs.h>
6060 #include <linux/completion.h>
....@@ -65,6 +65,7 @@
6565 #include <linux/kthread.h>
6666 #include <linux/i2c.h>
6767 #include <linux/i2c-algo-bit.h>
68
+#include <linux/xarray.h>
6869 #include <rdma/ib_hdrs.h>
6970 #include <rdma/opa_addr.h>
7071 #include <linux/rhashtable.h>
....@@ -73,6 +74,7 @@
7374
7475 #include "chip_registers.h"
7576 #include "common.h"
77
+#include "opfn.h"
7678 #include "verbs.h"
7779 #include "pio.h"
7880 #include "chip.h"
....@@ -80,6 +82,7 @@
8082 #include "qsfp.h"
8183 #include "platform.h"
8284 #include "affinity.h"
85
+#include "msix.h"
8386
8487 /* bumped 1 from s/w major version of TrueScale */
8588 #define HFI1_CHIP_VERS_MAJ 3U
....@@ -96,6 +99,8 @@
9699
97100 #define NEIGHBOR_TYPE_HFI 0
98101 #define NEIGHBOR_TYPE_SWITCH 1
102
+
103
+#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
99104
100105 extern unsigned long hfi1_cap_mask;
101106 #define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
....@@ -193,7 +198,17 @@
193198 u32 count;
194199 };
195200
196
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
201
+struct hfi1_ctxtdata;
202
+typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
203
+typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
204
+
205
+struct tid_queue {
206
+ struct list_head queue_head;
207
+ /* queue head for QP TID resource waiters */
208
+ u32 enqueue; /* count of tid enqueues */
209
+ u32 dequeue; /* count of tid dequeues */
210
+};
211
+
197212 struct hfi1_ctxtdata {
198213 /* rcvhdrq base, needs mmap before useful */
199214 void *rcvhdrq;
....@@ -214,7 +229,13 @@
214229 * be valid. Worst case is we process an extra interrupt and up to 64
215230 * packets with the wrong interrupt handler.
216231 */
217
- int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
232
+ intr_handler do_interrupt;
233
+ /** fast handler after autoactive */
234
+ intr_handler fast_handler;
235
+ /** slow handler */
236
+ intr_handler slow_handler;
237
+	/* napi pointer associated with netdev */
238
+ struct napi_struct *napi;
218239 /* verbs rx_stats per rcd */
219240 struct hfi1_opcode_stats_perctx *opstats;
220241 /* clear interrupt mask */
....@@ -287,6 +308,12 @@
287308 /* PSM Specific fields */
288309 /* lock protecting all Expected TID data */
289310 struct mutex exp_mutex;
311
+ /* lock protecting all Expected TID data of kernel contexts */
312
+ spinlock_t exp_lock;
313
+ /* Queue for QP's waiting for HW TID flows */
314
+ struct tid_queue flow_queue;
315
+ /* Queue for QP's waiting for HW receive array entries */
316
+ struct tid_queue rarr_queue;
290317 /* when waiting for rcv or pioavail */
291318 wait_queue_head_t wait;
292319 /* uuid from PSM */
....@@ -319,6 +346,9 @@
319346 */
320347 u8 subctxt_cnt;
321348
349
+ /* Bit mask to track free TID RDMA HW flows */
350
+ unsigned long flow_mask;
351
+ struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
322352 };
323353
324354 /**
....@@ -356,11 +386,11 @@
356386 u32 rhqoff;
357387 u32 dlid;
358388 u32 slid;
389
+ int numpkt;
359390 u16 tlen;
360391 s16 etail;
361392 u16 pkey;
362393 u8 hlen;
363
- u8 numpkt;
364394 u8 rsize;
365395 u8 updegr;
366396 u8 etype;
....@@ -518,6 +548,37 @@
518548 mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
519549 }
520550
551
+/**
552
+ * hfi1_get_rc_ohdr - get extended header
553
+ * @opah - the opaheader
554
+ */
555
+static inline struct ib_other_headers *
556
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
557
+{
558
+ struct ib_other_headers *ohdr;
559
+ struct ib_header *hdr = NULL;
560
+ struct hfi1_16b_header *hdr_16b = NULL;
561
+
562
+ /* Find out where the BTH is */
563
+ if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
564
+ hdr = &opah->ibh;
565
+ if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
566
+ ohdr = &hdr->u.oth;
567
+ else
568
+ ohdr = &hdr->u.l.oth;
569
+ } else {
570
+ u8 l4;
571
+
572
+ hdr_16b = &opah->opah;
573
+ l4 = hfi1_16B_get_l4(hdr_16b);
574
+ if (l4 == OPA_16B_L4_IB_LOCAL)
575
+ ohdr = &hdr_16b->u.oth;
576
+ else
577
+ ohdr = &hdr_16b->u.l.oth;
578
+ }
579
+ return ohdr;
580
+}
581
+
521582 struct rvt_sge_state;
522583
523584 /*
....@@ -622,6 +683,8 @@
622683 #define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
623684 #define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
624685 #define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
686
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
687
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
625688
626689 /* partition enforcement flags */
627690 #define HFI1_PART_ENFORCE_IN 0x1
....@@ -667,6 +730,14 @@
667730 void *arg;
668731 cpumask_t mask;
669732 struct irq_affinity_notify notify;
733
+};
734
+
735
+struct hfi1_msix_info {
736
+ /* lock to synchronize in_use_msix access */
737
+ spinlock_t msix_lock;
738
+ DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
739
+ struct hfi1_msix_entry *msix_entries;
740
+ u16 max_requested;
670741 };
671742
672743 /* per-SL CCA information */
....@@ -917,7 +988,7 @@
917988 struct hfi1_pkt_state *ps,
918989 struct rvt_swqe *wqe);
919990 extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
920
-
991
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
921992
922993 /* return values for the RHF receive functions */
923994 #define RHF_RCV_CONTINUE 0 /* keep going */
....@@ -977,24 +1048,10 @@
9771048 #define NUM_MAP_ENTRIES 256
9781049 #define NUM_MAP_REGS 32
9791050
980
-/*
981
- * Number of VNIC contexts used. Ensure it is less than or equal to
982
- * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
983
- */
984
-#define HFI1_NUM_VNIC_CTXT 8
985
-
986
-/* Number of VNIC RSM entries */
987
-#define NUM_VNIC_MAP_ENTRIES 8
988
-
9891051 /* Virtual NIC information */
9901052 struct hfi1_vnic_data {
991
- struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
9921053 struct kmem_cache *txreq_cache;
9931054 u8 num_vports;
994
- struct idr vesw_idr;
995
- u8 rmt_start;
996
- u8 num_ctxt;
997
- u32 msix_idx;
9981055 };
9991056
10001057 struct hfi1_vnic_vport_info;
....@@ -1011,7 +1068,6 @@
10111068 typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
10121069 struct hfi1_devdata {
10131070 struct hfi1_ibdev verbs_dev; /* must be first */
1014
- struct list_head list;
10151071 /* pointers to related structs for this device */
10161072 /* pci access data structure */
10171073 struct pci_dev *pcidev;
....@@ -1101,8 +1157,8 @@
11011157 u64 z_send_schedule;
11021158
11031159 u64 __percpu *send_schedule;
1104
- /* number of reserved contexts for VNIC usage */
1105
- u16 num_vnic_contexts;
1160
+ /* number of reserved contexts for netdev usage */
1161
+ u16 num_netdev_contexts;
11061162 /* number of receive contexts in use by the driver */
11071163 u32 num_rcv_contexts;
11081164 /* number of pio send contexts in use by the driver */
....@@ -1209,11 +1265,6 @@
12091265
12101266 struct diag_client *diag_client;
12111267
1212
- /* MSI-X information */
1213
- struct hfi1_msix_entry *msix_entries;
1214
- u32 num_msix_entries;
1215
- u32 first_dyn_msix_idx;
1216
-
12171268 /* general interrupt: mask of handled interrupts */
12181269 u64 gi_mask[CCE_NUM_INT_CSRS];
12191270
....@@ -1226,6 +1277,9 @@
12261277 * 64 bit synthetic counters
12271278 */
12281279 struct timer_list synth_stats_timer;
1280
+
1281
+ /* MSI-X information */
1282
+ struct hfi1_msix_info msix_info;
12291283
12301284 /*
12311285 * device counters
....@@ -1254,7 +1308,7 @@
12541308 struct err_info_constraint err_info_xmit_constraint;
12551309
12561310 atomic_t drop_packet;
1257
- u8 do_drop;
1311
+ bool do_drop;
12581312 u8 err_info_uncorrectable;
12591313 u8 err_info_fmconfig;
12601314
....@@ -1349,16 +1403,17 @@
13491403 bool aspm_enabled; /* ASPM state: enabled/disabled */
13501404 struct rhashtable *sdma_rht;
13511405
1352
- struct kobject kobj;
1353
-
13541406 /* vnic data */
13551407 struct hfi1_vnic_data vnic;
1356
-};
1408
+ /* Lock to protect IRQ SRC register access */
1409
+ spinlock_t irq_src_lock;
1410
+ int vnic_num_vports;
1411
+ struct net_device *dummy_netdev;
1412
+ struct hfi1_affinity_node *affinity_entry;
13571413
1358
-static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
1359
-{
1360
- return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
1361
-}
1414
+ /* Keeps track of IPoIB RSM rule users */
1415
+ atomic_t ipoib_rsm_usr_num;
1416
+};
13621417
13631418 /* 8051 firmware version helper */
13641419 #define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
....@@ -1389,7 +1444,7 @@
13891444 /* for cpu affinity; -1 if none */
13901445 int rec_cpu_num;
13911446 u32 tid_n_pinned;
1392
- struct mmu_rb_handler *handler;
1447
+ bool use_mn;
13931448 struct tid_rb_node **entry_to_rb;
13941449 spinlock_t tid_lock; /* protect tid_[limit,used] counters */
13951450 u32 tid_limit;
....@@ -1398,11 +1453,9 @@
13981453 u32 invalid_tid_idx;
13991454 /* protect invalid_tids array and invalid_tid_idx */
14001455 spinlock_t invalid_lock;
1401
- struct mm_struct *mm;
14021456 };
14031457
1404
-extern struct list_head hfi1_dev_list;
1405
-extern spinlock_t hfi1_devs_lock;
1458
+extern struct xarray hfi1_dev_table;
14061459 struct hfi1_devdata *hfi1_lookup(int unit);
14071460
14081461 static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
....@@ -1437,10 +1490,9 @@
14371490 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
14381491 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
14391492 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
1493
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
1494
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
14401495 void set_all_slowpath(struct hfi1_devdata *dd);
1441
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
1442
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
1443
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
14441496
14451497 extern const struct pci_device_id hfi1_pci_tbl[];
14461498 void hfi1_make_ud_req_9B(struct rvt_qp *qp,
....@@ -1456,10 +1508,146 @@
14561508 #define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
14571509 #define RCV_PKT_DONE 0x2 /* stop, no more packets detected */
14581510
1511
+/**
1512
+ * hfi1_rcd_head - add accessor for rcd head
1513
+ * @rcd: the context
1514
+ */
1515
+static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
1516
+{
1517
+ return rcd->head;
1518
+}
1519
+
1520
+/**
1521
+ * hfi1_set_rcd_head - add accessor for rcd head
1522
+ * @rcd: the context
1523
+ * @head: the new head
1524
+ */
1525
+static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
1526
+{
1527
+ rcd->head = head;
1528
+}
1529
+
14591530 /* calculate the current RHF address */
14601531 static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
14611532 {
14621533 return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
1534
+}
1535
+
1536
+/* return DMA_RTAIL configuration */
1537
+static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
1538
+{
1539
+ return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
1540
+}
1541
+
1542
+/**
1543
+ * hfi1_seq_incr_wrap - wrapping increment for sequence
1544
+ * @seq: the current sequence number
1545
+ *
1546
+ * Returns: the incremented seq
1547
+ */
1548
+static inline u8 hfi1_seq_incr_wrap(u8 seq)
1549
+{
1550
+ if (++seq > RHF_MAX_SEQ)
1551
+ seq = 1;
1552
+ return seq;
1553
+}
1554
+
1555
+/**
1556
+ * hfi1_seq_cnt - return seq_cnt member
1557
+ * @rcd: the receive context
1558
+ *
1559
+ * Return seq_cnt member
1560
+ */
1561
+static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
1562
+{
1563
+ return rcd->seq_cnt;
1564
+}
1565
+
1566
+/**
1567
+ * hfi1_set_seq_cnt - set seq_cnt member
1568
+ * @rcd: the receive context
1569
+ *
1570
+ * Set seq_cnt member to cnt
1571
+ */
1572
+static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
1573
+{
1574
+ rcd->seq_cnt = cnt;
1575
+}
1576
+
1577
+/**
1578
+ * last_rcv_seq - is last
1579
+ * @rcd: the receive context
1580
+ * @seq: sequence
1581
+ *
1582
+ * return true if last packet
1583
+ */
1584
+static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
1585
+{
1586
+ return seq != rcd->seq_cnt;
1587
+}
1588
+
1589
+/**
1590
+ * rcd_seq_incr - increment context sequence number
1591
+ * @rcd: the receive context
1592
+ * @seq: the current sequence number
1593
+ *
1594
+ * Returns: true if this was the last packet
1595
+ */
1596
+static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
1597
+{
1598
+ rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
1599
+ return last_rcv_seq(rcd, seq);
1600
+}
1601
+
1602
+/**
1603
+ * get_hdrqentsize - return hdrq entry size
1604
+ * @rcd: the receive context
1605
+ */
1606
+static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
1607
+{
1608
+ return rcd->rcvhdrqentsize;
1609
+}
1610
+
1611
+/**
1612
+ * get_hdrq_cnt - return hdrq count
1613
+ * @rcd: the receive context
1614
+ */
1615
+static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
1616
+{
1617
+ return rcd->rcvhdrq_cnt;
1618
+}
1619
+
1620
+/**
1621
+ * hfi1_is_slowpath - check if this context is slow path
1622
+ * @rcd: the receive context
1623
+ */
1624
+static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
1625
+{
1626
+ return rcd->do_interrupt == rcd->slow_handler;
1627
+}
1628
+
1629
+/**
1630
+ * hfi1_is_fastpath - check if this context is fast path
1631
+ * @rcd: the receive context
1632
+ */
1633
+static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
1634
+{
1635
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
1636
+ return false;
1637
+
1638
+ return rcd->do_interrupt == rcd->fast_handler;
1639
+}
1640
+
1641
+/**
1642
+ * hfi1_set_fast - change to the fast handler
1643
+ * @rcd: the receive context
1644
+ */
1645
+static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
1646
+{
1647
+ if (unlikely(!rcd))
1648
+ return;
1649
+ if (unlikely(!hfi1_is_fastpath(rcd)))
1650
+ rcd->do_interrupt = rcd->fast_handler;
14631651 }
14641652
14651653 int hfi1_reset_device(int);
....@@ -1909,10 +2097,8 @@
19092097 #define HFI1_CTXT_WAITING_URG 4
19102098
19112099 /* free up any allocated data at closes */
1912
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
1913
- const struct pci_device_id *ent);
2100
+int hfi1_init_dd(struct hfi1_devdata *dd);
19142101 void hfi1_free_devdata(struct hfi1_devdata *dd);
1915
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
19162102
19172103 /* LED beaconing functions */
19182104 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
....@@ -1966,9 +2152,21 @@
19662152 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
19672153 size_t npages, bool dirty);
19682154
2155
+/**
2156
+ * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
2157
+ * @rcd - the receive context
2158
+ */
2159
+static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
2160
+{
2161
+ return (__le64 *)rcd->rcvhdrtail_kvaddr;
2162
+}
2163
+
19692164 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
19702165 {
1971
- *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
2166
+ u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
2167
+
2168
+ if (kv)
2169
+ *kv = 0ULL;
19722170 }
19732171
19742172 static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
....@@ -1977,7 +2175,17 @@
19772175 * volatile because it's a DMA target from the chip, routine is
19782176 * inlined, and don't want register caching or reordering.
19792177 */
1980
- return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
2178
+ return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
2179
+}
2180
+
2181
+static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
2182
+{
2183
+ if (likely(!rcd->rcvhdrtail_kvaddr)) {
2184
+ u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
2185
+
2186
+ return !last_rcv_seq(rcd, seq);
2187
+ }
2188
+ return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
19812189 }
19822190
19832191 /*
....@@ -1985,6 +2193,7 @@
19852193 */
19862194
19872195 extern const char ib_hfi1_version[];
2196
+extern const struct attribute_group ib_hfi1_attr_group;
19882197
19892198 int hfi1_device_create(struct hfi1_devdata *dd);
19902199 void hfi1_device_remove(struct hfi1_devdata *dd);
....@@ -1996,16 +2205,15 @@
19962205 /* Hook for sysfs read of QSFP */
19972206 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
19982207
1999
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
2000
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
2208
+int hfi1_pcie_init(struct hfi1_devdata *dd);
20012209 void hfi1_pcie_cleanup(struct pci_dev *pdev);
20022210 int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
20032211 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
20042212 int pcie_speeds(struct hfi1_devdata *dd);
2005
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
20062213 int restore_pci_variables(struct hfi1_devdata *dd);
20072214 int save_pci_variables(struct hfi1_devdata *dd);
20082215 int do_pcie_gen3_transition(struct hfi1_devdata *dd);
2216
+void tune_pcie_caps(struct hfi1_devdata *dd);
20092217 int parse_platform_config(struct hfi1_devdata *dd);
20102218 int get_platform_config_field(struct hfi1_devdata *dd,
20112219 enum platform_config_table_type_encoding
....@@ -2034,7 +2242,6 @@
20342242 extern unsigned long n_krcvqs;
20352243 extern uint krcvqs[];
20362244 extern int krcvqsset;
2037
-extern uint kdeth_qp;
20382245 extern uint loopback;
20392246 extern uint quick_linkup;
20402247 extern uint rcv_intr_timeout;
....@@ -2100,7 +2307,7 @@
21002307 SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
21012308 #endif
21022309 HFI1_PKT_USER_SC_INTEGRITY;
2103
- else
2310
+ else if (ctxt_type != SC_KERNEL)
21042311 base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
21052312
21062313 /* turn on send-side job key checks if !A0 */
....@@ -2145,19 +2352,6 @@
21452352
21462353 return base_sdma_integrity;
21472354 }
2148
-
2149
-/*
2150
- * hfi1_early_err is used (only!) to print early errors before devdata is
2151
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
2152
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
2153
- * the same as dd_dev_err, but is used when the message really needs
2154
- * the IB port# to be definitive as to what's happening..
2155
- */
2156
-#define hfi1_early_err(dev, fmt, ...) \
2157
- dev_err(dev, fmt, ##__VA_ARGS__)
2158
-
2159
-#define hfi1_early_info(dev, fmt, ...) \
2160
- dev_info(dev, fmt, ##__VA_ARGS__)
21612355
21622356 #define dd_dev_emerg(dd, fmt, ...) \
21632357 dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
....@@ -2262,6 +2456,25 @@
22622456 return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
22632457 }
22642458
2459
+/**
2460
+ * hfi1_need_drop - detect need for drop
2461
+ * @dd: - the device
2462
+ *
2463
+ * In some cases, the first packet needs to be dropped.
2464
+ *
2465
+ * Return true if the current packet needs to be dropped and false otherwise.
2466
+ */
2467
+static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
2468
+{
2469
+ if (unlikely(dd->do_drop &&
2470
+ atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
2471
+ DROP_PACKET_ON)) {
2472
+ dd->do_drop = false;
2473
+ return true;
2474
+ }
2475
+ return false;
2476
+}
2477
+
22652478 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
22662479
22672480 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))