2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,12 +39,58 @@
 #ifndef __BNXT_QPLIB_FP_H__
 #define __BNXT_QPLIB_FP_H__
 
+/* Few helper structures temporarily defined here
+ * should get rid of these when roce_hsi.h is updated
+ * in original code base
+ */
+struct sq_ud_ext_hdr {
+	__le32 dst_qp;
+	__le32 avid;
+	__le64 rsvd;
+};
+
+struct sq_raw_ext_hdr {
+	__le32 cfa_meta;
+	__le32 rsvd0;
+	__le64 rsvd1;
+};
+
+struct sq_rdma_ext_hdr {
+	__le64 remote_va;
+	__le32 remote_key;
+	__le32 rsvd;
+};
+
+struct sq_atomic_ext_hdr {
+	__le64 swap_data;
+	__le64 cmp_data;
+};
+
+struct sq_fr_pmr_ext_hdr {
+	__le64 pblptr;
+	__le64 va;
+};
+
+struct sq_bind_ext_hdr {
+	__le64 va;
+	__le32 length_lo;
+	__le32 length_hi;
+};
+
+struct rq_ext_hdr {
+	__le64 rsvd1;
+	__le64 rsvd2;
+};
+
+/* Helper structures end */
+
 struct bnxt_qplib_srq {
 	struct bnxt_qplib_pd *pd;
 	struct bnxt_qplib_dpi *dpi;
-	void __iomem *dbr_base;
+	struct bnxt_qplib_db_info dbinfo;
 	u64 srq_handle;
 	u32 id;
+	u16 wqe_size;
 	u32 max_wqe;
 	u32 max_sge;
 	u32 threshold;
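Note (not part of the patch): with natural alignment, every temporary extended-header struct added above packs into exactly 16 bytes, the size of one sq_sge slot, which is the unit the new slot-based helpers at the bottom of this header count in. A compile-time sketch of that assumption (sq_sge comes from roce_hsi.h, which is not in this diff; static_assert is assumed available via linux/build_bug.h):

	/* Hypothetical sanity checks: one 16-byte sq_sge slot per extended header */
	static_assert(sizeof(struct sq_ud_ext_hdr)     == sizeof(struct sq_sge));
	static_assert(sizeof(struct sq_raw_ext_hdr)    == sizeof(struct sq_sge));
	static_assert(sizeof(struct sq_rdma_ext_hdr)   == sizeof(struct sq_sge));
	static_assert(sizeof(struct sq_atomic_ext_hdr) == sizeof(struct sq_sge));
	static_assert(sizeof(struct sq_fr_pmr_ext_hdr) == sizeof(struct sq_sge));
	static_assert(sizeof(struct sq_bind_ext_hdr)   == sizeof(struct sq_sge));
	static_assert(sizeof(struct rq_ext_hdr)        == sizeof(struct sq_sge));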
@@ -52,10 +98,9 @@
 	struct bnxt_qplib_cq *cq;
 	struct bnxt_qplib_hwq hwq;
 	struct bnxt_qplib_swq *swq;
-	struct scatterlist *sglist;
 	int start_idx;
 	int last_idx;
-	u32 nmap;
+	struct bnxt_qplib_sg_info sg_info;
 	u16 eventq_hw_ring_id;
 	spinlock_t lock; /* protect SRQE link list */
 };
@@ -66,38 +111,7 @@
 	u32 size;
 };
 
-#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE	sizeof(struct sq_send)
-
-#define SQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
-#define SQE_MAX_IDX_PER_PG	(SQE_CNT_PER_PG - 1)
-
-static inline u32 get_sqe_pg(u32 val)
-{
-	return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
-}
-
-static inline u32 get_sqe_idx(u32 val)
-{
-	return (val & SQE_MAX_IDX_PER_PG);
-}
-
-#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE	sizeof(struct sq_psn_search)
-
-#define PSNE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
-#define PSNE_MAX_IDX_PER_PG	(PSNE_CNT_PER_PG - 1)
-
-static inline u32 get_psne_pg(u32 val)
-{
-	return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
-}
-
-static inline u32 get_psne_idx(u32 val)
-{
-	return (val & PSNE_MAX_IDX_PER_PG);
-}
-
 #define BNXT_QPLIB_QP_MAX_SGL	6
-
 struct bnxt_qplib_swq {
 	u64 wr_id;
 	int next_idx;
@@ -105,7 +119,10 @@
 	u8 flags;
 	u32 start_psn;
 	u32 next_psn;
+	u32 slot_idx;
+	u8 slots;
 	struct sq_psn_search *psn_search;
+	struct sq_psn_search_ext *psn_ext;
 };
 
 struct bnxt_qplib_swqe {
@@ -226,19 +243,13 @@
 	};
 };
 
-#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE	sizeof(struct rq_wqe)
-
-#define RQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
-#define RQE_MAX_IDX_PER_PG	(RQE_CNT_PER_PG - 1)
-#define RQE_PG(x)		(((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
-#define RQE_IDX(x)		((x) & RQE_MAX_IDX_PER_PG)
-
 struct bnxt_qplib_q {
 	struct bnxt_qplib_hwq hwq;
 	struct bnxt_qplib_swq *swq;
-	struct scatterlist *sglist;
-	u32 nmap;
+	struct bnxt_qplib_db_info dbinfo;
+	struct bnxt_qplib_sg_info sg_info;
 	u32 max_wqe;
+	u16 wqe_size;
 	u16 q_full_delta;
 	u16 max_sge;
 	u32 psn;
@@ -249,19 +260,23 @@
 	u32 phantom_cqe_cnt;
 	u32 next_cq_cons;
 	bool flushed;
+	u32 swq_start;
+	u32 swq_last;
 };
 
 struct bnxt_qplib_qp {
 	struct bnxt_qplib_pd *pd;
 	struct bnxt_qplib_dpi *dpi;
+	struct bnxt_qplib_chip_ctx *cctx;
 	u64 qp_handle;
-#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
 	u32 id;
 	u8 type;
 	u8 sig_type;
-	u32 modify_flags;
+	u8 wqe_mode;
 	u8 state;
 	u8 cur_qp_state;
+	u64 modify_flags;
 	u32 max_inline_data;
 	u32 mtu;
 	u8 path_mtu;
@@ -335,11 +350,18 @@
 	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==	\
 	 !((raw_cons) & (cp_bit)))
 
-static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
+					 u8 slots)
 {
-	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
-		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
-						 &qplib_q->hwq);
+	struct bnxt_qplib_hwq *hwq;
+	int avail;
+
+	hwq = &que->hwq;
+	/* False full is possible, retrying post-send makes sense */
+	avail = hwq->cons - hwq->prod;
+	if (hwq->cons <= hwq->prod)
+		avail += hwq->depth;
+	return avail <= slots;
 }
 
 struct bnxt_qplib_cqe {
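Note (not part of the patch): the rewritten bnxt_qplib_queue_full() replaces the HWQ_CMP() index comparison with a direct free-slot count. A stand-alone sketch of the same math, using an assumed demo_hwq instead of the real struct bnxt_qplib_hwq:

	#include <stdbool.h>

	/* Illustrative ring descriptor; the driver uses struct bnxt_qplib_hwq. */
	struct demo_hwq {
		unsigned int prod;	/* producer index (next slot to fill) */
		unsigned int cons;	/* consumer index (next slot to drain) */
		unsigned int depth;	/* total slots in the ring */
	};

	/* Same availability check as bnxt_qplib_queue_full(): it can report a
	 * false "full" while the consumer index catches up, so callers retry.
	 */
	static bool demo_queue_full(const struct demo_hwq *hwq, unsigned int slots)
	{
		int avail = (int)(hwq->cons - hwq->prod);

		if (hwq->cons <= hwq->prod)
			avail += hwq->depth;	/* free space wraps around the ring end */
		return avail <= (int)slots;
	}

For example, with depth = 64, prod = 60 and cons = 4, avail = 4 - 60 + 64 = 8 slots, so an 8-slot request is refused while a 7-slot request goes through.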
@@ -347,6 +369,7 @@
 	u8 type;
 	u8 opcode;
 	u32 length;
+	u16 cfa_meta;
 	u64 wr_id;
 	union {
 		__be32 immdata;
@@ -369,7 +392,7 @@
 #define BNXT_QPLIB_QUEUE_START_PERIOD	0x01
 struct bnxt_qplib_cq {
 	struct bnxt_qplib_dpi *dpi;
-	void __iomem *dbr_base;
+	struct bnxt_qplib_db_info dbinfo;
 	u32 max_wqe;
 	u32 id;
 	u16 count;
@@ -378,8 +401,7 @@
 	u32 cnq_hw_ring_id;
 	struct bnxt_qplib_nq *nq;
 	bool resize_in_progress;
-	struct scatterlist *sghead;
-	u32 nmap;
+	struct bnxt_qplib_sg_info sg_info;
 	u64 cq_handle;
 
 #define CQ_RESIZE_WAIT_TIME_MS	500
@@ -401,6 +423,7 @@
 	 * of the same QP while manipulating the flush list.
 	 */
 	spinlock_t flush_lock; /* QP flush management */
+	u16 cnq_events;
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
@@ -432,33 +455,33 @@
 #define NQ_DB_CP_FLAGS		(NQ_DB_KEY_CP | \
 				 NQ_DB_IDX_VALID | \
 				 NQ_DB_IRQ_DIS)
-#define NQ_DB_REARM(db, raw_cons, cp_bit) \
-	writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
-#define NQ_DB(db, raw_cons, cp_bit) \
-	writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
+
+struct bnxt_qplib_nq_db {
+	struct bnxt_qplib_reg_desc reg;
+	struct bnxt_qplib_db_info dbinfo;
+};
+
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+			     struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+			      struct bnxt_qplib_srq *srq, u8 event);
 
 struct bnxt_qplib_nq {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev;
+	struct bnxt_qplib_res *res;
+	char *name;
+	struct bnxt_qplib_hwq hwq;
+	struct bnxt_qplib_nq_db nq_db;
+	u16 ring_id;
+	int msix_vec;
+	cpumask_t mask;
+	struct tasklet_struct nq_tasklet;
+	bool requested;
+	int budget;
 
-	int vector;
-	cpumask_t mask;
-	int budget;
-	bool requested;
-	struct tasklet_struct worker;
-	struct bnxt_qplib_hwq hwq;
-
-	u16 bar_reg;
-	u16 bar_reg_off;
-	u16 ring_id;
-	void __iomem *bar_reg_iomem;
-
-	int (*cqn_handler)(struct bnxt_qplib_nq *nq,
-			   struct bnxt_qplib_cq *cq);
-	int (*srqn_handler)(struct bnxt_qplib_nq *nq,
-			    struct bnxt_qplib_srq *srq,
-			    u8 event);
-	struct workqueue_struct *cqn_wq;
-	char name[32];
+	cqn_handler_t cqn_handler;
+	srqn_handler_t srqn_handler;
+	struct workqueue_struct *cqn_wq;
 };
 
 struct bnxt_qplib_nq_work {
@@ -473,19 +496,16 @@
 				 int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
-			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
-					    struct bnxt_qplib_cq *cq),
-			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
-					     struct bnxt_qplib_srq *srq,
-					     u8 event));
+			 cqn_handler_t cqn_handler,
+			 srqn_handler_t srq_handler);
 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 			  struct bnxt_qplib_srq *srq);
 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
 			  struct bnxt_qplib_srq *srq);
 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 			 struct bnxt_qplib_srq *srq);
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
-			   struct bnxt_qplib_srq *srq);
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+			    struct bnxt_qplib_srq *srq);
 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
 			     struct bnxt_qplib_swqe *wqe);
 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
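Note (not part of the patch): with the cqn_handler_t/srqn_handler_t typedefs above, callers now pass plain function pointers into bnxt_qplib_enable_nq(). A caller-side sketch with hypothetical handler names (the real handlers live in the bnxt_re driver, not in this header):

	/* Hypothetical handlers matching the new typedef signatures */
	static int demo_cqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
	{
		/* kick CQ polling for the notified completion queue */
		return 0;
	}

	static int demo_srqn_handler(struct bnxt_qplib_nq *nq,
				     struct bnxt_qplib_srq *srq, u8 event)
	{
		/* react to SRQ threshold/ASYNC events */
		return 0;
	}

	static int demo_enable(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			       int nq_idx, int msix_vector, int bar_reg_offset)
	{
		return bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vector,
					    bar_reg_offset, demo_cqn_handler,
					    demo_srqn_handler);
	}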
@@ -516,7 +536,7 @@
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
 				 unsigned long *flags);
@@ -526,4 +546,64 @@
 				  struct bnxt_qplib_cqe *cqe,
 				  int num_cqes);
 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
+
+static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
+{
+	u32 idx;
+
+	idx = que->swq_start;
+	if (swq_idx)
+		*swq_idx = idx;
+	return &que->swq[idx];
+}
+
+static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
+{
+	que->swq_start = que->swq[idx].next_idx;
+}
+
+static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+{
+	return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+}
+
+static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
+{
+	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+		que->max_wqe : bnxt_qplib_get_depth(que);
+}
+
+static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
+{
+	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+		sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
+}
+
+static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
+{
+	return (wqe_size / sizeof(struct sq_sge));
+}
+
+static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
+{
+	/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128
+	 * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
+	 * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512.
+	 * when 8916 is disabled.
+	 */
+	return (delta * wqe_bytes) / sizeof(struct sq_sge);
+}
+
+static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
+{
+	u16 size = 0;
+	int indx;
+
+	for (indx = 0; indx < wqe->num_sge; indx++)
+		size += wqe->sg_list[indx].size;
+	if (size > max)
+		size = max;
+
+	return size;
+}
 #endif /* __BNXT_QPLIB_FP_H__ */
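Note (not part of the patch): a stand-alone worked example of the slot accounting introduced above, using the sizes the __xlate_qfd() comment implies (a 16-byte sq_sge slot and a 128-byte static-mode sq_send WQE; both sizes are assumptions here, since roce_hsi.h is not part of this diff):

	#include <stdio.h>

	#define DEMO_SGE_SIZE	16U	/* assumed sizeof(struct sq_sge) */
	#define DEMO_SEND_SIZE	128U	/* assumed sizeof(struct sq_send) */

	int main(void)
	{
		unsigned int max_wqe = 256, wqe_size = DEMO_SEND_SIZE;
		/* bnxt_qplib_get_depth(): queue depth expressed in 16-byte slots */
		unsigned int depth = (wqe_size * max_wqe) / DEMO_SGE_SIZE;
		/* bnxt_qplib_set_sq_max_slot(), static mode: slots per fixed-size WQE */
		unsigned int max_slot = DEMO_SEND_SIZE / DEMO_SGE_SIZE;
		/* __xlate_qfd() for Cu/Wh: delta of 128 WQEs of 128 bytes, in slots */
		unsigned int qfd = (128U * DEMO_SEND_SIZE) / DEMO_SGE_SIZE;

		/* prints: depth=2048 max_slot=8 q_full_delta=1024 */
		printf("depth=%u max_slot=%u q_full_delta=%u\n", depth, max_slot, qfd);
		return 0;
	}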