forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/bnxt_re/qplib_fp.c
....@@ -36,12 +36,16 @@
3636 * Description: Fast Path Operators
3737 */
3838
39
+#define dev_fmt(fmt) "QPLIB: " fmt
40
+
3941 #include <linux/interrupt.h>
4042 #include <linux/spinlock.h>
4143 #include <linux/sched.h>
4244 #include <linux/slab.h>
4345 #include <linux/pci.h>
46
+#include <linux/delay.h>
4447 #include <linux/prefetch.h>
48
+#include <linux/if_ether.h>
4549
4650 #include "roce_hsi.h"
4751
....@@ -50,9 +54,7 @@
5054 #include "qplib_sp.h"
5155 #include "qplib_fp.h"
5256
53
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
5457 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
55
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
5658
5759 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
5860 {
....@@ -71,8 +73,7 @@
7173
7274 if (!qp->sq.flushed) {
7375 dev_dbg(&scq->hwq.pdev->dev,
74
- "QPLIB: FP: Adding to SQ Flush list = %p",
75
- qp);
76
+ "FP: Adding to SQ Flush list = %p\n", qp);
7677 bnxt_qplib_cancel_phantom_processing(qp);
7778 list_add_tail(&qp->sq_flush, &scq->sqf_head);
7879 qp->sq.flushed = true;
....@@ -80,8 +81,7 @@
8081 if (!qp->srq) {
8182 if (!qp->rq.flushed) {
8283 dev_dbg(&rcq->hwq.pdev->dev,
83
- "QPLIB: FP: Adding to RQ Flush list = %p",
84
- qp);
84
+ "FP: Adding to RQ Flush list = %p\n", qp);
8585 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
8686 qp->rq.flushed = true;
8787 }
....@@ -178,11 +178,11 @@
178178
179179 if (qp->rq_hdr_buf)
180180 dma_free_coherent(&res->pdev->dev,
181
- rq->hwq.max_elements * qp->rq_hdr_buf_size,
181
+ rq->max_wqe * qp->rq_hdr_buf_size,
182182 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
183183 if (qp->sq_hdr_buf)
184184 dma_free_coherent(&res->pdev->dev,
185
- sq->hwq.max_elements * qp->sq_hdr_buf_size,
185
+ sq->max_wqe * qp->sq_hdr_buf_size,
186186 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
187187 qp->rq_hdr_buf = NULL;
188188 qp->sq_hdr_buf = NULL;
....@@ -199,29 +199,28 @@
199199 struct bnxt_qplib_q *sq = &qp->sq;
200200 int rc = 0;
201201
202
- if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
202
+ if (qp->sq_hdr_buf_size && sq->max_wqe) {
203203 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
204
- sq->hwq.max_elements *
205
- qp->sq_hdr_buf_size,
204
+ sq->max_wqe * qp->sq_hdr_buf_size,
206205 &qp->sq_hdr_buf_map, GFP_KERNEL);
207206 if (!qp->sq_hdr_buf) {
208207 rc = -ENOMEM;
209208 dev_err(&res->pdev->dev,
210
- "QPLIB: Failed to create sq_hdr_buf");
209
+ "Failed to create sq_hdr_buf\n");
211210 goto fail;
212211 }
213212 }
214213
215
- if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
214
+ if (qp->rq_hdr_buf_size && rq->max_wqe) {
216215 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
217
- rq->hwq.max_elements *
216
+ rq->max_wqe *
218217 qp->rq_hdr_buf_size,
219218 &qp->rq_hdr_buf_map,
220219 GFP_KERNEL);
221220 if (!qp->rq_hdr_buf) {
222221 rc = -ENOMEM;
223222 dev_err(&res->pdev->dev,
224
- "QPLIB: Failed to create rq_hdr_buf");
223
+ "Failed to create rq_hdr_buf\n");
225224 goto fail;
226225 }
227226 }
....@@ -232,19 +231,16 @@
232231 return rc;
233232 }
234233
235
-static void bnxt_qplib_service_nq(unsigned long data)
234
+static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
236235 {
237
- struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
238236 struct bnxt_qplib_hwq *hwq = &nq->hwq;
239237 struct nq_base *nqe, **nq_ptr;
240
- struct bnxt_qplib_cq *cq;
241
- int num_cqne_processed = 0;
242
- int num_srqne_processed = 0;
243
- u32 sw_cons, raw_cons;
244
- u16 type;
245238 int budget = nq->budget;
239
+ u32 sw_cons, raw_cons;
246240 uintptr_t q_handle;
241
+ u16 type;
247242
243
+ spin_lock_bh(&hwq->lock);
248244 /* Service the NQ until empty */
249245 raw_cons = hwq->cons;
250246 while (budget--) {
....@@ -269,36 +265,108 @@
269265 q_handle = le32_to_cpu(nqcne->cq_handle_low);
270266 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
271267 << 32;
268
+ if ((unsigned long)cq == q_handle) {
269
+ nqcne->cq_handle_low = 0;
270
+ nqcne->cq_handle_high = 0;
271
+ cq->cnq_events++;
272
+ }
273
+ break;
274
+ }
275
+ default:
276
+ break;
277
+ }
278
+ raw_cons++;
279
+ }
280
+ spin_unlock_bh(&hwq->lock);
281
+}
282
+
283
+/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
284
+ * this CQ.
285
+ */
286
+static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
287
+{
288
+ u32 retry_cnt = 100;
289
+
290
+ while (retry_cnt--) {
291
+ if (cnq_events == cq->cnq_events)
292
+ return;
293
+ usleep_range(50, 100);
294
+ clean_nq(cq->nq, cq);
295
+ }
296
+}
297
+
298
+static void bnxt_qplib_service_nq(struct tasklet_struct *t)
299
+{
300
+ struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
301
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
302
+ int num_srqne_processed = 0;
303
+ int num_cqne_processed = 0;
304
+ struct bnxt_qplib_cq *cq;
305
+ int budget = nq->budget;
306
+ u32 sw_cons, raw_cons;
307
+ struct nq_base *nqe;
308
+ uintptr_t q_handle;
309
+ u16 type;
310
+
311
+ spin_lock_bh(&hwq->lock);
312
+ /* Service the NQ until empty */
313
+ raw_cons = hwq->cons;
314
+ while (budget--) {
315
+ sw_cons = HWQ_CMP(raw_cons, hwq);
316
+ nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
317
+ if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
318
+ break;
319
+
320
+ /*
321
+ * The valid test of the entry must be done first before
322
+ * reading any further.
323
+ */
324
+ dma_rmb();
325
+
326
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
327
+ switch (type) {
328
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
329
+ {
330
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
331
+
332
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
333
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
334
+ << 32;
272335 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
273
- bnxt_qplib_arm_cq_enable(cq);
336
+ if (!cq)
337
+ break;
338
+ bnxt_qplib_armen_db(&cq->dbinfo,
339
+ DBC_DBC_TYPE_CQ_ARMENA);
274340 spin_lock_bh(&cq->compl_lock);
275341 atomic_set(&cq->arm_state, 0);
276342 if (!nq->cqn_handler(nq, (cq)))
277343 num_cqne_processed++;
278344 else
279345 dev_warn(&nq->pdev->dev,
280
- "QPLIB: cqn - type 0x%x not handled",
281
- type);
346
+ "cqn - type 0x%x not handled\n", type);
347
+ cq->cnq_events++;
282348 spin_unlock_bh(&cq->compl_lock);
283349 break;
284350 }
285351 case NQ_BASE_TYPE_SRQ_EVENT:
286352 {
353
+ struct bnxt_qplib_srq *srq;
287354 struct nq_srq_event *nqsrqe =
288355 (struct nq_srq_event *)nqe;
289356
290357 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
291358 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
292359 << 32;
293
- bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
294
- DBR_DBR_TYPE_SRQ_ARMENA);
360
+ srq = (struct bnxt_qplib_srq *)q_handle;
361
+ bnxt_qplib_armen_db(&srq->dbinfo,
362
+ DBC_DBC_TYPE_SRQ_ARMENA);
295363 if (!nq->srqn_handler(nq,
296364 (struct bnxt_qplib_srq *)q_handle,
297365 nqsrqe->event))
298366 num_srqne_processed++;
299367 else
300368 dev_warn(&nq->pdev->dev,
301
- "QPLIB: SRQ event 0x%x not handled",
369
+ "SRQ event 0x%x not handled\n",
302370 nqsrqe->event);
303371 break;
304372 }
....@@ -306,50 +374,52 @@
306374 break;
307375 default:
308376 dev_warn(&nq->pdev->dev,
309
- "QPLIB: nqe with type = 0x%x not handled",
310
- type);
377
+ "nqe with type = 0x%x not handled\n", type);
311378 break;
312379 }
313380 raw_cons++;
314381 }
315382 if (hwq->cons != raw_cons) {
316383 hwq->cons = raw_cons;
317
- NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
384
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
318385 }
386
+ spin_unlock_bh(&hwq->lock);
319387 }
320388
321389 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
322390 {
323391 struct bnxt_qplib_nq *nq = dev_instance;
324392 struct bnxt_qplib_hwq *hwq = &nq->hwq;
325
- struct nq_base **nq_ptr;
326393 u32 sw_cons;
327394
328395 /* Prefetch the NQ element */
329396 sw_cons = HWQ_CMP(hwq->cons, hwq);
330
- nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
331
- prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
397
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
332398
333399 /* Fan out to CPU affinitized kthreads? */
334
- tasklet_schedule(&nq->worker);
400
+ tasklet_schedule(&nq->nq_tasklet);
335401
336402 return IRQ_HANDLED;
337403 }
338404
339405 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
340406 {
341
- tasklet_disable(&nq->worker);
407
+ if (!nq->requested)
408
+ return;
409
+
410
+ tasklet_disable(&nq->nq_tasklet);
342411 /* Mask h/w interrupt */
343
- NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
412
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
344413 /* Sync with last running IRQ handler */
345
- synchronize_irq(nq->vector);
414
+ synchronize_irq(nq->msix_vec);
346415 if (kill)
347
- tasklet_kill(&nq->worker);
348
- if (nq->requested) {
349
- irq_set_affinity_hint(nq->vector, NULL);
350
- free_irq(nq->vector, nq);
351
- nq->requested = false;
352
- }
416
+ tasklet_kill(&nq->nq_tasklet);
417
+
418
+ irq_set_affinity_hint(nq->msix_vec, NULL);
419
+ free_irq(nq->msix_vec, nq);
420
+ kfree(nq->name);
421
+ nq->name = NULL;
422
+ nq->requested = false;
353423 }
354424
355425 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
....@@ -360,91 +430,120 @@
360430 }
361431
362432 /* Make sure the HW is stopped! */
363
- if (nq->requested)
364
- bnxt_qplib_nq_stop_irq(nq, true);
433
+ bnxt_qplib_nq_stop_irq(nq, true);
365434
366
- if (nq->bar_reg_iomem)
367
- iounmap(nq->bar_reg_iomem);
368
- nq->bar_reg_iomem = NULL;
435
+ if (nq->nq_db.reg.bar_reg) {
436
+ iounmap(nq->nq_db.reg.bar_reg);
437
+ nq->nq_db.reg.bar_reg = NULL;
438
+ }
369439
370440 nq->cqn_handler = NULL;
371441 nq->srqn_handler = NULL;
372
- nq->vector = 0;
442
+ nq->msix_vec = 0;
373443 }
374444
375445 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
376446 int msix_vector, bool need_init)
377447 {
448
+ struct bnxt_qplib_res *res = nq->res;
378449 int rc;
379450
380451 if (nq->requested)
381452 return -EFAULT;
382453
383
- nq->vector = msix_vector;
454
+ nq->msix_vec = msix_vector;
384455 if (need_init)
385
- tasklet_init(&nq->worker, bnxt_qplib_service_nq,
386
- (unsigned long)nq);
456
+ tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
387457 else
388
- tasklet_enable(&nq->worker);
458
+ tasklet_enable(&nq->nq_tasklet);
389459
390
- snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
391
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
392
- if (rc)
460
+ nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
461
+ nq_indx, pci_name(res->pdev));
462
+ if (!nq->name)
463
+ return -ENOMEM;
464
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
465
+ if (rc) {
466
+ kfree(nq->name);
467
+ nq->name = NULL;
468
+ tasklet_disable(&nq->nq_tasklet);
393469 return rc;
470
+ }
394471
395472 cpumask_clear(&nq->mask);
396473 cpumask_set_cpu(nq_indx, &nq->mask);
397
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
474
+ rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
398475 if (rc) {
399476 dev_warn(&nq->pdev->dev,
400
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
401
- nq->vector, nq_indx);
477
+ "set affinity failed; vector: %d nq_idx: %d\n",
478
+ nq->msix_vec, nq_indx);
402479 }
403480 nq->requested = true;
404
- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
481
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
405482
483
+ return rc;
484
+}
485
+
486
+static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
487
+{
488
+ resource_size_t reg_base;
489
+ struct bnxt_qplib_nq_db *nq_db;
490
+ struct pci_dev *pdev;
491
+ int rc = 0;
492
+
493
+ pdev = nq->pdev;
494
+ nq_db = &nq->nq_db;
495
+
496
+ nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
497
+ nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
498
+ if (!nq_db->reg.bar_base) {
499
+ dev_err(&pdev->dev, "NQ BAR region %d resc start is 0!\n",
500
+ nq_db->reg.bar_id);
501
+ rc = -ENOMEM;
502
+ goto fail;
503
+ }
504
+
505
+ reg_base = nq_db->reg.bar_base + reg_offt;
506
+ /* Unconditionally map 8 bytes to support 57500 series */
507
+ nq_db->reg.len = 8;
508
+ nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
509
+ if (!nq_db->reg.bar_reg) {
510
+ dev_err(&pdev->dev, "NQ BAR region %d mapping failed\n",
511
+ nq_db->reg.bar_id);
512
+ rc = -ENOMEM;
513
+ goto fail;
514
+ }
515
+
516
+ nq_db->dbinfo.db = nq_db->reg.bar_reg;
517
+ nq_db->dbinfo.hwq = &nq->hwq;
518
+ nq_db->dbinfo.xid = nq->ring_id;
519
+fail:
406520 return rc;
407521 }
408522
409523 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
410524 int nq_idx, int msix_vector, int bar_reg_offset,
411
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
412
- struct bnxt_qplib_cq *),
413
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
414
- struct bnxt_qplib_srq *,
415
- u8 event))
525
+ cqn_handler_t cqn_handler,
526
+ srqn_handler_t srqn_handler)
416527 {
417
- resource_size_t nq_base;
418528 int rc = -1;
419529
420
- if (cqn_handler)
421
- nq->cqn_handler = cqn_handler;
422
-
423
- if (srqn_handler)
424
- nq->srqn_handler = srqn_handler;
530
+ nq->pdev = pdev;
531
+ nq->cqn_handler = cqn_handler;
532
+ nq->srqn_handler = srqn_handler;
425533
426534 /* Have a task to schedule CQ notifiers in post send case */
427535 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
428536 if (!nq->cqn_wq)
429537 return -ENOMEM;
430538
431
- nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
432
- nq->bar_reg_off = bar_reg_offset;
433
- nq_base = pci_resource_start(pdev, nq->bar_reg);
434
- if (!nq_base) {
435
- rc = -ENOMEM;
539
+ rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
540
+ if (rc)
436541 goto fail;
437
- }
438
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
439
- if (!nq->bar_reg_iomem) {
440
- rc = -ENOMEM;
441
- goto fail;
442
- }
443542
444543 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
445544 if (rc) {
446545 dev_err(&nq->pdev->dev,
447
- "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
546
+ "Failed to request irq for nq-idx %d\n", nq_idx);
448547 goto fail;
449548 }
450549
....@@ -457,50 +556,39 @@
457556 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
458557 {
459558 if (nq->hwq.max_elements) {
460
- bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
559
+ bnxt_qplib_free_hwq(nq->res, &nq->hwq);
461560 nq->hwq.max_elements = 0;
462561 }
463562 }
464563
465
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
564
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
466565 {
467
- nq->pdev = pdev;
566
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
567
+ struct bnxt_qplib_sg_info sginfo = {};
568
+
569
+ nq->pdev = res->pdev;
570
+ nq->res = res;
468571 if (!nq->hwq.max_elements ||
469572 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
470573 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
471574
472
- if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
473
- &nq->hwq.max_elements,
474
- BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
475
- PAGE_SIZE, HWQ_TYPE_L2_CMPL))
575
+ sginfo.pgsize = PAGE_SIZE;
576
+ sginfo.pgshft = PAGE_SHIFT;
577
+ hwq_attr.res = res;
578
+ hwq_attr.sginfo = &sginfo;
579
+ hwq_attr.depth = nq->hwq.max_elements;
580
+ hwq_attr.stride = sizeof(struct nq_base);
581
+ hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
582
+ if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
583
+ dev_err(&nq->pdev->dev, "FP NQ allocation failed\n");
476584 return -ENOMEM;
477
-
585
+ }
478586 nq->budget = 8;
479587 return 0;
480588 }
481589
482590 /* SRQ */
483
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
484
-{
485
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
486
- struct dbr_dbr db_msg = { 0 };
487
- void __iomem *db;
488
- u32 sw_prod = 0;
489
-
490
- /* Ring DB */
491
- sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
492
- HWQ_CMP(srq_hwq->prod, srq_hwq);
493
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
494
- DBR_DBR_INDEX_MASK);
495
- db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
496
- DBR_DBR_XID_MASK) | arm_type);
497
- db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
498
- srq->dbr_base : srq->dpi->dbr;
499
- wmb(); /* barrier before db ring */
500
- __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
501
-}
502
-
503
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
591
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
504592 struct bnxt_qplib_srq *srq)
505593 {
506594 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
....@@ -514,31 +602,32 @@
514602 /* Configure the request */
515603 req.srq_cid = cpu_to_le32(srq->id);
516604
517
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
518
- (void *)&resp, NULL, 0);
519
- if (rc)
520
- return rc;
521
-
522
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
605
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
606
+ (struct creq_base *)&resp, NULL, 0);
523607 kfree(srq->swq);
524
- return 0;
608
+ if (rc)
609
+ return;
610
+ bnxt_qplib_free_hwq(res, &srq->hwq);
525611 }
526612
527613 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
528614 struct bnxt_qplib_srq *srq)
529615 {
530616 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
531
- struct cmdq_create_srq req;
617
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
532618 struct creq_create_srq_resp resp;
619
+ struct cmdq_create_srq req;
533620 struct bnxt_qplib_pbl *pbl;
534621 u16 cmd_flags = 0;
622
+ u16 pg_sz_lvl;
535623 int rc, idx;
536624
537
- srq->hwq.max_elements = srq->max_wqe;
538
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
539
- srq->nmap, &srq->hwq.max_elements,
540
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
541
- PAGE_SIZE, HWQ_TYPE_QUEUE);
625
+ hwq_attr.res = res;
626
+ hwq_attr.sginfo = &srq->sg_info;
627
+ hwq_attr.depth = srq->max_wqe;
628
+ hwq_attr.stride = srq->wqe_size;
629
+ hwq_attr.type = HWQ_TYPE_QUEUE;
630
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
542631 if (rc)
543632 goto exit;
544633
....@@ -557,22 +646,11 @@
557646
558647 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
559648 pbl = &srq->hwq.pbl[PBL_LVL_0];
560
- req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
561
- CMDQ_CREATE_SRQ_LVL_MASK) <<
562
- CMDQ_CREATE_SRQ_LVL_SFT) |
563
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
564
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
565
- pbl->pg_size == ROCE_PG_SIZE_8K ?
566
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
567
- pbl->pg_size == ROCE_PG_SIZE_64K ?
568
- CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
569
- pbl->pg_size == ROCE_PG_SIZE_2M ?
570
- CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
571
- pbl->pg_size == ROCE_PG_SIZE_8M ?
572
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
573
- pbl->pg_size == ROCE_PG_SIZE_1G ?
574
- CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
575
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
649
+ pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
650
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
651
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
652
+ CMDQ_CREATE_SRQ_LVL_SFT;
653
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
576654 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
577655 req.pd_id = cpu_to_le32(srq->pd->id);
578656 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
....@@ -590,14 +668,18 @@
590668 srq->swq[srq->last_idx].next_idx = -1;
591669
592670 srq->id = le32_to_cpu(resp.xid);
593
- srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
671
+ srq->dbinfo.hwq = &srq->hwq;
672
+ srq->dbinfo.xid = srq->id;
673
+ srq->dbinfo.db = srq->dpi->dbr;
674
+ srq->dbinfo.max_slot = 1;
675
+ srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
594676 if (srq->threshold)
595
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
677
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
596678 srq->arm_req = false;
597679
598680 return 0;
599681 fail:
600
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
682
+ bnxt_qplib_free_hwq(res, &srq->hwq);
601683 kfree(srq->swq);
602684 exit:
603685 return rc;
....@@ -616,7 +698,7 @@
616698 srq_hwq->max_elements - sw_cons + sw_prod;
617699 if (count > srq->threshold) {
618700 srq->arm_req = false;
619
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
701
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
620702 } else {
621703 /* Deferred arming */
622704 srq->arm_req = true;
....@@ -657,15 +739,15 @@
657739 struct bnxt_qplib_swqe *wqe)
658740 {
659741 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
660
- struct rq_wqe *srqe, **srqe_ptr;
742
+ struct rq_wqe *srqe;
661743 struct sq_sge *hw_sge;
662744 u32 sw_prod, sw_cons, count = 0;
663745 int i, rc = 0, next;
664746
665747 spin_lock(&srq_hwq->lock);
666748 if (srq->start_idx == srq->last_idx) {
667
- dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
668
- srq->id);
749
+ dev_err(&srq_hwq->pdev->dev,
750
+ "FP: SRQ (0x%x) is full!\n", srq->id);
669751 rc = -EINVAL;
670752 spin_unlock(&srq_hwq->lock);
671753 goto done;
....@@ -675,9 +757,8 @@
675757 spin_unlock(&srq_hwq->lock);
676758
677759 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
678
- srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
679
- srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
680
- memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
760
+ srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
761
+ memset(srqe, 0, srq->wqe_size);
681762 /* Calculate wqe_size16 and data_len */
682763 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
683764 i < wqe->num_sge; i++, hw_sge++) {
....@@ -705,27 +786,52 @@
705786 srq_hwq->max_elements - sw_cons + sw_prod;
706787 spin_unlock(&srq_hwq->lock);
707788 /* Ring DB */
708
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
789
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
709790 if (srq->arm_req == true && count > srq->threshold) {
710791 srq->arm_req = false;
711
- bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
792
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
712793 }
713794 done:
714795 return rc;
715796 }
716797
717798 /* QP */
799
+
800
+static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
801
+{
802
+ int rc = 0;
803
+ int indx;
804
+
805
+ que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
806
+ if (!que->swq) {
807
+ rc = -ENOMEM;
808
+ goto out;
809
+ }
810
+
811
+ que->swq_start = 0;
812
+ que->swq_last = que->max_wqe - 1;
813
+ for (indx = 0; indx < que->max_wqe; indx++)
814
+ que->swq[indx].next_idx = indx + 1;
815
+ que->swq[que->swq_last].next_idx = 0; /* Make it circular */
816
+ que->swq_last = 0;
817
+out:
818
+ return rc;
819
+}
820
+
718821 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
719822 {
823
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
720824 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
721
- struct cmdq_create_qp1 req;
722
- struct creq_create_qp1_resp resp;
723
- struct bnxt_qplib_pbl *pbl;
724825 struct bnxt_qplib_q *sq = &qp->sq;
725826 struct bnxt_qplib_q *rq = &qp->rq;
726
- int rc;
827
+ struct creq_create_qp1_resp resp;
828
+ struct cmdq_create_qp1 req;
829
+ struct bnxt_qplib_pbl *pbl;
727830 u16 cmd_flags = 0;
728831 u32 qp_flags = 0;
832
+ u8 pg_sz_lvl;
833
+ u32 tbl_indx;
834
+ int rc;
729835
730836 RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
731837
....@@ -735,98 +841,65 @@
735841 req.qp_handle = cpu_to_le64(qp->qp_handle);
736842
737843 /* SQ */
738
- sq->hwq.max_elements = sq->max_wqe;
739
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
740
- &sq->hwq.max_elements,
741
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
742
- PAGE_SIZE, HWQ_TYPE_QUEUE);
844
+ hwq_attr.res = res;
845
+ hwq_attr.sginfo = &sq->sg_info;
846
+ hwq_attr.stride = sizeof(struct sq_sge);
847
+ hwq_attr.depth = bnxt_qplib_get_depth(sq);
848
+ hwq_attr.type = HWQ_TYPE_QUEUE;
849
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
743850 if (rc)
744851 goto exit;
745852
746
- sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
747
- if (!sq->swq) {
748
- rc = -ENOMEM;
853
+ rc = bnxt_qplib_alloc_init_swq(sq);
854
+ if (rc)
749855 goto fail_sq;
750
- }
856
+
857
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
751858 pbl = &sq->hwq.pbl[PBL_LVL_0];
752859 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
753
- req.sq_pg_size_sq_lvl =
754
- ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
755
- << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
756
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
757
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
758
- pbl->pg_size == ROCE_PG_SIZE_8K ?
759
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
760
- pbl->pg_size == ROCE_PG_SIZE_64K ?
761
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
762
- pbl->pg_size == ROCE_PG_SIZE_2M ?
763
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
764
- pbl->pg_size == ROCE_PG_SIZE_8M ?
765
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
766
- pbl->pg_size == ROCE_PG_SIZE_1G ?
767
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
768
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
769
-
770
- if (qp->scq)
771
- req.scq_cid = cpu_to_le32(qp->scq->id);
772
-
773
- qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
860
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
861
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
862
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
863
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
864
+ req.sq_fwo_sq_sge =
865
+ cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
866
+ CMDQ_CREATE_QP1_SQ_SGE_SFT);
867
+ req.scq_cid = cpu_to_le32(qp->scq->id);
774868
775869 /* RQ */
776870 if (rq->max_wqe) {
777
- rq->hwq.max_elements = qp->rq.max_wqe;
778
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
779
- &rq->hwq.max_elements,
780
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
781
- PAGE_SIZE, HWQ_TYPE_QUEUE);
871
+ hwq_attr.res = res;
872
+ hwq_attr.sginfo = &rq->sg_info;
873
+ hwq_attr.stride = sizeof(struct sq_sge);
874
+ hwq_attr.depth = bnxt_qplib_get_depth(rq);
875
+ hwq_attr.type = HWQ_TYPE_QUEUE;
876
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
782877 if (rc)
783
- goto fail_sq;
784
-
785
- rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
786
- GFP_KERNEL);
787
- if (!rq->swq) {
788
- rc = -ENOMEM;
878
+ goto sq_swq;
879
+ rc = bnxt_qplib_alloc_init_swq(rq);
880
+ if (rc)
789881 goto fail_rq;
790
- }
882
+ req.rq_size = cpu_to_le32(rq->max_wqe);
791883 pbl = &rq->hwq.pbl[PBL_LVL_0];
792884 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
793
- req.rq_pg_size_rq_lvl =
794
- ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
795
- CMDQ_CREATE_QP1_RQ_LVL_SFT) |
796
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
797
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
798
- pbl->pg_size == ROCE_PG_SIZE_8K ?
799
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
800
- pbl->pg_size == ROCE_PG_SIZE_64K ?
801
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
802
- pbl->pg_size == ROCE_PG_SIZE_2M ?
803
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
804
- pbl->pg_size == ROCE_PG_SIZE_8M ?
805
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
806
- pbl->pg_size == ROCE_PG_SIZE_1G ?
807
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
808
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
809
- if (qp->rcq)
810
- req.rcq_cid = cpu_to_le32(qp->rcq->id);
885
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
886
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
887
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
888
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
889
+ req.rq_fwo_rq_sge =
890
+ cpu_to_le16((rq->max_sge &
891
+ CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
892
+ CMDQ_CREATE_QP1_RQ_SGE_SFT);
811893 }
812
-
894
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
813895 /* Header buffer - allow hdr_buf pass in */
814896 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
815897 if (rc) {
816898 rc = -ENOMEM;
817
- goto fail;
899
+ goto rq_rwq;
818900 }
901
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
819902 req.qp_flags = cpu_to_le32(qp_flags);
820
- req.sq_size = cpu_to_le32(sq->hwq.max_elements);
821
- req.rq_size = cpu_to_le32(rq->hwq.max_elements);
822
-
823
- req.sq_fwo_sq_sge =
824
- cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
825
- CMDQ_CREATE_QP1_SQ_SGE_SFT);
826
- req.rq_fwo_rq_sge =
827
- cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
828
- CMDQ_CREATE_QP1_RQ_SGE_SFT);
829
-
830903 req.pd_id = cpu_to_le32(qp->pd->id);
831904
832905 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
....@@ -836,38 +909,72 @@
836909
837910 qp->id = le32_to_cpu(resp.xid);
838911 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
839
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
840
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
912
+ qp->cctx = res->cctx;
913
+ sq->dbinfo.hwq = &sq->hwq;
914
+ sq->dbinfo.xid = qp->id;
915
+ sq->dbinfo.db = qp->dpi->dbr;
916
+ sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
917
+ if (rq->max_wqe) {
918
+ rq->dbinfo.hwq = &rq->hwq;
919
+ rq->dbinfo.xid = qp->id;
920
+ rq->dbinfo.db = qp->dpi->dbr;
921
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
922
+ }
923
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
924
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
925
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
841926
842927 return 0;
843928
844929 fail:
845930 bnxt_qplib_free_qp_hdr_buf(res, qp);
846
-fail_rq:
847
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
931
+rq_rwq:
848932 kfree(rq->swq);
849
-fail_sq:
850
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
933
+fail_rq:
934
+ bnxt_qplib_free_hwq(res, &rq->hwq);
935
+sq_swq:
851936 kfree(sq->swq);
937
+fail_sq:
938
+ bnxt_qplib_free_hwq(res, &sq->hwq);
852939 exit:
853940 return rc;
941
+}
942
+
943
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
944
+{
945
+ struct bnxt_qplib_hwq *hwq;
946
+ struct bnxt_qplib_q *sq;
947
+ u64 fpsne, psn_pg;
948
+ u16 indx_pad = 0;
949
+
950
+ sq = &qp->sq;
951
+ hwq = &sq->hwq;
952
+ /* First psn entry */
953
+ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
954
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
955
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
956
+ hwq->pad_pgofft = indx_pad;
957
+ hwq->pad_pg = (u64 *)psn_pg;
958
+ hwq->pad_stride = size;
854959 }
855960
856961 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
857962 {
858963 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
859
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
860
- struct cmdq_create_qp req;
861
- struct creq_create_qp_resp resp;
862
- struct bnxt_qplib_pbl *pbl;
863
- struct sq_psn_search **psn_search_ptr;
864
- unsigned long int psn_search, poff = 0;
964
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
965
+ struct bnxt_qplib_sg_info sginfo = {};
865966 struct bnxt_qplib_q *sq = &qp->sq;
866967 struct bnxt_qplib_q *rq = &qp->rq;
968
+ struct creq_create_qp_resp resp;
969
+ int rc, req_size, psn_sz = 0;
867970 struct bnxt_qplib_hwq *xrrq;
868
- int i, rc, req_size, psn_sz;
869
- u16 cmd_flags = 0, max_ssge;
870
- u32 sw_prod, qp_flags = 0;
971
+ struct bnxt_qplib_pbl *pbl;
972
+ struct cmdq_create_qp req;
973
+ u16 cmd_flags = 0;
974
+ u32 qp_flags = 0;
975
+ u8 pg_sz_lvl;
976
+ u32 tbl_indx;
977
+ u16 nsge;
871978
872979 RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
873980
....@@ -877,142 +984,86 @@
877984 req.qp_handle = cpu_to_le64(qp->qp_handle);
878985
879986 /* SQ */
880
- psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
881
- sizeof(struct sq_psn_search) : 0;
882
- sq->hwq.max_elements = sq->max_wqe;
883
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
884
- sq->nmap, &sq->hwq.max_elements,
885
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
886
- psn_sz,
887
- PAGE_SIZE, HWQ_TYPE_QUEUE);
987
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
988
+ psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
989
+ sizeof(struct sq_psn_search_ext) :
990
+ sizeof(struct sq_psn_search);
991
+ }
992
+
993
+ hwq_attr.res = res;
994
+ hwq_attr.sginfo = &sq->sg_info;
995
+ hwq_attr.stride = sizeof(struct sq_sge);
996
+ hwq_attr.depth = bnxt_qplib_get_depth(sq);
997
+ hwq_attr.aux_stride = psn_sz;
998
+ hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
999
+ hwq_attr.type = HWQ_TYPE_QUEUE;
1000
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
8881001 if (rc)
8891002 goto exit;
8901003
891
- sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
892
- if (!sq->swq) {
893
- rc = -ENOMEM;
1004
+ rc = bnxt_qplib_alloc_init_swq(sq);
1005
+ if (rc)
8941006 goto fail_sq;
895
- }
896
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
897
- if (psn_sz) {
898
- psn_search_ptr = (struct sq_psn_search **)
899
- &hw_sq_send_ptr[get_sqe_pg
900
- (sq->hwq.max_elements)];
901
- psn_search = (unsigned long int)
902
- &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
903
- [get_sqe_idx(sq->hwq.max_elements)];
904
- if (psn_search & ~PAGE_MASK) {
905
- /* If the psn_search does not start on a page boundary,
906
- * then calculate the offset
907
- */
908
- poff = (psn_search & ~PAGE_MASK) /
909
- BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
910
- }
911
- for (i = 0; i < sq->hwq.max_elements; i++)
912
- sq->swq[i].psn_search =
913
- &psn_search_ptr[get_psne_pg(i + poff)]
914
- [get_psne_idx(i + poff)];
915
- }
1007
+
1008
+ if (psn_sz)
1009
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
1010
+
1011
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
9161012 pbl = &sq->hwq.pbl[PBL_LVL_0];
9171013 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
918
- req.sq_pg_size_sq_lvl =
919
- ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
920
- << CMDQ_CREATE_QP_SQ_LVL_SFT) |
921
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
922
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
923
- pbl->pg_size == ROCE_PG_SIZE_8K ?
924
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
925
- pbl->pg_size == ROCE_PG_SIZE_64K ?
926
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
927
- pbl->pg_size == ROCE_PG_SIZE_2M ?
928
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
929
- pbl->pg_size == ROCE_PG_SIZE_8M ?
930
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
931
- pbl->pg_size == ROCE_PG_SIZE_1G ?
932
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
933
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
1014
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1015
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1016
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1017
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
1018
+ req.sq_fwo_sq_sge =
1019
+ cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1020
+ CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1021
+ req.scq_cid = cpu_to_le32(qp->scq->id);
9341022
935
- /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
936
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
937
- for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
938
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
939
- [get_sqe_idx(sw_prod)];
940
- hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
1023
+ /* RQ */
1024
+ if (!qp->srq) {
1025
+ hwq_attr.res = res;
1026
+ hwq_attr.sginfo = &rq->sg_info;
1027
+ hwq_attr.stride = sizeof(struct sq_sge);
1028
+ hwq_attr.depth = bnxt_qplib_get_depth(rq);
1029
+ hwq_attr.aux_stride = 0;
1030
+ hwq_attr.aux_depth = 0;
1031
+ hwq_attr.type = HWQ_TYPE_QUEUE;
1032
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1033
+ if (rc)
1034
+ goto sq_swq;
1035
+ rc = bnxt_qplib_alloc_init_swq(rq);
1036
+ if (rc)
1037
+ goto fail_rq;
1038
+
1039
+ req.rq_size = cpu_to_le32(rq->max_wqe);
1040
+ pbl = &rq->hwq.pbl[PBL_LVL_0];
1041
+ req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1042
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1043
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1044
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1045
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
1046
+ nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1047
+ 6 : rq->max_sge;
1048
+ req.rq_fwo_rq_sge =
1049
+ cpu_to_le16(((nsge &
1050
+ CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1051
+ CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1052
+ } else {
1053
+ /* SRQ */
1054
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1055
+ req.srq_cid = cpu_to_le32(qp->srq->id);
9411056 }
942
-
943
- if (qp->scq)
944
- req.scq_cid = cpu_to_le32(qp->scq->id);
1057
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
9451058
9461059 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
9471060 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
9481061 if (qp->sig_type)
9491062 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
950
-
951
- /* RQ */
952
- if (rq->max_wqe) {
953
- rq->hwq.max_elements = rq->max_wqe;
954
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
955
- rq->nmap, &rq->hwq.max_elements,
956
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
957
- PAGE_SIZE, HWQ_TYPE_QUEUE);
958
- if (rc)
959
- goto fail_sq;
960
-
961
- rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
962
- GFP_KERNEL);
963
- if (!rq->swq) {
964
- rc = -ENOMEM;
965
- goto fail_rq;
966
- }
967
- pbl = &rq->hwq.pbl[PBL_LVL_0];
968
- req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
969
- req.rq_pg_size_rq_lvl =
970
- ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
971
- CMDQ_CREATE_QP_RQ_LVL_SFT) |
972
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
973
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
974
- pbl->pg_size == ROCE_PG_SIZE_8K ?
975
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
976
- pbl->pg_size == ROCE_PG_SIZE_64K ?
977
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
978
- pbl->pg_size == ROCE_PG_SIZE_2M ?
979
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
980
- pbl->pg_size == ROCE_PG_SIZE_8M ?
981
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
982
- pbl->pg_size == ROCE_PG_SIZE_1G ?
983
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
984
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
985
- } else {
986
- /* SRQ */
987
- if (qp->srq) {
988
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
989
- req.srq_cid = cpu_to_le32(qp->srq->id);
990
- }
991
- }
992
-
993
- if (qp->rcq)
994
- req.rcq_cid = cpu_to_le32(qp->rcq->id);
1063
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1064
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
9951065 req.qp_flags = cpu_to_le32(qp_flags);
996
- req.sq_size = cpu_to_le32(sq->hwq.max_elements);
997
- req.rq_size = cpu_to_le32(rq->hwq.max_elements);
998
- qp->sq_hdr_buf = NULL;
999
- qp->rq_hdr_buf = NULL;
10001066
1001
- rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
1002
- if (rc)
1003
- goto fail_rq;
1004
-
1005
- /* CTRL-22434: Irrespective of the requested SGE count on the SQ
1006
- * always create the QP with max send sges possible if the requested
1007
- * inline size is greater than 0.
1008
- */
1009
- max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1010
- req.sq_fwo_sq_sge = cpu_to_le16(
1011
- ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1012
- << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1013
- req.rq_fwo_rq_sge = cpu_to_le16(
1014
- ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1015
- << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
10161067 /* ORRQ and IRRQ */
10171068 if (psn_sz) {
10181069 xrrq = &qp->orrq;
....@@ -1021,12 +1072,19 @@
10211072 req_size = xrrq->max_elements *
10221073 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
10231074 req_size &= ~(PAGE_SIZE - 1);
1024
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1025
- &xrrq->max_elements,
1026
- BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1027
- 0, req_size, HWQ_TYPE_CTX);
1075
+ sginfo.pgsize = req_size;
1076
+ sginfo.pgshft = PAGE_SHIFT;
1077
+
1078
+ hwq_attr.res = res;
1079
+ hwq_attr.sginfo = &sginfo;
1080
+ hwq_attr.depth = xrrq->max_elements;
1081
+ hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1082
+ hwq_attr.aux_stride = 0;
1083
+ hwq_attr.aux_depth = 0;
1084
+ hwq_attr.type = HWQ_TYPE_CTX;
1085
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
10281086 if (rc)
1029
- goto fail_buf_free;
1087
+ goto rq_swq;
10301088 pbl = &xrrq->pbl[PBL_LVL_0];
10311089 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
10321090
....@@ -1036,11 +1094,10 @@
10361094 req_size = xrrq->max_elements *
10371095 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
10381096 req_size &= ~(PAGE_SIZE - 1);
1039
-
1040
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1041
- &xrrq->max_elements,
1042
- BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1043
- 0, req_size, HWQ_TYPE_CTX);
1097
+ sginfo.pgsize = req_size;
1098
+ hwq_attr.depth = xrrq->max_elements;
1099
+ hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1100
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
10441101 if (rc)
10451102 goto fail_orrq;
10461103
....@@ -1058,25 +1115,34 @@
10581115 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
10591116 INIT_LIST_HEAD(&qp->sq_flush);
10601117 INIT_LIST_HEAD(&qp->rq_flush);
1061
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
1062
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1118
+ qp->cctx = res->cctx;
1119
+ sq->dbinfo.hwq = &sq->hwq;
1120
+ sq->dbinfo.xid = qp->id;
1121
+ sq->dbinfo.db = qp->dpi->dbr;
1122
+ sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1123
+ if (rq->max_wqe) {
1124
+ rq->dbinfo.hwq = &rq->hwq;
1125
+ rq->dbinfo.xid = qp->id;
1126
+ rq->dbinfo.db = qp->dpi->dbr;
1127
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1128
+ }
1129
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1130
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1131
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
10631132
10641133 return 0;
1065
-
10661134 fail:
1067
- if (qp->irrq.max_elements)
1068
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1135
+ bnxt_qplib_free_hwq(res, &qp->irrq);
10691136 fail_orrq:
1070
- if (qp->orrq.max_elements)
1071
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1072
-fail_buf_free:
1073
- bnxt_qplib_free_qp_hdr_buf(res, qp);
1074
-fail_rq:
1075
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1137
+ bnxt_qplib_free_hwq(res, &qp->orrq);
1138
+rq_swq:
10761139 kfree(rq->swq);
1077
-fail_sq:
1078
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1140
+fail_rq:
1141
+ bnxt_qplib_free_hwq(res, &rq->hwq);
1142
+sq_swq:
10791143 kfree(sq->swq);
1144
+fail_sq:
1145
+ bnxt_qplib_free_hwq(res, &sq->hwq);
10801146 exit:
10811147 return rc;
10821148 }
....@@ -1326,7 +1392,7 @@
13261392 }
13271393 }
13281394 if (i == res->sgid_tbl.max)
1329
- dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
1395
+ dev_warn(&res->pdev->dev, "SGID not found??\n");
13301396
13311397 qp->ah.hop_limit = sb->hop_limit;
13321398 qp->ah.traffic_class = sb->traffic_class;
....@@ -1362,12 +1428,11 @@
13621428 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
13631429 {
13641430 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1365
- struct cq_base *hw_cqe, **hw_cqe_ptr;
1431
+ struct cq_base *hw_cqe;
13661432 int i;
13671433
13681434 for (i = 0; i < cq_hwq->max_elements; i++) {
1369
- hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1370
- hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1435
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
13711436 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
13721437 continue;
13731438 /*
....@@ -1408,10 +1473,12 @@
14081473 struct cmdq_destroy_qp req;
14091474 struct creq_destroy_qp_resp resp;
14101475 u16 cmd_flags = 0;
1476
+ u32 tbl_indx;
14111477 int rc;
14121478
1413
- rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1414
- rcfw->qp_tbl[qp->id].qp_handle = NULL;
1479
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1480
+ rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1481
+ rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
14151482
14161483 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
14171484
....@@ -1419,8 +1486,8 @@
14191486 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
14201487 (void *)&resp, NULL, 0);
14211488 if (rc) {
1422
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
1423
- rcfw->qp_tbl[qp->id].qp_handle = qp;
1489
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1490
+ rcfw->qp_tbl[tbl_indx].qp_handle = qp;
14241491 return rc;
14251492 }
14261493
....@@ -1431,16 +1498,16 @@
14311498 struct bnxt_qplib_qp *qp)
14321499 {
14331500 bnxt_qplib_free_qp_hdr_buf(res, qp);
1434
- bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1501
+ bnxt_qplib_free_hwq(res, &qp->sq.hwq);
14351502 kfree(qp->sq.swq);
14361503
1437
- bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1504
+ bnxt_qplib_free_hwq(res, &qp->rq.hwq);
14381505 kfree(qp->rq.swq);
14391506
14401507 if (qp->irrq.max_elements)
1441
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1508
+ bnxt_qplib_free_hwq(res, &qp->irrq);
14421509 if (qp->orrq.max_elements)
1443
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1510
+ bnxt_qplib_free_hwq(res, &qp->orrq);
14441511
14451512 }
14461513
....@@ -1453,7 +1520,7 @@
14531520 memset(sge, 0, sizeof(*sge));
14541521
14551522 if (qp->sq_hdr_buf) {
1456
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1523
+ sw_prod = sq->swq_start;
14571524 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
14581525 sw_prod * qp->sq_hdr_buf_size);
14591526 sge->lkey = 0xFFFFFFFF;
....@@ -1467,7 +1534,7 @@
14671534 {
14681535 struct bnxt_qplib_q *rq = &qp->rq;
14691536
1470
- return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1537
+ return rq->swq_start;
14711538 }
14721539
14731540 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
....@@ -1484,7 +1551,7 @@
14841551 memset(sge, 0, sizeof(*sge));
14851552
14861553 if (qp->rq_hdr_buf) {
1487
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1554
+ sw_prod = rq->swq_start;
14881555 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
14891556 sw_prod * qp->rq_hdr_buf_size);
14901557 sge->lkey = 0xFFFFFFFF;
....@@ -1494,144 +1561,262 @@
14941561 return NULL;
14951562 }
14961563
1564
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1565
+ struct bnxt_qplib_swqe *wqe,
1566
+ struct bnxt_qplib_swq *swq)
1567
+{
1568
+ struct sq_psn_search_ext *psns_ext;
1569
+ struct sq_psn_search *psns;
1570
+ u32 flg_npsn;
1571
+ u32 op_spsn;
1572
+
1573
+ if (!swq->psn_search)
1574
+ return;
1575
+ psns = swq->psn_search;
1576
+ psns_ext = swq->psn_ext;
1577
+
1578
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1579
+ SQ_PSN_SEARCH_START_PSN_MASK);
1580
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1581
+ SQ_PSN_SEARCH_OPCODE_MASK);
1582
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1583
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
1584
+
1585
+ if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1586
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1587
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1588
+ psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1589
+ } else {
1590
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
1591
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
1592
+ }
1593
+}
1594
+
1595
+static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1596
+ struct bnxt_qplib_swqe *wqe,
1597
+ u16 *idx)
1598
+{
1599
+ struct bnxt_qplib_hwq *hwq;
1600
+ int len, t_len, offt;
1601
+ bool pull_dst = true;
1602
+ void *il_dst = NULL;
1603
+ void *il_src = NULL;
1604
+ int t_cplen, cplen;
1605
+ int indx;
1606
+
1607
+ hwq = &qp->sq.hwq;
1608
+ t_len = 0;
1609
+ for (indx = 0; indx < wqe->num_sge; indx++) {
1610
+ len = wqe->sg_list[indx].size;
1611
+ il_src = (void *)wqe->sg_list[indx].addr;
1612
+ t_len += len;
1613
+ if (t_len > qp->max_inline_data)
1614
+ return -ENOMEM;
1615
+ while (len) {
1616
+ if (pull_dst) {
1617
+ pull_dst = false;
1618
+ il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1619
+ (*idx)++;
1620
+ t_cplen = 0;
1621
+ offt = 0;
1622
+ }
1623
+ cplen = min_t(int, len, sizeof(struct sq_sge));
1624
+ cplen = min_t(int, cplen,
1625
+ (sizeof(struct sq_sge) - offt));
1626
+ memcpy(il_dst, il_src, cplen);
1627
+ t_cplen += cplen;
1628
+ il_src += cplen;
1629
+ il_dst += cplen;
1630
+ offt += cplen;
1631
+ len -= cplen;
1632
+ if (t_cplen == sizeof(struct sq_sge))
1633
+ pull_dst = true;
1634
+ }
1635
+ }
1636
+
1637
+ return t_len;
1638
+}
1639
+
1640
+static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1641
+ struct bnxt_qplib_sge *ssge,
1642
+ u16 nsge, u16 *idx)
1643
+{
1644
+ struct sq_sge *dsge;
1645
+ int indx, len = 0;
1646
+
1647
+ for (indx = 0; indx < nsge; indx++, (*idx)++) {
1648
+ dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1649
+ dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1650
+ dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1651
+ dsge->size = cpu_to_le32(ssge[indx].size);
1652
+ len += ssge[indx].size;
1653
+ }
1654
+
1655
+ return len;
1656
+}
1657
+
1658
+static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1659
+ struct bnxt_qplib_swqe *wqe,
1660
+ u16 *wqe_sz, u16 *qdf, u8 mode)
1661
+{
1662
+ u32 ilsize, bytes;
1663
+ u16 nsge;
1664
+ u16 slot;
1665
+
1666
+ nsge = wqe->num_sge;
1667
+ /* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
1668
+ bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1669
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1670
+ ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1671
+ bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1672
+ bytes += sizeof(struct sq_send_hdr);
1673
+ }
1674
+
1675
+ *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1676
+ slot = bytes >> 4;
1677
+ *wqe_sz = slot;
1678
+ if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1679
+ slot = 8;
1680
+ return slot;
1681
+}
1682
+
1683
+static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
1684
+ struct bnxt_qplib_swq *swq)
1685
+{
1686
+ struct bnxt_qplib_hwq *hwq;
1687
+ u32 pg_num, pg_indx;
1688
+ void *buff;
1689
+ u32 tail;
1690
+
1691
+ hwq = &sq->hwq;
1692
+ if (!hwq->pad_pg)
1693
+ return;
1694
+ tail = swq->slot_idx / sq->dbinfo.max_slot;
1695
+ pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1696
+ pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1697
+ buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1698
+ swq->psn_ext = buff;
1699
+ swq->psn_search = buff;
1700
+}
1701
+
14971702 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
14981703 {
14991704 struct bnxt_qplib_q *sq = &qp->sq;
1500
- struct dbr_dbr db_msg = { 0 };
1501
- u32 sw_prod;
15021705
1503
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1504
-
1505
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1506
- DBR_DBR_INDEX_MASK);
1507
- db_msg.type_xid =
1508
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1509
- DBR_DBR_TYPE_SQ);
1510
- /* Flush all the WQE writes to HW */
1511
- wmb();
1512
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1706
+ bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
15131707 }
15141708
15151709 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
15161710 struct bnxt_qplib_swqe *wqe)
15171711 {
1518
- struct bnxt_qplib_q *sq = &qp->sq;
1519
- struct bnxt_qplib_swq *swq;
1520
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1521
- struct sq_sge *hw_sge;
15221712 struct bnxt_qplib_nq_work *nq_work = NULL;
1523
- bool sch_handler = false;
1524
- u32 sw_prod;
1525
- u8 wqe_size16;
15261713 int i, rc = 0, data_len = 0, pkt_num = 0;
1714
+ struct bnxt_qplib_q *sq = &qp->sq;
1715
+ struct bnxt_qplib_hwq *hwq;
1716
+ struct bnxt_qplib_swq *swq;
1717
+ bool sch_handler = false;
1718
+ u16 wqe_sz, qdf = 0;
1719
+ void *base_hdr;
1720
+ void *ext_hdr;
15271721 __le32 temp32;
1722
+ u32 wqe_idx;
1723
+ u32 slots;
1724
+ u16 idx;
15281725
1529
- if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1530
- if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1531
- sch_handler = true;
1532
- dev_dbg(&sq->hwq.pdev->dev,
1533
- "%s Error QP. Scheduling for poll_cq\n",
1534
- __func__);
1535
- goto queue_err;
1536
- }
1726
+ hwq = &sq->hwq;
1727
+ if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1728
+ qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1729
+ dev_err(&hwq->pdev->dev,
1730
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1731
+ qp->id, qp->state);
1732
+ rc = -EINVAL;
1733
+ goto done;
15371734 }
15381735
1539
- if (bnxt_qplib_queue_full(sq)) {
1540
- dev_err(&sq->hwq.pdev->dev,
1541
- "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1542
- sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1543
- sq->q_full_delta);
1736
+ slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1737
+ if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1738
+ dev_err(&hwq->pdev->dev,
1739
+ "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1740
+ hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
15441741 rc = -ENOMEM;
15451742 goto done;
15461743 }
1547
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1548
- swq = &sq->swq[sw_prod];
1744
+
1745
+ swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1746
+ bnxt_qplib_pull_psn_buff(sq, swq);
1747
+
1748
+ idx = 0;
1749
+ swq->slot_idx = hwq->prod;
1750
+ swq->slots = slots;
15491751 swq->wr_id = wqe->wr_id;
15501752 swq->type = wqe->type;
15511753 swq->flags = wqe->flags;
1754
+ swq->start_psn = sq->psn & BTH_PSN_MASK;
15521755 if (qp->sig_type)
15531756 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1554
- swq->start_psn = sq->psn & BTH_PSN_MASK;
15551757
1556
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1557
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1558
- [get_sqe_idx(sw_prod)];
1559
-
1560
- memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1561
-
1562
- if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1563
- /* Copy the inline data */
1564
- if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1565
- dev_warn(&sq->hwq.pdev->dev,
1566
- "QPLIB: Inline data length > 96 detected");
1567
- data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1568
- } else {
1569
- data_len = wqe->inline_len;
1570
- }
1571
- memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1572
- wqe_size16 = (data_len + 15) >> 4;
1573
- } else {
1574
- for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1575
- i < wqe->num_sge; i++, hw_sge++) {
1576
- hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1577
- hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1578
- hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1579
- data_len += wqe->sg_list[i].size;
1580
- }
1581
- /* Each SGE entry = 1 WQE size16 */
1582
- wqe_size16 = wqe->num_sge;
1583
- /* HW requires wqe size has room for atleast one SGE even if
1584
- * none was supplied by ULP
1585
- */
1586
- if (!wqe->num_sge)
1587
- wqe_size16++;
1758
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1759
+ sch_handler = true;
1760
+ dev_dbg(&hwq->pdev->dev,
1761
+ "%s Error QP. Scheduling for poll_cq\n", __func__);
1762
+ goto queue_err;
15881763 }
15891764
1765
+ base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1766
+ ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1767
+ memset(base_hdr, 0, sizeof(struct sq_sge));
1768
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
1769
+
1770
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1771
+ /* Copy the inline data */
1772
+ data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1773
+ else
1774
+ data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1775
+ &idx);
1776
+ if (data_len < 0)
1777
+ goto queue_err;
15901778 /* Specifics */
15911779 switch (wqe->type) {
15921780 case BNXT_QPLIB_SWQE_TYPE_SEND:
15931781 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1782
+ struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1783
+ struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
15941784 /* Assemble info for Raw Ethertype QPs */
1595
- struct sq_send_raweth_qp1 *sqe =
1596
- (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
15971785
15981786 sqe->wqe_type = wqe->type;
15991787 sqe->flags = wqe->flags;
1600
- sqe->wqe_size = wqe_size16 +
1601
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
1788
+ sqe->wqe_size = wqe_sz;
16021789 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
16031790 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
16041791 sqe->length = cpu_to_le32(data_len);
1605
- sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1792
+ ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
16061793 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
16071794 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
16081795
16091796 break;
16101797 }
1611
- /* fall thru */
1798
+ fallthrough;
16121799 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
16131800 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
16141801 {
1615
- struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1802
+ struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1803
+ struct sq_send_hdr *sqe = base_hdr;
16161804
16171805 sqe->wqe_type = wqe->type;
16181806 sqe->flags = wqe->flags;
1619
- sqe->wqe_size = wqe_size16 +
1620
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
1621
- sqe->inv_key_or_imm_data = cpu_to_le32(
1622
- wqe->send.inv_key);
1623
- if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1807
+ sqe->wqe_size = wqe_sz;
1808
+ sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1809
+ if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1810
+ qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
16241811 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1625
- sqe->dst_qp = cpu_to_le32(
1626
- wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
16271812 sqe->length = cpu_to_le32(data_len);
1628
- sqe->avid = cpu_to_le32(wqe->send.avid &
1629
- SQ_SEND_AVID_MASK);
16301813 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1814
+ ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1815
+ SQ_SEND_DST_QP_MASK);
1816
+ ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1817
+ SQ_SEND_AVID_MASK);
16311818 } else {
16321819 sqe->length = cpu_to_le32(data_len);
1633
- sqe->dst_qp = 0;
1634
- sqe->avid = 0;
16351820 if (qp->mtu)
16361821 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
16371822 if (!pkt_num)
....@@ -1644,16 +1829,16 @@
16441829 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
16451830 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
16461831 {
1647
- struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1832
+ struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1833
+ struct sq_rdma_hdr *sqe = base_hdr;
16481834
16491835 sqe->wqe_type = wqe->type;
16501836 sqe->flags = wqe->flags;
1651
- sqe->wqe_size = wqe_size16 +
1652
- ((offsetof(typeof(*sqe), data) + 15) >> 4);
1837
+ sqe->wqe_size = wqe_sz;
16531838 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
16541839 sqe->length = cpu_to_le32((u32)data_len);
1655
- sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1656
- sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1840
+ ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1841
+ ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
16571842 if (qp->mtu)
16581843 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
16591844 if (!pkt_num)
....@@ -1664,14 +1849,15 @@
16641849 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
16651850 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
16661851 {
1667
- struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1852
+ struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1853
+ struct sq_atomic_hdr *sqe = base_hdr;
16681854
16691855 sqe->wqe_type = wqe->type;
16701856 sqe->flags = wqe->flags;
16711857 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
16721858 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1673
- sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1674
- sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1859
+ ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1860
+ ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
16751861 if (qp->mtu)
16761862 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
16771863 if (!pkt_num)
....@@ -1681,8 +1867,7 @@
16811867 }
16821868 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
16831869 {
1684
- struct sq_localinvalidate *sqe =
1685
- (struct sq_localinvalidate *)hw_sq_send_hdr;
1870
+ struct sq_localinvalidate *sqe = base_hdr;
16861871
16871872 sqe->wqe_type = wqe->type;
16881873 sqe->flags = wqe->flags;
....@@ -1692,7 +1877,8 @@
16921877 }
16931878 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
16941879 {
1695
- struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1880
+ struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1881
+ struct sq_fr_pmr_hdr *sqe = base_hdr;
16961882
16971883 sqe->wqe_type = wqe->type;
16981884 sqe->flags = wqe->flags;
....@@ -1716,14 +1902,15 @@
17161902 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
17171903 wqe->frmr.page_list[i] |
17181904 PTU_PTE_VALID);
1719
- sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1720
- sqe->va = cpu_to_le64(wqe->frmr.va);
1905
+ ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1906
+ ext_sqe->va = cpu_to_le64(wqe->frmr.va);
17211907
17221908 break;
17231909 }
17241910 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
17251911 {
1726
- struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1912
+ struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1913
+ struct sq_bind_hdr *sqe = base_hdr;
17271914
17281915 sqe->wqe_type = wqe->type;
17291916 sqe->flags = wqe->flags;
....@@ -1732,9 +1919,8 @@
17321919 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
17331920 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
17341921 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1735
- sqe->va = cpu_to_le64(wqe->bind.va);
1736
- temp32 = cpu_to_le32(wqe->bind.length);
1737
- memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1922
+ ext_sqe->va = cpu_to_le64(wqe->bind.va);
1923
+ ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
17381924 break;
17391925 }
17401926 default:
....@@ -1743,31 +1929,11 @@
17431929 goto done;
17441930 }
17451931 swq->next_psn = sq->psn & BTH_PSN_MASK;
1746
- if (swq->psn_search) {
1747
- swq->psn_search->opcode_start_psn = cpu_to_le32(
1748
- ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1749
- SQ_PSN_SEARCH_START_PSN_MASK) |
1750
- ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1751
- SQ_PSN_SEARCH_OPCODE_MASK));
1752
- swq->psn_search->flags_next_psn = cpu_to_le32(
1753
- ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1754
- SQ_PSN_SEARCH_NEXT_PSN_MASK));
1755
- }
1932
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
17561933 queue_err:
1757
- if (sch_handler) {
1758
- /* Store the ULP info in the software structures */
1759
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1760
- swq = &sq->swq[sw_prod];
1761
- swq->wr_id = wqe->wr_id;
1762
- swq->type = wqe->type;
1763
- swq->flags = wqe->flags;
1764
- if (qp->sig_type)
1765
- swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1766
- swq->start_psn = sq->psn & BTH_PSN_MASK;
1767
- }
1768
- sq->hwq.prod++;
1934
+ bnxt_qplib_swq_mod_start(sq, wqe_idx);
1935
+ bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
17691936 qp->wqe_cnt++;
1770
-
17711937 done:
17721938 if (sch_handler) {
17731939 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
....@@ -1777,8 +1943,8 @@
17771943 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
17781944 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
17791945 } else {
1780
- dev_err(&sq->hwq.pdev->dev,
1781
- "QPLIB: FP: Failed to allocate SQ nq_work!");
1946
+ dev_err(&hwq->pdev->dev,
1947
+ "FP: Failed to allocate SQ nq_work!\n");
17821948 rc = -ENOMEM;
17831949 }
17841950 }
....@@ -1788,81 +1954,74 @@
17881954 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
17891955 {
17901956 struct bnxt_qplib_q *rq = &qp->rq;
1791
- struct dbr_dbr db_msg = { 0 };
1792
- u32 sw_prod;
17931957
1794
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1795
- db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1796
- DBR_DBR_INDEX_MASK);
1797
- db_msg.type_xid =
1798
- cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1799
- DBR_DBR_TYPE_RQ);
1800
-
1801
- /* Flush the writes to HW Rx WQE before the ringing Rx DB */
1802
- wmb();
1803
- __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1958
+ bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
18041959 }
18051960
18061961 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
18071962 struct bnxt_qplib_swqe *wqe)
18081963 {
1809
- struct bnxt_qplib_q *rq = &qp->rq;
1810
- struct rq_wqe *rqe, **rqe_ptr;
1811
- struct sq_sge *hw_sge;
18121964 struct bnxt_qplib_nq_work *nq_work = NULL;
1965
+ struct bnxt_qplib_q *rq = &qp->rq;
1966
+ struct rq_wqe_hdr *base_hdr;
1967
+ struct rq_ext_hdr *ext_hdr;
1968
+ struct bnxt_qplib_hwq *hwq;
1969
+ struct bnxt_qplib_swq *swq;
18131970 bool sch_handler = false;
1814
- u32 sw_prod;
1815
- int i, rc = 0;
1971
+ u16 wqe_sz, idx;
1972
+ u32 wqe_idx;
1973
+ int rc = 0;
18161974
1817
- if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1818
- sch_handler = true;
1819
- dev_dbg(&rq->hwq.pdev->dev,
1820
- "%s Error QP. Scheduling for poll_cq\n",
1821
- __func__);
1822
- goto queue_err;
1823
- }
1824
- if (bnxt_qplib_queue_full(rq)) {
1825
- dev_err(&rq->hwq.pdev->dev,
1826
- "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1975
+ hwq = &rq->hwq;
1976
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1977
+ dev_err(&hwq->pdev->dev,
1978
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1979
+ qp->id, qp->state);
18271980 rc = -EINVAL;
18281981 goto done;
18291982 }
1830
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1831
- rq->swq[sw_prod].wr_id = wqe->wr_id;
18321983
1833
- rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1834
- rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1835
-
1836
- memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1837
-
1838
- /* Calculate wqe_size16 and data_len */
1839
- for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1840
- i < wqe->num_sge; i++, hw_sge++) {
1841
- hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1842
- hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1843
- hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1984
+ if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
1985
+ dev_err(&hwq->pdev->dev,
1986
+ "FP: QP (0x%x) RQ is full!\n", qp->id);
1987
+ rc = -EINVAL;
1988
+ goto done;
18441989 }
1845
- rqe->wqe_type = wqe->type;
1846
- rqe->flags = wqe->flags;
1847
- rqe->wqe_size = wqe->num_sge +
1848
- ((offsetof(typeof(*rqe), data) + 15) >> 4);
1849
- /* HW requires wqe size has room for atleast one SGE even if none
1850
- * was supplied by ULP
1851
- */
1852
- if (!wqe->num_sge)
1853
- rqe->wqe_size++;
18541990
1855
- /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1856
- rqe->wr_id[0] = cpu_to_le32(sw_prod);
1991
+ swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
1992
+ swq->wr_id = wqe->wr_id;
1993
+ swq->slots = rq->dbinfo.max_slot;
18571994
1995
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1996
+ sch_handler = true;
1997
+ dev_dbg(&hwq->pdev->dev,
1998
+ "%s: Error QP. Scheduling for poll_cq\n", __func__);
1999
+ goto queue_err;
2000
+ }
2001
+
2002
+ idx = 0;
2003
+ base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2004
+ ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2005
+ memset(base_hdr, 0, sizeof(struct sq_sge));
2006
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
2007
+ wqe_sz = (sizeof(struct rq_wqe_hdr) +
2008
+ wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2009
+ bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2010
+ if (!wqe->num_sge) {
2011
+ struct sq_sge *sge;
2012
+
2013
+ sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2014
+ sge->size = 0;
2015
+ wqe_sz++;
2016
+ }
2017
+ base_hdr->wqe_type = wqe->type;
2018
+ base_hdr->flags = wqe->flags;
2019
+ base_hdr->wqe_size = wqe_sz;
2020
+ base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
18582021 queue_err:
1859
- if (sch_handler) {
1860
- /* Store the ULP info in the software structures */
1861
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1862
- rq->swq[sw_prod].wr_id = wqe->wr_id;
1863
- }
1864
-
1865
- rq->hwq.prod++;
2022
+ bnxt_qplib_swq_mod_start(rq, wqe_idx);
2023
+ bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2024
+done:
18662025 if (sch_handler) {
18672026 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
18682027 if (nq_work) {
....@@ -1871,90 +2030,53 @@
18712030 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
18722031 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
18732032 } else {
1874
- dev_err(&rq->hwq.pdev->dev,
1875
- "QPLIB: FP: Failed to allocate RQ nq_work!");
2033
+ dev_err(&hwq->pdev->dev,
2034
+ "FP: Failed to allocate RQ nq_work!\n");
18762035 rc = -ENOMEM;
18772036 }
18782037 }
1879
-done:
2038
+
18802039 return rc;
18812040 }
18822041
18832042 /* CQ */
1884
-
1885
-/* Spinlock must be held */
1886
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1887
-{
1888
- struct dbr_dbr db_msg = { 0 };
1889
-
1890
- db_msg.type_xid =
1891
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1892
- DBR_DBR_TYPE_CQ_ARMENA);
1893
- /* Flush memory writes before enabling the CQ */
1894
- wmb();
1895
- __iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1896
-}
1897
-
1898
-static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1899
-{
1900
- struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1901
- struct dbr_dbr db_msg = { 0 };
1902
- u32 sw_cons;
1903
-
1904
- /* Ring DB */
1905
- sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1906
- db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1907
- DBR_DBR_INDEX_MASK);
1908
- db_msg.type_xid =
1909
- cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1910
- arm_type);
1911
- /* flush memory writes before arming the CQ */
1912
- wmb();
1913
- __iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1914
-}
1915
-
19162043 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
19172044 {
19182045 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1919
- struct cmdq_create_cq req;
2046
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
19202047 struct creq_create_cq_resp resp;
19212048 struct bnxt_qplib_pbl *pbl;
2049
+ struct cmdq_create_cq req;
19222050 u16 cmd_flags = 0;
2051
+ u32 pg_sz_lvl;
19232052 int rc;
1924
-
1925
- cq->hwq.max_elements = cq->max_wqe;
1926
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1927
- cq->nmap, &cq->hwq.max_elements,
1928
- BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1929
- PAGE_SIZE, HWQ_TYPE_QUEUE);
1930
- if (rc)
1931
- goto exit;
1932
-
1933
- RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
19342053
19352054 if (!cq->dpi) {
19362055 dev_err(&rcfw->pdev->dev,
1937
- "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
2056
+ "FP: CREATE_CQ failed due to NULL DPI\n");
19382057 return -EINVAL;
19392058 }
2059
+
2060
+ hwq_attr.res = res;
2061
+ hwq_attr.depth = cq->max_wqe;
2062
+ hwq_attr.stride = sizeof(struct cq_base);
2063
+ hwq_attr.type = HWQ_TYPE_QUEUE;
2064
+ hwq_attr.sginfo = &cq->sg_info;
2065
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2066
+ if (rc)
2067
+ return rc;
2068
+
2069
+ RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
2070
+
19402071 req.dpi = cpu_to_le32(cq->dpi->dpi);
19412072 req.cq_handle = cpu_to_le64(cq->cq_handle);
1942
-
19432073 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
19442074 pbl = &cq->hwq.pbl[PBL_LVL_0];
1945
- req.pg_size_lvl = cpu_to_le32(
1946
- ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1947
- CMDQ_CREATE_CQ_LVL_SFT) |
1948
- (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1949
- pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1950
- pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1951
- pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1952
- pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1953
- pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1954
- CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1955
-
2075
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2076
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
2077
+ pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2078
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
19562079 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1957
-
19582080 req.cq_fco_cnq_id = cpu_to_le32(
19592081 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
19602082 CMDQ_CREATE_CQ_CNQ_ID_SFT);
....@@ -1965,7 +2087,6 @@
19652087 goto fail;
19662088
19672089 cq->id = le32_to_cpu(resp.xid);
1968
- cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
19692090 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
19702091 init_waitqueue_head(&cq->waitq);
19712092 INIT_LIST_HEAD(&cq->sqf_head);
....@@ -1973,12 +2094,17 @@
19732094 spin_lock_init(&cq->compl_lock);
19742095 spin_lock_init(&cq->flush_lock);
19752096
1976
- bnxt_qplib_arm_cq_enable(cq);
2097
+ cq->dbinfo.hwq = &cq->hwq;
2098
+ cq->dbinfo.xid = cq->id;
2099
+ cq->dbinfo.db = cq->dpi->dbr;
2100
+ cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
2101
+
2102
+ bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2103
+
19772104 return 0;
19782105
19792106 fail:
1980
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1981
-exit:
2107
+ bnxt_qplib_free_hwq(res, &cq->hwq);
19822108 return rc;
19832109 }
19842110
....@@ -1987,6 +2113,7 @@
19872113 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
19882114 struct cmdq_destroy_cq req;
19892115 struct creq_destroy_cq_resp resp;
2116
+ u16 total_cnq_events;
19902117 u16 cmd_flags = 0;
19912118 int rc;
19922119
....@@ -1997,27 +2124,28 @@
19972124 (void *)&resp, NULL, 0);
19982125 if (rc)
19992126 return rc;
2000
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
2127
+ total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2128
+ __wait_for_all_nqes(cq, total_cnq_events);
2129
+ bnxt_qplib_free_hwq(res, &cq->hwq);
20012130 return 0;
20022131 }
20032132
20042133 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
20052134 struct bnxt_qplib_cqe **pcqe, int *budget)
20062135 {
2007
- u32 sw_prod, sw_cons;
20082136 struct bnxt_qplib_cqe *cqe;
2137
+ u32 start, last;
20092138 int rc = 0;
20102139
20112140 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2012
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2141
+ start = sq->swq_start;
20132142 cqe = *pcqe;
20142143 while (*budget) {
2015
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2016
- if (sw_cons == sw_prod) {
2144
+ last = sq->swq_last;
2145
+ if (start == last)
20172146 break;
2018
- }
20192147 /* Skip the FENCE WQE completions */
2020
- if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2148
+ if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
20212149 bnxt_qplib_cancel_phantom_processing(qp);
20222150 goto skip_compl;
20232151 }
....@@ -2025,16 +2153,17 @@
20252153 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
20262154 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
20272155 cqe->qp_handle = (u64)(unsigned long)qp;
2028
- cqe->wr_id = sq->swq[sw_cons].wr_id;
2156
+ cqe->wr_id = sq->swq[last].wr_id;
20292157 cqe->src_qp = qp->id;
2030
- cqe->type = sq->swq[sw_cons].type;
2158
+ cqe->type = sq->swq[last].type;
20312159 cqe++;
20322160 (*budget)--;
20332161 skip_compl:
2034
- sq->hwq.cons++;
2162
+ bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2163
+ sq->swq_last = sq->swq[last].next_idx;
20352164 }
20362165 *pcqe = cqe;
2037
- if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2166
+ if (!(*budget) && sq->swq_last != start)
20382167 /* Out of budget */
20392168 rc = -EAGAIN;
20402169
....@@ -2045,9 +2174,9 @@
20452174 struct bnxt_qplib_cqe **pcqe, int *budget)
20462175 {
20472176 struct bnxt_qplib_cqe *cqe;
2048
- u32 sw_prod, sw_cons;
2049
- int rc = 0;
2177
+ u32 start, last;
20502178 int opcode = 0;
2179
+ int rc = 0;
20512180
20522181 switch (qp->type) {
20532182 case CMDQ_CREATE_QP1_TYPE_GSI:
....@@ -2057,29 +2186,31 @@
20572186 opcode = CQ_BASE_CQE_TYPE_RES_RC;
20582187 break;
20592188 case CMDQ_CREATE_QP_TYPE_UD:
2189
+ case CMDQ_CREATE_QP_TYPE_GSI:
20602190 opcode = CQ_BASE_CQE_TYPE_RES_UD;
20612191 break;
20622192 }
20632193
20642194 /* Flush the rest of the RQ */
2065
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2195
+ start = rq->swq_start;
20662196 cqe = *pcqe;
20672197 while (*budget) {
2068
- sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2069
- if (sw_cons == sw_prod)
2198
+ last = rq->swq_last;
2199
+ if (last == start)
20702200 break;
20712201 memset(cqe, 0, sizeof(*cqe));
20722202 cqe->status =
20732203 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
20742204 cqe->opcode = opcode;
20752205 cqe->qp_handle = (unsigned long)qp;
2076
- cqe->wr_id = rq->swq[sw_cons].wr_id;
2206
+ cqe->wr_id = rq->swq[last].wr_id;
20772207 cqe++;
20782208 (*budget)--;
2079
- rq->hwq.cons++;
2209
+ bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2210
+ rq->swq_last = rq->swq[last].next_idx;
20802211 }
20812212 *pcqe = cqe;
2082
- if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2213
+ if (!*budget && rq->swq_last != start)
20832214 /* Out of budget */
20842215 rc = -EAGAIN;
20852216
....@@ -2102,20 +2233,20 @@
21022233 * CQE is track from sw_cq_cons to max_element but valid only if VALID=1
21032234 */
21042235 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2105
- u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2236
+ u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
21062237 {
2107
- struct bnxt_qplib_q *sq = &qp->sq;
2108
- struct bnxt_qplib_swq *swq;
21092238 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2110
- struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2239
+ struct bnxt_qplib_q *sq = &qp->sq;
21112240 struct cq_req *peek_req_hwcqe;
21122241 struct bnxt_qplib_qp *peek_qp;
21132242 struct bnxt_qplib_q *peek_sq;
2243
+ struct bnxt_qplib_swq *swq;
2244
+ struct cq_base *peek_hwcqe;
21142245 int i, rc = 0;
21152246
21162247 /* Normal mode */
21172248 /* Check for the psn_search marking before completing */
2118
- swq = &sq->swq[sw_sq_cons];
2249
+ swq = &sq->swq[swq_last];
21192250 if (swq->psn_search &&
21202251 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
21212252 /* Unmark */
....@@ -2124,13 +2255,12 @@
21242255 & ~0x80000000);
21252256 dev_dbg(&cq->hwq.pdev->dev,
21262257 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2127
- cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2258
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
21282259 sq->condition = true;
21292260 sq->send_phantom = true;
21302261
21312262 /* TODO: Only ARM if the previous SQE is ARMALL */
2132
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
2133
-
2263
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
21342264 rc = -EAGAIN;
21352265 goto out;
21362266 }
....@@ -2141,9 +2271,8 @@
21412271 i = cq->hwq.max_elements;
21422272 while (i--) {
21432273 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2144
- peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2145
- peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2146
- [CQE_IDX(peek_sw_cq_cons)];
2274
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2275
+ peek_sw_cq_cons, NULL);
21472276 /* If the next hwcqe is VALID */
21482277 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
21492278 cq->hwq.max_elements)) {
....@@ -2163,9 +2292,10 @@
21632292 le64_to_cpu
21642293 (peek_req_hwcqe->qp_handle));
21652294 peek_sq = &peek_qp->sq;
2166
- peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2167
- peek_req_hwcqe->sq_cons_idx) - 1
2168
- , &sq->hwq);
2295
+ peek_sq_cons_idx =
2296
+ ((le16_to_cpu(
2297
+ peek_req_hwcqe->sq_cons_idx)
2298
+ - 1) % sq->max_wqe);
21692299 /* If the hwcqe's sq's wr_id matches */
21702300 if (peek_sq == sq &&
21712301 sq->swq[peek_sq_cons_idx].wr_id ==
....@@ -2175,7 +2305,7 @@
21752305 * comes back
21762306 */
21772307 dev_dbg(&cq->hwq.pdev->dev,
2178
- "FP:Got Phantom CQE");
2308
+ "FP: Got Phantom CQE\n");
21792309 sq->condition = false;
21802310 sq->single = true;
21812311 rc = 0;
....@@ -2192,8 +2322,8 @@
21922322 peek_raw_cq_cons++;
21932323 }
21942324 dev_err(&cq->hwq.pdev->dev,
2195
- "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
2196
- cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2325
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2326
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
21972327 rc = -EINVAL;
21982328 }
21992329 out:
....@@ -2205,35 +2335,26 @@
22052335 struct bnxt_qplib_cqe **pcqe, int *budget,
22062336 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
22072337 {
2338
+ struct bnxt_qplib_swq *swq;
2339
+ struct bnxt_qplib_cqe *cqe;
22082340 struct bnxt_qplib_qp *qp;
22092341 struct bnxt_qplib_q *sq;
2210
- struct bnxt_qplib_cqe *cqe;
2211
- u32 sw_sq_cons, cqe_sq_cons;
2212
- struct bnxt_qplib_swq *swq;
2342
+ u32 cqe_sq_cons;
22132343 int rc = 0;
22142344
22152345 qp = (struct bnxt_qplib_qp *)((unsigned long)
22162346 le64_to_cpu(hwcqe->qp_handle));
22172347 if (!qp) {
22182348 dev_err(&cq->hwq.pdev->dev,
2219
- "QPLIB: FP: Process Req qp is NULL");
2349
+ "FP: Process Req qp is NULL\n");
22202350 return -EINVAL;
22212351 }
22222352 sq = &qp->sq;
22232353
2224
- cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2225
- if (cqe_sq_cons > sq->hwq.max_elements) {
2226
- dev_err(&cq->hwq.pdev->dev,
2227
- "QPLIB: FP: CQ Process req reported ");
2228
- dev_err(&cq->hwq.pdev->dev,
2229
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2230
- cqe_sq_cons, sq->hwq.max_elements);
2231
- return -EINVAL;
2232
- }
2233
-
2354
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
22342355 if (qp->sq.flushed) {
22352356 dev_dbg(&cq->hwq.pdev->dev,
2236
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2357
+ "%s: QP in Flush QP = %p\n", __func__, qp);
22372358 goto done;
22382359 }
22392360 /* Require to walk the sq's swq to fabricate CQEs for all previously
....@@ -2242,12 +2363,11 @@
22422363 */
22432364 cqe = *pcqe;
22442365 while (*budget) {
2245
- sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2246
- if (sw_sq_cons == cqe_sq_cons)
2366
+ if (sq->swq_last == cqe_sq_cons)
22472367 /* Done */
22482368 break;
22492369
2250
- swq = &sq->swq[sw_sq_cons];
2370
+ swq = &sq->swq[sq->swq_last];
22512371 memset(cqe, 0, sizeof(*cqe));
22522372 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
22532373 cqe->qp_handle = (u64)(unsigned long)qp;
....@@ -2261,14 +2381,12 @@
22612381 * of the request being signaled or not, it must complete with
22622382 * the hwcqe error status
22632383 */
2264
- if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2384
+ if (swq->next_idx == cqe_sq_cons &&
22652385 hwcqe->status != CQ_REQ_STATUS_OK) {
22662386 cqe->status = hwcqe->status;
22672387 dev_err(&cq->hwq.pdev->dev,
2268
- "QPLIB: FP: CQ Processed Req ");
2269
- dev_err(&cq->hwq.pdev->dev,
2270
- "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
2271
- sw_sq_cons, cqe->wr_id, cqe->status);
2388
+ "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2389
+ sq->swq_last, cqe->wr_id, cqe->status);
22722390 cqe++;
22732391 (*budget)--;
22742392 bnxt_qplib_mark_qp_error(qp);
....@@ -2276,7 +2394,7 @@
22762394 bnxt_qplib_add_flush_qp(qp);
22772395 } else {
22782396 /* Before we complete, do WA 9060 */
2279
- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2397
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
22802398 cqe_sq_cons)) {
22812399 *lib_qp = qp;
22822400 goto out;
....@@ -2288,13 +2406,14 @@
22882406 }
22892407 }
22902408 skip:
2291
- sq->hwq.cons++;
2409
+ bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2410
+ sq->swq_last = swq->next_idx;
22922411 if (sq->single)
22932412 break;
22942413 }
22952414 out:
22962415 *pcqe = cqe;
2297
- if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2416
+ if (sq->swq_last != cqe_sq_cons) {
22982417 /* Out of budget */
22992418 rc = -EAGAIN;
23002419 goto done;
....@@ -2323,22 +2442,22 @@
23232442 struct bnxt_qplib_cqe **pcqe,
23242443 int *budget)
23252444 {
2326
- struct bnxt_qplib_qp *qp;
2327
- struct bnxt_qplib_q *rq;
23282445 struct bnxt_qplib_srq *srq;
23292446 struct bnxt_qplib_cqe *cqe;
2447
+ struct bnxt_qplib_qp *qp;
2448
+ struct bnxt_qplib_q *rq;
23302449 u32 wr_id_idx;
23312450 int rc = 0;
23322451
23332452 qp = (struct bnxt_qplib_qp *)((unsigned long)
23342453 le64_to_cpu(hwcqe->qp_handle));
23352454 if (!qp) {
2336
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2455
+ dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
23372456 return -EINVAL;
23382457 }
23392458 if (qp->rq.flushed) {
23402459 dev_dbg(&cq->hwq.pdev->dev,
2341
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2460
+ "%s: QP in Flush QP = %p\n", __func__, qp);
23422461 goto done;
23432462 }
23442463
....@@ -2359,9 +2478,7 @@
23592478 return -EINVAL;
23602479 if (wr_id_idx >= srq->hwq.max_elements) {
23612480 dev_err(&cq->hwq.pdev->dev,
2362
- "QPLIB: FP: CQ Process RC ");
2363
- dev_err(&cq->hwq.pdev->dev,
2364
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2481
+ "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
23652482 wr_id_idx, srq->hwq.max_elements);
23662483 return -EINVAL;
23672484 }
....@@ -2371,19 +2488,23 @@
23712488 (*budget)--;
23722489 *pcqe = cqe;
23732490 } else {
2491
+ struct bnxt_qplib_swq *swq;
2492
+
23742493 rq = &qp->rq;
2375
- if (wr_id_idx >= rq->hwq.max_elements) {
2494
+ if (wr_id_idx > (rq->max_wqe - 1)) {
23762495 dev_err(&cq->hwq.pdev->dev,
2377
- "QPLIB: FP: CQ Process RC ");
2378
- dev_err(&cq->hwq.pdev->dev,
2379
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2380
- wr_id_idx, rq->hwq.max_elements);
2496
+ "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2497
+ wr_id_idx, rq->max_wqe);
23812498 return -EINVAL;
23822499 }
2383
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2500
+ if (wr_id_idx != rq->swq_last)
2501
+ return -EINVAL;
2502
+ swq = &rq->swq[rq->swq_last];
2503
+ cqe->wr_id = swq->wr_id;
23842504 cqe++;
23852505 (*budget)--;
2386
- rq->hwq.cons++;
2506
+ bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2507
+ rq->swq_last = swq->next_idx;
23872508 *pcqe = cqe;
23882509
23892510 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
....@@ -2402,32 +2523,34 @@
24022523 struct bnxt_qplib_cqe **pcqe,
24032524 int *budget)
24042525 {
2405
- struct bnxt_qplib_qp *qp;
2406
- struct bnxt_qplib_q *rq;
24072526 struct bnxt_qplib_srq *srq;
24082527 struct bnxt_qplib_cqe *cqe;
2528
+ struct bnxt_qplib_qp *qp;
2529
+ struct bnxt_qplib_q *rq;
24092530 u32 wr_id_idx;
24102531 int rc = 0;
24112532
24122533 qp = (struct bnxt_qplib_qp *)((unsigned long)
24132534 le64_to_cpu(hwcqe->qp_handle));
24142535 if (!qp) {
2415
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2536
+ dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
24162537 return -EINVAL;
24172538 }
24182539 if (qp->rq.flushed) {
24192540 dev_dbg(&cq->hwq.pdev->dev,
2420
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2541
+ "%s: QP in Flush QP = %p\n", __func__, qp);
24212542 goto done;
24222543 }
24232544 cqe = *pcqe;
24242545 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2425
- cqe->length = le32_to_cpu(hwcqe->length);
2546
+ cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2547
+ cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
24262548 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
24272549 cqe->flags = le16_to_cpu(hwcqe->flags);
24282550 cqe->status = hwcqe->status;
24292551 cqe->qp_handle = (u64)(unsigned long)qp;
2430
- memcpy(cqe->smac, hwcqe->src_mac, 6);
2552
+ /*FIXME: Endianness fix needed for smace */
2553
+ memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
24312554 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
24322555 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
24332556 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
....@@ -2442,9 +2565,7 @@
24422565
24432566 if (wr_id_idx >= srq->hwq.max_elements) {
24442567 dev_err(&cq->hwq.pdev->dev,
2445
- "QPLIB: FP: CQ Process UD ");
2446
- dev_err(&cq->hwq.pdev->dev,
2447
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2568
+ "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
24482569 wr_id_idx, srq->hwq.max_elements);
24492570 return -EINVAL;
24502571 }
....@@ -2454,20 +2575,24 @@
24542575 (*budget)--;
24552576 *pcqe = cqe;
24562577 } else {
2578
+ struct bnxt_qplib_swq *swq;
2579
+
24572580 rq = &qp->rq;
2458
- if (wr_id_idx >= rq->hwq.max_elements) {
2581
+ if (wr_id_idx > (rq->max_wqe - 1)) {
24592582 dev_err(&cq->hwq.pdev->dev,
2460
- "QPLIB: FP: CQ Process UD ");
2461
- dev_err(&cq->hwq.pdev->dev,
2462
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2463
- wr_id_idx, rq->hwq.max_elements);
2583
+ "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2584
+ wr_id_idx, rq->max_wqe);
24642585 return -EINVAL;
24652586 }
24662587
2467
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2588
+ if (rq->swq_last != wr_id_idx)
2589
+ return -EINVAL;
2590
+ swq = &rq->swq[rq->swq_last];
2591
+ cqe->wr_id = swq->wr_id;
24682592 cqe++;
24692593 (*budget)--;
2470
- rq->hwq.cons++;
2594
+ bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2595
+ rq->swq_last = swq->next_idx;
24712596 *pcqe = cqe;
24722597
24732598 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
....@@ -2482,15 +2607,13 @@
24822607
24832608 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
24842609 {
2485
- struct cq_base *hw_cqe, **hw_cqe_ptr;
2610
+ struct cq_base *hw_cqe;
24862611 u32 sw_cons, raw_cons;
24872612 bool rc = true;
24882613
24892614 raw_cons = cq->hwq.cons;
24902615 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2491
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2492
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2493
-
2616
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
24942617 /* Check for Valid bit. If the CQE is valid, return false */
24952618 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
24962619 return rc;
....@@ -2511,13 +2634,12 @@
25112634 qp = (struct bnxt_qplib_qp *)((unsigned long)
25122635 le64_to_cpu(hwcqe->qp_handle));
25132636 if (!qp) {
2514
- dev_err(&cq->hwq.pdev->dev,
2515
- "QPLIB: process_cq Raw/QP1 qp is NULL");
2637
+ dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
25162638 return -EINVAL;
25172639 }
25182640 if (qp->rq.flushed) {
25192641 dev_dbg(&cq->hwq.pdev->dev,
2520
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2642
+ "%s: QP in Flush QP = %p\n", __func__, qp);
25212643 goto done;
25222644 }
25232645 cqe = *pcqe;
....@@ -2546,14 +2668,12 @@
25462668 srq = qp->srq;
25472669 if (!srq) {
25482670 dev_err(&cq->hwq.pdev->dev,
2549
- "QPLIB: FP: SRQ used but not defined??");
2671
+ "FP: SRQ used but not defined??\n");
25502672 return -EINVAL;
25512673 }
25522674 if (wr_id_idx >= srq->hwq.max_elements) {
25532675 dev_err(&cq->hwq.pdev->dev,
2554
- "QPLIB: FP: CQ Process Raw/QP1 ");
2555
- dev_err(&cq->hwq.pdev->dev,
2556
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
2676
+ "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
25572677 wr_id_idx, srq->hwq.max_elements);
25582678 return -EINVAL;
25592679 }
....@@ -2563,19 +2683,23 @@
25632683 (*budget)--;
25642684 *pcqe = cqe;
25652685 } else {
2686
+ struct bnxt_qplib_swq *swq;
2687
+
25662688 rq = &qp->rq;
2567
- if (wr_id_idx >= rq->hwq.max_elements) {
2689
+ if (wr_id_idx > (rq->max_wqe - 1)) {
25682690 dev_err(&cq->hwq.pdev->dev,
2569
- "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2570
- dev_err(&cq->hwq.pdev->dev,
2571
- "QPLIB: ix 0x%x exceeded RQ max 0x%x",
2572
- wr_id_idx, rq->hwq.max_elements);
2691
+ "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2692
+ wr_id_idx, rq->max_wqe);
25732693 return -EINVAL;
25742694 }
2575
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2695
+ if (rq->swq_last != wr_id_idx)
2696
+ return -EINVAL;
2697
+ swq = &rq->swq[rq->swq_last];
2698
+ cqe->wr_id = swq->wr_id;
25762699 cqe++;
25772700 (*budget)--;
2578
- rq->hwq.cons++;
2701
+ bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2702
+ rq->swq_last = swq->next_idx;
25792703 *pcqe = cqe;
25802704
25812705 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
....@@ -2597,22 +2721,19 @@
25972721 struct bnxt_qplib_qp *qp;
25982722 struct bnxt_qplib_q *sq, *rq;
25992723 struct bnxt_qplib_cqe *cqe;
2600
- u32 sw_cons = 0, cqe_cons;
2724
+ u32 swq_last = 0, cqe_cons;
26012725 int rc = 0;
26022726
26032727 /* Check the Status */
26042728 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
26052729 dev_warn(&cq->hwq.pdev->dev,
2606
- "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2730
+ "FP: CQ Process Terminal Error status = 0x%x\n",
26072731 hwcqe->status);
26082732
26092733 qp = (struct bnxt_qplib_qp *)((unsigned long)
26102734 le64_to_cpu(hwcqe->qp_handle));
2611
- if (!qp) {
2612
- dev_err(&cq->hwq.pdev->dev,
2613
- "QPLIB: FP: CQ Process terminal qp is NULL");
2735
+ if (!qp)
26142736 return -EINVAL;
2615
- }
26162737
26172738 /* Must block new posting of SQ and RQ */
26182739 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
....@@ -2623,19 +2744,11 @@
26232744 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
26242745 if (cqe_cons == 0xFFFF)
26252746 goto do_rq;
2626
-
2627
- if (cqe_cons > sq->hwq.max_elements) {
2628
- dev_err(&cq->hwq.pdev->dev,
2629
- "QPLIB: FP: CQ Process terminal reported ");
2630
- dev_err(&cq->hwq.pdev->dev,
2631
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2632
- cqe_cons, sq->hwq.max_elements);
2633
- goto do_rq;
2634
- }
2747
+ cqe_cons %= sq->max_wqe;
26352748
26362749 if (qp->sq.flushed) {
26372750 dev_dbg(&cq->hwq.pdev->dev,
2638
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2751
+ "%s: QP in Flush QP = %p\n", __func__, qp);
26392752 goto sq_done;
26402753 }
26412754
....@@ -2645,24 +2758,25 @@
26452758 */
26462759 cqe = *pcqe;
26472760 while (*budget) {
2648
- sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2649
- if (sw_cons == cqe_cons)
2761
+ swq_last = sq->swq_last;
2762
+ if (swq_last == cqe_cons)
26502763 break;
2651
- if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2764
+ if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
26522765 memset(cqe, 0, sizeof(*cqe));
26532766 cqe->status = CQ_REQ_STATUS_OK;
26542767 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
26552768 cqe->qp_handle = (u64)(unsigned long)qp;
26562769 cqe->src_qp = qp->id;
2657
- cqe->wr_id = sq->swq[sw_cons].wr_id;
2658
- cqe->type = sq->swq[sw_cons].type;
2770
+ cqe->wr_id = sq->swq[swq_last].wr_id;
2771
+ cqe->type = sq->swq[swq_last].type;
26592772 cqe++;
26602773 (*budget)--;
26612774 }
2662
- sq->hwq.cons++;
2775
+ bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2776
+ sq->swq_last = sq->swq[swq_last].next_idx;
26632777 }
26642778 *pcqe = cqe;
2665
- if (!(*budget) && sw_cons != cqe_cons) {
2779
+ if (!(*budget) && swq_last != cqe_cons) {
26662780 /* Out of budget */
26672781 rc = -EAGAIN;
26682782 goto sq_done;
....@@ -2674,18 +2788,17 @@
26742788 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
26752789 if (cqe_cons == 0xFFFF) {
26762790 goto done;
2677
- } else if (cqe_cons > rq->hwq.max_elements) {
2791
+ } else if (cqe_cons > rq->max_wqe - 1) {
26782792 dev_err(&cq->hwq.pdev->dev,
2679
- "QPLIB: FP: CQ Processed terminal ");
2680
- dev_err(&cq->hwq.pdev->dev,
2681
- "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2682
- cqe_cons, rq->hwq.max_elements);
2793
+ "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2794
+ cqe_cons, rq->max_wqe);
2795
+ rc = -EINVAL;
26832796 goto done;
26842797 }
26852798
26862799 if (qp->rq.flushed) {
26872800 dev_dbg(&cq->hwq.pdev->dev,
2688
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2801
+ "%s: QP in Flush QP = %p\n", __func__, qp);
26892802 rc = 0;
26902803 goto done;
26912804 }
....@@ -2707,7 +2820,7 @@
27072820 /* Check the Status */
27082821 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
27092822 dev_err(&cq->hwq.pdev->dev,
2710
- "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2823
+ "FP: CQ Process Cutoff Error status = 0x%x\n",
27112824 hwcqe->status);
27122825 return -EINVAL;
27132826 }
....@@ -2727,16 +2840,12 @@
27272840
27282841 spin_lock_irqsave(&cq->flush_lock, flags);
27292842 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2730
- dev_dbg(&cq->hwq.pdev->dev,
2731
- "QPLIB: FP: Flushing SQ QP= %p",
2732
- qp);
2843
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
27332844 __flush_sq(&qp->sq, qp, &cqe, &budget);
27342845 }
27352846
27362847 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2737
- dev_dbg(&cq->hwq.pdev->dev,
2738
- "QPLIB: FP: Flushing RQ QP= %p",
2739
- qp);
2848
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
27402849 __flush_rq(&qp->rq, qp, &cqe, &budget);
27412850 }
27422851 spin_unlock_irqrestore(&cq->flush_lock, flags);
....@@ -2747,7 +2856,7 @@
27472856 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
27482857 int num_cqes, struct bnxt_qplib_qp **lib_qp)
27492858 {
2750
- struct cq_base *hw_cqe, **hw_cqe_ptr;
2859
+ struct cq_base *hw_cqe;
27512860 u32 sw_cons, raw_cons;
27522861 int budget, rc = 0;
27532862
....@@ -2756,8 +2865,7 @@
27562865
27572866 while (budget) {
27582867 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2759
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2760
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2868
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
27612869
27622870 /* Check for Valid bit */
27632871 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
....@@ -2804,7 +2912,7 @@
28042912 goto exit;
28052913 default:
28062914 dev_err(&cq->hwq.pdev->dev,
2807
- "QPLIB: process_cq unknown type 0x%lx",
2915
+ "process_cq unknown type 0x%lx\n",
28082916 hw_cqe->cqe_type_toggle &
28092917 CQ_BASE_CQE_TYPE_MASK);
28102918 rc = -EINVAL;
....@@ -2817,13 +2925,13 @@
28172925 * next one
28182926 */
28192927 dev_err(&cq->hwq.pdev->dev,
2820
- "QPLIB: process_cqe error rc = 0x%x", rc);
2928
+ "process_cqe error rc = 0x%x\n", rc);
28212929 }
28222930 raw_cons++;
28232931 }
28242932 if (cq->hwq.cons != raw_cons) {
28252933 cq->hwq.cons = raw_cons;
2826
- bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2934
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
28272935 }
28282936 exit:
28292937 return num_cqes - budget;
....@@ -2832,7 +2940,7 @@
28322940 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
28332941 {
28342942 if (arm_type)
2835
- bnxt_qplib_arm_cq(cq, arm_type);
2943
+ bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
28362944 /* Using cq->arm_state variable to track whether to issue cq handler */
28372945 atomic_set(&cq->arm_state, 1);
28382946 }