hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/infiniband/hw/bnxt_re/qplib_res.c
....@@ -36,12 +36,18 @@
3636 * Description: QPLib resource manager
3737 */
3838
39
+#define dev_fmt(fmt) "QPLIB: " fmt
40
+
3941 #include <linux/spinlock.h>
4042 #include <linux/pci.h>
4143 #include <linux/interrupt.h>
4244 #include <linux/inetdevice.h>
4345 #include <linux/dma-mapping.h>
4446 #include <linux/if_vlan.h>
47
+#include <linux/vmalloc.h>
48
+#include <rdma/ib_verbs.h>
49
+#include <rdma/ib_umem.h>
50
+
4551 #include "roce_hsi.h"
4652 #include "qplib_res.h"
4753 #include "qplib_sp.h"
....@@ -50,12 +56,14 @@
5056 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
5157 struct bnxt_qplib_stats *stats);
5258 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
59
+ struct bnxt_qplib_chip_ctx *cctx,
5360 struct bnxt_qplib_stats *stats);
5461
5562 /* PBL */
56
-static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
63
+static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
5764 bool is_umem)
5865 {
66
+ struct pci_dev *pdev = res->pdev;
5967 int i;
6068
6169 if (!is_umem) {
....@@ -68,72 +76,85 @@
6876 pbl->pg_map_arr[i]);
6977 else
7078 dev_warn(&pdev->dev,
71
- "QPLIB: PBL free pg_arr[%d] empty?!",
72
- i);
79
+ "PBL free pg_arr[%d] empty?!\n", i);
7380 pbl->pg_arr[i] = NULL;
7481 }
7582 }
76
- kfree(pbl->pg_arr);
83
+ vfree(pbl->pg_arr);
7784 pbl->pg_arr = NULL;
78
- kfree(pbl->pg_map_arr);
85
+ vfree(pbl->pg_map_arr);
7986 pbl->pg_map_arr = NULL;
8087 pbl->pg_count = 0;
8188 pbl->pg_size = 0;
8289 }
8390
84
-static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
85
- struct scatterlist *sghead, u32 pages, u32 pg_size)
91
+static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
92
+ struct bnxt_qplib_sg_info *sginfo)
8693 {
87
- struct scatterlist *sg;
94
+ struct ib_block_iter biter;
95
+ int i = 0;
96
+
97
+ rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
98
+ pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
99
+ pbl->pg_arr[i] = NULL;
100
+ pbl->pg_count++;
101
+ i++;
102
+ }
103
+}
104
+
105
+static int __alloc_pbl(struct bnxt_qplib_res *res,
106
+ struct bnxt_qplib_pbl *pbl,
107
+ struct bnxt_qplib_sg_info *sginfo)
108
+{
109
+ struct pci_dev *pdev = res->pdev;
88110 bool is_umem = false;
111
+ u32 pages;
89112 int i;
90113
114
+ if (sginfo->nopte)
115
+ return 0;
116
+ if (sginfo->umem)
117
+ pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
118
+ else
119
+ pages = sginfo->npages;
91120 /* page ptr arrays */
92
- pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
121
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
93122 if (!pbl->pg_arr)
94123 return -ENOMEM;
95124
96
- pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
125
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
97126 if (!pbl->pg_map_arr) {
98
- kfree(pbl->pg_arr);
127
+ vfree(pbl->pg_arr);
99128 pbl->pg_arr = NULL;
100129 return -ENOMEM;
101130 }
102131 pbl->pg_count = 0;
103
- pbl->pg_size = pg_size;
132
+ pbl->pg_size = sginfo->pgsize;
104133
105
- if (!sghead) {
134
+ if (!sginfo->umem) {
106135 for (i = 0; i < pages; i++) {
107
- pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
108
- pbl->pg_size,
109
- &pbl->pg_map_arr[i],
110
- GFP_KERNEL);
136
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
137
+ pbl->pg_size,
138
+ &pbl->pg_map_arr[i],
139
+ GFP_KERNEL);
111140 if (!pbl->pg_arr[i])
112141 goto fail;
113142 pbl->pg_count++;
114143 }
115144 } else {
116
- i = 0;
117145 is_umem = true;
118
- for_each_sg(sghead, sg, pages, i) {
119
- pbl->pg_map_arr[i] = sg_dma_address(sg);
120
- pbl->pg_arr[i] = sg_virt(sg);
121
- if (!pbl->pg_arr[i])
122
- goto fail;
123
-
124
- pbl->pg_count++;
125
- }
146
+ bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
126147 }
127148
128149 return 0;
129
-
130150 fail:
131
- __free_pbl(pdev, pbl, is_umem);
151
+ __free_pbl(res, pbl, is_umem);
132152 return -ENOMEM;
133153 }
134154
135155 /* HWQ */
136
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
156
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
157
+ struct bnxt_qplib_hwq *hwq)
137158 {
138159 int i;
139160
....@@ -144,9 +165,9 @@
144165
145166 for (i = 0; i < hwq->level + 1; i++) {
146167 if (i == hwq->level)
147
- __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
168
+ __free_pbl(res, &hwq->pbl[i], hwq->is_user);
148169 else
149
- __free_pbl(pdev, &hwq->pbl[i], false);
170
+ __free_pbl(res, &hwq->pbl[i], false);
150171 }
151172
152173 hwq->level = PBL_LVL_MAX;
....@@ -158,71 +179,114 @@
158179 }
159180
160181 /* All HWQs are power of 2 in size */
161
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
162
- struct scatterlist *sghead, int nmap,
163
- u32 *elements, u32 element_size, u32 aux,
164
- u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
165
-{
166
- u32 pages, slots, size, aux_pages = 0, aux_size = 0;
167
- dma_addr_t *src_phys_ptr, **dst_virt_ptr;
168
- int i, rc;
169182
183
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
184
+ struct bnxt_qplib_hwq_attr *hwq_attr)
185
+{
186
+ u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
187
+ struct bnxt_qplib_sg_info sginfo = {};
188
+ u32 depth, stride, npbl, npde;
189
+ dma_addr_t *src_phys_ptr, **dst_virt_ptr;
190
+ struct bnxt_qplib_res *res;
191
+ struct pci_dev *pdev;
192
+ int i, rc, lvl;
193
+
194
+ res = hwq_attr->res;
195
+ pdev = res->pdev;
196
+ pg_size = hwq_attr->sginfo->pgsize;
170197 hwq->level = PBL_LVL_MAX;
171198
172
- slots = roundup_pow_of_two(*elements);
173
- if (aux) {
174
- aux_size = roundup_pow_of_two(aux);
175
- aux_pages = (slots * aux_size) / pg_size;
176
- if ((slots * aux_size) % pg_size)
199
+ depth = roundup_pow_of_two(hwq_attr->depth);
200
+ stride = roundup_pow_of_two(hwq_attr->stride);
201
+ if (hwq_attr->aux_depth) {
202
+ aux_slots = hwq_attr->aux_depth;
203
+ aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
204
+ aux_pages = (aux_slots * aux_size) / pg_size;
205
+ if ((aux_slots * aux_size) % pg_size)
177206 aux_pages++;
178207 }
179
- size = roundup_pow_of_two(element_size);
180208
181
- if (!sghead) {
209
+ if (!hwq_attr->sginfo->umem) {
182210 hwq->is_user = false;
183
- pages = (slots * size) / pg_size + aux_pages;
184
- if ((slots * size) % pg_size)
185
- pages++;
186
- if (!pages)
211
+ npages = (depth * stride) / pg_size + aux_pages;
212
+ if ((depth * stride) % pg_size)
213
+ npages++;
214
+ if (!npages)
187215 return -EINVAL;
216
+ hwq_attr->sginfo->npages = npages;
188217 } else {
218
+ unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
219
+ hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
220
+
189221 hwq->is_user = true;
190
- pages = nmap;
222
+ npages = sginfo_num_pages;
223
+ npages = (npages * PAGE_SIZE) /
224
+ BIT_ULL(hwq_attr->sginfo->pgshft);
225
+ if ((sginfo_num_pages * PAGE_SIZE) %
226
+ BIT_ULL(hwq_attr->sginfo->pgshft))
227
+ if (!npages)
228
+ npages++;
191229 }
192230
193
- /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
194
- if (sghead && (pages == MAX_PBL_LVL_0_PGS))
195
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
196
- pages, pg_size);
197
- else
198
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
199
- if (rc)
200
- goto fail;
231
+ if (npages == MAX_PBL_LVL_0_PGS) {
232
+ /* This request is Level 0, map PTE */
233
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
234
+ if (rc)
235
+ goto fail;
236
+ hwq->level = PBL_LVL_0;
237
+ }
201238
202
- hwq->level = PBL_LVL_0;
203
-
204
- if (pages > MAX_PBL_LVL_0_PGS) {
205
- if (pages > MAX_PBL_LVL_1_PGS) {
239
+ if (npages > MAX_PBL_LVL_0_PGS) {
240
+ if (npages > MAX_PBL_LVL_1_PGS) {
241
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
242
+ 0 : PTU_PTE_VALID;
206243 /* 2 levels of indirection */
207
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
208
- MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
244
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
245
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
246
+ npbl++;
247
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
248
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
249
+ npde++;
250
+ /* Alloc PDE pages */
251
+ sginfo.pgsize = npde * pg_size;
252
+ sginfo.npages = 1;
253
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
254
+
255
+ /* Alloc PBL pages */
256
+ sginfo.npages = npbl;
257
+ sginfo.pgsize = PAGE_SIZE;
258
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
209259 if (rc)
210260 goto fail;
211
- /* Fill in lvl0 PBL */
261
+ /* Fill PDL with PBL page pointers */
212262 dst_virt_ptr =
213263 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
214264 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
215
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
216
- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
217
- src_phys_ptr[i] | PTU_PDE_VALID;
218
- hwq->level = PBL_LVL_1;
219
-
220
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
221
- pages, pg_size);
265
+ if (hwq_attr->type == HWQ_TYPE_MR) {
266
+ /* For MR it is expected that we supply only 1 contiguous
267
+ * page i.e. only 1 entry in the PDL that will contain
268
+ * all the PBLs for the user supplied memory region
269
+ */
270
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
271
+ i++)
272
+ dst_virt_ptr[0][i] = src_phys_ptr[i] |
273
+ flag;
274
+ } else {
275
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
276
+ i++)
277
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
278
+ src_phys_ptr[i] |
279
+ PTU_PDE_VALID;
280
+ }
281
+ /* Alloc or init PTEs */
282
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
283
+ hwq_attr->sginfo);
222284 if (rc)
223285 goto fail;
224
-
225
- /* Fill in lvl1 PBL */
286
+ hwq->level = PBL_LVL_2;
287
+ if (hwq_attr->sginfo->nopte)
288
+ goto done;
289
+ /* Fill PBLs with PTE pointers */
226290 dst_virt_ptr =
227291 (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
228292 src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
....@@ -230,7 +294,7 @@
230294 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
231295 src_phys_ptr[i] | PTU_PTE_VALID;
232296 }
233
- if (hwq_type == HWQ_TYPE_QUEUE) {
297
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
234298 /* Find the last pg of the size */
235299 i = hwq->pbl[PBL_LVL_2].pg_count;
236300 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
....@@ -240,25 +304,36 @@
240304 [PTR_IDX(i - 2)] |=
241305 PTU_PTE_NEXT_TO_LAST;
242306 }
243
- hwq->level = PBL_LVL_2;
244
- } else {
245
- u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
246
- PTU_PTE_VALID;
307
+ } else { /* pages < 512 npbl = 1, npde = 0 */
308
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
309
+ 0 : PTU_PTE_VALID;
247310
248311 /* 1 level of indirection */
249
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
250
- pages, pg_size);
312
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
313
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
314
+ npbl++;
315
+ sginfo.npages = npbl;
316
+ sginfo.pgsize = PAGE_SIZE;
317
+ /* Alloc PBL page */
318
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
251319 if (rc)
252320 goto fail;
253
- /* Fill in lvl0 PBL */
321
+ /* Alloc or init PTEs */
322
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
323
+ hwq_attr->sginfo);
324
+ if (rc)
325
+ goto fail;
326
+ hwq->level = PBL_LVL_1;
327
+ if (hwq_attr->sginfo->nopte)
328
+ goto done;
329
+ /* Fill PBL with PTE pointers */
254330 dst_virt_ptr =
255331 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
256332 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
257
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
333
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
258334 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
259335 src_phys_ptr[i] | flag;
260
- }
261
- if (hwq_type == HWQ_TYPE_QUEUE) {
336
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
262337 /* Find the last pg of the size */
263338 i = hwq->pbl[PBL_LVL_1].pg_count;
264339 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
....@@ -268,42 +343,142 @@
268343 [PTR_IDX(i - 2)] |=
269344 PTU_PTE_NEXT_TO_LAST;
270345 }
271
- hwq->level = PBL_LVL_1;
272346 }
273347 }
274
- hwq->pdev = pdev;
275
- spin_lock_init(&hwq->lock);
348
+done:
276349 hwq->prod = 0;
277350 hwq->cons = 0;
278
- *elements = hwq->max_elements = slots;
279
- hwq->element_size = size;
280
-
351
+ hwq->pdev = pdev;
352
+ hwq->depth = hwq_attr->depth;
353
+ hwq->max_elements = depth;
354
+ hwq->element_size = stride;
355
+ hwq->qe_ppg = pg_size / stride;
281356 /* For direct access to the elements */
282
- hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
283
- hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
357
+ lvl = hwq->level;
358
+ if (hwq_attr->sginfo->nopte && hwq->level)
359
+ lvl = hwq->level - 1;
360
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
361
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
362
+ spin_lock_init(&hwq->lock);
284363
285364 return 0;
286
-
287365 fail:
288
- bnxt_qplib_free_hwq(pdev, hwq);
366
+ bnxt_qplib_free_hwq(res, hwq);
289367 return -ENOMEM;
290368 }
291369
292370 /* Context Tables */
293
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
371
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
294372 struct bnxt_qplib_ctx *ctx)
295373 {
296374 int i;
297375
298
- bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
299
- bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
300
- bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
301
- bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
302
- bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
376
+ bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
377
+ bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
378
+ bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
379
+ bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
380
+ bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
303381 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
304
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
305
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
306
- bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
382
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
383
+ /* restore original pde level before destroy */
384
+ ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
385
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
386
+ bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
387
+}
388
+
389
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
390
+ struct bnxt_qplib_ctx *ctx)
391
+{
392
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
393
+ struct bnxt_qplib_sg_info sginfo = {};
394
+ struct bnxt_qplib_tqm_ctx *tqmctx;
395
+ int rc = 0;
396
+ int i;
397
+
398
+ tqmctx = &ctx->tqm_ctx;
399
+
400
+ sginfo.pgsize = PAGE_SIZE;
401
+ sginfo.pgshft = PAGE_SHIFT;
402
+ hwq_attr.sginfo = &sginfo;
403
+ hwq_attr.res = res;
404
+ hwq_attr.type = HWQ_TYPE_CTX;
405
+ hwq_attr.depth = 512;
406
+ hwq_attr.stride = sizeof(u64);
407
+ /* Alloc pdl buffer */
408
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
409
+ if (rc)
410
+ goto out;
411
+ /* Save original pdl level */
412
+ tqmctx->pde_level = tqmctx->pde.level;
413
+
414
+ hwq_attr.stride = 1;
415
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
416
+ if (!tqmctx->qcount[i])
417
+ continue;
418
+ hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
419
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
420
+ if (rc)
421
+ goto out;
422
+ }
423
+out:
424
+ return rc;
425
+}
426
+
427
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
428
+{
429
+ struct bnxt_qplib_hwq *tbl;
430
+ dma_addr_t *dma_ptr;
431
+ __le64 **pbl_ptr, *ptr;
432
+ int i, j, k;
433
+ int fnz_idx = -1;
434
+ int pg_count;
435
+
436
+ pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
437
+
438
+ for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
439
+ i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
440
+ tbl = &ctx->qtbl[i];
441
+ if (!tbl->max_elements)
442
+ continue;
443
+ if (fnz_idx == -1)
444
+ fnz_idx = i; /* first non-zero index */
445
+ switch (tbl->level) {
446
+ case PBL_LVL_2:
447
+ pg_count = tbl->pbl[PBL_LVL_1].pg_count;
448
+ for (k = 0; k < pg_count; k++) {
449
+ ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
450
+ dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
451
+ *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
452
+ }
453
+ break;
454
+ case PBL_LVL_1:
455
+ case PBL_LVL_0:
456
+ default:
457
+ ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
458
+ *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
459
+ PTU_PTE_VALID);
460
+ break;
461
+ }
462
+ }
463
+ if (fnz_idx == -1)
464
+ fnz_idx = 0;
465
+ /* update pde level as per page table programming */
466
+ ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
467
+ ctx->qtbl[fnz_idx].level + 1;
468
+}
469
+
470
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
471
+ struct bnxt_qplib_ctx *ctx)
472
+{
473
+ int rc = 0;
474
+
475
+ rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
476
+ if (rc)
477
+ goto fail;
478
+
479
+ bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
480
+fail:
481
+ return rc;
307482 }
308483
309484 /*
....@@ -327,120 +502,72 @@
327502 * Returns:
328503 * 0 if success, else -ERRORS
329504 */
330
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
505
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
331506 struct bnxt_qplib_ctx *ctx,
332
- bool virt_fn)
507
+ bool virt_fn, bool is_p5)
333508 {
334
- int i, j, k, rc = 0;
335
- int fnz_idx = -1;
336
- __le64 **pbl_ptr;
509
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
510
+ struct bnxt_qplib_sg_info sginfo = {};
511
+ int rc = 0;
337512
338
- if (virt_fn)
513
+ if (virt_fn || is_p5)
339514 goto stats_alloc;
340515
341516 /* QPC Tables */
342
- ctx->qpc_tbl.max_elements = ctx->qpc_count;
343
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
344
- &ctx->qpc_tbl.max_elements,
345
- BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
346
- PAGE_SIZE, HWQ_TYPE_CTX);
517
+ sginfo.pgsize = PAGE_SIZE;
518
+ sginfo.pgshft = PAGE_SHIFT;
519
+ hwq_attr.sginfo = &sginfo;
520
+
521
+ hwq_attr.res = res;
522
+ hwq_attr.depth = ctx->qpc_count;
523
+ hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
524
+ hwq_attr.type = HWQ_TYPE_CTX;
525
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
347526 if (rc)
348527 goto fail;
349528
350529 /* MRW Tables */
351
- ctx->mrw_tbl.max_elements = ctx->mrw_count;
352
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
353
- &ctx->mrw_tbl.max_elements,
354
- BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
355
- PAGE_SIZE, HWQ_TYPE_CTX);
530
+ hwq_attr.depth = ctx->mrw_count;
531
+ hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
532
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
356533 if (rc)
357534 goto fail;
358535
359536 /* SRQ Tables */
360
- ctx->srqc_tbl.max_elements = ctx->srqc_count;
361
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
362
- &ctx->srqc_tbl.max_elements,
363
- BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
364
- PAGE_SIZE, HWQ_TYPE_CTX);
537
+ hwq_attr.depth = ctx->srqc_count;
538
+ hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
539
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
365540 if (rc)
366541 goto fail;
367542
368543 /* CQ Tables */
369
- ctx->cq_tbl.max_elements = ctx->cq_count;
370
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
371
- &ctx->cq_tbl.max_elements,
372
- BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
373
- PAGE_SIZE, HWQ_TYPE_CTX);
544
+ hwq_attr.depth = ctx->cq_count;
545
+ hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
546
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
374547 if (rc)
375548 goto fail;
376549
377550 /* TQM Buffer */
378
- ctx->tqm_pde.max_elements = 512;
379
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
380
- &ctx->tqm_pde.max_elements, sizeof(u64),
381
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
551
+ rc = bnxt_qplib_setup_tqm_rings(res, ctx);
382552 if (rc)
383553 goto fail;
384
-
385
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
386
- if (!ctx->tqm_count[i])
387
- continue;
388
- ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
389
- ctx->tqm_count[i];
390
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
391
- &ctx->tqm_tbl[i].max_elements, 1,
392
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
393
- if (rc)
394
- goto fail;
395
- }
396
- pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
397
- for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
398
- i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
399
- if (!ctx->tqm_tbl[i].max_elements)
400
- continue;
401
- if (fnz_idx == -1)
402
- fnz_idx = i;
403
- switch (ctx->tqm_tbl[i].level) {
404
- case PBL_LVL_2:
405
- for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
406
- k++)
407
- pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
408
- cpu_to_le64(
409
- ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
410
- | PTU_PTE_VALID);
411
- break;
412
- case PBL_LVL_1:
413
- case PBL_LVL_0:
414
- default:
415
- pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
416
- ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
417
- PTU_PTE_VALID);
418
- break;
419
- }
420
- }
421
- if (fnz_idx == -1)
422
- fnz_idx = 0;
423
- ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
424
- PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
425
-
426554 /* TIM Buffer */
427555 ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
428
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
429
- &ctx->tim_tbl.max_elements, 1,
430
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
556
+ hwq_attr.depth = ctx->qpc_count * 16;
557
+ hwq_attr.stride = 1;
558
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
431559 if (rc)
432560 goto fail;
433
-
434561 stats_alloc:
435562 /* Stats */
436
- rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
563
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
437564 if (rc)
438565 goto fail;
439566
440567 return 0;
441568
442569 fail:
443
- bnxt_qplib_free_ctx(pdev, ctx);
570
+ bnxt_qplib_free_ctx(res, ctx);
444571 return rc;
445572 }
446573
....@@ -480,7 +607,7 @@
480607 struct bnxt_qplib_sgid_tbl *sgid_tbl,
481608 u16 max)
482609 {
483
- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
610
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
484611 if (!sgid_tbl->tbl)
485612 return -ENOMEM;
486613
....@@ -518,9 +645,10 @@
518645 for (i = 0; i < sgid_tbl->max; i++) {
519646 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
520647 sizeof(bnxt_qplib_gid_zero)))
521
- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
648
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
649
+ sgid_tbl->tbl[i].vlan_id, true);
522650 }
523
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
651
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
524652 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
525653 memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
526654 sgid_tbl->active = 0;
....@@ -529,7 +657,11 @@
529657 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
530658 struct net_device *netdev)
531659 {
532
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
660
+ u32 i;
661
+
662
+ for (i = 0; i < sgid_tbl->max; i++)
663
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
664
+
533665 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
534666 }
535667
....@@ -537,7 +669,7 @@
537669 struct bnxt_qplib_pkey_tbl *pkey_tbl)
538670 {
539671 if (!pkey_tbl->tbl)
540
- dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
672
+ dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
541673 else
542674 kfree(pkey_tbl->tbl);
543675
....@@ -578,7 +710,7 @@
578710 struct bnxt_qplib_pd *pd)
579711 {
580712 if (test_and_set_bit(pd->id, pdt->tbl)) {
581
- dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
713
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
582714 pd->id);
583715 return -EINVAL;
584716 }
....@@ -639,11 +771,11 @@
639771 struct bnxt_qplib_dpi *dpi)
640772 {
641773 if (dpi->dpi >= dpit->max) {
642
- dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
774
+ dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
643775 return -EINVAL;
644776 }
645777 if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
646
- dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
778
+ dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
647779 dpi->dpi);
648780 return -EINVAL;
649781 }
....@@ -673,31 +805,29 @@
673805 u32 dbr_len, bytes;
674806
675807 if (dpit->dbr_bar_reg_iomem) {
676
- dev_err(&res->pdev->dev,
677
- "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
808
+ dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
809
+ dbr_bar_reg);
678810 return -EALREADY;
679811 }
680812
681813 bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
682814 if (!bar_reg_base) {
683
- dev_err(&res->pdev->dev,
684
- "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
815
+ dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
816
+ dbr_bar_reg);
685817 return -ENOMEM;
686818 }
687819
688820 dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
689821 if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
690
- dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
691
- dbr_len);
822
+ dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
692823 return -ENOMEM;
693824 }
694825
695
- dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
826
+ dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
696827 dbr_len);
697828 if (!dpit->dbr_bar_reg_iomem) {
698829 dev_err(&res->pdev->dev,
699
- "QPLIB: FP: DBR BAR region %d mapping failed",
700
- dbr_bar_reg);
830
+ "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
701831 return -ENOMEM;
702832 }
703833
....@@ -760,15 +890,16 @@
760890 }
761891
762892 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
893
+ struct bnxt_qplib_chip_ctx *cctx,
763894 struct bnxt_qplib_stats *stats)
764895 {
765896 memset(stats, 0, sizeof(*stats));
766897 stats->fw_id = -1;
767
- stats->size = sizeof(struct ctx_hw_stats);
898
+ stats->size = cctx->hw_stats_size;
768899 stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
769900 &stats->dma_map, GFP_KERNEL);
770901 if (!stats->dma) {
771
- dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
902
+ dev_err(&pdev->dev, "Stats DMA allocation failed\n");
772903 return -ENOMEM;
773904 }
774905 return 0;
....@@ -794,9 +925,6 @@
794925 bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
795926 bnxt_qplib_free_pd_tbl(&res->pd_tbl);
796927 bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
797
-
798
- res->netdev = NULL;
799
- res->pdev = NULL;
800928 }
801929
802930 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,