forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/bnxt_re/qplib_res.c
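In brief: this change switches the QPLib resource manager's page-pointer arrays from kcalloc() to vmalloc(), walks user memory through the RDMA core ib_umem block iterator instead of a raw scatterlist, adds TQM ring helpers, and replaces the long argument list of bnxt_qplib_alloc_init_hwq() with a single bnxt_qplib_hwq_attr descriptor. A minimal sketch of the new calling convention, lifted from the bnxt_qplib_alloc_ctx() hunk further down (the struct layouts live in qplib_res.h and are assumed here):

    struct bnxt_qplib_hwq_attr hwq_attr = {};
    struct bnxt_qplib_sg_info sginfo = {};
    int rc;

    sginfo.pgsize = PAGE_SIZE;          /* kernel-allocated pages, no umem */
    sginfo.pgshft = PAGE_SHIFT;
    hwq_attr.res = res;                 /* provides pdev and chip context */
    hwq_attr.sginfo = &sginfo;
    hwq_attr.type = HWQ_TYPE_CTX;
    hwq_attr.depth = ctx->qpc_count;    /* number of elements */
    hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE; /* bytes per element */
    rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);

The depth/stride fields take over from the old elements/element_size/aux/pg_size parameters, so callers mostly just refill the same attribute structure between allocations, as the context-table hunk shows.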
@@ -36,12 +36,18 @@
  * Description: QPLib resource manager
  */

+#define dev_fmt(fmt) "QPLIB: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/inetdevice.h>
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_sp.h"
@@ -50,12 +56,14 @@
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                       struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                      struct bnxt_qplib_chip_ctx *cctx,
                                       struct bnxt_qplib_stats *stats);

 /* PBL */
-static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
+static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
                        bool is_umem)
 {
+        struct pci_dev *pdev = res->pdev;
         int i;

         if (!is_umem) {
@@ -68,72 +76,85 @@
                                           pbl->pg_map_arr[i]);
                         else
                                 dev_warn(&pdev->dev,
-                                         "QPLIB: PBL free pg_arr[%d] empty?!",
-                                         i);
+                                         "PBL free pg_arr[%d] empty?!\n", i);
                         pbl->pg_arr[i] = NULL;
                 }
         }
-        kfree(pbl->pg_arr);
+        vfree(pbl->pg_arr);
         pbl->pg_arr = NULL;
-        kfree(pbl->pg_map_arr);
+        vfree(pbl->pg_map_arr);
         pbl->pg_map_arr = NULL;
         pbl->pg_count = 0;
         pbl->pg_size = 0;
 }

-static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
-                       struct scatterlist *sghead, u32 pages, u32 pg_size)
+static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+                                           struct bnxt_qplib_sg_info *sginfo)
 {
-        struct scatterlist *sg;
+        struct ib_block_iter biter;
+        int i = 0;
+
+        rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+                pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
+                pbl->pg_arr[i] = NULL;
+                pbl->pg_count++;
+                i++;
+        }
+}
+
+static int __alloc_pbl(struct bnxt_qplib_res *res,
+                       struct bnxt_qplib_pbl *pbl,
+                       struct bnxt_qplib_sg_info *sginfo)
+{
+        struct pci_dev *pdev = res->pdev;
         bool is_umem = false;
+        u32 pages;
         int i;

+        if (sginfo->nopte)
+                return 0;
+        if (sginfo->umem)
+                pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+        else
+                pages = sginfo->npages;
         /* page ptr arrays */
-        pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+        pbl->pg_arr = vmalloc(pages * sizeof(void *));
         if (!pbl->pg_arr)
                 return -ENOMEM;

-        pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
+        pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
         if (!pbl->pg_map_arr) {
-                kfree(pbl->pg_arr);
+                vfree(pbl->pg_arr);
                 pbl->pg_arr = NULL;
                 return -ENOMEM;
         }
         pbl->pg_count = 0;
-        pbl->pg_size = pg_size;
+        pbl->pg_size = sginfo->pgsize;

-        if (!sghead) {
+        if (!sginfo->umem) {
                 for (i = 0; i < pages; i++) {
-                        pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
-                                                             pbl->pg_size,
-                                                             &pbl->pg_map_arr[i],
-                                                             GFP_KERNEL);
+                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+                                                            pbl->pg_size,
+                                                            &pbl->pg_map_arr[i],
+                                                            GFP_KERNEL);
                         if (!pbl->pg_arr[i])
                                 goto fail;
                         pbl->pg_count++;
                 }
         } else {
-                i = 0;
                 is_umem = true;
-                for_each_sg(sghead, sg, pages, i) {
-                        pbl->pg_map_arr[i] = sg_dma_address(sg);
-                        pbl->pg_arr[i] = sg_virt(sg);
-                        if (!pbl->pg_arr[i])
-                                goto fail;
-
-                        pbl->pg_count++;
-                }
+                bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
         }

         return 0;
-
 fail:
-        __free_pbl(pdev, pbl, is_umem);
+        __free_pbl(res, pbl, is_umem);
         return -ENOMEM;
 }

 /* HWQ */
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+                         struct bnxt_qplib_hwq *hwq)
 {
         int i;

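Note on the hunk above: the user-memory path no longer walks a scatterlist with for_each_sg(); bnxt_qplib_fill_user_dma_pages() records one DMA address per driver-sized block of the ib_umem and leaves pg_arr[] NULL, since the driver does not need a kernel virtual address for user pages here. A standalone sketch of that iterator pattern (the collect_dma_blocks() helper is illustrative, not part of bnxt_re):

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /*
     * Record one DMA address per pgsz-sized block of a pinned umem.
     * ib_umem_num_dma_blocks(umem, pgsz) tells the caller how many
     * entries to reserve in 'out' beforehand.
     */
    static u32 collect_dma_blocks(struct ib_umem *umem, unsigned long pgsz,
                                  dma_addr_t *out, u32 max)
    {
            struct ib_block_iter biter;
            u32 n = 0;

            rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
                    if (n == max)
                            break;
                    out[n++] = rdma_block_iter_dma_address(&biter);
            }
            return n;
    }

ib_umem_num_dma_blocks() is also what the __alloc_pbl() hunk above uses to size pg_arr/pg_map_arr before filling them.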
@@ -144,9 +165,9 @@

         for (i = 0; i < hwq->level + 1; i++) {
                 if (i == hwq->level)
-                        __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
+                        __free_pbl(res, &hwq->pbl[i], hwq->is_user);
                 else
-                        __free_pbl(pdev, &hwq->pbl[i], false);
+                        __free_pbl(res, &hwq->pbl[i], false);
         }

         hwq->level = PBL_LVL_MAX;
@@ -158,71 +179,106 @@
 }

 /* All HWQs are power of 2 in size */
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
-                              struct scatterlist *sghead, int nmap,
-                              u32 *elements, u32 element_size, u32 aux,
-                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
-{
-        u32 pages, slots, size, aux_pages = 0, aux_size = 0;
-        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
-        int i, rc;

+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+                              struct bnxt_qplib_hwq_attr *hwq_attr)
+{
+        u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
+        struct bnxt_qplib_sg_info sginfo = {};
+        u32 depth, stride, npbl, npde;
+        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+        struct bnxt_qplib_res *res;
+        struct pci_dev *pdev;
+        int i, rc, lvl;
+
+        res = hwq_attr->res;
+        pdev = res->pdev;
+        pg_size = hwq_attr->sginfo->pgsize;
         hwq->level = PBL_LVL_MAX;

-        slots = roundup_pow_of_two(*elements);
-        if (aux) {
-                aux_size = roundup_pow_of_two(aux);
-                aux_pages = (slots * aux_size) / pg_size;
-                if ((slots * aux_size) % pg_size)
+        depth = roundup_pow_of_two(hwq_attr->depth);
+        stride = roundup_pow_of_two(hwq_attr->stride);
+        if (hwq_attr->aux_depth) {
+                aux_slots = hwq_attr->aux_depth;
+                aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
+                aux_pages = (aux_slots * aux_size) / pg_size;
+                if ((aux_slots * aux_size) % pg_size)
                         aux_pages++;
         }
-        size = roundup_pow_of_two(element_size);

-        if (!sghead) {
+        if (!hwq_attr->sginfo->umem) {
                 hwq->is_user = false;
-                pages = (slots * size) / pg_size + aux_pages;
-                if ((slots * size) % pg_size)
-                        pages++;
-                if (!pages)
+                npages = (depth * stride) / pg_size + aux_pages;
+                if ((depth * stride) % pg_size)
+                        npages++;
+                if (!npages)
                         return -EINVAL;
+                hwq_attr->sginfo->npages = npages;
         } else {
+                npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+                                                hwq_attr->sginfo->pgsize);
                 hwq->is_user = true;
-                pages = nmap;
         }

-        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
-        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
-                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
-                                 pages, pg_size);
-        else
-                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
-        if (rc)
-                goto fail;
+        if (npages == MAX_PBL_LVL_0_PGS) {
+                /* This request is Level 0, map PTE */
+                rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
+                if (rc)
+                        goto fail;
+                hwq->level = PBL_LVL_0;
+        }

-        hwq->level = PBL_LVL_0;
-
-        if (pages > MAX_PBL_LVL_0_PGS) {
-                if (pages > MAX_PBL_LVL_1_PGS) {
+        if (npages > MAX_PBL_LVL_0_PGS) {
+                if (npages > MAX_PBL_LVL_1_PGS) {
+                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+                                    0 : PTU_PTE_VALID;
                         /* 2 levels of indirection */
-                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
-                                         MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
+                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+                                npbl++;
+                        npde = npbl >> MAX_PDL_LVL_SHIFT;
+                        if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+                                npde++;
+                        /* Alloc PDE pages */
+                        sginfo.pgsize = npde * pg_size;
+                        sginfo.npages = 1;
+                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+
+                        /* Alloc PBL pages */
+                        sginfo.npages = npbl;
+                        sginfo.pgsize = PAGE_SIZE;
+                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
                         if (rc)
                                 goto fail;
-                        /* Fill in lvl0 PBL */
+                        /* Fill PDL with PBL page pointers */
                         dst_virt_ptr =
                                 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                         src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
-                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
-                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
-                                        src_phys_ptr[i] | PTU_PDE_VALID;
-                        hwq->level = PBL_LVL_1;
-
-                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
-                                         pages, pg_size);
+                        if (hwq_attr->type == HWQ_TYPE_MR) {
+                        /* For MR it is expected that we supply only 1 contigous
+                         * page i.e only 1 entry in the PDL that will contain
+                         * all the PBLs for the user supplied memory region
+                         */
+                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+                                     i++)
+                                        dst_virt_ptr[0][i] = src_phys_ptr[i] |
+                                                flag;
+                        } else {
+                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+                                     i++)
+                                        dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+                                                src_phys_ptr[i] |
+                                                PTU_PDE_VALID;
+                        }
+                        /* Alloc or init PTEs */
+                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+                                         hwq_attr->sginfo);
                         if (rc)
                                 goto fail;
-
-                        /* Fill in lvl1 PBL */
+                        hwq->level = PBL_LVL_2;
+                        if (hwq_attr->sginfo->nopte)
+                                goto done;
+                        /* Fill PBLs with PTE pointers */
                         dst_virt_ptr =
                                 (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                         src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
@@ -230,7 +286,7 @@
                                 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                         src_phys_ptr[i] | PTU_PTE_VALID;
                         }
-                        if (hwq_type == HWQ_TYPE_QUEUE) {
+                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                 /* Find the last pg of the size */
                                 i = hwq->pbl[PBL_LVL_2].pg_count;
                                 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -240,25 +296,36 @@
                                                     [PTR_IDX(i - 2)] |=
                                                     PTU_PTE_NEXT_TO_LAST;
                         }
-                        hwq->level = PBL_LVL_2;
-                } else {
-                        u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
-                                   PTU_PTE_VALID;
+                } else { /* pages < 512 npbl = 1, npde = 0 */
+                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+                                    0 : PTU_PTE_VALID;

                         /* 1 level of indirection */
-                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
-                                         pages, pg_size);
+                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+                                npbl++;
+                        sginfo.npages = npbl;
+                        sginfo.pgsize = PAGE_SIZE;
+                        /* Alloc PBL page */
+                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
                         if (rc)
                                 goto fail;
-                        /* Fill in lvl0 PBL */
+                        /* Alloc or init PTEs */
+                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
+                                         hwq_attr->sginfo);
+                        if (rc)
+                                goto fail;
+                        hwq->level = PBL_LVL_1;
+                        if (hwq_attr->sginfo->nopte)
+                                goto done;
+                        /* Fill PBL with PTE pointers */
                         dst_virt_ptr =
                                 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                         src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
-                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
+                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                         src_phys_ptr[i] | flag;
-                        }
-                        if (hwq_type == HWQ_TYPE_QUEUE) {
+                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                 /* Find the last pg of the size */
                                 i = hwq->pbl[PBL_LVL_1].pg_count;
                                 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -268,42 +335,142 @@
                                                     [PTR_IDX(i - 2)] |=
                                                     PTU_PTE_NEXT_TO_LAST;
                         }
-                        hwq->level = PBL_LVL_1;
                 }
         }
-        hwq->pdev = pdev;
-        spin_lock_init(&hwq->lock);
+done:
         hwq->prod = 0;
         hwq->cons = 0;
-        *elements = hwq->max_elements = slots;
-        hwq->element_size = size;
-
+        hwq->pdev = pdev;
+        hwq->depth = hwq_attr->depth;
+        hwq->max_elements = depth;
+        hwq->element_size = stride;
+        hwq->qe_ppg = pg_size / stride;
         /* For direct access to the elements */
-        hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
-        hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
+        lvl = hwq->level;
+        if (hwq_attr->sginfo->nopte && hwq->level)
+                lvl = hwq->level - 1;
+        hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+        hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+        spin_lock_init(&hwq->lock);

         return 0;
-
 fail:
-        bnxt_qplib_free_hwq(pdev, hwq);
+        bnxt_qplib_free_hwq(res, hwq);
         return -ENOMEM;
 }

 /* Context Tables */
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_ctx *ctx)
 {
         int i;

-        bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
-        bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
-        bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
-        bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
-        bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
+        bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
+        bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
+        bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
+        bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
+        bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
         for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
-                bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
-        bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
-        bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
+                bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
+        /* restore original pde level before destroy */
+        ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
+        bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
+        bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
+}
+
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
+                                      struct bnxt_qplib_ctx *ctx)
+{
+        struct bnxt_qplib_hwq_attr hwq_attr = {};
+        struct bnxt_qplib_sg_info sginfo = {};
+        struct bnxt_qplib_tqm_ctx *tqmctx;
+        int rc = 0;
+        int i;
+
+        tqmctx = &ctx->tqm_ctx;
+
+        sginfo.pgsize = PAGE_SIZE;
+        sginfo.pgshft = PAGE_SHIFT;
+        hwq_attr.sginfo = &sginfo;
+        hwq_attr.res = res;
+        hwq_attr.type = HWQ_TYPE_CTX;
+        hwq_attr.depth = 512;
+        hwq_attr.stride = sizeof(u64);
+        /* Alloc pdl buffer */
+        rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
+        if (rc)
+                goto out;
+        /* Save original pdl level */
+        tqmctx->pde_level = tqmctx->pde.level;
+
+        hwq_attr.stride = 1;
+        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
+                if (!tqmctx->qcount[i])
+                        continue;
+                hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
+                rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
+                if (rc)
+                        goto out;
+        }
+out:
+        return rc;
+}
+
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
+{
+        struct bnxt_qplib_hwq *tbl;
+        dma_addr_t *dma_ptr;
+        __le64 **pbl_ptr, *ptr;
+        int i, j, k;
+        int fnz_idx = -1;
+        int pg_count;
+
+        pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
+
+        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
+             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
+                tbl = &ctx->qtbl[i];
+                if (!tbl->max_elements)
+                        continue;
+                if (fnz_idx == -1)
+                        fnz_idx = i; /* first non-zero index */
+                switch (tbl->level) {
+                case PBL_LVL_2:
+                        pg_count = tbl->pbl[PBL_LVL_1].pg_count;
+                        for (k = 0; k < pg_count; k++) {
+                                ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
+                                dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
+                                *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
+                        }
+                        break;
+                case PBL_LVL_1:
+                case PBL_LVL_0:
+                default:
+                        ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
+                        *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
+                                           PTU_PTE_VALID);
+                        break;
+                }
+        }
+        if (fnz_idx == -1)
+                fnz_idx = 0;
+        /* update pde level as per page table programming */
+        ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
+                          ctx->qtbl[fnz_idx].level + 1;
+}
+
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
+                                      struct bnxt_qplib_ctx *ctx)
+{
+        int rc = 0;
+
+        rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
+        if (rc)
+                goto fail;
+
+        bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
+fail:
+        return rc;
 }

 /*
@@ -327,120 +494,72 @@
  * Returns:
  * 0 if success, else -ERRORS
  */
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_ctx *ctx,
-                         bool virt_fn)
+                         bool virt_fn, bool is_p5)
 {
-        int i, j, k, rc = 0;
-        int fnz_idx = -1;
-        __le64 **pbl_ptr;
+        struct bnxt_qplib_hwq_attr hwq_attr = {};
+        struct bnxt_qplib_sg_info sginfo = {};
+        int rc = 0;

-        if (virt_fn)
+        if (virt_fn || is_p5)
                 goto stats_alloc;

         /* QPC Tables */
-        ctx->qpc_tbl.max_elements = ctx->qpc_count;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
-                                       &ctx->qpc_tbl.max_elements,
-                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
-                                       PAGE_SIZE, HWQ_TYPE_CTX);
+        sginfo.pgsize = PAGE_SIZE;
+        sginfo.pgshft = PAGE_SHIFT;
+        hwq_attr.sginfo = &sginfo;
+
+        hwq_attr.res = res;
+        hwq_attr.depth = ctx->qpc_count;
+        hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
+        hwq_attr.type = HWQ_TYPE_CTX;
+        rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
         if (rc)
                 goto fail;

         /* MRW Tables */
-        ctx->mrw_tbl.max_elements = ctx->mrw_count;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
-                                       &ctx->mrw_tbl.max_elements,
-                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
-                                       PAGE_SIZE, HWQ_TYPE_CTX);
+        hwq_attr.depth = ctx->mrw_count;
+        hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
+        rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
         if (rc)
                 goto fail;

         /* SRQ Tables */
-        ctx->srqc_tbl.max_elements = ctx->srqc_count;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
-                                       &ctx->srqc_tbl.max_elements,
-                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
-                                       PAGE_SIZE, HWQ_TYPE_CTX);
+        hwq_attr.depth = ctx->srqc_count;
+        hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
+        rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
         if (rc)
                 goto fail;

         /* CQ Tables */
-        ctx->cq_tbl.max_elements = ctx->cq_count;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
-                                       &ctx->cq_tbl.max_elements,
-                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
-                                       PAGE_SIZE, HWQ_TYPE_CTX);
+        hwq_attr.depth = ctx->cq_count;
+        hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
+        rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
         if (rc)
                 goto fail;

         /* TQM Buffer */
-        ctx->tqm_pde.max_elements = 512;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
-                                       &ctx->tqm_pde.max_elements, sizeof(u64),
-                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
+        rc = bnxt_qplib_setup_tqm_rings(res, ctx);
         if (rc)
                 goto fail;
-
-        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
-                if (!ctx->tqm_count[i])
-                        continue;
-                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
-                                               ctx->tqm_count[i];
-                rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
-                                               &ctx->tqm_tbl[i].max_elements, 1,
-                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
-                if (rc)
-                        goto fail;
-        }
-        pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
-        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
-             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
-                if (!ctx->tqm_tbl[i].max_elements)
-                        continue;
-                if (fnz_idx == -1)
-                        fnz_idx = i;
-                switch (ctx->tqm_tbl[i].level) {
-                case PBL_LVL_2:
-                        for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
-                             k++)
-                                pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
-                                cpu_to_le64(
-                                        ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
-                                        | PTU_PTE_VALID);
-                        break;
-                case PBL_LVL_1:
-                case PBL_LVL_0:
-                default:
-                        pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
-                                ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
-                                PTU_PTE_VALID);
-                        break;
-                }
-        }
-        if (fnz_idx == -1)
-                fnz_idx = 0;
-        ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
-                             PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
-
         /* TIM Buffer */
         ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
-        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
-                                       &ctx->tim_tbl.max_elements, 1,
-                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
+        hwq_attr.depth = ctx->qpc_count * 16;
+        hwq_attr.stride = 1;
+        rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
         if (rc)
                 goto fail;
-
 stats_alloc:
         /* Stats */
-        rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
+        rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
         if (rc)
                 goto fail;

         return 0;

 fail:
-        bnxt_qplib_free_ctx(pdev, ctx);
+        bnxt_qplib_free_ctx(res, ctx);
         return rc;
 }

@@ -480,7 +599,7 @@
                                      struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                      u16 max)
 {
-        sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+        sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
         if (!sgid_tbl->tbl)
                 return -ENOMEM;

@@ -518,9 +637,10 @@
         for (i = 0; i < sgid_tbl->max; i++) {
                 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                            sizeof(bnxt_qplib_gid_zero)))
-                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+                                            sgid_tbl->tbl[i].vlan_id, true);
         }
-        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+        memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
         memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
         memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
         sgid_tbl->active = 0;
@@ -529,7 +649,11 @@
 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                      struct net_device *netdev)
 {
-        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+        u32 i;
+
+        for (i = 0; i < sgid_tbl->max; i++)
+                sgid_tbl->tbl[i].vlan_id = 0xffff;
+
         memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }

@@ -537,7 +661,7 @@
                                      struct bnxt_qplib_pkey_tbl *pkey_tbl)
 {
         if (!pkey_tbl->tbl)
-                dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
+                dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
         else
                 kfree(pkey_tbl->tbl);

@@ -578,7 +702,7 @@
                           struct bnxt_qplib_pd *pd)
 {
         if (test_and_set_bit(pd->id, pdt->tbl)) {
-                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
+                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                          pd->id);
                 return -EINVAL;
         }
@@ -639,11 +763,11 @@
                           struct bnxt_qplib_dpi *dpi)
 {
         if (dpi->dpi >= dpit->max) {
-                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
+                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
                 return -EINVAL;
         }
         if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
-                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
+                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
                          dpi->dpi);
                 return -EINVAL;
         }
@@ -673,31 +797,29 @@
         u32 dbr_len, bytes;

         if (dpit->dbr_bar_reg_iomem) {
-                dev_err(&res->pdev->dev,
-                        "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
+                dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
+                        dbr_bar_reg);
                 return -EALREADY;
         }

         bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
         if (!bar_reg_base) {
-                dev_err(&res->pdev->dev,
-                        "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
+                dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
+                        dbr_bar_reg);
                 return -ENOMEM;
         }

         dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
         if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
-                dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
-                        dbr_len);
+                dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
                 return -ENOMEM;
         }

-        dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
+        dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
                                                   dbr_len);
         if (!dpit->dbr_bar_reg_iomem) {
                 dev_err(&res->pdev->dev,
-                        "QPLIB: FP: DBR BAR region %d mapping failed",
-                        dbr_bar_reg);
+                        "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
                 return -ENOMEM;
         }

@@ -760,15 +882,16 @@
 }

 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                      struct bnxt_qplib_chip_ctx *cctx,
                                       struct bnxt_qplib_stats *stats)
 {
         memset(stats, 0, sizeof(*stats));
         stats->fw_id = -1;
-        stats->size = sizeof(struct ctx_hw_stats);
+        stats->size = cctx->hw_stats_size;
         stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                         &stats->dma_map, GFP_KERNEL);
         if (!stats->dma) {
-                dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
+                dev_err(&pdev->dev, "Stats DMA allocation failed\n");
                 return -ENOMEM;
         }
         return 0;
@@ -794,9 +917,6 @@
         bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
         bnxt_qplib_free_pd_tbl(&res->pd_tbl);
         bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
-
-        res->netdev = NULL;
-        res->pdev = NULL;
 }

 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,