forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/drivers/crypto/cavium/nitrox/nitrox_isr.c
....@@ -6,9 +6,18 @@
66 #include "nitrox_dev.h"
77 #include "nitrox_csr.h"
88 #include "nitrox_common.h"
9
+#include "nitrox_hal.h"
10
+#include "nitrox_mbx.h"
911
12
+/**
13
+ * One vector for each type of ring
14
+ * - NPS packet ring, AQMQ ring and ZQMQ ring
15
+ */
1016 #define NR_RING_VECTORS 3
11
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
17
+#define NR_NON_RING_VECTORS 1
18
+/* base entry for packet ring/port */
19
+#define PKT_RING_MSIX_BASE 0
20
+#define NON_RING_MSIX_BASE 192
1221
1322 /**
1423 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
....@@ -17,13 +26,14 @@
1726 */
1827 static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
1928 {
20
- struct bh_data *slc = data;
21
- union nps_pkt_slc_cnts pkt_slc_cnts;
29
+ struct nitrox_q_vector *qvec = data;
30
+ union nps_pkt_slc_cnts slc_cnts;
31
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
2232
23
- pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
33
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
2434 /* New packet on SLC output port */
25
- if (pkt_slc_cnts.s.slc_int)
26
- tasklet_hi_schedule(&slc->resp_handler);
35
+ if (slc_cnts.s.slc_int)
36
+ tasklet_hi_schedule(&qvec->resp_tasklet);
2737
2838 return IRQ_HANDLED;
2939 }
....@@ -190,56 +200,98 @@
190200 dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
191201 }
192202
193
-/**
194
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
195
- * @ndev: NITROX device
196
- */
197
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
203
+static void nps_core_int_tasklet(unsigned long data)
198204 {
199
- union nps_core_int_active core_int_active;
205
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
206
+ struct nitrox_device *ndev = qvec->ndev;
200207
201
- core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
202
-
203
- if (core_int_active.s.nps_core)
204
- clear_nps_core_err_intr(ndev);
205
-
206
- if (core_int_active.s.nps_pkt)
207
- clear_nps_pkt_err_intr(ndev);
208
-
209
- if (core_int_active.s.pom)
210
- clear_pom_err_intr(ndev);
211
-
212
- if (core_int_active.s.pem)
213
- clear_pem_err_intr(ndev);
214
-
215
- if (core_int_active.s.lbc)
216
- clear_lbc_err_intr(ndev);
217
-
218
- if (core_int_active.s.efl)
219
- clear_efl_err_intr(ndev);
220
-
221
- if (core_int_active.s.bmi)
222
- clear_bmi_err_intr(ndev);
223
-
224
- /* If more work callback the ISR, set resend */
225
- core_int_active.s.resend = 1;
226
- nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
208
+	/* if PF mode, do queue recovery */
209
+ if (ndev->mode == __NDEV_MODE_PF) {
210
+ } else {
211
+ /**
212
+	 * if VF(s) are enabled, communicate the error information
213
+ * to VF(s)
214
+ */
215
+ }
227216 }
228217
218
+/**
219
+ * nps_core_int_isr - interrupt handler for NITROX errors and
220
+ * mailbox communication
221
+ */
229222 static irqreturn_t nps_core_int_isr(int irq, void *data)
230223 {
231
- struct nitrox_device *ndev = data;
224
+ struct nitrox_q_vector *qvec = data;
225
+ struct nitrox_device *ndev = qvec->ndev;
226
+ union nps_core_int_active core_int;
232227
233
- clear_nps_core_int_active(ndev);
228
+ core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
229
+
230
+ if (core_int.s.nps_core)
231
+ clear_nps_core_err_intr(ndev);
232
+
233
+ if (core_int.s.nps_pkt)
234
+ clear_nps_pkt_err_intr(ndev);
235
+
236
+ if (core_int.s.pom)
237
+ clear_pom_err_intr(ndev);
238
+
239
+ if (core_int.s.pem)
240
+ clear_pem_err_intr(ndev);
241
+
242
+ if (core_int.s.lbc)
243
+ clear_lbc_err_intr(ndev);
244
+
245
+ if (core_int.s.efl)
246
+ clear_efl_err_intr(ndev);
247
+
248
+ if (core_int.s.bmi)
249
+ clear_bmi_err_intr(ndev);
250
+
251
+ /* Mailbox interrupt */
252
+ if (core_int.s.mbox)
253
+ nitrox_pf2vf_mbox_handler(ndev);
254
+
255
+ /* If more work callback the ISR, set resend */
256
+ core_int.s.resend = 1;
257
+ nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
234258
235259 return IRQ_HANDLED;
236260 }
237261
238
-static int nitrox_enable_msix(struct nitrox_device *ndev)
262
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
239263 {
240
- struct msix_entry *entries;
241
- char **names;
242
- int i, nr_entries, ret;
264
+ struct pci_dev *pdev = ndev->pdev;
265
+ int i;
266
+
267
+ for (i = 0; i < ndev->num_vecs; i++) {
268
+ struct nitrox_q_vector *qvec;
269
+ int vec;
270
+
271
+ qvec = ndev->qvec + i;
272
+ if (!qvec->valid)
273
+ continue;
274
+
275
+ /* get the vector number */
276
+ vec = pci_irq_vector(pdev, i);
277
+ irq_set_affinity_hint(vec, NULL);
278
+ free_irq(vec, qvec);
279
+
280
+ tasklet_disable(&qvec->resp_tasklet);
281
+ tasklet_kill(&qvec->resp_tasklet);
282
+ qvec->valid = false;
283
+ }
284
+ kfree(ndev->qvec);
285
+ ndev->qvec = NULL;
286
+ pci_free_irq_vectors(pdev);
287
+}
288
+
289
+int nitrox_register_interrupts(struct nitrox_device *ndev)
290
+{
291
+ struct pci_dev *pdev = ndev->pdev;
292
+ struct nitrox_q_vector *qvec;
293
+ int nr_vecs, vec, cpu;
294
+ int ret, i;
243295
244296 /*
245297 * PF MSI-X vectors
....@@ -253,216 +305,153 @@
253305 * ....
254306 * Entry 192: NPS_CORE_INT_ACTIVE
255307 */
256
- nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
257
- entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
258
- GFP_KERNEL, ndev->node);
259
- if (!entries)
260
- return -ENOMEM;
261
-
262
- names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
263
- if (!names) {
264
- kfree(entries);
265
- return -ENOMEM;
308
+ nr_vecs = pci_msix_vec_count(pdev);
309
+ if (nr_vecs < 0) {
310
+ dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
311
+ return nr_vecs;
266312 }
267313
268
- /* fill entires */
269
- for (i = 0; i < (nr_entries - 1); i++)
270
- entries[i].entry = i;
271
-
272
- entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
273
-
274
- for (i = 0; i < nr_entries; i++) {
275
- *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
276
- if (!(*(names + i))) {
277
- ret = -ENOMEM;
278
- goto msix_fail;
279
- }
280
- }
281
- ndev->msix.entries = entries;
282
- ndev->msix.names = names;
283
- ndev->msix.nr_entries = nr_entries;
284
-
285
- ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
286
- ndev->msix.nr_entries);
287
- if (ret) {
288
- dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
289
- ret);
290
- goto msix_fail;
291
- }
292
- return 0;
293
-
294
-msix_fail:
295
- for (i = 0; i < nr_entries; i++)
296
- kfree(*(names + i));
297
-
298
- kfree(entries);
299
- kfree(names);
300
- return ret;
301
-}
302
-
303
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
304
-{
305
- int i;
306
-
307
- if (!ndev->bh.slc)
308
- return;
309
-
310
- for (i = 0; i < ndev->nr_queues; i++) {
311
- struct bh_data *bh = &ndev->bh.slc[i];
312
-
313
- tasklet_disable(&bh->resp_handler);
314
- tasklet_kill(&bh->resp_handler);
315
- }
316
- kfree(ndev->bh.slc);
317
- ndev->bh.slc = NULL;
318
-}
319
-
320
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
321
-{
322
- u32 size;
323
- int i;
324
-
325
- size = ndev->nr_queues * sizeof(struct bh_data);
326
- ndev->bh.slc = kzalloc(size, GFP_KERNEL);
327
- if (!ndev->bh.slc)
328
- return -ENOMEM;
329
-
330
- for (i = 0; i < ndev->nr_queues; i++) {
331
- struct bh_data *bh = &ndev->bh.slc[i];
332
- u64 offset;
333
-
334
- offset = NPS_PKT_SLC_CNTSX(i);
335
- /* pre calculate completion count address */
336
- bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
337
- bh->cmdq = &ndev->pkt_cmdqs[i];
338
-
339
- tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
340
- (unsigned long)bh);
341
- }
342
-
343
- return 0;
344
-}
345
-
346
-static int nitrox_request_irqs(struct nitrox_device *ndev)
347
-{
348
- struct pci_dev *pdev = ndev->pdev;
349
- struct msix_entry *msix_ent = ndev->msix.entries;
350
- int nr_ring_vectors, i = 0, ring, cpu, ret;
351
- char *name;
352
-
353
- /*
354
- * PF MSI-X vectors
355
- *
356
- * Entry 0: NPS PKT ring 0
357
- * Entry 1: AQMQ ring 0
358
- * Entry 2: ZQM ring 0
359
- * Entry 3: NPS PKT ring 1
360
- * ....
361
- * Entry 192: NPS_CORE_INT_ACTIVE
362
- */
363
- nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
364
-
365
- /* request irq for pkt ring/ports only */
366
- while (i < nr_ring_vectors) {
367
- name = *(ndev->msix.names + i);
368
- ring = (i / NR_RING_VECTORS);
369
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
370
- ndev->idx, ring);
371
-
372
- ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
373
- name, &ndev->bh.slc[ring]);
374
- if (ret) {
375
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
376
- msix_ent[i].vector, name);
377
- return ret;
378
- }
379
- cpu = ring % num_online_cpus();
380
- irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
381
-
382
- set_bit(i, ndev->msix.irqs);
383
- i += NR_RING_VECTORS;
384
- }
385
-
386
- /* Request IRQ for NPS_CORE_INT_ACTIVE */
387
- name = *(ndev->msix.names + i);
388
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
389
- ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
390
- if (ret) {
391
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
392
- msix_ent[i].vector, name);
314
+ /* Enable MSI-X */
315
+ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
316
+ if (ret < 0) {
317
+ dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
393318 return ret;
394319 }
395
- set_bit(i, ndev->msix.irqs);
320
+ ndev->num_vecs = nr_vecs;
396321
397
- return 0;
398
-}
399
-
400
-static void nitrox_disable_msix(struct nitrox_device *ndev)
401
-{
402
- struct msix_entry *msix_ent = ndev->msix.entries;
403
- char **names = ndev->msix.names;
404
- int i = 0, ring, nr_ring_vectors;
405
-
406
- nr_ring_vectors = ndev->msix.nr_entries - 1;
407
-
408
- /* clear pkt ring irqs */
409
- while (i < nr_ring_vectors) {
410
- if (test_and_clear_bit(i, ndev->msix.irqs)) {
411
- ring = (i / NR_RING_VECTORS);
412
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
413
- free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
414
- }
415
- i += NR_RING_VECTORS;
322
+ ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
323
+ if (!ndev->qvec) {
324
+ pci_free_irq_vectors(pdev);
325
+ return -ENOMEM;
416326 }
417
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
418
- free_irq(msix_ent[i].vector, ndev);
419
- clear_bit(i, ndev->msix.irqs);
420327
421
- kfree(ndev->msix.entries);
422
- for (i = 0; i < ndev->msix.nr_entries; i++)
423
- kfree(*(names + i));
328
+ /* request irqs for packet rings/ports */
329
+ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
330
+ qvec = &ndev->qvec[i];
424331
425
- kfree(names);
426
- pci_disable_msix(ndev->pdev);
427
-}
332
+ qvec->ring = i / NR_RING_VECTORS;
333
+ if (qvec->ring >= ndev->nr_queues)
334
+ break;
428335
429
-/**
430
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
431
- * @ndev: NITROX device
432
- */
433
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
434
-{
435
- nitrox_disable_msix(ndev);
436
- nitrox_cleanup_pkt_slc_bh(ndev);
437
-}
336
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
337
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
338
+ /* get the vector number */
339
+ vec = pci_irq_vector(pdev, i);
340
+ ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
341
+ if (ret) {
342
+ dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
343
+ qvec->ring);
344
+ goto irq_fail;
345
+ }
346
+ cpu = qvec->ring % num_online_cpus();
347
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
438348
439
-/**
440
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
441
- * @ndev: NITROX device
442
- *
443
- * Return: 0 on success, a negative value on failure.
444
- */
445
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
446
-{
447
- int err;
349
+ tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
350
+ (unsigned long)qvec);
351
+ qvec->valid = true;
352
+ }
448353
449
- err = nitrox_setup_pkt_slc_bh(ndev);
450
- if (err)
451
- return err;
354
+ /* request irqs for non ring vectors */
355
+ i = NON_RING_MSIX_BASE;
356
+ qvec = &ndev->qvec[i];
357
+ qvec->ndev = ndev;
452358
453
- err = nitrox_enable_msix(ndev);
454
- if (err)
455
- goto msix_fail;
456
-
457
- err = nitrox_request_irqs(ndev);
458
- if (err)
359
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
360
+ /* get the vector number */
361
+ vec = pci_irq_vector(pdev, i);
362
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
363
+ if (ret) {
364
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
459365 goto irq_fail;
366
+ }
367
+ cpu = num_online_cpus();
368
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
369
+
370
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
371
+ (unsigned long)qvec);
372
+ qvec->valid = true;
460373
461374 return 0;
462375
463376 irq_fail:
464
- nitrox_disable_msix(ndev);
465
-msix_fail:
466
- nitrox_cleanup_pkt_slc_bh(ndev);
467
- return err;
377
+ nitrox_unregister_interrupts(ndev);
378
+ return ret;
379
+}
380
+
381
+void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
382
+{
383
+ struct pci_dev *pdev = ndev->pdev;
384
+ int i;
385
+
386
+ for (i = 0; i < ndev->num_vecs; i++) {
387
+ struct nitrox_q_vector *qvec;
388
+ int vec;
389
+
390
+ qvec = ndev->qvec + i;
391
+ if (!qvec->valid)
392
+ continue;
393
+
394
+ vec = ndev->iov.msix.vector;
395
+ irq_set_affinity_hint(vec, NULL);
396
+ free_irq(vec, qvec);
397
+
398
+ tasklet_disable(&qvec->resp_tasklet);
399
+ tasklet_kill(&qvec->resp_tasklet);
400
+ qvec->valid = false;
401
+ }
402
+ kfree(ndev->qvec);
403
+ ndev->qvec = NULL;
404
+ pci_disable_msix(pdev);
405
+}
406
+
407
+int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
408
+{
409
+ struct pci_dev *pdev = ndev->pdev;
410
+ struct nitrox_q_vector *qvec;
411
+ int vec, cpu;
412
+ int ret;
413
+
414
+ /**
415
+	 * only the non-ring vector, i.e., Entry 192, is available
416
+ * for PF in SR-IOV mode.
417
+ */
418
+ ndev->iov.msix.entry = NON_RING_MSIX_BASE;
419
+ ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
420
+ if (ret) {
421
+ dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
422
+ NON_RING_MSIX_BASE);
423
+ return ret;
424
+ }
425
+
426
+ qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
427
+ if (!qvec) {
428
+ pci_disable_msix(pdev);
429
+ return -ENOMEM;
430
+ }
431
+ qvec->ndev = ndev;
432
+
433
+ ndev->qvec = qvec;
434
+ ndev->num_vecs = NR_NON_RING_VECTORS;
435
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
436
+ NON_RING_MSIX_BASE);
437
+
438
+ vec = ndev->iov.msix.vector;
439
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
440
+ if (ret) {
441
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
442
+ NON_RING_MSIX_BASE);
443
+ goto iov_irq_fail;
444
+ }
445
+ cpu = num_online_cpus();
446
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
447
+
448
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
449
+ (unsigned long)qvec);
450
+ qvec->valid = true;
451
+
452
+ return 0;
453
+
454
+iov_irq_fail:
455
+ nitrox_sriov_unregister_interrupts(ndev);
456
+ return ret;
468457 }