2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
--- a/kernel/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/kernel/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,9 +4,12 @@
 #include "nitrox_dev.h"
 #include "nitrox_csr.h"
 
+#define PLL_REF_CLK 50
+#define MAX_CSR_RETRIES 10
+
 /**
  * emu_enable_cores - Enable EMU cluster cores.
- * @ndev: N5 device
+ * @ndev: NITROX device
  */
 static void emu_enable_cores(struct nitrox_device *ndev)
 {
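Note on the new constants: every CSR wait loop in this patch becomes bounded. Each path sleeps 100-150 us once, then polls at most MAX_CSR_RETRIES (10) more times with a 50 us delay between reads, so a wedged ring is given up on after roughly 100 + 10 * 50 = 600-650 us instead of spinning forever. PLL_REF_CLK is the 50 MHz PLL reference clock used by nitrox_get_hwinfo() below to derive the core frequency from RST_BOOT[PNR_MUL].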
@@ -31,7 +34,7 @@
 
 /**
  * nitrox_config_emu_unit - configure EMU unit.
- * @ndev: N5 device
+ * @ndev: NITROX device
  */
 void nitrox_config_emu_unit(struct nitrox_device *ndev)
 {
@@ -61,29 +64,26 @@
 static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
 {
         union nps_pkt_in_instr_ctl pkt_in_ctl;
-        union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
         union nps_pkt_in_done_cnts pkt_in_cnts;
+        int max_retries = MAX_CSR_RETRIES;
         u64 offset;
 
+        /* step 1: disable the ring, clear enable bit */
         offset = NPS_PKT_IN_INSTR_CTLX(ring);
-        /* disable the ring */
         pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
         pkt_in_ctl.s.enb = 0;
         nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
-        usleep_range(100, 150);
 
-        /* wait to clear [ENB] */
+        /* step 2: wait to clear [ENB] */
+        usleep_range(100, 150);
         do {
                 pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
-        } while (pkt_in_ctl.s.enb);
+                if (!pkt_in_ctl.s.enb)
+                        break;
+                udelay(50);
+        } while (max_retries--);
 
-        /* clear off door bell counts */
-        offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
-        pkt_in_dbell.value = 0;
-        pkt_in_dbell.s.dbell = 0xffffffff;
-        nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
-
-        /* clear done counts */
+        /* step 3: clear done counts */
         offset = NPS_PKT_IN_DONE_CNTSX(ring);
         pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
         nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
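The same bounded-poll idiom is repeated in every reset/enable path below. A minimal sketch of how it could be factored into a single helper, assuming only nitrox_read_csr() and the constants above are available (nitrox_wait_csr_bit() is a hypothetical name, not part of this patch):

    static bool nitrox_wait_csr_bit(struct nitrox_device *ndev, u64 offset,
                                    u64 mask, bool want_set)
    {
            int max_retries = MAX_CSR_RETRIES;
            u64 value;

            do {
                    /* re-read the CSR until the masked bits reach the
                     * wanted state or the retry budget is exhausted
                     */
                    value = nitrox_read_csr(ndev, offset);
                    if (!!(value & mask) == want_set)
                            return true;
                    udelay(50);
            } while (max_retries--);

            return false;   /* caller decides how to report the timeout */
    }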
@@ -93,6 +93,7 @@
 void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
 {
         union nps_pkt_in_instr_ctl pkt_in_ctl;
+        int max_retries = MAX_CSR_RETRIES;
         u64 offset;
 
         /* 64-byte instruction size */
@@ -105,25 +106,31 @@
         /* wait for set [ENB] */
         do {
                 pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
-        } while (!pkt_in_ctl.s.enb);
+                if (pkt_in_ctl.s.enb)
+                        break;
+                udelay(50);
+        } while (max_retries--);
 }
 
 /**
  * nitrox_config_pkt_input_rings - configure Packet Input Rings
- * @ndev: N5 device
+ * @ndev: NITROX device
  */
 void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
 {
         int i;
 
         for (i = 0; i < ndev->nr_queues; i++) {
-                struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+                struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
                 union nps_pkt_in_instr_rsize pkt_in_rsize;
+                union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
                 u64 offset;
 
                 reset_pkt_input_ring(ndev, i);
 
-                /* configure ring base address 16-byte aligned,
+                /**
+                 * step 4:
+                 * configure ring base address 16-byte aligned,
                  * size and interrupt threshold.
                  */
                 offset = NPS_PKT_IN_INSTR_BADDRX(i);
@@ -139,6 +146,13 @@
                 offset = NPS_PKT_IN_INT_LEVELSX(i);
                 nitrox_write_csr(ndev, offset, 0xffffffff);
 
+                /* step 5: clear off door bell counts */
+                offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
+                pkt_in_dbell.value = 0;
+                pkt_in_dbell.s.dbell = 0xffffffff;
+                nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+
+                /* enable the ring */
                 enable_pkt_input_ring(ndev, i);
         }
 }
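Note that the doorbell clear moved out of reset_pkt_input_ring() and into the per-ring loop as step 5: the all-ones write to the DBELL field discards any doorbell count that accumulated while the ring was disabled, and it now happens after the base address, size and interrupt threshold are programmed but before the ring is re-enabled, so the ring always comes up with a clean doorbell.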
@@ -147,21 +161,26 @@
 {
         union nps_pkt_slc_ctl pkt_slc_ctl;
         union nps_pkt_slc_cnts pkt_slc_cnts;
+        int max_retries = MAX_CSR_RETRIES;
         u64 offset;
 
-        /* disable slc port */
+        /* step 1: disable slc port */
         offset = NPS_PKT_SLC_CTLX(port);
         pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
         pkt_slc_ctl.s.enb = 0;
         nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
-        usleep_range(100, 150);
 
+        /* step 2 */
+        usleep_range(100, 150);
         /* wait to clear [ENB] */
         do {
                 pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
-        } while (pkt_slc_ctl.s.enb);
+                if (!pkt_slc_ctl.s.enb)
+                        break;
+                udelay(50);
+        } while (max_retries--);
 
-        /* clear slc counters */
+        /* step 3: clear slc counters */
         offset = NPS_PKT_SLC_CNTSX(port);
         pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
         nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
@@ -171,12 +190,12 @@
 void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
 {
         union nps_pkt_slc_ctl pkt_slc_ctl;
+        int max_retries = MAX_CSR_RETRIES;
         u64 offset;
 
         offset = NPS_PKT_SLC_CTLX(port);
         pkt_slc_ctl.value = 0;
         pkt_slc_ctl.s.enb = 1;
-
         /*
          * 8 trailing 0x00 bytes will be added
          * to the end of the outgoing packet.
@@ -189,23 +208,27 @@
         /* wait to set [ENB] */
         do {
                 pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
-        } while (!pkt_slc_ctl.s.enb);
+                if (pkt_slc_ctl.s.enb)
+                        break;
+                udelay(50);
+        } while (max_retries--);
 }
 
-static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
-                                           int port)
+static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
 {
         union nps_pkt_slc_int_levels pkt_slc_int;
         u64 offset;
 
         reset_pkt_solicit_port(ndev, port);
 
+        /* step 4: configure interrupt levels */
         offset = NPS_PKT_SLC_INT_LEVELSX(port);
         pkt_slc_int.value = 0;
         /* time interrupt threshold */
         pkt_slc_int.s.timet = 0x3fffff;
         nitrox_write_csr(ndev, offset, pkt_slc_int.value);
 
+        /* enable the solicit port */
         enable_pkt_solicit_port(ndev, port);
 }
 
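The 0x3fffff written to [TIMET] is the all-ones value of that field, i.e. the largest time-based interrupt threshold it can hold; presumably this keeps count-based completion as the primary interrupt trigger for solicit ports.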
@@ -214,16 +237,16 @@
         int i;
 
         for (i = 0; i < ndev->nr_queues; i++)
-                config_single_pkt_solicit_port(ndev, i);
+                config_pkt_solicit_port(ndev, i);
 }
 
 /**
- * enable_nps_interrupts - enable NPS interrutps
- * @ndev: N5 device.
+ * enable_nps_core_interrupts - enable NPS core interrupts
+ * @ndev: NITROX device.
  *
- * This includes NPS core, packet in and slc interrupts.
+ * This includes NPS core interrupts.
  */
-static void enable_nps_interrupts(struct nitrox_device *ndev)
+static void enable_nps_core_interrupts(struct nitrox_device *ndev)
 {
         union nps_core_int_ena_w1s core_int;
 
@@ -235,7 +258,33 @@
         core_int.s.npco_dma_malform = 1;
         core_int.s.host_nps_wr_err = 1;
         nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
+}
 
+void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
+{
+        union nps_core_gbl_vfcfg core_gbl_vfcfg;
+
+        /* endian control information */
+        nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
+
+        /* disable ILK interface */
+        core_gbl_vfcfg.value = 0;
+        core_gbl_vfcfg.s.ilk_disable = 1;
+        core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
+        nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
+
+        /* enable nps core interrupts */
+        enable_nps_core_interrupts(ndev);
+}
+
+/**
+ * enable_nps_pkt_interrupts - enable NPS packet interrupts
+ * @ndev: NITROX device.
+ *
+ * This includes NPS packet in and slc interrupts.
+ */
+static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
+{
         /* NPS packet in ring interrupts */
         nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
         nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
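With nitrox_config_nps_unit() split in two, callers configure the core and packet halves separately. A minimal sketch of the expected call order, assuming a probe-time caller (nitrox_pf_hw_init() is an illustrative name, not from this patch):

    static void nitrox_pf_hw_init(struct nitrox_device *ndev)
    {
            /* endianness, PF/VF mode and core error interrupts first */
            nitrox_config_nps_core_unit(ndev);

            /* then the packet path: input rings, solicit ports, packet IRQs */
            nitrox_config_nps_pkt_unit(ndev);
    }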
@@ -246,24 +295,126 @@
         nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
 }
 
-void nitrox_config_nps_unit(struct nitrox_device *ndev)
+void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
 {
-        union nps_core_gbl_vfcfg core_gbl_vfcfg;
-
-        /* endian control information */
-        nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
-
-        /* disable ILK interface */
-        core_gbl_vfcfg.value = 0;
-        core_gbl_vfcfg.s.ilk_disable = 1;
-        core_gbl_vfcfg.s.cfg = PF_MODE;
-        nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
         /* config input and solicit ports */
         nitrox_config_pkt_input_rings(ndev);
         nitrox_config_pkt_solicit_ports(ndev);
 
-        /* enable interrupts */
-        enable_nps_interrupts(ndev);
+        /* enable nps packet interrupts */
+        enable_nps_pkt_interrupts(ndev);
+}
+
+static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
+{
+        union aqmq_en aqmq_en_reg;
+        union aqmq_activity_stat activity_stat;
+        union aqmq_cmp_cnt cmp_cnt;
+        int max_retries = MAX_CSR_RETRIES;
+        u64 offset;
+
+        /* step 1: disable the queue */
+        offset = AQMQ_ENX(ring);
+        aqmq_en_reg.value = 0;
+        aqmq_en_reg.queue_enable = 0;
+        nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
+
+        /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
+        usleep_range(100, 150);
+        offset = AQMQ_ACTIVITY_STATX(ring);
+        do {
+                activity_stat.value = nitrox_read_csr(ndev, offset);
+                if (!activity_stat.queue_active)
+                        break;
+                udelay(50);
+        } while (max_retries--);
+
+        /* step 3: clear commands completed count */
+        offset = AQMQ_CMP_CNTX(ring);
+        cmp_cnt.value = nitrox_read_csr(ndev, offset);
+        nitrox_write_csr(ndev, offset, cmp_cnt.value);
+        usleep_range(50, 100);
+}
+
+void enable_aqm_ring(struct nitrox_device *ndev, int ring)
+{
+        union aqmq_en aqmq_en_reg;
+        u64 offset;
+
+        offset = AQMQ_ENX(ring);
+        aqmq_en_reg.value = 0;
+        aqmq_en_reg.queue_enable = 1;
+        nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
+        usleep_range(50, 100);
+}
+
+void nitrox_config_aqm_rings(struct nitrox_device *ndev)
+{
+        int ring;
+
+        for (ring = 0; ring < ndev->nr_queues; ring++) {
+                struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
+                union aqmq_drbl drbl;
+                union aqmq_qsz qsize;
+                union aqmq_cmp_thr cmp_thr;
+                u64 offset;
+
+                /* steps 1 - 3 */
+                reset_aqm_ring(ndev, ring);
+
+                /* step 4: clear doorbell count of ring */
+                offset = AQMQ_DRBLX(ring);
+                drbl.value = 0;
+                drbl.dbell_count = 0xFFFFFFFF;
+                nitrox_write_csr(ndev, offset, drbl.value);
+
+                /* step 5: configure host ring details */
+
+                /* set host address for next command of ring */
+                offset = AQMQ_NXT_CMDX(ring);
+                nitrox_write_csr(ndev, offset, 0ULL);
+
+                /* set host address of ring base */
+                offset = AQMQ_BADRX(ring);
+                nitrox_write_csr(ndev, offset, cmdq->dma);
+
+                /* set ring size */
+                offset = AQMQ_QSZX(ring);
+                qsize.value = 0;
+                qsize.host_queue_size = ndev->qlen;
+                nitrox_write_csr(ndev, offset, qsize.value);
+
+                /* set command completion threshold */
+                offset = AQMQ_CMP_THRX(ring);
+                cmp_thr.value = 0;
+                cmp_thr.commands_completed_threshold = 1;
+                nitrox_write_csr(ndev, offset, cmp_thr.value);
+
+                /* step 6: enable the queue */
+                enable_aqm_ring(ndev, ring);
+        }
+}
+
+static void enable_aqm_interrupts(struct nitrox_device *ndev)
+{
+        /* set interrupt enable bits */
+        nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
+        nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
+}
+
+void nitrox_config_aqm_unit(struct nitrox_device *ndev)
+{
+        /* config aqm command queues */
+        nitrox_config_aqm_rings(ndev);
+
+        /* enable aqm interrupts */
+        enable_aqm_interrupts(ndev);
 }
 
 void nitrox_config_pom_unit(struct nitrox_device *ndev)
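AQM notes: each queue is brought up with COMMANDS_COMPLETED_THRESHOLD = 1, i.e. the device is asked to signal after every completed command, and AQMQ_CMP_CNTX is cleared with the same read-then-write-back idiom the packet-ring resets use for their counters. The writes in enable_aqm_interrupts() target *_ENA_W1S (write-1-to-set) registers, so the all-ones values enable the doorbell-overflow, DMA-read-error and execution-error interrupts.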
@@ -282,8 +433,8 @@
 }
 
 /**
- * nitrox_config_rand_unit - enable N5 random number unit
- * @ndev: N5 device
+ * nitrox_config_rand_unit - enable NITROX random number unit
+ * @ndev: NITROX device
  */
 void nitrox_config_rand_unit(struct nitrox_device *ndev)
 {
@@ -359,6 +510,7 @@
 {
         union lbc_inval_ctl lbc_ctl;
         union lbc_inval_status lbc_stat;
+        int max_retries = MAX_CSR_RETRIES;
         u64 offset;
 
         /* invalidate LBC */
@@ -368,10 +520,12 @@
         nitrox_write_csr(ndev, offset, lbc_ctl.value);
 
         offset = LBC_INVAL_STATUS;
-
         do {
                 lbc_stat.value = nitrox_read_csr(ndev, offset);
-        } while (!lbc_stat.s.done);
+                if (lbc_stat.s.done)
+                        break;
+                udelay(50);
+        } while (max_retries--);
 }
 
 void nitrox_config_lbc_unit(struct nitrox_device *ndev)
@@ -400,3 +554,125 @@
         offset = LBC_ELM_VF65_128_INT_ENA_W1S;
         nitrox_write_csr(ndev, offset, (~0ULL));
 }
+
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
+{
+        union nps_core_gbl_vfcfg vfcfg;
+
+        vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+        vfcfg.s.cfg = mode & 0x7;
+
+        nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+}
+
+static const char *get_core_option(u8 se_cores, u8 ae_cores)
+{
+        const char *option = "";
+
+        if (ae_cores == AE_MAX_CORES) {
+                switch (se_cores) {
+                case SE_MAX_CORES:
+                        option = "60";
+                        break;
+                case 40:
+                        option = "60s";
+                        break;
+                }
+        } else if (ae_cores == (AE_MAX_CORES / 2)) {
+                option = "30";
+        } else {
+                option = "60i";
+        }
+
+        return option;
+}
+
+static const char *get_feature_option(u8 zip_cores, int core_freq)
+{
+        if (zip_cores == 0)
+                return "";
+        else if (zip_cores < ZIP_MAX_CORES)
+                return "-C15";
+
+        if (core_freq >= 850)
+                return "-C45";
+        else if (core_freq >= 750)
+                return "-C35";
+        else if (core_freq >= 550)
+                return "-C25";
+
+        return "";
+}
+
+void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+        union emu_fuse_map emu_fuse;
+        union rst_boot rst_boot;
+        union fus_dat1 fus_dat1;
+        unsigned char name[IFNAMSIZ * 2] = {};
+        int i, dead_cores;
+        u64 offset;
+
+        /* get core frequency */
+        offset = RST_BOOT;
+        rst_boot.value = nitrox_read_csr(ndev, offset);
+        ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
+
+        for (i = 0; i < NR_CLUSTERS; i++) {
+                offset = EMU_FUSE_MAPX(i);
+                emu_fuse.value = nitrox_read_csr(ndev, offset);
+                if (emu_fuse.s.valid) {
+                        dead_cores = hweight32(emu_fuse.s.ae_fuse);
+                        ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+                        dead_cores = hweight16(emu_fuse.s.se_fuse);
+                        ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+                }
+        }
+        /* find zip hardware availability */
+        offset = FUS_DAT1;
+        fus_dat1.value = nitrox_read_csr(ndev, offset);
+        if (!fus_dat1.nozip) {
+                dead_cores = hweight8(fus_dat1.zip_info);
+                ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
+        }
+
+        /* determine the partname
+         * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
+         */
+        snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
+                 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
+                 ndev->hw.freq,
+                 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
+                 ndev->hw.revision_id);
+
+        /* copy partname */
+        strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+}
+
+void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+        u64 value = ~0ULL;
+        u64 reg_addr;
+
+        /* Mailbox interrupt low enable set register */
+        reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
+        nitrox_write_csr(ndev, reg_addr, value);
+
+        /* Mailbox interrupt high enable set register */
+        reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
+        nitrox_write_csr(ndev, reg_addr, value);
+}
+
+void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+        u64 value = ~0ULL;
+        u64 reg_addr;
+
+        /* Mailbox interrupt low enable clear register */
+        reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
+        nitrox_write_csr(ndev, reg_addr, value);
+
+        /* Mailbox interrupt high enable clear register */
+        reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
+        nitrox_write_csr(ndev, reg_addr, value);
+}
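Worked example for nitrox_get_hwinfo(), with illustrative register values: if RST_BOOT[PNR_MUL] reads 14, the core clock is (14 + 3) * PLL_REF_CLK = 17 * 50 = 850 MHz; with all SE, AE and ZIP cores unfused, get_core_option() returns "60" and get_feature_option() returns "-C45", so the part name formats as "CNN5560-850BG676-C45-1.<rev>". The mailbox helpers form a matching pair: writing all-ones to the *_ENA_W1S registers sets the PF/VF mailbox interrupt enables, and the same write to the *_ENA_W1C registers clears them.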