forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -32,40 +32,8 @@
 #define OCTNIC_MAX_SG MAX_SKB_FRAGS

 /**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-void lio_if_cfg_callback(struct octeon_device *oct,
-			 u32 status __attribute__((unused)), void *buf)
-{
-	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-	struct liquidio_if_cfg_context *ctx;
-	struct liquidio_if_cfg_resp *resp;
-
-	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
-	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
-	oct = lio_get_device(ctx->octeon_id);
-	if (resp->status)
-		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
-			CVM_CAST64(resp->status));
-	WRITE_ONCE(ctx->cond, 1);
-
-	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
-		 resp->cfg_info.liquidio_firmware_version);
-
-	/* This barrier is required to be sure that the response has been
-	 * written fully before waking up the handler
-	 */
-	wmb();
-
-	wake_up_interruptible(&ctx->wc);
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
+ * lio_delete_glists - Delete gather lists
+ * @lio: per-network private data
  */
 void lio_delete_glists(struct lio *lio)
 {
@@ -105,8 +73,10 @@
 }

 /**
- * \brief Setup gather lists
- * @param lio per-network private data
+ * lio_setup_glists - Setup gather lists
+ * @oct: octeon_device
+ * @lio: per-network private data
+ * @num_iqs: count of iqs to allocate
  */
 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
 {
@@ -198,14 +168,15 @@
 	nctrl.ncmd.s.cmd = cmd;
 	nctrl.ncmd.s.param1 = param1;
 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
 			ret);
+		if (ret > 0)
+			ret = -EIO;
 	}
 	return ret;
 }
@@ -285,15 +256,7 @@
 	struct octeon_device *oct = lio->oct_dev;
 	u8 *mac;

-	if (nctrl->completion && nctrl->response_code) {
-		/* Signal whoever is interested that the response code from the
-		 * firmware has arrived.
-		 */
-		WRITE_ONCE(*nctrl->response_code, nctrl->status);
-		complete(nctrl->completion);
-	}
-
-	if (nctrl->status)
+	if (nctrl->sc_status)
 		return;

 	switch (nctrl->ncmd.s.cmd) {
@@ -464,56 +427,73 @@
  */
 }

+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+				  struct octeon_droq *droq)
+{
+	struct net_device *netdev = oct->props[0].netdev;
+	struct lio *lio = GET_LIO(netdev);
+	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
+
+	queue_delayed_work(wq->wq, &wq->wk.work,
+			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
 {
 	struct cavium_wk *wk = (struct cavium_wk *)work;
 	struct lio *lio = (struct lio *)wk->ctxptr;
 	struct octeon_device *oct = lio->oct_dev;
-	struct octeon_droq *droq;
-	int q, q_no = 0;
+	int q_no = wk->ctxul;
+	struct octeon_droq *droq = oct->droq[q_no];

-	if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
-		for (q = 0; q < lio->linfo.num_rxpciq; q++) {
-			q_no = lio->linfo.rxpciq[q].s.q_no;
-			droq = oct->droq[q_no];
-			if (!droq)
-				continue;
-			octeon_droq_check_oom(droq);
-		}
-	}
-	queue_delayed_work(lio->rxq_status_wq.wq,
-			   &lio->rxq_status_wq.wk.work,
-			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
+		return;
+
+	if (octeon_retry_droq_refill(droq))
+		octeon_schedule_rxq_oom_work(oct, droq);
 }

 int setup_rx_oom_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
+	struct cavium_wq *wq;
+	int q, q_no;

-	lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
-						WQ_MEM_RECLAIM, 0);
-	if (!lio->rxq_status_wq.wq) {
-		dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
-		return -ENOMEM;
+	for (q = 0; q < oct->num_oqs; q++) {
+		q_no = lio->linfo.rxpciq[q].s.q_no;
+		wq = &lio->rxq_status_wq[q_no];
+		wq->wq = alloc_workqueue("rxq-oom-status",
+					 WQ_MEM_RECLAIM, 0);
+		if (!wq->wq) {
+			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+			return -ENOMEM;
+		}
+
+		INIT_DELAYED_WORK(&wq->wk.work,
+				  octnet_poll_check_rxq_oom_status);
+		wq->wk.ctxptr = lio;
+		wq->wk.ctxul = q_no;
 	}
-	INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
-			  octnet_poll_check_rxq_oom_status);
-	lio->rxq_status_wq.wk.ctxptr = lio;
-	queue_delayed_work(lio->rxq_status_wq.wq,
-			   &lio->rxq_status_wq.wk.work,
-			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+
 	return 0;
 }

 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
 {
 	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct cavium_wq *wq;
+	int q_no;

-	if (lio->rxq_status_wq.wq) {
-		cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
-		flush_workqueue(lio->rxq_status_wq.wq);
-		destroy_workqueue(lio->rxq_status_wq.wq);
+	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+		wq = &lio->rxq_status_wq[q_no];
+		if (wq->wq) {
+			cancel_delayed_work_sync(&wq->wk.work);
+			flush_workqueue(wq->wq);
+			destroy_workqueue(wq->wq);
+			wq->wq = NULL;
+		}
 	}
 }

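
Note on the hunk above: the OOM-poll rework changes both granularity and cost. The old code re-armed one delayed work unconditionally every LIO_OOM_POLL_INTERVAL_MS and scanned every rx queue on each pass; the new code gives each rx queue its own cavium_wq slot and re-arms only while a refill is still outstanding. A sketch of the resulting lifecycle, inferred only from what this hunk shows (in particular, octeon_retry_droq_refill() returning nonzero is read here as "refill still incomplete"):

/*
 * setup_rx_oom_poll_fn()
 *   -> per rx queue: alloc_workqueue() + INIT_DELAYED_WORK()
 *      (wk.ctxptr = lio, wk.ctxul = q_no; nothing is queued yet)
 *
 * octeon_schedule_rxq_oom_work(oct, droq)   <- kicked when a queue hits OOM
 *   -> queue_delayed_work(..., LIO_OOM_POLL_INTERVAL_MS)
 *
 * octnet_poll_check_rxq_oom_status()        <- runs once per kick
 *   -> octeon_retry_droq_refill(droq)
 *      nonzero -> octeon_schedule_rxq_oom_work() again
 *      zero    -> work goes idle until the next kick
 *
 * cleanup_rx_oom_poll_fn()
 *   -> cancel_delayed_work_sync() + destroy_workqueue() per queue
 */

So an idle queue costs nothing between kicks, where the old single worker polled all queues forever.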
@@ -543,12 +523,12 @@
 }

 /**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
+ * octeon_setup_droq - Setup output queue
+ * @oct: octeon device
+ * @q_no: which queue
+ * @num_descs: how many descriptors
+ * @desc_size: size of each descriptor
+ * @app_ctx: application context
  */
 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
 			     int desc_size, void *app_ctx)
@@ -577,16 +557,17 @@
 	return ret_val;
 }

-/** Routine to push packets arriving on Octeon interface upto network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
+/**
+ * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
+ * @octeon_id: octeon device id.
+ * @skbuff: skbuff struct to be passed to network layer.
+ * @len: size of total data received.
+ * @rh: Control header associated with the packet
+ * @param: additional control data with the packet
+ * @arg: farg registered in droq_ops
  */
 static void
-liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+liquidio_push_packet(u32 __maybe_unused octeon_id,
 		     void *skbuff,
 		     u32 len,
 		     union octeon_rh *rh,
@@ -683,7 +664,8 @@
 	    (((rh->r_dh.encap_on) &&
 	      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
 	     (!(rh->r_dh.encap_on) &&
-	      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+	      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
+	       CNNIC_CSUM_VERIFIED))))
 		/* checksum has already been verified */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
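
Note on the hunk above: the "== CNNIC_CSUM_VERIFIED" change is the classic multi-bit-mask pitfall. "x & mask" is true when ANY bit of the mask is set, while "(x & mask) == mask" requires ALL of them. A minimal illustration, with flag values assumed purely for the example (the fix only makes sense if CNNIC_CSUM_VERIFIED combines more than one verification bit, e.g. an IP bit and an L4 bit):

/* Illustrative values only -- not the driver's real definitions. */
#define IPSUM_OK 0x1
#define L4SUM_OK 0x2
#define CSUM_OK  (IPSUM_OK | L4SUM_OK)

u32 v = IPSUM_OK;	/* only the IP checksum was verified */

v & CSUM_OK;			/* nonzero: the old test accepted this     */
(v & CSUM_OK) == CSUM_OK;	/* false: the new test demands both bits   */

So before the fix, skb->ip_summed could be set to CHECKSUM_UNNECESSARY with only one of the two checksums actually verified by the firmware.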
@@ -719,8 +701,8 @@
 }

 /**
- * \brief wrapper for calling napi_schedule
- * @param param parameters to pass to napi_schedule
+ * napi_schedule_wrapper - wrapper for calling napi_schedule
+ * @param: parameters to pass to napi_schedule
  *
  * Used when scheduling on different CPUs
  */
@@ -732,8 +714,8 @@
 }

 /**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
+ * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
+ * @arg: pointer to octeon output queue
  */
 static void liquidio_napi_drv_callback(void *arg)
 {
@@ -758,9 +740,9 @@
 }

 /**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
+ * liquidio_napi_poll - Entry point for NAPI polling
+ * @napi: NAPI structure
+ * @budget: maximum number of items to process
  */
 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 {
@@ -803,7 +785,6 @@
 	if ((work_done < budget && tx_done) ||
 	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
 	    (droq->pkt_count >= MAX_REG_CNT)) {
-		tx_done = 1;
 		napi_complete_done(napi, work_done);

 		octeon_enable_irq(droq->oct_dev, droq->q_no);
@@ -814,9 +795,11 @@
 }

 /**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface index
+ * liquidio_setup_io_queues - Setup input and output queues
+ * @octeon_dev: octeon device
+ * @ifidx: Interface index
+ * @num_iqs: input io queue count
+ * @num_oqs: output io queue count
  *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
@@ -949,7 +932,7 @@
 }

 irqreturn_t
-liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
 {
 	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
 	struct octeon_device *oct = ioq_vector->oct_dev;
@@ -965,8 +948,8 @@
 }

 /**
- * \brief Droq packet processor sceduler
- * @param oct octeon device
+ * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
+ * @oct: octeon device
  */
 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
 {
@@ -994,13 +977,12 @@
 }

 /**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
+ * liquidio_legacy_intr_handler - Interrupt handler for octeon
+ * @irq: unused
+ * @dev: octeon device
  */
 static
-irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
-					 void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
 {
 	struct octeon_device *oct = (struct octeon_device *)dev;
 	irqreturn_t ret;
@@ -1021,8 +1003,9 @@
 }

 /**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
+ * octeon_setup_interrupt - Setup interrupt for octeon device
+ * @oct: octeon device
+ * @num_ioqs: number of queues
  *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
  */
@@ -1105,7 +1088,7 @@
 	dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

 	num_ioq_vectors = oct->num_msix_irqs;
-	/** For PF, there is one non-ioq interrupt handler */
+	/* For PF, there is one non-ioq interrupt handler */
 	if (OCTEON_CN23XX_PF(oct)) {
 		num_ioq_vectors -= 1;

@@ -1148,13 +1131,13 @@
 			dev_err(&oct->pci_dev->dev,
 				"Request_irq failed for MSIX interrupt Error: %d\n",
 				irqret);
-			/** Freeing the non-ioq irq vector here . */
+			/* Freeing the non-ioq irq vector here. */
 			free_irq(msix_entries[num_ioq_vectors].vector,
 				 oct);

 			while (i) {
 				i--;
-				/** clearing affinity mask. */
+				/* clearing affinity mask. */
 				irq_set_affinity_hint(
 					msix_entries[i].vector,
 					NULL);
@@ -1218,54 +1201,31 @@
 	return 0;
 }

-static void liquidio_change_mtu_completion(struct octeon_device *oct,
-					   u32 status, void *buf)
-{
-	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-	struct liquidio_if_cfg_context *ctx;
-
-	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
-	if (status) {
-		dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
-			CVM_CAST64(status));
-		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
-	} else {
-		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
-	}
-
-	/* This barrier is required to be sure that the response has been
-	 * written fully before waking up the handler
-	 */
-	wmb();
-
-	wake_up_interruptible(&ctx->wc);
-}
-
 /**
- * \brief Net device change_mtu
- * @param netdev network device
+ * liquidio_change_mtu - Net device change_mtu
+ * @netdev: network device
+ * @new_mtu: the new max transmit unit size
  */
 int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct = lio->oct_dev;
-	struct liquidio_if_cfg_context *ctx;
 	struct octeon_soft_command *sc;
 	union octnet_cmd *ncmd;
-	int ctx_size;
 	int ret = 0;

-	ctx_size = sizeof(struct liquidio_if_cfg_context);
 	sc = (struct octeon_soft_command *)
-		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
+		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
+	if (!sc) {
+		netif_info(lio, rx_err, lio->netdev,
+			   "Failed to allocate soft command\n");
+		return -ENOMEM;
+	}

 	ncmd = (union octnet_cmd *)sc->virtdptr;
-	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

-	WRITE_ONCE(ctx->cond, 0);
-	ctx->octeon_id = lio_get_device_id(oct);
-	init_waitqueue_head(&ctx->wc);
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;

 	ncmd->u64 = 0;
 	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
@@ -1278,28 +1238,28 @@
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 				    OPCODE_NIC_CMD, 0, 0, 0);

-	sc->callback = liquidio_change_mtu_completion;
-	sc->callback_arg = sc;
-	sc->wait_time = 100;
-
 	ret = octeon_send_soft_command(oct, sc);
 	if (ret == IQ_SEND_FAILED) {
 		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
+		octeon_free_soft_command(oct, sc);
 		return -EINVAL;
 	}
 	/* Sleep on a wait queue till the cond flag indicates that the
 	 * response arrived or timed-out.
 	 */
-	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
-	    ctx->cond == LIO_CHANGE_MTU_FAIL) {
-		octeon_free_soft_command(oct, sc);
+	ret = wait_for_sc_completion_timeout(oct, sc, 0);
+	if (ret)
+		return ret;
+
+	if (sc->sc_status) {
+		WRITE_ONCE(sc->caller_is_done, true);
 		return -EINVAL;
 	}

 	netdev->mtu = new_mtu;
 	lio->mtu = new_mtu;

-	octeon_free_soft_command(oct, sc);
+	WRITE_ONCE(sc->caller_is_done, true);
 	return 0;
 }

@@ -1333,8 +1293,6 @@
 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
 	struct oct_nic_stats_resp *resp =
 	    (struct oct_nic_stats_resp *)sc->virtrptr;
-	struct oct_nic_stats_ctrl *ctrl =
-	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
 	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
 	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
 	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
14221380
14231381 resp->status = 1;
14241382 } else {
1383
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
14251384 resp->status = -1;
14261385 }
1427
- complete(&ctrl->complete);
14281386 }
14291387
1430
-int octnet_get_link_stats(struct net_device *netdev)
1388
+static int lio_fetch_vf_stats(struct lio *lio)
14311389 {
1432
- struct lio *lio = GET_LIO(netdev);
14331390 struct octeon_device *oct_dev = lio->oct_dev;
14341391 struct octeon_soft_command *sc;
1435
- struct oct_nic_stats_ctrl *ctrl;
1436
- struct oct_nic_stats_resp *resp;
1392
+ struct oct_nic_vf_stats_resp *resp;
1393
+
14371394 int retval;
14381395
14391396 /* Alloc soft command */
14401397 sc = (struct octeon_soft_command *)
14411398 octeon_alloc_soft_command(oct_dev,
14421399 0,
1443
- sizeof(struct oct_nic_stats_resp),
1444
- sizeof(struct octnic_ctrl_pkt));
1400
+ sizeof(struct oct_nic_vf_stats_resp),
1401
+ 0);
14451402
1446
- if (!sc)
1447
- return -ENOMEM;
1403
+ if (!sc) {
1404
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1405
+ retval = -ENOMEM;
1406
+ goto lio_fetch_vf_stats_exit;
1407
+ }
1408
+
1409
+ resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
1410
+ memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
1411
+
1412
+ init_completion(&sc->complete);
1413
+ sc->sc_status = OCTEON_REQUEST_PENDING;
1414
+
1415
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1416
+
1417
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1418
+ OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
1419
+
1420
+ retval = octeon_send_soft_command(oct_dev, sc);
1421
+ if (retval == IQ_SEND_FAILED) {
1422
+ octeon_free_soft_command(oct_dev, sc);
1423
+ goto lio_fetch_vf_stats_exit;
1424
+ }
1425
+
1426
+ retval =
1427
+ wait_for_sc_completion_timeout(oct_dev, sc,
1428
+ (2 * LIO_SC_MAX_TMO_MS));
1429
+ if (retval) {
1430
+ dev_err(&oct_dev->pci_dev->dev,
1431
+ "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
1432
+ goto lio_fetch_vf_stats_exit;
1433
+ }
1434
+
1435
+ if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1436
+ octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
1437
+ (sizeof(u64)) >> 3);
1438
+
1439
+ if (resp->spoofmac_cnt != 0) {
1440
+ dev_warn(&oct_dev->pci_dev->dev,
1441
+ "%llu Spoofed packets detected\n",
1442
+ resp->spoofmac_cnt);
1443
+ }
1444
+ }
1445
+ WRITE_ONCE(sc->caller_is_done, 1);
1446
+
1447
+lio_fetch_vf_stats_exit:
1448
+ return retval;
1449
+}
1450
+
1451
+void lio_fetch_stats(struct work_struct *work)
1452
+{
1453
+ struct cavium_wk *wk = (struct cavium_wk *)work;
1454
+ struct lio *lio = wk->ctxptr;
1455
+ struct octeon_device *oct_dev = lio->oct_dev;
1456
+ struct octeon_soft_command *sc;
1457
+ struct oct_nic_stats_resp *resp;
1458
+ unsigned long time_in_jiffies;
1459
+ int retval;
1460
+
1461
+ if (OCTEON_CN23XX_PF(oct_dev)) {
1462
+ /* report spoofchk every 2 seconds */
1463
+ if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
1464
+ (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
1465
+ oct_dev->sriov_info.num_vfs_alloced) {
1466
+ lio_fetch_vf_stats(lio);
1467
+ }
1468
+
1469
+ oct_dev->vfstats_poll++;
1470
+ }
1471
+
1472
+ /* Alloc soft command */
1473
+ sc = (struct octeon_soft_command *)
1474
+ octeon_alloc_soft_command(oct_dev,
1475
+ 0,
1476
+ sizeof(struct oct_nic_stats_resp),
1477
+ 0);
1478
+
1479
+ if (!sc) {
1480
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1481
+ goto lio_fetch_stats_exit;
1482
+ }
14481483
14491484 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
14501485 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
14511486
1452
- ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1453
- memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1454
- ctrl->netdev = netdev;
1455
- init_completion(&ctrl->complete);
1487
+ init_completion(&sc->complete);
1488
+ sc->sc_status = OCTEON_REQUEST_PENDING;
14561489
14571490 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
14581491
14591492 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
14601493 OPCODE_NIC_PORT_STATS, 0, 0, 0);
14611494
1462
- sc->callback = octnet_nic_stats_callback;
1463
- sc->callback_arg = sc;
1464
- sc->wait_time = 500; /*in milli seconds*/
1465
-
14661495 retval = octeon_send_soft_command(oct_dev, sc);
14671496 if (retval == IQ_SEND_FAILED) {
14681497 octeon_free_soft_command(oct_dev, sc);
1469
- return -EINVAL;
1498
+ goto lio_fetch_stats_exit;
14701499 }
14711500
1472
- wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1473
-
1474
- if (resp->status != 1) {
1475
- octeon_free_soft_command(oct_dev, sc);
1476
-
1477
- return -EINVAL;
1501
+ retval = wait_for_sc_completion_timeout(oct_dev, sc,
1502
+ (2 * LIO_SC_MAX_TMO_MS));
1503
+ if (retval) {
1504
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1505
+ goto lio_fetch_stats_exit;
14781506 }
14791507
1480
- octeon_free_soft_command(oct_dev, sc);
1508
+ octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
1509
+ WRITE_ONCE(sc->caller_is_done, true);
14811510
1482
- return 0;
1483
-}
1511
+lio_fetch_stats_exit:
1512
+ time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
1513
+ if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
1514
+ schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
14841515
1485
-static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
1486
- u32 status,
1487
- void *buf)
1488
-{
1489
- struct liquidio_nic_seapi_ctl_context *ctx;
1490
- struct octeon_soft_command *sc = buf;
1491
-
1492
- ctx = sc->ctxptr;
1493
-
1494
- oct = lio_get_device(ctx->octeon_id);
1495
- if (status) {
1496
- dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
1497
- __func__,
1498
- CVM_CAST64(status));
1499
- }
1500
- ctx->status = status;
1501
- complete(&ctx->complete);
1516
+ return;
15021517 }
15031518
15041519 int liquidio_set_speed(struct lio *lio, int speed)
15051520 {
1506
- struct liquidio_nic_seapi_ctl_context *ctx;
15071521 struct octeon_device *oct = lio->oct_dev;
15081522 struct oct_nic_seapi_resp *resp;
15091523 struct octeon_soft_command *sc;
15101524 union octnet_cmd *ncmd;
1511
- u32 ctx_size;
15121525 int retval;
15131526 u32 var;
15141527
@@ -1521,21 +1534,18 @@
 		return -EOPNOTSUPP;
 	}

-	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
 	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 				       sizeof(struct oct_nic_seapi_resp),
-				       ctx_size);
+				       0);
 	if (!sc)
 		return -ENOMEM;

 	ncmd = sc->virtdptr;
-	ctx = sc->ctxptr;
 	resp = sc->virtrptr;
 	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

-	ctx->octeon_id = lio_get_device_id(oct);
-	ctx->status = 0;
-	init_completion(&ctx->complete);
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;

 	ncmd->u64 = 0;
 	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
@@ -1548,30 +1558,24 @@
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

-	sc->callback = liquidio_nic_seapi_ctl_callback;
-	sc->callback_arg = sc;
-	sc->wait_time = 5000;
-
 	retval = octeon_send_soft_command(oct, sc);
 	if (retval == IQ_SEND_FAILED) {
 		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+		octeon_free_soft_command(oct, sc);
 		retval = -EBUSY;
 	} else {
 		/* Wait for response or timeout */
-		if (wait_for_completion_timeout(&ctx->complete,
-						msecs_to_jiffies(10000)) == 0) {
-			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
-				__func__);
-			octeon_free_soft_command(oct, sc);
-			return -EINTR;
-		}
+		retval = wait_for_sc_completion_timeout(oct, sc, 0);
+		if (retval)
+			return retval;

 		retval = resp->status;

 		if (retval) {
 			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
 				__func__, retval);
-			octeon_free_soft_command(oct, sc);
+			WRITE_ONCE(sc->caller_is_done, true);
+
 			return -EIO;
 		}

@@ -1583,38 +1587,32 @@
 		}

 		oct->speed_setting = var;
+		WRITE_ONCE(sc->caller_is_done, true);
 	}
-
-	octeon_free_soft_command(oct, sc);

 	return retval;
 }

 int liquidio_get_speed(struct lio *lio)
 {
-	struct liquidio_nic_seapi_ctl_context *ctx;
 	struct octeon_device *oct = lio->oct_dev;
 	struct oct_nic_seapi_resp *resp;
 	struct octeon_soft_command *sc;
 	union octnet_cmd *ncmd;
-	u32 ctx_size;
 	int retval;

-	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
 	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
 				       sizeof(struct oct_nic_seapi_resp),
-				       ctx_size);
+				       0);
 	if (!sc)
 		return -ENOMEM;

 	ncmd = sc->virtdptr;
-	ctx = sc->ctxptr;
 	resp = sc->virtrptr;
 	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

-	ctx->octeon_id = lio_get_device_id(oct);
-	ctx->status = 0;
-	init_completion(&ctx->complete);
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;

 	ncmd->u64 = 0;
 	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
@@ -1626,37 +1624,20 @@
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

-	sc->callback = liquidio_nic_seapi_ctl_callback;
-	sc->callback_arg = sc;
-	sc->wait_time = 5000;
-
 	retval = octeon_send_soft_command(oct, sc);
 	if (retval == IQ_SEND_FAILED) {
 		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
-		oct->no_speed_setting = 1;
-		oct->speed_setting = 25;
-
-		retval = -EBUSY;
+		octeon_free_soft_command(oct, sc);
+		retval = -EIO;
 	} else {
-		if (wait_for_completion_timeout(&ctx->complete,
-						msecs_to_jiffies(10000)) == 0) {
-			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
-				__func__);
+		retval = wait_for_sc_completion_timeout(oct, sc, 0);
+		if (retval)
+			return retval;

-			oct->speed_setting = 25;
-			oct->no_speed_setting = 1;
-
-			octeon_free_soft_command(oct, sc);
-
-			return -EINTR;
-		}
 		retval = resp->status;
 		if (retval) {
 			dev_err(&oct->pci_dev->dev,
 				"%s failed retval=%d\n", __func__, retval);
-			oct->no_speed_setting = 1;
-			oct->speed_setting = 25;
-			octeon_free_soft_command(oct, sc);
 			retval = -EIO;
 		} else {
 			u32 var;
@@ -1664,16 +1645,176 @@
 			var = be32_to_cpu((__force __be32)resp->speed);
 			oct->speed_setting = var;
 			if (var == 0xffff) {
-				oct->no_speed_setting = 1;
 				/* unable to access boot variables
 				 * get the default value based on the NIC type
 				 */
-				oct->speed_setting = 25;
+				if (oct->subsystem_id ==
+				    OCTEON_CN2350_25GB_SUBSYS_ID ||
+				    oct->subsystem_id ==
+				    OCTEON_CN2360_25GB_SUBSYS_ID) {
+					oct->no_speed_setting = 1;
+					oct->speed_setting = 25;
+				} else {
+					oct->speed_setting = 10;
+				}
 			}
+
 		}
+		WRITE_ONCE(sc->caller_is_done, true);
 	}

-	octeon_free_soft_command(oct, sc);
+	return retval;
+}
+
+int liquidio_set_fec(struct lio *lio, int on_off)
+{
+	struct oct_nic_seapi_resp *resp;
+	struct octeon_soft_command *sc;
+	struct octeon_device *oct;
+	union octnet_cmd *ncmd;
+	int retval;
+	u32 var;
+
+	oct = lio->oct_dev;
+
+	if (oct->props[lio->ifidx].fec == on_off)
+		return 0;
+
+	if (!OCTEON_CN23XX_PF(oct)) {
+		dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
+			__func__);
+		return -1;
+	}
+
+	if (oct->speed_boot != 25) {
+		dev_err(&oct->pci_dev->dev,
+			"Set FEC only when link speed is 25G during insmod\n");
+		return -1;
+	}
+
+	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+				       sizeof(struct oct_nic_seapi_resp), 0);
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev,
+			"Failed to allocate soft command\n");
+		return -ENOMEM;
+	}
+
+	ncmd = sc->virtdptr;
+	resp = sc->virtrptr;
+	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
+
+	ncmd->u64 = 0;
+	ncmd->s.cmd = SEAPI_CMD_FEC_SET;
+	ncmd->s.param1 = on_off;
+	/* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
+
+	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+	retval = octeon_send_soft_command(oct, sc);
+	if (retval == IQ_SEND_FAILED) {
+		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+		octeon_free_soft_command(oct, sc);
+		return -EIO;
+	}
+
+	retval = wait_for_sc_completion_timeout(oct, sc, 0);
+	if (retval)
+		return (-EIO);
+
+	var = be32_to_cpu(resp->fec_setting);
+	resp->fec_setting = var;
+	if (var != on_off) {
+		dev_err(&oct->pci_dev->dev,
+			"Setting failed fec= %x, expect %x\n",
+			var, on_off);
+		oct->props[lio->ifidx].fec = var;
+		if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+			oct->props[lio->ifidx].fec = 1;
+		else
+			oct->props[lio->ifidx].fec = 0;
+	}
+
+	WRITE_ONCE(sc->caller_is_done, true);
+
+	if (oct->props[lio->ifidx].fec !=
+	    oct->props[lio->ifidx].fec_boot) {
+		dev_dbg(&oct->pci_dev->dev,
+			"Reload driver to change fec to %s\n",
+			oct->props[lio->ifidx].fec ? "on" : "off");
+	}
+
+	return retval;
+}
+
+int liquidio_get_fec(struct lio *lio)
+{
+	struct oct_nic_seapi_resp *resp;
+	struct octeon_soft_command *sc;
+	struct octeon_device *oct;
+	union octnet_cmd *ncmd;
+	int retval;
+	u32 var;
+
+	oct = lio->oct_dev;
+
+	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+				       sizeof(struct oct_nic_seapi_resp), 0);
+	if (!sc)
+		return -ENOMEM;
+
+	ncmd = sc->virtdptr;
+	resp = sc->virtrptr;
+	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
+
+	ncmd->u64 = 0;
+	ncmd->s.cmd = SEAPI_CMD_FEC_GET;
+
+	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+	retval = octeon_send_soft_command(oct, sc);
+	if (retval == IQ_SEND_FAILED) {
+		dev_info(&oct->pci_dev->dev,
+			 "%s: Failed to send soft command\n", __func__);
+		octeon_free_soft_command(oct, sc);
+		return -EIO;
+	}
+
+	retval = wait_for_sc_completion_timeout(oct, sc, 0);
+	if (retval)
+		return retval;
+
+	var = be32_to_cpu(resp->fec_setting);
+	resp->fec_setting = var;
+	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+		oct->props[lio->ifidx].fec = 1;
+	else
+		oct->props[lio->ifidx].fec = 0;
+
+	WRITE_ONCE(sc->caller_is_done, true);
+
+	if (oct->props[lio->ifidx].fec !=
+	    oct->props[lio->ifidx].fec_boot) {
+		dev_dbg(&oct->pci_dev->dev,
+			"Reload driver to change fec to %s\n",
+			oct->props[lio->ifidx].fec ? "on" : "off");
+	}

 	return retval;
 }
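
Note on the change as a whole: the recurring refactor in this file replaces the per-request callback/context machinery (sc->ctxptr, sc->callback, sc->wait_time, sleep_cond()/wait queues) with a completion embedded in the soft command itself. A minimal sketch of the caller side, mirroring liquidio_change_mtu() above; all identifiers come from this diff except the helper name lio_send_sc_and_wait(), which is hypothetical, and the error values, which are illustrative:

static int lio_send_sc_and_wait(struct octeon_device *oct,
				struct octeon_soft_command *sc)
{
	int ret;

	init_completion(&sc->complete);		/* response handler completes this  */
	sc->sc_status = OCTEON_REQUEST_PENDING;	/* overwritten when the reply lands */

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		/* never queued: the caller still owns sc, so free it here */
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;	/* timed out: response machinery owns sc now */

	if (sc->sc_status) {
		/* firmware reported failure; hand sc back for reclamation */
		WRITE_ONCE(sc->caller_is_done, true);
		return -EIO;
	}

	/* success: read the reply from sc->virtrptr, then release sc */
	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}

The ownership rule the converted call sites encode: octeon_free_soft_command() is only safe while the command never reached the device (IQ_SEND_FAILED); once sent, the caller must not free sc directly -- it signals WRITE_ONCE(sc->caller_is_done, true) and lets the response processing reclaim the buffer, which is why the explicit frees disappear from the success and timeout paths throughout this diff.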