
commit 071106ecf68c401173c58808b1cf5f68cc50d390
Author: hc
Date:   2024-01-05

--- a/kernel/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/kernel/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -33,23 +33,10 @@
 
 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
 
-struct oct_intrmod_context {
-	int octeon_id;
-	wait_queue_head_t wc;
-	int cond;
-	int status;
-};
-
 struct oct_intrmod_resp {
 	u64 rh;
 	struct oct_intrmod_cfg intrmod;
 	u64 status;
-};
-
-struct oct_mdio_cmd_context {
-	int octeon_id;
-	wait_queue_head_t wc;
-	int cond;
 };
 
 struct oct_mdio_cmd_resp {
@@ -124,7 +111,7 @@
 	"mac_tx_one_collision",
 	"mac_tx_multi_collision",
 	"mac_tx_max_collision_fail",
-	"mac_tx_max_deferal_fail",
+	"mac_tx_max_deferral_fail",
 	"mac_tx_fifo_err",
 	"mac_tx_runts",
 
@@ -257,6 +244,7 @@
 		    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
 		    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
 			dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
+			ecmd->base.transceiver = XCVR_EXTERNAL;
 		} else {
 			dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
 				linfo->link.s.if_mode);
@@ -290,10 +278,12 @@
 					 10000baseCR_Full);
 			}
 
-			if (oct->no_speed_setting == 0)
+			if (oct->no_speed_setting == 0) {
 				liquidio_get_speed(lio);
-			else
+				liquidio_get_fec(lio);
+			} else {
 				oct->speed_setting = 25;
+			}
 
 			if (oct->speed_setting == 10) {
 				ethtool_link_ksettings_add_link_mode
@@ -316,6 +306,24 @@
 				ethtool_link_ksettings_add_link_mode
 					(ecmd, advertising,
 					 25000baseCR_Full);
+			}
+
+			if (oct->no_speed_setting)
+				break;
+
+			ethtool_link_ksettings_add_link_mode
+				(ecmd, supported, FEC_RS);
+			ethtool_link_ksettings_add_link_mode
+				(ecmd, supported, FEC_NONE);
+			/*FEC_OFF*/
+			if (oct->props[lio->ifidx].fec == 1) {
+				/* ETHTOOL_FEC_RS */
+				ethtool_link_ksettings_add_link_mode
+					(ecmd, advertising, FEC_RS);
+			} else {
+				/* ETHTOOL_FEC_OFF */
+				ethtool_link_ksettings_add_link_mode
+					(ecmd, advertising, FEC_NONE);
 			}
 		} else { /* VF */
 			if (linfo->link.s.speed == 10000) {
@@ -434,7 +442,6 @@
 
 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 	strcpy(drvinfo->driver, "liquidio");
-	strcpy(drvinfo->version, LIQUIDIO_VERSION);
 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 		ETHTOOL_FWVERS_LEN);
 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -451,7 +458,6 @@
 
 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 	strcpy(drvinfo->driver, "liquidio_vf");
-	strcpy(drvinfo->version, LIQUIDIO_VERSION);
 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 		ETHTOOL_FWVERS_LEN);
 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -472,12 +478,11 @@
 	nctrl.ncmd.s.param1 = num_queues;
 	nctrl.ncmd.s.param2 = num_queues;
 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
 			ret);
 		return -1;
@@ -708,13 +713,13 @@
 	nctrl.ncmd.s.param1 = addr;
 	nctrl.ncmd.s.param2 = val;
 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
-		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+	if (ret) {
+		dev_err(&oct->pci_dev->dev,
+			"Failed to configure gpio value, ret=%d\n", ret);
 		return -EINVAL;
 	}
 
@@ -734,39 +739,17 @@
 	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
 	nctrl.ncmd.s.param1 = val;
 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
-		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+	if (ret) {
+		dev_err(&oct->pci_dev->dev,
+			"Failed to configure gpio value, ret=%d\n", ret);
 		return -EINVAL;
 	}
 
 	return 0;
-}
-
-/* Callback for when mdio command response arrives
- */
-static void octnet_mdio_resp_callback(struct octeon_device *oct,
-				      u32 status,
-				      void *buf)
-{
-	struct oct_mdio_cmd_context *mdio_cmd_ctx;
-	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-
-	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
-
-	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
-	if (status) {
-		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
-			CVM_CAST64(status));
-		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
-	} else {
-		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
-	}
-	wake_up_interruptible(&mdio_cmd_ctx->wc);
 }
 
 /* This routine provides PHY access routines for
@@ -778,25 +761,20 @@
 	struct octeon_device *oct_dev = lio->oct_dev;
 	struct octeon_soft_command *sc;
 	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
-	struct oct_mdio_cmd_context *mdio_cmd_ctx;
 	struct oct_mdio_cmd *mdio_cmd;
 	int retval = 0;
 
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct_dev,
 					  sizeof(struct oct_mdio_cmd),
-					  sizeof(struct oct_mdio_cmd_resp),
-					  sizeof(struct oct_mdio_cmd_context));
+					  sizeof(struct oct_mdio_cmd_resp), 0);
 
 	if (!sc)
 		return -ENOMEM;
 
-	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
 	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
 	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
 
-	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
-	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
 	mdio_cmd->op = op;
 	mdio_cmd->mdio_addr = loc;
 	if (op)
@@ -808,42 +786,40 @@
 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
 				    0, 0, 0);
 
-	sc->wait_time = 1000;
-	sc->callback = octnet_mdio_resp_callback;
-	sc->callback_arg = sc;
-
-	init_waitqueue_head(&mdio_cmd_ctx->wc);
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
 
 	retval = octeon_send_soft_command(oct_dev, sc);
-
 	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct_dev->pci_dev->dev,
 			"octnet_mdio45_access instruction failed status: %x\n",
 			retval);
-		retval = -EBUSY;
+		octeon_free_soft_command(oct_dev, sc);
+		return -EBUSY;
 	} else {
 		/* Sleep on a wait queue till the cond flag indicates that the
 		 * response arrived
 		 */
-		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+		retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+		if (retval)
+			return retval;
+
 		retval = mdio_cmd_rsp->status;
 		if (retval) {
-			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
-			retval = -EBUSY;
-		} else {
-			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
-					    sizeof(struct oct_mdio_cmd) / 8);
-
-			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
-				if (!op)
-					*value = mdio_cmd_rsp->resp.value1;
-			} else {
-				retval = -EINVAL;
-			}
+			dev_err(&oct_dev->pci_dev->dev,
+				"octnet mdio45 access failed: %x\n", retval);
+			WRITE_ONCE(sc->caller_is_done, true);
+			return -EBUSY;
 		}
-	}
 
-	octeon_free_soft_command(oct_dev, sc);
+		octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+				    sizeof(struct oct_mdio_cmd) / 8);
+
+		if (!op)
+			*value = mdio_cmd_rsp->resp.value1;
+
+		WRITE_ONCE(sc->caller_is_done, true);
+	}
 
 	return retval;
 }
@@ -1007,8 +983,7 @@
 static int lio_23xx_reconfigure_queue_count(struct lio *lio)
 {
 	struct octeon_device *oct = lio->oct_dev;
-	struct liquidio_if_cfg_context *ctx;
-	u32 resp_size, ctx_size, data_size;
+	u32 resp_size, data_size;
 	struct liquidio_if_cfg_resp *resp;
 	struct octeon_soft_command *sc;
 	union oct_nic_if_cfg if_cfg;
@@ -1018,11 +993,10 @@
 	int j;
 
 	resp_size = sizeof(struct liquidio_if_cfg_resp);
-	ctx_size = sizeof(struct liquidio_if_cfg_context);
 	data_size = sizeof(struct lio_version);
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct, data_size,
-					  resp_size, ctx_size);
+					  resp_size, 0);
 	if (!sc) {
 		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
 			__func__);
@@ -1030,7 +1004,6 @@
 	}
 
 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
-	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
 	vdata = (struct lio_version *)sc->virtdptr;
 
 	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
@@ -1038,9 +1011,6 @@
 	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
 
 	ifidx_or_pfnum = oct->pf_num;
-	WRITE_ONCE(ctx->cond, 0);
-	ctx->octeon_id = lio_get_device_id(oct);
-	init_waitqueue_head(&ctx->wc);
 
 	if_cfg.u64 = 0;
 	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
@@ -1052,27 +1022,29 @@
 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 				    OPCODE_NIC_QCOUNT_UPDATE, 0,
 				    if_cfg.u64, 0);
-	sc->callback = lio_if_cfg_callback;
-	sc->callback_arg = sc;
-	sc->wait_time = LIO_IFCFG_WAIT_TIME;
+
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
 
 	retval = octeon_send_soft_command(oct, sc);
 	if (retval == IQ_SEND_FAILED) {
 		dev_err(&oct->pci_dev->dev,
-			"iq/oq config failed status: %x\n",
+			"Sending iq/oq config failed status: %x\n",
 			retval);
-		goto qcount_update_fail;
+		octeon_free_soft_command(oct, sc);
+		return -EIO;
 	}
 
-	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
-		dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
-		return -1;
-	}
+	retval = wait_for_sc_completion_timeout(oct, sc, 0);
+	if (retval)
+		return retval;
 
 	retval = resp->status;
 	if (retval) {
-		dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
-		goto qcount_update_fail;
+		dev_err(&oct->pci_dev->dev,
+			"iq/oq config failed: %x\n", retval);
+		WRITE_ONCE(sc->caller_is_done, true);
+		return -1;
 	}
 
 	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
@@ -1097,16 +1069,12 @@
 	lio->txq = lio->linfo.txpciq[0].s.q_no;
 	lio->rxq = lio->linfo.rxpciq[0].s.q_no;
 
-	octeon_free_soft_command(oct, sc);
 	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
 		 lio->linfo.num_rxpciq);
 
+	WRITE_ONCE(sc->caller_is_done, true);
+
 	return 0;
-
-qcount_update_fail:
-	octeon_free_soft_command(oct, sc);
-
-	return -1;
 }
 
 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
@@ -1166,6 +1134,8 @@
 	 * steps like updating sriov_info for the octeon device need to be done.
 	 */
 	if (queue_count_update) {
+		cleanup_rx_oom_poll_fn(netdev);
+
 		lio_delete_glists(lio);
 
 		/* Delete mbox for PF which is SRIOV disabled because sriov_info
@@ -1263,6 +1233,11 @@
 		if (lio_setup_glists(oct, lio, num_qs)) {
 			dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
 			return -1;
+		}
+
+		if (setup_rx_oom_poll_fn(netdev)) {
+			dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
+			return 1;
 		}
 
 		/* Send firmware the information about new number of queues
@@ -1412,7 +1387,6 @@
 	nctrl.ncmd.u64 = 0;
 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
-	nctrl.wait_time = 100;
 	nctrl.netpndev = (u64)netdev;
 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 
@@ -1433,8 +1407,9 @@
 	}
 
 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
-	if (ret < 0) {
-		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
+	if (ret) {
+		dev_err(&oct->pci_dev->dev,
+			"Failed to set pause parameter, ret=%d\n", ret);
 		return -EINVAL;
 	}
 
@@ -1764,7 +1739,8 @@
 	 */
 	data[i++] = lstats.rx_dropped;
 	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
-	data[i++] = lstats.tx_dropped;
+	data[i++] = lstats.tx_dropped +
+		    oct_dev->link_stats.fromhost.fw_err_drop;
 
 	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
 	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
@@ -2013,34 +1989,11 @@
 	}
 }
 
-/* Callback function for intrmod */
-static void octnet_intrmod_callback(struct octeon_device *oct_dev,
-				    u32 status,
-				    void *ptr)
-{
-	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
-	struct oct_intrmod_context *ctx;
-
-	ctx = (struct oct_intrmod_context *)sc->ctxptr;
-
-	ctx->status = status;
-
-	WRITE_ONCE(ctx->cond, 1);
-
-	/* This barrier is required to be sure that the response has been
-	 * written fully before waking up the handler
-	 */
-	wmb();
-
-	wake_up_interruptible(&ctx->wc);
-}
-
 /* get interrupt moderation parameters */
 static int octnet_get_intrmod_cfg(struct lio *lio,
 				  struct oct_intrmod_cfg *intr_cfg)
 {
 	struct octeon_soft_command *sc;
-	struct oct_intrmod_context *ctx;
 	struct oct_intrmod_resp *resp;
 	int retval;
 	struct octeon_device *oct_dev = lio->oct_dev;
@@ -2049,8 +2002,7 @@
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct_dev,
 					  0,
-					  sizeof(struct oct_intrmod_resp),
-					  sizeof(struct oct_intrmod_context));
+					  sizeof(struct oct_intrmod_resp), 0);
 
 	if (!sc)
 		return -ENOMEM;
@@ -2058,20 +2010,13 @@
 	resp = (struct oct_intrmod_resp *)sc->virtrptr;
 	memset(resp, 0, sizeof(struct oct_intrmod_resp));
 
-	ctx = (struct oct_intrmod_context *)sc->ctxptr;
-	memset(ctx, 0, sizeof(struct oct_intrmod_context));
-	WRITE_ONCE(ctx->cond, 0);
-	ctx->octeon_id = lio_get_device_id(oct_dev);
-	init_waitqueue_head(&ctx->wc);
-
 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 
 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
 				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
 
-	sc->callback = octnet_intrmod_callback;
-	sc->callback_arg = sc;
-	sc->wait_time = 1000;
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
 
 	retval = octeon_send_soft_command(oct_dev, sc);
 	if (retval == IQ_SEND_FAILED) {
@@ -2082,32 +2027,23 @@
 	/* Sleep on a wait queue till the cond flag indicates that the
 	 * response arrived or timed-out.
 	 */
-	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
-		dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
-		goto intrmod_info_wait_intr;
-	}
+	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+	if (retval)
+		return -ENODEV;
 
-	retval = ctx->status || resp->status;
-	if (retval) {
+	if (resp->status) {
 		dev_err(&oct_dev->pci_dev->dev,
 			"Get interrupt moderation parameters failed\n");
-		goto intrmod_info_wait_fail;
+		WRITE_ONCE(sc->caller_is_done, true);
+		return -ENODEV;
 	}
 
 	octeon_swap_8B_data((u64 *)&resp->intrmod,
 			    (sizeof(struct oct_intrmod_cfg)) / 8);
 	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
-	octeon_free_soft_command(oct_dev, sc);
+	WRITE_ONCE(sc->caller_is_done, true);
 
 	return 0;
-
-intrmod_info_wait_fail:
-
-	octeon_free_soft_command(oct_dev, sc);
-
-intrmod_info_wait_intr:
-
-	return -ENODEV;
 }
 
 /* Configure interrupt moderation parameters */
@@ -2115,7 +2051,6 @@
 				  struct oct_intrmod_cfg *intr_cfg)
 {
 	struct octeon_soft_command *sc;
-	struct oct_intrmod_context *ctx;
 	struct oct_intrmod_cfg *cfg;
 	int retval;
 	struct octeon_device *oct_dev = lio->oct_dev;
@@ -2124,17 +2059,10 @@
 	sc = (struct octeon_soft_command *)
 		octeon_alloc_soft_command(oct_dev,
 					  sizeof(struct oct_intrmod_cfg),
-					  0,
-					  sizeof(struct oct_intrmod_context));
+					  16, 0);
 
 	if (!sc)
 		return -ENOMEM;
-
-	ctx = (struct oct_intrmod_context *)sc->ctxptr;
-
-	WRITE_ONCE(ctx->cond, 0);
-	ctx->octeon_id = lio_get_device_id(oct_dev);
-	init_waitqueue_head(&ctx->wc);
 
 	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
 
@@ -2146,9 +2074,8 @@
 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
 				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
 
-	sc->callback = octnet_intrmod_callback;
-	sc->callback_arg = sc;
-	sc->wait_time = 1000;
+	init_completion(&sc->complete);
+	sc->sc_status = OCTEON_REQUEST_PENDING;
 
 	retval = octeon_send_soft_command(oct_dev, sc);
 	if (retval == IQ_SEND_FAILED) {
@@ -2159,26 +2086,24 @@
 	/* Sleep on a wait queue till the cond flag indicates that the
 	 * response arrived or timed-out.
 	 */
-	if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
-		retval = ctx->status;
-		if (retval)
-			dev_err(&oct_dev->pci_dev->dev,
-				"intrmod config failed. Status: %llx\n",
-				CVM_CAST64(retval));
-		else
-			dev_info(&oct_dev->pci_dev->dev,
-				 "Rx-Adaptive Interrupt moderation %s\n",
-				 (intr_cfg->rx_enable) ?
-				 "enabled" : "disabled");
+	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+	if (retval)
+		return retval;
 
-		octeon_free_soft_command(oct_dev, sc);
-
-		return ((retval) ? -ENODEV : 0);
+	retval = sc->sc_status;
+	if (retval == 0) {
+		dev_info(&oct_dev->pci_dev->dev,
+			 "Rx-Adaptive Interrupt moderation %s\n",
+			 (intr_cfg->rx_enable) ?
			 "enabled" : "disabled");
+		WRITE_ONCE(sc->caller_is_done, true);
+		return 0;
 	}
 
-	dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
-
-	return -EINTR;
+	dev_err(&oct_dev->pci_dev->dev,
+		"intrmod config failed. Status: %x\n", retval);
+	WRITE_ONCE(sc->caller_is_done, true);
+	return -ENODEV;
 }
 
 static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -3123,9 +3048,70 @@
 	return 0;
 }
 
+static int lio_get_fecparam(struct net_device *netdev,
+			    struct ethtool_fecparam *fec)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	fec->active_fec = ETHTOOL_FEC_NONE;
+	fec->fec = ETHTOOL_FEC_NONE;
+
+	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+		if (oct->no_speed_setting == 1)
+			return 0;
+
+		liquidio_get_fec(lio);
+		fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
+		if (oct->props[lio->ifidx].fec == 1)
+			fec->active_fec = ETHTOOL_FEC_RS;
+		else
+			fec->active_fec = ETHTOOL_FEC_OFF;
+	}
+
+	return 0;
+}
+
+static int lio_set_fecparam(struct net_device *netdev,
+			    struct ethtool_fecparam *fec)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+
+	if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+	    oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+		if (oct->no_speed_setting == 1)
+			return -EOPNOTSUPP;
+
+		if (fec->fec & ETHTOOL_FEC_OFF)
+			liquidio_set_fec(lio, 0);
+		else if (fec->fec & ETHTOOL_FEC_RS)
+			liquidio_set_fec(lio, 1);
+		else
+			return -EOPNOTSUPP;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+#define LIO_ETHTOOL_COALESCE	(ETHTOOL_COALESCE_RX_USECS |		\
+				 ETHTOOL_COALESCE_MAX_FRAMES |		\
+				 ETHTOOL_COALESCE_USE_ADAPTIVE |	\
+				 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW |	\
+				 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW |	\
+				 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH |	\
+				 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH |	\
+				 ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
+
 static const struct ethtool_ops lio_ethtool_ops = {
+	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
 	.get_link_ksettings = lio_get_link_ksettings,
 	.set_link_ksettings = lio_set_link_ksettings,
+	.get_fecparam = lio_get_fecparam,
+	.set_fecparam = lio_set_fecparam,
 	.get_link = ethtool_op_get_link,
 	.get_drvinfo = lio_get_drvinfo,
 	.get_ringparam = lio_ethtool_get_ringparam,
@@ -3152,6 +3138,7 @@
 };
 
 static const struct ethtool_ops lio_vf_ethtool_ops = {
+	.supported_coalesce_params = LIO_ETHTOOL_COALESCE,
 	.get_link_ksettings = lio_get_link_ksettings,
 	.get_link = ethtool_op_get_link,
 	.get_drvinfo = lio_get_vf_drvinfo,
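
Note on the recurring conversion above: each soft-command user drops its private context struct (octeon_id / wait_queue_head_t / cond), the sc->callback / sc->callback_arg / sc->wait_time setup, and the unconditional octeon_free_soft_command() on the success path, and instead drives the request through sc->complete, sc->sc_status and sc->caller_is_done. The sketch below is illustrative only: it strings together the liquidio-internal helpers named in the hunks (octeon_alloc_soft_command, octeon_prepare_soft_command, octeon_send_soft_command, wait_for_sc_completion_timeout) under a hypothetical caller name, and is not part of the patch.

/* Hedged sketch of the synchronous soft-command flow this patch converts to.
 * "example_sync_request" is a hypothetical function; the helpers, fields and
 * opcodes used here are the ones visible in the hunks above.
 */
static int example_sync_request(struct octeon_device *oct, struct lio *lio)
{
	struct oct_mdio_cmd_resp *resp;
	struct octeon_soft_command *sc;
	int retval;

	/* The context-buffer size argument is now 0: nothing uses sc->ctxptr. */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp), 0);
	if (!sc)
		return -ENOMEM;

	resp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	/* ... fill sc->virtdptr with the command payload here ... */

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	/* Replaces sc->callback / sc->callback_arg / sc->wait_time. */
	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		/* Never queued: the caller still owns sc and must free it. */
		octeon_free_soft_command(oct, sc);
		return -EBUSY;
	}

	/* Block until the response arrives; on error the caller returns
	 * without freeing sc, mirroring the converted functions above.
	 */
	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	retval = resp->status ? -EBUSY : 0;

	/* Hand sc back to the response handler instead of freeing it here. */
	WRITE_ONCE(sc->caller_is_done, true);
	return retval;
}

The FEC and coalesce additions at the bottom of the patch are reachable from user space with a reasonably recent ethtool ("ethtool --show-fec <if>", "ethtool --set-fec <if> encoding rs|off", and "ethtool -c/-C <if>"), which end up in lio_get_fecparam()/lio_set_fecparam() and in the coalesce paths gated by supported_coalesce_params.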