2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -4,12 +4,18 @@
44 #include <linux/etherdevice.h>
55 #include <linux/string.h>
66 #include <linux/phy.h>
7
+#include <linux/sfp.h>
78
89 #include "hns3_enet.h"
910
1011 struct hns3_stats {
1112 char stats_string[ETH_GSTRING_LEN];
1213 int stats_offset;
14
+};
15
+
16
+struct hns3_sfp_type {
17
+ u8 type;
18
+ u8 ext_type;
1319 };
1420
1521 /* tqp related stats */
@@ -21,56 +27,60 @@
2127
2228 static const struct hns3_stats hns3_txq_stats[] = {
2329 /* Tx per-queue statistics */
24
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
25
- HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
30
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
2631 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
2732 HNS3_TQP_STAT("packets", tx_pkts),
2833 HNS3_TQP_STAT("bytes", tx_bytes),
29
- HNS3_TQP_STAT("errors", tx_err_cnt),
30
- HNS3_TQP_STAT("tx_wake", restart_queue),
31
- HNS3_TQP_STAT("tx_busy", tx_busy),
34
+ HNS3_TQP_STAT("more", tx_more),
35
+ HNS3_TQP_STAT("wake", restart_queue),
36
+ HNS3_TQP_STAT("busy", tx_busy),
37
+ HNS3_TQP_STAT("copy", tx_copy),
38
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
39
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
40
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
41
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
42
+ HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
43
+ HNS3_TQP_STAT("hw_limitation", hw_limitation),
3244 };
3345
3446 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
3547
3648 static const struct hns3_stats hns3_rxq_stats[] = {
3749 /* Rx per-queue statistics */
38
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
39
- HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
50
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
4051 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
4152 HNS3_TQP_STAT("packets", rx_pkts),
4253 HNS3_TQP_STAT("bytes", rx_bytes),
4354 HNS3_TQP_STAT("errors", rx_err_cnt),
4455 HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
4556 HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
46
- HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
4757 HNS3_TQP_STAT("err_bd_num", err_bd_num),
4858 HNS3_TQP_STAT("l2_err", l2_err),
4959 HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
60
+ HNS3_TQP_STAT("multicast", rx_multicast),
61
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
5062 };
5163
5264 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
5365
5466 #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
5567
56
-#define HNS3_SELF_TEST_TYPE_NUM 2
68
+#define HNS3_SELF_TEST_TYPE_NUM 4
5769 #define HNS3_NIC_LB_TEST_PKT_NUM 1
5870 #define HNS3_NIC_LB_TEST_RING_ID 0
5971 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128
72
+#define HNS3_NIC_LB_SETUP_USEC 10000
6073
6174 /* Nic loopback test err */
6275 #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
6376 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
6477 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
6578
66
-struct hns3_link_mode_mapping {
67
- u32 hns3_link_mode;
68
- u32 ethtool_link_mode;
69
-};
70
-
7179 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
7280 {
7381 struct hnae3_handle *h = hns3_get_handle(ndev);
82
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
83
+ bool vlan_filter_enable;
7484 int ret;
7585
7686 if (!h->ae_algo->ops->set_loopback ||
@@ -78,8 +88,10 @@
7888 return -EOPNOTSUPP;
7989
8090 switch (loop) {
81
- case HNAE3_MAC_INTER_LOOP_SERDES:
82
- case HNAE3_MAC_INTER_LOOP_MAC:
91
+ case HNAE3_LOOP_SERIAL_SERDES:
92
+ case HNAE3_LOOP_PARALLEL_SERDES:
93
+ case HNAE3_LOOP_APP:
94
+ case HNAE3_LOOP_PHY:
8395 ret = h->ae_algo->ops->set_loopback(h, loop, en);
8496 break;
8597 default:
@@ -87,10 +99,17 @@
8799 break;
88100 }
89101
90
- if (ret)
102
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
91103 return ret;
92104
93
- h->ae_algo->ops->set_promisc_mode(h, en, en);
105
+ if (en) {
106
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
107
+ } else {
108
+ /* recover promisc mode before loopback test */
109
+ hns3_request_update_promisc_mode(h);
110
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
111
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
112
+ }
94113
95114 return ret;
96115 }
@@ -105,9 +124,9 @@
105124 return ret;
106125
107126 ret = hns3_lp_setup(ndev, loop_mode, true);
108
- usleep_range(10000, 20000);
127
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
109128
110
- return 0;
129
+ return ret;
111130 }
112131
113132 static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
@@ -120,14 +139,18 @@
120139 return ret;
121140 }
122141
123
- usleep_range(10000, 20000);
142
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
124143
125144 return 0;
126145 }
127146
128147 static void hns3_lp_setup_skb(struct sk_buff *skb)
129148 {
149
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
150
+
130151 struct net_device *ndev = skb->dev;
152
+ struct hnae3_handle *handle;
153
+ struct hnae3_ae_dev *ae_dev;
131154 unsigned char *packet;
132155 struct ethhdr *ethh;
133156 unsigned int i;
@@ -137,7 +160,16 @@
137160 packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
138161
139162 memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
140
- ethh->h_dest[5] += 0x1f;
163
+
164
+ /* The dst mac addr of the loopback packet is the same as the host's
165
+ * mac addr, so the SSU component may loop back the packet to the host
166
+ * before the packet reaches mac or serdes, which will defeat
167
+ * the purpose of mac or serdes selftest.
168
+ */
169
+ handle = hns3_get_handle(ndev);
170
+ ae_dev = pci_get_drvdata(handle->pdev);
171
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
172
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
141173 eth_zero_addr(ethh->h_source);
142174 ethh->h_proto = htons(ETH_P_ARP);
143175 skb_reset_mac_header(skb);
@@ -178,7 +210,7 @@
178210
179211 kinfo = &h->kinfo;
180212 for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
181
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
213
+ struct hns3_enet_ring *ring = &priv->ring[i];
182214 struct hns3_enet_ring_group *rx_group;
183215 u64 pre_rx_pkt;
184216
@@ -201,9 +233,9 @@
201233 u32 i;
202234
203235 for (i = start_ringid; i <= end_ringid; i++) {
204
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
236
+ struct hns3_enet_ring *ring = &priv->ring[i];
205237
206
- hns3_clean_tx_ring(ring, budget);
238
+ hns3_clean_tx_ring(ring, 0);
207239 }
208240 }
209241
@@ -268,6 +300,105 @@
268300 return ret_val;
269301 }
270302
303
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
304
+{
305
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
306
+ st_param[HNAE3_LOOP_APP][1] =
307
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
308
+
309
+ st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
310
+ st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
311
+ h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
312
+
313
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
314
+ HNAE3_LOOP_PARALLEL_SERDES;
315
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
316
+ h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
317
+
318
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
319
+ st_param[HNAE3_LOOP_PHY][1] =
320
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
321
+}
322
+
323
+static void hns3_selftest_prepare(struct net_device *ndev,
324
+ bool if_running, int (*st_param)[2])
325
+{
326
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
327
+ struct hnae3_handle *h = priv->ae_handle;
328
+
329
+ if (netif_msg_ifdown(h))
330
+ netdev_info(ndev, "self test start\n");
331
+
332
+ hns3_set_selftest_param(h, st_param);
333
+
334
+ if (if_running)
335
+ ndev->netdev_ops->ndo_stop(ndev);
336
+
337
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
338
+ /* Disable the vlan filter because selftest does not support it */
339
+ if (h->ae_algo->ops->enable_vlan_filter &&
340
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
341
+ h->ae_algo->ops->enable_vlan_filter(h, false);
342
+#endif
343
+
344
+ /* Tell firmware to stop mac autoneg before loopback test start,
345
+ * otherwise the loopback test may fail when the port is still
346
+ * negotiating.
347
+ */
348
+ if (h->ae_algo->ops->halt_autoneg)
349
+ h->ae_algo->ops->halt_autoneg(h, true);
350
+
351
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
352
+}
353
+
354
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
355
+{
356
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
357
+ struct hnae3_handle *h = priv->ae_handle;
358
+
359
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
360
+
361
+ if (h->ae_algo->ops->halt_autoneg)
362
+ h->ae_algo->ops->halt_autoneg(h, false);
363
+
364
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
365
+ if (h->ae_algo->ops->enable_vlan_filter &&
366
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
367
+ h->ae_algo->ops->enable_vlan_filter(h, true);
368
+#endif
369
+
370
+ if (if_running)
371
+ ndev->netdev_ops->ndo_open(ndev);
372
+
373
+ if (netif_msg_ifdown(h))
374
+ netdev_info(ndev, "self test end\n");
375
+}
376
+
377
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
378
+ struct ethtool_test *eth_test, u64 *data)
379
+{
380
+ int test_index = 0;
381
+ u32 i;
382
+
383
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
384
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
385
+
386
+ if (!st_param[i][1])
387
+ continue;
388
+
389
+ data[test_index] = hns3_lp_up(ndev, loop_type);
390
+ if (!data[test_index])
391
+ data[test_index] = hns3_lp_run_test(ndev, loop_type);
392
+
393
+ hns3_lp_down(ndev, loop_type);
394
+
395
+ if (data[test_index])
396
+ eth_test->flags |= ETH_TEST_FL_FAILED;
397
+
398
+ test_index++;
399
+ }
400
+}
401
+
271402 /**
272403 * hns3_nic_self_test - self test
273404 * @ndev: net device
@@ -277,68 +408,21 @@
277408 static void hns3_self_test(struct net_device *ndev,
278409 struct ethtool_test *eth_test, u64 *data)
279410 {
280
- struct hns3_nic_priv *priv = netdev_priv(ndev);
281
- struct hnae3_handle *h = priv->ae_handle;
282411 int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
283412 bool if_running = netif_running(ndev);
284
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
285
- bool dis_vlan_filter;
286
-#endif
287
- int test_index = 0;
288
- u32 i;
413
+
414
+ if (hns3_nic_resetting(ndev)) {
415
+ netdev_err(ndev, "dev resetting!");
416
+ return;
417
+ }
289418
290419 /* Only do offline selftest, or pass by default */
291420 if (eth_test->flags != ETH_TEST_FL_OFFLINE)
292421 return;
293422
294
- st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
295
- st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
296
- h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
297
-
298
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
299
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
300
- h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
301
-
302
- if (if_running)
303
- ndev->netdev_ops->ndo_stop(ndev);
304
-
305
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
306
- /* Disable the vlan filter for selftest does not support it */
307
- dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
308
- h->ae_algo->ops->enable_vlan_filter;
309
- if (dis_vlan_filter)
310
- h->ae_algo->ops->enable_vlan_filter(h, false);
311
-#endif
312
-
313
- set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
314
-
315
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
316
- enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
317
-
318
- if (!st_param[i][1])
319
- continue;
320
-
321
- data[test_index] = hns3_lp_up(ndev, loop_type);
322
- if (!data[test_index]) {
323
- data[test_index] = hns3_lp_run_test(ndev, loop_type);
324
- hns3_lp_down(ndev, loop_type);
325
- }
326
-
327
- if (data[test_index])
328
- eth_test->flags |= ETH_TEST_FL_FAILED;
329
-
330
- test_index++;
331
- }
332
-
333
- clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
334
-
335
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
336
- if (dis_vlan_filter)
337
- h->ae_algo->ops->enable_vlan_filter(h, true);
338
-#endif
339
-
340
- if (if_running)
341
- ndev->netdev_ops->ndo_open(ndev);
423
+ hns3_selftest_prepare(ndev, if_running, st_param);
424
+ hns3_do_selftest(ndev, st_param, eth_test, data);
425
+ hns3_selftest_restore(ndev, if_running);
342426 }
343427
344428 static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -356,9 +440,10 @@
356440
357441 case ETH_SS_TEST:
358442 return ops->get_sset_count(h, stringset);
359
- }
360443
361
- return 0;
444
+ default:
445
+ return -EOPNOTSUPP;
446
+ }
362447 }
363448
364449 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
@@ -374,9 +459,8 @@
374459 data[ETH_GSTRING_LEN - 1] = '\0';
375460
376461 /* first, prepend the prefix string */
377
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
378
- prefix, i);
379
- n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
462
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
463
+ prefix, i);
380464 size_left = (ETH_GSTRING_LEN - 1) - n1;
381465
382466 /* now, concatenate the stats string to it */
@@ -417,10 +501,12 @@
417501 switch (stringset) {
418502 case ETH_SS_STATS:
419503 buff = hns3_get_strings_tqps(h, buff);
420
- h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
504
+ ops->get_strings(h, stringset, (u8 *)buff);
421505 break;
422506 case ETH_SS_TEST:
423507 ops->get_strings(h, stringset, data);
508
+ break;
509
+ default:
424510 break;
425511 }
426512 }
@@ -435,7 +521,7 @@
435521
436522 /* get stats for Tx */
437523 for (i = 0; i < kinfo->num_tqps; i++) {
438
- ring = nic_priv->ring_data[i].ring;
524
+ ring = &nic_priv->ring[i];
439525 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
440526 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
441527 *data++ = *(u64 *)stat;
@@ -444,7 +530,7 @@
444530
445531 /* get stats for Rx */
446532 for (i = 0; i < kinfo->num_tqps; i++) {
447
- ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
533
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
448534 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
449535 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
450536 *data++ = *(u64 *)stat;
@@ -465,6 +551,11 @@
465551 struct hnae3_handle *h = hns3_get_handle(netdev);
466552 u64 *p = data;
467553
554
+ if (hns3_nic_resetting(netdev)) {
555
+ netdev_err(netdev, "dev resetting, could not get stats\n");
556
+ return;
557
+ }
558
+
468559 if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
469560 netdev_err(netdev, "could not get any statistics\n");
470561 return;
@@ -484,10 +575,12 @@
484575 {
485576 struct hns3_nic_priv *priv = netdev_priv(netdev);
486577 struct hnae3_handle *h = priv->ae_handle;
578
+ u32 fw_version;
487579
488
- strncpy(drvinfo->version, hns3_driver_version,
489
- sizeof(drvinfo->version));
490
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
580
+ if (!h->ae_algo->ops->get_fw_version) {
581
+ netdev_err(netdev, "could not get fw version!\n");
582
+ return;
583
+ }
491584
492585 strncpy(drvinfo->driver, h->pdev->driver->name,
493586 sizeof(drvinfo->driver));
@@ -497,15 +590,25 @@
497590 sizeof(drvinfo->bus_info));
498591 drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
499592
500
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
501
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
593
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
594
+
595
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
596
+ "%lu.%lu.%lu.%lu",
597
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
598
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
599
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
600
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
601
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
602
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
603
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
604
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
502605 }
503606
504607 static u32 hns3_get_link(struct net_device *netdev)
505608 {
506609 struct hnae3_handle *h = hns3_get_handle(netdev);
507610
508
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
611
+ if (h->ae_algo->ops->get_status)
509612 return h->ae_algo->ops->get_status(h);
510613 else
511614 return 0;
@@ -518,11 +621,16 @@
518621 struct hnae3_handle *h = priv->ae_handle;
519622 int queue_num = h->kinfo.num_tqps;
520623
624
+ if (hns3_nic_resetting(netdev)) {
625
+ netdev_err(netdev, "dev resetting!");
626
+ return;
627
+ }
628
+
521629 param->tx_max_pending = HNS3_RING_MAX_PENDING;
522630 param->rx_max_pending = HNS3_RING_MAX_PENDING;
523631
524
- param->tx_pending = priv->ring_data[0].ring->desc_num;
525
- param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
632
+ param->tx_pending = priv->ring[0].desc_num;
633
+ param->rx_pending = priv->ring[queue_num].desc_num;
526634 }
527635
528636 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -530,7 +638,7 @@
530638 {
531639 struct hnae3_handle *h = hns3_get_handle(netdev);
532640
533
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
641
+ if (h->ae_algo->ops->get_pauseparam)
534642 h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
535643 &param->rx_pause, &param->tx_pause);
536644 }
@@ -540,6 +648,10 @@
540648 {
541649 struct hnae3_handle *h = hns3_get_handle(netdev);
542650
651
+ netif_dbg(h, drv, netdev,
652
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
653
+ param->autoneg, param->rx_pause, param->tx_pause);
654
+
543655 if (h->ae_algo->ops->set_pauseparam)
544656 return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
545657 param->rx_pause,
@@ -547,30 +659,77 @@
547659 return -EOPNOTSUPP;
548660 }
549661
662
+static void hns3_get_ksettings(struct hnae3_handle *h,
663
+ struct ethtool_link_ksettings *cmd)
664
+{
665
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
666
+
667
+ /* 1.auto_neg & speed & duplex from cmd */
668
+ if (ops->get_ksettings_an_result)
669
+ ops->get_ksettings_an_result(h,
670
+ &cmd->base.autoneg,
671
+ &cmd->base.speed,
672
+ &cmd->base.duplex);
673
+
674
+ /* 2.get link mode */
675
+ if (ops->get_link_mode)
676
+ ops->get_link_mode(h,
677
+ cmd->link_modes.supported,
678
+ cmd->link_modes.advertising);
679
+
680
+ /* 3.mdix_ctrl&mdix get from phy reg */
681
+ if (ops->get_mdix_mode)
682
+ ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
683
+ &cmd->base.eth_tp_mdix);
684
+}
685
+
550686 static int hns3_get_link_ksettings(struct net_device *netdev,
551687 struct ethtool_link_ksettings *cmd)
552688 {
553689 struct hnae3_handle *h = hns3_get_handle(netdev);
554
- u32 flowctrl_adv = 0;
690
+ const struct hnae3_ae_ops *ops;
691
+ u8 module_type;
692
+ u8 media_type;
555693 u8 link_stat;
556694
557
- if (!h->ae_algo || !h->ae_algo->ops)
695
+ ops = h->ae_algo->ops;
696
+ if (ops->get_media_type)
697
+ ops->get_media_type(h, &media_type, &module_type);
698
+ else
558699 return -EOPNOTSUPP;
559700
560
- /* 1.auto_neg & speed & duplex from cmd */
561
- if (netdev->phydev) {
562
- phy_ethtool_ksettings_get(netdev->phydev, cmd);
701
+ switch (media_type) {
702
+ case HNAE3_MEDIA_TYPE_NONE:
703
+ cmd->base.port = PORT_NONE;
704
+ hns3_get_ksettings(h, cmd);
705
+ break;
706
+ case HNAE3_MEDIA_TYPE_FIBER:
707
+ if (module_type == HNAE3_MODULE_TYPE_CR)
708
+ cmd->base.port = PORT_DA;
709
+ else
710
+ cmd->base.port = PORT_FIBRE;
563711
712
+ hns3_get_ksettings(h, cmd);
713
+ break;
714
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
715
+ cmd->base.port = PORT_NONE;
716
+ hns3_get_ksettings(h, cmd);
717
+ break;
718
+ case HNAE3_MEDIA_TYPE_COPPER:
719
+ cmd->base.port = PORT_TP;
720
+ if (!netdev->phydev)
721
+ hns3_get_ksettings(h, cmd);
722
+ else
723
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
724
+ break;
725
+ default:
726
+
727
+ netdev_warn(netdev, "Unknown media type");
564728 return 0;
565729 }
566730
567
- if (h->ae_algo->ops->get_ksettings_an_result)
568
- h->ae_algo->ops->get_ksettings_an_result(h,
569
- &cmd->base.autoneg,
570
- &cmd->base.speed,
571
- &cmd->base.duplex);
572
- else
573
- return -EOPNOTSUPP;
731
+ /* mdio_support */
732
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
574733
575734 link_stat = hns3_get_link(netdev);
576735 if (!link_stat) {
....@@ -578,35 +737,51 @@
578737 cmd->base.duplex = DUPLEX_UNKNOWN;
579738 }
580739
581
- /* 2.get link mode and port type*/
582
- if (h->ae_algo->ops->get_link_mode)
583
- h->ae_algo->ops->get_link_mode(h,
584
- cmd->link_modes.supported,
585
- cmd->link_modes.advertising);
740
+ return 0;
741
+}
586742
587
- cmd->base.port = PORT_NONE;
588
- if (h->ae_algo->ops->get_port_type)
589
- h->ae_algo->ops->get_port_type(h,
590
- &cmd->base.port);
743
+static int hns3_check_ksettings_param(const struct net_device *netdev,
744
+ const struct ethtool_link_ksettings *cmd)
745
+{
746
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
747
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
748
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
749
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
750
+ u8 autoneg;
751
+ u32 speed;
752
+ u8 duplex;
753
+ int ret;
591754
592
- /* 3.mdix_ctrl&mdix get from phy reg */
593
- if (h->ae_algo->ops->get_mdix_mode)
594
- h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
595
- &cmd->base.eth_tp_mdix);
596
- /* 4.mdio_support */
597
- cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
755
+ /* hw doesn't support using a specified speed and duplex to negotiate,
756
+ * so it is unnecessary to check them when autoneg is on.
757
+ */
758
+ if (cmd->base.autoneg)
759
+ return 0;
598760
599
- /* 5.get flow control setttings */
600
- if (h->ae_algo->ops->get_flowctrl_adv)
601
- h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
761
+ if (ops->get_ksettings_an_result) {
762
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
763
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
764
+ cmd->base.duplex == duplex)
765
+ return 0;
766
+ }
602767
603
- if (flowctrl_adv & ADVERTISED_Pause)
604
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
605
- Pause);
768
+ if (ops->get_media_type)
769
+ ops->get_media_type(handle, &media_type, &module_type);
606770
607
- if (flowctrl_adv & ADVERTISED_Asym_Pause)
608
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
609
- Asym_Pause);
771
+ if (cmd->base.duplex == DUPLEX_HALF &&
772
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
773
+ netdev_err(netdev,
774
+ "only copper port supports half duplex!");
775
+ return -EINVAL;
776
+ }
777
+
778
+ if (ops->check_port_speed) {
779
+ ret = ops->check_port_speed(handle, cmd->base.speed);
780
+ if (ret) {
781
+ netdev_err(netdev, "unsupported speed\n");
782
+ return ret;
783
+ }
784
+ }
610785
611786 return 0;
612787 }
@@ -614,19 +789,63 @@
614789 static int hns3_set_link_ksettings(struct net_device *netdev,
615790 const struct ethtool_link_ksettings *cmd)
616791 {
617
- /* Only support ksettings_set for netdev with phy attached for now */
618
- if (netdev->phydev)
619
- return phy_ethtool_ksettings_set(netdev->phydev, cmd);
792
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
793
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
794
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
795
+ int ret;
620796
621
- return -EOPNOTSUPP;
797
+ /* The chip doesn't support this mode. */
798
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
799
+ return -EINVAL;
800
+
801
+ netif_dbg(handle, drv, netdev,
802
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
803
+ netdev->phydev ? "phy" : "mac",
804
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
805
+
806
+ /* Only support ksettings_set for netdev with phy attached for now */
807
+ if (netdev->phydev) {
808
+ if (cmd->base.speed == SPEED_1000 &&
809
+ cmd->base.autoneg == AUTONEG_DISABLE)
810
+ return -EINVAL;
811
+
812
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
813
+ }
814
+
815
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
816
+ return -EOPNOTSUPP;
817
+
818
+ ret = hns3_check_ksettings_param(netdev, cmd);
819
+ if (ret)
820
+ return ret;
821
+
822
+ if (ops->set_autoneg) {
823
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
824
+ if (ret)
825
+ return ret;
826
+ }
827
+
828
+ /* hw doesn't support using a specified speed and duplex to negotiate,
829
+ * so ignore them when autoneg is on.
830
+ */
831
+ if (cmd->base.autoneg) {
832
+ netdev_info(netdev,
833
+ "autoneg is on, ignore the speed and duplex\n");
834
+ return 0;
835
+ }
836
+
837
+ if (ops->cfg_mac_speed_dup_h)
838
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
839
+ cmd->base.duplex);
840
+
841
+ return ret;
622842 }
623843
624844 static u32 hns3_get_rss_key_size(struct net_device *netdev)
625845 {
626846 struct hnae3_handle *h = hns3_get_handle(netdev);
627847
628
- if (!h->ae_algo || !h->ae_algo->ops ||
629
- !h->ae_algo->ops->get_rss_key_size)
848
+ if (!h->ae_algo->ops->get_rss_key_size)
630849 return 0;
631850
632851 return h->ae_algo->ops->get_rss_key_size(h);
@@ -636,8 +855,7 @@
636855 {
637856 struct hnae3_handle *h = hns3_get_handle(netdev);
638857
639
- if (!h->ae_algo || !h->ae_algo->ops ||
640
- !h->ae_algo->ops->get_rss_indir_size)
858
+ if (!h->ae_algo->ops->get_rss_indir_size)
641859 return 0;
642860
643861 return h->ae_algo->ops->get_rss_indir_size(h);
@@ -648,7 +866,7 @@
648866 {
649867 struct hnae3_handle *h = hns3_get_handle(netdev);
650868
651
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
869
+ if (!h->ae_algo->ops->get_rss)
652870 return -EOPNOTSUPP;
653871
654872 return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
@@ -658,16 +876,18 @@
658876 const u8 *key, const u8 hfunc)
659877 {
660878 struct hnae3_handle *h = hns3_get_handle(netdev);
879
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
661880
662
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
881
+ if (!h->ae_algo->ops->set_rss)
663882 return -EOPNOTSUPP;
664883
665
- /* currently we only support Toeplitz hash */
666
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
667
- netdev_err(netdev,
668
- "hash func not supported (only Toeplitz hash)\n");
884
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
885
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
886
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
887
+ netdev_err(netdev, "hash func not supported\n");
669888 return -EOPNOTSUPP;
670889 }
890
+
671891 if (!indir) {
672892 netdev_err(netdev,
673893 "set rss failed for indir is empty\n");
@@ -683,34 +903,86 @@
683903 {
684904 struct hnae3_handle *h = hns3_get_handle(netdev);
685905
686
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
687
- return -EOPNOTSUPP;
688
-
689906 switch (cmd->cmd) {
690907 case ETHTOOL_GRXRINGS:
691
- cmd->data = h->kinfo.rss_size;
692
- break;
908
+ cmd->data = h->kinfo.num_tqps;
909
+ return 0;
693910 case ETHTOOL_GRXFH:
694
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
911
+ if (h->ae_algo->ops->get_rss_tuple)
912
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
913
+ return -EOPNOTSUPP;
914
+ case ETHTOOL_GRXCLSRLCNT:
915
+ if (h->ae_algo->ops->get_fd_rule_cnt)
916
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
917
+ return -EOPNOTSUPP;
918
+ case ETHTOOL_GRXCLSRULE:
919
+ if (h->ae_algo->ops->get_fd_rule_info)
920
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
921
+ return -EOPNOTSUPP;
922
+ case ETHTOOL_GRXCLSRLALL:
923
+ if (h->ae_algo->ops->get_fd_all_rules)
924
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
925
+ rule_locs);
926
+ return -EOPNOTSUPP;
695927 default:
696928 return -EOPNOTSUPP;
697929 }
698
-
699
- return 0;
700930 }
701931
702
-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
703
- u32 new_desc_num)
932
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
933
+ u32 tx_desc_num, u32 rx_desc_num)
704934 {
705935 struct hnae3_handle *h = priv->ae_handle;
706936 int i;
707937
708
- h->kinfo.num_desc = new_desc_num;
938
+ h->kinfo.num_tx_desc = tx_desc_num;
939
+ h->kinfo.num_rx_desc = rx_desc_num;
709940
710
- for (i = 0; i < h->kinfo.num_tqps * 2; i++)
711
- priv->ring_data[i].ring->desc_num = new_desc_num;
941
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
942
+ priv->ring[i].desc_num = tx_desc_num;
943
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
944
+ }
945
+}
712946
713
- return hns3_init_all_ring(priv);
947
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
948
+{
949
+ struct hnae3_handle *handle = priv->ae_handle;
950
+ struct hns3_enet_ring *tmp_rings;
951
+ int i;
952
+
953
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
954
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
955
+ if (!tmp_rings)
956
+ return NULL;
957
+
958
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
959
+ memcpy(&tmp_rings[i], &priv->ring[i],
960
+ sizeof(struct hns3_enet_ring));
961
+ tmp_rings[i].skb = NULL;
962
+ }
963
+
964
+ return tmp_rings;
965
+}
966
+
967
+static int hns3_check_ringparam(struct net_device *ndev,
968
+ struct ethtool_ringparam *param)
969
+{
970
+ if (hns3_nic_resetting(ndev))
971
+ return -EBUSY;
972
+
973
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
974
+ return -EINVAL;
975
+
976
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
977
+ param->tx_pending < HNS3_RING_MIN_PENDING ||
978
+ param->rx_pending > HNS3_RING_MAX_PENDING ||
979
+ param->rx_pending < HNS3_RING_MIN_PENDING) {
980
+ netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
981
+ HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
982
+ return -EINVAL;
983
+ }
984
+
985
+ return 0;
714986 }
715987
716988 static int hns3_set_ringparam(struct net_device *ndev,
@@ -718,59 +990,61 @@
718990 {
719991 struct hns3_nic_priv *priv = netdev_priv(ndev);
720992 struct hnae3_handle *h = priv->ae_handle;
993
+ struct hns3_enet_ring *tmp_rings;
721994 bool if_running = netif_running(ndev);
722
- u32 old_desc_num, new_desc_num;
723
- int ret;
995
+ u32 old_tx_desc_num, new_tx_desc_num;
996
+ u32 old_rx_desc_num, new_rx_desc_num;
997
+ u16 queue_num = h->kinfo.num_tqps;
998
+ int ret, i;
724999
725
- if (param->rx_mini_pending || param->rx_jumbo_pending)
726
- return -EINVAL;
727
-
728
- if (param->tx_pending != param->rx_pending) {
729
- netdev_err(ndev,
730
- "Descriptors of tx and rx must be equal");
731
- return -EINVAL;
732
- }
733
-
734
- if (param->tx_pending > HNS3_RING_MAX_PENDING ||
735
- param->tx_pending < HNS3_RING_MIN_PENDING) {
736
- netdev_err(ndev,
737
- "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
738
- param->tx_pending, HNS3_RING_MIN_PENDING,
739
- HNS3_RING_MAX_PENDING);
740
- return -EINVAL;
741
- }
742
-
743
- new_desc_num = param->tx_pending;
744
-
745
- /* Hardware requires that its descriptors must be multiple of eight */
746
- new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
747
- old_desc_num = h->kinfo.num_desc;
748
- if (old_desc_num == new_desc_num)
749
- return 0;
750
-
751
- netdev_info(ndev,
752
- "Changing descriptor count from %d to %d.\n",
753
- old_desc_num, new_desc_num);
754
-
755
- if (if_running)
756
- dev_close(ndev);
757
-
758
- ret = hns3_uninit_all_ring(priv);
1000
+ ret = hns3_check_ringparam(ndev, param);
7591001 if (ret)
7601002 return ret;
7611003
762
- ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
763
- if (ret) {
764
- ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
765
- if (ret) {
766
- netdev_err(ndev,
767
- "Revert to old bd num fail, ret=%d.\n", ret);
768
- return ret;
769
- }
1004
+ /* Hardware requires that its descriptors must be multiple of eight */
1005
+ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
1006
+ new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
1007
+ old_tx_desc_num = priv->ring[0].desc_num;
1008
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
1009
+ if (old_tx_desc_num == new_tx_desc_num &&
1010
+ old_rx_desc_num == new_rx_desc_num)
1011
+ return 0;
1012
+
1013
+ tmp_rings = hns3_backup_ringparam(priv);
1014
+ if (!tmp_rings) {
1015
+ netdev_err(ndev,
1016
+ "backup ring param failed by allocating memory fail\n");
1017
+ return -ENOMEM;
7701018 }
7711019
1020
+ netdev_info(ndev,
1021
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
1022
+ old_tx_desc_num, old_rx_desc_num,
1023
+ new_tx_desc_num, new_rx_desc_num);
1024
+
7721025 if (if_running)
773
- ret = dev_open(ndev);
1026
+ ndev->netdev_ops->ndo_stop(ndev);
1027
+
1028
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
1029
+ ret = hns3_init_all_ring(priv);
1030
+ if (ret) {
1031
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
1032
+ ret);
1033
+
1034
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
1035
+ old_rx_desc_num);
1036
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
1037
+ memcpy(&priv->ring[i], &tmp_rings[i],
1038
+ sizeof(struct hns3_enet_ring));
1039
+ } else {
1040
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
1041
+ hns3_fini_ring(&tmp_rings[i]);
1042
+ }
1043
+
1044
+ kfree(tmp_rings);
1045
+
1046
+ if (if_running)
1047
+ ret = ndev->netdev_ops->ndo_open(ndev);
7741048
7751049 return ret;
7761050 }
@@ -779,12 +1053,19 @@
7791053 {
7801054 struct hnae3_handle *h = hns3_get_handle(netdev);
7811055
782
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
783
- return -EOPNOTSUPP;
784
-
7851056 switch (cmd->cmd) {
7861057 case ETHTOOL_SRXFH:
787
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
1058
+ if (h->ae_algo->ops->set_rss_tuple)
1059
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
1060
+ return -EOPNOTSUPP;
1061
+ case ETHTOOL_SRXCLSRLINS:
1062
+ if (h->ae_algo->ops->add_fd_entry)
1063
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
1064
+ return -EOPNOTSUPP;
1065
+ case ETHTOOL_SRXCLSRLDEL:
1066
+ if (h->ae_algo->ops->del_fd_entry)
1067
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
1068
+ return -EOPNOTSUPP;
7881069 default:
7891070 return -EOPNOTSUPP;
7901071 }
@@ -792,19 +1073,36 @@
7921073
7931074 static int hns3_nway_reset(struct net_device *netdev)
7941075 {
1076
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
1077
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
7951078 struct phy_device *phy = netdev->phydev;
1079
+ int autoneg;
7961080
7971081 if (!netif_running(netdev))
7981082 return 0;
7991083
800
- /* Only support nway_reset for netdev with phy attached for now */
801
- if (!phy)
1084
+ if (hns3_nic_resetting(netdev)) {
1085
+ netdev_err(netdev, "dev resetting!");
1086
+ return -EBUSY;
1087
+ }
1088
+
1089
+ if (!ops->get_autoneg || !ops->restart_autoneg)
8021090 return -EOPNOTSUPP;
8031091
804
- if (phy->autoneg != AUTONEG_ENABLE)
1092
+ autoneg = ops->get_autoneg(handle);
1093
+ if (autoneg != AUTONEG_ENABLE) {
1094
+ netdev_err(netdev,
1095
+ "Autoneg is off, don't support to restart it\n");
8051096 return -EINVAL;
1097
+ }
8061098
807
- return genphy_restart_aneg(phy);
1099
+ netif_dbg(handle, drv, netdev,
1100
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
1101
+
1102
+ if (phy)
1103
+ return genphy_restart_aneg(phy);
1104
+
1105
+ return ops->restart_autoneg(handle);
8081106 }
8091107
8101108 static void hns3_get_channels(struct net_device *netdev,
@@ -824,15 +1122,18 @@
8241122 struct hnae3_handle *h = priv->ae_handle;
8251123 u16 queue_num = h->kinfo.num_tqps;
8261124
1125
+ if (hns3_nic_resetting(netdev))
1126
+ return -EBUSY;
1127
+
8271128 if (queue >= queue_num) {
8281129 netdev_err(netdev,
829
- "Invalid queue value %d! Queue max id=%d\n",
1130
+ "Invalid queue value %u! Queue max id=%u\n",
8301131 queue, queue_num - 1);
8311132 return -EINVAL;
8321133 }
8331134
834
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
835
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
1135
+ tx_vector = priv->ring[queue].tqp_vector;
1136
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
8361137
8371138 cmd->use_adaptive_tx_coalesce =
8381139 tx_vector->tx_group.coal.gl_adapt_enable;
@@ -876,14 +1177,14 @@
8761177 rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
8771178 if (rx_gl != cmd->rx_coalesce_usecs) {
8781179 netdev_info(netdev,
879
- "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
1180
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
8801181 cmd->rx_coalesce_usecs, rx_gl);
8811182 }
8821183
8831184 tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
8841185 if (tx_gl != cmd->tx_coalesce_usecs) {
8851186 netdev_info(netdev,
886
- "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
1187
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
8871188 cmd->tx_coalesce_usecs, tx_gl);
8881189 }
8891190
@@ -911,7 +1212,7 @@
9111212 rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
9121213 if (rl != cmd->rx_coalesce_usecs_high) {
9131214 netdev_info(netdev,
914
- "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
1215
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
9151216 cmd->rx_coalesce_usecs_high, rl);
9161217 }
9171218
@@ -940,7 +1241,7 @@
9401241 if (cmd->use_adaptive_tx_coalesce == 1 ||
9411242 cmd->use_adaptive_rx_coalesce == 1) {
9421243 netdev_info(netdev,
943
- "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
1244
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
9441245 cmd->use_adaptive_tx_coalesce,
9451246 cmd->use_adaptive_rx_coalesce);
9461247 }
@@ -957,8 +1258,8 @@
9571258 struct hnae3_handle *h = priv->ae_handle;
9581259 int queue_num = h->kinfo.num_tqps;
9591260
960
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
961
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
1261
+ tx_vector = priv->ring[queue].tqp_vector;
1262
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
9621263
9631264 tx_vector->tx_group.coal.gl_adapt_enable =
9641265 cmd->use_adaptive_tx_coalesce;
@@ -984,6 +1285,9 @@
9841285 u16 queue_num = h->kinfo.num_tqps;
9851286 int ret;
9861287 int i;
1288
+
1289
+ if (hns3_nic_resetting(netdev))
1290
+ return -EBUSY;
9871291
9881292 ret = hns3_check_coalesce_para(netdev, cmd);
9891293 if (ret)
@@ -1024,13 +1328,184 @@
10241328 {
10251329 struct hnae3_handle *h = hns3_get_handle(netdev);
10261330
1027
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
1331
+ if (!h->ae_algo->ops->set_led_id)
10281332 return -EOPNOTSUPP;
10291333
10301334 return h->ae_algo->ops->set_led_id(h, state);
10311335 }
10321336
1337
+static u32 hns3_get_msglevel(struct net_device *netdev)
1338
+{
1339
+ struct hnae3_handle *h = hns3_get_handle(netdev);
1340
+
1341
+ return h->msg_enable;
1342
+}
1343
+
1344
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
1345
+{
1346
+ struct hnae3_handle *h = hns3_get_handle(netdev);
1347
+
1348
+ h->msg_enable = msg_level;
1349
+}
1350
+
1351
+/* Translate local fec value into ethtool value. */
1352
+static unsigned int loc_to_eth_fec(u8 loc_fec)
1353
+{
1354
+ u32 eth_fec = 0;
1355
+
1356
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
1357
+ eth_fec |= ETHTOOL_FEC_AUTO;
1358
+ if (loc_fec & BIT(HNAE3_FEC_RS))
1359
+ eth_fec |= ETHTOOL_FEC_RS;
1360
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
1361
+ eth_fec |= ETHTOOL_FEC_BASER;
1362
+
1363
+ /* if nothing is set, then FEC is off */
1364
+ if (!eth_fec)
1365
+ eth_fec = ETHTOOL_FEC_OFF;
1366
+
1367
+ return eth_fec;
1368
+}
1369
+
1370
+/* Translate ethtool fec value into local value. */
1371
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
1372
+{
1373
+ u32 loc_fec = 0;
1374
+
1375
+ if (eth_fec & ETHTOOL_FEC_OFF)
1376
+ return loc_fec;
1377
+
1378
+ if (eth_fec & ETHTOOL_FEC_AUTO)
1379
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
1380
+ if (eth_fec & ETHTOOL_FEC_RS)
1381
+ loc_fec |= BIT(HNAE3_FEC_RS);
1382
+ if (eth_fec & ETHTOOL_FEC_BASER)
1383
+ loc_fec |= BIT(HNAE3_FEC_BASER);
1384
+
1385
+ return loc_fec;
1386
+}
1387
+
1388
+static int hns3_get_fecparam(struct net_device *netdev,
1389
+ struct ethtool_fecparam *fec)
1390
+{
1391
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
1392
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
1393
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1394
+ u8 fec_ability;
1395
+ u8 fec_mode;
1396
+
1397
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
1398
+ return -EOPNOTSUPP;
1399
+
1400
+ if (!ops->get_fec)
1401
+ return -EOPNOTSUPP;
1402
+
1403
+ ops->get_fec(handle, &fec_ability, &fec_mode);
1404
+
1405
+ fec->fec = loc_to_eth_fec(fec_ability);
1406
+ fec->active_fec = loc_to_eth_fec(fec_mode);
1407
+
1408
+ return 0;
1409
+}
1410
+
1411
+static int hns3_set_fecparam(struct net_device *netdev,
1412
+ struct ethtool_fecparam *fec)
1413
+{
1414
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
1415
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
1416
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1417
+ u32 fec_mode;
1418
+
1419
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
1420
+ return -EOPNOTSUPP;
1421
+
1422
+ if (!ops->set_fec)
1423
+ return -EOPNOTSUPP;
1424
+ fec_mode = eth_to_loc_fec(fec->fec);
1425
+
1426
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
1427
+
1428
+ return ops->set_fec(handle, fec_mode);
1429
+}
1430
+
1431
+static int hns3_get_module_info(struct net_device *netdev,
1432
+ struct ethtool_modinfo *modinfo)
1433
+{
1434
+#define HNS3_SFF_8636_V1_3 0x03
1435
+
1436
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
1437
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
1438
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1439
+ struct hns3_sfp_type sfp_type;
1440
+ int ret;
1441
+
1442
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
1443
+ !ops->get_module_eeprom)
1444
+ return -EOPNOTSUPP;
1445
+
1446
+ memset(&sfp_type, 0, sizeof(sfp_type));
1447
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
1448
+ (u8 *)&sfp_type);
1449
+ if (ret)
1450
+ return ret;
1451
+
1452
+ switch (sfp_type.type) {
1453
+ case SFF8024_ID_SFP:
1454
+ modinfo->type = ETH_MODULE_SFF_8472;
1455
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1456
+ break;
1457
+ case SFF8024_ID_QSFP_8438:
1458
+ modinfo->type = ETH_MODULE_SFF_8436;
1459
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1460
+ break;
1461
+ case SFF8024_ID_QSFP_8436_8636:
1462
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
1463
+ modinfo->type = ETH_MODULE_SFF_8436;
1464
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
1465
+ } else {
1466
+ modinfo->type = ETH_MODULE_SFF_8636;
1467
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1468
+ }
1469
+ break;
1470
+ case SFF8024_ID_QSFP28_8636:
1471
+ modinfo->type = ETH_MODULE_SFF_8636;
1472
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
1473
+ break;
1474
+ default:
1475
+ netdev_err(netdev, "Optical module unknown: %#x\n",
1476
+ sfp_type.type);
1477
+ return -EINVAL;
1478
+ }
1479
+
1480
+ return 0;
1481
+}
1482
+
1483
+static int hns3_get_module_eeprom(struct net_device *netdev,
1484
+ struct ethtool_eeprom *ee, u8 *data)
1485
+{
1486
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
1487
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
1488
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1489
+
1490
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
1491
+ !ops->get_module_eeprom)
1492
+ return -EOPNOTSUPP;
1493
+
1494
+ if (!ee->len)
1495
+ return -EINVAL;
1496
+
1497
+ memset(data, 0, ee->len);
1498
+
1499
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
1500
+}
1501
+
1502
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
1503
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
1504
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
1505
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
1506
+
10331507 static const struct ethtool_ops hns3vf_ethtool_ops = {
1508
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
10341509 .get_drvinfo = hns3_get_drvinfo,
10351510 .get_ringparam = hns3_get_ringparam,
10361511 .set_ringparam = hns3_set_ringparam,
@@ -1038,18 +1513,25 @@
10381513 .get_ethtool_stats = hns3_get_stats,
10391514 .get_sset_count = hns3_get_sset_count,
10401515 .get_rxnfc = hns3_get_rxnfc,
1516
+ .set_rxnfc = hns3_set_rxnfc,
10411517 .get_rxfh_key_size = hns3_get_rss_key_size,
10421518 .get_rxfh_indir_size = hns3_get_rss_indir_size,
10431519 .get_rxfh = hns3_get_rss,
10441520 .set_rxfh = hns3_set_rss,
10451521 .get_link_ksettings = hns3_get_link_ksettings,
10461522 .get_channels = hns3_get_channels,
1523
+ .set_channels = hns3_set_channels,
10471524 .get_coalesce = hns3_get_coalesce,
10481525 .set_coalesce = hns3_set_coalesce,
1526
+ .get_regs_len = hns3_get_regs_len,
1527
+ .get_regs = hns3_get_regs,
10491528 .get_link = hns3_get_link,
1529
+ .get_msglevel = hns3_get_msglevel,
1530
+ .set_msglevel = hns3_set_msglevel,
10501531 };
10511532
10521533 static const struct ethtool_ops hns3_ethtool_ops = {
1534
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
10531535 .self_test = hns3_self_test,
10541536 .get_drvinfo = hns3_get_drvinfo,
10551537 .get_link = hns3_get_link,
@@ -1076,6 +1558,12 @@
10761558 .get_regs_len = hns3_get_regs_len,
10771559 .get_regs = hns3_get_regs,
10781560 .set_phys_id = hns3_set_phys_id,
1561
+ .get_msglevel = hns3_get_msglevel,
1562
+ .set_msglevel = hns3_set_msglevel,
1563
+ .get_fecparam = hns3_get_fecparam,
1564
+ .set_fecparam = hns3_set_fecparam,
1565
+ .get_module_info = hns3_get_module_info,
1566
+ .get_module_eeprom = hns3_get_module_eeprom,
10791567 };
10801568
10811569 void hns3_ethtool_set_ops(struct net_device *netdev)