forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
--- a/kernel/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/kernel/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -4,12 +4,18 @@
 #include <linux/etherdevice.h>
 #include <linux/string.h>
 #include <linux/phy.h>
+#include <linux/sfp.h>

 #include "hns3_enet.h"

 struct hns3_stats {
 char stats_string[ETH_GSTRING_LEN];
 int stats_offset;
+};
+
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
 };

 /* tqp related stats */
@@ -21,56 +27,60 @@

 static const struct hns3_stats hns3_txq_stats[] = {
 /* Tx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
 HNS3_TQP_STAT("packets", tx_pkts),
 HNS3_TQP_STAT("bytes", tx_bytes),
- HNS3_TQP_STAT("errors", tx_err_cnt),
- HNS3_TQP_STAT("tx_wake", restart_queue),
- HNS3_TQP_STAT("tx_busy", tx_busy),
+ HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("wake", restart_queue),
+ HNS3_TQP_STAT("busy", tx_busy),
+ HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
+ HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
+ HNS3_TQP_STAT("hw_limitation", hw_limitation),
 };

 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)

 static const struct hns3_stats hns3_rxq_stats[] = {
 /* Rx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
 HNS3_TQP_STAT("packets", rx_pkts),
 HNS3_TQP_STAT("bytes", rx_bytes),
 HNS3_TQP_STAT("errors", rx_err_cnt),
 HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
 HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
 HNS3_TQP_STAT("err_bd_num", err_bd_num),
 HNS3_TQP_STAT("l2_err", l2_err),
 HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("multicast", rx_multicast),
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
 };

 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)

 #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)

-#define HNS3_SELF_TEST_TYPE_NUM 2
+#define HNS3_SELF_TEST_TYPE_NUM 4
 #define HNS3_NIC_LB_TEST_PKT_NUM 1
 #define HNS3_NIC_LB_TEST_RING_ID 0
 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+#define HNS3_NIC_LB_SETUP_USEC 10000

 /* Nic loopback test err */
 #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3

-struct hns3_link_mode_mapping {
- u32 hns3_link_mode;
- u32 ethtool_link_mode;
-};
-
 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
 {
 struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ bool vlan_filter_enable;
 int ret;

 if (!h->ae_algo->ops->set_loopback ||
@@ -78,8 +88,10 @@
 return -EOPNOTSUPP;

 switch (loop) {
- case HNAE3_MAC_INTER_LOOP_SERDES:
- case HNAE3_MAC_INTER_LOOP_MAC:
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
 ret = h->ae_algo->ops->set_loopback(h, loop, en);
 break;
 default:
@@ -87,10 +99,17 @@
 break;
 }

- if (ret)
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
 return ret;

- h->ae_algo->ops->set_promisc_mode(h, en, en);
+ if (en) {
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ } else {
+ /* recover promisc mode before loopback test */
+ hns3_request_update_promisc_mode(h);
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
+ }

 return ret;
 }
@@ -105,9 +124,9 @@
 return ret;

 ret = hns3_lp_setup(ndev, loop_mode, true);
- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

- return 0;
+ return ret;
 }

 static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
@@ -120,14 +139,18 @@
 return ret;
 }

- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

 return 0;
 }

 static void hns3_lp_setup_skb(struct sk_buff *skb)
 {
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
 struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
+ struct hnae3_ae_dev *ae_dev;
 unsigned char *packet;
 struct ethhdr *ethh;
 unsigned int i;
@@ -137,7 +160,16 @@
 packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);

 memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
- ethh->h_dest[5] += 0x1f;
+
+ /* The dst mac addr of loopback packet is the same as the host'
+ * mac addr, the SSU component may loop back the packet to host
+ * before the packet reaches mac or serdes, which will defect
+ * the purpose of mac or serdes selftest.
+ */
+ handle = hns3_get_handle(ndev);
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
 eth_zero_addr(ethh->h_source);
 ethh->h_proto = htons(ETH_P_ARP);
 skb_reset_mac_header(skb);
@@ -178,7 +210,7 @@

 kinfo = &h->kinfo;
 for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
 struct hns3_enet_ring_group *rx_group;
 u64 pre_rx_pkt;

@@ -201,9 +233,9 @@
 u32 i;

 for (i = start_ringid; i <= end_ringid; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];

- hns3_clean_tx_ring(ring, budget);
+ hns3_clean_tx_ring(ring, 0);
 }
 }

@@ -268,6 +300,105 @@
 return ret_val;
 }

+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+{
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
+
+ st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
+ st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
+ HNAE3_LOOP_PARALLEL_SERDES;
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev,
+ bool if_running, int (*st_param)[2])
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ /* Disable the vlan filter for selftest does not support it */
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+#endif
+
+ /* Tell firmware to stop mac autoneg before loopback test start,
+ * otherwise loopback test may be failed when the port is still
+ * negotiating.
+ */
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, true);
+
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+ if (if_running)
+ ndev->netdev_ops->ndo_open(ndev);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int test_index = 0;
+ u32 i;
+
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+
+ if (!st_param[i][1])
+ continue;
+
+ data[test_index] = hns3_lp_up(ndev, loop_type);
+ if (!data[test_index])
+ data[test_index] = hns3_lp_run_test(ndev, loop_type);
+
+ hns3_lp_down(ndev, loop_type);
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+}
+
 /**
 * hns3_nic_self_test - self test
 * @ndev: net device
@@ -277,68 +408,21 @@
 static void hns3_self_test(struct net_device *ndev,
 struct ethtool_test *eth_test, u64 *data)
 {
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
 int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
 bool if_running = netif_running(ndev);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- bool dis_vlan_filter;
-#endif
- int test_index = 0;
- u32 i;
+
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }

 /* Only do offline selftest, or pass by default */
 if (eth_test->flags != ETH_TEST_FL_OFFLINE)
 return;

- st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
- st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
- h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
-
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
- h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
-
- if (if_running)
- ndev->netdev_ops->ndo_stop(ndev);
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- /* Disable the vlan filter for selftest does not support it */
- dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- h->ae_algo->ops->enable_vlan_filter;
- if (dis_vlan_filter)
- h->ae_algo->ops->enable_vlan_filter(h, false);
-#endif
-
- set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
-
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
- enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
-
- if (!st_param[i][1])
- continue;
-
- data[test_index] = hns3_lp_up(ndev, loop_type);
- if (!data[test_index]) {
- data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev, loop_type);
- }
-
- if (data[test_index])
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- test_index++;
- }
-
- clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (dis_vlan_filter)
- h->ae_algo->ops->enable_vlan_filter(h, true);
-#endif
-
- if (if_running)
- ndev->netdev_ops->ndo_open(ndev);
+ hns3_selftest_prepare(ndev, if_running, st_param);
+ hns3_do_selftest(ndev, st_param, eth_test, data);
+ hns3_selftest_restore(ndev, if_running);
 }

 static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -356,9 +440,10 @@

 case ETH_SS_TEST:
 return ops->get_sset_count(h, stringset);
- }

- return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
 }

 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
@@ -374,9 +459,8 @@
 data[ETH_GSTRING_LEN - 1] = '\0';

 /* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
- prefix, i);
- n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ prefix, i);
 size_left = (ETH_GSTRING_LEN - 1) - n1;

 /* now, concatenate the stats string to it */
@@ -417,10 +501,12 @@
 switch (stringset) {
 case ETH_SS_STATS:
 buff = hns3_get_strings_tqps(h, buff);
- h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
+ ops->get_strings(h, stringset, (u8 *)buff);
 break;
 case ETH_SS_TEST:
 ops->get_strings(h, stringset, data);
+ break;
+ default:
 break;
 }
 }
@@ -435,7 +521,7 @@

 /* get stats for Tx */
 for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i].ring;
+ ring = &nic_priv->ring[i];
 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
 *data++ = *(u64 *)stat;
@@ -444,7 +530,7 @@

 /* get stats for Rx */
 for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
 *data++ = *(u64 *)stat;
@@ -465,6 +551,11 @@
 struct hnae3_handle *h = hns3_get_handle(netdev);
 u64 *p = data;

+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
 if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
 netdev_err(netdev, "could not get any statistics\n");
 return;
@@ -484,10 +575,12 @@
 {
 struct hns3_nic_priv *priv = netdev_priv(netdev);
 struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;

- strncpy(drvinfo->version, hns3_driver_version,
- sizeof(drvinfo->version));
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+ if (!h->ae_algo->ops->get_fw_version) {
+ netdev_err(netdev, "could not get fw version!\n");
+ return;
+ }

 strncpy(drvinfo->driver, h->pdev->driver->name,
 sizeof(drvinfo->driver));
@@ -497,15 +590,25 @@
 sizeof(drvinfo->bus_info));
 drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';

- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
 }

 static u32 hns3_get_link(struct net_device *netdev)
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
+ if (h->ae_algo->ops->get_status)
 return h->ae_algo->ops->get_status(h);
 else
 return 0;
@@ -518,11 +621,16 @@
 struct hnae3_handle *h = priv->ae_handle;
 int queue_num = h->kinfo.num_tqps;

+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return;
+ }
+
 param->tx_max_pending = HNS3_RING_MAX_PENDING;
 param->rx_max_pending = HNS3_RING_MAX_PENDING;

- param->tx_pending = priv->ring_data[0].ring->desc_num;
- param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[queue_num].desc_num;
 }

 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -530,7 +638,7 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
+ if (h->ae_algo->ops->get_pauseparam)
 h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
 &param->rx_pause, &param->tx_pause);
 }
@@ -540,6 +648,10 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
 if (h->ae_algo->ops->set_pauseparam)
 return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
 param->rx_pause,
@@ -547,30 +659,79 @@
 return -EOPNOTSUPP;
 }

+static void hns3_get_ksettings(struct hnae3_handle *h,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ /* 1.auto_neg & speed & duplex from cmd */
+ if (ops->get_ksettings_an_result)
+ ops->get_ksettings_an_result(h,
+ &cmd->base.autoneg,
+ &cmd->base.speed,
+ &cmd->base.duplex);
+
+ /* 2.get link mode */
+ if (ops->get_link_mode)
+ ops->get_link_mode(h,
+ cmd->link_modes.supported,
+ cmd->link_modes.advertising);
+
+ /* 3.mdix_ctrl&mdix get from phy reg */
+ if (ops->get_mdix_mode)
+ ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
+ &cmd->base.eth_tp_mdix);
+}
+
 static int hns3_get_link_ksettings(struct net_device *netdev,
 struct ethtool_link_ksettings *cmd)
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);
- u32 flowctrl_adv = 0;
+ const struct hnae3_ae_ops *ops;
+ u8 module_type;
+ u8 media_type;
 u8 link_stat;

- if (!h->ae_algo || !h->ae_algo->ops)
+ ops = h->ae_algo->ops;
+ if (ops->get_media_type)
+ ops->get_media_type(h, &media_type, &module_type);
+ else
 return -EOPNOTSUPP;

- /* 1.auto_neg & speed & duplex from cmd */
- if (netdev->phydev) {
- phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ switch (media_type) {
+ case HNAE3_MEDIA_TYPE_NONE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_FIBER:
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
+ cmd->base.port = PORT_DA;
+ else
+ cmd->base.port = PORT_FIBRE;

+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_COPPER:
+ cmd->base.port = PORT_TP;
+ if (!netdev->phydev)
+ hns3_get_ksettings(h, cmd);
+ else
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ break;
+ default:
+
+ netdev_warn(netdev, "Unknown media type");
 return 0;
 }

- if (h->ae_algo->ops->get_ksettings_an_result)
- h->ae_algo->ops->get_ksettings_an_result(h,
- &cmd->base.autoneg,
- &cmd->base.speed,
- &cmd->base.duplex);
- else
- return -EOPNOTSUPP;
+ /* mdio_support */
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;

 link_stat = hns3_get_link(netdev);
 if (!link_stat) {
@@ -578,35 +739,51 @@
 cmd->base.duplex = DUPLEX_UNKNOWN;
 }

- /* 2.get link mode and port type*/
- if (h->ae_algo->ops->get_link_mode)
- h->ae_algo->ops->get_link_mode(h,
- cmd->link_modes.supported,
- cmd->link_modes.advertising);
+ return 0;
+}

- cmd->base.port = PORT_NONE;
- if (h->ae_algo->ops->get_port_type)
- h->ae_algo->ops->get_port_type(h,
- &cmd->base.port);
+static int hns3_check_ksettings_param(const struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u8 autoneg;
+ u32 speed;
+ u8 duplex;
+ int ret;

- /* 3.mdix_ctrl&mdix get from phy reg */
- if (h->ae_algo->ops->get_mdix_mode)
- h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
- &cmd->base.eth_tp_mdix);
- /* 4.mdio_support */
- cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+ /* hw doesn't support use specified speed and duplex to negotiate,
+ * unnecessary to check them when autoneg on.
+ */
+ if (cmd->base.autoneg)
+ return 0;

- /* 5.get flow control setttings */
- if (h->ae_algo->ops->get_flowctrl_adv)
- h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+ if (ops->get_ksettings_an_result) {
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
+ cmd->base.duplex == duplex)
+ return 0;
+ }

- if (flowctrl_adv & ADVERTISED_Pause)
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Pause);
+ if (ops->get_media_type)
+ ops->get_media_type(handle, &media_type, &module_type);

- if (flowctrl_adv & ADVERTISED_Asym_Pause)
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Asym_Pause);
+ if (cmd->base.duplex == DUPLEX_HALF &&
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
+ netdev_err(netdev,
+ "only copper port supports half duplex!");
+ return -EINVAL;
+ }
+
+ if (ops->check_port_speed) {
+ ret = ops->check_port_speed(handle, cmd->base.speed);
+ if (ret) {
+ netdev_err(netdev, "unsupported speed\n");
+ return ret;
+ }
+ }

 return 0;
 }
@@ -614,19 +791,63 @@
 static int hns3_set_link_ksettings(struct net_device *netdev,
 const struct ethtool_link_ksettings *cmd)
 {
- /* Only support ksettings_set for netdev with phy attached for now */
- if (netdev->phydev)
- return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int ret;

- return -EOPNOTSUPP;
+ /* Chip don't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
+ /* Only support ksettings_set for netdev with phy attached for now */
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ ret = hns3_check_ksettings_param(netdev, cmd);
+ if (ret)
+ return ret;
+
+ if (ops->set_autoneg) {
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ /* hw doesn't support use specified speed and duplex to negotiate,
+ * ignore them when autoneg on.
+ */
+ if (cmd->base.autoneg) {
+ netdev_info(netdev,
+ "autoneg is on, ignore the speed and duplex\n");
+ return 0;
+ }
+
+ if (ops->cfg_mac_speed_dup_h)
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
+ cmd->base.duplex);
+
+ return ret;
 }

 static u32 hns3_get_rss_key_size(struct net_device *netdev)
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_key_size)
+ if (!h->ae_algo->ops->get_rss_key_size)
 return 0;

 return h->ae_algo->ops->get_rss_key_size(h);
@@ -636,8 +857,7 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_indir_size)
+ if (!h->ae_algo->ops->get_rss_indir_size)
 return 0;

 return h->ae_algo->ops->get_rss_indir_size(h);
@@ -648,7 +868,7 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
+ if (!h->ae_algo->ops->get_rss)
 return -EOPNOTSUPP;

 return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
@@ -658,16 +878,18 @@
 const u8 *key, const u8 hfunc)
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);

- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
+ if (!h->ae_algo->ops->set_rss)
 return -EOPNOTSUPP;

- /* currently we only support Toeplitz hash */
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
- netdev_err(netdev,
- "hash func not supported (only Toeplitz hash)\n");
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
 return -EOPNOTSUPP;
 }
+
 if (!indir) {
 netdev_err(netdev,
 "set rss failed for indir is empty\n");
@@ -683,34 +905,86 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
- return -EOPNOTSUPP;
-
 switch (cmd->cmd) {
 case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.rss_size;
- break;
+ cmd->data = h->kinfo.num_tqps;
+ return 0;
 case ETHTOOL_GRXFH:
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (h->ae_algo->ops->get_fd_rule_cnt)
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRULE:
+ if (h->ae_algo->ops->get_fd_rule_info)
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLALL:
+ if (h->ae_algo->ops->get_fd_all_rules)
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+ rule_locs);
+ return -EOPNOTSUPP;
 default:
 return -EOPNOTSUPP;
 }
-
- return 0;
 }

-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 new_desc_num)
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
 {
 struct hnae3_handle *h = priv->ae_handle;
 int i;

- h->kinfo.num_desc = new_desc_num;
+ h->kinfo.num_tx_desc = tx_desc_num;
+ h->kinfo.num_rx_desc = rx_desc_num;

- for (i = 0; i < h->kinfo.num_tqps * 2; i++)
- priv->ring_data[i].ring->desc_num = new_desc_num;
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
+ }
+}

- return hns3_init_all_ring(priv);
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
+ memcpy(&tmp_rings[i], &priv->ring[i],
+ sizeof(struct hns3_enet_ring));
+ tmp_rings[i].skb = NULL;
+ }
+
+ return tmp_rings;
+}
+
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
+ param->tx_pending < HNS3_RING_MIN_PENDING ||
+ param->rx_pending > HNS3_RING_MAX_PENDING ||
+ param->rx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
+ HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
+ return -EINVAL;
+ }
+
+ return 0;
 }

 static int hns3_set_ringparam(struct net_device *ndev,
@@ -718,59 +992,61 @@
 {
 struct hns3_nic_priv *priv = netdev_priv(ndev);
 struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
 bool if_running = netif_running(ndev);
- u32 old_desc_num, new_desc_num;
- int ret;
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;

- if (param->rx_mini_pending || param->rx_jumbo_pending)
- return -EINVAL;
-
- if (param->tx_pending != param->rx_pending) {
- netdev_err(ndev,
- "Descriptors of tx and rx must be equal");
- return -EINVAL;
- }
-
- if (param->tx_pending > HNS3_RING_MAX_PENDING ||
- param->tx_pending < HNS3_RING_MIN_PENDING) {
- netdev_err(ndev,
- "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
- param->tx_pending, HNS3_RING_MIN_PENDING,
- HNS3_RING_MAX_PENDING);
- return -EINVAL;
- }
-
- new_desc_num = param->tx_pending;
-
- /* Hardware requires that its descriptors must be multiple of eight */
- new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
- old_desc_num = h->kinfo.num_desc;
- if (old_desc_num == new_desc_num)
- return 0;
-
- netdev_info(ndev,
- "Changing descriptor count from %d to %d.\n",
- old_desc_num, new_desc_num);
-
- if (if_running)
- dev_close(ndev);
-
- ret = hns3_uninit_all_ring(priv);
+ ret = hns3_check_ringparam(ndev, param);
 if (ret)
 return ret;

- ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
- if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
- if (ret) {
- netdev_err(ndev,
- "Revert to old bd num fail, ret=%d.\n", ret);
- return ret;
- }
+ /* Hardware requires that its descriptors must be multiple of eight */
+ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
+ new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
+ old_tx_desc_num = priv->ring[0].desc_num;
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
+ if (old_tx_desc_num == new_tx_desc_num &&
+ old_rx_desc_num == new_rx_desc_num)
+ return 0;
+
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+ "backup ring param failed by allocating memory fail\n");
+ return -ENOMEM;
 }

+ netdev_info(ndev,
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
+ old_tx_desc_num, old_rx_desc_num,
+ new_tx_desc_num, new_rx_desc_num);
+
 if (if_running)
- ret = dev_open(ndev);
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(&priv->ring[i], &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
+ }
+
+ kfree(tmp_rings);
+
+ if (if_running)
+ ret = ndev->netdev_ops->ndo_open(ndev);

 return ret;
 }
@@ -779,12 +1055,19 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
- return -EOPNOTSUPP;
-
 switch (cmd->cmd) {
 case ETHTOOL_SRXFH:
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLINS:
+ if (h->ae_algo->ops->add_fd_entry)
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (h->ae_algo->ops->del_fd_entry)
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
 default:
 return -EOPNOTSUPP;
 }
@@ -792,19 +1075,36 @@

 static int hns3_nway_reset(struct net_device *netdev)
 {
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
 struct phy_device *phy = netdev->phydev;
+ int autoneg;

 if (!netif_running(netdev))
 return 0;

- /* Only support nway_reset for netdev with phy attached for now */
- if (!phy)
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return -EBUSY;
+ }
+
+ if (!ops->get_autoneg || !ops->restart_autoneg)
 return -EOPNOTSUPP;

- if (phy->autoneg != AUTONEG_ENABLE)
+ autoneg = ops->get_autoneg(handle);
+ if (autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev,
+ "Autoneg is off, don't support to restart it\n");
 return -EINVAL;
+ }

- return genphy_restart_aneg(phy);
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
+ if (phy)
+ return genphy_restart_aneg(phy);
+
+ return ops->restart_autoneg(handle);
 }

 static void hns3_get_channels(struct net_device *netdev,
@@ -824,15 +1124,18 @@
 struct hnae3_handle *h = priv->ae_handle;
 u16 queue_num = h->kinfo.num_tqps;

+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
 if (queue >= queue_num) {
 netdev_err(netdev,
- "Invalid queue value %d! Queue max id=%d\n",
+ "Invalid queue value %u! Queue max id=%u\n",
 queue, queue_num - 1);
 return -EINVAL;
 }

- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;

 cmd->use_adaptive_tx_coalesce =
 tx_vector->tx_group.coal.gl_adapt_enable;
@@ -876,14 +1179,14 @@
 rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
 if (rx_gl != cmd->rx_coalesce_usecs) {
 netdev_info(netdev,
- "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
 cmd->rx_coalesce_usecs, rx_gl);
 }

 tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
 if (tx_gl != cmd->tx_coalesce_usecs) {
 netdev_info(netdev,
- "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
 cmd->tx_coalesce_usecs, tx_gl);
 }

@@ -911,7 +1214,7 @@
 rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
 if (rl != cmd->rx_coalesce_usecs_high) {
 netdev_info(netdev,
- "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
 cmd->rx_coalesce_usecs_high, rl);
 }

@@ -940,7 +1243,7 @@
 if (cmd->use_adaptive_tx_coalesce == 1 ||
 cmd->use_adaptive_rx_coalesce == 1) {
 netdev_info(netdev,
- "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
 cmd->use_adaptive_tx_coalesce,
 cmd->use_adaptive_rx_coalesce);
 }
@@ -957,8 +1260,8 @@
 struct hnae3_handle *h = priv->ae_handle;
 int queue_num = h->kinfo.num_tqps;

- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;

 tx_vector->tx_group.coal.gl_adapt_enable =
 cmd->use_adaptive_tx_coalesce;
@@ -984,6 +1287,9 @@
 u16 queue_num = h->kinfo.num_tqps;
 int ret;
 int i;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;

 ret = hns3_check_coalesce_para(netdev, cmd);
 if (ret)
@@ -1024,13 +1330,184 @@
 {
 struct hnae3_handle *h = hns3_get_handle(netdev);

- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ if (!h->ae_algo->ops->set_led_id)
 return -EOPNOTSUPP;

 return h->ae_algo->ops->set_led_id(h, state);
 }

+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
+/* Translate local fec value into ethtool value. */
+static unsigned int loc_to_eth_fec(u8 loc_fec)
+{
+ u32 eth_fec = 0;
+
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (loc_fec & BIT(HNAE3_FEC_RS))
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate ethtool fec value into local value. */
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
+{
+ u32 loc_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ return loc_fec;
+
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
+ if (eth_fec & ETHTOOL_FEC_RS)
+ loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ loc_fec |= BIT(HNAE3_FEC_BASER);
+
+ return loc_fec;
+}
+
+static int hns3_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 fec_ability;
+ u8 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->get_fec)
+ return -EOPNOTSUPP;
+
+ ops->get_fec(handle, &fec_ability, &fec_mode);
+
+ fec->fec = loc_to_eth_fec(fec_ability);
+ fec->active_fec = loc_to_eth_fec(fec_mode);
+
+ return 0;
+}
+
+static int hns3_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u32 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->set_fec)
+ return -EOPNOTSUPP;
+ fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
+ return ops->set_fec(handle, fec_mode);
+}
+
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
+
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
+
 static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
 .get_drvinfo = hns3_get_drvinfo,
 .get_ringparam = hns3_get_ringparam,
 .set_ringparam = hns3_set_ringparam,
@@ -1038,18 +1515,25 @@
 .get_ethtool_stats = hns3_get_stats,
 .get_sset_count = hns3_get_sset_count,
 .get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
 .get_rxfh_key_size = hns3_get_rss_key_size,
 .get_rxfh_indir_size = hns3_get_rss_indir_size,
 .get_rxfh = hns3_get_rss,
 .set_rxfh = hns3_set_rss,
 .get_link_ksettings = hns3_get_link_ksettings,
 .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
 .get_coalesce = hns3_get_coalesce,
 .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
 .get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
 };

 static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
 .self_test = hns3_self_test,
 .get_drvinfo = hns3_get_drvinfo,
 .get_link = hns3_get_link,
@@ -1076,6 +1560,12 @@
 .get_regs_len = hns3_get_regs_len,
 .get_regs = hns3_get_regs,
 .set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_fecparam = hns3_get_fecparam,
+ .set_fecparam = hns3_set_fecparam,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
 };

 void hns3_ethtool_set_ops(struct net_device *netdev)
10811571 void hns3_ethtool_set_ops(struct net_device *netdev)