forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
 #include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
@@ -19,22 +20,63 @@
 #include "hclge_mbx.h"
 #include "hclge_mdio.h"
 #include "hclge_tm.h"
+#include "hclge_err.h"
 #include "hnae3.h"

 #define HCLGE_NAME			"hclge"
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
-#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
-				     enum hclge_mta_dmac_sel_type mta_mac_sel,
-				     bool enable);
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+#define HCLGE_BUF_SIZE_UNIT	256U
+#define HCLGE_BUF_MUL_BY	2
+#define HCLGE_BUF_DIV_BY	2
+#define NEED_RESERVE_TC_NUM	2
+#define BUF_MAX_PERCENT		100
+#define BUF_RESERVE_PERCENT	90
+
+#define HCLGE_RESET_MAX_FAIL_CNT	5
+#define HCLGE_RESET_SYNC_TIME		100
+#define HCLGE_PF_RESET_SYNC_TIME	20
+#define HCLGE_PF_RESET_SYNC_CNT		1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET	1
+#define HCLGE_DFX_SSU_0_BD_OFFSET	2
+#define HCLGE_DFX_SSU_1_BD_OFFSET	3
+#define HCLGE_DFX_IGU_BD_OFFSET		4
+#define HCLGE_DFX_RPU_0_BD_OFFSET	5
+#define HCLGE_DFX_RPU_1_BD_OFFSET	6
+#define HCLGE_DFX_NCSI_BD_OFFSET	7
+#define HCLGE_DFX_RTC_BD_OFFSET		8
+#define HCLGE_DFX_PPP_BD_OFFSET		9
+#define HCLGE_DFX_RCB_BD_OFFSET		10
+#define HCLGE_DFX_TQP_BD_OFFSET		11
+#define HCLGE_DFX_SSU_2_BD_OFFSET	12
+
+#define HCLGE_LINK_STATUS_MS	10
+
+#define HCLGE_VF_VPORT_START_NUM	1
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+						   unsigned long *addr);
+static int hclge_set_default_loopback(struct hclge_dev *hdev);
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+				      int wait_cnt);

 static struct hnae3_ae_algo ae_algo;
+
+static struct workqueue_struct *hclge_wq;

 static const struct pci_device_id ae_algo_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
@@ -44,180 +86,74 @@
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
 	/* required last entry */
 	{0, }
 };

 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
+					 HCLGE_CMDQ_TX_ADDR_H_REG,
+					 HCLGE_CMDQ_TX_DEPTH_REG,
+					 HCLGE_CMDQ_TX_TAIL_REG,
+					 HCLGE_CMDQ_TX_HEAD_REG,
+					 HCLGE_CMDQ_RX_ADDR_L_REG,
+					 HCLGE_CMDQ_RX_ADDR_H_REG,
+					 HCLGE_CMDQ_RX_DEPTH_REG,
+					 HCLGE_CMDQ_RX_TAIL_REG,
+					 HCLGE_CMDQ_RX_HEAD_REG,
+					 HCLGE_VECTOR0_CMDQ_SRC_REG,
+					 HCLGE_CMDQ_INTR_STS_REG,
+					 HCLGE_CMDQ_INTR_EN_REG,
+					 HCLGE_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+					   HCLGE_VECTOR0_OTER_EN_REG,
+					   HCLGE_MISC_RESET_STS_REG,
+					   HCLGE_MISC_VECTOR_INT_STS,
+					   HCLGE_GLOBAL_RESET_REG,
+					   HCLGE_FUN_RST_ING,
+					   HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+					 HCLGE_RING_RX_ADDR_H_REG,
+					 HCLGE_RING_RX_BD_NUM_REG,
+					 HCLGE_RING_RX_BD_LENGTH_REG,
+					 HCLGE_RING_RX_MERGE_EN_REG,
+					 HCLGE_RING_RX_TAIL_REG,
+					 HCLGE_RING_RX_HEAD_REG,
+					 HCLGE_RING_RX_FBD_NUM_REG,
+					 HCLGE_RING_RX_OFFSET_REG,
+					 HCLGE_RING_RX_FBD_OFFSET_REG,
+					 HCLGE_RING_RX_STASH_REG,
+					 HCLGE_RING_RX_BD_ERR_REG,
+					 HCLGE_RING_TX_ADDR_L_REG,
+					 HCLGE_RING_TX_ADDR_H_REG,
+					 HCLGE_RING_TX_BD_NUM_REG,
+					 HCLGE_RING_TX_PRIORITY_REG,
+					 HCLGE_RING_TX_TC_REG,
+					 HCLGE_RING_TX_MERGE_EN_REG,
+					 HCLGE_RING_TX_TAIL_REG,
+					 HCLGE_RING_TX_HEAD_REG,
+					 HCLGE_RING_TX_FBD_NUM_REG,
+					 HCLGE_RING_TX_OFFSET_REG,
+					 HCLGE_RING_TX_EBD_NUM_REG,
+					 HCLGE_RING_TX_EBD_OFFSET_REG,
+					 HCLGE_RING_TX_BD_ERR_REG,
+					 HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+					     HCLGE_TQP_INTR_GL0_REG,
+					     HCLGE_TQP_INTR_GL1_REG,
+					     HCLGE_TQP_INTR_GL2_REG,
+					     HCLGE_TQP_INTR_RL_REG};
+
 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
-	"Mac Loopback test",
-	"Serdes Loopback test",
+	"App Loopback test",
+	"Serdes serial Loopback test",
+	"Serdes parallel Loopback test",
 	"Phy Loopback test"
-};
-
-static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
-	{"igu_rx_oversize_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
-	{"igu_rx_undersize_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
-	{"igu_rx_out_all_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
-	{"igu_rx_uni_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
-	{"igu_rx_multi_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
-	{"igu_rx_broad_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
-	{"egu_tx_out_all_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
-	{"egu_tx_uni_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
-	{"egu_tx_multi_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
-	{"egu_tx_broad_pkt",
-		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
-	{"ssu_ppp_mac_key_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
-	{"ssu_ppp_host_key_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
-	{"ppp_ssu_mac_rlt_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
-	{"ppp_ssu_host_rlt_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
-	{"ssu_tx_in_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
-	{"ssu_tx_out_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
-	{"ssu_rx_in_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
-	{"ssu_rx_out_num",
-		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
-};
-
-static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
-	{"igu_rx_err_pkt",
-		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
-	{"igu_rx_no_eof_pkt",
-		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
-	{"igu_rx_no_sof_pkt",
-		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
-	{"egu_tx_1588_pkt",
-		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
-	{"ssu_full_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
-	{"ssu_part_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
-	{"ppp_key_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
-	{"ppp_rlt_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
-	{"ssu_key_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
-	{"pkt_curr_buf_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
-	{"qcn_fb_rcv_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
-	{"qcn_fb_drop_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
-	{"qcn_fb_invaild_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
-	{"rx_packet_tc0_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
-	{"rx_packet_tc1_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
-	{"rx_packet_tc2_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
-	{"rx_packet_tc3_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
-	{"rx_packet_tc4_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
-	{"rx_packet_tc5_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
-	{"rx_packet_tc6_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
-	{"rx_packet_tc7_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
-	{"rx_packet_tc0_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
-	{"rx_packet_tc1_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
-	{"rx_packet_tc2_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
-	{"rx_packet_tc3_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
-	{"rx_packet_tc4_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
-	{"rx_packet_tc5_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
-	{"rx_packet_tc6_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
-	{"rx_packet_tc7_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
-	{"tx_packet_tc0_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
-	{"tx_packet_tc1_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
-	{"tx_packet_tc2_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
-	{"tx_packet_tc3_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
-	{"tx_packet_tc4_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
-	{"tx_packet_tc5_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
-	{"tx_packet_tc6_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
-	{"tx_packet_tc7_in_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
-	{"tx_packet_tc0_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
-	{"tx_packet_tc1_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
-	{"tx_packet_tc2_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
-	{"tx_packet_tc3_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
-	{"tx_packet_tc4_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
-	{"tx_packet_tc5_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
-	{"tx_packet_tc6_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
-	{"tx_packet_tc7_out_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
-	{"pkt_curr_buf_tc0_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
-	{"pkt_curr_buf_tc1_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
-	{"pkt_curr_buf_tc2_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
-	{"pkt_curr_buf_tc3_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
-	{"pkt_curr_buf_tc4_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
-	{"pkt_curr_buf_tc5_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
-	{"pkt_curr_buf_tc6_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
-	{"pkt_curr_buf_tc7_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
-	{"mb_uncopy_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
-	{"lo_pri_unicast_rlt_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
-	{"hi_pri_multicast_rlt_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
-	{"lo_pri_multicast_rlt_drop_num",
-		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
-	{"rx_oq_drop_pkt_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
-	{"tx_oq_drop_pkt_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
-	{"nic_l2_err_drop_pkt_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
-	{"roc_l2_err_drop_pkt_cnt",
-		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
 };

 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -225,6 +161,12 @@
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
 	{"mac_rx_mac_pause_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
+	{"mac_tx_control_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
+	{"mac_rx_control_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
+	{"mac_tx_pfc_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
 	{"mac_tx_pfc_pri0_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
 	{"mac_tx_pfc_pri1_pkt_num",
@@ -241,6 +183,8 @@
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
 	{"mac_tx_pfc_pri7_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
+	{"mac_rx_pfc_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
 	{"mac_rx_pfc_pri0_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
 	{"mac_rx_pfc_pri1_pkt_num",
@@ -387,122 +331,99 @@
 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
 	{
 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
-		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
-		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
-		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+		.ethter_type = cpu_to_le16(ETH_P_LLDP),
+		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
 		.i_port_bitmap = 0x1,
 	},
 };
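The entry above swaps the old split encoding, a byte-swapped hi-32/lo-16 pair, for a plain byte array; both spell out the IEEE LLDP multicast address 01:80:c2:00:00:0e. A small host-side check that the old encoding produced the same six bytes (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char mac[6];
	uint32_t hi = htonl(0x0180C200);	/* stores bytes 01 80 c2 00 */
	uint16_t lo = htons(0x000E);		/* stores bytes 00 0e */

	memcpy(mac, &hi, 4);
	memcpy(mac + 4, &lo, 2);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 01:80:c2:00:00:0e, i.e. the new .mac_addr initializer */
	return 0;
}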

-static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_64_BIT_CMD_NUM 5
-#define HCLGE_64_BIT_RTN_DATANUM 4
-	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
-	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
-	__le64 *desc_data;
-	int i, k, n;
-	int ret;
+static const u8 hclge_hash_key[] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};

-	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Get 64 bit pkt stats fail, status = %d.\n", ret);
-		return ret;
-	}
+static const u32 hclge_dfx_bd_offset_list[] = {
+	HCLGE_DFX_BIOS_BD_OFFSET,
+	HCLGE_DFX_SSU_0_BD_OFFSET,
+	HCLGE_DFX_SSU_1_BD_OFFSET,
+	HCLGE_DFX_IGU_BD_OFFSET,
+	HCLGE_DFX_RPU_0_BD_OFFSET,
+	HCLGE_DFX_RPU_1_BD_OFFSET,
+	HCLGE_DFX_NCSI_BD_OFFSET,
+	HCLGE_DFX_RTC_BD_OFFSET,
+	HCLGE_DFX_PPP_BD_OFFSET,
+	HCLGE_DFX_RCB_BD_OFFSET,
+	HCLGE_DFX_TQP_BD_OFFSET,
+	HCLGE_DFX_SSU_2_BD_OFFSET
+};

-	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
-		if (unlikely(i == 0)) {
-			desc_data = (__le64 *)(&desc[i].data[0]);
-			n = HCLGE_64_BIT_RTN_DATANUM - 1;
-		} else {
-			desc_data = (__le64 *)(&desc[i]);
-			n = HCLGE_64_BIT_RTN_DATANUM;
-		}
-		for (k = 0; k < n; k++) {
-			*data++ += le64_to_cpu(*desc_data);
-			desc_data++;
-		}
-	}
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+	HCLGE_OPC_DFX_BIOS_COMMON_REG,
+	HCLGE_OPC_DFX_SSU_REG_0,
+	HCLGE_OPC_DFX_SSU_REG_1,
+	HCLGE_OPC_DFX_IGU_EGU_REG,
+	HCLGE_OPC_DFX_RPU_REG_0,
+	HCLGE_OPC_DFX_RPU_REG_1,
+	HCLGE_OPC_DFX_NCSI_REG,
+	HCLGE_OPC_DFX_RTC_REG,
+	HCLGE_OPC_DFX_PPP_REG,
+	HCLGE_OPC_DFX_RCB_REG,
+	HCLGE_OPC_DFX_TQP_REG,
+	HCLGE_OPC_DFX_SSU_REG_2
+};

-	return 0;
-}
+static const struct key_info meta_data_key_info[] = {
+	{ PACKET_TYPE_ID, 6},
+	{ IP_FRAGEMENT, 1},
+	{ ROCE_TYPE, 1},
+	{ NEXT_KEY, 5},
+	{ VLAN_NUMBER, 2},
+	{ SRC_VPORT, 12},
+	{ DST_VPORT, 12},
+	{ TUNNEL_PACKET, 1},
+};

-static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
-{
-	stats->pkt_curr_buf_cnt = 0;
-	stats->pkt_curr_buf_tc0_cnt = 0;
-	stats->pkt_curr_buf_tc1_cnt = 0;
-	stats->pkt_curr_buf_tc2_cnt = 0;
-	stats->pkt_curr_buf_tc3_cnt = 0;
-	stats->pkt_curr_buf_tc4_cnt = 0;
-	stats->pkt_curr_buf_tc5_cnt = 0;
-	stats->pkt_curr_buf_tc6_cnt = 0;
-	stats->pkt_curr_buf_tc7_cnt = 0;
-}
+static const struct key_info tuple_key_info[] = {
+	{ OUTER_DST_MAC, 48},
+	{ OUTER_SRC_MAC, 48},
+	{ OUTER_VLAN_TAG_FST, 16},
+	{ OUTER_VLAN_TAG_SEC, 16},
+	{ OUTER_ETH_TYPE, 16},
+	{ OUTER_L2_RSV, 16},
+	{ OUTER_IP_TOS, 8},
+	{ OUTER_IP_PROTO, 8},
+	{ OUTER_SRC_IP, 32},
+	{ OUTER_DST_IP, 32},
+	{ OUTER_L3_RSV, 16},
+	{ OUTER_SRC_PORT, 16},
+	{ OUTER_DST_PORT, 16},
+	{ OUTER_L4_RSV, 32},
+	{ OUTER_TUN_VNI, 24},
+	{ OUTER_TUN_FLOW_ID, 8},
+	{ INNER_DST_MAC, 48},
+	{ INNER_SRC_MAC, 48},
+	{ INNER_VLAN_TAG_FST, 16},
+	{ INNER_VLAN_TAG_SEC, 16},
+	{ INNER_ETH_TYPE, 16},
+	{ INNER_L2_RSV, 16},
+	{ INNER_IP_TOS, 8},
+	{ INNER_IP_PROTO, 8},
+	{ INNER_SRC_IP, 32},
+	{ INNER_DST_IP, 32},
+	{ INNER_L3_RSV, 16},
+	{ INNER_SRC_PORT, 16},
+	{ INNER_DST_PORT, 16},
+	{ INNER_L4_RSV, 32},
+};

-static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_32_BIT_CMD_NUM 8
-#define HCLGE_32_BIT_RTN_DATANUM 8
-
-	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
-	struct hclge_32_bit_stats *all_32_bit_stats;
-	__le32 *desc_data;
-	int i, k, n;
-	u64 *data;
-	int ret;
-
-	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
-	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
-
-	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Get 32 bit pkt stats fail, status = %d.\n", ret);
-
-		return ret;
-	}
-
-	hclge_reset_partial_32bit_counter(all_32_bit_stats);
-	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
-		if (unlikely(i == 0)) {
-			__le16 *desc_data_16bit;
-
-			all_32_bit_stats->igu_rx_err_pkt +=
-				le32_to_cpu(desc[i].data[0]);
-
-			desc_data_16bit = (__le16 *)&desc[i].data[1];
-			all_32_bit_stats->igu_rx_no_eof_pkt +=
-				le16_to_cpu(*desc_data_16bit);
-
-			desc_data_16bit++;
-			all_32_bit_stats->igu_rx_no_sof_pkt +=
-				le16_to_cpu(*desc_data_16bit);
-
-			desc_data = &desc[i].data[2];
-			n = HCLGE_32_BIT_RTN_DATANUM - 4;
-		} else {
-			desc_data = (__le32 *)&desc[i];
-			n = HCLGE_32_BIT_RTN_DATANUM;
-		}
-		for (k = 0; k < n; k++) {
-			*data++ += le32_to_cpu(*desc_data);
-			desc_data++;
-		}
-	}
-
-	return 0;
-}
-
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
 {
 #define HCLGE_MAC_CMD_NUM 21
-#define HCLGE_RTN_DATA_NUM 4

-	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+	u64 *data = (u64 *)(&hdev->mac_stats);
 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
 	__le64 *desc_data;
 	int i, k, n;
@@ -518,20 +439,106 @@
 	}

 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
+		/* for special opcode 0032, only the first desc has the head */
 		if (unlikely(i == 0)) {
 			desc_data = (__le64 *)(&desc[i].data[0]);
-			n = HCLGE_RTN_DATA_NUM - 2;
+			n = HCLGE_RD_FIRST_STATS_NUM;
 		} else {
 			desc_data = (__le64 *)(&desc[i]);
-			n = HCLGE_RTN_DATA_NUM;
+			n = HCLGE_RD_OTHER_STATS_NUM;
 		}
+
 		for (k = 0; k < n; k++) {
-			*data++ += le64_to_cpu(*desc_data);
+			*data += le64_to_cpu(*desc_data);
+			data++;
 			desc_data++;
 		}
 	}

 	return 0;
+}
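Both accumulation loops in this patch, here and in hclge_mac_update_stats_complete() below, walk the same wire layout: if a command descriptor is the usual 8-byte head followed by six 32-bit data words (32 bytes total, inferred from the casts here, not quoted from the driver headers), then desc[0] contributes only the three 64-bit counters in its data area, while every later descriptor is reused wholesale as four 64-bit counters. A sketch of that assumed layout:

#include <stdint.h>

/* Assumed shape of one command descriptor (32 bytes in total). */
struct desc_sketch {
	uint16_t opcode;
	uint16_t flag;
	uint16_t retval;
	uint16_t rsv;		/* 8-byte command head ends here */
	uint32_t data[6];	/* 24 bytes of payload */
};

/* desc[0]:   counters live in .data only      -> 3 x 64-bit values
 * desc[i>0]: the whole descriptor is payload  -> 4 x 64-bit values
 * which is why the loops pick a different 'n' when i == 0.
 */
_Static_assert(sizeof(struct desc_sketch) == 32, "32-byte descriptor");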
+
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+{
+	u64 *data = (u64 *)(&hdev->mac_stats);
+	struct hclge_desc *desc;
+	__le64 *desc_data;
+	u16 i, k, n;
+	int ret;
+
+	/* This may be called inside atomic sections,
+	 * so GFP_ATOMIC is more suitable here
+	 */
+	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
+	if (!desc)
+		return -ENOMEM;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
+	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
+	if (ret) {
+		kfree(desc);
+		return ret;
+	}
+
+	for (i = 0; i < desc_num; i++) {
+		/* for special opcode 0034, only the first desc has the head */
+		if (i == 0) {
+			desc_data = (__le64 *)(&desc[i].data[0]);
+			n = HCLGE_RD_FIRST_STATS_NUM;
+		} else {
+			desc_data = (__le64 *)(&desc[i]);
+			n = HCLGE_RD_OTHER_STATS_NUM;
+		}
+
+		for (k = 0; k < n; k++) {
+			*data += le64_to_cpu(*desc_data);
+			data++;
+			desc_data++;
+		}
+	}
+
+	kfree(desc);
+
+	return 0;
+}
+
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+{
+	struct hclge_desc desc;
+	__le32 *desc_data;
+	u32 reg_num;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	desc_data = (__le32 *)(&desc.data[0]);
+	reg_num = le32_to_cpu(*desc_data);
+
+	*desc_num = 1 + ((reg_num - 3) >> 2) +
+		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+
+	return 0;
+}
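hclge_mac_query_reg_num() converts the firmware's register count into a descriptor count using exactly the 3-then-4 packing shown above: one BD for the first three counters, then one BD per four remaining counters, rounded up. The same arithmetic as a standalone check (illustrative only):

#include <stdio.h>

/* Mirror of the *desc_num formula above. */
static unsigned int stats_desc_num(unsigned int reg_num)
{
	return 1 + ((reg_num - 3) >> 2) + (((reg_num - 3) & 0x3) ? 1 : 0);
}

int main(void)
{
	/* 3 counters -> 1 BD, 11 -> 3 BDs, 12 -> 4 BDs */
	printf("%u %u %u\n", stats_desc_num(3), stats_desc_num(11),
	       stats_desc_num(12));
	return 0;
}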
+
+static int hclge_mac_update_stats(struct hclge_dev *hdev)
+{
+	u32 desc_num;
+	int ret;
+
+	ret = hclge_mac_query_reg_num(hdev, &desc_num);
+
+	/* The firmware supports the new statistics acquisition method */
+	if (!ret)
+		ret = hclge_mac_update_stats_complete(hdev, desc_num);
+	else if (ret == -EOPNOTSUPP)
+		ret = hclge_mac_update_stats_defective(hdev);
+	else
+		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
+
+	return ret;
 }

 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
@@ -548,8 +555,7 @@
 		queue = handle->kinfo.tqp[i];
 		tqp = container_of(queue, struct hclge_tqp, q);
 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
-		hclge_cmd_setup_basic_desc(&desc[0],
-					   HCLGE_OPC_QUERY_RX_STATUS,
+		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
 					   true);

 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -557,7 +563,7 @@
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
 				"Query tqp stat fail, status = %d,queue = %d\n",
-				ret, i);
+				ret, i);
 			return ret;
 		}
 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
@@ -569,7 +575,7 @@
 		tqp = container_of(queue, struct hclge_tqp, q);
 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
 		hclge_cmd_setup_basic_desc(&desc[0],
-					   HCLGE_OPC_QUERY_TX_STATUS,
+					   HCLGE_OPC_QUERY_TX_STATS,
 					   true);

 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -611,6 +617,7 @@
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

+	/* each tqp has TX & RX two queues */
 	return kinfo->num_tqps * (2);
 }

@@ -618,12 +625,12 @@
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	u8 *buff = data;
-	int i = 0;
+	int i;

 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -631,7 +638,7 @@
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -639,7 +646,7 @@
 	return buff;
 }

-static u64 *hclge_comm_get_stats(void *comm_stats,
+static u64 *hclge_comm_get_stats(const void *comm_stats,
 				 const struct hclge_comm_stats_str strs[],
 				 int size, u64 *data)
 {
@@ -670,32 +677,6 @@
 	return (u8 *)buff;
 }

-static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
-				 struct net_device_stats *net_stats)
-{
-	net_stats->tx_dropped = 0;
-	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
-	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
-	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
-
-	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
-	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
-	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
-	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
-	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
-
-	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
-	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
-
-	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
-	net_stats->rx_length_errors =
-		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
-	net_stats->rx_length_errors +=
-		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
-	net_stats->rx_over_errors =
-		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
-}
-
 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
 {
 	struct hnae3_handle *handle;
@@ -715,14 +696,6 @@
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"Update MAC stats fail, status = %d.\n", status);
-
-	status = hclge_32_bit_update_stats(hdev);
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"Update 32 bit stats fail, status = %d.\n",
-			status);
-
-	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
 }

 static void hclge_update_stats(struct hnae3_handle *handle,
@@ -730,7 +703,6 @@
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
 	int status;

 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
@@ -742,32 +714,21 @@
 			"Update MAC stats fail, status = %d.\n",
 			status);

-	status = hclge_32_bit_update_stats(hdev);
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"Update 32 bit stats fail, status = %d.\n",
-			status);
-
-	status = hclge_64_bit_update_stats(hdev);
-	if (status)
-		dev_err(&hdev->pdev->dev,
-			"Update 64 bit stats fail, status = %d.\n",
-			status);
-
 	status = hclge_tqps_update_stats(handle);
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"Update TQPS stats fail, status = %d.\n",
 			status);

-	hclge_update_netstat(hw_stats, net_stats);
-
 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
 }

 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
 {
-#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
+		HNAE3_SUPPORT_PHY_LOOPBACK |\
+		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -781,27 +742,33 @@
 	if (stringset == ETH_SS_TEST) {
 		/* clear loopback bit flags at first */
 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
-		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
+		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
 			count += 1;
-			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
+			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
 		}

-		count++;
-		handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
+		count += 2;
+		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+		    hdev->hw.mac.phydev->drv->set_loopback) {
+			count += 1;
+			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+		}
+
 	} else if (stringset == ETH_SS_STATS) {
 		count = ARRAY_SIZE(g_mac_stats_string) +
-			ARRAY_SIZE(g_all_32bit_stats_string) +
-			ARRAY_SIZE(g_all_64bit_stats_string) +
 			hclge_tqps_get_sset_count(handle, stringset);
 	}

 	return count;
 }

-static void hclge_get_strings(struct hnae3_handle *handle,
-			      u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
 			      u8 *data)
 {
 	u8 *p = (char *)data;
@@ -809,37 +776,28 @@

 	if (stringset == ETH_SS_STATS) {
 		size = ARRAY_SIZE(g_mac_stats_string);
-		p = hclge_comm_get_strings(stringset,
-					   g_mac_stats_string,
-					   size,
-					   p);
-		size = ARRAY_SIZE(g_all_32bit_stats_string);
-		p = hclge_comm_get_strings(stringset,
-					   g_all_32bit_stats_string,
-					   size,
-					   p);
-		size = ARRAY_SIZE(g_all_64bit_stats_string);
-		p = hclge_comm_get_strings(stringset,
-					   g_all_64bit_stats_string,
-					   size,
-					   p);
+		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+					   size, p);
 		p = hclge_tqps_get_strings(handle, p);
 	} else if (stringset == ETH_SS_TEST) {
-		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
+		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
+		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
 			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
+			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
@@ -852,24 +810,28 @@
 	struct hclge_dev *hdev = vport->back;
 	u64 *p;

-	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
-				 g_mac_stats_string,
-				 ARRAY_SIZE(g_mac_stats_string),
-				 data);
-	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
-				 g_all_32bit_stats_string,
-				 ARRAY_SIZE(g_all_32bit_stats_string),
-				 p);
-	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
-				 g_all_64bit_stats_string,
-				 ARRAY_SIZE(g_all_64bit_stats_string),
-				 p);
+	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+				 ARRAY_SIZE(g_mac_stats_string), data);
 	p = hclge_tqps_get_stats(handle, p);
+}
+
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+			       struct hns3_mac_stats *mac_stats)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	hclge_update_stats(handle, NULL);
+
+	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
 }

 static int hclge_parse_func_status(struct hclge_dev *hdev,
 				   struct hclge_func_status_cmd *status)
 {
+#define HCLGE_MAC_ID_MASK 0xF
+
 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
 		return -EINVAL;

@@ -879,11 +841,14 @@
 	else
 		hdev->flag &= ~HCLGE_FLAG_MAIN;

+	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
 	return 0;
 }

 static int hclge_query_function_status(struct hclge_dev *hdev)
 {
+#define HCLGE_QUERY_MAX_CNT 5
+
 	struct hclge_func_status_cmd *req;
 	struct hclge_desc desc;
 	int timeout = 0;
@@ -896,9 +861,7 @@
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"query function status failed %d.\n",
-			ret);
-
+			"query function status failed %d.\n", ret);
 		return ret;
 	}

@@ -906,11 +869,9 @@
 		if (req->pf_state)
 			break;
 		usleep_range(1000, 2000);
-	} while (timeout++ < 5);
+	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

-	ret = hclge_parse_func_status(hdev, req);
-
-	return ret;
+	return hclge_parse_func_status(hdev, req);
 }

 static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -928,26 +889,54 @@
 	}

 	req = (struct hclge_pf_res_cmd *)desc.data;
-	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
-	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+	hdev->num_tqps = le16_to_cpu(req->tqp_num);
+	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+
+	if (req->tx_buf_size)
+		hdev->tx_buf_size =
+			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+	if (req->dv_buf_size)
+		hdev->dv_buf_size =
+			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->roce_base_msix_offset =
-		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
 		hdev->num_roce_msi =
-		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+		/* nic's msix number is always equal to the roce's. */
+		hdev->num_nic_msi = hdev->num_roce_msi;

 		/* PF should have NIC vectors and Roce vectors,
 		 * NIC vectors are queued before Roce vectors.
 		 */
-		hdev->num_msi = hdev->num_roce_msi +
+		hdev->num_msi = hdev->num_roce_msi +
 				hdev->roce_base_msix_offset;
 	} else {
 		hdev->num_msi =
-		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+		hdev->num_nic_msi = hdev->num_msi;
+	}
+
+	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+		dev_err(&hdev->pdev->dev,
+			"Just %u msi resources, not enough for pf(min:2).\n",
+			hdev->num_nic_msi);
+		return -EINVAL;
 	}

 	return 0;
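The tx/dv buffer sizing added above follows one pattern: take the firmware-reported value (scaled by HCLGE_BUF_UNIT_S), fall back to a driver default when the field is zero, then align up to HCLGE_BUF_SIZE_UNIT (256 bytes, defined near the top of this patch). A standalone sketch of that pattern; the shift value used here is illustrative, since HCLGE_BUF_UNIT_S is not defined in this hunk:

#include <stdio.h>
#include <stdint.h>

#define BUF_SIZE_UNIT	256U	/* mirrors HCLGE_BUF_SIZE_UNIT */

/* roundup to a unit that need not be a power of two */
static uint32_t round_up_to(uint32_t x, uint32_t unit)
{
	return ((x + unit - 1) / unit) * unit;
}

static uint32_t pick_buf_size(uint16_t fw_units, uint32_t dflt,
			      unsigned int unit_shift)
{
	uint32_t size = fw_units ? (uint32_t)fw_units << unit_shift : dflt;

	return round_up_to(size, BUF_SIZE_UNIT);
}

int main(void)
{
	/* e.g. firmware reports 3 units of 128 B -> 384 -> rounds to 512 */
	printf("%u\n", pick_buf_size(3, 1024, 7));
	return 0;
}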
@@ -980,6 +969,9 @@
 	case 5:
 		*speed = HCLGE_MAC_SPEED_100G;
 		break;
+	case 8:
+		*speed = HCLGE_MAC_SPEED_200G;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -987,51 +979,292 @@
 	return 0;
 }

+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u32 speed_ability = hdev->hw.mac.speed_ability;
+	u32 speed_bit = 0;
+
+	switch (speed) {
+	case HCLGE_MAC_SPEED_10M:
+		speed_bit = HCLGE_SUPPORT_10M_BIT;
+		break;
+	case HCLGE_MAC_SPEED_100M:
+		speed_bit = HCLGE_SUPPORT_100M_BIT;
+		break;
+	case HCLGE_MAC_SPEED_1G:
+		speed_bit = HCLGE_SUPPORT_1G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_10G:
+		speed_bit = HCLGE_SUPPORT_10G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_25G:
+		speed_bit = HCLGE_SUPPORT_25G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_40G:
+		speed_bit = HCLGE_SUPPORT_40G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_50G:
+		speed_bit = HCLGE_SUPPORT_50G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_100G:
+		speed_bit = HCLGE_SUPPORT_100G_BIT;
+		break;
+	case HCLGE_MAC_SPEED_200G:
+		speed_bit = HCLGE_SUPPORT_200G_BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (speed_bit & speed_ability)
+		return 0;
+
+	return -EINVAL;
+}
+
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+				 mac->supported);
+}
+
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+		linkmode_set_bit(
+			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+			mac->supported);
+}
+
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+				 mac->supported);
+}
+
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+				 mac->supported);
+	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+				 mac->supported);
+}
+
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
+{
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+
+	switch (mac->speed) {
+	case HCLGE_MAC_SPEED_10G:
+	case HCLGE_MAC_SPEED_40G:
+		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+				 mac->supported);
+		mac->fec_ability =
+			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+		break;
+	case HCLGE_MAC_SPEED_25G:
+	case HCLGE_MAC_SPEED_50G:
+		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+				 mac->supported);
+		mac->fec_ability =
+			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+			BIT(HNAE3_FEC_AUTO);
+		break;
+	case HCLGE_MAC_SPEED_100G:
+	case HCLGE_MAC_SPEED_200G:
+		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+		break;
+	default:
+		mac->fec_ability = 0;
+		break;
+	}
+}
+
 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
-					u8 speed_ability)
+					u16 speed_ability)
+{
+	struct hclge_mac *mac = &hdev->hw.mac;
+
+	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+				 mac->supported);
+
+	hclge_convert_setting_sr(mac, speed_ability);
+	hclge_convert_setting_lr(mac, speed_ability);
+	hclge_convert_setting_cr(mac, speed_ability);
+	if (hnae3_dev_fec_supported(hdev))
+		hclge_convert_setting_fec(mac);
+
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
+					    u16 speed_ability)
+{
+	struct hclge_mac *mac = &hdev->hw.mac;
+
+	hclge_convert_setting_kr(mac, speed_ability);
+	if (hnae3_dev_fec_supported(hdev))
+		hclge_convert_setting_fec(mac);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
+					 u16 speed_ability)
 {
 	unsigned long *supported = hdev->hw.mac.supported;

+	/* default to support all speed for GE port */
+	if (!speed_ability)
+		speed_ability = HCLGE_SUPPORT_GE;
+
 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 supported);

-	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-			supported);
+	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				 supported);
+	}

-	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-			supported);
+	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+	}

-	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-			supported);
-
-	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-			supported);
-
-	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
 }

-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
 {
 	u8 media_type = hdev->hw.mac.media_type;

-	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
-		return;
+	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
+		hclge_parse_fiber_link_mode(hdev, speed_ability);
+	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
+		hclge_parse_copper_link_mode(hdev, speed_ability);
+	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
+		hclge_parse_backplane_link_mode(hdev, speed_ability);
+}

-	hclge_parse_fiber_link_mode(hdev, speed_ability);
+static u32 hclge_get_max_speed(u16 speed_ability)
+{
+	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+		return HCLGE_MAC_SPEED_200G;
+
+	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+		return HCLGE_MAC_SPEED_100G;
+
+	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+		return HCLGE_MAC_SPEED_50G;
+
+	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+		return HCLGE_MAC_SPEED_40G;
+
+	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+		return HCLGE_MAC_SPEED_25G;
+
+	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+		return HCLGE_MAC_SPEED_10G;
+
+	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+		return HCLGE_MAC_SPEED_1G;
+
+	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+		return HCLGE_MAC_SPEED_100M;
+
+	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+		return HCLGE_MAC_SPEED_10M;
+
+	return HCLGE_MAC_SPEED_1G;
 }

 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 {
+#define SPEED_ABILITY_EXT_SHIFT 8
+
 	struct hclge_cfg_param_cmd *req;
 	u64 mac_addr_tmp_high;
+	u16 speed_ability_ext;
 	u64 mac_addr_tmp;
-	int i;
+	unsigned int i;

 	req = (struct hclge_cfg_param_cmd *)desc[0].data;

@@ -1078,6 +1311,16 @@
 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
 					     HCLGE_CFG_SPEED_ABILITY_M,
 					     HCLGE_CFG_SPEED_ABILITY_S);
+	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
+					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
+	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
+	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+					 HCLGE_CFG_UMV_TBL_SPACE_M,
+					 HCLGE_CFG_UMV_TBL_SPACE_S);
+	if (!cfg->umv_space)
+		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
 }
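With the extension field above, the speed-ability word grows to 16 bits: bits 0-7 come from the original config field and bits 8-15 from the extended one, which is why the speed_ability parameters elsewhere in this patch widen from u8 to u16. A toy illustration of the merge, with hypothetical field values:

#include <stdio.h>
#include <stdint.h>

#define SPEED_ABILITY_EXT_SHIFT	8

int main(void)
{
	uint16_t ability = 0x3F;	/* low 8 bits from the base field */
	uint16_t ext = 0x01;		/* e.g. a 200G capability bit */

	ability |= ext << SPEED_ABILITY_EXT_SHIFT;
	printf("0x%04x\n", ability);	/* 0x013f */
	return 0;
}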

 /* hclge_get_cfg: query the static parameter from flash
@@ -1088,7 +1331,8 @@
 {
 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
 	struct hclge_cfg_param_cmd *req;
-	int i, ret;
+	unsigned int i;
+	int ret;

 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
 		u32 offset = 0;
@@ -1115,6 +1359,78 @@
 	return 0;
 }

+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+				  struct hclge_desc *desc)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+	struct hclge_dev_specs_0_cmd *req0;
+
+	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+
+	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+	ae_dev->dev_specs.rss_ind_tbl_size =
+		le16_to_cpu(req0->rss_ind_tbl_size);
+	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+	if (!dev_specs->max_non_tso_bd_num)
+		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+	if (!dev_specs->rss_ind_tbl_size)
+		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+	if (!dev_specs->rss_key_size)
+		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+	if (!dev_specs->max_tm_rate)
+		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+	int ret;
+	int i;
+
+	/* set default specifications as devices lower than version V3 do not
+	 * support querying specifications from firmware.
+	 */
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+		hclge_set_default_dev_specs(hdev);
+		return 0;
+	}
+
+	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+					   true);
+		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	}
+	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+	if (ret)
+		return ret;
+
+	hclge_parse_dev_specs(hdev, desc);
+	hclge_check_dev_specs(hdev);
+
+	return 0;
+}
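hclge_query_dev_specs() above composes three steps: pre-V3 devices get compile-time defaults, newer ones query firmware, and hclge_check_dev_specs() backfills any field the firmware left at zero, so a successful multi-BD query with unpopulated fields still yields usable specs. A compact sketch of the backfill idiom, generic rather than driver code and with illustrative default values:

#include <stdio.h>
#include <stdint.h>

struct dev_specs {
	uint16_t rss_ind_tbl_size;
	uint16_t rss_key_size;
	uint32_t max_tm_rate;
};

/* Backfill: any field the firmware reported as 0 gets the default. */
static void check_dev_specs(struct dev_specs *s)
{
	if (!s->rss_ind_tbl_size)
		s->rss_ind_tbl_size = 512;	/* illustrative default */
	if (!s->rss_key_size)
		s->rss_key_size = 40;	/* same length as hclge_hash_key[] */
	if (!s->max_tm_rate)
		s->max_tm_rate = 100000;	/* illustrative default */
}

int main(void)
{
	struct dev_specs s = { .rss_ind_tbl_size = 0, .rss_key_size = 40,
			       .max_tm_rate = 0 };

	check_dev_specs(&s);
	printf("%u %u %u\n", s.rss_ind_tbl_size, s.rss_key_size,
	       s.max_tm_rate);
	return 0;
}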
1433
+
11181434 static int hclge_get_cap(struct hclge_dev *hdev)
11191435 {
11201436 int ret;
....@@ -1127,23 +1443,36 @@
11271443 }
11281444
11291445 /* get pf resource */
1130
- ret = hclge_query_pf_resource(hdev);
1131
- if (ret)
1132
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1446
+ return hclge_query_pf_resource(hdev);
1447
+}
11331448
1134
- return ret;
1449
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1450
+{
1451
+#define HCLGE_MIN_TX_DESC 64
1452
+#define HCLGE_MIN_RX_DESC 64
1453
+
1454
+ if (!is_kdump_kernel())
1455
+ return;
1456
+
1457
+ dev_info(&hdev->pdev->dev,
1458
+ "Running kdump kernel. Using minimal resources\n");
1459
+
1460
+ /* minimal queue pairs equals to the number of vports */
1461
+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1462
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1463
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
11351464 }
11361465
11371466 static int hclge_configure(struct hclge_dev *hdev)
11381467 {
1468
+ const struct cpumask *cpumask = cpu_online_mask;
11391469 struct hclge_cfg cfg;
1140
- int ret, i;
1470
+ unsigned int i;
1471
+ int node, ret;
11411472
11421473 ret = hclge_get_cfg(hdev, &cfg);
1143
- if (ret) {
1144
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1474
+ if (ret)
11451475 return ret;
1146
- }
11471476
11481477 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
11491478 hdev->base_tqp_pid = 0;
....@@ -1152,22 +1481,32 @@
11521481 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
11531482 hdev->hw.mac.media_type = cfg.media_type;
11541483 hdev->hw.mac.phy_addr = cfg.phy_addr;
1155
- hdev->num_desc = cfg.tqp_desc_num;
1484
+ hdev->num_tx_desc = cfg.tqp_desc_num;
1485
+ hdev->num_rx_desc = cfg.tqp_desc_num;
11561486 hdev->tm_info.num_pg = 1;
11571487 hdev->tc_max = cfg.tc_num;
11581488 hdev->tm_info.hw_pfc_map = 0;
1489
+ hdev->wanted_umv_size = cfg.umv_space;
1490
+
1491
+ if (hnae3_dev_fd_supported(hdev)) {
1492
+ hdev->fd_en = true;
1493
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1494
+ }
11591495
11601496 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
11611497 if (ret) {
1162
- dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1498
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1499
+ cfg.default_speed, ret);
11631500 return ret;
11641501 }
11651502
11661503 hclge_parse_link_mode(hdev, cfg.speed_ability);
11671504
1505
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1506
+
11681507 if ((hdev->tc_max > HNAE3_MAX_TC) ||
11691508 (hdev->tc_max < 1)) {
1170
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1509
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
11711510 hdev->tc_max);
11721511 hdev->tc_max = 1;
11731512 }
....@@ -1180,7 +1519,7 @@
11801519 hdev->pfc_max = hdev->tc_max;
11811520 }
11821521
1183
- hdev->tm_info.num_tc = hdev->tc_max;
1522
+ hdev->tm_info.num_tc = 1;
11841523
11851524 /* Currently not support uncontiuous tc */
11861525 for (i = 0; i < hdev->tm_info.num_tc; i++)
....@@ -1188,31 +1527,53 @@
11881527
11891528 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
11901529
1530
+ hclge_init_kdump_kernel_config(hdev);
1531
+
1532
+ /* Set the affinity based on numa node */
1533
+ node = dev_to_node(&hdev->pdev->dev);
1534
+ if (node != NUMA_NO_NODE)
1535
+ cpumask = cpumask_of_node(node);
1536
+
1537
+ cpumask_copy(&hdev->affinity_mask, cpumask);
1538
+
11911539 return ret;
11921540 }
11931541
1194
-static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1195
- int tso_mss_max)
1542
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1543
+ u16 tso_mss_max)
11961544 {
11971545 struct hclge_cfg_tso_status_cmd *req;
11981546 struct hclge_desc desc;
1199
- u16 tso_mss;
12001547
12011548 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
12021549
12031550 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1204
-
1205
- tso_mss = 0;
1206
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1207
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1208
- req->tso_mss_min = cpu_to_le16(tso_mss);
1209
-
1210
- tso_mss = 0;
1211
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1212
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1213
- req->tso_mss_max = cpu_to_le16(tso_mss);
1551
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
1552
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
12141553
12151554 return hclge_cmd_send(&hdev->hw, &desc, 1);
1555
+}
1556
+
1557
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1558
+{
1559
+ struct hclge_cfg_gro_status_cmd *req;
1560
+ struct hclge_desc desc;
1561
+ int ret;
1562
+
1563
+ if (!hnae3_dev_gro_supported(hdev))
1564
+ return 0;
1565
+
1566
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1567
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1568
+
1569
+ req->gro_en = en ? 1 : 0;
1570
+
1571
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1572
+ if (ret)
1573
+ dev_err(&hdev->pdev->dev,
1574
+ "GRO hardware config cmd failed, ret = %d\n", ret);
1575
+
1576
+ return ret;
12161577 }
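
hclge_config_tso() and hclge_config_gro() both follow the driver's single-descriptor command shape: hclge_cmd_setup_basic_desc() with an opcode, a request struct overlaid on desc.data, then hclge_cmd_send(). A hedged sketch of calling the pair during init; the 256/9668 MSS bounds mirror the HCLGE_TSO_MSS_MIN/MAX values used elsewhere in this driver and are an assumption here:

static int example_init_offloads(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_config_tso(hdev, 256, 9668);
	if (ret)
		return ret;

	/* returns 0 untouched on hardware without GRO, see the check above */
	return hclge_config_gro(hdev, true);
}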
12171578
12181579 static int hclge_alloc_tqps(struct hclge_dev *hdev)
....@@ -1233,7 +1594,8 @@
12331594
12341595 tqp->q.ae_algo = &ae_algo;
12351596 tqp->q.buf_size = hdev->rx_buf_len;
1236
- tqp->q.desc_num = hdev->num_desc;
1597
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
1598
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
12371599 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
12381600 i * HCLGE_TQP_REG_SIZE;
12391601
....@@ -1255,8 +1617,9 @@
12551617 req = (struct hclge_tqp_map_cmd *)desc.data;
12561618 req->tqp_id = cpu_to_le16(tqp_pid);
12571619 req->tqp_vf = func_id;
1258
- req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1259
- 1 << HCLGE_TQP_MAP_EN_B;
1620
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1621
+ if (!is_pf)
1622
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
12601623 req->tqp_vid = cpu_to_le16(tqp_vid);
12611624
12621625 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -1266,64 +1629,55 @@
12661629 return ret;
12671630 }
12681631
1269
-static int hclge_assign_tqp(struct hclge_vport *vport)
1632
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
12701633 {
12711634 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12721635 struct hclge_dev *hdev = vport->back;
12731636 int i, alloced;
12741637
12751638 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1276
- alloced < kinfo->num_tqps; i++) {
1639
+ alloced < num_tqps; i++) {
12771640 if (!hdev->htqp[i].alloced) {
12781641 hdev->htqp[i].q.handle = &vport->nic;
12791642 hdev->htqp[i].q.tqp_index = alloced;
1280
- hdev->htqp[i].q.desc_num = kinfo->num_desc;
1643
+ hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1644
+ hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
12811645 kinfo->tqp[alloced] = &hdev->htqp[i].q;
12821646 hdev->htqp[i].alloced = true;
12831647 alloced++;
12841648 }
12851649 }
1286
- vport->alloc_tqps = kinfo->num_tqps;
1650
+ vport->alloc_tqps = alloced;
1651
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1652
+ vport->alloc_tqps / hdev->tm_info.num_tc);
1653
+
1654
+	 /* ensure one-to-one mapping between irq and queue by default */
1655
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
1656
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
12871657
12881658 return 0;
12891659 }
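
The two min_t() clamps above bound rss_size three ways: the hardware RSS limit, the queues available per TC, and the NIC MSI budget (one vector reserved for misc, then one vector per queue per TC). The arithmetic in isolation, with purely illustrative sample numbers:

/* rss_size_max = 8, alloc_tqps = 16, num_tc = 4, num_nic_msi = 9:
 * min(8, 16 / 4) = 4, then min(4, (9 - 1) / 4) = 2 queues per TC.
 */
static u16 example_rss_size(u16 rss_size_max, u16 alloc_tqps, u16 num_tc,
			    u16 num_nic_msi)
{
	u16 rss_size = min_t(u16, rss_size_max, alloc_tqps / num_tc);

	return min_t(u16, rss_size, (num_nic_msi - 1) / num_tc);
}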
12901660
1291
-static int hclge_knic_setup(struct hclge_vport *vport,
1292
- u16 num_tqps, u16 num_desc)
1661
+static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1662
+ u16 num_tx_desc, u16 num_rx_desc)
1663
+
12931664 {
12941665 struct hnae3_handle *nic = &vport->nic;
12951666 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
12961667 struct hclge_dev *hdev = vport->back;
1297
- int i, ret;
1668
+ int ret;
12981669
1299
- kinfo->num_desc = num_desc;
1670
+ kinfo->num_tx_desc = num_tx_desc;
1671
+ kinfo->num_rx_desc = num_rx_desc;
1672
+
13001673 kinfo->rx_buf_len = hdev->rx_buf_len;
1301
- kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1302
- kinfo->rss_size
1303
- = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1304
- kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
13051674
1306
- for (i = 0; i < HNAE3_MAX_TC; i++) {
1307
- if (hdev->hw_tc_map & BIT(i)) {
1308
- kinfo->tc_info[i].enable = true;
1309
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1310
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1311
- kinfo->tc_info[i].tc = i;
1312
- } else {
1313
- /* Set to default queue if TC is disable */
1314
- kinfo->tc_info[i].enable = false;
1315
- kinfo->tc_info[i].tqp_offset = 0;
1316
- kinfo->tc_info[i].tqp_count = 1;
1317
- kinfo->tc_info[i].tc = 0;
1318
- }
1319
- }
1320
-
1321
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1675
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
13221676 sizeof(struct hnae3_queue *), GFP_KERNEL);
13231677 if (!kinfo->tqp)
13241678 return -ENOMEM;
13251679
1326
- ret = hclge_assign_tqp(vport);
1680
+ ret = hclge_assign_tqp(vport, num_tqps);
13271681 if (ret)
13281682		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
13291683
....@@ -1338,7 +1692,7 @@
13381692 u16 i;
13391693
13401694 kinfo = &nic->kinfo;
1341
- for (i = 0; i < kinfo->num_tqps; i++) {
1695
+ for (i = 0; i < vport->alloc_tqps; i++) {
13421696 struct hclge_tqp *q =
13431697 container_of(kinfo->tqp[i], struct hclge_tqp, q);
13441698 bool is_pf;
....@@ -1373,11 +1727,6 @@
13731727 return 0;
13741728 }
13751729
1376
-static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1377
-{
1378
- /* this would be initialized later */
1379
-}
1380
-
13811730 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
13821731 {
13831732 struct hnae3_handle *nic = &vport->nic;
....@@ -1388,18 +1737,12 @@
13881737 nic->ae_algo = &ae_algo;
13891738 nic->numa_node_mask = hdev->numa_node_mask;
13901739
1391
- if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1392
- ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1393
- if (ret) {
1394
- dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1395
- ret);
1396
- return ret;
1397
- }
1398
- } else {
1399
- hclge_unic_setup(vport, num_tqps);
1400
- }
1740
+ ret = hclge_knic_setup(vport, num_tqps,
1741
+ hdev->num_tx_desc, hdev->num_rx_desc);
1742
+ if (ret)
1743
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
14011744
1402
- return 0;
1745
+ return ret;
14031746 }
14041747
14051748 static int hclge_alloc_vport(struct hclge_dev *hdev)
....@@ -1415,7 +1758,7 @@
14151758 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
14161759
14171760 if (hdev->num_tqps < num_vport) {
1418
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1761
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
14191762 hdev->num_tqps, num_vport);
14201763 return -EINVAL;
14211764 }
....@@ -1438,6 +1781,14 @@
14381781 for (i = 0; i < num_vport; i++) {
14391782 vport->back = hdev;
14401783 vport->vport_id = i;
1784
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1785
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1786
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1787
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
1788
+ INIT_LIST_HEAD(&vport->vlan_list);
1789
+ INIT_LIST_HEAD(&vport->uc_mac_list);
1790
+ INIT_LIST_HEAD(&vport->mc_mac_list);
1791
+ spin_lock_init(&vport->mac_list_lock);
14411792
14421793 if (i == 0)
14431794 ret = hclge_vport_setup(vport, tqp_main_vport);
....@@ -1470,7 +1821,7 @@
14701821 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
14711822
14721823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1473
- for (i = 0; i < HCLGE_TC_NUM; i++) {
1824
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
14741825 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
14751826
14761827 req->tx_pkt_buff[i] =
....@@ -1497,23 +1848,13 @@
14971848 return ret;
14981849 }
14991850
1500
-static int hclge_get_tc_num(struct hclge_dev *hdev)
1851
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
15011852 {
1502
- int i, cnt = 0;
1853
+ unsigned int i;
1854
+ u32 cnt = 0;
15031855
15041856 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
15051857 if (hdev->hw_tc_map & BIT(i))
1506
- cnt++;
1507
- return cnt;
1508
-}
1509
-
1510
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1511
-{
1512
- int i, cnt = 0;
1513
-
1514
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1515
- if (hdev->hw_tc_map & BIT(i) &&
1516
- hdev->tm_info.hw_pfc_map & BIT(i))
15171858 cnt++;
15181859 return cnt;
15191860 }
....@@ -1523,7 +1864,8 @@
15231864 struct hclge_pkt_buf_alloc *buf_alloc)
15241865 {
15251866 struct hclge_priv_buf *priv;
1526
- int i, cnt = 0;
1867
+ unsigned int i;
1868
+ int cnt = 0;
15271869
15281870 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
15291871 priv = &buf_alloc->priv_buf[i];
....@@ -1540,7 +1882,8 @@
15401882 struct hclge_pkt_buf_alloc *buf_alloc)
15411883 {
15421884 struct hclge_priv_buf *priv;
1543
- int i, cnt = 0;
1885
+ unsigned int i;
1886
+ int cnt = 0;
15441887
15451888 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
15461889 priv = &buf_alloc->priv_buf[i];
....@@ -1581,43 +1924,63 @@
15811924 struct hclge_pkt_buf_alloc *buf_alloc,
15821925 u32 rx_all)
15831926 {
1584
- u32 shared_buf_min, shared_buf_tc, shared_std;
1585
- int tc_num, pfc_enable_num;
1586
- u32 shared_buf;
1927
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1928
+ u32 tc_num = hclge_get_tc_num(hdev);
1929
+ u32 shared_buf, aligned_mps;
15871930 u32 rx_priv;
15881931 int i;
15891932
1590
- tc_num = hclge_get_tc_num(hdev);
1591
- pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1933
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
15921934
15931935 if (hnae3_dev_dcb_supported(hdev))
1594
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1936
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1937
+ hdev->dv_buf_size;
15951938 else
1596
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1939
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1940
+ + hdev->dv_buf_size;
15971941
1598
- shared_buf_tc = pfc_enable_num * hdev->mps +
1599
- (tc_num - pfc_enable_num) * hdev->mps / 2 +
1600
- hdev->mps;
1601
- shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1942
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1943
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1944
+ HCLGE_BUF_SIZE_UNIT);
16021945
16031946 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1604
- if (rx_all <= rx_priv + shared_std)
1947
+ if (rx_all < rx_priv + shared_std)
16051948 return false;
16061949
1607
- shared_buf = rx_all - rx_priv;
1950
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
16081951 buf_alloc->s_buf.buf_size = shared_buf;
1609
- buf_alloc->s_buf.self.high = shared_buf;
1610
- buf_alloc->s_buf.self.low = 2 * hdev->mps;
1952
+ if (hnae3_dev_dcb_supported(hdev)) {
1953
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1954
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1955
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1956
+ HCLGE_BUF_SIZE_UNIT);
1957
+ } else {
1958
+ buf_alloc->s_buf.self.high = aligned_mps +
1959
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
1960
+ buf_alloc->s_buf.self.low = aligned_mps;
1961
+ }
1962
+
1963
+ if (hnae3_dev_dcb_supported(hdev)) {
1964
+ hi_thrd = shared_buf - hdev->dv_buf_size;
1965
+
1966
+ if (tc_num <= NEED_RESERVE_TC_NUM)
1967
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1968
+ / BUF_MAX_PERCENT;
1969
+
1970
+ if (tc_num)
1971
+ hi_thrd = hi_thrd / tc_num;
1972
+
1973
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1974
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1975
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1976
+ } else {
1977
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1978
+ lo_thrd = aligned_mps;
1979
+ }
16111980
16121981 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1613
- if ((hdev->hw_tc_map & BIT(i)) &&
1614
- (hdev->tm_info.hw_pfc_map & BIT(i))) {
1615
- buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1616
- buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1617
- } else {
1618
- buf_alloc->s_buf.tc_thrd[i].low = 0;
1619
- buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1620
- }
1982
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1983
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
16211984 }
16221985
16231986 return true;
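
A worked example makes the DCB threshold math above concrete; every input below is an assumption picked to divide cleanly:

/* mps = 1500 -> aligned_mps = roundup(1500, 256) = 1536
 * dv_buf_size = 16384, tc_num = 2, shared_buf = 49152
 * hi_thrd = 49152 - 16384 = 32768
 * tc_num <= NEED_RESERVE_TC_NUM: 32768 * 90 / 100 = 29491
 * per TC: 29491 / 2 = 14745 (the 2 * aligned_mps floor does not bind),
 * rounddown(14745, 256) = 14592; lo_thrd = 14592 - 1536 / 2 = 13824
 * so every enabled TC gets tc_thrd = { .low = 13824, .high = 14592 }.
 */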
....@@ -1634,13 +1997,14 @@
16341997 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
16351998 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
16361999
1637
- if (total_size < HCLGE_DEFAULT_TX_BUF)
1638
- return -ENOMEM;
2000
+ if (hdev->hw_tc_map & BIT(i)) {
2001
+ if (total_size < hdev->tx_buf_size)
2002
+ return -ENOMEM;
16392003
1640
- if (hdev->hw_tc_map & BIT(i))
1641
- priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1642
- else
2004
+ priv->tx_buf_size = hdev->tx_buf_size;
2005
+ } else {
16432006 priv->tx_buf_size = 0;
2007
+ }
16442008
16452009 total_size -= priv->tx_buf_size;
16462010 }
....@@ -1648,64 +2012,15 @@
16482012 return 0;
16492013 }
16502014
1651
-/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1652
- * @hdev: pointer to struct hclge_dev
1653
- * @buf_alloc: pointer to buffer calculation data
1654
- * @return: 0: calculate sucessful, negative: fail
1655
- */
1656
-static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1657
- struct hclge_pkt_buf_alloc *buf_alloc)
2015
+static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2016
+ struct hclge_pkt_buf_alloc *buf_alloc)
16582017 {
1659
-#define HCLGE_BUF_SIZE_UNIT 128
1660
- u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1661
- int no_pfc_priv_num, pfc_priv_num;
1662
- struct hclge_priv_buf *priv;
1663
- int i;
2018
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2019
+ u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2020
+ unsigned int i;
16642021
1665
- aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1666
- rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1667
-
1668
- /* When DCB is not supported, rx private
1669
- * buffer is not allocated.
1670
- */
1671
- if (!hnae3_dev_dcb_supported(hdev)) {
1672
- if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1673
- return -ENOMEM;
1674
-
1675
- return 0;
1676
- }
1677
-
1678
- /* step 1, try to alloc private buffer for all enabled tc */
16792022 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1680
- priv = &buf_alloc->priv_buf[i];
1681
- if (hdev->hw_tc_map & BIT(i)) {
1682
- priv->enable = 1;
1683
- if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1684
- priv->wl.low = aligned_mps;
1685
- priv->wl.high = priv->wl.low + aligned_mps;
1686
- priv->buf_size = priv->wl.high +
1687
- HCLGE_DEFAULT_DV;
1688
- } else {
1689
- priv->wl.low = 0;
1690
- priv->wl.high = 2 * aligned_mps;
1691
- priv->buf_size = priv->wl.high;
1692
- }
1693
- } else {
1694
- priv->enable = 0;
1695
- priv->wl.low = 0;
1696
- priv->wl.high = 0;
1697
- priv->buf_size = 0;
1698
- }
1699
- }
1700
-
1701
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1702
- return 0;
1703
-
1704
- /* step 2, try to decrease the buffer size of
1705
- * no pfc TC's private buffer
1706
- */
1707
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1708
- priv = &buf_alloc->priv_buf[i];
2023
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
17092024
17102025 priv->enable = 0;
17112026 priv->wl.low = 0;
....@@ -1718,31 +2033,35 @@
17182033 priv->enable = 1;
17192034
17202035 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1721
- priv->wl.low = 128;
1722
- priv->wl.high = priv->wl.low + aligned_mps;
1723
- priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
2036
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2037
+ priv->wl.high = roundup(priv->wl.low + aligned_mps,
2038
+ HCLGE_BUF_SIZE_UNIT);
17242039 } else {
17252040 priv->wl.low = 0;
1726
- priv->wl.high = aligned_mps;
1727
- priv->buf_size = priv->wl.high;
2041
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2042
+ aligned_mps;
17282043 }
2044
+
2045
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
17292046 }
17302047
1731
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1732
- return 0;
2048
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2049
+}
17332050
1734
- /* step 3, try to reduce the number of pfc disabled TCs,
1735
- * which have private buffer
1736
- */
1737
- /* get the total no pfc enable TC number, which have private buffer */
1738
- no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2051
+static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2052
+ struct hclge_pkt_buf_alloc *buf_alloc)
2053
+{
2054
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2055
+ int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2056
+ int i;
17392057
17402058	 /* let the last one be cleared first */
17412059 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1742
- priv = &buf_alloc->priv_buf[i];
2060
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2061
+ unsigned int mask = BIT((unsigned int)i);
17432062
1744
- if (hdev->hw_tc_map & BIT(i) &&
1745
- !(hdev->tm_info.hw_pfc_map & BIT(i))) {
2063
+ if (hdev->hw_tc_map & mask &&
2064
+ !(hdev->tm_info.hw_pfc_map & mask)) {
17462065 /* Clear the no pfc TC private buffer */
17472066 priv->wl.low = 0;
17482067 priv->wl.high = 0;
....@@ -1756,20 +2075,23 @@
17562075 break;
17572076 }
17582077
1759
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1760
- return 0;
2078
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2079
+}
17612080
1762
- /* step 4, try to reduce the number of pfc enabled TCs
1763
- * which have private buffer.
1764
- */
1765
- pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2081
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2082
+ struct hclge_pkt_buf_alloc *buf_alloc)
2083
+{
2084
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085
+ int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2086
+ int i;
17662087
17672088	 /* let the last one be cleared first */
17682089 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1769
- priv = &buf_alloc->priv_buf[i];
2090
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2091
+ unsigned int mask = BIT((unsigned int)i);
17702092
1771
- if (hdev->hw_tc_map & BIT(i) &&
1772
- hdev->tm_info.hw_pfc_map & BIT(i)) {
2093
+ if (hdev->hw_tc_map & mask &&
2094
+ hdev->tm_info.hw_pfc_map & mask) {
17732095 /* Reduce the number of pfc TC with private buffer */
17742096 priv->wl.low = 0;
17752097 priv->enable = 0;
....@@ -1782,7 +2104,92 @@
17822104 pfc_priv_num == 0)
17832105 break;
17842106 }
1785
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2107
+
2108
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2109
+}
2110
+
2111
+static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2112
+ struct hclge_pkt_buf_alloc *buf_alloc)
2113
+{
2114
+#define COMPENSATE_BUFFER 0x3C00
2115
+#define COMPENSATE_HALF_MPS_NUM 5
2116
+#define PRIV_WL_GAP 0x1800
2117
+
2118
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2119
+ u32 tc_num = hclge_get_tc_num(hdev);
2120
+ u32 half_mps = hdev->mps >> 1;
2121
+ u32 min_rx_priv;
2122
+ unsigned int i;
2123
+
2124
+ if (tc_num)
2125
+ rx_priv = rx_priv / tc_num;
2126
+
2127
+ if (tc_num <= NEED_RESERVE_TC_NUM)
2128
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2129
+
2130
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2131
+ COMPENSATE_HALF_MPS_NUM * half_mps;
2132
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2133
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2134
+
2135
+ if (rx_priv < min_rx_priv)
2136
+ return false;
2137
+
2138
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2139
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2140
+
2141
+ priv->enable = 0;
2142
+ priv->wl.low = 0;
2143
+ priv->wl.high = 0;
2144
+ priv->buf_size = 0;
2145
+
2146
+ if (!(hdev->hw_tc_map & BIT(i)))
2147
+ continue;
2148
+
2149
+ priv->enable = 1;
2150
+ priv->buf_size = rx_priv;
2151
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
2152
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2153
+ }
2154
+
2155
+ buf_alloc->s_buf.buf_size = 0;
2156
+
2157
+ return true;
2158
+}
2159
+
2160
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2161
+ * @hdev: pointer to struct hclge_dev
2162
+ * @buf_alloc: pointer to buffer calculation data
2163
+ * @return: 0: calculation successful, negative: fail
2164
+ */
2165
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2166
+ struct hclge_pkt_buf_alloc *buf_alloc)
2167
+{
2168
+ /* When DCB is not supported, rx private buffer is not allocated. */
2169
+ if (!hnae3_dev_dcb_supported(hdev)) {
2170
+ u32 rx_all = hdev->pkt_buf_size;
2171
+
2172
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2173
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2174
+ return -ENOMEM;
2175
+
2176
+ return 0;
2177
+ }
2178
+
2179
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2180
+ return 0;
2181
+
2182
+ if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2183
+ return 0;
2184
+
2185
+ /* try to decrease the buffer size */
2186
+ if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2187
+ return 0;
2188
+
2189
+ if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2190
+ return 0;
2191
+
2192
+ if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
17862193 return 0;
17872194
17882195 return -ENOMEM;
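
hclge_rx_buffer_calc() is a strict fallback cascade; each stage recomputes the layout and the first one that passes hclge_is_rx_buf_ok() wins:

/* 1. no DCB: shared buffer only, -ENOMEM if even that cannot fit
 * 2. hclge_only_alloc_priv_buff(): private buffers only, no shared
 * 3. hclge_rx_buf_calc_all(hdev, true, ...): generous waterlines
 * 4. hclge_rx_buf_calc_all(hdev, false, ...): minimum waterlines
 * 5. hclge_drop_nopfc_buf_till_fit(): drop non-PFC TC private buffers
 * 6. hclge_drop_pfc_buf_till_fit(): drop PFC TC private buffers
 */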
....@@ -2028,7 +2435,8 @@
20282435 int vectors;
20292436 int i;
20302437
2031
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2438
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2439
+ hdev->num_msi,
20322440 PCI_IRQ_MSI | PCI_IRQ_MSIX);
20332441 if (vectors < 0) {
20342442 dev_err(&pdev->dev,
....@@ -2038,11 +2446,12 @@
20382446 }
20392447 if (vectors < hdev->num_msi)
20402448 dev_warn(&hdev->pdev->dev,
2041
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2449
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
20422450 hdev->num_msi, vectors);
20432451
20442452 hdev->num_msi = vectors;
20452453 hdev->num_msi_left = vectors;
2454
+
20462455 hdev->base_msi_vector = pdev->irq;
20472456 hdev->roce_base_vector = hdev->base_msi_vector +
20482457 hdev->roce_base_msix_offset;
....@@ -2067,19 +2476,16 @@
20672476 return 0;
20682477 }
20692478
2070
-static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2479
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
20712480 {
2072
- struct hclge_mac *mac = &hdev->hw.mac;
2481
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2482
+ duplex = HCLGE_MAC_FULL;
20732483
2074
- if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2075
- mac->duplex = (u8)duplex;
2076
- else
2077
- mac->duplex = HCLGE_MAC_FULL;
2078
-
2079
- mac->speed = speed;
2484
+ return duplex;
20802485 }
20812486
2082
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2487
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2488
+ u8 duplex)
20832489 {
20842490 struct hclge_config_mac_speed_dup_cmd *req;
20852491 struct hclge_desc desc;
....@@ -2089,7 +2495,8 @@
20892495
20902496 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
20912497
2092
- hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2498
+ if (duplex)
2499
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
20932500
20942501 switch (speed) {
20952502 case HCLGE_MAC_SPEED_10M:
....@@ -2124,6 +2531,10 @@
21242531 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
21252532 HCLGE_CFG_SPEED_S, 5);
21262533 break;
2534
+ case HCLGE_MAC_SPEED_200G:
2535
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2536
+ HCLGE_CFG_SPEED_S, 8);
2537
+ break;
21272538 default:
21282539 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
21292540 return -EINVAL;
....@@ -2139,7 +2550,25 @@
21392550 return ret;
21402551 }
21412552
2142
- hclge_check_speed_dup(hdev, duplex, speed);
2553
+ return 0;
2554
+}
2555
+
2556
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2557
+{
2558
+ struct hclge_mac *mac = &hdev->hw.mac;
2559
+ int ret;
2560
+
2561
+ duplex = hclge_check_speed_dup(duplex, speed);
2562
+ if (!mac->support_autoneg && mac->speed == speed &&
2563
+ mac->duplex == duplex)
2564
+ return 0;
2565
+
2566
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2567
+ if (ret)
2568
+ return ret;
2569
+
2570
+ hdev->hw.mac.speed = speed;
2571
+ hdev->hw.mac.duplex = duplex;
21432572
21442573 return 0;
21452574 }
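
The rewritten wrapper caches the programmed speed/duplex and skips the firmware command when a fixed-speed port is reconfigured with identical values. A usage sketch; HCLGE_MAC_SPEED_10G and HCLGE_MAC_FULL are this driver's constants, the helper itself is illustrative:

static int example_force_10g(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_10G,
				      HCLGE_MAC_FULL);
	if (ret)
		return ret;

	/* identical settings on a non-autoneg port: returns 0 without
	 * sending another command
	 */
	return hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_10G,
				       HCLGE_MAC_FULL);
}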
....@@ -2153,37 +2582,6 @@
21532582 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
21542583 }
21552584
2156
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2157
- u8 *duplex)
2158
-{
2159
- struct hclge_query_an_speed_dup_cmd *req;
2160
- struct hclge_desc desc;
2161
- int speed_tmp;
2162
- int ret;
2163
-
2164
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2165
-
2166
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2167
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2168
- if (ret) {
2169
- dev_err(&hdev->pdev->dev,
2170
- "mac speed/autoneg/duplex query cmd failed %d\n",
2171
- ret);
2172
- return ret;
2173
- }
2174
-
2175
- *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2176
- speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2177
- HCLGE_QUERY_SPEED_S);
2178
-
2179
- ret = hclge_parse_speed(speed_tmp, speed);
2180
- if (ret)
2181
- dev_err(&hdev->pdev->dev,
2182
- "could not parse speed(=%d), %d\n", speed_tmp, ret);
2183
-
2184
- return ret;
2185
-}
2186
-
21872585 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
21882586 {
21892587 struct hclge_config_auto_neg_cmd *req;
....@@ -2194,7 +2592,8 @@
21942592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
21952593
21962594 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2197
- hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2595
+ if (enable)
2596
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
21982597 req->cfg_an_cmd_flag = cpu_to_le32(flag);
21992598
22002599 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -2209,6 +2608,16 @@
22092608 {
22102609 struct hclge_vport *vport = hclge_get_vport(handle);
22112610 struct hclge_dev *hdev = vport->back;
2611
+
2612
+ if (!hdev->hw.mac.support_autoneg) {
2613
+ if (enable) {
2614
+ dev_err(&hdev->pdev->dev,
2615
+ "autoneg is not supported by current port\n");
2616
+ return -EOPNOTSUPP;
2617
+ } else {
2618
+ return 0;
2619
+ }
2620
+ }
22122621
22132622 return hclge_set_autoneg_en(hdev, enable);
22142623 }
....@@ -2225,121 +2634,162 @@
22252634 return hdev->hw.mac.autoneg;
22262635 }
22272636
2228
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2229
- bool mask_vlan,
2230
- u8 *mac_mask)
2637
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
22312638 {
2232
- struct hclge_mac_vlan_mask_entry_cmd *req;
2639
+ struct hclge_vport *vport = hclge_get_vport(handle);
2640
+ struct hclge_dev *hdev = vport->back;
2641
+ int ret;
2642
+
2643
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2644
+
2645
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2646
+ if (ret)
2647
+ return ret;
2648
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2649
+}
2650
+
2651
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2652
+{
2653
+ struct hclge_vport *vport = hclge_get_vport(handle);
2654
+ struct hclge_dev *hdev = vport->back;
2655
+
2656
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2657
+ return hclge_set_autoneg_en(hdev, !halt);
2658
+
2659
+ return 0;
2660
+}
2661
+
2662
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2663
+{
2664
+ struct hclge_config_fec_cmd *req;
22332665 struct hclge_desc desc;
2234
- int status;
2666
+ int ret;
22352667
2236
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2237
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2668
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
22382669
2239
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2240
- mask_vlan ? 1 : 0);
2241
- ether_addr_copy(req->mac_mask, mac_mask);
2670
+ req = (struct hclge_config_fec_cmd *)desc.data;
2671
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
2672
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2673
+ if (fec_mode & BIT(HNAE3_FEC_RS))
2674
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2675
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2676
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
2677
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2678
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
22422679
2243
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
2244
- if (status)
2245
- dev_err(&hdev->pdev->dev,
2246
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2247
- status);
2680
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2681
+ if (ret)
2682
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
22482683
2249
- return status;
2684
+ return ret;
2685
+}
2686
+
2687
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2688
+{
2689
+ struct hclge_vport *vport = hclge_get_vport(handle);
2690
+ struct hclge_dev *hdev = vport->back;
2691
+ struct hclge_mac *mac = &hdev->hw.mac;
2692
+ int ret;
2693
+
2694
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
2695
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2696
+ return -EINVAL;
2697
+ }
2698
+
2699
+ ret = hclge_set_fec_hw(hdev, fec_mode);
2700
+ if (ret)
2701
+ return ret;
2702
+
2703
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2704
+ return 0;
2705
+}
2706
+
2707
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2708
+ u8 *fec_mode)
2709
+{
2710
+ struct hclge_vport *vport = hclge_get_vport(handle);
2711
+ struct hclge_dev *hdev = vport->back;
2712
+ struct hclge_mac *mac = &hdev->hw.mac;
2713
+
2714
+ if (fec_ability)
2715
+ *fec_ability = mac->fec_ability;
2716
+ if (fec_mode)
2717
+ *fec_mode = mac->fec_mode;
22502718 }
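
hclge_set_fec() checks the request against the advertised fec_ability mask before touching hardware, then latches the user's choice with HNAE3_FEC_USER_DEF so hclge_mac_init() can replay it after a reset (see the user_fec_mode test there). A hedged caller sketch:

static int example_request_rs_fec(struct hnae3_handle *handle)
{
	u8 ability, mode;
	int ret;

	/* fails with -EINVAL if BIT(HNAE3_FEC_RS) is not in fec_ability */
	ret = hclge_set_fec(handle, BIT(HNAE3_FEC_RS));
	if (ret)
		return ret;

	hclge_get_fec(handle, &ability, &mode);
	return 0;
}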
22512719
22522720 static int hclge_mac_init(struct hclge_dev *hdev)
22532721 {
2254
- struct hnae3_handle *handle = &hdev->vport[0].nic;
2255
- struct net_device *netdev = handle->kinfo.netdev;
22562722 struct hclge_mac *mac = &hdev->hw.mac;
2257
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2258
- struct hclge_vport *vport;
2259
- int mtu;
22602723 int ret;
2261
- int i;
22622724
2263
- ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2264
- if (ret) {
2265
- dev_err(&hdev->pdev->dev,
2266
- "Config mac speed dup fail ret=%d\n", ret);
2725
+ hdev->support_sfp_query = true;
2726
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2727
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2728
+ hdev->hw.mac.duplex);
2729
+ if (ret)
22672730 return ret;
2731
+
2732
+ if (hdev->hw.mac.support_autoneg) {
2733
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2734
+ if (ret)
2735
+ return ret;
22682736 }
22692737
22702738 mac->link = 0;
22712739
2272
- /* Initialize the MTA table work mode */
2273
- hdev->enable_mta = true;
2274
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2275
-
2276
- ret = hclge_set_mta_filter_mode(hdev,
2277
- hdev->mta_mac_sel_type,
2278
- hdev->enable_mta);
2279
- if (ret) {
2280
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2281
- ret);
2282
- return ret;
2283
- }
2284
-
2285
- for (i = 0; i < hdev->num_alloc_vport; i++) {
2286
- vport = &hdev->vport[i];
2287
- vport->accept_mta_mc = false;
2288
-
2289
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
2290
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
2291
- if (ret) {
2292
- dev_err(&hdev->pdev->dev,
2293
- "set mta filter mode fail ret=%d\n", ret);
2740
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2741
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2742
+ if (ret)
22942743 return ret;
2295
- }
22962744 }
22972745
2298
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2746
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
22992747 if (ret) {
2300
- dev_err(&hdev->pdev->dev,
2301
- "set default mac_vlan_mask fail ret=%d\n", ret);
2748
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
23022749 return ret;
23032750 }
23042751
2305
- if (netdev)
2306
- mtu = netdev->mtu;
2307
- else
2308
- mtu = ETH_DATA_LEN;
2752
+ ret = hclge_set_default_loopback(hdev);
2753
+ if (ret)
2754
+ return ret;
23092755
2310
- ret = hclge_set_mtu(handle, mtu);
2756
+ ret = hclge_buffer_alloc(hdev);
23112757 if (ret)
23122758 dev_err(&hdev->pdev->dev,
2313
- "set mtu failed ret=%d\n", ret);
2759
+ "allocate buffer fail, ret=%d\n", ret);
23142760
23152761 return ret;
23162762 }
23172763
23182764 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
23192765 {
2320
- if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2321
- schedule_work(&hdev->mbx_service_task);
2766
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2767
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2768
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2769
+ hclge_wq, &hdev->service_task, 0);
23222770 }
23232771
23242772 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
23252773 {
2326
- if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2327
- schedule_work(&hdev->rst_service_task);
2774
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2775
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2776
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2777
+ hclge_wq, &hdev->service_task, 0);
23282778 }
23292779
2330
-static void hclge_task_schedule(struct hclge_dev *hdev)
2780
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
23312781 {
2332
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2333
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2334
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2335
- (void)schedule_work(&hdev->service_task);
2782
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2783
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2784
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2785
+ hclge_wq, &hdev->service_task,
2786
+ delay_time);
23362787 }
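
All three schedulers funnel into the same delayed work on the private hclge_wq, queued on the first CPU of the device's affinity mask; the test_and_set_bit() guards in the mailbox and reset variants make repeated scheduling idempotent. The idiom in isolation, with illustrative names:

static void example_schedule(struct workqueue_struct *wq,
			     struct delayed_work *task,
			     unsigned long *state, cpumask_t *affinity,
			     unsigned long delay)
{
	/* bit 0 stands in for a HCLGE_STATE_*_SERVICE_SCHED flag */
	if (!test_and_set_bit(0, state))
		mod_delayed_work_on(cpumask_first(affinity), wq, task, delay);
}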
23372788
2338
-static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2789
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
23392790 {
23402791 struct hclge_link_status_cmd *req;
23412792 struct hclge_desc desc;
2342
- int link_status;
23432793 int ret;
23442794
23452795 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
....@@ -2351,92 +2801,200 @@
23512801 }
23522802
23532803 req = (struct hclge_link_status_cmd *)desc.data;
2354
- link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2804
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2805
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
23552806
2356
- return !!link_status;
2807
+ return 0;
23572808 }
23582809
2359
-static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2810
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
23602811 {
2361
- int mac_state;
2362
- int link_stat;
2812
+ struct phy_device *phydev = hdev->hw.mac.phydev;
2813
+
2814
+ *link_status = HCLGE_LINK_STATUS_DOWN;
23632815
23642816 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
23652817 return 0;
23662818
2367
- mac_state = hclge_get_mac_link_status(hdev);
2819
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2820
+ return 0;
23682821
2369
- if (hdev->hw.mac.phydev) {
2370
- if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2371
- link_stat = mac_state &
2372
- hdev->hw.mac.phydev->link;
2373
- else
2374
- link_stat = 0;
2375
-
2376
- } else {
2377
- link_stat = mac_state;
2378
- }
2379
-
2380
- return !!link_stat;
2822
+ return hclge_get_mac_link_status(hdev, link_status);
23812823 }
23822824
23832825 static void hclge_update_link_status(struct hclge_dev *hdev)
23842826 {
2827
+ struct hnae3_client *rclient = hdev->roce_client;
23852828 struct hnae3_client *client = hdev->nic_client;
2829
+ struct hnae3_handle *rhandle;
23862830 struct hnae3_handle *handle;
23872831 int state;
2832
+ int ret;
23882833 int i;
23892834
23902835 if (!client)
23912836 return;
2392
- state = hclge_get_mac_phy_link(hdev);
2837
+
2838
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2839
+ return;
2840
+
2841
+ ret = hclge_get_mac_phy_link(hdev, &state);
2842
+ if (ret) {
2843
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2844
+ return;
2845
+ }
2846
+
23932847 if (state != hdev->hw.mac.link) {
23942848 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
23952849 handle = &hdev->vport[i].nic;
23962850 client->ops->link_status_change(handle, state);
2851
+ hclge_config_mac_tnl_int(hdev, state);
2852
+ rhandle = &hdev->vport[i].roce;
2853
+ if (rclient && rclient->ops->link_status_change)
2854
+ rclient->ops->link_status_change(rhandle,
2855
+ state);
23972856 }
23982857 hdev->hw.mac.link = state;
23992858 }
2859
+
2860
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
24002861 }
24012862
2402
-static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2863
+static void hclge_update_port_capability(struct hclge_mac *mac)
24032864 {
2404
- struct hclge_mac mac = hdev->hw.mac;
2405
- u8 duplex;
2406
- int speed;
2865
+ /* update fec ability by speed */
2866
+ hclge_convert_setting_fec(mac);
2867
+
2868
+	/* firmware cannot identify the backplane type, the media type
2869
+	 * read from the configuration can help to deal with it
2870
+ */
2871
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2872
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2873
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
2874
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2875
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
2876
+
2877
+ if (mac->support_autoneg) {
2878
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2879
+ linkmode_copy(mac->advertising, mac->supported);
2880
+ } else {
2881
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2882
+ mac->supported);
2883
+ linkmode_zero(mac->advertising);
2884
+ }
2885
+}
2886
+
2887
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2888
+{
2889
+ struct hclge_sfp_info_cmd *resp;
2890
+ struct hclge_desc desc;
24072891 int ret;
24082892
2409
- /* get the speed and duplex as autoneg'result from mac cmd when phy
2410
- * doesn't exit.
2411
- */
2412
- if (mac.phydev || !mac.autoneg)
2413
- return 0;
2414
-
2415
- ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2416
- if (ret) {
2417
- dev_err(&hdev->pdev->dev,
2418
- "mac autoneg/speed/duplex query failed %d\n", ret);
2893
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2894
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
2895
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2896
+ if (ret == -EOPNOTSUPP) {
2897
+ dev_warn(&hdev->pdev->dev,
2898
+ "IMP do not support get SFP speed %d\n", ret);
2899
+ return ret;
2900
+ } else if (ret) {
2901
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
24192902 return ret;
24202903 }
24212904
2422
- if ((mac.speed != speed) || (mac.duplex != duplex)) {
2423
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2424
- if (ret) {
2425
- dev_err(&hdev->pdev->dev,
2426
- "mac speed/duplex config failed %d\n", ret);
2427
- return ret;
2428
- }
2905
+ *speed = le32_to_cpu(resp->speed);
2906
+
2907
+ return 0;
2908
+}
2909
+
2910
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2911
+{
2912
+ struct hclge_sfp_info_cmd *resp;
2913
+ struct hclge_desc desc;
2914
+ int ret;
2915
+
2916
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2917
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
2918
+
2919
+ resp->query_type = QUERY_ACTIVE_SPEED;
2920
+
2921
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2922
+ if (ret == -EOPNOTSUPP) {
2923
+ dev_warn(&hdev->pdev->dev,
2924
+ "IMP does not support get SFP info %d\n", ret);
2925
+ return ret;
2926
+ } else if (ret) {
2927
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2928
+ return ret;
2929
+ }
2930
+
2931
+	/* In some cases, the MAC speed read from IMP may be 0; it shouldn't be
2932
+ * set to mac->speed.
2933
+ */
2934
+ if (!le32_to_cpu(resp->speed))
2935
+ return 0;
2936
+
2937
+ mac->speed = le32_to_cpu(resp->speed);
2938
+	/* if resp->speed_ability is 0, it means it's an old version of
2939
+	 * firmware, so do not update these params
2940
+ */
2941
+ if (resp->speed_ability) {
2942
+ mac->module_type = le32_to_cpu(resp->module_type);
2943
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
2944
+ mac->autoneg = resp->autoneg;
2945
+ mac->support_autoneg = resp->autoneg_ability;
2946
+ mac->speed_type = QUERY_ACTIVE_SPEED;
2947
+ if (!resp->active_fec)
2948
+ mac->fec_mode = 0;
2949
+ else
2950
+ mac->fec_mode = BIT(resp->active_fec);
2951
+ } else {
2952
+ mac->speed_type = QUERY_SFP_SPEED;
24292953 }
24302954
24312955 return 0;
24322956 }
24332957
2434
-static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2958
+static int hclge_update_port_info(struct hclge_dev *hdev)
24352959 {
2436
- struct hclge_vport *vport = hclge_get_vport(handle);
2437
- struct hclge_dev *hdev = vport->back;
2960
+ struct hclge_mac *mac = &hdev->hw.mac;
2961
+ int speed = HCLGE_MAC_SPEED_UNKNOWN;
2962
+ int ret;
24382963
2439
- return hclge_update_speed_duplex(hdev);
2964
+ /* get the port info from SFP cmd if not copper port */
2965
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2966
+ return 0;
2967
+
2968
+	/* if IMP does not support getting SFP/qSFP info, return directly */
2969
+ if (!hdev->support_sfp_query)
2970
+ return 0;
2971
+
2972
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2973
+ ret = hclge_get_sfp_info(hdev, mac);
2974
+ else
2975
+ ret = hclge_get_sfp_speed(hdev, &speed);
2976
+
2977
+ if (ret == -EOPNOTSUPP) {
2978
+ hdev->support_sfp_query = false;
2979
+ return ret;
2980
+ } else if (ret) {
2981
+ return ret;
2982
+ }
2983
+
2984
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2985
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2986
+ hclge_update_port_capability(mac);
2987
+ return 0;
2988
+ }
2989
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2990
+ HCLGE_MAC_FULL);
2991
+ } else {
2992
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2993
+ return 0; /* do nothing if no SFP */
2994
+
2995
+ /* must config full duplex for SFP */
2996
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2997
+ }
24402998 }
24412999
24423000 static int hclge_get_status(struct hnae3_handle *handle)
....@@ -2449,59 +3007,103 @@
24493007 return hdev->hw.mac.link;
24503008 }
24513009
2452
-static void hclge_service_timer(struct timer_list *t)
3010
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
24533011 {
2454
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
3012
+ if (!pci_num_vf(hdev->pdev)) {
3013
+ dev_err(&hdev->pdev->dev,
3014
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3015
+ return NULL;
3016
+ }
24553017
2456
- mod_timer(&hdev->service_timer, jiffies + HZ);
2457
- hdev->hw_stats.stats_timer++;
2458
- hclge_task_schedule(hdev);
3018
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3019
+ dev_err(&hdev->pdev->dev,
3020
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
3021
+ vf, pci_num_vf(hdev->pdev));
3022
+ return NULL;
3023
+ }
3024
+
3025
+ /* VF start from 1 in vport */
3026
+ vf += HCLGE_VF_VPORT_START_NUM;
3027
+ return &hdev->vport[vf];
24593028 }
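
The index mapping above is worth spelling out, since every VF-facing ndo below relies on it:

/* vport[0] is the PF and VFs follow it, so an ndo-level "vf 0" maps
 * to vport[1]. With pci_num_vf() == 4 (an assumed count), valid vf
 * ids are 0..3 -> vport[1..4]; both error paths return NULL, which
 * callers such as hclge_get_vf_config() turn into -EINVAL.
 */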
24603029
2461
-static void hclge_service_complete(struct hclge_dev *hdev)
3030
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3031
+ struct ifla_vf_info *ivf)
24623032 {
2463
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
3033
+ struct hclge_vport *vport = hclge_get_vport(handle);
3034
+ struct hclge_dev *hdev = vport->back;
24643035
2465
- /* Flush memory before next watchdog */
2466
- smp_mb__before_atomic();
2467
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3036
+ vport = hclge_get_vf_vport(hdev, vf);
3037
+ if (!vport)
3038
+ return -EINVAL;
3039
+
3040
+ ivf->vf = vf;
3041
+ ivf->linkstate = vport->vf_info.link_state;
3042
+ ivf->spoofchk = vport->vf_info.spoofchk;
3043
+ ivf->trusted = vport->vf_info.trusted;
3044
+ ivf->min_tx_rate = 0;
3045
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3046
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3047
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3048
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3049
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
3050
+
3051
+ return 0;
3052
+}
3053
+
3054
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3055
+ int link_state)
3056
+{
3057
+ struct hclge_vport *vport = hclge_get_vport(handle);
3058
+ struct hclge_dev *hdev = vport->back;
3059
+
3060
+ vport = hclge_get_vf_vport(hdev, vf);
3061
+ if (!vport)
3062
+ return -EINVAL;
3063
+
3064
+ vport->vf_info.link_state = link_state;
3065
+
3066
+ return 0;
24683067 }
24693068
24703069 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
24713070 {
2472
- u32 rst_src_reg;
2473
- u32 cmdq_src_reg;
3071
+ u32 cmdq_src_reg, msix_src_reg;
24743072
24753073 /* fetch the events from their corresponding regs */
2476
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
24773074 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3075
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
24783076
24793077 /* Assumption: If by any chance reset and mailbox events are reported
24803078	 * together, we will only process the reset event in this pass and will
24813079	 * defer the processing of the mailbox events. Since we would not have
24823080	 * cleared the RX CMDQ event this time, we would receive yet another
24833081	 * interrupt from H/W just for the mailbox.
3082
+ *
3083
+ * check for vector0 reset event sources
24843084 */
3085
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3086
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3087
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3088
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3089
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3090
+ hdev->rst_stats.imp_rst_cnt++;
3091
+ return HCLGE_VECTOR0_EVENT_RST;
3092
+ }
24853093
2486
- /* check for vector0 reset event sources */
2487
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3094
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3095
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
24883096 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
24893097 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
24903098 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3099
+ hdev->rst_stats.global_rst_cnt++;
24913100 return HCLGE_VECTOR0_EVENT_RST;
24923101 }
24933102
2494
- if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2495
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2496
- set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2497
- *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2498
- return HCLGE_VECTOR0_EVENT_RST;
2499
- }
2500
-
2501
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2502
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2503
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2504
- return HCLGE_VECTOR0_EVENT_RST;
3103
+ /* check for vector0 msix event source */
3104
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3105
+ *clearval = msix_src_reg;
3106
+ return HCLGE_VECTOR0_EVENT_ERR;
25053107 }
25063108
25073109 /* check for vector0 mailbox(=CMDQ RX) event source */
....@@ -2511,18 +3113,31 @@
25113113 return HCLGE_VECTOR0_EVENT_MBX;
25123114 }
25133115
3116
+ /* print other vector0 event source */
3117
+ dev_info(&hdev->pdev->dev,
3118
+ "CMDQ INT status:0x%x, other INT status:0x%x\n",
3119
+ cmdq_src_reg, msix_src_reg);
3120
+ *clearval = msix_src_reg;
3121
+
25143122 return HCLGE_VECTOR0_EVENT_OTHER;
25153123 }
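
The decode above fixes a priority order for the shared vector 0, highest first:

/* IMP reset > global reset > MSI-X/HW error > mailbox > other.
 * Both reset sources also set HCLGE_STATE_CMD_DISABLE so no new
 * commands are posted while the function-level state is unknown,
 * and mailbox processing is deferred until the reset is handled.
 */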
25163124
25173125 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
25183126 u32 regclr)
25193127 {
3128
+#define HCLGE_IMP_RESET_DELAY 5
3129
+
25203130 switch (event_type) {
25213131 case HCLGE_VECTOR0_EVENT_RST:
3132
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3133
+ mdelay(HCLGE_IMP_RESET_DELAY);
3134
+
25223135 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
25233136 break;
25243137 case HCLGE_VECTOR0_EVENT_MBX:
25253138 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3139
+ break;
3140
+ default:
25263141 break;
25273142 }
25283143 }
....@@ -2544,14 +3159,27 @@
25443159 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
25453160 {
25463161 struct hclge_dev *hdev = data;
3162
+ u32 clearval = 0;
25473163 u32 event_cause;
2548
- u32 clearval;
25493164
25503165 hclge_enable_vector(&hdev->misc_vector, false);
25513166 event_cause = hclge_check_event_cause(hdev, &clearval);
25523167
25533168 /* vector 0 interrupt is shared with reset and mailbox source events.*/
25543169 switch (event_cause) {
3170
+ case HCLGE_VECTOR0_EVENT_ERR:
3171
+ /* we do not know what type of reset is required now. This could
3172
+ * only be decided after we fetch the type of errors which
3173
+		 * caused this event. Therefore, we do the following for now:
3174
+ * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3175
+		 *    have deferred the type of reset to be used.
3176
+		 * 2. Schedule the reset service task.
3177
+		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3178
+ * will fetch the correct type of reset. This would be done
3179
+ * by first decoding the types of errors.
3180
+ */
3181
+ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3182
+ fallthrough;
25553183 case HCLGE_VECTOR0_EVENT_RST:
25563184 hclge_reset_task_schedule(hdev);
25573185 break;
....@@ -2573,9 +3201,15 @@
25733201 break;
25743202 }
25753203
2576
- /* clear the source of interrupt if it is not cause by reset */
2577
- if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2578
- hclge_clear_event_cause(hdev, event_cause, clearval);
3204
+ hclge_clear_event_cause(hdev, event_cause, clearval);
3205
+
3206
+	/* Enable the interrupt if it is not caused by reset. And when
3207
+	 * clearval equals 0, the interrupt status may have been
3208
+	 * cleared by hardware before the driver reads the status register.
3209
+	 * In this case, the vector0 interrupt should also be enabled.
3210
+ */
3211
+ if (!clearval ||
3212
+ event_cause == HCLGE_VECTOR0_EVENT_MBX) {
25793213 hclge_enable_vector(&hdev->misc_vector, true);
25803214 }
25813215
....@@ -2608,6 +3242,36 @@
26083242 hdev->num_msi_used += 1;
26093243 }
26103244
3245
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3246
+ const cpumask_t *mask)
3247
+{
3248
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3249
+ affinity_notify);
3250
+
3251
+ cpumask_copy(&hdev->affinity_mask, mask);
3252
+}
3253
+
3254
+static void hclge_irq_affinity_release(struct kref *ref)
3255
+{
3256
+}
3257
+
3258
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3259
+{
3260
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3261
+ &hdev->affinity_mask);
3262
+
3263
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3264
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
3265
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3266
+ &hdev->affinity_notify);
3267
+}
3268
+
3269
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3270
+{
3271
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3272
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3273
+}
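
Setup and teardown above must stay paired: the hint steers the misc vector toward the device's node, and the notifier keeps hdev->affinity_mask in sync when userspace rebalances IRQs. A sketch of the assumed call ordering around the misc IRQ lifecycle:

/*	hclge_misc_irq_init(hdev);
 *	hclge_misc_affinity_setup(hdev);
 *	...
 *	hclge_misc_affinity_teardown(hdev);	(NULL both before free_irq)
 *	hclge_misc_irq_uninit(hdev);
 */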
3274
+
26113275 static int hclge_misc_irq_init(struct hclge_dev *hdev)
26123276 {
26133277 int ret;
....@@ -2615,8 +3279,10 @@
26153279 hclge_get_misc_vector(hdev);
26163280
26173281 /* this would be explicitly freed in the end */
3282
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3283
+ HCLGE_NAME, pci_name(hdev->pdev));
26183284 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2619
- 0, "hclge_misc", hdev);
3285
+ 0, hdev->misc_vector.name, hdev);
26203286 if (ret) {
26213287 hclge_free_vector(hdev, 0);
26223288 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
....@@ -2632,11 +3298,14 @@
26323298 hclge_free_vector(hdev, 0);
26333299 }
26343300
2635
-static int hclge_notify_client(struct hclge_dev *hdev,
2636
- enum hnae3_reset_notify_type type)
3301
+int hclge_notify_client(struct hclge_dev *hdev,
3302
+ enum hnae3_reset_notify_type type)
26373303 {
26383304 struct hnae3_client *client = hdev->nic_client;
26393305 u16 i;
3306
+
3307
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3308
+ return 0;
26403309
26413310 if (!client->ops->reset_notify)
26423311 return -EOPNOTSUPP;
....@@ -2646,28 +3315,60 @@
26463315 int ret;
26473316
26483317 ret = client->ops->reset_notify(handle, type);
2649
- if (ret)
3318
+ if (ret) {
3319
+ dev_err(&hdev->pdev->dev,
3320
+ "notify nic client failed %d(%d)\n", type, ret);
26503321 return ret;
3322
+ }
26513323 }
26523324
26533325 return 0;
26543326 }
26553327
3328
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
3329
+ enum hnae3_reset_notify_type type)
3330
+{
3331
+ struct hnae3_client *client = hdev->roce_client;
3332
+ int ret;
3333
+ u16 i;
3334
+
3335
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3336
+ return 0;
3337
+
3338
+ if (!client->ops->reset_notify)
3339
+ return -EOPNOTSUPP;
3340
+
3341
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3342
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
3343
+
3344
+ ret = client->ops->reset_notify(handle, type);
3345
+ if (ret) {
3346
+ dev_err(&hdev->pdev->dev,
3347
+ "notify roce client failed %d(%d)",
3348
+ type, ret);
3349
+ return ret;
3350
+ }
3351
+ }
3352
+
3353
+ return ret;
3354
+}
3355
+
26563356 static int hclge_reset_wait(struct hclge_dev *hdev)
26573357 {
26583358 #define HCLGE_RESET_WATI_MS 100
2659
-#define HCLGE_RESET_WAIT_CNT 5
3359
+#define HCLGE_RESET_WAIT_CNT 350
3360
+
26603361 u32 val, reg, reg_bit;
26613362 u32 cnt = 0;
26623363
26633364 switch (hdev->reset_type) {
3365
+ case HNAE3_IMP_RESET:
3366
+ reg = HCLGE_GLOBAL_RESET_REG;
3367
+ reg_bit = HCLGE_IMP_RESET_BIT;
3368
+ break;
26643369 case HNAE3_GLOBAL_RESET:
26653370 reg = HCLGE_GLOBAL_RESET_REG;
26663371 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2667
- break;
2668
- case HNAE3_CORE_RESET:
2669
- reg = HCLGE_GLOBAL_RESET_REG;
2670
- reg_bit = HCLGE_CORE_RESET_BIT;
26713372 break;
26723373 case HNAE3_FUNC_RESET:
26733374 reg = HCLGE_FUN_RST_ING;
....@@ -2696,6 +3397,134 @@
26963397 return 0;
26973398 }
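
hclge_reset_wait() is a bounded poll: pick the status register and bit for the pending reset type, then spin in HCLGE_RESET_WATI_MS steps for up to HCLGE_RESET_WAIT_CNT tries. The generic form, as a sketch (the helper name and the -EBUSY choice are assumptions):

static int example_poll_bit_clear(struct hclge_dev *hdev, u32 reg,
				  u32 reg_bit)
{
	u32 cnt = 0;
	u32 val;

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt++ < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
	}

	return hnae3_get_bit(val, reg_bit) ? -EBUSY : 0;
}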
26983399
3400
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3401
+{
3402
+ struct hclge_vf_rst_cmd *req;
3403
+ struct hclge_desc desc;
3404
+
3405
+ req = (struct hclge_vf_rst_cmd *)desc.data;
3406
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3407
+ req->dest_vfid = func_id;
3408
+
3409
+ if (reset)
3410
+ req->vf_rst = 0x1;
3411
+
3412
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
3413
+}
3414
+
3415
+static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3416
+{
3417
+ int i;
3418
+
3419
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3420
+ struct hclge_vport *vport = &hdev->vport[i];
3421
+ int ret;
3422
+
3423
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
3424
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3425
+ if (ret) {
3426
+ dev_err(&hdev->pdev->dev,
3427
+ "set vf(%u) rst failed %d!\n",
3428
+ vport->vport_id, ret);
3429
+ return ret;
3430
+ }
3431
+
3432
+ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3433
+ continue;
3434
+
3435
+ /* Inform VF to process the reset.
3436
+ * hclge_inform_reset_assert_to_vf may fail if VF
3437
+ * driver is not loaded.
3438
+ */
3439
+ ret = hclge_inform_reset_assert_to_vf(vport);
3440
+ if (ret)
3441
+ dev_warn(&hdev->pdev->dev,
3442
+ "inform reset to vf(%u) failed %d!\n",
3443
+ vport->vport_id, ret);
3444
+ }
3445
+
3446
+ return 0;
3447
+}
3448
+
3449
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3450
+{
3451
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3452
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3453
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3454
+ return;
3455
+
3456
+ hclge_mbx_handler(hdev);
3457
+
3458
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3459
+}
3460
+
3461
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3462
+{
3463
+ struct hclge_pf_rst_sync_cmd *req;
3464
+ struct hclge_desc desc;
3465
+ int cnt = 0;
3466
+ int ret;
3467
+
3468
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3469
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3470
+
3471
+ do {
3472
+ /* vf need to down netdev by mbx during PF or FLR reset */
3473
+ hclge_mailbox_service_task(hdev);
3474
+
3475
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3476
+ /* for compatible with old firmware, wait
3477
+ * 100 ms for VF to stop IO
3478
+ */
3479
+ if (ret == -EOPNOTSUPP) {
3480
+ msleep(HCLGE_RESET_SYNC_TIME);
3481
+ return;
3482
+ } else if (ret) {
3483
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3484
+ ret);
3485
+ return;
3486
+ } else if (req->all_vf_ready) {
3487
+ return;
3488
+ }
3489
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
3490
+ hclge_cmd_reuse_desc(&desc, true);
3491
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3492
+
3493
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3494
+}
3495
+
3496
+void hclge_report_hw_error(struct hclge_dev *hdev,
3497
+ enum hnae3_hw_error_type type)
3498
+{
3499
+ struct hnae3_client *client = hdev->nic_client;
3500
+ u16 i;
3501
+
3502
+ if (!client || !client->ops->process_hw_error ||
3503
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3504
+ return;
3505
+
3506
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3507
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
3508
+}
3509
+
3510
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
3511
+{
3512
+ u32 reg_val;
3513
+
3514
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3515
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3516
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3517
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3518
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3519
+ }
3520
+
3521
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3522
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3523
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3524
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3525
+ }
3526
+}
3527
+
26993528 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
27003529 {
27013530 struct hclge_desc desc;
....@@ -2716,56 +3545,87 @@
27163545
27173546 static void hclge_do_reset(struct hclge_dev *hdev)
27183547 {
3548
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
27193549 struct pci_dev *pdev = hdev->pdev;
27203550 u32 val;
27213551
3552
+ if (hclge_get_hw_reset_stat(handle)) {
3553
+ dev_info(&pdev->dev, "hardware reset not finish\n");
3554
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3555
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3556
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3557
+ return;
3558
+ }
3559
+
27223560 switch (hdev->reset_type) {
27233561 case HNAE3_GLOBAL_RESET:
3562
+ dev_info(&pdev->dev, "global reset requested\n");
27243563 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
27253564 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
27263565 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2727
- dev_info(&pdev->dev, "Global Reset requested\n");
2728
- break;
2729
- case HNAE3_CORE_RESET:
2730
- val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2731
- hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2732
- hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2733
- dev_info(&pdev->dev, "Core Reset requested\n");
27343566 break;
27353567 case HNAE3_FUNC_RESET:
2736
- dev_info(&pdev->dev, "PF Reset requested\n");
2737
- hclge_func_reset_cmd(hdev, 0);
3568
+ dev_info(&pdev->dev, "PF reset requested\n");
27383569 /* schedule again to check later */
27393570 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
27403571 hclge_reset_task_schedule(hdev);
27413572 break;
27423573 default:
27433574 dev_warn(&pdev->dev,
2744
- "Unsupported reset type: %d\n", hdev->reset_type);
3575
+ "unsupported reset type: %d\n", hdev->reset_type);
27453576 break;
27463577 }
27473578 }
27483579
2749
-static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3580
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
27503581 unsigned long *addr)
27513582 {
27523583 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3584
+ struct hclge_dev *hdev = ae_dev->priv;
3585
+
3586
+ /* first, resolve any unknown reset type to the known type(s) */
3587
+ if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3588
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3589
+ HCLGE_MISC_VECTOR_INT_STS);
3590
+ /* we will intentionally ignore any errors from this function
3591
+ * as we will end up in *some* reset request in any case
3592
+ */
3593
+ if (hclge_handle_hw_msix_error(hdev, addr))
3594
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3595
+ msix_sts_reg);
3596
+
3597
+ clear_bit(HNAE3_UNKNOWN_RESET, addr);
3598
+ /* We deferred the clearing of the error event which caused
3599
+ * interrupt since it was not possible to do that in
3600
+ * interrupt context (and this is the reason we introduced
3601
+ * new UNKNOWN reset type). Now that the errors have been
3602
+ * handled and cleared in hardware, we can safely enable
3603
+ * interrupts. This is an exception to the norm.
3604
+ */
3605
+ hclge_enable_vector(&hdev->misc_vector, true);
3606
+ }
27533607
27543608 /* return the highest priority reset level amongst all */
2755
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
2756
- rst_level = HNAE3_GLOBAL_RESET;
2757
- else if (test_bit(HNAE3_CORE_RESET, addr))
2758
- rst_level = HNAE3_CORE_RESET;
2759
- else if (test_bit(HNAE3_IMP_RESET, addr))
3609
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
27603610 rst_level = HNAE3_IMP_RESET;
2761
- else if (test_bit(HNAE3_FUNC_RESET, addr))
3611
+ clear_bit(HNAE3_IMP_RESET, addr);
3612
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
3613
+ clear_bit(HNAE3_FUNC_RESET, addr);
3614
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3615
+ rst_level = HNAE3_GLOBAL_RESET;
3616
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
3617
+ clear_bit(HNAE3_FUNC_RESET, addr);
3618
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
27623619 rst_level = HNAE3_FUNC_RESET;
3620
+ clear_bit(HNAE3_FUNC_RESET, addr);
3621
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3622
+ rst_level = HNAE3_FLR_RESET;
3623
+ clear_bit(HNAE3_FLR_RESET, addr);
3624
+ }
27633625
2764
- /* now, clear all other resets */
2765
- clear_bit(HNAE3_GLOBAL_RESET, addr);
2766
- clear_bit(HNAE3_CORE_RESET, addr);
2767
- clear_bit(HNAE3_IMP_RESET, addr);
2768
- clear_bit(HNAE3_FUNC_RESET, addr);
3626
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
3627
+ rst_level < hdev->reset_type)
3628
+ return HNAE3_NONE_RESET;
27693629
27703630 return rst_level;
27713631 }
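hclge_get_reset_level() above resolves pending requests strictly by severity (IMP, then global, then function, then FLR); taking a higher level also clears the milder requests it subsumes, and a request milder than the reset already in progress yields HNAE3_NONE_RESET. A compact sketch of that bitmap resolution, with an illustrative enum ordered by priority (the driver keeps FLR outside the subsumption chain; this sketch simplifies by subsuming everything milder):

#include <stdio.h>

/* illustrative reset levels, highest severity first; the real enum
 * (hnae3_reset_type) and its ordering live in hnae3.h
 */
enum reset_level { RST_IMP, RST_GLOBAL, RST_FUNC, RST_FLR, RST_NONE };

static enum reset_level resolve_reset(unsigned long *pending)
{
	enum reset_level lvl;

	/* scan from the most severe level down; the first pending bit wins */
	for (lvl = RST_IMP; lvl <= RST_FLR; lvl++) {
		if (*pending & (1UL << lvl)) {
			enum reset_level sub;

			/* the chosen reset subsumes every milder request */
			for (sub = lvl; sub <= RST_FLR; sub++)
				*pending &= ~(1UL << sub);
			return lvl;
		}
	}
	return RST_NONE;
}

int main(void)
{
	unsigned long pending = (1UL << RST_GLOBAL) | (1UL << RST_FUNC);

	/* global wins and the pending function reset is dropped with it */
	printf("level=%d pending=%#lx\n", resolve_reset(&pending), pending);
	return 0;
}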
....@@ -2781,9 +3641,6 @@
27813641 case HNAE3_GLOBAL_RESET:
27823642 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
27833643 break;
2784
- case HNAE3_CORE_RESET:
2785
- clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2786
- break;
27873644 default:
27883645 break;
27893646 }
....@@ -2791,45 +3648,302 @@
27913648 if (!clearval)
27923649 return;
27933650
2794
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3651
+ /* For revision 0x20, the reset interrupt source
3652
+ * can only be cleared after the hardware reset is done
3653
+ */
3654
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3655
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3656
+ clearval);
3657
+
27953658 hclge_enable_vector(&hdev->misc_vector, true);
3659
+}
3660
+
3661
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3662
+{
3663
+ u32 reg_val;
3664
+
3665
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3666
+ if (enable)
3667
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
3668
+ else
3669
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3670
+
3671
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3672
+}
3673
+
3674
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3675
+{
3676
+ int ret;
3677
+
3678
+ ret = hclge_set_all_vf_rst(hdev, true);
3679
+ if (ret)
3680
+ return ret;
3681
+
3682
+ hclge_func_reset_sync_vf(hdev);
3683
+
3684
+ return 0;
3685
+}
3686
+
3687
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3688
+{
3689
+ u32 reg_val;
3690
+ int ret = 0;
3691
+
3692
+ switch (hdev->reset_type) {
3693
+ case HNAE3_FUNC_RESET:
3694
+ ret = hclge_func_reset_notify_vf(hdev);
3695
+ if (ret)
3696
+ return ret;
3697
+
3698
+ ret = hclge_func_reset_cmd(hdev, 0);
3699
+ if (ret) {
3700
+ dev_err(&hdev->pdev->dev,
3701
+ "asserting function reset fail %d!\n", ret);
3702
+ return ret;
3703
+ }
3704
+
3705
+ /* After performing pf reset, it is not necessary to do the
3706
+ * mailbox handling or send any command to firmware, because
3707
+ * any mailbox handling or command to firmware is only valid
3708
+ * after hclge_cmd_init is called.
3709
+ */
3710
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3711
+ hdev->rst_stats.pf_rst_cnt++;
3712
+ break;
3713
+ case HNAE3_FLR_RESET:
3714
+ ret = hclge_func_reset_notify_vf(hdev);
3715
+ if (ret)
3716
+ return ret;
3717
+ break;
3718
+ case HNAE3_IMP_RESET:
3719
+ hclge_handle_imp_error(hdev);
3720
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3721
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3722
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3723
+ break;
3724
+ default:
3725
+ break;
3726
+ }
3727
+
3728
+ /* inform hardware that preparatory work is done */
3729
+ msleep(HCLGE_RESET_SYNC_TIME);
3730
+ hclge_reset_handshake(hdev, true);
3731
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3732
+
3733
+ return ret;
3734
+}
3735
+
3736
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3737
+{
3738
+#define MAX_RESET_FAIL_CNT 5
3739
+
3740
+ if (hdev->reset_pending) {
3741
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3742
+ hdev->reset_pending);
3743
+ return true;
3744
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3745
+ HCLGE_RESET_INT_M) {
3746
+ dev_info(&hdev->pdev->dev,
3747
+ "reset failed because new reset interrupt\n");
3748
+ hclge_clear_reset_cause(hdev);
3749
+ return false;
3750
+ } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3751
+ hdev->rst_stats.reset_fail_cnt++;
3752
+ set_bit(hdev->reset_type, &hdev->reset_pending);
3753
+ dev_info(&hdev->pdev->dev,
3754
+ "re-schedule reset task(%u)\n",
3755
+ hdev->rst_stats.reset_fail_cnt);
3756
+ return true;
3757
+ }
3758
+
3759
+ hclge_clear_reset_cause(hdev);
3760
+
3761
+ /* recover the handshake status when reset fails */
3762
+ hclge_reset_handshake(hdev, true);
3763
+
3764
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
3765
+
3766
+ hclge_dbg_dump_rst_info(hdev);
3767
+
3768
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3769
+
3770
+ return false;
3771
+}
3772
+
3773
+static int hclge_set_rst_done(struct hclge_dev *hdev)
3774
+{
3775
+ struct hclge_pf_rst_done_cmd *req;
3776
+ struct hclge_desc desc;
3777
+ int ret;
3778
+
3779
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
3780
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3781
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3782
+
3783
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3784
+ /* To be compatible with the old firmware, which does not support
3785
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3786
+ * return success
3787
+ */
3788
+ if (ret == -EOPNOTSUPP) {
3789
+ dev_warn(&hdev->pdev->dev,
3790
+ "current firmware does not support command(0x%x)!\n",
3791
+ HCLGE_OPC_PF_RST_DONE);
3792
+ return 0;
3793
+ } else if (ret) {
3794
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3795
+ ret);
3796
+ }
3797
+
3798
+ return ret;
3799
+}
3800
+
3801
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3802
+{
3803
+ int ret = 0;
3804
+
3805
+ switch (hdev->reset_type) {
3806
+ case HNAE3_FUNC_RESET:
3807
+ case HNAE3_FLR_RESET:
3808
+ ret = hclge_set_all_vf_rst(hdev, false);
3809
+ break;
3810
+ case HNAE3_GLOBAL_RESET:
3811
+ case HNAE3_IMP_RESET:
3812
+ ret = hclge_set_rst_done(hdev);
3813
+ break;
3814
+ default:
3815
+ break;
3816
+ }
3817
+
3818
+ /* clear up the handshake status after re-initialization is done */
3819
+ hclge_reset_handshake(hdev, false);
3820
+
3821
+ return ret;
3822
+}
3823
+
3824
+static int hclge_reset_stack(struct hclge_dev *hdev)
3825
+{
3826
+ int ret;
3827
+
3828
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3829
+ if (ret)
3830
+ return ret;
3831
+
3832
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
3833
+ if (ret)
3834
+ return ret;
3835
+
3836
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3837
+}
3838
+
3839
+static int hclge_reset_prepare(struct hclge_dev *hdev)
3840
+{
3841
+ int ret;
3842
+
3843
+ hdev->rst_stats.reset_cnt++;
3844
+ /* perform reset of the stack & ae device for a client */
3845
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3846
+ if (ret)
3847
+ return ret;
3848
+
3849
+ rtnl_lock();
3850
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3851
+ rtnl_unlock();
3852
+ if (ret)
3853
+ return ret;
3854
+
3855
+ return hclge_reset_prepare_wait(hdev);
3856
+}
3857
+
3858
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
3859
+{
3860
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3861
+ enum hnae3_reset_type reset_level;
3862
+ int ret;
3863
+
3864
+ hdev->rst_stats.hw_reset_done_cnt++;
3865
+
3866
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3867
+ if (ret)
3868
+ return ret;
3869
+
3870
+ rtnl_lock();
3871
+ ret = hclge_reset_stack(hdev);
3872
+ rtnl_unlock();
3873
+ if (ret)
3874
+ return ret;
3875
+
3876
+ hclge_clear_reset_cause(hdev);
3877
+
3878
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3879
+ /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3880
+ * times
3881
+ */
3882
+ if (ret &&
3883
+ hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3884
+ return ret;
3885
+
3886
+ ret = hclge_reset_prepare_up(hdev);
3887
+ if (ret)
3888
+ return ret;
3889
+
3890
+ rtnl_lock();
3891
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3892
+ rtnl_unlock();
3893
+ if (ret)
3894
+ return ret;
3895
+
3896
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3897
+ if (ret)
3898
+ return ret;
3899
+
3900
+ hdev->last_reset_time = jiffies;
3901
+ hdev->rst_stats.reset_fail_cnt = 0;
3902
+ hdev->rst_stats.reset_done_cnt++;
3903
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3904
+
3905
+ /* if default_reset_request has a higher level reset request,
3906
+ * it should be handled as soon as possible, since some errors
3907
+ * need this kind of reset to fix.
3908
+ */
3909
+ reset_level = hclge_get_reset_level(ae_dev,
3910
+ &hdev->default_reset_request);
3911
+ if (reset_level != HNAE3_NONE_RESET)
3912
+ set_bit(reset_level, &hdev->reset_request);
3913
+
3914
+ return 0;
27963915 }
27973916
27983917 static void hclge_reset(struct hclge_dev *hdev)
27993918 {
2800
- struct hnae3_handle *handle;
3919
+ if (hclge_reset_prepare(hdev))
3920
+ goto err_reset;
28013921
2802
- /* perform reset of the stack & ae device for a client */
2803
- handle = &hdev->vport[0].nic;
2804
- rtnl_lock();
2805
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2806
- rtnl_unlock();
3922
+ if (hclge_reset_wait(hdev))
3923
+ goto err_reset;
28073924
2808
- if (!hclge_reset_wait(hdev)) {
2809
- rtnl_lock();
2810
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2811
- hclge_reset_ae_dev(hdev->ae_dev);
2812
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3925
+ if (hclge_reset_rebuild(hdev))
3926
+ goto err_reset;
28133927
2814
- hclge_clear_reset_cause(hdev);
2815
- } else {
2816
- rtnl_lock();
2817
- /* schedule again to check pending resets later */
2818
- set_bit(hdev->reset_type, &hdev->reset_pending);
3928
+ return;
3929
+
3930
+err_reset:
3931
+ if (hclge_reset_err_handle(hdev))
28193932 hclge_reset_task_schedule(hdev);
2820
- }
2821
-
2822
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2823
- handle->last_reset_time = jiffies;
2824
- rtnl_unlock();
28253933 }
28263934
2827
-static void hclge_reset_event(struct hnae3_handle *handle)
3935
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
28283936 {
2829
- struct hclge_vport *vport = hclge_get_vport(handle);
2830
- struct hclge_dev *hdev = vport->back;
3937
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3938
+ struct hclge_dev *hdev = ae_dev->priv;
28313939
2832
- /* check if this is a new reset request and we are not here just because
3940
+ /* We might end up getting called broadly because of the 2 cases below:
3941
+ * 1. A recoverable error was conveyed through APEI and the only way to
3942
+ * bring back normalcy is to reset.
3943
+ * 2. A new reset request from the stack due to timeout
3944
+ *
3945
+ * For the first case, the error event might not have an ae handle available.
3946
+ * Check if this is a new reset request and we are not here just because
28333947 * last reset attempt did not succeed and watchdog hit us again. We will
28343948 * know this if last reset request did not occur very recently (watchdog
28353949 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
....@@ -2838,24 +3952,59 @@
28383952 * want to make sure we throttle the reset request. Therefore, we will
28393953 * not allow it again before 3*HZ has elapsed.
28403954 */
2841
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
2842
- return;
2843
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2844
- handle->reset_level = HNAE3_FUNC_RESET;
3955
+ if (!handle)
3956
+ handle = &hdev->vport[0].nic;
28453957
2846
- dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
2847
- handle->reset_level);
3958
+ if (time_before(jiffies, (hdev->last_reset_time +
3959
+ HCLGE_RESET_INTERVAL))) {
3960
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3961
+ return;
3962
+ } else if (hdev->default_reset_request) {
3963
+ hdev->reset_level =
3964
+ hclge_get_reset_level(ae_dev,
3965
+ &hdev->default_reset_request);
3966
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3967
+ hdev->reset_level = HNAE3_FUNC_RESET;
3968
+ }
3969
+
3970
+ dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3971
+ hdev->reset_level);
28483972
28493973 /* request reset & schedule reset task */
2850
- set_bit(handle->reset_level, &hdev->reset_request);
3974
+ set_bit(hdev->reset_level, &hdev->reset_request);
28513975 hclge_reset_task_schedule(hdev);
28523976
2853
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
2854
- handle->reset_level++;
3977
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3978
+ hdev->reset_level++;
3979
+}
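hclge_reset_event() debounces rather than drops: a request arriving within HCLGE_RESET_INTERVAL of the last reset only re-arms the reset timer, a long quiet period (4 * 5 * HZ) decays the escalation back to a plain function reset, and each accepted request escalates the level for the next one. A minimal sketch of that policy with second-granularity timestamps; reset_ctl and the constants below are illustrative stand-ins:

#include <stdbool.h>
#include <time.h>

#define RESET_THROTTLE_S 3    /* stand-in for HCLGE_RESET_INTERVAL */
#define RESET_DECAY_S (4 * 5) /* quiet period that resets escalation */

struct reset_ctl {
	time_t last_reset;
	int level; /* escalates on every accepted request */
};

/* returns true if the request may proceed now, false if it must be
 * retried later (the driver arms a timer in that case)
 */
static bool reset_request(struct reset_ctl *ctl, time_t now)
{
	if (now < ctl->last_reset + RESET_THROTTLE_S)
		return false; /* too soon: defer, do not drop */

	if (now > ctl->last_reset + RESET_DECAY_S)
		ctl->level = 0; /* long quiet period: de-escalate */

	ctl->level++; /* each accepted request escalates the next one */
	return true;
}

int main(void)
{
	struct reset_ctl ctl = { .last_reset = 0, .level = 0 };

	reset_request(&ctl, 100);
	ctl.last_reset = 100;
	/* a second request 1s later is throttled, not dropped */
	return reset_request(&ctl, 101) ? 1 : 0;
}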
3980
+
3981
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3982
+ enum hnae3_reset_type rst_type)
3983
+{
3984
+ struct hclge_dev *hdev = ae_dev->priv;
3985
+
3986
+ set_bit(rst_type, &hdev->default_reset_request);
3987
+}
3988
+
3989
+static void hclge_reset_timer(struct timer_list *t)
3990
+{
3991
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3992
+
3993
+ /* if default_reset_request has no value, it means that this reset
3994
+ * request has already been handled, so just return here
3995
+ */
3996
+ if (!hdev->default_reset_request)
3997
+ return;
3998
+
3999
+ dev_info(&hdev->pdev->dev,
4000
+ "triggering reset in reset timer\n");
4001
+ hclge_reset_event(hdev->pdev, NULL);
28554002 }
28564003
28574004 static void hclge_reset_subtask(struct hclge_dev *hdev)
28584005 {
4006
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4007
+
28594008 /* check if there is any ongoing reset in the hardware. This status can
28604009 * be checked from reset_pending. If there is, then we need to wait for
28614010 * hardware to complete reset.
....@@ -2865,61 +4014,111 @@
28654014 * b. else, we can come back later to check this status so re-sched
28664015 * now.
28674016 */
2868
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
4017
+ hdev->last_reset_time = jiffies;
4018
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
28694019 if (hdev->reset_type != HNAE3_NONE_RESET)
28704020 hclge_reset(hdev);
28714021
28724022 /* check if we got any *new* reset requests to be honored */
2873
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
4023
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
28744024 if (hdev->reset_type != HNAE3_NONE_RESET)
28754025 hclge_do_reset(hdev);
28764026
28774027 hdev->reset_type = HNAE3_NONE_RESET;
28784028 }
28794029
2880
-static void hclge_reset_service_task(struct work_struct *work)
4030
+static void hclge_reset_service_task(struct hclge_dev *hdev)
28814031 {
2882
- struct hclge_dev *hdev =
2883
- container_of(work, struct hclge_dev, rst_service_task);
2884
-
2885
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4032
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
28864033 return;
28874034
2888
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
4035
+ down(&hdev->reset_sem);
4036
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
28894037
28904038 hclge_reset_subtask(hdev);
28914039
28924040 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4041
+ up(&hdev->reset_sem);
28934042 }
28944043
2895
-static void hclge_mailbox_service_task(struct work_struct *work)
4044
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
28964045 {
2897
- struct hclge_dev *hdev =
2898
- container_of(work, struct hclge_dev, mbx_service_task);
4046
+ int i;
28994047
2900
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
4048
+ /* start from vport 1; the PF is always alive */
4049
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
4050
+ struct hclge_vport *vport = &hdev->vport[i];
4051
+
4052
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4053
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4054
+
4055
+ /* If vf is not alive, set to default value */
4056
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4057
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4058
+ }
4059
+}
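hclge_update_vport_alive() is a plain last-seen keepalive: a VF that has not refreshed last_active_jiffies within 8 * HZ loses its ALIVE bit, and a dead VF has its MPS forced back to HCLGE_MAC_DEFAULT_FRAME. A sketch of the same aging step with second-granularity timestamps (vport_state and the names here are illustrative):

#include <stdbool.h>
#include <time.h>

#define ALIVE_TIMEOUT_S 8 /* mirrors the 8 * HZ window above */

struct vport_state {
	time_t last_active; /* refreshed whenever the VF sends a mailbox msg */
	bool alive;
	int mps;
};

static void age_vport(struct vport_state *v, time_t now, int default_mps)
{
	if (now > v->last_active + ALIVE_TIMEOUT_S)
		v->alive = false;

	/* a dead VF falls back to the default frame size */
	if (!v->alive)
		v->mps = default_mps;
}

int main(void)
{
	struct vport_state v = { .last_active = 0, .alive = true, .mps = 9000 };

	age_vport(&v, 9, 1500); /* 9 s of silence: VF aged out, mps reset */
	return v.alive;
}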
4060
+
4061
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
4062
+{
4063
+ unsigned long delta = round_jiffies_relative(HZ);
4064
+
4065
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
29014066 return;
29024067
2903
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
4068
+ /* Always handle the link updating to make sure link state is
4069
+ * updated when it is triggered by mbx.
4070
+ */
4071
+ hclge_update_link_status(hdev);
4072
+ hclge_sync_mac_table(hdev);
4073
+ hclge_sync_promisc_mode(hdev);
29044074
2905
- hclge_mbx_handler(hdev);
4075
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4076
+ delta = jiffies - hdev->last_serv_processed;
29064077
2907
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
4078
+ if (delta < round_jiffies_relative(HZ)) {
4079
+ delta = round_jiffies_relative(HZ) - delta;
4080
+ goto out;
4081
+ }
4082
+ }
4083
+
4084
+ hdev->serv_processed_cnt++;
4085
+ hclge_update_vport_alive(hdev);
4086
+
4087
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4088
+ hdev->last_serv_processed = jiffies;
4089
+ goto out;
4090
+ }
4091
+
4092
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4093
+ hclge_update_stats_for_all(hdev);
4094
+
4095
+ hclge_update_port_info(hdev);
4096
+ hclge_sync_vlan_filter(hdev);
4097
+
4098
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4099
+ hclge_rfs_filter_expire(hdev);
4100
+
4101
+ hdev->last_serv_processed = jiffies;
4102
+
4103
+out:
4104
+ hclge_task_schedule(hdev, delta);
29084105 }
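The delta handling above rate-limits the heavyweight periodic work to roughly one full pass per second: when less than a period has elapsed since the last full pass, only the always-on work (link status, MAC table and promisc sync) runs and the task is re-armed for the remainder. A sketch of that gating, assuming millisecond timestamps (service_tick is an illustrative name):

#include <stdio.h>

#define PERIOD_MS 1000 /* the driver uses round_jiffies_relative(HZ) */

/* returns the delay to re-arm the task with; heavy work runs only when
 * a full period has elapsed since the last full pass
 */
static long service_tick(long now_ms, long *last_full_ms)
{
	long elapsed = now_ms - *last_full_ms;

	if (elapsed < PERIOD_MS)
		return PERIOD_MS - elapsed; /* come back for the remainder */

	/* ... heavyweight work (stats, vlan sync, port info) goes here ... */
	*last_full_ms = now_ms;
	return PERIOD_MS;
}

int main(void)
{
	long last = 0;

	printf("%ld\n", service_tick(300, &last));  /* 700: too early */
	printf("%ld\n", service_tick(1200, &last)); /* 1000: full pass ran */
	return 0;
}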
29094106
29104107 static void hclge_service_task(struct work_struct *work)
29114108 {
29124109 struct hclge_dev *hdev =
2913
- container_of(work, struct hclge_dev, service_task);
4110
+ container_of(work, struct hclge_dev, service_task.work);
29144111
2915
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2916
- hclge_update_stats_for_all(hdev);
2917
- hdev->hw_stats.stats_timer = 0;
2918
- }
4112
+ hclge_reset_service_task(hdev);
4113
+ hclge_mailbox_service_task(hdev);
4114
+ hclge_periodic_service_task(hdev);
29194115
2920
- hclge_update_speed_duplex(hdev);
2921
- hclge_update_link_status(hdev);
2922
- hclge_service_complete(hdev);
4116
+ /* Handle reset and mbx again in case the periodic task delays the
4117
+ * handling by calling hclge_task_schedule() in
4118
+ * hclge_periodic_service_task().
4119
+ */
4120
+ hclge_reset_service_task(hdev);
4121
+ hclge_mailbox_service_task(hdev);
29234122 }
29244123
29254124 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
....@@ -2942,6 +4141,7 @@
29424141 int alloc = 0;
29434142 int i, j;
29444143
4144
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
29454145 vector_num = min(hdev->num_msi_left, vector_num);
29464146
29474147 for (j = 0; j < vector_num; j++) {
....@@ -2989,7 +4189,7 @@
29894189 vector_id = hclge_get_vector_index(hdev, vector);
29904190 if (vector_id < 0) {
29914191 dev_err(&hdev->pdev->dev,
2992
- "Get vector index fail. vector_id =%d\n", vector_id);
4192
+ "Get vector index fail. vector = %d\n", vector);
29934193 return vector_id;
29944194 }
29954195
....@@ -3012,29 +4212,28 @@
30124212 const u8 hfunc, const u8 *key)
30134213 {
30144214 struct hclge_rss_config_cmd *req;
4215
+ unsigned int key_offset = 0;
30154216 struct hclge_desc desc;
3016
- int key_offset;
4217
+ int key_counts;
30174218 int key_size;
30184219 int ret;
30194220
4221
+ key_counts = HCLGE_RSS_KEY_SIZE;
30204222 req = (struct hclge_rss_config_cmd *)desc.data;
30214223
3022
- for (key_offset = 0; key_offset < 3; key_offset++) {
4224
+ while (key_counts) {
30234225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
30244226 false);
30254227
30264228 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
30274229 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
30284230
3029
- if (key_offset == 2)
3030
- key_size =
3031
- HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3032
- else
3033
- key_size = HCLGE_RSS_HASH_KEY_NUM;
3034
-
4231
+ key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
30354232 memcpy(req->hash_key,
30364233 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
30374234
4235
+ key_counts -= key_size;
4236
+ key_offset++;
30384237 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
30394238 if (ret) {
30404239 dev_err(&hdev->pdev->dev,
....@@ -3109,6 +4308,22 @@
31094308 return ret;
31104309 }
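The RSS key download loop in the hunk above (hclge_set_rss_algo_key) replaces three hard-coded iterations with a generic walk: each descriptor carries at most HCLGE_RSS_HASH_KEY_NUM bytes of key plus the chunk offset, so the last chunk may be short. A standalone sketch of the chunking; the 40-byte key and 16-byte chunks below are assumptions about HCLGE_RSS_KEY_SIZE and HCLGE_RSS_HASH_KEY_NUM:

#include <stdio.h>
#include <string.h>

#define KEY_SIZE 40   /* assumed HCLGE_RSS_KEY_SIZE */
#define CHUNK_SIZE 16 /* assumed HCLGE_RSS_HASH_KEY_NUM */

int main(void)
{
	unsigned char key[KEY_SIZE] = { 0 };
	unsigned char chunk[CHUNK_SIZE];
	int remaining = KEY_SIZE;
	int offset = 0;

	while (remaining) {
		/* last chunk may be short: 40 bytes -> 16 + 16 + 8 */
		int len = remaining < CHUNK_SIZE ? remaining : CHUNK_SIZE;

		memcpy(chunk, key + offset * CHUNK_SIZE, len);
		printf("send chunk %d: %d bytes\n", offset, len);

		remaining -= len;
		offset++;
	}
	return 0;
}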
31114310
4311
+static void hclge_get_rss_type(struct hclge_vport *vport)
4312
+{
4313
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
4314
+ vport->rss_tuple_sets.ipv4_udp_en ||
4315
+ vport->rss_tuple_sets.ipv4_sctp_en ||
4316
+ vport->rss_tuple_sets.ipv6_tcp_en ||
4317
+ vport->rss_tuple_sets.ipv6_udp_en ||
4318
+ vport->rss_tuple_sets.ipv6_sctp_en)
4319
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4320
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4321
+ vport->rss_tuple_sets.ipv6_fragment_en)
4322
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4323
+ else
4324
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4325
+}
4326
+
31124327 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
31134328 {
31144329 struct hclge_rss_input_tuple_cmd *req;
....@@ -3128,6 +4343,7 @@
31284343 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
31294344 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
31304345 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4346
+ hclge_get_rss_type(&hdev->vport[0]);
31314347 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
31324348 if (ret)
31334349 dev_err(&hdev->pdev->dev,
....@@ -3142,8 +4358,19 @@
31424358 int i;
31434359
31444360 /* Get hash algorithm */
3145
- if (hfunc)
3146
- *hfunc = vport->rss_algo;
4361
+ if (hfunc) {
4362
+ switch (vport->rss_algo) {
4363
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4364
+ *hfunc = ETH_RSS_HASH_TOP;
4365
+ break;
4366
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
4367
+ *hfunc = ETH_RSS_HASH_XOR;
4368
+ break;
4369
+ default:
4370
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
4371
+ break;
4372
+ }
4373
+ }
31474374
31484375 /* Get the RSS Key required by the user */
31494376 if (key)
....@@ -3157,6 +4384,24 @@
31574384 return 0;
31584385 }
31594386
4387
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4388
+ u8 *hash_algo)
4389
+{
4390
+ switch (hfunc) {
4391
+ case ETH_RSS_HASH_TOP:
4392
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4393
+ return 0;
4394
+ case ETH_RSS_HASH_XOR:
4395
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4396
+ return 0;
4397
+ case ETH_RSS_HASH_NO_CHANGE:
4398
+ *hash_algo = vport->rss_algo;
4399
+ return 0;
4400
+ default:
4401
+ return -EINVAL;
4402
+ }
4403
+}
4404
+
31604405 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
31614406 const u8 *key, const u8 hfunc)
31624407 {
....@@ -3165,22 +4410,27 @@
31654410 u8 hash_algo;
31664411 int ret, i;
31674412
4413
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4414
+ if (ret) {
4415
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4416
+ return ret;
4417
+ }
4418
+
31684419 /* Set the RSS Hash Key if specified by the user */
31694420 if (key) {
3170
-
3171
- if (hfunc == ETH_RSS_HASH_TOP ||
3172
- hfunc == ETH_RSS_HASH_NO_CHANGE)
3173
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3174
- else
3175
- return -EINVAL;
31764421 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
31774422 if (ret)
31784423 return ret;
31794424
31804425 /* Update the shadow RSS key with the user specified key */
31814426 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3182
- vport->rss_algo = hash_algo;
4427
+ } else {
4428
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
4429
+ vport->rss_hash_key);
4430
+ if (ret)
4431
+ return ret;
31834432 }
4433
+ vport->rss_algo = hash_algo;
31844434
31854435 /* Update the shadow RSS table with user specified qids */
31864436 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
....@@ -3259,8 +4509,8 @@
32594509 req->ipv4_sctp_en = tuple_sets;
32604510 break;
32614511 case SCTP_V6_FLOW:
3262
- if ((nfc->data & RXH_L4_B_0_1) ||
3263
- (nfc->data & RXH_L4_B_2_3))
4512
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4513
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
32644514 return -EINVAL;
32654515
32664516 req->ipv6_sctp_en = tuple_sets;
....@@ -3290,6 +4540,7 @@
32904540 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
32914541 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
32924542 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4543
+ hclge_get_rss_type(vport);
32934544 return 0;
32944545 }
32954546
....@@ -3356,13 +4607,14 @@
33564607 struct hclge_vport *vport = hdev->vport;
33574608 u8 *rss_indir = vport[0].rss_indirection_tbl;
33584609 u16 rss_size = vport[0].alloc_rss_size;
4610
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4611
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
33594612 u8 *key = vport[0].rss_hash_key;
33604613 u8 hfunc = vport[0].rss_algo;
3361
- u16 tc_offset[HCLGE_MAX_TC_NUM];
33624614 u16 tc_valid[HCLGE_MAX_TC_NUM];
3363
- u16 tc_size[HCLGE_MAX_TC_NUM];
33644615 u16 roundup_size;
3365
- int i, ret;
4616
+ unsigned int i;
4617
+ int ret;
33664618
33674619 ret = hclge_set_rss_indir_table(hdev, rss_indir);
33684620 if (ret)
....@@ -3382,7 +4634,7 @@
33824634 */
33834635 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
33844636 dev_err(&hdev->pdev->dev,
3385
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4637
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
33864638 rss_size);
33874639 return -EINVAL;
33884640 }
....@@ -3418,8 +4670,11 @@
34184670
34194671 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
34204672 {
4673
+ int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
34214674 struct hclge_vport *vport = hdev->vport;
3422
- int i;
4675
+
4676
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4677
+ rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
34234678
34244679 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
34254680 vport[i].rss_tuple_sets.ipv4_tcp_en =
....@@ -3435,13 +4690,16 @@
34354690 vport[i].rss_tuple_sets.ipv6_udp_en =
34364691 HCLGE_RSS_INPUT_TUPLE_OTHER;
34374692 vport[i].rss_tuple_sets.ipv6_sctp_en =
4693
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4694
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
34384695 HCLGE_RSS_INPUT_TUPLE_SCTP;
34394696 vport[i].rss_tuple_sets.ipv6_fragment_en =
34404697 HCLGE_RSS_INPUT_TUPLE_OTHER;
34414698
3442
- vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4699
+ vport[i].rss_algo = rss_algo;
34434700
3444
- netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
4701
+ memcpy(vport[i].rss_hash_key, hclge_hash_key,
4702
+ HCLGE_RSS_KEY_SIZE);
34454703 }
34464704
34474705 hclge_rss_indir_init_cfg(hdev);
....@@ -3454,8 +4712,8 @@
34544712 struct hclge_dev *hdev = vport->back;
34554713 struct hnae3_ring_chain_node *node;
34564714 struct hclge_desc desc;
3457
- struct hclge_ctrl_vector_chain_cmd *req
3458
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4715
+ struct hclge_ctrl_vector_chain_cmd *req =
4716
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
34594717 enum hclge_cmd_status status;
34604718 enum hclge_opcode_type op;
34614719 u16 tqp_type_and_id;
....@@ -3513,8 +4771,7 @@
35134771 return 0;
35144772 }
35154773
3516
-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3517
- int vector,
4774
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
35184775 struct hnae3_ring_chain_node *ring_chain)
35194776 {
35204777 struct hclge_vport *vport = hclge_get_vport(handle);
....@@ -3524,15 +4781,14 @@
35244781 vector_id = hclge_get_vector_index(hdev, vector);
35254782 if (vector_id < 0) {
35264783 dev_err(&hdev->pdev->dev,
3527
- "Get vector index fail. vector_id =%d\n", vector_id);
4784
+ "failed to get vector index. vector=%d\n", vector);
35284785 return vector_id;
35294786 }
35304787
35314788 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
35324789 }
35334790
3534
-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3535
- int vector,
4791
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
35364792 struct hnae3_ring_chain_node *ring_chain)
35374793 {
35384794 struct hclge_vport *vport = hclge_get_vport(handle);
....@@ -3553,14 +4809,13 @@
35534809 if (ret)
35544810 dev_err(&handle->pdev->dev,
35554811 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3556
- vector_id,
3557
- ret);
4812
+ vector_id, ret);
35584813
35594814 return ret;
35604815 }
35614816
3562
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3563
- struct hclge_promisc_param *param)
4817
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4818
+ struct hclge_promisc_param *param)
35644819 {
35654820 struct hclge_promisc_cfg_cmd *req;
35664821 struct hclge_desc desc;
....@@ -3582,13 +4837,15 @@
35824837 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
35834838 if (ret)
35844839 dev_err(&hdev->pdev->dev,
3585
- "Set promisc mode fail, status is %d.\n", ret);
4840
+ "failed to set vport %d promisc mode, ret = %d.\n",
4841
+ param->vf_id, ret);
35864842
35874843 return ret;
35884844 }
35894845
3590
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3591
- bool en_mc, bool en_bc, int vport_id)
4846
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4847
+ bool en_uc, bool en_mc, bool en_bc,
4848
+ int vport_id)
35924849 {
35934850 if (!param)
35944851 return;
....@@ -3603,20 +4860,1713 @@
36034860 param->vf_id = vport_id;
36044861 }
36054862
3606
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3607
- bool en_mc_pmc)
4863
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4864
+ bool en_mc_pmc, bool en_bc_pmc)
36084865 {
3609
- struct hclge_vport *vport = hclge_get_vport(handle);
36104866 struct hclge_dev *hdev = vport->back;
36114867 struct hclge_promisc_param param;
36124868
3613
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
4869
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
36144870 vport->vport_id);
3615
- hclge_cmd_set_promisc_mode(hdev, &param);
4871
+ return hclge_cmd_set_promisc_mode(hdev, &param);
4872
+}
4873
+
4874
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4875
+ bool en_mc_pmc)
4876
+{
4877
+ struct hclge_vport *vport = hclge_get_vport(handle);
4878
+ struct hclge_dev *hdev = vport->back;
4879
+ bool en_bc_pmc = true;
4880
+
4881
+ /* For devices whose version is below V2, if broadcast promisc is
4882
+ * enabled, the vlan filter is always bypassed. So broadcast promisc
4883
+ * should be disabled until the user enables promisc mode
4884
+ */
4885
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4886
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4887
+
4888
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4889
+ en_bc_pmc);
4890
+}
4891
+
4892
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4893
+{
4894
+ struct hclge_vport *vport = hclge_get_vport(handle);
4895
+ struct hclge_dev *hdev = vport->back;
4896
+
4897
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4898
+}
4899
+
4900
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4901
+{
4902
+ struct hclge_get_fd_mode_cmd *req;
4903
+ struct hclge_desc desc;
4904
+ int ret;
4905
+
4906
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4907
+
4908
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
4909
+
4910
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4911
+ if (ret) {
4912
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4913
+ return ret;
4914
+ }
4915
+
4916
+ *fd_mode = req->mode;
4917
+
4918
+ return ret;
4919
+}
4920
+
4921
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4922
+ u32 *stage1_entry_num,
4923
+ u32 *stage2_entry_num,
4924
+ u16 *stage1_counter_num,
4925
+ u16 *stage2_counter_num)
4926
+{
4927
+ struct hclge_get_fd_allocation_cmd *req;
4928
+ struct hclge_desc desc;
4929
+ int ret;
4930
+
4931
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4932
+
4933
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4934
+
4935
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4936
+ if (ret) {
4937
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4938
+ ret);
4939
+ return ret;
4940
+ }
4941
+
4942
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4943
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4944
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4945
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4946
+
4947
+ return ret;
4948
+}
4949
+
4950
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4951
+ enum HCLGE_FD_STAGE stage_num)
4952
+{
4953
+ struct hclge_set_fd_key_config_cmd *req;
4954
+ struct hclge_fd_key_cfg *stage;
4955
+ struct hclge_desc desc;
4956
+ int ret;
4957
+
4958
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4959
+
4960
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4961
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
4962
+ req->stage = stage_num;
4963
+ req->key_select = stage->key_sel;
4964
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4965
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4966
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4967
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4968
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4969
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4970
+
4971
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4972
+ if (ret)
4973
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4974
+
4975
+ return ret;
4976
+}
4977
+
4978
+static int hclge_init_fd_config(struct hclge_dev *hdev)
4979
+{
4980
+#define LOW_2_WORDS 0x03
4981
+ struct hclge_fd_key_cfg *key_cfg;
4982
+ int ret;
4983
+
4984
+ if (!hnae3_dev_fd_supported(hdev))
4985
+ return 0;
4986
+
4987
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4988
+ if (ret)
4989
+ return ret;
4990
+
4991
+ switch (hdev->fd_cfg.fd_mode) {
4992
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4993
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4994
+ break;
4995
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4996
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4997
+ break;
4998
+ default:
4999
+ dev_err(&hdev->pdev->dev,
5000
+ "Unsupported flow director mode %u\n",
5001
+ hdev->fd_cfg.fd_mode);
5002
+ return -EOPNOTSUPP;
5003
+ }
5004
+
5005
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5006
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5007
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5008
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5009
+ key_cfg->outer_sipv6_word_en = 0;
5010
+ key_cfg->outer_dipv6_word_en = 0;
5011
+
5012
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5013
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5014
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5015
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5016
+
5017
+ /* If using the max 400bit key, we can support tuples for ether type */
5018
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5019
+ key_cfg->tuple_active |=
5020
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5021
+
5022
+ /* roce_type is used to filter roce frames
5023
+ * dst_vport is used to specify the rule
5024
+ */
5025
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5026
+
5027
+ ret = hclge_get_fd_allocation(hdev,
5028
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5029
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5030
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5031
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5032
+ if (ret)
5033
+ return ret;
5034
+
5035
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5036
+}
5037
+
5038
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5039
+ int loc, u8 *key, bool is_add)
5040
+{
5041
+ struct hclge_fd_tcam_config_1_cmd *req1;
5042
+ struct hclge_fd_tcam_config_2_cmd *req2;
5043
+ struct hclge_fd_tcam_config_3_cmd *req3;
5044
+ struct hclge_desc desc[3];
5045
+ int ret;
5046
+
5047
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5048
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5049
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5050
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5051
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5052
+
5053
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5054
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5055
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5056
+
5057
+ req1->stage = stage;
5058
+ req1->xy_sel = sel_x ? 1 : 0;
5059
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5060
+ req1->index = cpu_to_le32(loc);
5061
+ req1->entry_vld = sel_x ? is_add : 0;
5062
+
5063
+ if (key) {
5064
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5065
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5066
+ sizeof(req2->tcam_data));
5067
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5068
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5069
+ }
5070
+
5071
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
5072
+ if (ret)
5073
+ dev_err(&hdev->pdev->dev,
5074
+ "config tcam key fail, ret=%d\n",
5075
+ ret);
5076
+
5077
+ return ret;
5078
+}
5079
+
5080
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5081
+ struct hclge_fd_ad_data *action)
5082
+{
5083
+ struct hclge_fd_ad_config_cmd *req;
5084
+ struct hclge_desc desc;
5085
+ u64 ad_data = 0;
5086
+ int ret;
5087
+
5088
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5089
+
5090
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
5091
+ req->index = cpu_to_le32(loc);
5092
+ req->stage = stage;
5093
+
5094
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5095
+ action->write_rule_id_to_bd);
5096
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5097
+ action->rule_id);
5098
+ ad_data <<= 32;
5099
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5100
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5101
+ action->forward_to_direct_queue);
5102
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5103
+ action->queue_id);
5104
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5105
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5106
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5107
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5108
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5109
+ action->counter_id);
5110
+
5111
+ req->ad_data = cpu_to_le64(ad_data);
5112
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5113
+ if (ret)
5114
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5115
+
5116
+ return ret;
5117
+}
5118
+
5119
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5120
+ struct hclge_fd_rule *rule)
5121
+{
5122
+ u16 tmp_x_s, tmp_y_s;
5123
+ u32 tmp_x_l, tmp_y_l;
5124
+ int i;
5125
+
5126
+ if (rule->unused_tuple & tuple_bit)
5127
+ return true;
5128
+
5129
+ switch (tuple_bit) {
5130
+ case BIT(INNER_DST_MAC):
5131
+ for (i = 0; i < ETH_ALEN; i++) {
5132
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5133
+ rule->tuples_mask.dst_mac[i]);
5134
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5135
+ rule->tuples_mask.dst_mac[i]);
5136
+ }
5137
+
5138
+ return true;
5139
+ case BIT(INNER_SRC_MAC):
5140
+ for (i = 0; i < ETH_ALEN; i++) {
5141
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5142
+ rule->tuples_mask.src_mac[i]);
5143
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5144
+ rule->tuples_mask.src_mac[i]);
5145
+ }
5146
+
5147
+ return true;
5148
+ case BIT(INNER_VLAN_TAG_FST):
5149
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5150
+ rule->tuples_mask.vlan_tag1);
5151
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5152
+ rule->tuples_mask.vlan_tag1);
5153
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5154
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5155
+
5156
+ return true;
5157
+ case BIT(INNER_ETH_TYPE):
5158
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
5159
+ rule->tuples_mask.ether_proto);
5160
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
5161
+ rule->tuples_mask.ether_proto);
5162
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5163
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5164
+
5165
+ return true;
5166
+ case BIT(INNER_IP_TOS):
5167
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5168
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5169
+
5170
+ return true;
5171
+ case BIT(INNER_IP_PROTO):
5172
+ calc_x(*key_x, rule->tuples.ip_proto,
5173
+ rule->tuples_mask.ip_proto);
5174
+ calc_y(*key_y, rule->tuples.ip_proto,
5175
+ rule->tuples_mask.ip_proto);
5176
+
5177
+ return true;
5178
+ case BIT(INNER_SRC_IP):
5179
+ calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5180
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
5181
+ calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5182
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
5183
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5184
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5185
+
5186
+ return true;
5187
+ case BIT(INNER_DST_IP):
5188
+ calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5189
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
5190
+ calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5191
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
5192
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5193
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5194
+
5195
+ return true;
5196
+ case BIT(INNER_SRC_PORT):
5197
+ calc_x(tmp_x_s, rule->tuples.src_port,
5198
+ rule->tuples_mask.src_port);
5199
+ calc_y(tmp_y_s, rule->tuples.src_port,
5200
+ rule->tuples_mask.src_port);
5201
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5202
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5203
+
5204
+ return true;
5205
+ case BIT(INNER_DST_PORT):
5206
+ calc_x(tmp_x_s, rule->tuples.dst_port,
5207
+ rule->tuples_mask.dst_port);
5208
+ calc_y(tmp_y_s, rule->tuples.dst_port,
5209
+ rule->tuples_mask.dst_port);
5210
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5211
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5212
+
5213
+ return true;
5214
+ default:
5215
+ return false;
5216
+ }
5217
+}
5218
+
5219
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5220
+ u8 vf_id, u8 network_port_id)
5221
+{
5222
+ u32 port_number = 0;
5223
+
5224
+ if (port_type == HOST_PORT) {
5225
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5226
+ pf_id);
5227
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5228
+ vf_id);
5229
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5230
+ } else {
5231
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5232
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
5233
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5234
+ }
5235
+
5236
+ return port_number;
5237
+}
5238
+
5239
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5240
+ __le32 *key_x, __le32 *key_y,
5241
+ struct hclge_fd_rule *rule)
5242
+{
5243
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5244
+ u8 cur_pos = 0, tuple_size, shift_bits;
5245
+ unsigned int i;
5246
+
5247
+ for (i = 0; i < MAX_META_DATA; i++) {
5248
+ tuple_size = meta_data_key_info[i].key_length;
5249
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
5250
+
5251
+ switch (tuple_bit) {
5252
+ case BIT(ROCE_TYPE):
5253
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5254
+ cur_pos += tuple_size;
5255
+ break;
5256
+ case BIT(DST_VPORT):
5257
+ port_number = hclge_get_port_number(HOST_PORT, 0,
5258
+ rule->vf_id, 0);
5259
+ hnae3_set_field(meta_data,
5260
+ GENMASK(cur_pos + tuple_size, cur_pos),
5261
+ cur_pos, port_number);
5262
+ cur_pos += tuple_size;
5263
+ break;
5264
+ default:
5265
+ break;
5266
+ }
5267
+ }
5268
+
5269
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5270
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5271
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
5272
+
5273
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
5274
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
5275
+}
5276
+
5277
+/* A complete key is the combination of the meta data key and the tuple key.
5278
+ * The meta data key is stored at the MSB region, the tuple key at the
5279
+ * LSB region, and unused bits are filled with 0.
5280
+ */
5281
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5282
+ struct hclge_fd_rule *rule)
5283
+{
5284
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5285
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5286
+ u8 *cur_key_x, *cur_key_y;
5287
+ u8 meta_data_region;
5288
+ u8 tuple_size;
5289
+ int ret;
5290
+ u32 i;
5291
+
5292
+ memset(key_x, 0, sizeof(key_x));
5293
+ memset(key_y, 0, sizeof(key_y));
5294
+ cur_key_x = key_x;
5295
+ cur_key_y = key_y;
5296
+
5297
+ for (i = 0; i < MAX_TUPLE; i++) {
5298
+ bool tuple_valid;
5299
+ u32 check_tuple;
5300
+
5301
+ tuple_size = tuple_key_info[i].key_length / 8;
5302
+ check_tuple = key_cfg->tuple_active & BIT(i);
5303
+
5304
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5305
+ cur_key_y, rule);
5306
+ if (tuple_valid) {
5307
+ cur_key_x += tuple_size;
5308
+ cur_key_y += tuple_size;
5309
+ }
5310
+ }
5311
+
5312
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5313
+ MAX_META_DATA_LENGTH / 8;
5314
+
5315
+ hclge_fd_convert_meta_data(key_cfg,
5316
+ (__le32 *)(key_x + meta_data_region),
5317
+ (__le32 *)(key_y + meta_data_region),
5318
+ rule);
5319
+
5320
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5321
+ true);
5322
+ if (ret) {
5323
+ dev_err(&hdev->pdev->dev,
5324
+ "fd key_y config fail, loc=%u, ret=%d\n",
5325
+ rule->location, ret);
5326
+ return ret;
5327
+ }
5328
+
5329
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5330
+ true);
5331
+ if (ret)
5332
+ dev_err(&hdev->pdev->dev,
5333
+ "fd key_x config fail, loc=%u, ret=%d\n",
5334
+ rule->location, ret);
5335
+ return ret;
5336
+}
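Per the block comment before hclge_config_key(), the lookup key is assembled from both ends of the buffer: active tuples pack upward from byte 0, the meta data occupies the top MAX_META_DATA_LENGTH bits of the max_key_length-wide key, and the gap in between stays zero. A toy layout sketch with made-up sizes (the real lengths come from the firmware's key configuration):

#include <stdio.h>
#include <string.h>

#define KEY_BYTES 50 /* made-up max_key_length / 8 */
#define META_BYTES 4 /* made-up MAX_META_DATA_LENGTH / 8 */

int main(void)
{
	unsigned char key[KEY_BYTES];
	unsigned char *cur = key; /* tuples pack up from the LSB end */
	const unsigned char tuple_a[2] = { 0x12, 0x34 };
	const unsigned char tuple_b[4] = { 0xde, 0xad, 0xbe, 0xef };
	const unsigned char meta[META_BYTES] = { 0x01, 0x02, 0x03, 0x04 };

	memset(key, 0, sizeof(key));

	/* only tuples the key config marks active consume space */
	memcpy(cur, tuple_a, sizeof(tuple_a));
	cur += sizeof(tuple_a);
	memcpy(cur, tuple_b, sizeof(tuple_b));
	cur += sizeof(tuple_b);

	/* meta data always sits in the fixed MSB region of the key */
	memcpy(key + KEY_BYTES - META_BYTES, meta, META_BYTES);

	printf("tuple bytes used: %td, gap: %td, meta bytes: %d\n",
	       cur - key, (key + KEY_BYTES - META_BYTES) - cur, META_BYTES);
	return 0;
}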
5337
+
5338
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5339
+ struct hclge_fd_rule *rule)
5340
+{
5341
+ struct hclge_fd_ad_data ad_data;
5342
+
5343
+ ad_data.ad_id = rule->location;
5344
+
5345
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5346
+ ad_data.drop_packet = true;
5347
+ ad_data.forward_to_direct_queue = false;
5348
+ ad_data.queue_id = 0;
5349
+ } else {
5350
+ ad_data.drop_packet = false;
5351
+ ad_data.forward_to_direct_queue = true;
5352
+ ad_data.queue_id = rule->queue_id;
5353
+ }
5354
+
5355
+ ad_data.use_counter = false;
5356
+ ad_data.counter_id = 0;
5357
+
5358
+ ad_data.use_next_stage = false;
5359
+ ad_data.next_input_key = 0;
5360
+
5361
+ ad_data.write_rule_id_to_bd = true;
5362
+ ad_data.rule_id = rule->location;
5363
+
5364
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5365
+}
5366
+
5367
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5368
+ u32 *unused_tuple)
5369
+{
5370
+ if (!spec || !unused_tuple)
5371
+ return -EINVAL;
5372
+
5373
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5374
+
5375
+ if (!spec->ip4src)
5376
+ *unused_tuple |= BIT(INNER_SRC_IP);
5377
+
5378
+ if (!spec->ip4dst)
5379
+ *unused_tuple |= BIT(INNER_DST_IP);
5380
+
5381
+ if (!spec->psrc)
5382
+ *unused_tuple |= BIT(INNER_SRC_PORT);
5383
+
5384
+ if (!spec->pdst)
5385
+ *unused_tuple |= BIT(INNER_DST_PORT);
5386
+
5387
+ if (!spec->tos)
5388
+ *unused_tuple |= BIT(INNER_IP_TOS);
5389
+
5390
+ return 0;
5391
+}
5392
+
5393
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5394
+ u32 *unused_tuple)
5395
+{
5396
+ if (!spec || !unused_tuple)
5397
+ return -EINVAL;
5398
+
5399
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5400
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5401
+
5402
+ if (!spec->ip4src)
5403
+ *unused_tuple |= BIT(INNER_SRC_IP);
5404
+
5405
+ if (!spec->ip4dst)
5406
+ *unused_tuple |= BIT(INNER_DST_IP);
5407
+
5408
+ if (!spec->tos)
5409
+ *unused_tuple |= BIT(INNER_IP_TOS);
5410
+
5411
+ if (!spec->proto)
5412
+ *unused_tuple |= BIT(INNER_IP_PROTO);
5413
+
5414
+ if (spec->l4_4_bytes)
5415
+ return -EOPNOTSUPP;
5416
+
5417
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
5418
+ return -EOPNOTSUPP;
5419
+
5420
+ return 0;
5421
+}
5422
+
5423
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5424
+ u32 *unused_tuple)
5425
+{
5426
+ if (!spec || !unused_tuple)
5427
+ return -EINVAL;
5428
+
5429
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5430
+ BIT(INNER_IP_TOS);
5431
+
5432
+ /* check whether the src/dst ip address is used */
5433
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
5434
+ !spec->ip6src[2] && !spec->ip6src[3])
5435
+ *unused_tuple |= BIT(INNER_SRC_IP);
5436
+
5437
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5438
+ !spec->ip6dst[2] && !spec->ip6dst[3])
5439
+ *unused_tuple |= BIT(INNER_DST_IP);
5440
+
5441
+ if (!spec->psrc)
5442
+ *unused_tuple |= BIT(INNER_SRC_PORT);
5443
+
5444
+ if (!spec->pdst)
5445
+ *unused_tuple |= BIT(INNER_DST_PORT);
5446
+
5447
+ if (spec->tclass)
5448
+ return -EOPNOTSUPP;
5449
+
5450
+ return 0;
5451
+}
5452
+
5453
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5454
+ u32 *unused_tuple)
5455
+{
5456
+ if (!spec || !unused_tuple)
5457
+ return -EINVAL;
5458
+
5459
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5460
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5461
+
5462
+ /* check whether the src/dst ip address is used */
5463
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
5464
+ !spec->ip6src[2] && !spec->ip6src[3])
5465
+ *unused_tuple |= BIT(INNER_SRC_IP);
5466
+
5467
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5468
+ !spec->ip6dst[2] && !spec->ip6dst[3])
5469
+ *unused_tuple |= BIT(INNER_DST_IP);
5470
+
5471
+ if (!spec->l4_proto)
5472
+ *unused_tuple |= BIT(INNER_IP_PROTO);
5473
+
5474
+ if (spec->tclass)
5475
+ return -EOPNOTSUPP;
5476
+
5477
+ if (spec->l4_4_bytes)
5478
+ return -EOPNOTSUPP;
5479
+
5480
+ return 0;
5481
+}
5482
+
5483
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5484
+{
5485
+ if (!spec || !unused_tuple)
5486
+ return -EINVAL;
5487
+
5488
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5489
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5490
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5491
+
5492
+ if (is_zero_ether_addr(spec->h_source))
5493
+ *unused_tuple |= BIT(INNER_SRC_MAC);
5494
+
5495
+ if (is_zero_ether_addr(spec->h_dest))
5496
+ *unused_tuple |= BIT(INNER_DST_MAC);
5497
+
5498
+ if (!spec->h_proto)
5499
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
5500
+
5501
+ return 0;
5502
+}
5503
+
5504
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5505
+ struct ethtool_rx_flow_spec *fs,
5506
+ u32 *unused_tuple)
5507
+{
5508
+ if (fs->flow_type & FLOW_EXT) {
5509
+ if (fs->h_ext.vlan_etype) {
5510
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5511
+ return -EOPNOTSUPP;
5512
+ }
5513
+
5514
+ if (!fs->h_ext.vlan_tci)
5515
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5516
+
5517
+ if (fs->m_ext.vlan_tci &&
5518
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5519
+ dev_err(&hdev->pdev->dev,
5520
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5521
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5522
+ return -EINVAL;
5523
+ }
5524
+ } else {
5525
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5526
+ }
5527
+
5528
+ if (fs->flow_type & FLOW_MAC_EXT) {
5529
+ if (hdev->fd_cfg.fd_mode !=
5530
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5531
+ dev_err(&hdev->pdev->dev,
5532
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
5533
+ return -EOPNOTSUPP;
5534
+ }
5535
+
5536
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
5537
+ *unused_tuple |= BIT(INNER_DST_MAC);
5538
+ else
5539
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
5540
+ }
5541
+
5542
+ return 0;
5543
+}
5544
+
5545
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
5546
+ struct ethtool_rx_flow_spec *fs,
5547
+ u32 *unused_tuple)
5548
+{
5549
+ u32 flow_type;
5550
+ int ret;
5551
+
5552
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5553
+ dev_err(&hdev->pdev->dev,
5554
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5555
+ fs->location,
5556
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5557
+ return -EINVAL;
5558
+ }
5559
+
5560
+ if ((fs->flow_type & FLOW_EXT) &&
5561
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5562
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5563
+ return -EOPNOTSUPP;
5564
+ }
5565
+
5566
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5567
+ switch (flow_type) {
5568
+ case SCTP_V4_FLOW:
5569
+ case TCP_V4_FLOW:
5570
+ case UDP_V4_FLOW:
5571
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5572
+ unused_tuple);
5573
+ break;
5574
+ case IP_USER_FLOW:
5575
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5576
+ unused_tuple);
5577
+ break;
5578
+ case SCTP_V6_FLOW:
5579
+ case TCP_V6_FLOW:
5580
+ case UDP_V6_FLOW:
5581
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5582
+ unused_tuple);
5583
+ break;
5584
+ case IPV6_USER_FLOW:
5585
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5586
+ unused_tuple);
5587
+ break;
5588
+ case ETHER_FLOW:
5589
+ if (hdev->fd_cfg.fd_mode !=
5590
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5591
+ dev_err(&hdev->pdev->dev,
5592
+ "ETHER_FLOW is not supported in current fd mode!\n");
5593
+ return -EOPNOTSUPP;
5594
+ }
5595
+
5596
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5597
+ unused_tuple);
5598
+ break;
5599
+ default:
5600
+ dev_err(&hdev->pdev->dev,
5601
+ "unsupported protocol type, protocol type = %#x\n",
5602
+ flow_type);
5603
+ return -EOPNOTSUPP;
5604
+ }
5605
+
5606
+ if (ret) {
5607
+ dev_err(&hdev->pdev->dev,
5608
+ "failed to check flow union tuple, ret = %d\n",
5609
+ ret);
5610
+ return ret;
5611
+ }
5612
+
5613
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5614
+}
5615
+
5616
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5617
+{
5618
+ struct hclge_fd_rule *rule = NULL;
5619
+ struct hlist_node *node2;
5620
+
5621
+ spin_lock_bh(&hdev->fd_rule_lock);
5622
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5623
+ if (rule->location >= location)
5624
+ break;
5625
+ }
5626
+
5627
+ spin_unlock_bh(&hdev->fd_rule_lock);
5628
+
5629
+ return rule && rule->location == location;
5630
+}
5631
+
5632
+/* must be called with fd_rule_lock held */
5633
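+/* the rule list is kept sorted by location; adding a rule at an occupied
+ * location first removes the old entry
+ */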
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5634
+ struct hclge_fd_rule *new_rule,
5635
+ u16 location,
5636
+ bool is_add)
5637
+{
5638
+ struct hclge_fd_rule *rule = NULL, *parent = NULL;
5639
+ struct hlist_node *node2;
5640
+
5641
+ if (is_add && !new_rule)
5642
+ return -EINVAL;
5643
+
5644
+ hlist_for_each_entry_safe(rule, node2,
5645
+ &hdev->fd_rule_list, rule_node) {
5646
+ if (rule->location >= location)
5647
+ break;
5648
+ parent = rule;
5649
+ }
5650
+
5651
+ if (rule && rule->location == location) {
5652
+ hlist_del(&rule->rule_node);
5653
+ kfree(rule);
5654
+ hdev->hclge_fd_rule_num--;
5655
+
5656
+ if (!is_add) {
5657
+ if (!hdev->hclge_fd_rule_num)
5658
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5659
+ clear_bit(location, hdev->fd_bmap);
5660
+
5661
+ return 0;
5662
+ }
5663
+ } else if (!is_add) {
5664
+ dev_err(&hdev->pdev->dev,
5665
+ "delete fail, rule %u is inexistent\n",
5666
+ location);
5667
+ return -EINVAL;
5668
+ }
5669
+
5670
+ INIT_HLIST_NODE(&new_rule->rule_node);
5671
+
5672
+ if (parent)
5673
+ hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5674
+ else
5675
+ hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5676
+
5677
+ set_bit(location, hdev->fd_bmap);
5678
+ hdev->hclge_fd_rule_num++;
5679
+ hdev->fd_active_type = new_rule->rule_type;
5680
+
5681
+ return 0;
5682
+}
5683
+
5684
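+/* convert the big-endian ethtool flow spec into host-order rule tuples
+ * and masks
+ */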
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5685
+ struct ethtool_rx_flow_spec *fs,
5686
+ struct hclge_fd_rule *rule)
5687
+{
5688
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5689
+
5690
+ switch (flow_type) {
5691
+ case SCTP_V4_FLOW:
5692
+ case TCP_V4_FLOW:
5693
+ case UDP_V4_FLOW:
5694
+ rule->tuples.src_ip[IPV4_INDEX] =
5695
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5696
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
5697
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5698
+
5699
+ rule->tuples.dst_ip[IPV4_INDEX] =
5700
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5701
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
5702
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5703
+
5704
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5705
+ rule->tuples_mask.src_port =
5706
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5707
+
5708
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5709
+ rule->tuples_mask.dst_port =
5710
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5711
+
5712
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5713
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5714
+
5715
+ rule->tuples.ether_proto = ETH_P_IP;
5716
+ rule->tuples_mask.ether_proto = 0xFFFF;
5717
+
5718
+ break;
5719
+ case IP_USER_FLOW:
5720
+ rule->tuples.src_ip[IPV4_INDEX] =
5721
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5722
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
5723
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5724
+
5725
+ rule->tuples.dst_ip[IPV4_INDEX] =
5726
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5727
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
5728
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5729
+
5730
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5731
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5732
+
5733
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5734
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5735
+
5736
+ rule->tuples.ether_proto = ETH_P_IP;
5737
+ rule->tuples_mask.ether_proto = 0xFFFF;
5738
+
5739
+ break;
5740
+ case SCTP_V6_FLOW:
5741
+ case TCP_V6_FLOW:
5742
+ case UDP_V6_FLOW:
5743
+ be32_to_cpu_array(rule->tuples.src_ip,
5744
+ fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5745
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
5746
+ fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5747
+
5748
+ be32_to_cpu_array(rule->tuples.dst_ip,
5749
+ fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5750
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
5751
+ fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5752
+
5753
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5754
+ rule->tuples_mask.src_port =
5755
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5756
+
5757
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5758
+ rule->tuples_mask.dst_port =
5759
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5760
+
5761
+ rule->tuples.ether_proto = ETH_P_IPV6;
5762
+ rule->tuples_mask.ether_proto = 0xFFFF;
5763
+
5764
+ break;
5765
+ case IPV6_USER_FLOW:
5766
+ be32_to_cpu_array(rule->tuples.src_ip,
5767
+ fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5768
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
5769
+ fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5770
+
5771
+ be32_to_cpu_array(rule->tuples.dst_ip,
5772
+ fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5773
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
5774
+ fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5775
+
5776
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5777
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5778
+
5779
+ rule->tuples.ether_proto = ETH_P_IPV6;
5780
+ rule->tuples_mask.ether_proto = 0xFFFF;
5781
+
5782
+ break;
5783
+ case ETHER_FLOW:
5784
+ ether_addr_copy(rule->tuples.src_mac,
5785
+ fs->h_u.ether_spec.h_source);
5786
+ ether_addr_copy(rule->tuples_mask.src_mac,
5787
+ fs->m_u.ether_spec.h_source);
5788
+
5789
+ ether_addr_copy(rule->tuples.dst_mac,
5790
+ fs->h_u.ether_spec.h_dest);
5791
+ ether_addr_copy(rule->tuples_mask.dst_mac,
5792
+ fs->m_u.ether_spec.h_dest);
5793
+
5794
+ rule->tuples.ether_proto =
5795
+ be16_to_cpu(fs->h_u.ether_spec.h_proto);
5796
+ rule->tuples_mask.ether_proto =
5797
+ be16_to_cpu(fs->m_u.ether_spec.h_proto);
5798
+
5799
+ break;
5800
+ default:
5801
+ return -EOPNOTSUPP;
5802
+ }
5803
+
5804
+ switch (flow_type) {
5805
+ case SCTP_V4_FLOW:
5806
+ case SCTP_V6_FLOW:
5807
+ rule->tuples.ip_proto = IPPROTO_SCTP;
5808
+ rule->tuples_mask.ip_proto = 0xFF;
5809
+ break;
5810
+ case TCP_V4_FLOW:
5811
+ case TCP_V6_FLOW:
5812
+ rule->tuples.ip_proto = IPPROTO_TCP;
5813
+ rule->tuples_mask.ip_proto = 0xFF;
5814
+ break;
5815
+ case UDP_V4_FLOW:
5816
+ case UDP_V6_FLOW:
5817
+ rule->tuples.ip_proto = IPPROTO_UDP;
5818
+ rule->tuples_mask.ip_proto = 0xFF;
5819
+ break;
5820
+ default:
5821
+ break;
5822
+ }
5823
+
5824
+ if (fs->flow_type & FLOW_EXT) {
5825
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5826
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5827
+ }
5828
+
5829
+ if (fs->flow_type & FLOW_MAC_EXT) {
5830
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5831
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5832
+ }
5833
+
5834
+ return 0;
5835
+}
5836
+
5837
+/* must be called with fd_rule_lock held */
5838
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
5839
+ struct hclge_fd_rule *rule)
5840
+{
5841
+ int ret;
5842
+
5843
+ if (!rule) {
5844
+ dev_err(&hdev->pdev->dev,
5845
+ "The flow director rule is NULL\n");
5846
+ return -EINVAL;
5847
+ }
5848
+
5849
+ /* this never fails here, so there is no need to check the return value */
5850
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5851
+
5852
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5853
+ if (ret)
5854
+ goto clear_rule;
5855
+
5856
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5857
+ if (ret)
5858
+ goto clear_rule;
5859
+
5860
+ return 0;
5861
+
5862
+clear_rule:
5863
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5864
+ return ret;
5865
+}
5866
+
5867
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
5868
+ struct ethtool_rxnfc *cmd)
5869
+{
5870
+ struct hclge_vport *vport = hclge_get_vport(handle);
5871
+ struct hclge_dev *hdev = vport->back;
5872
+ u16 dst_vport_id = 0, q_index = 0;
5873
+ struct ethtool_rx_flow_spec *fs;
5874
+ struct hclge_fd_rule *rule;
5875
+ u32 unused = 0;
5876
+ u8 action;
5877
+ int ret;
5878
+
5879
+ if (!hnae3_dev_fd_supported(hdev)) {
5880
+ dev_err(&hdev->pdev->dev,
5881
+ "flow table director is not supported\n");
5882
+ return -EOPNOTSUPP;
5883
+ }
5884
+
5885
+ if (!hdev->fd_en) {
5886
+ dev_err(&hdev->pdev->dev,
5887
+ "please enable flow director first\n");
5888
+ return -EOPNOTSUPP;
5889
+ }
5890
+
5891
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5892
+
5893
+ ret = hclge_fd_check_spec(hdev, fs, &unused);
5894
+ if (ret)
5895
+ return ret;
5896
+
5897
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5898
+ action = HCLGE_FD_ACTION_DROP_PACKET;
5899
+ } else {
5900
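+ /* ring_cookie encodes both the target ring and the VF id */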
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5901
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5902
+ u16 tqps;
5903
+
5904
+ if (vf > hdev->num_req_vfs) {
5905
+ dev_err(&hdev->pdev->dev,
5906
+ "Error: vf id (%u) > max vf num (%u)\n",
5907
+ vf, hdev->num_req_vfs);
5908
+ return -EINVAL;
5909
+ }
5910
+
5911
+ dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5912
+ tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5913
+
5914
+ if (ring >= tqps) {
5915
+ dev_err(&hdev->pdev->dev,
5916
+ "Error: queue id (%u) > max tqp num (%u)\n",
5917
+ ring, tqps - 1);
5918
+ return -EINVAL;
5919
+ }
5920
+
5921
+ action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5922
+ q_index = ring;
5923
+ }
5924
+
5925
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5926
+ if (!rule)
5927
+ return -ENOMEM;
5928
+
5929
+ ret = hclge_fd_get_tuple(hdev, fs, rule);
5930
+ if (ret) {
5931
+ kfree(rule);
5932
+ return ret;
5933
+ }
5934
+
5935
+ rule->flow_type = fs->flow_type;
5936
+ rule->location = fs->location;
5937
+ rule->unused_tuple = unused;
5938
+ rule->vf_id = dst_vport_id;
5939
+ rule->queue_id = q_index;
5940
+ rule->action = action;
5941
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
5942
+
5943
+ /* to avoid rule conflicts, when the user configures a rule via ethtool,
5944
+ * we need to clear all arfs rules
5945
+ */
5946
+ spin_lock_bh(&hdev->fd_rule_lock);
5947
+ hclge_clear_arfs_rules(handle);
5948
+
5949
+ ret = hclge_fd_config_rule(hdev, rule);
5950
+
5951
+ spin_unlock_bh(&hdev->fd_rule_lock);
5952
+
5953
+ return ret;
5954
+}
5955
+
5956
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
5957
+ struct ethtool_rxnfc *cmd)
5958
+{
5959
+ struct hclge_vport *vport = hclge_get_vport(handle);
5960
+ struct hclge_dev *hdev = vport->back;
5961
+ struct ethtool_rx_flow_spec *fs;
5962
+ int ret;
5963
+
5964
+ if (!hnae3_dev_fd_supported(hdev))
5965
+ return -EOPNOTSUPP;
5966
+
5967
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5968
+
5969
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5970
+ return -EINVAL;
5971
+
5972
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
5973
+ dev_err(&hdev->pdev->dev,
5974
+ "Delete fail, rule %u is inexistent\n", fs->location);
5975
+ return -ENOENT;
5976
+ }
5977
+
5978
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5979
+ NULL, false);
5980
+ if (ret)
5981
+ return ret;
5982
+
5983
+ spin_lock_bh(&hdev->fd_rule_lock);
5984
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5985
+
5986
+ spin_unlock_bh(&hdev->fd_rule_lock);
5987
+
5988
+ return ret;
5989
+}
5990
+
5991
+/* must be called with fd_rule_lock held */
5992
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5993
+ bool clear_list)
5994
+{
5995
+ struct hclge_vport *vport = hclge_get_vport(handle);
5996
+ struct hclge_dev *hdev = vport->back;
5997
+ struct hclge_fd_rule *rule;
5998
+ struct hlist_node *node;
5999
+ u16 location;
6000
+
6001
+ if (!hnae3_dev_fd_supported(hdev))
6002
+ return;
6003
+
6004
+ for_each_set_bit(location, hdev->fd_bmap,
6005
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6006
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6007
+ NULL, false);
6008
+
6009
+ if (clear_list) {
6010
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6011
+ rule_node) {
6012
+ hlist_del(&rule->rule_node);
6013
+ kfree(rule);
6014
+ }
6015
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6016
+ hdev->hclge_fd_rule_num = 0;
6017
+ bitmap_zero(hdev->fd_bmap,
6018
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6019
+ }
6020
+}
6021
+
6022
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6023
+{
6024
+ struct hclge_vport *vport = hclge_get_vport(handle);
6025
+ struct hclge_dev *hdev = vport->back;
6026
+ struct hclge_fd_rule *rule;
6027
+ struct hlist_node *node;
6028
+ int ret;
6029
+
6030
+ /* Return ok here, because reset error handling will check this
6031
+ * return value. If an error is returned here, the reset process will
6032
+ * fail.
6033
+ */
6034
+ if (!hnae3_dev_fd_supported(hdev))
6035
+ return 0;
6036
+
6037
+ /* if fd is disabled, it should not be restored during reset */
6038
+ if (!hdev->fd_en)
6039
+ return 0;
6040
+
6041
+ spin_lock_bh(&hdev->fd_rule_lock);
6042
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6043
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6044
+ if (!ret)
6045
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6046
+
6047
+ if (ret) {
6048
+ dev_warn(&hdev->pdev->dev,
6049
+ "Restore rule %u failed, remove it\n",
6050
+ rule->location);
6051
+ clear_bit(rule->location, hdev->fd_bmap);
6052
+ hlist_del(&rule->rule_node);
6053
+ kfree(rule);
6054
+ hdev->hclge_fd_rule_num--;
6055
+ }
6056
+ }
6057
+
6058
+ if (hdev->hclge_fd_rule_num)
6059
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6060
+
6061
+ spin_unlock_bh(&hdev->fd_rule_lock);
6062
+
6063
+ return 0;
6064
+}
6065
+
6066
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6067
+ struct ethtool_rxnfc *cmd)
6068
+{
6069
+ struct hclge_vport *vport = hclge_get_vport(handle);
6070
+ struct hclge_dev *hdev = vport->back;
6071
+
6072
+ if (!hnae3_dev_fd_supported(hdev))
6073
+ return -EOPNOTSUPP;
6074
+
6075
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
6076
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6077
+
6078
+ return 0;
6079
+}
6080
+
6081
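+/* the hclge_fd_get_*_info helpers below convert a stored rule back into
+ * the ethtool spec/mask layout, reporting a zero mask for each field
+ * flagged in unused_tuple
+ */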
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6082
+ struct ethtool_tcpip4_spec *spec,
6083
+ struct ethtool_tcpip4_spec *spec_mask)
6084
+{
6085
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6086
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6087
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6088
+
6089
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6090
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6091
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6092
+
6093
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
6094
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6095
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
6096
+
6097
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6098
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6099
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6100
+
6101
+ spec->tos = rule->tuples.ip_tos;
6102
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6103
+ 0 : rule->tuples_mask.ip_tos;
6104
+}
6105
+
6106
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6107
+ struct ethtool_usrip4_spec *spec,
6108
+ struct ethtool_usrip4_spec *spec_mask)
6109
+{
6110
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6111
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6112
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6113
+
6114
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6115
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6116
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6117
+
6118
+ spec->tos = rule->tuples.ip_tos;
6119
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6120
+ 0 : rule->tuples_mask.ip_tos;
6121
+
6122
+ spec->proto = rule->tuples.ip_proto;
6123
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6124
+ 0 : rule->tuples_mask.ip_proto;
6125
+
6126
+ spec->ip_ver = ETH_RX_NFC_IP4;
6127
+}
6128
+
6129
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6130
+ struct ethtool_tcpip6_spec *spec,
6131
+ struct ethtool_tcpip6_spec *spec_mask)
6132
+{
6133
+ cpu_to_be32_array(spec->ip6src,
6134
+ rule->tuples.src_ip, IPV6_SIZE);
6135
+ cpu_to_be32_array(spec->ip6dst,
6136
+ rule->tuples.dst_ip, IPV6_SIZE);
6137
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
6138
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6139
+ else
6140
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6141
+ IPV6_SIZE);
6142
+
6143
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
6144
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6145
+ else
6146
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6147
+ IPV6_SIZE);
6148
+
6149
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
6150
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6151
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
6152
+
6153
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6154
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6155
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6156
+}
6157
+
6158
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6159
+ struct ethtool_usrip6_spec *spec,
6160
+ struct ethtool_usrip6_spec *spec_mask)
6161
+{
6162
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6163
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6164
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
6165
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6166
+ else
6167
+ cpu_to_be32_array(spec_mask->ip6src,
6168
+ rule->tuples_mask.src_ip, IPV6_SIZE);
6169
+
6170
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
6171
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6172
+ else
6173
+ cpu_to_be32_array(spec_mask->ip6dst,
6174
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
6175
+
6176
+ spec->l4_proto = rule->tuples.ip_proto;
6177
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6178
+ 0 : rule->tuples_mask.ip_proto;
6179
+}
6180
+
6181
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6182
+ struct ethhdr *spec,
6183
+ struct ethhdr *spec_mask)
6184
+{
6185
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6186
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6187
+
6188
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6189
+ eth_zero_addr(spec_mask->h_source);
6190
+ else
6191
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6192
+
6193
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
6194
+ eth_zero_addr(spec_mask->h_dest);
6195
+ else
6196
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6197
+
6198
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6199
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6200
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6201
+}
6202
+
6203
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6204
+ struct hclge_fd_rule *rule)
6205
+{
6206
+ if (fs->flow_type & FLOW_EXT) {
6207
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6208
+ fs->m_ext.vlan_tci =
6209
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6210
+ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6211
+ }
6212
+
6213
+ if (fs->flow_type & FLOW_MAC_EXT) {
6214
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6215
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
6216
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
6217
+ else
6218
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
6219
+ rule->tuples_mask.dst_mac);
6220
+ }
6221
+}
6222
+
6223
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6224
+ struct ethtool_rxnfc *cmd)
6225
+{
6226
+ struct hclge_vport *vport = hclge_get_vport(handle);
6227
+ struct hclge_fd_rule *rule = NULL;
6228
+ struct hclge_dev *hdev = vport->back;
6229
+ struct ethtool_rx_flow_spec *fs;
6230
+ struct hlist_node *node2;
6231
+
6232
+ if (!hnae3_dev_fd_supported(hdev))
6233
+ return -EOPNOTSUPP;
6234
+
6235
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6236
+
6237
+ spin_lock_bh(&hdev->fd_rule_lock);
6238
+
6239
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6240
+ if (rule->location >= fs->location)
6241
+ break;
6242
+ }
6243
+
6244
+ if (!rule || fs->location != rule->location) {
6245
+ spin_unlock_bh(&hdev->fd_rule_lock);
6246
+
6247
+ return -ENOENT;
6248
+ }
6249
+
6250
+ fs->flow_type = rule->flow_type;
6251
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6252
+ case SCTP_V4_FLOW:
6253
+ case TCP_V4_FLOW:
6254
+ case UDP_V4_FLOW:
6255
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6256
+ &fs->m_u.tcp_ip4_spec);
6257
+ break;
6258
+ case IP_USER_FLOW:
6259
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6260
+ &fs->m_u.usr_ip4_spec);
6261
+ break;
6262
+ case SCTP_V6_FLOW:
6263
+ case TCP_V6_FLOW:
6264
+ case UDP_V6_FLOW:
6265
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6266
+ &fs->m_u.tcp_ip6_spec);
6267
+ break;
6268
+ case IPV6_USER_FLOW:
6269
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6270
+ &fs->m_u.usr_ip6_spec);
6271
+ break;
6272
+ /* The flow type of fd rule has been checked before adding into the rule
6273
+ * list. As other flow types have been handled, it must be ETHER_FLOW
6274
+ * for the default case
6275
+ */
6276
+ default:
6277
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6278
+ &fs->m_u.ether_spec);
6279
+ break;
6280
+ }
6281
+
6282
+ hclge_fd_get_ext_info(fs, rule);
6283
+
6284
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6285
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
6286
+ } else {
6287
+ u64 vf_id;
6288
+
6289
+ fs->ring_cookie = rule->queue_id;
6290
+ vf_id = rule->vf_id;
6291
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6292
+ fs->ring_cookie |= vf_id;
6293
+ }
6294
+
6295
+ spin_unlock_bh(&hdev->fd_rule_lock);
6296
+
6297
+ return 0;
6298
+}
6299
+
6300
+static int hclge_get_all_rules(struct hnae3_handle *handle,
6301
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
6302
+{
6303
+ struct hclge_vport *vport = hclge_get_vport(handle);
6304
+ struct hclge_dev *hdev = vport->back;
6305
+ struct hclge_fd_rule *rule;
6306
+ struct hlist_node *node2;
6307
+ int cnt = 0;
6308
+
6309
+ if (!hnae3_dev_fd_supported(hdev))
6310
+ return -EOPNOTSUPP;
6311
+
6312
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6313
+
6314
+ spin_lock_bh(&hdev->fd_rule_lock);
6315
+ hlist_for_each_entry_safe(rule, node2,
6316
+ &hdev->fd_rule_list, rule_node) {
6317
+ if (cnt == cmd->rule_cnt) {
6318
+ spin_unlock_bh(&hdev->fd_rule_lock);
6319
+ return -EMSGSIZE;
6320
+ }
6321
+
6322
+ rule_locs[cnt] = rule->location;
6323
+ cnt++;
6324
+ }
6325
+
6326
+ spin_unlock_bh(&hdev->fd_rule_lock);
6327
+
6328
+ cmd->rule_cnt = cnt;
6329
+
6330
+ return 0;
6331
+}
6332
+
6333
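+/* for aRFS: extract the match tuples from the kernel's dissected flow keys */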
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6334
+ struct hclge_fd_rule_tuples *tuples)
6335
+{
6336
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6337
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6338
+
6339
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6340
+ tuples->ip_proto = fkeys->basic.ip_proto;
6341
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6342
+
6343
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6344
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6345
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6346
+ } else {
6347
+ int i;
6348
+
6349
+ for (i = 0; i < IPV6_SIZE; i++) {
6350
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6351
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6352
+ }
6353
+ }
6354
+}
6355
+
6356
+/* traverse all rules, check whether an existing rule has the same tuples */
6357
+static struct hclge_fd_rule *
6358
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6359
+ const struct hclge_fd_rule_tuples *tuples)
6360
+{
6361
+ struct hclge_fd_rule *rule = NULL;
6362
+ struct hlist_node *node;
6363
+
6364
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6365
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6366
+ return rule;
6367
+ }
6368
+
6369
+ return NULL;
6370
+}
6371
+
6372
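+/* an aRFS rule matches on ip addresses, ip proto and dst port; MAC
+ * addresses, vlan tag, tos and src port are wildcarded
+ */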
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6373
+ struct hclge_fd_rule *rule)
6374
+{
6375
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6376
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6377
+ BIT(INNER_SRC_PORT);
6378
+ rule->action = 0;
6379
+ rule->vf_id = 0;
6380
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6381
+ if (tuples->ether_proto == ETH_P_IP) {
6382
+ if (tuples->ip_proto == IPPROTO_TCP)
6383
+ rule->flow_type = TCP_V4_FLOW;
6384
+ else
6385
+ rule->flow_type = UDP_V4_FLOW;
6386
+ } else {
6387
+ if (tuples->ip_proto == IPPROTO_TCP)
6388
+ rule->flow_type = TCP_V6_FLOW;
6389
+ else
6390
+ rule->flow_type = UDP_V6_FLOW;
6391
+ }
6392
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6393
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6394
+}
6395
+
6396
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6397
+ u16 flow_id, struct flow_keys *fkeys)
6398
+{
6399
+ struct hclge_vport *vport = hclge_get_vport(handle);
6400
+ struct hclge_fd_rule_tuples new_tuples = {};
6401
+ struct hclge_dev *hdev = vport->back;
6402
+ struct hclge_fd_rule *rule;
6403
+ u16 tmp_queue_id;
6404
+ u16 bit_id;
6405
+ int ret;
6406
+
6407
+ if (!hnae3_dev_fd_supported(hdev))
6408
+ return -EOPNOTSUPP;
6409
+
6410
+ /* when a user-configured fd rule already exists,
6411
+ * arfs should not be used
6412
+ */
6413
+ spin_lock_bh(&hdev->fd_rule_lock);
6414
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6415
+ spin_unlock_bh(&hdev->fd_rule_lock);
6416
+ return -EOPNOTSUPP;
6417
+ }
6418
+
6419
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6420
+
6421
+ /* check whether a flow director filter already exists for this flow:
6422
+ * if not, create a new filter for it;
6423
+ * if a filter exists with a different queue id, modify the filter;
6424
+ * if a filter exists with the same queue id, do nothing
6425
+ */
6426
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6427
+ if (!rule) {
6428
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6429
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6430
+ spin_unlock_bh(&hdev->fd_rule_lock);
6431
+ return -ENOSPC;
6432
+ }
6433
+
6434
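+ /* atomic allocation: fd_rule_lock is held with BHs disabled */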
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6435
+ if (!rule) {
6436
+ spin_unlock_bh(&hdev->fd_rule_lock);
6437
+ return -ENOMEM;
6438
+ }
6439
+
6440
+ set_bit(bit_id, hdev->fd_bmap);
6441
+ rule->location = bit_id;
6442
+ rule->flow_id = flow_id;
6443
+ rule->queue_id = queue_id;
6444
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
6445
+ ret = hclge_fd_config_rule(hdev, rule);
6446
+
6447
+ spin_unlock_bh(&hdev->fd_rule_lock);
6448
+
6449
+ if (ret)
6450
+ return ret;
6451
+
6452
+ return rule->location;
6453
+ }
6454
+
6455
+ spin_unlock_bh(&hdev->fd_rule_lock);
6456
+
6457
+ if (rule->queue_id == queue_id)
6458
+ return rule->location;
6459
+
6460
+ tmp_queue_id = rule->queue_id;
6461
+ rule->queue_id = queue_id;
6462
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6463
+ if (ret) {
6464
+ rule->queue_id = tmp_queue_id;
6465
+ return ret;
6466
+ }
6467
+
6468
+ return rule->location;
6469
+}
6470
+
6471
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6472
+{
6473
+#ifdef CONFIG_RFS_ACCEL
6474
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
6475
+ struct hclge_fd_rule *rule;
6476
+ struct hlist_node *node;
6477
+ HLIST_HEAD(del_list);
6478
+
6479
+ spin_lock_bh(&hdev->fd_rule_lock);
6480
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6481
+ spin_unlock_bh(&hdev->fd_rule_lock);
6482
+ return;
6483
+ }
6484
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6485
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6486
+ rule->flow_id, rule->location)) {
6487
+ hlist_del_init(&rule->rule_node);
6488
+ hlist_add_head(&rule->rule_node, &del_list);
6489
+ hdev->hclge_fd_rule_num--;
6490
+ clear_bit(rule->location, hdev->fd_bmap);
6491
+ }
6492
+ }
6493
+ spin_unlock_bh(&hdev->fd_rule_lock);
6494
+
6495
+ hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6496
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6497
+ rule->location, NULL, false);
6498
+ kfree(rule);
6499
+ }
6500
+#endif
6501
+}
6502
+
6503
+/* must be called with fd_rule_lock held */
6504
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6505
+{
6506
+#ifdef CONFIG_RFS_ACCEL
6507
+ struct hclge_vport *vport = hclge_get_vport(handle);
6508
+ struct hclge_dev *hdev = vport->back;
6509
+
6510
+ if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6511
+ hclge_del_all_fd_entries(handle, true);
6512
+#endif
6513
+}
6514
+
6515
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6516
+{
6517
+ struct hclge_vport *vport = hclge_get_vport(handle);
6518
+ struct hclge_dev *hdev = vport->back;
6519
+
6520
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6521
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6522
+}
6523
+
6524
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6525
+{
6526
+ struct hclge_vport *vport = hclge_get_vport(handle);
6527
+ struct hclge_dev *hdev = vport->back;
6528
+
6529
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6530
+}
6531
+
6532
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6533
+{
6534
+ struct hclge_vport *vport = hclge_get_vport(handle);
6535
+ struct hclge_dev *hdev = vport->back;
6536
+
6537
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6538
+}
6539
+
6540
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6541
+{
6542
+ struct hclge_vport *vport = hclge_get_vport(handle);
6543
+ struct hclge_dev *hdev = vport->back;
6544
+
6545
+ return hdev->rst_stats.hw_reset_done_cnt;
6546
+}
6547
+
6548
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6549
+{
6550
+ struct hclge_vport *vport = hclge_get_vport(handle);
6551
+ struct hclge_dev *hdev = vport->back;
6552
+ bool clear;
6553
+
6554
+ hdev->fd_en = enable;
6555
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6556
+
6557
+ if (!enable) {
6558
+ spin_lock_bh(&hdev->fd_rule_lock);
6559
+ hclge_del_all_fd_entries(handle, clear);
6560
+ spin_unlock_bh(&hdev->fd_rule_lock);
6561
+ } else {
6562
+ hclge_restore_fd_entries(handle);
6563
+ }
36166564 }
36176565
36186566 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
36196567 {
6568
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
6569
+
36206570 struct hclge_desc desc;
36216571 struct hclge_config_mac_mode_cmd *req =
36226572 (struct hclge_config_mac_mode_cmd *)desc.data;
....@@ -3624,29 +6574,130 @@
36246574 int ret;
36256575
36266576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3627
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3628
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3629
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3630
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3631
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3632
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3633
- hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3634
- hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3635
- hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3636
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3637
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3638
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3639
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3640
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
6577
+
6578
+ if (enable) {
6579
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6580
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6581
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6582
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6583
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6584
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6585
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6586
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6587
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6588
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6589
+ }
6590
+
36416591 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6592
+
6593
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6594
+ if (ret) {
6595
+ dev_err(&hdev->pdev->dev,
6596
+ "mac enable fail, ret =%d.\n", ret);
6597
+ return;
6598
+ }
6599
+
6600
+ if (!enable)
6601
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
6602
+ HCLGE_LINK_STATUS_WAIT_CNT);
6603
+}
6604
+
6605
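+/* read-modify-write of the per-function switch parameter: only the bits
+ * selected by param_mask are changed
+ */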
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6606
+ u8 switch_param, u8 param_mask)
6607
+{
6608
+ struct hclge_mac_vlan_switch_cmd *req;
6609
+ struct hclge_desc desc;
6610
+ u32 func_id;
6611
+ int ret;
6612
+
6613
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6614
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6615
+
6616
+ /* read current config parameter */
6617
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6618
+ true);
6619
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6620
+ req->func_id = cpu_to_le32(func_id);
6621
+
6622
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6623
+ if (ret) {
6624
+ dev_err(&hdev->pdev->dev,
6625
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
6626
+ return ret;
6627
+ }
6628
+
6629
+ /* modify and write new config parameter */
6630
+ hclge_cmd_reuse_desc(&desc, false);
6631
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
6632
+ req->param_mask = param_mask;
36426633
36436634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
36446635 if (ret)
36456636 dev_err(&hdev->pdev->dev,
3646
- "mac enable fail, ret =%d.\n", ret);
6637
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
6638
+ return ret;
36476639 }
36486640
3649
-static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
6641
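+/* poll the phy until its link state matches link_ret, giving up after
+ * HCLGE_PHY_LINK_STATUS_NUM polls of HCLGE_LINK_STATUS_MS each
+ */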
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6642
+ int link_ret)
6643
+{
6644
+#define HCLGE_PHY_LINK_STATUS_NUM 200
6645
+
6646
+ struct phy_device *phydev = hdev->hw.mac.phydev;
6647
+ int i = 0;
6648
+ int ret;
6649
+
6650
+ do {
6651
+ ret = phy_read_status(phydev);
6652
+ if (ret) {
6653
+ dev_err(&hdev->pdev->dev,
6654
+ "phy update link status fail, ret = %d\n", ret);
6655
+ return;
6656
+ }
6657
+
6658
+ if (phydev->link == link_ret)
6659
+ break;
6660
+
6661
+ msleep(HCLGE_LINK_STATUS_MS);
6662
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6663
+}
6664
+
6665
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
6666
+ int wait_cnt)
6667
+{
6668
+ int link_status;
6669
+ int i = 0;
6670
+ int ret;
6671
+
6672
+ do {
6673
+ ret = hclge_get_mac_link_status(hdev, &link_status);
6674
+ if (ret)
6675
+ return ret;
6676
+ if (link_status == link_ret)
6677
+ return 0;
6678
+
6679
+ msleep(HCLGE_LINK_STATUS_MS);
6680
+ } while (++i < wait_cnt);
6681
+ return -EBUSY;
6682
+}
6683
+
6684
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6685
+ bool is_phy)
6686
+{
6687
+#define HCLGE_MAC_LINK_STATUS_NUM 100
6688
+
6689
+ int link_ret;
6690
+
6691
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6692
+
6693
+ if (is_phy)
6694
+ hclge_phy_link_status_wait(hdev, link_ret);
6695
+
6696
+ return hclge_mac_link_status_wait(hdev, link_ret,
6697
+ HCLGE_MAC_LINK_STATUS_NUM);
6698
+}
6699
+
6700
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
36506701 {
36516702 struct hclge_config_mac_mode_cmd *req;
36526703 struct hclge_desc desc;
....@@ -3666,8 +6717,6 @@
36666717 /* 2 Then setup the loopback flag */
36676718 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
36686719 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
3669
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
3670
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
36716720
36726721 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
36736722
....@@ -3682,22 +6731,38 @@
36826731 return ret;
36836732 }
36846733
3685
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
6734
+static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6735
+ enum hnae3_loop loop_mode)
36866736 {
36876737 #define HCLGE_SERDES_RETRY_MS 10
36886738 #define HCLGE_SERDES_RETRY_NUM 100
6739
+
36896740 struct hclge_serdes_lb_cmd *req;
36906741 struct hclge_desc desc;
36916742 int ret, i = 0;
6743
+ u8 loop_mode_b;
36926744
3693
- req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
6745
+ req = (struct hclge_serdes_lb_cmd *)desc.data;
36946746 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
36956747
6748
+ switch (loop_mode) {
6749
+ case HNAE3_LOOP_SERIAL_SERDES:
6750
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6751
+ break;
6752
+ case HNAE3_LOOP_PARALLEL_SERDES:
6753
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6754
+ break;
6755
+ default:
6756
+ dev_err(&hdev->pdev->dev,
6757
+ "unsupported serdes loopback mode %d\n", loop_mode);
6758
+ return -ENOTSUPP;
6759
+ }
6760
+
36966761 if (en) {
3697
- req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
3698
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6762
+ req->enable = loop_mode_b;
6763
+ req->mask = loop_mode_b;
36996764 } else {
3700
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6765
+ req->mask = loop_mode_b;
37016766 }
37026767
37036768 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -3727,12 +6792,87 @@
37276792 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
37286793 return -EIO;
37296794 }
3730
-
3731
- hclge_cfg_mac_mode(hdev, en);
3732
- return 0;
6795
+ return ret;
37336796 }
37346797
3735
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6798
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6799
+ enum hnae3_loop loop_mode)
6800
+{
6801
+ int ret;
6802
+
6803
+ ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6804
+ if (ret)
6805
+ return ret;
6806
+
6807
+ hclge_cfg_mac_mode(hdev, en);
6808
+
6809
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6810
+ if (ret)
6811
+ dev_err(&hdev->pdev->dev,
6812
+ "serdes loopback config mac mode timeout\n");
6813
+
6814
+ return ret;
6815
+}
6816
+
6817
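+/* the suspend/resume cycle brings the phy back to a known state before
+ * loopback is enabled
+ */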
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6818
+ struct phy_device *phydev)
6819
+{
6820
+ int ret;
6821
+
6822
+ if (!phydev->suspended) {
6823
+ ret = phy_suspend(phydev);
6824
+ if (ret)
6825
+ return ret;
6826
+ }
6827
+
6828
+ ret = phy_resume(phydev);
6829
+ if (ret)
6830
+ return ret;
6831
+
6832
+ return phy_loopback(phydev, true);
6833
+}
6834
+
6835
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6836
+ struct phy_device *phydev)
6837
+{
6838
+ int ret;
6839
+
6840
+ ret = phy_loopback(phydev, false);
6841
+ if (ret)
6842
+ return ret;
6843
+
6844
+ return phy_suspend(phydev);
6845
+}
6846
+
6847
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6848
+{
6849
+ struct phy_device *phydev = hdev->hw.mac.phydev;
6850
+ int ret;
6851
+
6852
+ if (!phydev)
6853
+ return -ENOTSUPP;
6854
+
6855
+ if (en)
6856
+ ret = hclge_enable_phy_loopback(hdev, phydev);
6857
+ else
6858
+ ret = hclge_disable_phy_loopback(hdev, phydev);
6859
+ if (ret) {
6860
+ dev_err(&hdev->pdev->dev,
6861
+ "set phy loopback fail, ret = %d\n", ret);
6862
+ return ret;
6863
+ }
6864
+
6865
+ hclge_cfg_mac_mode(hdev, en);
6866
+
6867
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6868
+ if (ret)
6869
+ dev_err(&hdev->pdev->dev,
6870
+ "phy loopback config mac mode timeout\n");
6871
+
6872
+ return ret;
6873
+}
6874
+
6875
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
37366876 int stream_id, bool enable)
37376877 {
37386878 struct hclge_desc desc;
....@@ -3743,7 +6883,8 @@
37436883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
37446884 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
37456885 req->stream_id = cpu_to_le16(stream_id);
3746
- req->enable |= enable << HCLGE_TQP_ENABLE_B;
6886
+ if (enable)
6887
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
37476888
37486889 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
37496890 if (ret)
....@@ -3756,15 +6897,34 @@
37566897 enum hnae3_loop loop_mode, bool en)
37576898 {
37586899 struct hclge_vport *vport = hclge_get_vport(handle);
6900
+ struct hnae3_knic_private_info *kinfo;
37596901 struct hclge_dev *hdev = vport->back;
37606902 int i, ret;
37616903
6904
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6905
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6906
+ * the same, the packets are looped back in the SSU. If SSU loopback
6907
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6908
+ */
6909
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6910
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6911
+
6912
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6913
+ HCLGE_SWITCH_ALW_LPBK_MASK);
6914
+ if (ret)
6915
+ return ret;
6916
+ }
6917
+
37626918 switch (loop_mode) {
3763
- case HNAE3_MAC_INTER_LOOP_MAC:
3764
- ret = hclge_set_mac_loopback(hdev, en);
6919
+ case HNAE3_LOOP_APP:
6920
+ ret = hclge_set_app_loopback(hdev, en);
37656921 break;
3766
- case HNAE3_MAC_INTER_LOOP_SERDES:
3767
- ret = hclge_set_serdes_loopback(hdev, en);
6922
+ case HNAE3_LOOP_SERIAL_SERDES:
6923
+ case HNAE3_LOOP_PARALLEL_SERDES:
6924
+ ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6925
+ break;
6926
+ case HNAE3_LOOP_PHY:
6927
+ ret = hclge_set_phy_loopback(hdev, en);
37686928 break;
37696929 default:
37706930 ret = -ENOTSUPP;
....@@ -3773,7 +6933,11 @@
37736933 break;
37746934 }
37756935
3776
- for (i = 0; i < vport->alloc_tqps; i++) {
6936
+ if (ret)
6937
+ return ret;
6938
+
6939
+ kinfo = &vport->nic.kinfo;
6940
+ for (i = 0; i < kinfo->num_tqps; i++) {
37776941 ret = hclge_tqp_enable(hdev, i, 0, en);
37786942 if (ret)
37796943 return ret;
....@@ -3782,17 +6946,65 @@
37826946 return 0;
37836947 }
37846948
6949
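+/* make sure app and both serdes loopback modes start out disabled */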
+static int hclge_set_default_loopback(struct hclge_dev *hdev)
6950
+{
6951
+ int ret;
6952
+
6953
+ ret = hclge_set_app_loopback(hdev, false);
6954
+ if (ret)
6955
+ return ret;
6956
+
6957
+ ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6958
+ if (ret)
6959
+ return ret;
6960
+
6961
+ return hclge_cfg_serdes_loopback(hdev, false,
6962
+ HNAE3_LOOP_PARALLEL_SERDES);
6963
+}
6964
+
37856965 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
37866966 {
37876967 struct hclge_vport *vport = hclge_get_vport(handle);
6968
+ struct hnae3_knic_private_info *kinfo;
37886969 struct hnae3_queue *queue;
37896970 struct hclge_tqp *tqp;
37906971 int i;
37916972
3792
- for (i = 0; i < vport->alloc_tqps; i++) {
6973
+ kinfo = &vport->nic.kinfo;
6974
+ for (i = 0; i < kinfo->num_tqps; i++) {
37936975 queue = handle->kinfo.tqp[i];
37946976 tqp = container_of(queue, struct hclge_tqp, q);
37956977 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6978
+ }
6979
+}
6980
+
6981
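+/* wait for an in-flight link update in the service task to complete */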
+static void hclge_flush_link_update(struct hclge_dev *hdev)
6982
+{
6983
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6984
+
6985
+ unsigned long last = hdev->serv_processed_cnt;
6986
+ int i = 0;
6987
+
6988
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6989
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6990
+ last == hdev->serv_processed_cnt)
6991
+ usleep_range(1, 1);
6992
+}
6993
+
6994
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6995
+{
6996
+ struct hclge_vport *vport = hclge_get_vport(handle);
6997
+ struct hclge_dev *hdev = vport->back;
6998
+
6999
+ if (enable) {
7000
+ hclge_task_schedule(hdev, 0);
7001
+ } else {
7002
+ /* Set the DOWN flag here to disable link updating */
7003
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
7004
+
7005
+ /* flush memory to make sure DOWN is seen by service task */
7006
+ smp_mb__before_atomic();
7007
+ hclge_flush_link_update(hdev);
37967008 }
37977009 }
37987010
....@@ -3800,15 +7012,10 @@
38007012 {
38017013 struct hclge_vport *vport = hclge_get_vport(handle);
38027014 struct hclge_dev *hdev = vport->back;
3803
- int i;
3804
-
3805
- for (i = 0; i < vport->alloc_tqps; i++)
3806
- hclge_tqp_enable(hdev, i, 0, true);
38077015
38087016 /* mac enable */
38097017 hclge_cfg_mac_mode(hdev, true);
38107018 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3811
- mod_timer(&hdev->service_timer, jiffies + HZ);
38127019 hdev->hw.mac.link = 0;
38137020
38147021 /* reset tqp stats */
....@@ -3826,18 +7033,28 @@
38267033 int i;
38277034
38287035 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7036
+ spin_lock_bh(&hdev->fd_rule_lock);
7037
+ hclge_clear_arfs_rules(handle);
7038
+ spin_unlock_bh(&hdev->fd_rule_lock);
38297039
3830
- del_timer_sync(&hdev->service_timer);
3831
- cancel_work_sync(&hdev->service_task);
3832
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3833
-
7040
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
7041
+ * so we only need to stop the phy here.
7042
+ */
38347043 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
3835
- hclge_mac_stop_phy(hdev);
3836
- return;
7044
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
7045
+ HCLGE_PFC_DISABLE);
7046
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
7047
+ hdev->reset_type != HNAE3_FLR_RESET) {
7048
+ hclge_mac_stop_phy(hdev);
7049
+ hclge_update_link_status(hdev);
7050
+ return;
7051
+ }
38377052 }
38387053
3839
- for (i = 0; i < vport->alloc_tqps; i++)
3840
- hclge_tqp_enable(hdev, i, 0, false);
7054
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
7055
+ hclge_reset_tqp(handle, i);
7056
+
7057
+ hclge_config_mac_tnl_int(hdev, false);
38417058
38427059 /* Mac disable */
38437060 hclge_cfg_mac_mode(hdev, false);
....@@ -3846,9 +7063,47 @@
38467063
38477064 /* reset tqp stats */
38487065 hclge_reset_tqp_stats(handle);
3849
- del_timer_sync(&hdev->service_timer);
3850
- cancel_work_sync(&hdev->service_task);
38517066 hclge_update_link_status(hdev);
7067
+}
7068
+
7069
+int hclge_vport_start(struct hclge_vport *vport)
7070
+{
7071
+ struct hclge_dev *hdev = vport->back;
7072
+
7073
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7074
+ vport->last_active_jiffies = jiffies;
7075
+
7076
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7077
+ if (vport->vport_id) {
7078
+ hclge_restore_mac_table_common(vport);
7079
+ hclge_restore_vport_vlan_table(vport);
7080
+ } else {
7081
+ hclge_restore_hw_table(hdev);
7082
+ }
7083
+ }
7084
+
7085
+ clear_bit(vport->vport_id, hdev->vport_config_block);
7086
+
7087
+ return 0;
7088
+}
7089
+
7090
+void hclge_vport_stop(struct hclge_vport *vport)
7091
+{
7092
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7093
+}
7094
+
7095
+static int hclge_client_start(struct hnae3_handle *handle)
7096
+{
7097
+ struct hclge_vport *vport = hclge_get_vport(handle);
7098
+
7099
+ return hclge_vport_start(vport);
7100
+}
7101
+
7102
+static void hclge_client_stop(struct hnae3_handle *handle)
7103
+{
7104
+ struct hclge_vport *vport = hclge_get_vport(handle);
7105
+
7106
+ hclge_vport_stop(vport);
38527107 }
38537108
38547109 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
....@@ -3856,74 +7111,70 @@
38567111 enum hclge_mac_vlan_tbl_opcode op)
38577112 {
38587113 struct hclge_dev *hdev = vport->back;
3859
- int return_status = -EIO;
38607114
38617115 if (cmdq_resp) {
38627116 dev_err(&hdev->pdev->dev,
3863
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
7117
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
38647118 cmdq_resp);
38657119 return -EIO;
38667120 }
38677121
38687122 if (op == HCLGE_MAC_VLAN_ADD) {
3869
- if ((!resp_code) || (resp_code == 1)) {
3870
- return_status = 0;
3871
- } else if (resp_code == 2) {
3872
- return_status = -ENOSPC;
3873
- dev_err(&hdev->pdev->dev,
3874
- "add mac addr failed for uc_overflow.\n");
3875
- } else if (resp_code == 3) {
3876
- return_status = -ENOSPC;
3877
- dev_err(&hdev->pdev->dev,
3878
- "add mac addr failed for mc_overflow.\n");
3879
- } else {
3880
- dev_err(&hdev->pdev->dev,
3881
- "add mac addr failed for undefined, code=%d.\n",
3882
- resp_code);
3883
- }
7123
+ if (!resp_code || resp_code == 1)
7124
+ return 0;
7125
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7126
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
7127
+ return -ENOSPC;
7128
+
7129
+ dev_err(&hdev->pdev->dev,
7130
+ "add mac addr failed for undefined, code=%u.\n",
7131
+ resp_code);
7132
+ return -EIO;
38847133 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
38857134 if (!resp_code) {
3886
- return_status = 0;
7135
+ return 0;
38877136 } else if (resp_code == 1) {
3888
- return_status = -ENOENT;
38897137 dev_dbg(&hdev->pdev->dev,
38907138 "remove mac addr failed for miss.\n");
3891
- } else {
3892
- dev_err(&hdev->pdev->dev,
3893
- "remove mac addr failed for undefined, code=%d.\n",
3894
- resp_code);
7139
+ return -ENOENT;
38957140 }
7141
+
7142
+ dev_err(&hdev->pdev->dev,
7143
+ "remove mac addr failed for undefined, code=%u.\n",
7144
+ resp_code);
7145
+ return -EIO;
38967146 } else if (op == HCLGE_MAC_VLAN_LKUP) {
38977147 if (!resp_code) {
3898
- return_status = 0;
7148
+ return 0;
38997149 } else if (resp_code == 1) {
3900
- return_status = -ENOENT;
39017150 dev_dbg(&hdev->pdev->dev,
39027151 "lookup mac addr failed for miss.\n");
3903
- } else {
3904
- dev_err(&hdev->pdev->dev,
3905
- "lookup mac addr failed for undefined, code=%d.\n",
3906
- resp_code);
7152
+ return -ENOENT;
39077153 }
3908
- } else {
3909
- return_status = -EINVAL;
7154
+
39107155 dev_err(&hdev->pdev->dev,
3911
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3912
- op);
7156
+ "lookup mac addr failed for undefined, code=%u.\n",
7157
+ resp_code);
7158
+ return -EIO;
39137159 }
39147160
3915
- return return_status;
7161
+ dev_err(&hdev->pdev->dev,
7162
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7163
+
7164
+ return -EINVAL;
39167165 }
39177166
39187167 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
39197168 {
3920
- int word_num;
3921
- int bit_num;
7169
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7170
+
7171
+ unsigned int word_num;
7172
+ unsigned int bit_num;
39227173
39237174 if (vfid > 255 || vfid < 0)
39247175 return -EIO;
39257176
3926
-	if (vfid >= 0 && vfid <= 191) {
+	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
 		word_num = vfid / 32;
 		bit_num = vfid % 32;
 		if (clr)
@@ -3931,7 +7182,7 @@
 		else
 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
 	} else {
-		word_num = (vfid - 192) / 32;
+		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
 		bit_num = vfid % 32;
 		if (clr)
 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
@@ -3957,183 +7208,21 @@
 }
 
 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
-				   const u8 *addr)
+				   const u8 *addr, bool is_mc)
 {
 	const unsigned char *mac_addr = addr;
 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
 	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
 
+	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+	if (is_mc) {
+		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+	}
+
 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
-}
-
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
-					   const u8 *addr)
-{
-	u16 high_val = addr[1] | (addr[0] << 8);
-	struct hclge_dev *hdev = vport->back;
-	u32 rsh = 4 - hdev->mta_mac_sel_type;
-	u16 ret_val = (high_val >> rsh) & 0xfff;
-
-	return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
-				     enum hclge_mta_dmac_sel_type mta_mac_sel,
-				     bool enable)
-{
-	struct hclge_mta_filter_mode_cmd *req;
-	struct hclge_desc desc;
-	int ret;
-
-	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
-	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
-		      enable);
-	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
-			HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev,
-			"Config mat filter mode failed for cmd_send, ret =%d.\n",
-			ret);
-
-	return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
-			      u8 func_id,
-			      bool enable)
-{
-	struct hclge_cfg_func_mta_filter_cmd *req;
-	struct hclge_desc desc;
-	int ret;
-
-	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
-	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
-		      enable);
-	req->function_id = func_id;
-
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev,
-			"Config func_id enable failed for cmd_send, ret =%d.\n",
-			ret);
-
-	return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
-				    u16 idx,
-				    bool enable)
-{
-	struct hclge_dev *hdev = vport->back;
-	struct hclge_cfg_func_mta_item_cmd *req;
-	struct hclge_desc desc;
-	u16 item_idx = 0;
-	int ret;
-
-	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
-	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
-	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
-			HCLGE_CFG_MTA_ITEM_IDX_S, idx);
-	req->item_idx = cpu_to_le16(item_idx);
-
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Config mta table item failed for cmd_send, ret =%d.\n",
-			ret);
-		return ret;
-	}
-
-	if (enable)
-		set_bit(idx, vport->mta_shadow);
-	else
-		clear_bit(idx, vport->mta_shadow);
-
-	return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
-	unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct net_device *netdev = handle->kinfo.netdev;
-	struct netdev_hw_addr *ha;
-	u16 tbl_idx;
-
-	memset(mta_status, 0, sizeof(mta_status));
-
-	/* update mta_status from mc addr list */
-	netdev_for_each_mc_addr(ha, netdev) {
-		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
-		set_bit(tbl_idx, mta_status);
-	}
-
-	return hclge_update_mta_status_common(vport, mta_status,
-					      0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
-				   unsigned long *status,
-				   u16 idx,
-				   u16 count,
-				   bool update_filter)
-{
-	struct hclge_dev *hdev = vport->back;
-	u16 update_max = idx + count;
-	u16 check_max;
-	int ret = 0;
-	bool used;
-	u16 i;
-
-	/* setup mta check range */
-	if (update_filter) {
-		i = 0;
-		check_max = HCLGE_MTA_TBL_SIZE;
-	} else {
-		i = idx;
-		check_max = update_max;
-	}
-
-	used = false;
-	/* check and update all mta item */
-	for (; i < check_max; i++) {
-		/* ignore unused item */
-		if (!test_bit(i, vport->mta_shadow))
-			continue;
-
-		/* if i in update range then update it */
-		if (i >= idx && i < update_max)
-			if (!test_bit(i - idx, status))
-				hclge_set_mta_table_item(vport, i, false);
-
-		if (!used && test_bit(i, vport->mta_shadow))
-			used = true;
-	}
-
-	/* no longer use mta, disable it */
-	if (vport->accept_mta_mc && update_filter && !used) {
-		ret = hclge_cfg_func_mta_filter(hdev,
-						vport->vport_id,
-						false);
-		if (ret)
-			dev_err(&hdev->pdev->dev,
-				"disable func mta filter fail ret=%d\n",
-				ret);
-		else
-			vport->accept_mta_mc = false;
-	}
-
-	return ret;
 }
 
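The packing in hclge_prepare_mac_addr() above splits a 6-byte MAC address across a 32-bit and a 16-bit table field, with byte 0 in the least-significant position of the high word, before the driver byte-swaps both values with cpu_to_le32()/cpu_to_le16(). A minimal standalone sketch of the same byte arithmetic (plain C, not driver code; the helper name pack_mac_addr is invented for illustration):

#include <stdint.h>
#include <stdio.h>

static void pack_mac_addr(const uint8_t *mac, uint32_t *hi32, uint16_t *lo16)
{
	/* same bit arithmetic as the kernel code above */
	*hi32 = (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24 |
		mac[0] | (uint32_t)mac[1] << 8;
	*lo16 = (uint16_t)(mac[4] | mac[5] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	uint32_t hi;
	uint16_t lo;

	pack_mac_addr(mac, &hi, &lo);
	/* prints hi32=0x3c211b00 lo16=0x5e4d for 00:1b:21:3c:4d:5e */
	printf("hi32=0x%08x lo16=0x%04x\n", hi, lo);
	return 0;
}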
 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
@@ -4259,12 +7348,203 @@
 	return cfg_status;
 }
 
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+			       u16 *allocated_size)
+{
+	struct hclge_umv_spc_alc_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+
+	req->space_size = cpu_to_le32(space_size);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	*allocated_size = le32_to_cpu(desc.data[1]);
+
+	return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+	u16 allocated_size = 0;
+	int ret;
+
+	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+	if (ret)
+		return ret;
+
+	if (allocated_size < hdev->wanted_umv_size)
+		dev_warn(&hdev->pdev->dev,
+			 "failed to alloc umv space, want %u, get %u\n",
+			 hdev->wanted_umv_size, allocated_size);
+
+	hdev->max_umv_size = allocated_size;
+	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+	hdev->share_umv_size = hdev->priv_umv_size +
+			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+
+	return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport;
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		vport = &hdev->vport[i];
+		vport->used_umv_num = 0;
+	}
+
+	mutex_lock(&hdev->vport_lock);
+	hdev->share_umv_size = hdev->priv_umv_size +
+			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+	mutex_unlock(&hdev->vport_lock);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
+{
+	struct hclge_dev *hdev = vport->back;
+	bool is_full;
+
+	if (need_lock)
+		mutex_lock(&hdev->vport_lock);
+
+	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+		   hdev->share_umv_size == 0);
+
+	if (need_lock)
+		mutex_unlock(&hdev->vport_lock);
+
+	return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+	struct hclge_dev *hdev = vport->back;
+
+	if (is_free) {
+		if (vport->used_umv_num > hdev->priv_umv_size)
+			hdev->share_umv_size++;
+
+		if (vport->used_umv_num > 0)
+			vport->used_umv_num--;
+	} else {
+		if (vport->used_umv_num >= hdev->priv_umv_size &&
+		    hdev->share_umv_size > 0)
+			hdev->share_umv_size--;
+		vport->used_umv_num++;
+	}
+}
+
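The accounting above divides the allocated UMV space into num_alloc_vport + 1 equal slices: each vport keeps one slice as a private quota, and the remaining slice plus the division remainder seed a shared pool that any vport may spill into once its quota is exhausted. A standalone sketch of that arithmetic (plain C, not driver code; struct and function names are invented):

#include <stdbool.h>
#include <stdio.h>

struct umv {
	unsigned int priv_size;   /* per-function private quota */
	unsigned int share_size;  /* shared leftover pool */
};

struct func {
	unsigned int used;        /* entries consumed by this function */
};

static void umv_init(struct umv *u, unsigned int max, unsigned int nslices)
{
	/* mirrors hclge_init_umv_space(): one slice each, the extra
	 * slice plus the remainder become the shared pool
	 */
	u->priv_size = max / nslices;
	u->share_size = u->priv_size + max % nslices;
}

static bool umv_alloc(struct umv *u, struct func *f)
{
	if (f->used >= u->priv_size && u->share_size == 0)
		return false;              /* table full for this function */
	if (f->used >= u->priv_size)
		u->share_size--;           /* spill into the shared pool */
	f->used++;
	return true;
}

static void umv_free(struct umv *u, struct func *f)
{
	if (f->used > u->priv_size)
		u->share_size++;           /* return a shared entry first */
	if (f->used > 0)
		f->used--;
}

int main(void)
{
	struct umv u;
	struct func vf = { 0 };

	/* e.g. 256 entries over 8 slices: quota 32, shared pool 32 */
	umv_init(&u, 256, 8);
	while (umv_alloc(&u, &vf))
		;                          /* one vport can take 32 + 32 */
	printf("used=%u share=%u\n", vf.used, u.share_size); /* 64, 0 */
	umv_free(&u, &vf);
	return 0;
}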
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+						  const u8 *mac_addr)
+{
+	struct hclge_mac_node *mac_node, *tmp;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node)
+		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+			return mac_node;
+
+	return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+				  enum HCLGE_MAC_NODE_STATE state)
+{
+	switch (state) {
+	/* from set_rx_mode or tmp_add_list */
+	case HCLGE_MAC_TO_ADD:
+		if (mac_node->state == HCLGE_MAC_TO_DEL)
+			mac_node->state = HCLGE_MAC_ACTIVE;
+		break;
+	/* only from set_rx_mode */
+	case HCLGE_MAC_TO_DEL:
+		if (mac_node->state == HCLGE_MAC_TO_ADD) {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			mac_node->state = HCLGE_MAC_TO_DEL;
+		}
+		break;
+	/* only from tmp_add_list, the mac_node->state won't be
+	 * ACTIVE.
+	 */
+	case HCLGE_MAC_ACTIVE:
+		if (mac_node->state == HCLGE_MAC_TO_ADD)
+			mac_node->state = HCLGE_MAC_ACTIVE;
+
+		break;
+	}
+}
+
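The switch above is a small three-state machine: a node is TO_ADD (not yet written to hardware), ACTIVE (written), or TO_DEL (written, pending removal), and a repeated request either collapses the node back to ACTIVE, flags it for deletion, or frees it outright. A standalone sketch of the same transitions (plain C, not driver code; enum and function names are invented):

#include <stdio.h>

enum mac_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };
enum mac_action { KEEP, FREE_NODE };

static enum mac_action update_mac_node(enum mac_state *cur, enum mac_state req)
{
	switch (req) {
	case MAC_TO_ADD:                   /* add request for a known node */
		if (*cur == MAC_TO_DEL)
			*cur = MAC_ACTIVE; /* cancel the pending delete */
		return KEEP;
	case MAC_TO_DEL:                   /* delete request */
		if (*cur == MAC_TO_ADD)
			return FREE_NODE;  /* never reached hardware: drop */
		*cur = MAC_TO_DEL;
		return KEEP;
	case MAC_ACTIVE:                   /* hardware write succeeded */
		if (*cur == MAC_TO_ADD)
			*cur = MAC_ACTIVE;
		return KEEP;
	}
	return KEEP;
}

int main(void)
{
	enum mac_state s = MAC_TO_ADD;

	/* a TO_ADD node deleted before the sync task ran is just freed */
	printf("%d\n", update_mac_node(&s, MAC_TO_DEL) == FREE_NODE);
	return 0;
}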
+int hclge_update_mac_list(struct hclge_vport *vport,
+			  enum HCLGE_MAC_NODE_STATE state,
+			  enum HCLGE_MAC_ADDR_TYPE mac_type,
+			  const unsigned char *addr)
+{
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_mac_node *mac_node;
+	struct list_head *list;
+
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+	       &vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	/* if the mac addr is already in the mac list, no need to add a new
+	 * one into it, just check the mac addr state, convert it to a new
+	 * state, or just remove it, or do nothing.
+	 */
+	mac_node = hclge_find_mac_node(list, addr);
+	if (mac_node) {
+		hclge_update_mac_node(mac_node, state);
+		spin_unlock_bh(&vport->mac_list_lock);
+		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+		return 0;
+	}
+
+	/* if this address is never added, unnecessary to delete */
+	if (state == HCLGE_MAC_TO_DEL) {
+		spin_unlock_bh(&vport->mac_list_lock);
+		dev_err(&hdev->pdev->dev,
+			"failed to delete address %pM from mac list\n",
+			addr);
+		return -ENOENT;
+	}
+
+	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+	if (!mac_node) {
+		spin_unlock_bh(&vport->mac_list_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+	mac_node->state = state;
+	ether_addr_copy(mac_node->mac_addr, addr);
+	list_add_tail(&mac_node->node, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	return 0;
+}
+
 static int hclge_add_uc_addr(struct hnae3_handle *handle,
 			     const unsigned char *addr)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_add_uc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+				     addr);
 }
 
 int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -4282,41 +7562,47 @@
 	    is_multicast_ether_addr(addr)) {
 		dev_err(&hdev->pdev->dev,
 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
-			addr,
-			is_zero_ether_addr(addr),
+			addr, is_zero_ether_addr(addr),
 			is_broadcast_ether_addr(addr),
 			is_multicast_ether_addr(addr));
 		return -EINVAL;
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
 
 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
 
 	req.egress_port = cpu_to_le16(egress_port);
 
-	hclge_prepare_mac_addr(&req, addr);
+	hclge_prepare_mac_addr(&req, addr, false);
 
 	/* Lookup the mac address in the mac_vlan table, and add
 	 * it if the entry is inexistent. Repeated unicast entry
 	 * is not allowed in the mac vlan table.
 	 */
 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
-	if (ret == -ENOENT)
-		return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+	if (ret == -ENOENT) {
+		mutex_lock(&hdev->vport_lock);
+		if (!hclge_is_umv_space_full(vport, false)) {
+			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+			if (!ret)
+				hclge_update_umv_space(vport, false);
+			mutex_unlock(&hdev->vport_lock);
+			return ret;
+		}
+		mutex_unlock(&hdev->vport_lock);
 
-	/* check if we just hit the duplicate */
-	if (!ret) {
-		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
-			 vport->vport_id, addr);
-		return 0;
+		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+				hdev->priv_umv_size);
+
+		return -ENOSPC;
 	}
 
-	dev_err(&hdev->pdev->dev,
-		"PF failed to add unicast entry(%pM) in the MAC table\n",
-		addr);
+	/* check if we just hit the duplicate */
+	if (!ret)
+		return -EEXIST;
 
 	return ret;
 }
@@ -4326,7 +7612,8 @@
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_rm_uc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+				     addr);
 }
 
 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -4340,17 +7627,21 @@
 	if (is_zero_ether_addr(addr) ||
 	    is_broadcast_ether_addr(addr) ||
 	    is_multicast_ether_addr(addr)) {
-		dev_dbg(&hdev->pdev->dev,
-			"Remove mac err! invalid mac:%pM.\n",
-			addr);
+		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
+			addr);
 		return -EINVAL;
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hclge_prepare_mac_addr(&req, addr);
+	hclge_prepare_mac_addr(&req, addr, false);
 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
+	if (!ret || ret == -ENOENT) {
+		mutex_lock(&hdev->vport_lock);
+		hclge_update_umv_space(vport, true);
+		mutex_unlock(&hdev->vport_lock);
+		return 0;
+	}
 
 	return ret;
 }
@@ -4360,7 +7651,8 @@
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_add_mc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+				     addr);
 }
 
 int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -4369,7 +7661,6 @@
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_mac_vlan_tbl_entry_cmd req;
 	struct hclge_desc desc[3];
-	u16 tbl_idx;
 	int status;
 
 	/* mac addr check */
@@ -4380,44 +7671,23 @@
 		return -EINVAL;
 	}
 	memset(&req, 0, sizeof(req));
-	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hclge_prepare_mac_addr(&req, addr);
+	hclge_prepare_mac_addr(&req, addr, true);
 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
-	if (!status) {
-		/* This mac addr exist, update VFID for it */
-		hclge_update_desc_vfid(desc, vport->vport_id, false);
-		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-	} else {
+	if (status) {
 		/* This mac addr do not exist, add new entry for it */
 		memset(desc[0].data, 0, sizeof(desc[0].data));
 		memset(desc[1].data, 0, sizeof(desc[0].data));
 		memset(desc[2].data, 0, sizeof(desc[0].data));
-		hclge_update_desc_vfid(desc, vport->vport_id, false);
-		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
 	}
+	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
+	if (status)
+		return status;
+	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
 
-	/* If mc mac vlan table is full, use MTA table */
-	if (status == -ENOSPC) {
-		if (!vport->accept_mta_mc) {
-			status = hclge_cfg_func_mta_filter(hdev,
-							   vport->vport_id,
-							   true);
-			if (status) {
-				dev_err(&hdev->pdev->dev,
-					"set mta filter mode fail ret=%d\n",
-					status);
-				return status;
-			}
-			vport->accept_mta_mc = true;
-		}
-
-		/* Set MTA table for this MAC address */
-		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
-		status = hclge_set_mta_table_item(vport, tbl_idx, true);
-	}
+	/* if already overflow, not to print each time */
+	if (status == -ENOSPC &&
+	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
 
 	return status;
 }
@@ -4427,7 +7697,8 @@
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_rm_mc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+				     addr);
 }
 
 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -4447,15 +7718,13 @@
 	}
 
 	memset(&req, 0, sizeof(req));
-	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-	hclge_prepare_mac_addr(&req, addr);
+	hclge_prepare_mac_addr(&req, addr, true);
 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
 	if (!status) {
 		/* This mac addr exist, remove this handle's VFID for it */
-		hclge_update_desc_vfid(desc, vport->vport_id, true);
+		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
+		if (status)
+			return status;
 
 		if (hclge_is_all_function_id_zero(desc))
 			/* All the vfid is zero, so need to delete this entry */
@@ -4464,17 +7733,361 @@
 			/* Not all the vfid is zero, update the vfid */
 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
 
-	} else {
-		/* Maybe this mac address is in mta table, but it cannot be
-		 * deleted here because an entry of mta represents an address
-		 * range rather than a specific address. the delete action to
-		 * all entries will take effect in update_mta_status called by
-		 * hns3_nic_set_rx_mode.
-		 */
+	} else if (status == -ENOENT) {
 		status = 0;
 	}
 
 	return status;
+}
+
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+				      struct list_head *list,
+				      int (*sync)(struct hclge_vport *,
+						  const unsigned char *))
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	int ret;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		ret = sync(vport, mac_node->mac_addr);
+		if (!ret) {
+			mac_node->state = HCLGE_MAC_ACTIVE;
+		} else {
+			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+				&vport->state);
+
+			/* If one unicast mac address is existing in hardware,
+			 * we need to try whether other unicast mac addresses
+			 * are new addresses that can be added.
+			 */
+			if (ret != -EEXIST)
+				break;
+		}
+	}
+}
+
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+					struct list_head *list,
+					int (*unsync)(struct hclge_vport *,
+						      const unsigned char *))
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	int ret;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		ret = unsync(vport, mac_node->mac_addr);
+		if (!ret || ret == -ENOENT) {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+				&vport->state);
+			break;
+		}
+	}
+}
+
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+				     struct list_head *mac_list)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+	bool all_added = true;
+
+	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		if (mac_node->state == HCLGE_MAC_TO_ADD)
+			all_added = false;
+
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, it means have received a TO_DEL request
+		 * during the time window of adding the mac address into mac
+		 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
+		 * then it will be removed at next time. else it must be TO_ADD,
+		 * this address hasn't been added into mac table,
+		 * so just remove the mac node.
+		 */
+		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			hclge_update_mac_node(new_node, mac_node->state);
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+			mac_node->state = HCLGE_MAC_TO_DEL;
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		} else {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		}
+	}
+
+	return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+				     struct list_head *mac_list)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+
+	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			/* If the mac addr exists in the mac list, it means
+			 * received a new TO_ADD request during the time window
+			 * of configuring the mac address. For the mac node
+			 * state is TO_ADD, and the address is already in the
+			 * hardware(due to delete fail), so we just need
+			 * to change the mac node state to ACTIVE.
+			 */
+			new_node->state = HCLGE_MAC_ACTIVE;
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		}
+	}
+}
+
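These two helpers are the merge step of the sync task: the hardware work happens on unlocked temporary lists, and the results are folded back into the vport list under mac_list_lock. The interesting cases are the races where set_rx_mode changed the main list during that window. A standalone sketch of the two decision rules (plain C, not driver code; all names are invented):

#include <assert.h>
#include <stdbool.h>

enum merge_action {
	DROP_TMP_NODE,      /* main-list node already carries the state */
	MOVE_BACK_TO_DEL,   /* added to HW, then deleted meanwhile */
	MOVE_BACK_RETRY,    /* hardware op failed: retry at next sync */
	MARK_MAIN_ACTIVE,   /* delete failed but addr is wanted again */
};

/* add-side rule from hclge_sync_from_add_list(): in_main says the
 * address reappeared in the main list; added says the hardware write
 * succeeded (node state ACTIVE rather than still TO_ADD).
 */
static enum merge_action merge_add(bool in_main, bool added)
{
	if (in_main)
		return DROP_TMP_NODE;     /* main node updated instead */
	return added ? MOVE_BACK_TO_DEL : DROP_TMP_NODE;
}

/* del-side rule from hclge_sync_from_del_list(): only nodes whose
 * hardware delete failed reach this point.
 */
static enum merge_action merge_del(bool in_main)
{
	return in_main ? MARK_MAIN_ACTIVE : MOVE_BACK_RETRY;
}

int main(void)
{
	/* addr written to HW, but a TO_DEL request raced with the sync */
	assert(merge_add(false, true) == MOVE_BACK_TO_DEL);
	/* delete failed and a TO_ADD raced with it: addr is still in HW */
	assert(merge_del(true) == MARK_MAIN_ACTIVE);
	return 0;
}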
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+					enum HCLGE_MAC_ADDR_TYPE mac_type,
+					bool is_all_added)
+{
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		if (is_all_added)
+			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+		else if (hclge_is_umv_space_full(vport, true))
+			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+	} else {
+		if (is_all_added)
+			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+		else
+			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+	}
+}
+
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+				       enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+	struct list_head tmp_add_list, tmp_del_list;
+	struct list_head *list;
+	bool all_added;
+
+	INIT_LIST_HEAD(&tmp_add_list);
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addr to the tmp_add_list and tmp_del_list, then
+	 * we can add/delete these mac addr outside the spin lock
+	 */
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+	       &vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		switch (mac_node->state) {
+		case HCLGE_MAC_TO_DEL:
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+			if (!new_node)
+				goto stop_traverse;
+			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+			new_node->state = mac_node->state;
+			list_add_tail(&new_node->node, &tmp_add_list);
+			break;
+		default:
+			break;
+		}
+	}
+
+stop_traverse:
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	/* delete first, in order to get max mac table space for adding */
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_uc_addr_common);
+		hclge_sync_vport_mac_list(vport, &tmp_add_list,
+					  hclge_add_uc_addr_common);
+	} else {
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_mc_addr_common);
+		hclge_sync_vport_mac_list(vport, &tmp_add_list,
+					  hclge_add_mc_addr_common);
+	}
+
+	/* if some mac addresses were added/deleted fail, move back to the
+	 * mac_list, and retry at next time.
+	 */
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_sync_from_del_list(&tmp_del_list, list);
+	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+	struct hclge_dev *hdev = vport->back;
+
+	if (test_bit(vport->vport_id, hdev->vport_config_block))
+		return false;
+
+	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+		return true;
+
+	return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+
+		if (!hclge_need_sync_mac_table(vport))
+			continue;
+
+		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+	}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+	struct hclge_mac_node *mac_cfg, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+	int ret;
+
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		list = &vport->uc_mac_list;
+		unsync = hclge_rm_uc_addr_common;
+	} else {
+		list = &vport->mc_mac_list;
+		unsync = hclge_rm_mc_addr_common;
+	}
+
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	if (!is_del_list)
+		set_bit(vport->vport_id, hdev->vport_config_block);
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+		switch (mac_cfg->state) {
+		case HCLGE_MAC_TO_DEL:
+		case HCLGE_MAC_ACTIVE:
+			list_del(&mac_cfg->node);
+			list_add_tail(&mac_cfg->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			if (is_del_list) {
+				list_del(&mac_cfg->node);
+				kfree(mac_cfg);
+			}
+			break;
+		}
+	}
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+		ret = unsync(vport, mac_cfg->mac_addr);
+		if (!ret || ret == -ENOENT) {
+			/* clear all mac addr from hardware, but remain these
+			 * mac addr in the mac list, and restore them after
+			 * vf reset finished.
+			 */
+			if (!is_del_list &&
+			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
+				mac_cfg->state = HCLGE_MAC_TO_ADD;
+			} else {
+				list_del(&mac_cfg->node);
+				kfree(mac_cfg);
+			}
+		} else if (is_del_list) {
+			mac_cfg->state = HCLGE_MAC_TO_DEL;
+		}
+	}
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_sync_from_del_list(&tmp_del_list, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac address when uninitialize */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+					enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+	       &vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		switch (mac_node->state) {
+		case HCLGE_MAC_TO_DEL:
+		case HCLGE_MAC_ACTIVE:
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			list_del(&mac_node->node);
+			kfree(mac_node);
+			break;
+		}
+	}
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	if (mac_type == HCLGE_MAC_ADDR_UC)
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_uc_addr_common);
+	else
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_mc_addr_common);
+
+	if (!list_empty(&tmp_del_list))
+		dev_warn(&hdev->pdev->dev,
+			 "uninit %s mac list for vport %u not completely.\n",
+			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+			 vport->vport_id);
+
+	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+		list_del(&mac_node->node);
+		kfree(mac_node);
+	}
+}
+
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport;
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		vport = &hdev->vport[i];
+		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
+	}
 }
 
 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -4489,7 +8102,7 @@
 
 	if (cmdq_resp) {
 		dev_err(&hdev->pdev->dev,
-			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
 		return -EIO;
 	}
@@ -4511,12 +8124,79 @@
 		break;
 	default:
 		dev_err(&hdev->pdev->dev,
-			"add mac ethertype failed for undefined, code=%d.\n",
+			"add mac ethertype failed for undefined, code=%u.\n",
 			resp_code);
 		return_status = -EIO;
 	}
 
 	return return_status;
+}
+
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+				     u8 *mac_addr)
+{
+	struct hclge_mac_vlan_tbl_entry_cmd req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	u16 egress_port = 0;
+	int i;
+
+	if (is_zero_ether_addr(mac_addr))
+		return false;
+
+	memset(&req, 0, sizeof(req));
+	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+	req.egress_port = cpu_to_le16(egress_port);
+	hclge_prepare_mac_addr(&req, mac_addr, false);
+
+	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+		return true;
+
+	vf_idx += HCLGE_VF_VPORT_START_NUM;
+	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+		if (i != vf_idx &&
+		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+			return true;
+
+	return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+			    u8 *mac_addr)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	vport = hclge_get_vf_vport(hdev, vf);
+	if (!vport)
+		return -EINVAL;
+
+	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+		dev_info(&hdev->pdev->dev,
+			 "Specified MAC(=%pM) is same as before, no change committed!\n",
+			 mac_addr);
+		return 0;
+	}
+
+	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+			mac_addr);
+		return -EEXIST;
+	}
+
+	ether_addr_copy(vport->vf_info.mac, mac_addr);
+
+	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+		dev_info(&hdev->pdev->dev,
+			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+			 vf, mac_addr);
+		return hclge_inform_reset_assert_to_vf(vport);
+	}
+
+	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
+		 vf, mac_addr);
+	return 0;
 }
 
 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
@@ -4570,12 +8250,57 @@
 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }
 
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+				       const u8 *old_addr, const u8 *new_addr)
+{
+	struct list_head *list = &vport->uc_mac_list;
+	struct hclge_mac_node *old_node, *new_node;
+
+	new_node = hclge_find_mac_node(list, new_addr);
+	if (!new_node) {
+		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+		if (!new_node)
+			return -ENOMEM;
+
+		new_node->state = HCLGE_MAC_TO_ADD;
+		ether_addr_copy(new_node->mac_addr, new_addr);
+		list_add(&new_node->node, list);
+	} else {
+		if (new_node->state == HCLGE_MAC_TO_DEL)
+			new_node->state = HCLGE_MAC_ACTIVE;
+
+		/* make sure the new addr is in the list head, avoid dev
+		 * addr may be not re-added into mac table for the umv space
+		 * limitation after global/imp reset which will clear mac
+		 * table by hardware.
+		 */
+		list_move(&new_node->node, list);
+	}
+
+	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+		old_node = hclge_find_mac_node(list, old_addr);
+		if (old_node) {
+			if (old_node->state == HCLGE_MAC_TO_ADD) {
+				list_del(&old_node->node);
+				kfree(old_node);
+			} else {
+				old_node->state = HCLGE_MAC_TO_DEL;
+			}
+		}
+	}
+
+	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+	return 0;
+}
+
 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
 			      bool is_first)
 {
 	const unsigned char *new_addr = (const unsigned char *)p;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	unsigned char *old_addr = NULL;
 	int ret;
 
 	/* mac addr check */
@@ -4583,58 +8308,86 @@
 	    is_broadcast_ether_addr(new_addr) ||
 	    is_multicast_ether_addr(new_addr)) {
 		dev_err(&hdev->pdev->dev,
-			"Change uc mac err! invalid mac:%p.\n",
+			"change uc mac err! invalid mac: %pM.\n",
 			new_addr);
 		return -EINVAL;
-	}
-
-	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
-		dev_warn(&hdev->pdev->dev,
-			 "remove old uc mac address fail.\n");
-
-	ret = hclge_add_uc_addr(handle, new_addr);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"add uc mac address fail, ret =%d.\n",
-			ret);
-
-		if (!is_first &&
-		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
-			dev_err(&hdev->pdev->dev,
-				"restore uc mac address fail.\n");
-
-		return -EIO;
 	}
 
 	ret = hclge_pause_addr_cfg(hdev, new_addr);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"configure mac pause address fail, ret =%d.\n",
+			"failed to configure mac pause address, ret = %d\n",
 			ret);
-		return -EIO;
+		return ret;
 	}
 
+	if (!is_first)
+		old_addr = hdev->hw.mac.mac_addr;
+
+	spin_lock_bh(&vport->mac_list_lock);
+	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to change the mac addr:%pM, ret = %d\n",
+			new_addr, ret);
+		spin_unlock_bh(&vport->mac_list_lock);
+
+		if (!is_first)
+			hclge_pause_addr_cfg(hdev, old_addr);
+
+		return ret;
+	}
+	/* we must update dev addr with spin lock protect, preventing dev addr
+	 * being removed by set_rx_mode path.
+	 */
 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_task_schedule(hdev, 0);
 
 	return 0;
 }
 
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+			  int cmd)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	if (!hdev->hw.mac.phydev)
+		return -EOPNOTSUPP;
+
+	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
-				      bool filter_en)
+				      u8 fe_type, bool filter_en, u8 vf_id)
 {
 	struct hclge_vlan_filter_ctrl_cmd *req;
 	struct hclge_desc desc;
 	int ret;
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+	/* read current vlan filter parameter */
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
 	req->vlan_type = vlan_type;
-	req->vlan_fe = filter_en;
+	req->vf_id = vf_id;
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get vlan filter config, ret = %d.\n", ret);
+		return ret;
+	}
+
+	/* modify and write new config parameter */
+	hclge_cmd_reuse_desc(&desc, false);
+	req->vlan_fe = filter_en ?
+		(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

 	return ret;
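The rework above turns a blind write into a read-modify-write: the current vlan_fe byte is read back from firmware and only the requested filter-enable bits (fe_type) are set or cleared, so the enable bits for the other direction and for RoCE are preserved. A standalone sketch of the bit manipulation (plain C, not driver code; the bit values mirror the defines in the next hunk):

#include <stdint.h>
#include <stdio.h>

static uint8_t update_vlan_fe(uint8_t cur, uint8_t fe_type, int enable)
{
	/* same expression as req->vlan_fe above */
	return enable ? (uint8_t)(cur | fe_type) : (uint8_t)(cur & ~fe_type);
}

int main(void)
{
	/* bit0 NIC ingress, bit1 NIC egress, bit2/3 RoCE in-/egress */
	uint8_t fe = 0x0f;                /* everything enabled */

	fe = update_vlan_fe(fe, 0x05, 0); /* clear both ingress bits */
	printf("0x%02x\n", fe);           /* prints 0x0a: egress kept */
	return 0;
}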
@@ -4642,26 +8395,62 @@
 
 #define HCLGE_FILTER_TYPE_VF		0
 #define HCLGE_FILTER_TYPE_PORT		1
+#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
+#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
+					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
+					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
 
 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
-	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+					   HCLGE_FILTER_FE_EGRESS, enable, 0);
+		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+					   HCLGE_FILTER_FE_INGRESS, enable, 0);
+	} else {
+		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
+					   0);
+	}
+	if (enable)
+		handle->netdev_flags |= HNAE3_VLAN_FLTR;
+	else
+		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
 }
 
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
-				    bool is_kill, u16 vlan, u8 qos,
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+				    bool is_kill, u16 vlan,
 				    __be16 proto)
 {
-#define HCLGE_MAX_VF_BYTES  16
+	struct hclge_vport *vport = &hdev->vport[vfid];
 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
 	struct hclge_desc desc[2];
 	u8 vf_byte_val;
 	u8 vf_byte_off;
 	int ret;
+
+	/* if vf vlan table is full, firmware will close vf vlan filter, it
+	 * is unable and unnecessary to add new vlan id to vf vlan filter.
+	 * If spoof check is enabled, and vf vlan is full, it shouldn't add
+	 * new vlan, because tx packets with these vlan id will be dropped.
+	 */
+	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+		if (vport->vf_info.spoofchk && vlan) {
+			dev_err(&hdev->pdev->dev,
+				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
+			return -EPERM;
+		}
+		return 0;
+	}
 
 	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -4698,28 +8487,30 @@
 		return 0;
 
 	if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+		set_bit(vfid, hdev->vf_vlan_full);
 		dev_warn(&hdev->pdev->dev,
 			 "vf vlan table is full, vf vlan filter is disabled\n");
 		return 0;
 	}
 
 	dev_err(&hdev->pdev->dev,
-		"Add vf vlan filter fail, ret =%d.\n",
+		"Add vf vlan filter fail, ret =%u.\n",
 		req0->resp_code);
 	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
 		if (!req0->resp_code)
 			return 0;
 
-		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
-			dev_warn(&hdev->pdev->dev,
-				 "vlan %d filter is not in vf vlan table\n",
-				 vlan);
+		/* vf vlan filter is disabled when vf vlan table is full,
+		 * then new vlan id will not be added into vf vlan table.
+		 * Just return 0 without warning, avoid massive verbose
+		 * print logs when unload.
+		 */
+		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
 			return 0;
-		}
 
 		dev_err(&hdev->pdev->dev,
-			"Kill vf vlan filter fail, ret =%d.\n",
+			"Kill vf vlan filter fail, ret =%u.\n",
 			req0->resp_code);
 	}
 
@@ -4738,9 +8529,10 @@
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
 
-	vlan_offset_160 = vlan_id / 160;
-	vlan_offset_byte = (vlan_id % 160) / 8;
-	vlan_offset_byte_val = 1 << (vlan_id % 8);
+	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+			   HCLGE_VLAN_BYTE_SIZE;
+	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
 
 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
 	req->vlan_offset = vlan_offset_160;
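The named constants above replace the literals 160 and 8 that the deleted lines used, so the addressing itself is unchanged: a VLAN id selects a 160-id block, a byte within the block, and a bit within that byte. A standalone sketch with a worked value (plain C, not driver code):

#include <stdio.h>

#define VLAN_ID_OFFSET_STEP 160  /* HCLGE_VLAN_ID_OFFSET_STEP */
#define VLAN_BYTE_SIZE 8         /* HCLGE_VLAN_BYTE_SIZE */

int main(void)
{
	unsigned int vlan_id = 1000;
	unsigned int block = vlan_id / VLAN_ID_OFFSET_STEP;
	unsigned int byte = (vlan_id % VLAN_ID_OFFSET_STEP) / VLAN_BYTE_SIZE;
	unsigned int mask = 1u << (vlan_id % VLAN_BYTE_SIZE);

	/* vlan 1000: block 6, byte 5, bit mask 0x01 */
	printf("block=%u byte=%u mask=0x%02x\n", block, byte, mask);
	return 0;
}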
@@ -4755,7 +8547,7 @@
 }
 
 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
-				    u16 vport_id, u16 vlan_id, u8 qos,
+				    u16 vport_id, u16 vlan_id,
 				    bool is_kill)
 {
 	u16 vport_idx, vport_num = 0;
@@ -4765,10 +8557,10 @@
 		return 0;
 
 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
-				       0, proto);
+				       proto);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"Set %d vport vlan filter config fail, ret =%d.\n",
+			"Set %u vport vlan filter config fail, ret =%d.\n",
 			vport_id, ret);
 		return ret;
 	}
@@ -4780,7 +8572,7 @@
 
 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
 		dev_err(&hdev->pdev->dev,
-			"Add port vlan failed, vport %d is already in vlan %d\n",
+			"Add port vlan failed, vport %u is already in vlan %u\n",
 			vport_id, vlan_id);
 		return -EINVAL;
 	}
@@ -4788,7 +8580,7 @@
 	if (is_kill &&
 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
 		dev_err(&hdev->pdev->dev,
-			"Delete port vlan failed, vport %d is not in vlan %d\n",
+			"Delete port vlan failed, vport %u is not in vlan %u\n",
 			vport_id, vlan_id);
 		return -EINVAL;
 	}
@@ -4801,30 +8593,6 @@
 				 is_kill);
 
 	return ret;
-}
-
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
-			  u16 vlan_id, bool is_kill)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-
-	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
-					0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
-				    u16 vlan, u8 qos, __be16 proto)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-
-	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-	if (proto != htons(ETH_P_8021Q))
-		return -EPROTONOSUPPORT;
-
-	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
 }
 
 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -4906,6 +8674,52 @@
 	return status;
 }
 
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+				  u16 port_base_vlan_state,
+				  u16 vlan_tag)
+{
+	int ret;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->txvlan_cfg.accept_tag1 = true;
+		vport->txvlan_cfg.insert_tag1_en = false;
+		vport->txvlan_cfg.default_tag1 = 0;
+	} else {
+		vport->txvlan_cfg.accept_tag1 = false;
+		vport->txvlan_cfg.insert_tag1_en = true;
+		vport->txvlan_cfg.default_tag1 = vlan_tag;
+	}
+
+	vport->txvlan_cfg.accept_untag1 = true;
+
+	/* accept_tag2 and accept_untag2 are not supported on
+	 * pdev revision(0x20), new revision support them,
+	 * these two fields can not be configured by user.
+	 */
+	vport->txvlan_cfg.accept_tag2 = true;
+	vport->txvlan_cfg.accept_untag2 = true;
+	vport->txvlan_cfg.insert_tag2_en = false;
+	vport->txvlan_cfg.default_tag2 = 0;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+	} else {
+		vport->rxvlan_cfg.strip_tag1_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+	}
+	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+	ret = hclge_set_vlan_tx_offload_cfg(vport);
+	if (ret)
+		return ret;
+
+	return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
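The rx-strip decision above (and the same pattern in hclge_en_hw_strip_rxvtag() further down) follows one rule: with no port-based VLAN the user's offload toggle drives tag2 stripping only; with a port-based VLAN inserted as tag1, tag1 follows the toggle and tag2 is always stripped. A standalone sketch of that truth table (plain C, not driver code; names invented):

#include <stdbool.h>
#include <stdio.h>

struct rx_strip { bool tag1, tag2; };

static struct rx_strip rx_strip_cfg(bool port_base_vlan, bool offload_en)
{
	struct rx_strip s;

	if (!port_base_vlan) {
		s.tag1 = false;          /* no pvid: nothing in tag1 */
		s.tag2 = offload_en;
	} else {
		s.tag1 = offload_en;     /* pvid occupies tag1 */
		s.tag2 = true;
	}
	return s;
}

int main(void)
{
	struct rx_strip s = rx_strip_cfg(true, false);

	printf("tag1=%d tag2=%d\n", s.tag1, s.tag2); /* tag1=0 tag2=1 */
	return 0;
}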
49098723 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
49108724 {
49118725 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
....@@ -4934,7 +8748,7 @@
49348748
49358749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
49368750
4937
- tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
8751
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
49388752 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
49398753 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
49408754
....@@ -4951,18 +8765,38 @@
49518765 {
49528766 #define HCLGE_DEF_VLAN_TYPE 0x8100
49538767
4954
- struct hnae3_handle *handle;
8768
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
49558769 struct hclge_vport *vport;
49568770 int ret;
49578771 int i;
49588772
4959
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4960
- if (ret)
4961
- return ret;
8773
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8774
+ /* for revision 0x21, vf vlan filter is per function */
8775
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
8776
+ vport = &hdev->vport[i];
8777
+ ret = hclge_set_vlan_filter_ctrl(hdev,
8778
+ HCLGE_FILTER_TYPE_VF,
8779
+ HCLGE_FILTER_FE_EGRESS,
8780
+ true,
8781
+ vport->vport_id);
8782
+ if (ret)
8783
+ return ret;
8784
+ }
49628785
4963
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4964
- if (ret)
4965
- return ret;
8786
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8787
+ HCLGE_FILTER_FE_INGRESS, true,
8788
+ 0);
8789
+ if (ret)
8790
+ return ret;
8791
+ } else {
8792
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8793
+ HCLGE_FILTER_FE_EGRESS_V1_B,
8794
+ true, 0);
8795
+ if (ret)
8796
+ return ret;
8797
+ }
8798
+
8799
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
49668800
49678801 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
49688802 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
....@@ -4976,102 +8810,532 @@
49768810 return ret;
49778811
49788812 for (i = 0; i < hdev->num_alloc_vport; i++) {
8813
+ u16 vlan_tag;
8814
+
49798815 vport = &hdev->vport[i];
4980
- vport->txvlan_cfg.accept_tag1 = true;
4981
- vport->txvlan_cfg.accept_untag1 = true;
8816
+ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
49828817
4983
- /* accept_tag2 and accept_untag2 are not supported on
4984
- * pdev revision(0x20), new revision support them. The
4985
- * value of this two fields will not return error when driver
4986
- * send command to fireware in revision(0x20).
4987
- * This two fields can not configured by user.
4988
- */
4989
- vport->txvlan_cfg.accept_tag2 = true;
4990
- vport->txvlan_cfg.accept_untag2 = true;
4991
-
4992
- vport->txvlan_cfg.insert_tag1_en = false;
4993
- vport->txvlan_cfg.insert_tag2_en = false;
4994
- vport->txvlan_cfg.default_tag1 = 0;
4995
- vport->txvlan_cfg.default_tag2 = 0;
4996
-
4997
- ret = hclge_set_vlan_tx_offload_cfg(vport);
4998
- if (ret)
4999
- return ret;
5000
-
5001
- vport->rxvlan_cfg.strip_tag1_en = false;
5002
- vport->rxvlan_cfg.strip_tag2_en = true;
5003
- vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5004
- vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5005
-
5006
- ret = hclge_set_vlan_rx_offload_cfg(vport);
8818
+ ret = hclge_vlan_offload_cfg(vport,
8819
+ vport->port_base_vlan_cfg.state,
8820
+ vlan_tag);
50078821 if (ret)
50088822 return ret;
50098823 }
50108824
5011
- handle = &hdev->vport[0].nic;
50128825 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8826
+}
8827
+
8828
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8829
+ bool writen_to_tbl)
8830
+{
8831
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8832
+
8833
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8834
+ if (vlan->vlan_id == vlan_id)
8835
+ return;
8836
+
8837
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8838
+ if (!vlan)
8839
+ return;
8840
+
8841
+ vlan->hd_tbl_status = writen_to_tbl;
8842
+ vlan->vlan_id = vlan_id;
8843
+
8844
+ list_add_tail(&vlan->node, &vport->vlan_list);
8845
+}
8846
+
8847
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8848
+{
8849
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8850
+ struct hclge_dev *hdev = vport->back;
8851
+ int ret;
8852
+
8853
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8854
+ if (!vlan->hd_tbl_status) {
8855
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8856
+ vport->vport_id,
8857
+ vlan->vlan_id, false);
8858
+ if (ret) {
8859
+ dev_err(&hdev->pdev->dev,
8860
+ "restore vport vlan list failed, ret=%d\n",
8861
+ ret);
8862
+ return ret;
8863
+ }
8864
+ }
8865
+ vlan->hd_tbl_status = true;
8866
+ }
8867
+
8868
+ return 0;
8869
+}
8870
+
8871
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8872
+ bool is_write_tbl)
8873
+{
8874
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8875
+ struct hclge_dev *hdev = vport->back;
8876
+
8877
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8878
+ if (vlan->vlan_id == vlan_id) {
8879
+ if (is_write_tbl && vlan->hd_tbl_status)
8880
+ hclge_set_vlan_filter_hw(hdev,
8881
+ htons(ETH_P_8021Q),
8882
+ vport->vport_id,
8883
+ vlan_id,
8884
+ true);
8885
+
8886
+ list_del(&vlan->node);
8887
+ kfree(vlan);
8888
+ break;
8889
+ }
8890
+ }
8891
+}
8892
+
8893
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8894
+{
8895
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8896
+ struct hclge_dev *hdev = vport->back;
8897
+
8898
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8899
+ if (vlan->hd_tbl_status)
8900
+ hclge_set_vlan_filter_hw(hdev,
8901
+ htons(ETH_P_8021Q),
8902
+ vport->vport_id,
8903
+ vlan->vlan_id,
8904
+ true);
8905
+
8906
+ vlan->hd_tbl_status = false;
8907
+ if (is_del_list) {
8908
+ list_del(&vlan->node);
8909
+ kfree(vlan);
8910
+ }
8911
+ }
8912
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
8913
+}
8914
+
8915
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8916
+{
8917
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8918
+ struct hclge_vport *vport;
8919
+ int i;
8920
+
8921
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
8922
+ vport = &hdev->vport[i];
8923
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8924
+ list_del(&vlan->node);
8925
+ kfree(vlan);
8926
+ }
8927
+ }
8928
+}
8929
+
8930
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8931
+{
8932
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8933
+ struct hclge_dev *hdev = vport->back;
8934
+ u16 vlan_proto;
8935
+ u16 vlan_id;
8936
+ u16 state;
8937
+ int ret;
8938
+
8939
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8940
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8941
+ state = vport->port_base_vlan_cfg.state;
8942
+
8943
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8944
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8945
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8946
+ vport->vport_id, vlan_id,
8947
+ false);
8948
+ return;
8949
+ }
8950
+
8951
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8952
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8953
+ vport->vport_id,
8954
+ vlan->vlan_id, false);
8955
+ if (ret)
8956
+ break;
8957
+ vlan->hd_tbl_status = true;
8958
+ }
8959
+}
8960
+
8961
+/* For global reset and imp reset, hardware will clear the mac table,
8962
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
8963
+ * can be restored in the service task after reset complete. Furtherly,
8964
+ * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
8965
+ * be restored after reset, so just remove these mac nodes from mac_list.
8966
+ */
8967
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
8968
+{
8969
+ struct hclge_mac_node *mac_node, *tmp;
8970
+
8971
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
8972
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
8973
+ mac_node->state = HCLGE_MAC_TO_ADD;
8974
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8975
+ list_del(&mac_node->node);
8976
+ kfree(mac_node);
8977
+ }
8978
+ }
8979
+}
8980
+
8981
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
8982
+{
8983
+ spin_lock_bh(&vport->mac_list_lock);
8984
+
8985
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8986
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8987
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8988
+
8989
+ spin_unlock_bh(&vport->mac_list_lock);
8990
+}
8991
+
8992
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
8993
+{
8994
+ struct hclge_vport *vport = &hdev->vport[0];
8995
+ struct hnae3_handle *handle = &vport->nic;
8996
+
8997
+ hclge_restore_mac_table_common(vport);
8998
+ hclge_restore_vport_vlan_table(vport);
8999
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9000
+
9001
+ hclge_restore_fd_entries(handle);
50139002 }
50149003
50159004 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
50169005 {
50179006 struct hclge_vport *vport = hclge_get_vport(handle);
50189007
5019
- vport->rxvlan_cfg.strip_tag1_en = false;
5020
- vport->rxvlan_cfg.strip_tag2_en = enable;
9008
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9009
+ vport->rxvlan_cfg.strip_tag1_en = false;
9010
+ vport->rxvlan_cfg.strip_tag2_en = enable;
9011
+ } else {
9012
+ vport->rxvlan_cfg.strip_tag1_en = enable;
9013
+ vport->rxvlan_cfg.strip_tag2_en = true;
9014
+ }
50219015 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
50229016 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9017
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
50239018
50249019 return hclge_set_vlan_rx_offload_cfg(vport);
50259020 }
50269021
5027
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
9022
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9023
+ u16 port_base_vlan_state,
9024
+ struct hclge_vlan_info *new_info,
9025
+ struct hclge_vlan_info *old_info)
9026
+{
9027
+ struct hclge_dev *hdev = vport->back;
9028
+ int ret;
9029
+
9030
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9031
+ hclge_rm_vport_all_vlan_table(vport, false);
9032
+ return hclge_set_vlan_filter_hw(hdev,
9033
+ htons(new_info->vlan_proto),
9034
+ vport->vport_id,
9035
+ new_info->vlan_tag,
9036
+ false);
9037
+ }
9038
+
9039
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9040
+ vport->vport_id, old_info->vlan_tag,
9041
+ true);
9042
+ if (ret)
9043
+ return ret;
9044
+
9045
+ return hclge_add_vport_all_vlan_table(vport);
9046
+}
9047
+
9048
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9049
+ struct hclge_vlan_info *vlan_info)
9050
+{
9051
+ struct hnae3_handle *nic = &vport->nic;
9052
+ struct hclge_vlan_info *old_vlan_info;
9053
+ struct hclge_dev *hdev = vport->back;
9054
+ int ret;
9055
+
9056
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9057
+
9058
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9059
+ if (ret)
9060
+ return ret;
9061
+
9062
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9063
+ /* add new VLAN tag */
9064
+ ret = hclge_set_vlan_filter_hw(hdev,
9065
+ htons(vlan_info->vlan_proto),
9066
+ vport->vport_id,
9067
+ vlan_info->vlan_tag,
9068
+ false);
9069
+ if (ret)
9070
+ return ret;
9071
+
9072
+ /* remove old VLAN tag */
9073
+ ret = hclge_set_vlan_filter_hw(hdev,
9074
+ htons(old_vlan_info->vlan_proto),
9075
+ vport->vport_id,
9076
+ old_vlan_info->vlan_tag,
9077
+ true);
9078
+ if (ret)
9079
+ return ret;
9080
+
9081
+ goto update;
9082
+ }
9083
+
9084
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9085
+ old_vlan_info);
9086
+ if (ret)
9087
+ return ret;
9088
+
9089
+	/* update the state only when disabling/enabling port based VLAN */
9090
+ vport->port_base_vlan_cfg.state = state;
9091
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9092
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9093
+ else
9094
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9095
+
9096
+update:
9097
+ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9098
+ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9099
+ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9100
+
9101
+ return 0;
9102
+}
9103
+
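+/* map the requested vlan against the current port base vlan state:
+ * DISABLE + zero vlan     -> NOCHANGE
+ * DISABLE + nonzero vlan  -> ENABLE
+ * ENABLE  + zero vlan     -> DISABLE
+ * ENABLE  + same vlan     -> NOCHANGE
+ * ENABLE  + other vlan    -> MODIFY
+ */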
9104
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9105
+ enum hnae3_port_base_vlan_state state,
9106
+ u16 vlan)
9107
+{
9108
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9109
+ if (!vlan)
9110
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9111
+ else
9112
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
9113
+ } else {
9114
+ if (!vlan)
9115
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
9116
+ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9117
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9118
+ else
9119
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
9120
+ }
9121
+}
9122
+
9123
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9124
+ u16 vlan, u8 qos, __be16 proto)
9125
+{
9126
+ struct hclge_vport *vport = hclge_get_vport(handle);
9127
+ struct hclge_dev *hdev = vport->back;
9128
+ struct hclge_vlan_info vlan_info;
9129
+ u16 state;
9130
+ int ret;
9131
+
9132
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9133
+ return -EOPNOTSUPP;
9134
+
9135
+ vport = hclge_get_vf_vport(hdev, vfid);
9136
+ if (!vport)
9137
+ return -EINVAL;
9138
+
9139
+	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9140
+ if (vlan > VLAN_N_VID - 1 || qos > 7)
9141
+ return -EINVAL;
9142
+ if (proto != htons(ETH_P_8021Q))
9143
+ return -EPROTONOSUPPORT;
9144
+
9145
+ state = hclge_get_port_base_vlan_state(vport,
9146
+ vport->port_base_vlan_cfg.state,
9147
+ vlan);
9148
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9149
+ return 0;
9150
+
9151
+ vlan_info.vlan_tag = vlan;
9152
+ vlan_info.qos = qos;
9153
+ vlan_info.vlan_proto = ntohs(proto);
9154
+
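+	/* an inactive VF cannot handle a mailbox message, so apply the new
+	 * config directly; otherwise push the port base vlan info to the VF
+	 * so it can reconfigure itself
+	 */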
9155
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9156
+ return hclge_update_port_base_vlan_cfg(vport, state,
9157
+ &vlan_info);
9158
+ } else {
9159
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9160
+ vport->vport_id, state,
9161
+ vlan, qos,
9162
+ ntohs(proto));
9163
+ return ret;
9164
+ }
9165
+}
9166
+
9167
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9168
+{
9169
+ struct hclge_vlan_info *vlan_info;
9170
+ struct hclge_vport *vport;
9171
+ int ret;
9172
+ int vf;
9173
+
9174
+	/* clear the port base vlan for all VFs */
9175
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9176
+ vport = &hdev->vport[vf];
9177
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9178
+
9179
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9180
+ vport->vport_id,
9181
+ vlan_info->vlan_tag, true);
9182
+ if (ret)
9183
+ dev_err(&hdev->pdev->dev,
9184
+ "failed to clear vf vlan for vf%d, ret = %d\n",
9185
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
9186
+ }
9187
+}
9188
+
9189
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9190
+ u16 vlan_id, bool is_kill)
9191
+{
9192
+ struct hclge_vport *vport = hclge_get_vport(handle);
9193
+ struct hclge_dev *hdev = vport->back;
9194
+ bool writen_to_tbl = false;
9195
+ int ret = 0;
9196
+
9197
+	/* When the device is resetting or the reset has failed, the firmware
9198
+	 * is unable to handle the mailbox. Just record the vlan id and remove
9199
+	 * it after the reset has finished.
9200
+ */
9201
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9202
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9203
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
9204
+ return -EBUSY;
9205
+ }
9206
+
9207
+	/* when port base vlan is enabled, we use the port base vlan as the
9208
+	 * vlan filter entry. In this case, we don't update the vlan filter
9209
+	 * table when the user adds a new vlan or removes an existing vlan;
9210
+	 * we just update the vport vlan list. The vlan ids in the vlan list
9211
+	 * are written to the vlan filter table once port base vlan is disabled
9212
+ */
9213
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9214
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9215
+ vlan_id, is_kill);
9216
+ writen_to_tbl = true;
9217
+ }
9218
+
9219
+ if (!ret) {
9220
+ if (!is_kill)
9221
+ hclge_add_vport_vlan_table(vport, vlan_id,
9222
+ writen_to_tbl);
9223
+ else if (is_kill && vlan_id != 0)
9224
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
9225
+ } else if (is_kill) {
9226
+		/* when removing the hw vlan filter fails, record the vlan id
9227
+		 * and try to remove it from hw later, to stay consistent
9228
+		 * with the stack
9229
+ */
9230
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
9231
+ }
9232
+ return ret;
9233
+}
9234
+
9235
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9236
+{
9237
+#define HCLGE_MAX_SYNC_COUNT 60
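+/* bound the deletions retried per pass so one vport's backlog cannot
+ * monopolize the periodic service task
+ */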
9238
+
9239
+ int i, ret, sync_cnt = 0;
9240
+ u16 vlan_id;
9241
+
9242
+	/* retry the failed vlan deletions for every vport, the PF included */
9243
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
9244
+ struct hclge_vport *vport = &hdev->vport[i];
9245
+
9246
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9247
+ VLAN_N_VID);
9248
+ while (vlan_id != VLAN_N_VID) {
9249
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9250
+ vport->vport_id, vlan_id,
9251
+ true);
9252
+ if (ret && ret != -EINVAL)
9253
+ return;
9254
+
9255
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9256
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
9257
+
9258
+ sync_cnt++;
9259
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9260
+ return;
9261
+
9262
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9263
+ VLAN_N_VID);
9264
+ }
9265
+ }
9266
+}
9267
+
9268
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
50289269 {
50299270 struct hclge_config_max_frm_size_cmd *req;
50309271 struct hclge_desc desc;
5031
- int max_frm_size;
5032
- int ret;
5033
-
5034
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5035
-
5036
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
5037
- max_frm_size > HCLGE_MAC_MAX_FRAME)
5038
- return -EINVAL;
5039
-
5040
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
50419272
50429273 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
50439274
50449275 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
5045
- req->max_frm_size = cpu_to_le16(max_frm_size);
9276
+ req->max_frm_size = cpu_to_le16(new_mps);
50469277 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
50479278
5048
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5049
- if (ret)
5050
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
5051
- else
5052
- hdev->mps = max_frm_size;
5053
-
5054
- return ret;
9279
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
50559280 }
50569281
50579282 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
50589283 {
50599284 struct hclge_vport *vport = hclge_get_vport(handle);
5060
- struct hclge_dev *hdev = vport->back;
5061
- int ret;
50629285
5063
- ret = hclge_set_mac_mtu(hdev, new_mtu);
9286
+ return hclge_set_vport_mtu(vport, new_mtu);
9287
+}
9288
+
9289
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9290
+{
9291
+ struct hclge_dev *hdev = vport->back;
9292
+ int i, max_frm_size, ret;
9293
+
9294
+	/* HW supports 2 layers of VLAN tags */
9295
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
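+	/* e.g. new_mtu 1500 -> 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
+	 * 8 (2 * VLAN_HLEN) = 1526 bytes on the wire
+	 */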
9296
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9297
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
9298
+ return -EINVAL;
9299
+
9300
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9301
+ mutex_lock(&hdev->vport_lock);
9302
+ /* VF's mps must fit within hdev->mps */
9303
+ if (vport->vport_id && max_frm_size > hdev->mps) {
9304
+ mutex_unlock(&hdev->vport_lock);
9305
+ return -EINVAL;
9306
+ } else if (vport->vport_id) {
9307
+ vport->mps = max_frm_size;
9308
+ mutex_unlock(&hdev->vport_lock);
9309
+ return 0;
9310
+ }
9311
+
9312
+	/* the PF's mps must not be less than any VF's mps */
9313
+ for (i = 1; i < hdev->num_alloc_vport; i++)
9314
+ if (max_frm_size < hdev->vport[i].mps) {
9315
+ mutex_unlock(&hdev->vport_lock);
9316
+ return -EINVAL;
9317
+ }
9318
+
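+	/* quiesce the client while the MAC frame size and the rx buffer
+	 * allocation are reconfigured
+	 */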
9319
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9320
+
9321
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
50649322 if (ret) {
50659323 dev_err(&hdev->pdev->dev,
50669324 "Change mtu fail, ret =%d\n", ret);
5067
- return ret;
9325
+ goto out;
50689326 }
9327
+
9328
+ hdev->mps = max_frm_size;
9329
+ vport->mps = max_frm_size;
50699330
50709331 ret = hclge_buffer_alloc(hdev);
50719332 if (ret)
50729333 dev_err(&hdev->pdev->dev,
50739334 "Allocate buffer fail, ret =%d\n", ret);
50749335
9336
+out:
9337
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9338
+ mutex_unlock(&hdev->vport_lock);
50759339 return ret;
50769340 }
50779341
....@@ -5086,7 +9350,8 @@
50869350
50879351 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
50889352 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5089
- hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
9353
+ if (enable)
9354
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
50909355
50919356 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
50929357 if (ret) {
....@@ -5119,8 +9384,7 @@
51199384 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
51209385 }
51219386
5122
-static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
5123
- u16 queue_id)
9387
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
51249388 {
51259389 struct hnae3_queue *queue;
51269390 struct hclge_tqp *tqp;
....@@ -5131,7 +9395,7 @@
51319395 return tqp->index;
51329396 }
51339397
5134
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9398
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
51359399 {
51369400 struct hclge_vport *vport = hclge_get_vport(handle);
51379401 struct hclge_dev *hdev = vport->back;
....@@ -5140,44 +9404,41 @@
51409404 u16 queue_gid;
51419405 int ret;
51429406
5143
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5144
- return;
5145
-
51469407 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
51479408
51489409 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
51499410 if (ret) {
5150
- dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
5151
- return;
9411
+ dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9412
+ return ret;
51529413 }
51539414
51549415 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
51559416 if (ret) {
5156
- dev_warn(&hdev->pdev->dev,
5157
- "Send reset tqp cmd fail, ret = %d\n", ret);
5158
- return;
9417
+ dev_err(&hdev->pdev->dev,
9418
+ "Send reset tqp cmd fail, ret = %d\n", ret);
9419
+ return ret;
51599420 }
51609421
5161
- reset_try_times = 0;
51629422 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5163
- /* Wait for tqp hw reset */
5164
- msleep(20);
51659423 reset_status = hclge_get_reset_status(hdev, queue_gid);
51669424 if (reset_status)
51679425 break;
9426
+
9427
+ /* Wait for tqp hw reset */
9428
+ usleep_range(1000, 1200);
51689429 }
51699430
51709431 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5171
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5172
- return;
9432
+ dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9433
+ return ret;
51739434 }
51749435
51759436 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5176
- if (ret) {
5177
- dev_warn(&hdev->pdev->dev,
5178
- "Deassert the soft reset fail, ret = %d\n", ret);
5179
- return;
5180
- }
9437
+ if (ret)
9438
+ dev_err(&hdev->pdev->dev,
9439
+ "Deassert the soft reset fail, ret = %d\n", ret);
9440
+
9441
+ return ret;
51819442 }
51829443
51839444 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
....@@ -5204,13 +9465,13 @@
52049465 return;
52059466 }
52069467
5207
- reset_try_times = 0;
52089468 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5209
- /* Wait for tqp hw reset */
5210
- msleep(20);
52119469 reset_status = hclge_get_reset_status(hdev, queue_gid);
52129470 if (reset_status)
52139471 break;
9472
+
9473
+ /* Wait for tqp hw reset */
9474
+ usleep_range(1000, 1200);
52149475 }
52159476
52169477 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
....@@ -5232,20 +9493,6 @@
52329493 return hdev->fw_version;
52339494 }
52349495
5235
-static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
5236
- u32 *flowctrl_adv)
5237
-{
5238
- struct hclge_vport *vport = hclge_get_vport(handle);
5239
- struct hclge_dev *hdev = vport->back;
5240
- struct phy_device *phydev = hdev->hw.mac.phydev;
5241
-
5242
- if (!phydev)
5243
- return;
5244
-
5245
- *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
5246
- (phydev->advertising & ADVERTISED_Asym_Pause);
5247
-}
5248
-
52499496 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
52509497 {
52519498 struct phy_device *phydev = hdev->hw.mac.phydev;
....@@ -5253,59 +9500,36 @@
52539500 if (!phydev)
52549501 return;
52559502
5256
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5257
-
5258
- if (rx_en)
5259
- phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5260
-
5261
- if (tx_en)
5262
- phydev->advertising ^= ADVERTISED_Asym_Pause;
9503
+ phy_set_asym_pause(phydev, rx_en, tx_en);
52639504 }
52649505
52659506 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
52669507 {
52679508 int ret;
52689509
5269
- if (rx_en && tx_en)
5270
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
5271
- else if (rx_en && !tx_en)
5272
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5273
- else if (!rx_en && tx_en)
5274
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5275
- else
5276
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
5277
-
52789510 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
52799511 return 0;
52809512
52819513 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5282
- if (ret) {
5283
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5284
- ret);
5285
- return ret;
5286
- }
9514
+ if (ret)
9515
+ dev_err(&hdev->pdev->dev,
9516
+ "configure pauseparam error, ret = %d.\n", ret);
52879517
5288
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5289
-
5290
- return 0;
9518
+ return ret;
52919519 }
52929520
52939521 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
52949522 {
52959523 struct phy_device *phydev = hdev->hw.mac.phydev;
52969524 u16 remote_advertising = 0;
5297
- u16 local_advertising = 0;
9525
+ u16 local_advertising;
52989526 u32 rx_pause, tx_pause;
52999527 u8 flowctl;
53009528
53019529 if (!phydev->link || !phydev->autoneg)
53029530 return 0;
53039531
5304
- if (phydev->advertising & ADVERTISED_Pause)
5305
- local_advertising = ADVERTISE_PAUSE_CAP;
5306
-
5307
- if (phydev->advertising & ADVERTISED_Asym_Pause)
5308
- local_advertising |= ADVERTISE_PAUSE_ASYM;
9532
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
53099533
53109534 if (phydev->pause)
53119535 remote_advertising = LPA_PAUSE_CAP;
....@@ -5331,8 +9555,9 @@
53319555 {
53329556 struct hclge_vport *vport = hclge_get_vport(handle);
53339557 struct hclge_dev *hdev = vport->back;
9558
+ struct phy_device *phydev = hdev->hw.mac.phydev;
53349559
5335
- *auto_neg = hclge_get_autoneg(handle);
9560
+ *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
53369561
53379562 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
53389563 *rx_en = 0;
....@@ -5355,6 +9580,21 @@
53559580 }
53569581 }
53579582
9583
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9584
+ u32 rx_en, u32 tx_en)
9585
+{
9586
+ if (rx_en && tx_en)
9587
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
9588
+ else if (rx_en && !tx_en)
9589
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9590
+ else if (!rx_en && tx_en)
9591
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9592
+ else
9593
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
9594
+
9595
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9596
+}
9597
+
53589598 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
53599599 u32 rx_en, u32 tx_en)
53609600 {
....@@ -5363,11 +9603,13 @@
53639603 struct phy_device *phydev = hdev->hw.mac.phydev;
53649604 u32 fc_autoneg;
53659605
5366
- fc_autoneg = hclge_get_autoneg(handle);
5367
- if (auto_neg != fc_autoneg) {
5368
- dev_info(&hdev->pdev->dev,
5369
- "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
5370
- return -EOPNOTSUPP;
9606
+ if (phydev) {
9607
+ fc_autoneg = hclge_get_autoneg(handle);
9608
+ if (auto_neg != fc_autoneg) {
9609
+ dev_info(&hdev->pdev->dev,
9610
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9611
+ return -EOPNOTSUPP;
9612
+ }
53719613 }
53729614
53739615 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
....@@ -5378,16 +9620,15 @@
53789620
53799621 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
53809622
5381
- if (!fc_autoneg)
9623
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9624
+
9625
+ if (!auto_neg)
53829626 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
53839627
5384
- /* Only support flow control negotiation for netdev with
5385
- * phy attached for now.
5386
- */
5387
- if (!phydev)
5388
- return -EOPNOTSUPP;
9628
+ if (phydev)
9629
+ return phy_start_aneg(phydev);
53899630
5390
- return phy_start_aneg(phydev);
9631
+ return -EOPNOTSUPP;
53919632 }
53929633
53939634 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
....@@ -5404,13 +9645,23 @@
54049645 *auto_neg = hdev->hw.mac.autoneg;
54059646 }
54069647
5407
-static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
9648
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9649
+ u8 *module_type)
54089650 {
54099651 struct hclge_vport *vport = hclge_get_vport(handle);
54109652 struct hclge_dev *hdev = vport->back;
54119653
9654
+	/* When the nic is down, the service task is not running and does not
9655
+	 * update the port information every second. Query the port information
9656
+	 * before returning the media type, to ensure the media information is correct.
9657
+ */
9658
+ hclge_update_port_info(hdev);
9659
+
54129660 if (media_type)
54139661 *media_type = hdev->hw.mac.media_type;
9662
+
9663
+ if (module_type)
9664
+ *module_type = hdev->hw.mac.module_type;
54149665 }
54159666
54169667 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
....@@ -5419,7 +9670,8 @@
54199670 struct hclge_vport *vport = hclge_get_vport(handle);
54209671 struct hclge_dev *hdev = vport->back;
54219672 struct phy_device *phydev = hdev->hw.mac.phydev;
5422
- int mdix_ctrl, mdix, retval, is_resolved;
9673
+ int mdix_ctrl, mdix, is_resolved;
9674
+ unsigned int retval;
54239675
54249676 if (!phydev) {
54259677 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
....@@ -5462,14 +9714,125 @@
54629714 *tp_mdix = ETH_TP_MDI;
54639715 }
54649716
5465
-static int hclge_init_instance_hw(struct hclge_dev *hdev)
9717
+static void hclge_info_show(struct hclge_dev *hdev)
54669718 {
5467
- return hclge_mac_connect_phy(hdev);
9719
+ struct device *dev = &hdev->pdev->dev;
9720
+
9721
+ dev_info(dev, "PF info begin:\n");
9722
+
9723
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9724
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9725
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9726
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9727
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9728
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9729
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9730
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9731
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9732
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9733
+ dev_info(dev, "This is %s PF\n",
9734
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9735
+ dev_info(dev, "DCB %s\n",
9736
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9737
+ dev_info(dev, "MQPRIO %s\n",
9738
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9739
+
9740
+ dev_info(dev, "PF info end.\n");
54689741 }
54699742
5470
-static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
9743
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9744
+ struct hclge_vport *vport)
54719745 {
5472
- hclge_mac_disconnect_phy(hdev);
9746
+ struct hnae3_client *client = vport->nic.client;
9747
+ struct hclge_dev *hdev = ae_dev->priv;
9748
+ int rst_cnt = hdev->rst_stats.reset_cnt;
9749
+ int ret;
9750
+
9751
+ ret = client->ops->init_instance(&vport->nic);
9752
+ if (ret)
9753
+ return ret;
9754
+
9755
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
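+	/* rst_cnt was sampled before init_instance(); if a reset is in
+	 * flight or the reset count moved meanwhile, the new instance is
+	 * stale, so tear it down and return -EBUSY
+	 */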
9756
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9757
+ rst_cnt != hdev->rst_stats.reset_cnt) {
9758
+ ret = -EBUSY;
9759
+ goto init_nic_err;
9760
+ }
9761
+
9762
+ /* Enable nic hw error interrupts */
9763
+ ret = hclge_config_nic_hw_error(hdev, true);
9764
+ if (ret) {
9765
+ dev_err(&ae_dev->pdev->dev,
9766
+ "fail(%d) to enable hw error interrupts\n", ret);
9767
+ goto init_nic_err;
9768
+ }
9769
+
9770
+ hnae3_set_client_init_flag(client, ae_dev, 1);
9771
+
9772
+ if (netif_msg_drv(&hdev->vport->nic))
9773
+ hclge_info_show(hdev);
9774
+
9775
+ return ret;
9776
+
9777
+init_nic_err:
9778
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9779
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9780
+ msleep(HCLGE_WAIT_RESET_DONE);
9781
+
9782
+ client->ops->uninit_instance(&vport->nic, 0);
9783
+
9784
+ return ret;
9785
+}
9786
+
9787
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9788
+ struct hclge_vport *vport)
9789
+{
9790
+ struct hclge_dev *hdev = ae_dev->priv;
9791
+ struct hnae3_client *client;
9792
+ int rst_cnt;
9793
+ int ret;
9794
+
9795
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9796
+ !hdev->nic_client)
9797
+ return 0;
9798
+
9799
+ client = hdev->roce_client;
9800
+ ret = hclge_init_roce_base_info(vport);
9801
+ if (ret)
9802
+ return ret;
9803
+
9804
+ rst_cnt = hdev->rst_stats.reset_cnt;
9805
+ ret = client->ops->init_instance(&vport->roce);
9806
+ if (ret)
9807
+ return ret;
9808
+
9809
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9810
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9811
+ rst_cnt != hdev->rst_stats.reset_cnt) {
9812
+ ret = -EBUSY;
9813
+ goto init_roce_err;
9814
+ }
9815
+
9816
+ /* Enable roce ras interrupts */
9817
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
9818
+ if (ret) {
9819
+ dev_err(&ae_dev->pdev->dev,
9820
+ "fail(%d) to enable roce ras interrupts\n", ret);
9821
+ goto init_roce_err;
9822
+ }
9823
+
9824
+ hnae3_set_client_init_flag(client, ae_dev, 1);
9825
+
9826
+ return 0;
9827
+
9828
+init_roce_err:
9829
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9830
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9831
+ msleep(HCLGE_WAIT_RESET_DONE);
9832
+
9833
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9834
+
9835
+ return ret;
54739836 }
54749837
54759838 static int hclge_init_client_instance(struct hnae3_client *client,
....@@ -5484,48 +9847,15 @@
54849847
54859848 switch (client->type) {
54869849 case HNAE3_CLIENT_KNIC:
5487
-
54889850 hdev->nic_client = client;
54899851 vport->nic.client = client;
5490
- ret = client->ops->init_instance(&vport->nic);
9852
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
54919853 if (ret)
54929854 goto clear_nic;
54939855
5494
- ret = hclge_init_instance_hw(hdev);
5495
- if (ret) {
5496
- client->ops->uninit_instance(&vport->nic,
5497
- 0);
5498
- goto clear_nic;
5499
- }
5500
-
5501
- hnae3_set_client_init_flag(client, ae_dev, 1);
5502
-
5503
- if (hdev->roce_client &&
5504
- hnae3_dev_roce_supported(hdev)) {
5505
- struct hnae3_client *rc = hdev->roce_client;
5506
-
5507
- ret = hclge_init_roce_base_info(vport);
5508
- if (ret)
5509
- goto clear_roce;
5510
-
5511
- ret = rc->ops->init_instance(&vport->roce);
5512
- if (ret)
5513
- goto clear_roce;
5514
-
5515
- hnae3_set_client_init_flag(hdev->roce_client,
5516
- ae_dev, 1);
5517
- }
5518
-
5519
- break;
5520
- case HNAE3_CLIENT_UNIC:
5521
- hdev->nic_client = client;
5522
- vport->nic.client = client;
5523
-
5524
- ret = client->ops->init_instance(&vport->nic);
9856
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
55259857 if (ret)
5526
- goto clear_nic;
5527
-
5528
- hnae3_set_client_init_flag(client, ae_dev, 1);
9858
+ goto clear_roce;
55299859
55309860 break;
55319861 case HNAE3_CLIENT_ROCE:
....@@ -5534,17 +9864,13 @@
55349864 vport->roce.client = client;
55359865 }
55369866
5537
- if (hdev->roce_client && hdev->nic_client) {
5538
- ret = hclge_init_roce_base_info(vport);
5539
- if (ret)
5540
- goto clear_roce;
9867
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
9868
+ if (ret)
9869
+ goto clear_roce;
55419870
5542
- ret = client->ops->init_instance(&vport->roce);
5543
- if (ret)
5544
- goto clear_roce;
5545
-
5546
- hnae3_set_client_init_flag(client, ae_dev, 1);
5547
- }
9871
+ break;
9872
+ default:
9873
+ return -EINVAL;
55489874 }
55499875 }
55509876
....@@ -5570,6 +9896,10 @@
55709896 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
55719897 vport = &hdev->vport[i];
55729898 if (hdev->roce_client) {
9899
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9900
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9901
+ msleep(HCLGE_WAIT_RESET_DONE);
9902
+
55739903 hdev->roce_client->ops->uninit_instance(&vport->roce,
55749904 0);
55759905 hdev->roce_client = NULL;
....@@ -5578,7 +9908,10 @@
55789908 if (client->type == HNAE3_CLIENT_ROCE)
55799909 return;
55809910 if (hdev->nic_client && client->ops->uninit_instance) {
5581
- hclge_uninit_instance_hw(hdev);
9911
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9912
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9913
+ msleep(HCLGE_WAIT_RESET_DONE);
9914
+
55829915 client->ops->uninit_instance(&vport->nic, 0);
55839916 hdev->nic_client = NULL;
55849917 vport->nic.client = NULL;
....@@ -5653,6 +9986,7 @@
56539986 set_bit(HCLGE_STATE_DOWN, &hdev->state);
56549987 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
56559988 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9989
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
56569990 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
56579991 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
56589992 }
....@@ -5660,15 +9994,102 @@
56609994 static void hclge_state_uninit(struct hclge_dev *hdev)
56619995 {
56629996 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9997
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
56639998
5664
- if (hdev->service_timer.function)
5665
- del_timer_sync(&hdev->service_timer);
5666
- if (hdev->service_task.func)
5667
- cancel_work_sync(&hdev->service_task);
5668
- if (hdev->rst_service_task.func)
5669
- cancel_work_sync(&hdev->rst_service_task);
5670
- if (hdev->mbx_service_task.func)
5671
- cancel_work_sync(&hdev->mbx_service_task);
9999
+ if (hdev->reset_timer.function)
10000
+ del_timer_sync(&hdev->reset_timer);
10001
+ if (hdev->service_task.work.func)
10002
+ cancel_delayed_work_sync(&hdev->service_task);
10003
+}
10004
+
10005
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10006
+{
10007
+#define HCLGE_FLR_RETRY_WAIT_MS 500
10008
+#define HCLGE_FLR_RETRY_CNT 5
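+/* preparing the FLR may race with a pending reset: retry while a reset
+ * is pending, or up to HCLGE_FLR_RETRY_CNT times on plain failure,
+ * sleeping HCLGE_FLR_RETRY_WAIT_MS between attempts
+ */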
10009
+
10010
+ struct hclge_dev *hdev = ae_dev->priv;
10011
+ int retry_cnt = 0;
10012
+ int ret;
10013
+
10014
+retry:
10015
+ down(&hdev->reset_sem);
10016
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10017
+ hdev->reset_type = HNAE3_FLR_RESET;
10018
+ ret = hclge_reset_prepare(hdev);
10019
+ if (ret || hdev->reset_pending) {
10020
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10021
+ ret);
10022
+ if (hdev->reset_pending ||
10023
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10024
+ dev_err(&hdev->pdev->dev,
10025
+ "reset_pending:0x%lx, retry_cnt:%d\n",
10026
+ hdev->reset_pending, retry_cnt);
10027
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10028
+ up(&hdev->reset_sem);
10029
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
10030
+ goto retry;
10031
+ }
10032
+ }
10033
+
10034
+ /* disable misc vector before FLR done */
10035
+ hclge_enable_vector(&hdev->misc_vector, false);
10036
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10037
+ hdev->rst_stats.flr_rst_cnt++;
10038
+}
10039
+
10040
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10041
+{
10042
+ struct hclge_dev *hdev = ae_dev->priv;
10043
+ int ret;
10044
+
10045
+ hclge_enable_vector(&hdev->misc_vector, true);
10046
+
10047
+ ret = hclge_reset_rebuild(hdev);
10048
+ if (ret)
10049
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10050
+
10051
+ hdev->reset_type = HNAE3_NONE_RESET;
10052
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10053
+ up(&hdev->reset_sem);
10054
+}
10055
+
10056
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10057
+{
10058
+ u16 i;
10059
+
10060
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10061
+ struct hclge_vport *vport = &hdev->vport[i];
10062
+ int ret;
10063
+
10064
+ /* Send cmd to clear VF's FUNC_RST_ING */
10065
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10066
+ if (ret)
10067
+ dev_warn(&hdev->pdev->dev,
10068
+ "clear vf(%u) rst failed %d!\n",
10069
+ vport->vport_id, ret);
10070
+ }
10071
+}
10072
+
10073
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10074
+{
10075
+ struct hclge_desc desc;
10076
+ int ret;
10077
+
10078
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10079
+
10080
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10081
+	/* This new command is only supported by new firmware; it will
10082
+	 * fail with older firmware. The error value -EOPNOTSUPP can only be
10083
+	 * returned by older firmware running this command, so to keep the
10084
+	 * code backward compatible we override this value and return
10085
+	 * success.
10086
+ */
10087
+ if (ret && ret != -EOPNOTSUPP) {
10088
+ dev_err(&hdev->pdev->dev,
10089
+ "failed to clear hw resource, ret = %d\n", ret);
10090
+ return ret;
10091
+ }
10092
+ return 0;
567210093 }
567310094
567410095 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -5678,37 +10099,47 @@
567810099 int ret;
567910100
568010101 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5681
- if (!hdev) {
5682
- ret = -ENOMEM;
5683
- goto out;
5684
- }
10102
+ if (!hdev)
10103
+ return -ENOMEM;
568510104
568610105 hdev->pdev = pdev;
568710106 hdev->ae_dev = ae_dev;
568810107 hdev->reset_type = HNAE3_NONE_RESET;
10108
+ hdev->reset_level = HNAE3_FUNC_RESET;
568910109 ae_dev->priv = hdev;
569010110
10111
+	/* HW supports 2 layers of VLAN tags */
10112
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10113
+
10114
+ mutex_init(&hdev->vport_lock);
10115
+ spin_lock_init(&hdev->fd_rule_lock);
10116
+ sema_init(&hdev->reset_sem, 1);
10117
+
569110118 ret = hclge_pci_init(hdev);
5692
- if (ret) {
5693
- dev_err(&pdev->dev, "PCI init failed\n");
10119
+ if (ret)
569410120 goto out;
5695
- }
569610121
569710122 /* Firmware command queue initialize */
569810123 ret = hclge_cmd_queue_init(hdev);
5699
- if (ret) {
5700
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
10124
+ if (ret)
570110125 goto err_pci_uninit;
5702
- }
570310126
570410127 /* Firmware command initialize */
570510128 ret = hclge_cmd_init(hdev);
570610129 if (ret)
570710130 goto err_cmd_uninit;
570810131
10132
+ ret = hclge_clear_hw_resource(hdev);
10133
+ if (ret)
10134
+ goto err_cmd_uninit;
10135
+
570910136 ret = hclge_get_cap(hdev);
10137
+ if (ret)
10138
+ goto err_cmd_uninit;
10139
+
10140
+ ret = hclge_query_dev_specs(hdev);
571010141 if (ret) {
5711
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
10142
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
571210143 ret);
571310144 goto err_cmd_uninit;
571410145 }
....@@ -5726,12 +10157,8 @@
572610157 }
572710158
572810159 ret = hclge_misc_irq_init(hdev);
5729
- if (ret) {
5730
- dev_err(&pdev->dev,
5731
- "Misc IRQ(vector0) init error, ret = %d.\n",
5732
- ret);
10160
+ if (ret)
573310161 goto err_msi_uninit;
5734
- }
573510162
573610163 ret = hclge_alloc_tqps(hdev);
573710164 if (ret) {
....@@ -5740,25 +10167,22 @@
574010167 }
574110168
574210169 ret = hclge_alloc_vport(hdev);
5743
- if (ret) {
5744
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
10170
+ if (ret)
574510171 goto err_msi_irq_uninit;
5746
- }
574710172
574810173 ret = hclge_map_tqp(hdev);
5749
- if (ret) {
5750
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10174
+ if (ret)
575110175 goto err_msi_irq_uninit;
5752
- }
575310176
575410177 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
575510178 ret = hclge_mac_mdio_config(hdev);
5756
- if (ret) {
5757
- dev_err(&hdev->pdev->dev,
5758
- "mdio config fail ret=%d\n", ret);
10179
+ if (ret)
575910180 goto err_msi_irq_uninit;
5760
- }
576110181 }
10182
+
10183
+ ret = hclge_init_umv_space(hdev);
10184
+ if (ret)
10185
+ goto err_mdiobus_unreg;
576210186
576310187 ret = hclge_mac_init(hdev);
576410188 if (ret) {
....@@ -5771,6 +10195,10 @@
577110195 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
577210196 goto err_mdiobus_unreg;
577310197 }
10198
+
10199
+ ret = hclge_config_gro(hdev, true);
10200
+ if (ret)
10201
+ goto err_mdiobus_unreg;
577410202
577510203 ret = hclge_init_vlan_config(hdev);
577610204 if (ret) {
....@@ -5797,21 +10225,54 @@
579710225 goto err_mdiobus_unreg;
579810226 }
579910227
10228
+ ret = hclge_init_fd_config(hdev);
10229
+ if (ret) {
10230
+ dev_err(&pdev->dev,
10231
+ "fd table init fail, ret=%d\n", ret);
10232
+ goto err_mdiobus_unreg;
10233
+ }
10234
+
10235
+ INIT_KFIFO(hdev->mac_tnl_log);
10236
+
580010237 hclge_dcb_ops_set(hdev);
580110238
5802
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5803
- INIT_WORK(&hdev->service_task, hclge_service_task);
5804
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5805
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
10239
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10240
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10241
+
10242
+	/* Set up affinity after the service timer setup, because add_timer_on
10243
+	 * is called from the affinity notify callback.
10244
+ */
10245
+ hclge_misc_affinity_setup(hdev);
580610246
580710247 hclge_clear_all_event_cause(hdev);
10248
+ hclge_clear_resetting_state(hdev);
10249
+
10250
+	/* Log and clear the hw errors that have already occurred */
10251
+ hclge_handle_all_hns_hw_errors(ae_dev);
10252
+
10253
+	/* request a delayed reset for the error recovery, because an immediate
10254
+	 * global reset on a PF may affect the pending initialization of other PFs
10255
+ */
10256
+ if (ae_dev->hw_err_reset_req) {
10257
+ enum hnae3_reset_type reset_level;
10258
+
10259
+ reset_level = hclge_get_reset_level(ae_dev,
10260
+ &ae_dev->hw_err_reset_req);
10261
+ hclge_set_def_reset_request(ae_dev, reset_level);
10262
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10263
+ }
580810264
580910265 /* Enable MISC vector(vector0) */
581010266 hclge_enable_vector(&hdev->misc_vector, true);
581110267
581210268 hclge_state_init(hdev);
10269
+ hdev->last_reset_time = jiffies;
581310270
5814
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
10271
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10272
+ HCLGE_DRIVER_NAME);
10273
+
10274
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10275
+
581510276 return 0;
581610277
581710278 err_mdiobus_unreg:
....@@ -5822,19 +10283,245 @@
582210283 err_msi_uninit:
582310284 pci_free_irq_vectors(pdev);
582410285 err_cmd_uninit:
5825
- hclge_destroy_cmd_queue(&hdev->hw);
10286
+ hclge_cmd_uninit(hdev);
582610287 err_pci_uninit:
582710288 pcim_iounmap(pdev, hdev->hw.io_base);
582810289 pci_clear_master(pdev);
582910290 pci_release_regions(pdev);
583010291 pci_disable_device(pdev);
583110292 out:
10293
+ mutex_destroy(&hdev->vport_lock);
583210294 return ret;
583310295 }
583410296
583510297 static void hclge_stats_clear(struct hclge_dev *hdev)
583610298 {
5837
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
10299
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10300
+}
10301
+
10302
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10303
+{
10304
+ return hclge_config_switch_param(hdev, vf, enable,
10305
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
10306
+}
10307
+
10308
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10309
+{
10310
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10311
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
10312
+ enable, vf);
10313
+}
10314
+
10315
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10316
+{
10317
+ int ret;
10318
+
10319
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10320
+ if (ret) {
10321
+ dev_err(&hdev->pdev->dev,
10322
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
10323
+ vf, enable ? "on" : "off", ret);
10324
+ return ret;
10325
+ }
10326
+
10327
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10328
+ if (ret)
10329
+ dev_err(&hdev->pdev->dev,
10330
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
10331
+ vf, enable ? "on" : "off", ret);
10332
+
10333
+ return ret;
10334
+}
10335
+
10336
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10337
+ bool enable)
10338
+{
10339
+ struct hclge_vport *vport = hclge_get_vport(handle);
10340
+ struct hclge_dev *hdev = vport->back;
10341
+ u32 new_spoofchk = enable ? 1 : 0;
10342
+ int ret;
10343
+
10344
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10345
+ return -EOPNOTSUPP;
10346
+
10347
+ vport = hclge_get_vf_vport(hdev, vf);
10348
+ if (!vport)
10349
+ return -EINVAL;
10350
+
10351
+ if (vport->vf_info.spoofchk == new_spoofchk)
10352
+ return 0;
10353
+
10354
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10355
+ dev_warn(&hdev->pdev->dev,
10356
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10357
+ vf);
10358
+ else if (enable && hclge_is_umv_space_full(vport, true))
10359
+ dev_warn(&hdev->pdev->dev,
10360
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10361
+ vf);
10362
+
10363
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10364
+ if (ret)
10365
+ return ret;
10366
+
10367
+ vport->vf_info.spoofchk = new_spoofchk;
10368
+ return 0;
10369
+}
10370
+
10371
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10372
+{
10373
+ struct hclge_vport *vport = hdev->vport;
10374
+ int ret;
10375
+ int i;
10376
+
10377
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10378
+ return 0;
10379
+
10380
+ /* resume the vf spoof check state after reset */
10381
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10382
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10383
+ vport->vf_info.spoofchk);
10384
+ if (ret)
10385
+ return ret;
10386
+
10387
+ vport++;
10388
+ }
10389
+
10390
+ return 0;
10391
+}
10392
+
10393
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10394
+{
10395
+ struct hclge_vport *vport = hclge_get_vport(handle);
10396
+ struct hclge_dev *hdev = vport->back;
10397
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10398
+ u32 new_trusted = enable ? 1 : 0;
10399
+ bool en_bc_pmc;
10400
+ int ret;
10401
+
10402
+ vport = hclge_get_vf_vport(hdev, vf);
10403
+ if (!vport)
10404
+ return -EINVAL;
10405
+
10406
+ if (vport->vf_info.trusted == new_trusted)
10407
+ return 0;
10408
+
10409
+ /* Disable promisc mode for VF if it is not trusted any more. */
10410
+ if (!enable && vport->vf_info.promisc_enable) {
10411
+ en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10412
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
10413
+ en_bc_pmc);
10414
+ if (ret)
10415
+ return ret;
10416
+ vport->vf_info.promisc_enable = 0;
10417
+ hclge_inform_vf_promisc_info(vport);
10418
+ }
10419
+
10420
+ vport->vf_info.trusted = new_trusted;
10421
+
10422
+ return 0;
10423
+}
10424
+
10425
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10426
+{
10427
+ int ret;
10428
+ int vf;
10429
+
10430
+ /* reset vf rate to default value */
10431
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10432
+ struct hclge_vport *vport = &hdev->vport[vf];
10433
+
10434
+ vport->vf_info.max_tx_rate = 0;
10435
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10436
+ if (ret)
10437
+ dev_err(&hdev->pdev->dev,
10438
+ "vf%d failed to reset to default, ret=%d\n",
10439
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
10440
+ }
10441
+}
10442
+
10443
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10444
+ int min_tx_rate, int max_tx_rate)
10445
+{
10446
+ if (min_tx_rate != 0 ||
10447
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10448
+ dev_err(&hdev->pdev->dev,
10449
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10450
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10451
+ return -EINVAL;
10452
+ }
10453
+
10454
+ return 0;
10455
+}
10456
+
10457
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10458
+ int min_tx_rate, int max_tx_rate, bool force)
10459
+{
10460
+ struct hclge_vport *vport = hclge_get_vport(handle);
10461
+ struct hclge_dev *hdev = vport->back;
10462
+ int ret;
10463
+
10464
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10465
+ if (ret)
10466
+ return ret;
10467
+
10468
+ vport = hclge_get_vf_vport(hdev, vf);
10469
+ if (!vport)
10470
+ return -EINVAL;
10471
+
10472
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10473
+ return 0;
10474
+
10475
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10476
+ if (ret)
10477
+ return ret;
10478
+
10479
+ vport->vf_info.max_tx_rate = max_tx_rate;
10480
+
10481
+ return 0;
10482
+}
10483
+
10484
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10485
+{
10486
+ struct hnae3_handle *handle = &hdev->vport->nic;
10487
+ struct hclge_vport *vport;
10488
+ int ret;
10489
+ int vf;
10490
+
10491
+ /* resume the vf max_tx_rate after reset */
10492
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10493
+ vport = hclge_get_vf_vport(hdev, vf);
10494
+ if (!vport)
10495
+ return -EINVAL;
10496
+
10497
+		/* zero means max rate; after reset, the firmware has already set
10498
+		 * it to max rate, so just continue.
10499
+ */
10500
+ if (!vport->vf_info.max_tx_rate)
10501
+ continue;
10502
+
10503
+ ret = hclge_set_vf_rate(handle, vf, 0,
10504
+ vport->vf_info.max_tx_rate, true);
10505
+ if (ret) {
10506
+ dev_err(&hdev->pdev->dev,
10507
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
10508
+ vf, vport->vf_info.max_tx_rate, ret);
10509
+ return ret;
10510
+ }
10511
+ }
10512
+
10513
+ return 0;
10514
+}
10515
+
10516
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
10517
+{
10518
+ struct hclge_vport *vport = hdev->vport;
10519
+ int i;
10520
+
10521
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10522
+ hclge_vport_stop(vport);
10523
+ vport++;
10524
+ }
583810525 }
583910526
584010527 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -5846,24 +10533,20 @@
584610533 set_bit(HCLGE_STATE_DOWN, &hdev->state);
584710534
584810535 hclge_stats_clear(hdev);
5849
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10536
+	/* NOTE: a pf reset doesn't need to clear or restore the pf and vf
10537
+	 * table entries, so the tables in memory should not be cleaned here.
10538
+ */
10539
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
10540
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
10541
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10542
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10543
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10544
+ hclge_reset_umv_space(hdev);
10545
+ }
585010546
585110547 ret = hclge_cmd_init(hdev);
585210548 if (ret) {
585310549 dev_err(&pdev->dev, "Cmd queue init failed\n");
5854
- return ret;
5855
- }
5856
-
5857
- ret = hclge_get_cap(hdev);
5858
- if (ret) {
5859
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5860
- ret);
5861
- return ret;
5862
- }
5863
-
5864
- ret = hclge_configure(hdev);
5865
- if (ret) {
5866
- dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
586710550 return ret;
586810551 }
586910552
....@@ -5885,13 +10568,17 @@
588510568 return ret;
588610569 }
588710570
10571
+ ret = hclge_config_gro(hdev, true);
10572
+ if (ret)
10573
+ return ret;
10574
+
588810575 ret = hclge_init_vlan_config(hdev);
588910576 if (ret) {
589010577 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
589110578 return ret;
589210579 }
589310580
5894
- ret = hclge_tm_init_hw(hdev);
10581
+ ret = hclge_tm_init_hw(hdev, true);
589510582 if (ret) {
589610583 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
589710584 return ret;
....@@ -5902,6 +10589,52 @@
590210589 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
590310590 return ret;
590410591 }
10592
+
10593
+ ret = init_mgr_tbl(hdev);
10594
+ if (ret) {
10595
+ dev_err(&pdev->dev,
10596
+ "failed to reinit manager table, ret = %d\n", ret);
10597
+ return ret;
10598
+ }
10599
+
10600
+ ret = hclge_init_fd_config(hdev);
10601
+ if (ret) {
10602
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10603
+ return ret;
10604
+ }
10605
+
10606
+ /* Log and clear the hw errors those already occurred */
10607
+ hclge_handle_all_hns_hw_errors(ae_dev);
10608
+
10609
+ /* Re-enable the hw error interrupts because
10610
+ * the interrupts get disabled on global reset.
10611
+ */
10612
+ ret = hclge_config_nic_hw_error(hdev, true);
10613
+ if (ret) {
10614
+ dev_err(&pdev->dev,
10615
+ "fail(%d) to re-enable NIC hw error interrupts\n",
10616
+ ret);
10617
+ return ret;
10618
+ }
10619
+
10620
+ if (hdev->roce_client) {
10621
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
10622
+ if (ret) {
10623
+ dev_err(&pdev->dev,
10624
+ "fail(%d) to re-enable roce ras interrupts\n",
10625
+ ret);
10626
+ return ret;
10627
+ }
10628
+ }
10629
+
10630
+ hclge_reset_vport_state(hdev);
10631
+ ret = hclge_reset_vport_spoofchk(hdev);
10632
+ if (ret)
10633
+ return ret;
10634
+
10635
+ ret = hclge_resume_vf_rate(hdev);
10636
+ if (ret)
10637
+ return ret;
590510638
590610639 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
590710640 HCLGE_DRIVER_NAME);
....@@ -5914,7 +10647,11 @@
591410647 struct hclge_dev *hdev = ae_dev->priv;
591510648 struct hclge_mac *mac = &hdev->hw.mac;
591610649
10650
+ hclge_reset_vf_rate(hdev);
10651
+ hclge_clear_vf_vlan(hdev);
10652
+ hclge_misc_affinity_teardown(hdev);
591710653 hclge_state_uninit(hdev);
10654
+ hclge_uninit_mac_table(hdev);
591810655
591910656 if (mac->phydev)
592010657 mdiobus_unregister(mac->mdio_bus);
....@@ -5923,9 +10660,16 @@
592310660 hclge_enable_vector(&hdev->misc_vector, false);
592410661 synchronize_irq(hdev->misc_vector.vector_irq);
592510662
5926
- hclge_destroy_cmd_queue(&hdev->hw);
10663
+ /* Disable all hw interrupts */
10664
+ hclge_config_mac_tnl_int(hdev, false);
10665
+ hclge_config_nic_hw_error(hdev, false);
10666
+ hclge_config_rocee_ras_interrupt(hdev, false);
10667
+
10668
+ hclge_cmd_uninit(hdev);
592710669 hclge_misc_irq_uninit(hdev);
592810670 hclge_pci_uninit(hdev);
10671
+ mutex_destroy(&hdev->vport_lock);
10672
+ hclge_uninit_vport_vlan_table(hdev);
592910673 ae_dev->priv = NULL;
593010674 }
593110675
....@@ -5949,72 +10693,36 @@
594910693 }
595010694
595110695 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5952
- u16 *free_tqps, u16 *max_rss_size)
10696
+ u16 *alloc_tqps, u16 *max_rss_size)
595310697 {
595410698 struct hclge_vport *vport = hclge_get_vport(handle);
595510699 struct hclge_dev *hdev = vport->back;
5956
- u16 temp_tqps = 0;
5957
- int i;
595810700
5959
- for (i = 0; i < hdev->num_tqps; i++) {
5960
- if (!hdev->htqp[i].alloced)
5961
- temp_tqps++;
5962
- }
5963
- *free_tqps = temp_tqps;
10701
+ *alloc_tqps = vport->alloc_tqps;
596410702 *max_rss_size = hdev->rss_size_max;
596510703 }
596610704
5967
-static void hclge_release_tqp(struct hclge_vport *vport)
5968
-{
5969
- struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5970
- struct hclge_dev *hdev = vport->back;
5971
- int i;
5972
-
5973
- for (i = 0; i < kinfo->num_tqps; i++) {
5974
- struct hclge_tqp *tqp =
5975
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
5976
-
5977
- tqp->q.handle = NULL;
5978
- tqp->q.tqp_index = 0;
5979
- tqp->alloced = false;
5980
- }
5981
-
5982
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5983
- kinfo->tqp = NULL;
5984
-}
5985
-
5986
-static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
10705
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10706
+ bool rxfh_configured)
598710707 {
598810708 struct hclge_vport *vport = hclge_get_vport(handle);
598910709 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10710
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
599010711 struct hclge_dev *hdev = vport->back;
5991
- int cur_rss_size = kinfo->rss_size;
5992
- int cur_tqps = kinfo->num_tqps;
5993
- u16 tc_offset[HCLGE_MAX_TC_NUM];
10712
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10713
+ u16 cur_rss_size = kinfo->rss_size;
10714
+ u16 cur_tqps = kinfo->num_tqps;
599410715 u16 tc_valid[HCLGE_MAX_TC_NUM];
5995
- u16 tc_size[HCLGE_MAX_TC_NUM];
599610716 u16 roundup_size;
599710717 u32 *rss_indir;
5998
- int ret, i;
10718
+ unsigned int i;
10719
+ int ret;
599910720
6000
- /* Free old tqps, and reallocate with new tqp number when nic setup */
6001
- hclge_release_tqp(vport);
10721
+ kinfo->req_rss_size = new_tqps_num;
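+	/* only the requested queue count is recorded here; the
+	 * hclge_tm_vport_map_update() call below is expected to recompute
+	 * rss_size and the tqp mapping from it
+	 */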
600210722
6003
- ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
10723
+ ret = hclge_tm_vport_map_update(hdev);
600410724 if (ret) {
6005
- dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
6006
- return ret;
6007
- }
6008
-
6009
- ret = hclge_map_tqp_to_vport(hdev, vport);
6010
- if (ret) {
6011
- dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
6012
- return ret;
6013
- }
6014
-
6015
- ret = hclge_tm_schd_init(hdev);
6016
- if (ret) {
6017
- dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
10725
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
601810726 return ret;
601910727 }
602010728
....@@ -6035,6 +10743,10 @@
603510743 if (ret)
603610744 return ret;
603710745
10746
+	/* the RSS indirection table has been configured by the user */
10747
+ if (rxfh_configured)
10748
+ goto out;
10749
+
603810750 /* Reinitializes the rss indirect table according to the new RSS size */
603910751 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
604010752 if (!rss_indir)
....@@ -6050,9 +10762,10 @@
605010762
605110763 kfree(rss_indir);
605210764
10765
+out:
605310766 if (!ret)
605410767 dev_info(&hdev->pdev->dev,
6055
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
10768
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
605610769 cur_rss_size, kinfo->rss_size,
605710770 cur_tqps, kinfo->rss_size * kinfo->num_tc);
605810771
....@@ -6088,10 +10801,12 @@
608810801 void *data)
608910802 {
609010803 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10804
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
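+/* a BD holds HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words; in the first
+ * BD the two command header words carry no register data, so it yields
+ * HCLGE_32_BIT_DESC_NODATA_LEN fewer values than the following BDs,
+ * which are consumed whole
+ */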
609110805
609210806 struct hclge_desc *desc;
609310807 u32 *reg_val = data;
609410808 __le32 *desc_data;
10809
+ int nodata_num;
609510810 int cmd_num;
609610811 int i, k, n;
609710812 int ret;
....@@ -6099,7 +10814,9 @@
609910814 if (regs_num == 0)
610010815 return 0;
610110816
6102
- cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
10817
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10818
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10819
+ HCLGE_32_BIT_REG_RTN_DATANUM);
610310820 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
610410821 if (!desc)
610510822 return -ENOMEM;
....@@ -6116,7 +10833,7 @@
611610833 for (i = 0; i < cmd_num; i++) {
611710834 if (i == 0) {
611810835 desc_data = (__le32 *)(&desc[i].data[0]);
6119
- n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
10836
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
612010837 } else {
612110838 desc_data = (__le32 *)(&desc[i]);
612210839 n = HCLGE_32_BIT_REG_RTN_DATANUM;
....@@ -6138,10 +10855,12 @@
613810855 void *data)
613910856 {
614010857 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10858
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
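+/* same scheme as the 32-bit query: the first BD's command header takes
+ * one 64-bit word, so it yields one fewer register value
+ */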
614110859
614210860 struct hclge_desc *desc;
614310861 u64 *reg_val = data;
614410862 __le64 *desc_data;
10863
+ int nodata_len;
614510864 int cmd_num;
614610865 int i, k, n;
614710866 int ret;
....@@ -6149,7 +10868,9 @@
614910868 if (regs_num == 0)
615010869 return 0;
615110870
6152
- cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
10871
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10872
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10873
+ HCLGE_64_BIT_REG_RTN_DATANUM);
615310874 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
615410875 if (!desc)
615510876 return -ENOMEM;
....@@ -6166,7 +10887,7 @@
616610887 for (i = 0; i < cmd_num; i++) {
616710888 if (i == 0) {
616810889 desc_data = (__le64 *)(&desc[i].data[0]);
6169
- n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
10890
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
617010891 } else {
617110892 desc_data = (__le64 *)(&desc[i]);
617210893 n = HCLGE_64_BIT_REG_RTN_DATANUM;
....@@ -6184,30 +10905,288 @@
618410905 return 0;
618510906 }
618610907
10908
+#define MAX_SEPARATE_NUM 4
10909
+#define SEPARATOR_VALUE 0xFDFCFBFA
10910
+#define REG_NUM_PER_LINE 4
10911
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10912
+#define REG_SEPARATOR_LINE 1
10913
+#define REG_NUM_REMAIN_MASK 3
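+/* each block of registers in the dump is padded with SEPARATOR_VALUE
+ * words out to a REG_NUM_PER_LINE boundary (a full separator line when
+ * already aligned), so the blocks can be told apart when parsing
+ */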
10914
+
10915
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10916
+{
10917
+ int i;
10918
+
10919
+ /* initialize command BD except the last one */
10920
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10921
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10922
+ true);
10923
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
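+		/* the NEXT flag chains this BD to the following one; the
+		 * last BD leaves it clear to terminate the chain
+		 */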
10924
+ }
10925
+
10926
+ /* initialize the last command BD */
10927
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10928
+
10929
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10930
+}
10931
+
10932
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10933
+ int *bd_num_list,
10934
+ u32 type_num)
10935
+{
10936
+ u32 entries_per_desc, desc_index, index, offset, i;
10937
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10938
+ int ret;
10939
+
10940
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
10941
+ if (ret) {
10942
+ dev_err(&hdev->pdev->dev,
10943
+ "Get dfx bd num fail, status is %d.\n", ret);
10944
+ return ret;
10945
+ }
10946
+
10947
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
10948
+ for (i = 0; i < type_num; i++) {
10949
+ offset = hclge_dfx_bd_offset_list[i];
10950
+ index = offset % entries_per_desc;
10951
+ desc_index = offset / entries_per_desc;
10952
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10953
+ }
10954
+
10955
+ return ret;
10956
+}
10957
+
10958
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10959
+ struct hclge_desc *desc_src, int bd_num,
10960
+ enum hclge_opcode_type cmd)
10961
+{
10962
+ struct hclge_desc *desc = desc_src;
10963
+ int i, ret;
10964
+
10965
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
10966
+ for (i = 0; i < bd_num - 1; i++) {
10967
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10968
+ desc++;
10969
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
10970
+ }
10971
+
10972
+ desc = desc_src;
10973
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10974
+ if (ret)
10975
+ dev_err(&hdev->pdev->dev,
10976
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10977
+ cmd, ret);
10978
+
10979
+ return ret;
10980
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+				    void *data)
+{
+	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+	struct hclge_desc *desc = desc_src;
+	u32 *reg = data;
+
+	entries_per_desc = ARRAY_SIZE(desc->data);
+	reg_num = entries_per_desc * bd_num;
+	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++) {
+		index = i % entries_per_desc;
+		desc_index = i / entries_per_desc;
+		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
+	}
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	return reg_num + separator_num;
+}
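/* Minimal sketch (hypothetical helper, not in the driver) of the
 * padding rule that hclge_dfx_reg_fetch_data() and hclge_fetch_pf_reg()
 * both follow: emit SEPARATOR_VALUE words until the chunk ends on a
 * REG_NUM_PER_LINE boundary, then return the advanced cursor.
 */
static inline u32 *hclge_pad_to_line(u32 *reg, int reg_num)
{
	int separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	int i;

	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;	/* 1..4 sentinel words */

	return reg;
}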
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+	int data_len_per_desc, bd_num, i;
+	int *bd_num_list;
+	u32 data_len;
+	int ret;
+
+	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+	if (!bd_num_list)
+		return -ENOMEM;
+
+	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg bd num fail, status is %d.\n", ret);
+		goto out;
+	}
+
+	data_len_per_desc = sizeof_field(struct hclge_desc, data);
+	*len = 0;
+	for (i = 0; i < dfx_reg_type_num; i++) {
+		bd_num = bd_num_list[i];
+		data_len = data_len_per_desc * bd_num;
+		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+	}
+
+out:
+	kfree(bd_num_list);
+	return ret;
+}
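/* Worked check of the "+ 1" above: hclge_dfx_reg_fetch_data() returns
 * reg_num + separator_num words, i.e. the data rounded up to a whole
 * REG_LEN_PER_LINE line, with a full separator line added when the
 * data is already aligned.  data_len / REG_LEN_PER_LINE + 1 lines is
 * exactly that count in both cases.
 */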
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+	int bd_num, bd_num_max, buf_len, i;
+	struct hclge_desc *desc_src;
+	int *bd_num_list;
+	u32 *reg = data;
+	int ret;
+
+	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+	if (!bd_num_list)
+		return -ENOMEM;
+
+	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg bd num fail, status is %d.\n", ret);
+		goto out;
+	}
+
+	bd_num_max = bd_num_list[0];
+	for (i = 1; i < dfx_reg_type_num; i++)
+		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+	buf_len = sizeof(*desc_src) * bd_num_max;
+	desc_src = kzalloc(buf_len, GFP_KERNEL);
+	if (!desc_src) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < dfx_reg_type_num; i++) {
+		bd_num = bd_num_list[i];
+		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+					     hclge_dfx_reg_opcode_list[i]);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"Get dfx reg fail, status is %d.\n", ret);
+			break;
+		}
+
+		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+	}
+
+	kfree(desc_src);
+out:
+	kfree(bd_num_list);
+	return ret;
+}
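/* Design note (editorial reading of the code above): one descriptor
 * buffer sized for the largest BD count is reused for every register
 * type rather than reallocated per type; on a command failure the loop
 * stops and the remainder of the caller's buffer is left unwritten.
 */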
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+			      struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET		0x200
+#define HCLGE_RING_INT_REG_OFFSET	0x4
+
+	int i, j, reg_num, separator_num;
+	int data_num_sum;
+	u32 *reg = data;
+
+	/* fetching per-PF registers' values from PF PCIe register space */
+	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+	data_num_sum = reg_num + separator_num;
+
+	reg_num = ARRAY_SIZE(common_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+	data_num_sum += reg_num + separator_num;
+
+	reg_num = ARRAY_SIZE(ring_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (j = 0; j < kinfo->num_tqps; j++) {
+		for (i = 0; i < reg_num; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						ring_reg_addr_list[i] +
+						HCLGE_RING_REG_OFFSET * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
+
+	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (j = 0; j < hdev->num_msi_used - 1; j++) {
+		for (i = 0; i < reg_num; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						tqp_intr_reg_addr_list[i] +
+						HCLGE_RING_INT_REG_OFFSET * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+	return data_num_sum;
+}
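/* Illustrative note: the per-queue and per-vector registers are read
 * by stride -- TQP ring j's block sits HCLGE_RING_REG_OFFSET (0x200)
 * bytes past ring j - 1's, and vector j's interrupt registers sit
 * HCLGE_RING_INT_REG_OFFSET (4) bytes past vector j - 1's, so the same
 * address list is replayed once per stride.
 */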
+
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
+	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
-	u32 regs_num_32_bit, regs_num_64_bit;
+	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
-		return -EOPNOTSUPP;
+		return ret;
	}

-	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg len failed, ret = %d.\n", ret);
+		return ret;
+	}
+
+	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+
+	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

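/* Sketch of the invariant, assuming the layout above: whatever
 * hclge_get_regs_len() counts, hclge_get_regs() must emit word for
 * word, since ethtool allocates the dump buffer from the former and
 * the latter writes through a bare cursor with no bounds check.
 */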
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
-	int ret;
+	int i, reg_num, separator_num, ret;
+	u32 *reg = data;

	*version = hdev->fw_version;

@@ -6218,19 +11197,36 @@
		return;
	}

-	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+
+	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
+	reg_num = regs_num_32_bit;
+	reg += reg_num;
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;

-	data = (u32 *)data + regs_num_32_bit;
-	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
-				    data);
-	if (ret)
+	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
+		return;
+	}
+	reg_num = regs_num_64_bit * 2;
+	reg += reg_num;
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	ret = hclge_get_dfx_reg(hdev, reg);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -6290,30 +11286,144 @@
	}
}

-static void hclge_get_port_type(struct hnae3_handle *handle,
-				u8 *port_type)
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
-	u8 media_type = hdev->hw.mac.media_type;

-	switch (media_type) {
-	case HNAE3_MEDIA_TYPE_FIBER:
-		*port_type = PORT_FIBRE;
-		break;
-	case HNAE3_MEDIA_TYPE_COPPER:
-		*port_type = PORT_TP;
-		break;
-	case HNAE3_MEDIA_TYPE_UNKNOWN:
-	default:
-		*port_type = PORT_OTHER;
-		break;
+	return hclge_config_gro(hdev, enable);
+}
+
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = &hdev->vport[0];
+	struct hnae3_handle *handle = &vport->nic;
+	u8 tmp_flags;
+	int ret;
+
+	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}
+
+	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+					     tmp_flags & HNAE3_MPE);
+		if (!ret) {
+			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+			hclge_enable_vlan_filter(handle,
+						 tmp_flags & HNAE3_VLAN_FLTR);
+		}
+	}
+}
+
11321
+static bool hclge_module_existed(struct hclge_dev *hdev)
11322
+{
11323
+ struct hclge_desc desc;
11324
+ u32 existed;
11325
+ int ret;
11326
+
11327
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11328
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11329
+ if (ret) {
11330
+ dev_err(&hdev->pdev->dev,
11331
+ "failed to get SFP exist state, ret = %d\n", ret);
11332
+ return false;
11333
+ }
11334
+
11335
+ existed = le32_to_cpu(desc.data[0]);
11336
+
11337
+ return existed != 0;
11338
+}
11339
+
11340
+/* need 6 bds(total 140 bytes) in one reading
11341
+ * return the number of bytes actually read, 0 means read failed.
11342
+ */
11343
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11344
+ u32 len, u8 *data)
11345
+{
11346
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11347
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11348
+ u16 read_len;
11349
+ u16 copy_len;
11350
+ int ret;
11351
+ int i;
11352
+
11353
+ /* setup all 6 bds to read module eeprom info. */
11354
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11355
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11356
+ true);
11357
+
11358
+ /* bd0~bd4 need next flag */
11359
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11360
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11361
+ }
11362
+
11363
+ /* setup bd0, this bd contains offset and read length. */
11364
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11365
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11366
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11367
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
11368
+
11369
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
11370
+ if (ret) {
11371
+ dev_err(&hdev->pdev->dev,
11372
+ "failed to get SFP eeprom info, ret = %d\n", ret);
11373
+ return 0;
11374
+ }
11375
+
11376
+ /* copy sfp info from bd0 to out buffer. */
11377
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11378
+ memcpy(data, sfp_info_bd0->data, copy_len);
11379
+ read_len = copy_len;
11380
+
11381
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
11382
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11383
+ if (read_len >= len)
11384
+ return read_len;
11385
+
11386
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11387
+ memcpy(data + read_len, desc[i].data, copy_len);
11388
+ read_len += copy_len;
11389
+ }
11390
+
11391
+ return read_len;
11392
+}
11393
+
11394
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11395
+ u32 len, u8 *data)
11396
+{
11397
+ struct hclge_vport *vport = hclge_get_vport(handle);
11398
+ struct hclge_dev *hdev = vport->back;
11399
+ u32 read_len = 0;
11400
+ u16 data_len;
11401
+
11402
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11403
+ return -EOPNOTSUPP;
11404
+
11405
+ if (!hclge_module_existed(hdev))
11406
+ return -ENXIO;
11407
+
11408
+ while (read_len < len) {
11409
+ data_len = hclge_get_sfp_eeprom_info(hdev,
11410
+ offset + read_len,
11411
+ len - read_len,
11412
+ data + read_len);
11413
+ if (!data_len)
11414
+ return -EIO;
11415
+
11416
+ read_len += data_len;
11417
+ }
11418
+
11419
+ return 0;
631211420 }
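/* Usage note: ethtool -m lands here via the .get_module_eeprom hook
 * registered below; the loop keeps issuing firmware reads of at most
 * HCLGE_SFP_INFO_MAX_LEN bytes until the requested window is filled,
 * and maps a zero-length read to -EIO.
 */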

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
+	.flr_prepare = hclge_flr_prepare,
+	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -6321,14 +11431,19 @@
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
+	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
+	.client_start = hclge_client_start,
+	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
-	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
+	.check_port_speed = hclge_check_port_speed,
+	.get_fec = hclge_get_fec,
+	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
@@ -6338,18 +11453,21 @@
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
+	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
-	.update_mta_status = hclge_update_mta_status,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
+	.restart_autoneg = hclge_restart_autoneg,
+	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
+	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
@@ -6360,15 +11478,41 @@
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
+	.get_reset_level = hclge_get_reset_level,
+	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
-	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
-	.get_port_type = hclge_get_port_type,
+	.add_fd_entry = hclge_add_fd_entry,
+	.del_fd_entry = hclge_del_fd_entry,
+	.del_all_fd_entries = hclge_del_all_fd_entries,
+	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+	.get_fd_rule_info = hclge_get_fd_rule_info,
+	.get_fd_all_rules = hclge_get_all_rules,
+	.enable_fd = hclge_enable_fd,
+	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
+	.dbg_run_cmd = hclge_dbg_run_cmd,
+	.handle_hw_ras_error = hclge_handle_hw_ras_error,
+	.get_hw_reset_stat = hclge_get_hw_reset_stat,
+	.ae_dev_resetting = hclge_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+	.set_gro_en = hclge_gro_en,
+	.get_global_queue_id = hclge_covert_handle_qid_global,
+	.set_timer_task = hclge_set_timer_task,
+	.mac_connect_phy = hclge_mac_connect_phy,
+	.mac_disconnect_phy = hclge_mac_disconnect_phy,
+	.get_vf_config = hclge_get_vf_config,
+	.set_vf_link_state = hclge_set_vf_link_state,
+	.set_vf_spoofchk = hclge_set_vf_spoofchk,
+	.set_vf_trust = hclge_set_vf_trust,
+	.set_vf_rate = hclge_set_vf_rate,
+	.set_vf_mac = hclge_set_vf_mac,
+	.get_module_eeprom = hclge_get_module_eeprom,
+	.get_cmdq_stat = hclge_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algo = {
@@ -6380,6 +11524,12 @@
{
	pr_info("%s is initializing\n", HCLGE_NAME);

+	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+	if (!hclge_wq) {
+		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+		return -ENOMEM;
+	}
+
	hnae3_register_ae_algo(&ae_algo);

	return 0;
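/* Sketch of the lifetime rule this implies: hclge_wq is a single
 * module-wide workqueue, created before the ae algo is registered and
 * destroyed in hclge_exit() only after it is unregistered, so no
 * device can still have service work queued when the queue goes away.
 */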
@@ -6389,6 +11539,7 @@
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
+	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);