forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 958e46acc8e900e8569dd467c1af9b8d2d019394
kernel/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
....@@ -12,6 +12,7 @@
1212 #include <linux/pci.h>
1313 #include <linux/platform_device.h>
1414 #include <linux/if_vlan.h>
15
+#include <linux/crash_dump.h>
1516 #include <net/rtnetlink.h>
1617 #include "hclge_cmd.h"
1718 #include "hclge_dcb.h"
....@@ -19,22 +20,61 @@
1920 #include "hclge_mbx.h"
2021 #include "hclge_mdio.h"
2122 #include "hclge_tm.h"
23
+#include "hclge_err.h"
2224 #include "hnae3.h"
2325
2426 #define HCLGE_NAME "hclge"
2527 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
2628 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
27
-#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
28
-#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
2929
30
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
31
- enum hclge_mta_dmac_sel_type mta_mac_sel,
32
- bool enable);
33
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
30
+#define HCLGE_BUF_SIZE_UNIT 256U
31
+#define HCLGE_BUF_MUL_BY 2
32
+#define HCLGE_BUF_DIV_BY 2
33
+#define NEED_RESERVE_TC_NUM 2
34
+#define BUF_MAX_PERCENT 100
35
+#define BUF_RESERVE_PERCENT 90
36
+
37
+#define HCLGE_RESET_MAX_FAIL_CNT 5
38
+#define HCLGE_RESET_SYNC_TIME 100
39
+#define HCLGE_PF_RESET_SYNC_TIME 20
40
+#define HCLGE_PF_RESET_SYNC_CNT 1500
41
+
42
+/* Get DFX BD number offset */
43
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
44
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
45
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
46
+#define HCLGE_DFX_IGU_BD_OFFSET 4
47
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
48
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
49
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
50
+#define HCLGE_DFX_RTC_BD_OFFSET 8
51
+#define HCLGE_DFX_PPP_BD_OFFSET 9
52
+#define HCLGE_DFX_RCB_BD_OFFSET 10
53
+#define HCLGE_DFX_TQP_BD_OFFSET 11
54
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
+
56
+#define HCLGE_LINK_STATUS_MS 10
57
+
58
+#define HCLGE_VF_VPORT_START_NUM 1
59
+
60
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
3461 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
3563 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68
+ unsigned long *addr);
69
+static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
+
71
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
72
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
73
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
3674
3775 static struct hnae3_ae_algo ae_algo;
76
+
77
+static struct workqueue_struct *hclge_wq;
3878
3979 static const struct pci_device_id ae_algo_pci_tbl[] = {
4080 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
....@@ -44,180 +84,74 @@
4484 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
4585 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
4686 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
4788 /* required last entry */
4889 {0, }
4990 };
5091
5192 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
5293
94
+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95
+ HCLGE_CMDQ_TX_ADDR_H_REG,
96
+ HCLGE_CMDQ_TX_DEPTH_REG,
97
+ HCLGE_CMDQ_TX_TAIL_REG,
98
+ HCLGE_CMDQ_TX_HEAD_REG,
99
+ HCLGE_CMDQ_RX_ADDR_L_REG,
100
+ HCLGE_CMDQ_RX_ADDR_H_REG,
101
+ HCLGE_CMDQ_RX_DEPTH_REG,
102
+ HCLGE_CMDQ_RX_TAIL_REG,
103
+ HCLGE_CMDQ_RX_HEAD_REG,
104
+ HCLGE_VECTOR0_CMDQ_SRC_REG,
105
+ HCLGE_CMDQ_INTR_STS_REG,
106
+ HCLGE_CMDQ_INTR_EN_REG,
107
+ HCLGE_CMDQ_INTR_GEN_REG};
108
+
109
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110
+ HCLGE_VECTOR0_OTER_EN_REG,
111
+ HCLGE_MISC_RESET_STS_REG,
112
+ HCLGE_MISC_VECTOR_INT_STS,
113
+ HCLGE_GLOBAL_RESET_REG,
114
+ HCLGE_FUN_RST_ING,
115
+ HCLGE_GRO_EN_REG};
116
+
117
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118
+ HCLGE_RING_RX_ADDR_H_REG,
119
+ HCLGE_RING_RX_BD_NUM_REG,
120
+ HCLGE_RING_RX_BD_LENGTH_REG,
121
+ HCLGE_RING_RX_MERGE_EN_REG,
122
+ HCLGE_RING_RX_TAIL_REG,
123
+ HCLGE_RING_RX_HEAD_REG,
124
+ HCLGE_RING_RX_FBD_NUM_REG,
125
+ HCLGE_RING_RX_OFFSET_REG,
126
+ HCLGE_RING_RX_FBD_OFFSET_REG,
127
+ HCLGE_RING_RX_STASH_REG,
128
+ HCLGE_RING_RX_BD_ERR_REG,
129
+ HCLGE_RING_TX_ADDR_L_REG,
130
+ HCLGE_RING_TX_ADDR_H_REG,
131
+ HCLGE_RING_TX_BD_NUM_REG,
132
+ HCLGE_RING_TX_PRIORITY_REG,
133
+ HCLGE_RING_TX_TC_REG,
134
+ HCLGE_RING_TX_MERGE_EN_REG,
135
+ HCLGE_RING_TX_TAIL_REG,
136
+ HCLGE_RING_TX_HEAD_REG,
137
+ HCLGE_RING_TX_FBD_NUM_REG,
138
+ HCLGE_RING_TX_OFFSET_REG,
139
+ HCLGE_RING_TX_EBD_NUM_REG,
140
+ HCLGE_RING_TX_EBD_OFFSET_REG,
141
+ HCLGE_RING_TX_BD_ERR_REG,
142
+ HCLGE_RING_EN_REG};
143
+
144
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145
+ HCLGE_TQP_INTR_GL0_REG,
146
+ HCLGE_TQP_INTR_GL1_REG,
147
+ HCLGE_TQP_INTR_GL2_REG,
148
+ HCLGE_TQP_INTR_RL_REG};
149
+
53150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
54
- "Mac Loopback test",
55
- "Serdes Loopback test",
151
+ "App Loopback test",
152
+ "Serdes serial Loopback test",
153
+ "Serdes parallel Loopback test",
56154 "Phy Loopback test"
57
-};
58
-
59
-static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
60
- {"igu_rx_oversize_pkt",
61
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
62
- {"igu_rx_undersize_pkt",
63
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
64
- {"igu_rx_out_all_pkt",
65
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
66
- {"igu_rx_uni_pkt",
67
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
68
- {"igu_rx_multi_pkt",
69
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
70
- {"igu_rx_broad_pkt",
71
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
72
- {"egu_tx_out_all_pkt",
73
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
74
- {"egu_tx_uni_pkt",
75
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
76
- {"egu_tx_multi_pkt",
77
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
78
- {"egu_tx_broad_pkt",
79
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
80
- {"ssu_ppp_mac_key_num",
81
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
82
- {"ssu_ppp_host_key_num",
83
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
84
- {"ppp_ssu_mac_rlt_num",
85
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
86
- {"ppp_ssu_host_rlt_num",
87
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
88
- {"ssu_tx_in_num",
89
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
90
- {"ssu_tx_out_num",
91
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
92
- {"ssu_rx_in_num",
93
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
94
- {"ssu_rx_out_num",
95
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
96
-};
97
-
98
-static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
99
- {"igu_rx_err_pkt",
100
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
101
- {"igu_rx_no_eof_pkt",
102
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
103
- {"igu_rx_no_sof_pkt",
104
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
105
- {"egu_tx_1588_pkt",
106
- HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
107
- {"ssu_full_drop_num",
108
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
109
- {"ssu_part_drop_num",
110
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
111
- {"ppp_key_drop_num",
112
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
113
- {"ppp_rlt_drop_num",
114
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
115
- {"ssu_key_drop_num",
116
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
117
- {"pkt_curr_buf_cnt",
118
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
119
- {"qcn_fb_rcv_cnt",
120
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
121
- {"qcn_fb_drop_cnt",
122
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
123
- {"qcn_fb_invaild_cnt",
124
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
125
- {"rx_packet_tc0_in_cnt",
126
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
127
- {"rx_packet_tc1_in_cnt",
128
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
129
- {"rx_packet_tc2_in_cnt",
130
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
131
- {"rx_packet_tc3_in_cnt",
132
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
133
- {"rx_packet_tc4_in_cnt",
134
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
135
- {"rx_packet_tc5_in_cnt",
136
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
137
- {"rx_packet_tc6_in_cnt",
138
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
139
- {"rx_packet_tc7_in_cnt",
140
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
141
- {"rx_packet_tc0_out_cnt",
142
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
143
- {"rx_packet_tc1_out_cnt",
144
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
145
- {"rx_packet_tc2_out_cnt",
146
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
147
- {"rx_packet_tc3_out_cnt",
148
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
149
- {"rx_packet_tc4_out_cnt",
150
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
151
- {"rx_packet_tc5_out_cnt",
152
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
153
- {"rx_packet_tc6_out_cnt",
154
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
155
- {"rx_packet_tc7_out_cnt",
156
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
157
- {"tx_packet_tc0_in_cnt",
158
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
159
- {"tx_packet_tc1_in_cnt",
160
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
161
- {"tx_packet_tc2_in_cnt",
162
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
163
- {"tx_packet_tc3_in_cnt",
164
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
165
- {"tx_packet_tc4_in_cnt",
166
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
167
- {"tx_packet_tc5_in_cnt",
168
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
169
- {"tx_packet_tc6_in_cnt",
170
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
171
- {"tx_packet_tc7_in_cnt",
172
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
173
- {"tx_packet_tc0_out_cnt",
174
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
175
- {"tx_packet_tc1_out_cnt",
176
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
177
- {"tx_packet_tc2_out_cnt",
178
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
179
- {"tx_packet_tc3_out_cnt",
180
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
181
- {"tx_packet_tc4_out_cnt",
182
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
183
- {"tx_packet_tc5_out_cnt",
184
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
185
- {"tx_packet_tc6_out_cnt",
186
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
187
- {"tx_packet_tc7_out_cnt",
188
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
189
- {"pkt_curr_buf_tc0_cnt",
190
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
191
- {"pkt_curr_buf_tc1_cnt",
192
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
193
- {"pkt_curr_buf_tc2_cnt",
194
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
195
- {"pkt_curr_buf_tc3_cnt",
196
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
197
- {"pkt_curr_buf_tc4_cnt",
198
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
199
- {"pkt_curr_buf_tc5_cnt",
200
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
201
- {"pkt_curr_buf_tc6_cnt",
202
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
203
- {"pkt_curr_buf_tc7_cnt",
204
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
205
- {"mb_uncopy_num",
206
- HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
207
- {"lo_pri_unicast_rlt_drop_num",
208
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
209
- {"hi_pri_multicast_rlt_drop_num",
210
- HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
211
- {"lo_pri_multicast_rlt_drop_num",
212
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
213
- {"rx_oq_drop_pkt_cnt",
214
- HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
215
- {"tx_oq_drop_pkt_cnt",
216
- HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
217
- {"nic_l2_err_drop_pkt_cnt",
218
- HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
219
- {"roc_l2_err_drop_pkt_cnt",
220
- HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
221155 };
222156
223157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
....@@ -225,6 +159,12 @@
225159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
226160 {"mac_rx_mac_pause_num",
227161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162
+ {"mac_tx_control_pkt_num",
163
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164
+ {"mac_rx_control_pkt_num",
165
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166
+ {"mac_tx_pfc_pkt_num",
167
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
228168 {"mac_tx_pfc_pri0_pkt_num",
229169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
230170 {"mac_tx_pfc_pri1_pkt_num",
....@@ -241,6 +181,8 @@
241181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
242182 {"mac_tx_pfc_pri7_pkt_num",
243183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184
+ {"mac_rx_pfc_pkt_num",
185
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
244186 {"mac_rx_pfc_pri0_pkt_num",
245187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
246188 {"mac_rx_pfc_pri1_pkt_num",
....@@ -387,122 +329,99 @@
387329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
388330 {
389331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
390
- .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
391
- .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
392
- .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
332
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
333
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
393334 .i_port_bitmap = 0x1,
394335 },
395336 };
396337
397
-static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
398
-{
399
-#define HCLGE_64_BIT_CMD_NUM 5
400
-#define HCLGE_64_BIT_RTN_DATANUM 4
401
- u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
402
- struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
403
- __le64 *desc_data;
404
- int i, k, n;
405
- int ret;
338
+static const u8 hclge_hash_key[] = {
339
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344
+};
406345
407
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
408
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
409
- if (ret) {
410
- dev_err(&hdev->pdev->dev,
411
- "Get 64 bit pkt stats fail, status = %d.\n", ret);
412
- return ret;
413
- }
346
+static const u32 hclge_dfx_bd_offset_list[] = {
347
+ HCLGE_DFX_BIOS_BD_OFFSET,
348
+ HCLGE_DFX_SSU_0_BD_OFFSET,
349
+ HCLGE_DFX_SSU_1_BD_OFFSET,
350
+ HCLGE_DFX_IGU_BD_OFFSET,
351
+ HCLGE_DFX_RPU_0_BD_OFFSET,
352
+ HCLGE_DFX_RPU_1_BD_OFFSET,
353
+ HCLGE_DFX_NCSI_BD_OFFSET,
354
+ HCLGE_DFX_RTC_BD_OFFSET,
355
+ HCLGE_DFX_PPP_BD_OFFSET,
356
+ HCLGE_DFX_RCB_BD_OFFSET,
357
+ HCLGE_DFX_TQP_BD_OFFSET,
358
+ HCLGE_DFX_SSU_2_BD_OFFSET
359
+};
414360
415
- for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
416
- if (unlikely(i == 0)) {
417
- desc_data = (__le64 *)(&desc[i].data[0]);
418
- n = HCLGE_64_BIT_RTN_DATANUM - 1;
419
- } else {
420
- desc_data = (__le64 *)(&desc[i]);
421
- n = HCLGE_64_BIT_RTN_DATANUM;
422
- }
423
- for (k = 0; k < n; k++) {
424
- *data++ += le64_to_cpu(*desc_data);
425
- desc_data++;
426
- }
427
- }
361
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
363
+ HCLGE_OPC_DFX_SSU_REG_0,
364
+ HCLGE_OPC_DFX_SSU_REG_1,
365
+ HCLGE_OPC_DFX_IGU_EGU_REG,
366
+ HCLGE_OPC_DFX_RPU_REG_0,
367
+ HCLGE_OPC_DFX_RPU_REG_1,
368
+ HCLGE_OPC_DFX_NCSI_REG,
369
+ HCLGE_OPC_DFX_RTC_REG,
370
+ HCLGE_OPC_DFX_PPP_REG,
371
+ HCLGE_OPC_DFX_RCB_REG,
372
+ HCLGE_OPC_DFX_TQP_REG,
373
+ HCLGE_OPC_DFX_SSU_REG_2
374
+};
428375
429
- return 0;
430
-}
376
+static const struct key_info meta_data_key_info[] = {
377
+ { PACKET_TYPE_ID, 6},
378
+ { IP_FRAGEMENT, 1},
379
+ { ROCE_TYPE, 1},
380
+ { NEXT_KEY, 5},
381
+ { VLAN_NUMBER, 2},
382
+ { SRC_VPORT, 12},
383
+ { DST_VPORT, 12},
384
+ { TUNNEL_PACKET, 1},
385
+};
431386
432
-static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
433
-{
434
- stats->pkt_curr_buf_cnt = 0;
435
- stats->pkt_curr_buf_tc0_cnt = 0;
436
- stats->pkt_curr_buf_tc1_cnt = 0;
437
- stats->pkt_curr_buf_tc2_cnt = 0;
438
- stats->pkt_curr_buf_tc3_cnt = 0;
439
- stats->pkt_curr_buf_tc4_cnt = 0;
440
- stats->pkt_curr_buf_tc5_cnt = 0;
441
- stats->pkt_curr_buf_tc6_cnt = 0;
442
- stats->pkt_curr_buf_tc7_cnt = 0;
443
-}
387
+static const struct key_info tuple_key_info[] = {
388
+ { OUTER_DST_MAC, 48},
389
+ { OUTER_SRC_MAC, 48},
390
+ { OUTER_VLAN_TAG_FST, 16},
391
+ { OUTER_VLAN_TAG_SEC, 16},
392
+ { OUTER_ETH_TYPE, 16},
393
+ { OUTER_L2_RSV, 16},
394
+ { OUTER_IP_TOS, 8},
395
+ { OUTER_IP_PROTO, 8},
396
+ { OUTER_SRC_IP, 32},
397
+ { OUTER_DST_IP, 32},
398
+ { OUTER_L3_RSV, 16},
399
+ { OUTER_SRC_PORT, 16},
400
+ { OUTER_DST_PORT, 16},
401
+ { OUTER_L4_RSV, 32},
402
+ { OUTER_TUN_VNI, 24},
403
+ { OUTER_TUN_FLOW_ID, 8},
404
+ { INNER_DST_MAC, 48},
405
+ { INNER_SRC_MAC, 48},
406
+ { INNER_VLAN_TAG_FST, 16},
407
+ { INNER_VLAN_TAG_SEC, 16},
408
+ { INNER_ETH_TYPE, 16},
409
+ { INNER_L2_RSV, 16},
410
+ { INNER_IP_TOS, 8},
411
+ { INNER_IP_PROTO, 8},
412
+ { INNER_SRC_IP, 32},
413
+ { INNER_DST_IP, 32},
414
+ { INNER_L3_RSV, 16},
415
+ { INNER_SRC_PORT, 16},
416
+ { INNER_DST_PORT, 16},
417
+ { INNER_L4_RSV, 32},
418
+};
444419
445
-static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
446
-{
447
-#define HCLGE_32_BIT_CMD_NUM 8
448
-#define HCLGE_32_BIT_RTN_DATANUM 8
449
-
450
- struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
451
- struct hclge_32_bit_stats *all_32_bit_stats;
452
- __le32 *desc_data;
453
- int i, k, n;
454
- u64 *data;
455
- int ret;
456
-
457
- all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
458
- data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
459
-
460
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
461
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
462
- if (ret) {
463
- dev_err(&hdev->pdev->dev,
464
- "Get 32 bit pkt stats fail, status = %d.\n", ret);
465
-
466
- return ret;
467
- }
468
-
469
- hclge_reset_partial_32bit_counter(all_32_bit_stats);
470
- for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
471
- if (unlikely(i == 0)) {
472
- __le16 *desc_data_16bit;
473
-
474
- all_32_bit_stats->igu_rx_err_pkt +=
475
- le32_to_cpu(desc[i].data[0]);
476
-
477
- desc_data_16bit = (__le16 *)&desc[i].data[1];
478
- all_32_bit_stats->igu_rx_no_eof_pkt +=
479
- le16_to_cpu(*desc_data_16bit);
480
-
481
- desc_data_16bit++;
482
- all_32_bit_stats->igu_rx_no_sof_pkt +=
483
- le16_to_cpu(*desc_data_16bit);
484
-
485
- desc_data = &desc[i].data[2];
486
- n = HCLGE_32_BIT_RTN_DATANUM - 4;
487
- } else {
488
- desc_data = (__le32 *)&desc[i];
489
- n = HCLGE_32_BIT_RTN_DATANUM;
490
- }
491
- for (k = 0; k < n; k++) {
492
- *data++ += le32_to_cpu(*desc_data);
493
- desc_data++;
494
- }
495
- }
496
-
497
- return 0;
498
-}
499
-
500
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
420
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
501421 {
502422 #define HCLGE_MAC_CMD_NUM 21
503
-#define HCLGE_RTN_DATA_NUM 4
504423
505
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
424
+ u64 *data = (u64 *)(&hdev->mac_stats);
506425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
507426 __le64 *desc_data;
508427 int i, k, n;
....@@ -518,20 +437,106 @@
518437 }
519438
520439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440
+ /* for special opcode 0032, only the first desc has the head */
521441 if (unlikely(i == 0)) {
522442 desc_data = (__le64 *)(&desc[i].data[0]);
523
- n = HCLGE_RTN_DATA_NUM - 2;
443
+ n = HCLGE_RD_FIRST_STATS_NUM;
524444 } else {
525445 desc_data = (__le64 *)(&desc[i]);
526
- n = HCLGE_RTN_DATA_NUM;
446
+ n = HCLGE_RD_OTHER_STATS_NUM;
527447 }
448
+
528449 for (k = 0; k < n; k++) {
529
- *data++ += le64_to_cpu(*desc_data);
450
+ *data += le64_to_cpu(*desc_data);
451
+ data++;
530452 desc_data++;
531453 }
532454 }
533455
534456 return 0;
457
+}
458
+
459
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460
+{
461
+ u64 *data = (u64 *)(&hdev->mac_stats);
462
+ struct hclge_desc *desc;
463
+ __le64 *desc_data;
464
+ u16 i, k, n;
465
+ int ret;
466
+
467
+ /* This may be called inside atomic sections,
468
+ * so GFP_ATOMIC is more suitalbe here
469
+ */
470
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471
+ if (!desc)
472
+ return -ENOMEM;
473
+
474
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475
+ ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476
+ if (ret) {
477
+ kfree(desc);
478
+ return ret;
479
+ }
480
+
481
+ for (i = 0; i < desc_num; i++) {
482
+ /* for special opcode 0034, only the first desc has the head */
483
+ if (i == 0) {
484
+ desc_data = (__le64 *)(&desc[i].data[0]);
485
+ n = HCLGE_RD_FIRST_STATS_NUM;
486
+ } else {
487
+ desc_data = (__le64 *)(&desc[i]);
488
+ n = HCLGE_RD_OTHER_STATS_NUM;
489
+ }
490
+
491
+ for (k = 0; k < n; k++) {
492
+ *data += le64_to_cpu(*desc_data);
493
+ data++;
494
+ desc_data++;
495
+ }
496
+ }
497
+
498
+ kfree(desc);
499
+
500
+ return 0;
501
+}
502
+
503
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504
+{
505
+ struct hclge_desc desc;
506
+ __le32 *desc_data;
507
+ u32 reg_num;
508
+ int ret;
509
+
510
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512
+ if (ret)
513
+ return ret;
514
+
515
+ desc_data = (__le32 *)(&desc.data[0]);
516
+ reg_num = le32_to_cpu(*desc_data);
517
+
518
+ *desc_num = 1 + ((reg_num - 3) >> 2) +
519
+ (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
+
521
+ return 0;
522
+}
523
+
524
+static int hclge_mac_update_stats(struct hclge_dev *hdev)
525
+{
526
+ u32 desc_num;
527
+ int ret;
528
+
529
+ ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
+
531
+ /* The firmware supports the new statistics acquisition method */
532
+ if (!ret)
533
+ ret = hclge_mac_update_stats_complete(hdev, desc_num);
534
+ else if (ret == -EOPNOTSUPP)
535
+ ret = hclge_mac_update_stats_defective(hdev);
536
+ else
537
+ dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
+
539
+ return ret;
535540 }
536541
537542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
....@@ -548,8 +553,7 @@
548553 queue = handle->kinfo.tqp[i];
549554 tqp = container_of(queue, struct hclge_tqp, q);
550555 /* command : HCLGE_OPC_QUERY_IGU_STAT */
551
- hclge_cmd_setup_basic_desc(&desc[0],
552
- HCLGE_OPC_QUERY_RX_STATUS,
556
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
553557 true);
554558
555559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
....@@ -557,7 +561,7 @@
557561 if (ret) {
558562 dev_err(&hdev->pdev->dev,
559563 "Query tqp stat fail, status = %d,queue = %d\n",
560
- ret, i);
564
+ ret, i);
561565 return ret;
562566 }
563567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
....@@ -569,7 +573,7 @@
569573 tqp = container_of(queue, struct hclge_tqp, q);
570574 /* command : HCLGE_OPC_QUERY_IGU_STAT */
571575 hclge_cmd_setup_basic_desc(&desc[0],
572
- HCLGE_OPC_QUERY_TX_STATUS,
576
+ HCLGE_OPC_QUERY_TX_STATS,
573577 true);
574578
575579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
....@@ -611,6 +615,7 @@
611615 {
612616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
613617
618
+ /* each tqp has TX & RX two queues */
614619 return kinfo->num_tqps * (2);
615620 }
616621
....@@ -618,12 +623,12 @@
618623 {
619624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
620625 u8 *buff = data;
621
- int i = 0;
626
+ int i;
622627
623628 for (i = 0; i < kinfo->num_tqps; i++) {
624629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
625630 struct hclge_tqp, q);
626
- snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
631
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
627632 tqp->index);
628633 buff = buff + ETH_GSTRING_LEN;
629634 }
....@@ -631,7 +636,7 @@
631636 for (i = 0; i < kinfo->num_tqps; i++) {
632637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
633638 struct hclge_tqp, q);
634
- snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
639
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
635640 tqp->index);
636641 buff = buff + ETH_GSTRING_LEN;
637642 }
....@@ -639,7 +644,7 @@
639644 return buff;
640645 }
641646
642
-static u64 *hclge_comm_get_stats(void *comm_stats,
647
+static u64 *hclge_comm_get_stats(const void *comm_stats,
643648 const struct hclge_comm_stats_str strs[],
644649 int size, u64 *data)
645650 {
....@@ -670,32 +675,6 @@
670675 return (u8 *)buff;
671676 }
672677
673
-static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
674
- struct net_device_stats *net_stats)
675
-{
676
- net_stats->tx_dropped = 0;
677
- net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
678
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
679
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
680
-
681
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
682
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
683
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
684
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
685
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
686
-
687
- net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
688
- net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
689
-
690
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
691
- net_stats->rx_length_errors =
692
- hw_stats->mac_stats.mac_rx_undersize_pkt_num;
693
- net_stats->rx_length_errors +=
694
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
695
- net_stats->rx_over_errors =
696
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
697
-}
698
-
699678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
700679 {
701680 struct hnae3_handle *handle;
....@@ -715,14 +694,6 @@
715694 if (status)
716695 dev_err(&hdev->pdev->dev,
717696 "Update MAC stats fail, status = %d.\n", status);
718
-
719
- status = hclge_32_bit_update_stats(hdev);
720
- if (status)
721
- dev_err(&hdev->pdev->dev,
722
- "Update 32 bit stats fail, status = %d.\n",
723
- status);
724
-
725
- hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
726697 }
727698
728699 static void hclge_update_stats(struct hnae3_handle *handle,
....@@ -730,7 +701,6 @@
730701 {
731702 struct hclge_vport *vport = hclge_get_vport(handle);
732703 struct hclge_dev *hdev = vport->back;
733
- struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
734704 int status;
735705
736706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
....@@ -742,32 +712,21 @@
742712 "Update MAC stats fail, status = %d.\n",
743713 status);
744714
745
- status = hclge_32_bit_update_stats(hdev);
746
- if (status)
747
- dev_err(&hdev->pdev->dev,
748
- "Update 32 bit stats fail, status = %d.\n",
749
- status);
750
-
751
- status = hclge_64_bit_update_stats(hdev);
752
- if (status)
753
- dev_err(&hdev->pdev->dev,
754
- "Update 64 bit stats fail, status = %d.\n",
755
- status);
756
-
757715 status = hclge_tqps_update_stats(handle);
758716 if (status)
759717 dev_err(&hdev->pdev->dev,
760718 "Update TQPS stats fail, status = %d.\n",
761719 status);
762720
763
- hclge_update_netstat(hw_stats, net_stats);
764
-
765721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
766722 }
767723
768724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
769725 {
770
-#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
726
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727
+ HNAE3_SUPPORT_PHY_LOOPBACK |\
728
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
771730
772731 struct hclge_vport *vport = hclge_get_vport(handle);
773732 struct hclge_dev *hdev = vport->back;
....@@ -781,27 +740,33 @@
781740 if (stringset == ETH_SS_TEST) {
782741 /* clear loopback bit flags at first */
783742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
784
- if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
743
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
785745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
786746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
787747 count += 1;
788
- handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
748
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
789749 }
790750
791
- count++;
792
- handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
751
+ count += 2;
752
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
+
755
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
756
+ hdev->hw.mac.phydev->drv->set_loopback) {
757
+ count += 1;
758
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759
+ }
760
+
793761 } else if (stringset == ETH_SS_STATS) {
794762 count = ARRAY_SIZE(g_mac_stats_string) +
795
- ARRAY_SIZE(g_all_32bit_stats_string) +
796
- ARRAY_SIZE(g_all_64bit_stats_string) +
797763 hclge_tqps_get_sset_count(handle, stringset);
798764 }
799765
800766 return count;
801767 }
802768
803
-static void hclge_get_strings(struct hnae3_handle *handle,
804
- u32 stringset,
769
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
805770 u8 *data)
806771 {
807772 u8 *p = (char *)data;
....@@ -809,37 +774,28 @@
809774
810775 if (stringset == ETH_SS_STATS) {
811776 size = ARRAY_SIZE(g_mac_stats_string);
812
- p = hclge_comm_get_strings(stringset,
813
- g_mac_stats_string,
814
- size,
815
- p);
816
- size = ARRAY_SIZE(g_all_32bit_stats_string);
817
- p = hclge_comm_get_strings(stringset,
818
- g_all_32bit_stats_string,
819
- size,
820
- p);
821
- size = ARRAY_SIZE(g_all_64bit_stats_string);
822
- p = hclge_comm_get_strings(stringset,
823
- g_all_64bit_stats_string,
824
- size,
825
- p);
777
+ p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778
+ size, p);
826779 p = hclge_tqps_get_strings(handle, p);
827780 } else if (stringset == ETH_SS_TEST) {
828
- if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
829
- memcpy(p,
830
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
781
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
831783 ETH_GSTRING_LEN);
832784 p += ETH_GSTRING_LEN;
833785 }
834
- if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
786
+ if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788
+ ETH_GSTRING_LEN);
789
+ p += ETH_GSTRING_LEN;
790
+ }
791
+ if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
835792 memcpy(p,
836
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
793
+ hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
837794 ETH_GSTRING_LEN);
838795 p += ETH_GSTRING_LEN;
839796 }
840797 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
841
- memcpy(p,
842
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
798
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
843799 ETH_GSTRING_LEN);
844800 p += ETH_GSTRING_LEN;
845801 }
....@@ -852,24 +808,28 @@
852808 struct hclge_dev *hdev = vport->back;
853809 u64 *p;
854810
855
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
856
- g_mac_stats_string,
857
- ARRAY_SIZE(g_mac_stats_string),
858
- data);
859
- p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
860
- g_all_32bit_stats_string,
861
- ARRAY_SIZE(g_all_32bit_stats_string),
862
- p);
863
- p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
864
- g_all_64bit_stats_string,
865
- ARRAY_SIZE(g_all_64bit_stats_string),
866
- p);
811
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812
+ ARRAY_SIZE(g_mac_stats_string), data);
867813 p = hclge_tqps_get_stats(handle, p);
814
+}
815
+
816
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
817
+ struct hns3_mac_stats *mac_stats)
818
+{
819
+ struct hclge_vport *vport = hclge_get_vport(handle);
820
+ struct hclge_dev *hdev = vport->back;
821
+
822
+ hclge_update_stats(handle, NULL);
823
+
824
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
868826 }
869827
870828 static int hclge_parse_func_status(struct hclge_dev *hdev,
871829 struct hclge_func_status_cmd *status)
872830 {
831
+#define HCLGE_MAC_ID_MASK 0xF
832
+
873833 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
874834 return -EINVAL;
875835
....@@ -879,11 +839,14 @@
879839 else
880840 hdev->flag &= ~HCLGE_FLAG_MAIN;
881841
842
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
882843 return 0;
883844 }
884845
885846 static int hclge_query_function_status(struct hclge_dev *hdev)
886847 {
848
+#define HCLGE_QUERY_MAX_CNT 5
849
+
887850 struct hclge_func_status_cmd *req;
888851 struct hclge_desc desc;
889852 int timeout = 0;
....@@ -896,9 +859,7 @@
896859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
897860 if (ret) {
898861 dev_err(&hdev->pdev->dev,
899
- "query function status failed %d.\n",
900
- ret);
901
-
862
+ "query function status failed %d.\n", ret);
902863 return ret;
903864 }
904865
....@@ -906,11 +867,9 @@
906867 if (req->pf_state)
907868 break;
908869 usleep_range(1000, 2000);
909
- } while (timeout++ < 5);
870
+ } while (timeout++ < HCLGE_QUERY_MAX_CNT);
910871
911
- ret = hclge_parse_func_status(hdev, req);
912
-
913
- return ret;
872
+ return hclge_parse_func_status(hdev, req);
914873 }
915874
916875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
....@@ -928,26 +887,54 @@
928887 }
929888
930889 req = (struct hclge_pf_res_cmd *)desc.data;
931
- hdev->num_tqps = __le16_to_cpu(req->tqp_num);
932
- hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
890
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
891
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892
+
893
+ if (req->tx_buf_size)
894
+ hdev->tx_buf_size =
895
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896
+ else
897
+ hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
+
899
+ hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
+
901
+ if (req->dv_buf_size)
902
+ hdev->dv_buf_size =
903
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904
+ else
905
+ hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
+
907
+ hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
933908
934909 if (hnae3_dev_roce_supported(hdev)) {
935910 hdev->roce_base_msix_offset =
936
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
911
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
937912 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
938913 hdev->num_roce_msi =
939
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
914
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
940915 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
916
+
917
+ /* nic's msix numbers is always equals to the roce's. */
918
+ hdev->num_nic_msi = hdev->num_roce_msi;
941919
942920 /* PF should have NIC vectors and Roce vectors,
943921 * NIC vectors are queued before Roce vectors.
944922 */
945
- hdev->num_msi = hdev->num_roce_msi +
923
+ hdev->num_msi = hdev->num_roce_msi +
946924 hdev->roce_base_msix_offset;
947925 } else {
948926 hdev->num_msi =
949
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
927
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
950928 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
929
+
930
+ hdev->num_nic_msi = hdev->num_msi;
931
+ }
932
+
933
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
934
+ dev_err(&hdev->pdev->dev,
935
+ "Just %u msi resources, not enough for pf(min:2).\n",
936
+ hdev->num_nic_msi);
937
+ return -EINVAL;
951938 }
952939
953940 return 0;
....@@ -980,6 +967,9 @@
980967 case 5:
981968 *speed = HCLGE_MAC_SPEED_100G;
982969 break;
970
+ case 8:
971
+ *speed = HCLGE_MAC_SPEED_200G;
972
+ break;
983973 default:
984974 return -EINVAL;
985975 }
....@@ -987,51 +977,292 @@
987977 return 0;
988978 }
989979
980
+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981
+{
982
+ struct hclge_vport *vport = hclge_get_vport(handle);
983
+ struct hclge_dev *hdev = vport->back;
984
+ u32 speed_ability = hdev->hw.mac.speed_ability;
985
+ u32 speed_bit = 0;
986
+
987
+ switch (speed) {
988
+ case HCLGE_MAC_SPEED_10M:
989
+ speed_bit = HCLGE_SUPPORT_10M_BIT;
990
+ break;
991
+ case HCLGE_MAC_SPEED_100M:
992
+ speed_bit = HCLGE_SUPPORT_100M_BIT;
993
+ break;
994
+ case HCLGE_MAC_SPEED_1G:
995
+ speed_bit = HCLGE_SUPPORT_1G_BIT;
996
+ break;
997
+ case HCLGE_MAC_SPEED_10G:
998
+ speed_bit = HCLGE_SUPPORT_10G_BIT;
999
+ break;
1000
+ case HCLGE_MAC_SPEED_25G:
1001
+ speed_bit = HCLGE_SUPPORT_25G_BIT;
1002
+ break;
1003
+ case HCLGE_MAC_SPEED_40G:
1004
+ speed_bit = HCLGE_SUPPORT_40G_BIT;
1005
+ break;
1006
+ case HCLGE_MAC_SPEED_50G:
1007
+ speed_bit = HCLGE_SUPPORT_50G_BIT;
1008
+ break;
1009
+ case HCLGE_MAC_SPEED_100G:
1010
+ speed_bit = HCLGE_SUPPORT_100G_BIT;
1011
+ break;
1012
+ case HCLGE_MAC_SPEED_200G:
1013
+ speed_bit = HCLGE_SUPPORT_200G_BIT;
1014
+ break;
1015
+ default:
1016
+ return -EINVAL;
1017
+ }
1018
+
1019
+ if (speed_bit & speed_ability)
1020
+ return 0;
1021
+
1022
+ return -EINVAL;
1023
+}
1024
+
1025
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1026
+{
1027
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1028
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1029
+ mac->supported);
1030
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1031
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032
+ mac->supported);
1033
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1034
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035
+ mac->supported);
1036
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1037
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038
+ mac->supported);
1039
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1040
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041
+ mac->supported);
1042
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1043
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1044
+ mac->supported);
1045
+}
1046
+
1047
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1048
+{
1049
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1050
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051
+ mac->supported);
1052
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1053
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1054
+ mac->supported);
1055
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1056
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057
+ mac->supported);
1058
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060
+ mac->supported);
1061
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1062
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063
+ mac->supported);
1064
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065
+ linkmode_set_bit(
1066
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1067
+ mac->supported);
1068
+}
1069
+
1070
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1071
+{
1072
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1073
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074
+ mac->supported);
1075
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1076
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077
+ mac->supported);
1078
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1079
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080
+ mac->supported);
1081
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1082
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083
+ mac->supported);
1084
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1085
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086
+ mac->supported);
1087
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1088
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1089
+ mac->supported);
1090
+}
1091
+
1092
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1093
+{
1094
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1095
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096
+ mac->supported);
1097
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1098
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099
+ mac->supported);
1100
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1101
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102
+ mac->supported);
1103
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1104
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105
+ mac->supported);
1106
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1107
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108
+ mac->supported);
1109
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1110
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111
+ mac->supported);
1112
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1113
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1114
+ mac->supported);
1115
+}
1116
+
1117
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118
+{
1119
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1120
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121
+
1122
+ switch (mac->speed) {
1123
+ case HCLGE_MAC_SPEED_10G:
1124
+ case HCLGE_MAC_SPEED_40G:
1125
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1126
+ mac->supported);
1127
+ mac->fec_ability =
1128
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129
+ break;
1130
+ case HCLGE_MAC_SPEED_25G:
1131
+ case HCLGE_MAC_SPEED_50G:
1132
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1133
+ mac->supported);
1134
+ mac->fec_ability =
1135
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1136
+ BIT(HNAE3_FEC_AUTO);
1137
+ break;
1138
+ case HCLGE_MAC_SPEED_100G:
1139
+ case HCLGE_MAC_SPEED_200G:
1140
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1141
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1142
+ break;
1143
+ default:
1144
+ mac->fec_ability = 0;
1145
+ break;
1146
+ }
1147
+}
1148
+
9901149 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
991
- u8 speed_ability)
1150
+ u16 speed_ability)
1151
+{
1152
+ struct hclge_mac *mac = &hdev->hw.mac;
1153
+
1154
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1155
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1156
+ mac->supported);
1157
+
1158
+ hclge_convert_setting_sr(mac, speed_ability);
1159
+ hclge_convert_setting_lr(mac, speed_ability);
1160
+ hclge_convert_setting_cr(mac, speed_ability);
1161
+ if (hnae3_dev_fec_supported(hdev))
1162
+ hclge_convert_setting_fec(mac);
1163
+
1164
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1165
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1166
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1167
+}
1168
+
1169
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1170
+ u16 speed_ability)
1171
+{
1172
+ struct hclge_mac *mac = &hdev->hw.mac;
1173
+
1174
+ hclge_convert_setting_kr(mac, speed_ability);
1175
+ if (hnae3_dev_fec_supported(hdev))
1176
+ hclge_convert_setting_fec(mac);
1177
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1178
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1179
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1180
+}
1181
+
1182
+static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1183
+ u16 speed_ability)
9921184 {
9931185 unsigned long *supported = hdev->hw.mac.supported;
9941186
1187
+ /* default to support all speed for GE port */
1188
+ if (!speed_ability)
1189
+ speed_ability = HCLGE_SUPPORT_GE;
1190
+
9951191 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
996
- set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
997
- supported);
1192
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1193
+ supported);
9981194
999
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1000
- set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1001
- supported);
1195
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1196
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197
+ supported);
1198
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1199
+ supported);
1200
+ }
10021201
1003
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1004
- set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1005
- supported);
1202
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1203
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1204
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1205
+ }
10061206
1007
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1008
- set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1009
- supported);
1010
-
1011
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1012
- set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1013
- supported);
1014
-
1015
- set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
1016
- set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1207
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1210
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
10171211 }
10181212
1019
-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1213
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
10201214 {
10211215 u8 media_type = hdev->hw.mac.media_type;
10221216
1023
- if (media_type != HNAE3_MEDIA_TYPE_FIBER)
1024
- return;
1217
+ if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1218
+ hclge_parse_fiber_link_mode(hdev, speed_ability);
1219
+ else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1220
+ hclge_parse_copper_link_mode(hdev, speed_ability);
1221
+ else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1222
+ hclge_parse_backplane_link_mode(hdev, speed_ability);
1223
+}
10251224
1026
- hclge_parse_fiber_link_mode(hdev, speed_ability);
1225
+static u32 hclge_get_max_speed(u16 speed_ability)
1226
+{
1227
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1228
+ return HCLGE_MAC_SPEED_200G;
1229
+
1230
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1231
+ return HCLGE_MAC_SPEED_100G;
1232
+
1233
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1234
+ return HCLGE_MAC_SPEED_50G;
1235
+
1236
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1237
+ return HCLGE_MAC_SPEED_40G;
1238
+
1239
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1240
+ return HCLGE_MAC_SPEED_25G;
1241
+
1242
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1243
+ return HCLGE_MAC_SPEED_10G;
1244
+
1245
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1246
+ return HCLGE_MAC_SPEED_1G;
1247
+
1248
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1249
+ return HCLGE_MAC_SPEED_100M;
1250
+
1251
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1252
+ return HCLGE_MAC_SPEED_10M;
1253
+
1254
+ return HCLGE_MAC_SPEED_1G;
10271255 }
10281256
10291257 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
10301258 {
1259
+#define SPEED_ABILITY_EXT_SHIFT 8
1260
+
10311261 struct hclge_cfg_param_cmd *req;
10321262 u64 mac_addr_tmp_high;
1263
+ u16 speed_ability_ext;
10331264 u64 mac_addr_tmp;
1034
- int i;
1265
+ unsigned int i;
10351266
10361267 req = (struct hclge_cfg_param_cmd *)desc[0].data;
10371268
....@@ -1078,6 +1309,16 @@
10781309 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
10791310 HCLGE_CFG_SPEED_ABILITY_M,
10801311 HCLGE_CFG_SPEED_ABILITY_S);
1312
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
1314
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
1315
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316
+
1317
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1318
+ HCLGE_CFG_UMV_TBL_SPACE_M,
1319
+ HCLGE_CFG_UMV_TBL_SPACE_S);
1320
+ if (!cfg->umv_space)
1321
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
10811322 }
10821323
10831324 /* hclge_get_cfg: query the static parameter from flash
....@@ -1088,7 +1329,8 @@
10881329 {
10891330 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
10901331 struct hclge_cfg_param_cmd *req;
1091
- int i, ret;
1332
+ unsigned int i;
1333
+ int ret;
10921334
10931335 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
10941336 u32 offset = 0;
....@@ -1115,6 +1357,78 @@
11151357 return 0;
11161358 }
11171359
1360
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361
+{
1362
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
1363
+
1364
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365
+
1366
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1367
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1368
+ ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1369
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1370
+}
1371
+
1372
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373
+ struct hclge_desc *desc)
1374
+{
1375
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376
+ struct hclge_dev_specs_0_cmd *req0;
1377
+
1378
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1379
+
1380
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1381
+ ae_dev->dev_specs.rss_ind_tbl_size =
1382
+ le16_to_cpu(req0->rss_ind_tbl_size);
1383
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1384
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1385
+}
1386
+
1387
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
1388
+{
1389
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1390
+
1391
+ if (!dev_specs->max_non_tso_bd_num)
1392
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1393
+ if (!dev_specs->rss_ind_tbl_size)
1394
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1395
+ if (!dev_specs->rss_key_size)
1396
+ dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1397
+ if (!dev_specs->max_tm_rate)
1398
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1399
+}
1400
+
1401
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
1402
+{
1403
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1404
+ int ret;
1405
+ int i;
1406
+
1407
+ /* set default specifications as devices lower than version V3 do not
1408
+ * support querying specifications from firmware.
1409
+ */
1410
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1411
+ hclge_set_default_dev_specs(hdev);
1412
+ return 0;
1413
+ }
1414
+
1415
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1416
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1417
+ true);
1418
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1419
+ }
1420
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1421
+
1422
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1423
+ if (ret)
1424
+ return ret;
1425
+
1426
+ hclge_parse_dev_specs(hdev, desc);
1427
+ hclge_check_dev_specs(hdev);
1428
+
1429
+ return 0;
1430
+}
1431
+
11181432 static int hclge_get_cap(struct hclge_dev *hdev)
11191433 {
11201434 int ret;
....@@ -1127,23 +1441,36 @@
11271441 }
11281442
11291443 /* get pf resource */
1130
- ret = hclge_query_pf_resource(hdev);
1131
- if (ret)
1132
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1444
+ return hclge_query_pf_resource(hdev);
1445
+}
11331446
1134
- return ret;
1447
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1448
+{
1449
+#define HCLGE_MIN_TX_DESC 64
1450
+#define HCLGE_MIN_RX_DESC 64
1451
+
1452
+ if (!is_kdump_kernel())
1453
+ return;
1454
+
1455
+ dev_info(&hdev->pdev->dev,
1456
+ "Running kdump kernel. Using minimal resources\n");
1457
+
1458
+ /* minimal queue pairs equals to the number of vports */
1459
+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1460
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1461
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
11351462 }
11361463
11371464 static int hclge_configure(struct hclge_dev *hdev)
11381465 {
1466
+ const struct cpumask *cpumask = cpu_online_mask;
11391467 struct hclge_cfg cfg;
1140
- int ret, i;
1468
+ unsigned int i;
1469
+ int node, ret;
11411470
11421471 ret = hclge_get_cfg(hdev, &cfg);
1143
- if (ret) {
1144
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1472
+ if (ret)
11451473 return ret;
1146
- }
11471474
11481475 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
11491476 hdev->base_tqp_pid = 0;
....@@ -1152,22 +1479,32 @@
11521479 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
11531480 hdev->hw.mac.media_type = cfg.media_type;
11541481 hdev->hw.mac.phy_addr = cfg.phy_addr;
1155
- hdev->num_desc = cfg.tqp_desc_num;
1482
+ hdev->num_tx_desc = cfg.tqp_desc_num;
1483
+ hdev->num_rx_desc = cfg.tqp_desc_num;
11561484 hdev->tm_info.num_pg = 1;
11571485 hdev->tc_max = cfg.tc_num;
11581486 hdev->tm_info.hw_pfc_map = 0;
1487
+ hdev->wanted_umv_size = cfg.umv_space;
1488
+
1489
+ if (hnae3_dev_fd_supported(hdev)) {
1490
+ hdev->fd_en = true;
1491
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1492
+ }
11591493
11601494 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
11611495 if (ret) {
1162
- dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1496
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1497
+ cfg.default_speed, ret);
11631498 return ret;
11641499 }
11651500
11661501 hclge_parse_link_mode(hdev, cfg.speed_ability);
11671502
1503
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1504
+
11681505 if ((hdev->tc_max > HNAE3_MAX_TC) ||
11691506 (hdev->tc_max < 1)) {
1170
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1507
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
11711508 hdev->tc_max);
11721509 hdev->tc_max = 1;
11731510 }
....@@ -1180,7 +1517,7 @@
11801517 hdev->pfc_max = hdev->tc_max;
11811518 }
11821519
1183
- hdev->tm_info.num_tc = hdev->tc_max;
1520
+ hdev->tm_info.num_tc = 1;
11841521
11851522 /* Currently not support uncontiuous tc */
11861523 for (i = 0; i < hdev->tm_info.num_tc; i++)
....@@ -1188,31 +1525,53 @@
11881525
11891526 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
11901527
1528
+ hclge_init_kdump_kernel_config(hdev);
1529
+
1530
+ /* Set the affinity based on numa node */
1531
+ node = dev_to_node(&hdev->pdev->dev);
1532
+ if (node != NUMA_NO_NODE)
1533
+ cpumask = cpumask_of_node(node);
1534
+
1535
+ cpumask_copy(&hdev->affinity_mask, cpumask);
1536
+
11911537 return ret;
11921538 }
11931539
1194
-static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1195
- int tso_mss_max)
1540
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1541
+ u16 tso_mss_max)
11961542 {
11971543 struct hclge_cfg_tso_status_cmd *req;
11981544 struct hclge_desc desc;
1199
- u16 tso_mss;
12001545
12011546 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
12021547
12031548 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1204
-
1205
- tso_mss = 0;
1206
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1207
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1208
- req->tso_mss_min = cpu_to_le16(tso_mss);
1209
-
1210
- tso_mss = 0;
1211
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1212
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1213
- req->tso_mss_max = cpu_to_le16(tso_mss);
1549
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
1550
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
12141551
12151552 return hclge_cmd_send(&hdev->hw, &desc, 1);
1553
+}
1554
+
1555
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1556
+{
1557
+ struct hclge_cfg_gro_status_cmd *req;
1558
+ struct hclge_desc desc;
1559
+ int ret;
1560
+
1561
+ if (!hnae3_dev_gro_supported(hdev))
1562
+ return 0;
1563
+
1564
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1565
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1566
+
1567
+ req->gro_en = en ? 1 : 0;
1568
+
1569
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1570
+ if (ret)
1571
+ dev_err(&hdev->pdev->dev,
1572
+ "GRO hardware config cmd failed, ret = %d\n", ret);
1573
+
1574
+ return ret;
12161575 }
12171576
12181577 static int hclge_alloc_tqps(struct hclge_dev *hdev)
....@@ -1233,7 +1592,8 @@
12331592
12341593 tqp->q.ae_algo = &ae_algo;
12351594 tqp->q.buf_size = hdev->rx_buf_len;
1236
- tqp->q.desc_num = hdev->num_desc;
1595
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
1596
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
12371597 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
12381598 i * HCLGE_TQP_REG_SIZE;
12391599
....@@ -1255,8 +1615,9 @@
12551615 req = (struct hclge_tqp_map_cmd *)desc.data;
12561616 req->tqp_id = cpu_to_le16(tqp_pid);
12571617 req->tqp_vf = func_id;
1258
- req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1259
- 1 << HCLGE_TQP_MAP_EN_B;
1618
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1619
+ if (!is_pf)
1620
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
12601621 req->tqp_vid = cpu_to_le16(tqp_vid);
12611622
12621623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -1266,64 +1627,55 @@
12661627 return ret;
12671628 }
12681629
1269
-static int hclge_assign_tqp(struct hclge_vport *vport)
1630
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
12701631 {
12711632 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12721633 struct hclge_dev *hdev = vport->back;
12731634 int i, alloced;
12741635
12751636 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1276
- alloced < kinfo->num_tqps; i++) {
1637
+ alloced < num_tqps; i++) {
12771638 if (!hdev->htqp[i].alloced) {
12781639 hdev->htqp[i].q.handle = &vport->nic;
12791640 hdev->htqp[i].q.tqp_index = alloced;
1280
- hdev->htqp[i].q.desc_num = kinfo->num_desc;
1641
+ hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1642
+ hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
12811643 kinfo->tqp[alloced] = &hdev->htqp[i].q;
12821644 hdev->htqp[i].alloced = true;
12831645 alloced++;
12841646 }
12851647 }
1286
- vport->alloc_tqps = kinfo->num_tqps;
1648
+ vport->alloc_tqps = alloced;
1649
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1650
+ vport->alloc_tqps / hdev->tm_info.num_tc);
1651
+
1652
+ /* ensure one to one mapping between irq and queue at default */
1653
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
1654
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
12871655
12881656 return 0;
12891657 }
12901658
1291
-static int hclge_knic_setup(struct hclge_vport *vport,
1292
- u16 num_tqps, u16 num_desc)
1659
+static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1660
+ u16 num_tx_desc, u16 num_rx_desc)
1661
+
12931662 {
12941663 struct hnae3_handle *nic = &vport->nic;
12951664 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
12961665 struct hclge_dev *hdev = vport->back;
1297
- int i, ret;
1666
+ int ret;
12981667
1299
- kinfo->num_desc = num_desc;
1668
+ kinfo->num_tx_desc = num_tx_desc;
1669
+ kinfo->num_rx_desc = num_rx_desc;
1670
+
13001671 kinfo->rx_buf_len = hdev->rx_buf_len;
1301
- kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1302
- kinfo->rss_size
1303
- = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1304
- kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
13051672
1306
- for (i = 0; i < HNAE3_MAX_TC; i++) {
1307
- if (hdev->hw_tc_map & BIT(i)) {
1308
- kinfo->tc_info[i].enable = true;
1309
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1310
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1311
- kinfo->tc_info[i].tc = i;
1312
- } else {
1313
- /* Set to default queue if TC is disable */
1314
- kinfo->tc_info[i].enable = false;
1315
- kinfo->tc_info[i].tqp_offset = 0;
1316
- kinfo->tc_info[i].tqp_count = 1;
1317
- kinfo->tc_info[i].tc = 0;
1318
- }
1319
- }
1320
-
1321
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1673
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
13221674 sizeof(struct hnae3_queue *), GFP_KERNEL);
13231675 if (!kinfo->tqp)
13241676 return -ENOMEM;
13251677
1326
- ret = hclge_assign_tqp(vport);
1678
+ ret = hclge_assign_tqp(vport, num_tqps);
13271679 if (ret)
13281680 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
13291681
....@@ -1338,7 +1690,7 @@
13381690 u16 i;
13391691
13401692 kinfo = &nic->kinfo;
1341
- for (i = 0; i < kinfo->num_tqps; i++) {
1693
+ for (i = 0; i < vport->alloc_tqps; i++) {
13421694 struct hclge_tqp *q =
13431695 container_of(kinfo->tqp[i], struct hclge_tqp, q);
13441696 bool is_pf;
....@@ -1373,11 +1725,6 @@
13731725 return 0;
13741726 }
13751727
1376
-static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1377
-{
1378
- /* this would be initialized later */
1379
-}
1380
-
13811728 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
13821729 {
13831730 struct hnae3_handle *nic = &vport->nic;
....@@ -1388,18 +1735,12 @@
13881735 nic->ae_algo = &ae_algo;
13891736 nic->numa_node_mask = hdev->numa_node_mask;
13901737
1391
- if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1392
- ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1393
- if (ret) {
1394
- dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1395
- ret);
1396
- return ret;
1397
- }
1398
- } else {
1399
- hclge_unic_setup(vport, num_tqps);
1400
- }
1738
+ ret = hclge_knic_setup(vport, num_tqps,
1739
+ hdev->num_tx_desc, hdev->num_rx_desc);
1740
+ if (ret)
1741
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
14011742
1402
- return 0;
1743
+ return ret;
14031744 }
14041745
14051746 static int hclge_alloc_vport(struct hclge_dev *hdev)
....@@ -1415,7 +1756,7 @@
14151756 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
14161757
14171758 if (hdev->num_tqps < num_vport) {
1418
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1759
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
14191760 hdev->num_tqps, num_vport);
14201761 return -EINVAL;
14211762 }
....@@ -1438,6 +1779,14 @@
14381779 for (i = 0; i < num_vport; i++) {
14391780 vport->back = hdev;
14401781 vport->vport_id = i;
1782
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1783
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1784
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1785
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
1786
+ INIT_LIST_HEAD(&vport->vlan_list);
1787
+ INIT_LIST_HEAD(&vport->uc_mac_list);
1788
+ INIT_LIST_HEAD(&vport->mc_mac_list);
1789
+ spin_lock_init(&vport->mac_list_lock);
14411790
14421791 if (i == 0)
14431792 ret = hclge_vport_setup(vport, tqp_main_vport);
....@@ -1470,7 +1819,7 @@
14701819 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
14711820
14721821 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1473
- for (i = 0; i < HCLGE_TC_NUM; i++) {
1822
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
14741823 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
14751824
14761825 req->tx_pkt_buff[i] =
....@@ -1497,23 +1846,13 @@
14971846 return ret;
14981847 }
14991848
1500
-static int hclge_get_tc_num(struct hclge_dev *hdev)
1849
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
15011850 {
1502
- int i, cnt = 0;
1851
+ unsigned int i;
1852
+ u32 cnt = 0;
15031853
15041854 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
15051855 if (hdev->hw_tc_map & BIT(i))
1506
- cnt++;
1507
- return cnt;
1508
-}
1509
-
1510
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1511
-{
1512
- int i, cnt = 0;
1513
-
1514
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1515
- if (hdev->hw_tc_map & BIT(i) &&
1516
- hdev->tm_info.hw_pfc_map & BIT(i))
15171856 cnt++;
15181857 return cnt;
15191858 }
....@@ -1523,7 +1862,8 @@
15231862 struct hclge_pkt_buf_alloc *buf_alloc)
15241863 {
15251864 struct hclge_priv_buf *priv;
1526
- int i, cnt = 0;
1865
+ unsigned int i;
1866
+ int cnt = 0;
15271867
15281868 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
15291869 priv = &buf_alloc->priv_buf[i];
....@@ -1540,7 +1880,8 @@
15401880 struct hclge_pkt_buf_alloc *buf_alloc)
15411881 {
15421882 struct hclge_priv_buf *priv;
1543
- int i, cnt = 0;
1883
+ unsigned int i;
1884
+ int cnt = 0;
15441885
15451886 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
15461887 priv = &buf_alloc->priv_buf[i];
....@@ -1581,43 +1922,63 @@
15811922 struct hclge_pkt_buf_alloc *buf_alloc,
15821923 u32 rx_all)
15831924 {
1584
- u32 shared_buf_min, shared_buf_tc, shared_std;
1585
- int tc_num, pfc_enable_num;
1586
- u32 shared_buf;
1925
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1926
+ u32 tc_num = hclge_get_tc_num(hdev);
1927
+ u32 shared_buf, aligned_mps;
15871928 u32 rx_priv;
15881929 int i;
15891930
1590
- tc_num = hclge_get_tc_num(hdev);
1591
- pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1931
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
15921932
15931933 if (hnae3_dev_dcb_supported(hdev))
1594
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1934
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1935
+ hdev->dv_buf_size;
15951936 else
1596
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1937
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1938
+ + hdev->dv_buf_size;
15971939
1598
- shared_buf_tc = pfc_enable_num * hdev->mps +
1599
- (tc_num - pfc_enable_num) * hdev->mps / 2 +
1600
- hdev->mps;
1601
- shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1940
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1941
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1942
+ HCLGE_BUF_SIZE_UNIT);
16021943
16031944 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1604
- if (rx_all <= rx_priv + shared_std)
1945
+ if (rx_all < rx_priv + shared_std)
16051946 return false;
16061947
1607
- shared_buf = rx_all - rx_priv;
1948
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
16081949 buf_alloc->s_buf.buf_size = shared_buf;
1609
- buf_alloc->s_buf.self.high = shared_buf;
1610
- buf_alloc->s_buf.self.low = 2 * hdev->mps;
1950
+ if (hnae3_dev_dcb_supported(hdev)) {
1951
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1952
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1953
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1954
+ HCLGE_BUF_SIZE_UNIT);
1955
+ } else {
1956
+ buf_alloc->s_buf.self.high = aligned_mps +
1957
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
1958
+ buf_alloc->s_buf.self.low = aligned_mps;
1959
+ }
1960
+
1961
+ if (hnae3_dev_dcb_supported(hdev)) {
1962
+ hi_thrd = shared_buf - hdev->dv_buf_size;
1963
+
1964
+ if (tc_num <= NEED_RESERVE_TC_NUM)
1965
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1966
+ / BUF_MAX_PERCENT;
1967
+
1968
+ if (tc_num)
1969
+ hi_thrd = hi_thrd / tc_num;
1970
+
1971
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1972
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1973
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1974
+ } else {
1975
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1976
+ lo_thrd = aligned_mps;
1977
+ }
16111978
16121979 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1613
- if ((hdev->hw_tc_map & BIT(i)) &&
1614
- (hdev->tm_info.hw_pfc_map & BIT(i))) {
1615
- buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1616
- buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1617
- } else {
1618
- buf_alloc->s_buf.tc_thrd[i].low = 0;
1619
- buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1620
- }
1980
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1981
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
16211982 }
16221983
16231984 return true;
....@@ -1634,13 +1995,14 @@
16341995 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
16351996 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
16361997
1637
- if (total_size < HCLGE_DEFAULT_TX_BUF)
1638
- return -ENOMEM;
1998
+ if (hdev->hw_tc_map & BIT(i)) {
1999
+ if (total_size < hdev->tx_buf_size)
2000
+ return -ENOMEM;
16392001
1640
- if (hdev->hw_tc_map & BIT(i))
1641
- priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1642
- else
2002
+ priv->tx_buf_size = hdev->tx_buf_size;
2003
+ } else {
16432004 priv->tx_buf_size = 0;
2005
+ }
16442006
16452007 total_size -= priv->tx_buf_size;
16462008 }
....@@ -1648,64 +2010,15 @@
16482010 return 0;
16492011 }
16502012
1651
-/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1652
- * @hdev: pointer to struct hclge_dev
1653
- * @buf_alloc: pointer to buffer calculation data
1654
- * @return: 0: calculate sucessful, negative: fail
1655
- */
1656
-static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1657
- struct hclge_pkt_buf_alloc *buf_alloc)
2013
+static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2014
+ struct hclge_pkt_buf_alloc *buf_alloc)
16582015 {
1659
-#define HCLGE_BUF_SIZE_UNIT 128
1660
- u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1661
- int no_pfc_priv_num, pfc_priv_num;
1662
- struct hclge_priv_buf *priv;
1663
- int i;
2016
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017
+ u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2018
+ unsigned int i;
16642019
1665
- aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1666
- rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1667
-
1668
- /* When DCB is not supported, rx private
1669
- * buffer is not allocated.
1670
- */
1671
- if (!hnae3_dev_dcb_supported(hdev)) {
1672
- if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1673
- return -ENOMEM;
1674
-
1675
- return 0;
1676
- }
1677
-
1678
- /* step 1, try to alloc private buffer for all enabled tc */
16792020 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1680
- priv = &buf_alloc->priv_buf[i];
1681
- if (hdev->hw_tc_map & BIT(i)) {
1682
- priv->enable = 1;
1683
- if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1684
- priv->wl.low = aligned_mps;
1685
- priv->wl.high = priv->wl.low + aligned_mps;
1686
- priv->buf_size = priv->wl.high +
1687
- HCLGE_DEFAULT_DV;
1688
- } else {
1689
- priv->wl.low = 0;
1690
- priv->wl.high = 2 * aligned_mps;
1691
- priv->buf_size = priv->wl.high;
1692
- }
1693
- } else {
1694
- priv->enable = 0;
1695
- priv->wl.low = 0;
1696
- priv->wl.high = 0;
1697
- priv->buf_size = 0;
1698
- }
1699
- }
1700
-
1701
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1702
- return 0;
1703
-
1704
- /* step 2, try to decrease the buffer size of
1705
- * no pfc TC's private buffer
1706
- */
1707
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1708
- priv = &buf_alloc->priv_buf[i];
2021
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
17092022
17102023 priv->enable = 0;
17112024 priv->wl.low = 0;
....@@ -1718,31 +2031,35 @@
17182031 priv->enable = 1;
17192032
17202033 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1721
- priv->wl.low = 128;
1722
- priv->wl.high = priv->wl.low + aligned_mps;
1723
- priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
2034
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2035
+ priv->wl.high = roundup(priv->wl.low + aligned_mps,
2036
+ HCLGE_BUF_SIZE_UNIT);
17242037 } else {
17252038 priv->wl.low = 0;
1726
- priv->wl.high = aligned_mps;
1727
- priv->buf_size = priv->wl.high;
2039
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2040
+ aligned_mps;
17282041 }
2042
+
2043
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
17292044 }
17302045
1731
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1732
- return 0;
2046
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2047
+}
17332048
1734
- /* step 3, try to reduce the number of pfc disabled TCs,
1735
- * which have private buffer
1736
- */
1737
- /* get the total no pfc enable TC number, which have private buffer */
1738
- no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2049
+static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2050
+ struct hclge_pkt_buf_alloc *buf_alloc)
2051
+{
2052
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2053
+ int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2054
+ int i;
17392055
17402056 /* let the last to be cleared first */
17412057 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1742
- priv = &buf_alloc->priv_buf[i];
2058
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2059
+ unsigned int mask = BIT((unsigned int)i);
17432060
1744
- if (hdev->hw_tc_map & BIT(i) &&
1745
- !(hdev->tm_info.hw_pfc_map & BIT(i))) {
2061
+ if (hdev->hw_tc_map & mask &&
2062
+ !(hdev->tm_info.hw_pfc_map & mask)) {
17462063 /* Clear the no pfc TC private buffer */
17472064 priv->wl.low = 0;
17482065 priv->wl.high = 0;
....@@ -1756,20 +2073,23 @@
17562073 break;
17572074 }
17582075
1759
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1760
- return 0;
2076
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077
+}
17612078
1762
- /* step 4, try to reduce the number of pfc enabled TCs
1763
- * which have private buffer.
1764
- */
1765
- pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2079
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2080
+ struct hclge_pkt_buf_alloc *buf_alloc)
2081
+{
2082
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083
+ int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2084
+ int i;
17662085
17672086 /* let the last to be cleared first */
17682087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1769
- priv = &buf_alloc->priv_buf[i];
2088
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089
+ unsigned int mask = BIT((unsigned int)i);
17702090
1771
- if (hdev->hw_tc_map & BIT(i) &&
1772
- hdev->tm_info.hw_pfc_map & BIT(i)) {
2091
+ if (hdev->hw_tc_map & mask &&
2092
+ hdev->tm_info.hw_pfc_map & mask) {
17732093 /* Reduce the number of pfc TC with private buffer */
17742094 priv->wl.low = 0;
17752095 priv->enable = 0;
....@@ -1782,7 +2102,92 @@
17822102 pfc_priv_num == 0)
17832103 break;
17842104 }
1785
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2105
+
2106
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107
+}
2108
+
2109
+static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2110
+ struct hclge_pkt_buf_alloc *buf_alloc)
2111
+{
2112
+#define COMPENSATE_BUFFER 0x3C00
2113
+#define COMPENSATE_HALF_MPS_NUM 5
2114
+#define PRIV_WL_GAP 0x1800
2115
+
2116
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2117
+ u32 tc_num = hclge_get_tc_num(hdev);
2118
+ u32 half_mps = hdev->mps >> 1;
2119
+ u32 min_rx_priv;
2120
+ unsigned int i;
2121
+
2122
+ if (tc_num)
2123
+ rx_priv = rx_priv / tc_num;
2124
+
2125
+ if (tc_num <= NEED_RESERVE_TC_NUM)
2126
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2127
+
2128
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2129
+ COMPENSATE_HALF_MPS_NUM * half_mps;
2130
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2131
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2132
+
2133
+ if (rx_priv < min_rx_priv)
2134
+ return false;
2135
+
2136
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2137
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2138
+
2139
+ priv->enable = 0;
2140
+ priv->wl.low = 0;
2141
+ priv->wl.high = 0;
2142
+ priv->buf_size = 0;
2143
+
2144
+ if (!(hdev->hw_tc_map & BIT(i)))
2145
+ continue;
2146
+
2147
+ priv->enable = 1;
2148
+ priv->buf_size = rx_priv;
2149
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
2150
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2151
+ }
2152
+
2153
+ buf_alloc->s_buf.buf_size = 0;
2154
+
2155
+ return true;
2156
+}
2157
+
2158
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2159
+ * @hdev: pointer to struct hclge_dev
2160
+ * @buf_alloc: pointer to buffer calculation data
2161
+ * @return: 0: calculate sucessful, negative: fail
2162
+ */
2163
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2164
+ struct hclge_pkt_buf_alloc *buf_alloc)
2165
+{
2166
+ /* When DCB is not supported, rx private buffer is not allocated. */
2167
+ if (!hnae3_dev_dcb_supported(hdev)) {
2168
+ u32 rx_all = hdev->pkt_buf_size;
2169
+
2170
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2171
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2172
+ return -ENOMEM;
2173
+
2174
+ return 0;
2175
+ }
2176
+
2177
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2178
+ return 0;
2179
+
2180
+ if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2181
+ return 0;
2182
+
2183
+ /* try to decrease the buffer size */
2184
+ if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2185
+ return 0;
2186
+
2187
+ if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2188
+ return 0;
2189
+
2190
+ if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
17862191 return 0;
17872192
17882193 return -ENOMEM;
....@@ -2028,7 +2433,8 @@
20282433 int vectors;
20292434 int i;
20302435
2031
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2436
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2437
+ hdev->num_msi,
20322438 PCI_IRQ_MSI | PCI_IRQ_MSIX);
20332439 if (vectors < 0) {
20342440 dev_err(&pdev->dev,
....@@ -2038,11 +2444,12 @@
20382444 }
20392445 if (vectors < hdev->num_msi)
20402446 dev_warn(&hdev->pdev->dev,
2041
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2447
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
20422448 hdev->num_msi, vectors);
20432449
20442450 hdev->num_msi = vectors;
20452451 hdev->num_msi_left = vectors;
2452
+
20462453 hdev->base_msi_vector = pdev->irq;
20472454 hdev->roce_base_vector = hdev->base_msi_vector +
20482455 hdev->roce_base_msix_offset;
....@@ -2067,19 +2474,16 @@
20672474 return 0;
20682475 }
20692476
2070
-static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2477
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
20712478 {
2072
- struct hclge_mac *mac = &hdev->hw.mac;
2479
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2480
+ duplex = HCLGE_MAC_FULL;
20732481
2074
- if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2075
- mac->duplex = (u8)duplex;
2076
- else
2077
- mac->duplex = HCLGE_MAC_FULL;
2078
-
2079
- mac->speed = speed;
2482
+ return duplex;
20802483 }
20812484
2082
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2485
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2486
+ u8 duplex)
20832487 {
20842488 struct hclge_config_mac_speed_dup_cmd *req;
20852489 struct hclge_desc desc;
....@@ -2089,7 +2493,8 @@
20892493
20902494 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
20912495
2092
- hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2496
+ if (duplex)
2497
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
20932498
20942499 switch (speed) {
20952500 case HCLGE_MAC_SPEED_10M:
....@@ -2124,6 +2529,10 @@
21242529 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
21252530 HCLGE_CFG_SPEED_S, 5);
21262531 break;
2532
+ case HCLGE_MAC_SPEED_200G:
2533
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534
+ HCLGE_CFG_SPEED_S, 8);
2535
+ break;
21272536 default:
21282537 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
21292538 return -EINVAL;
....@@ -2139,7 +2548,25 @@
21392548 return ret;
21402549 }
21412550
2142
- hclge_check_speed_dup(hdev, duplex, speed);
2551
+ return 0;
2552
+}
2553
+
2554
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2555
+{
2556
+ struct hclge_mac *mac = &hdev->hw.mac;
2557
+ int ret;
2558
+
2559
+ duplex = hclge_check_speed_dup(duplex, speed);
2560
+ if (!mac->support_autoneg && mac->speed == speed &&
2561
+ mac->duplex == duplex)
2562
+ return 0;
2563
+
2564
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2565
+ if (ret)
2566
+ return ret;
2567
+
2568
+ hdev->hw.mac.speed = speed;
2569
+ hdev->hw.mac.duplex = duplex;
21432570
21442571 return 0;
21452572 }
....@@ -2153,37 +2580,6 @@
21532580 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
21542581 }
21552582
2156
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2157
- u8 *duplex)
2158
-{
2159
- struct hclge_query_an_speed_dup_cmd *req;
2160
- struct hclge_desc desc;
2161
- int speed_tmp;
2162
- int ret;
2163
-
2164
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2165
-
2166
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2167
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2168
- if (ret) {
2169
- dev_err(&hdev->pdev->dev,
2170
- "mac speed/autoneg/duplex query cmd failed %d\n",
2171
- ret);
2172
- return ret;
2173
- }
2174
-
2175
- *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2176
- speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2177
- HCLGE_QUERY_SPEED_S);
2178
-
2179
- ret = hclge_parse_speed(speed_tmp, speed);
2180
- if (ret)
2181
- dev_err(&hdev->pdev->dev,
2182
- "could not parse speed(=%d), %d\n", speed_tmp, ret);
2183
-
2184
- return ret;
2185
-}
2186
-
21872583 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
21882584 {
21892585 struct hclge_config_auto_neg_cmd *req;
....@@ -2194,7 +2590,8 @@
21942590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
21952591
21962592 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2197
- hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2593
+ if (enable)
2594
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
21982595 req->cfg_an_cmd_flag = cpu_to_le32(flag);
21992596
22002597 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -2209,6 +2606,16 @@
22092606 {
22102607 struct hclge_vport *vport = hclge_get_vport(handle);
22112608 struct hclge_dev *hdev = vport->back;
2609
+
2610
+ if (!hdev->hw.mac.support_autoneg) {
2611
+ if (enable) {
2612
+ dev_err(&hdev->pdev->dev,
2613
+ "autoneg is not supported by current port\n");
2614
+ return -EOPNOTSUPP;
2615
+ } else {
2616
+ return 0;
2617
+ }
2618
+ }
22122619
22132620 return hclge_set_autoneg_en(hdev, enable);
22142621 }
....@@ -2225,121 +2632,162 @@
22252632 return hdev->hw.mac.autoneg;
22262633 }
22272634
2228
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2229
- bool mask_vlan,
2230
- u8 *mac_mask)
2635
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
22312636 {
2232
- struct hclge_mac_vlan_mask_entry_cmd *req;
2637
+ struct hclge_vport *vport = hclge_get_vport(handle);
2638
+ struct hclge_dev *hdev = vport->back;
2639
+ int ret;
2640
+
2641
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2642
+
2643
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2644
+ if (ret)
2645
+ return ret;
2646
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2647
+}
2648
+
2649
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2650
+{
2651
+ struct hclge_vport *vport = hclge_get_vport(handle);
2652
+ struct hclge_dev *hdev = vport->back;
2653
+
2654
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2655
+ return hclge_set_autoneg_en(hdev, !halt);
2656
+
2657
+ return 0;
2658
+}
2659
+
2660
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2661
+{
2662
+ struct hclge_config_fec_cmd *req;
22332663 struct hclge_desc desc;
2234
- int status;
2664
+ int ret;
22352665
2236
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2237
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2666
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
22382667
2239
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2240
- mask_vlan ? 1 : 0);
2241
- ether_addr_copy(req->mac_mask, mac_mask);
2668
+ req = (struct hclge_config_fec_cmd *)desc.data;
2669
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
2670
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2671
+ if (fec_mode & BIT(HNAE3_FEC_RS))
2672
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2673
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2674
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
2675
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2676
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
22422677
2243
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
2244
- if (status)
2245
- dev_err(&hdev->pdev->dev,
2246
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2247
- status);
2678
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2679
+ if (ret)
2680
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
22482681
2249
- return status;
2682
+ return ret;
2683
+}
2684
+
2685
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2686
+{
2687
+ struct hclge_vport *vport = hclge_get_vport(handle);
2688
+ struct hclge_dev *hdev = vport->back;
2689
+ struct hclge_mac *mac = &hdev->hw.mac;
2690
+ int ret;
2691
+
2692
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
2693
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2694
+ return -EINVAL;
2695
+ }
2696
+
2697
+ ret = hclge_set_fec_hw(hdev, fec_mode);
2698
+ if (ret)
2699
+ return ret;
2700
+
2701
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2702
+ return 0;
2703
+}
2704
+
2705
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2706
+ u8 *fec_mode)
2707
+{
2708
+ struct hclge_vport *vport = hclge_get_vport(handle);
2709
+ struct hclge_dev *hdev = vport->back;
2710
+ struct hclge_mac *mac = &hdev->hw.mac;
2711
+
2712
+ if (fec_ability)
2713
+ *fec_ability = mac->fec_ability;
2714
+ if (fec_mode)
2715
+ *fec_mode = mac->fec_mode;
22502716 }
22512717
22522718 static int hclge_mac_init(struct hclge_dev *hdev)
22532719 {
2254
- struct hnae3_handle *handle = &hdev->vport[0].nic;
2255
- struct net_device *netdev = handle->kinfo.netdev;
22562720 struct hclge_mac *mac = &hdev->hw.mac;
2257
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2258
- struct hclge_vport *vport;
2259
- int mtu;
22602721 int ret;
2261
- int i;
22622722
2263
- ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2264
- if (ret) {
2265
- dev_err(&hdev->pdev->dev,
2266
- "Config mac speed dup fail ret=%d\n", ret);
2723
+ hdev->support_sfp_query = true;
2724
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2725
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2726
+ hdev->hw.mac.duplex);
2727
+ if (ret)
22672728 return ret;
2729
+
2730
+ if (hdev->hw.mac.support_autoneg) {
2731
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2732
+ if (ret)
2733
+ return ret;
22682734 }
22692735
22702736 mac->link = 0;
22712737
2272
- /* Initialize the MTA table work mode */
2273
- hdev->enable_mta = true;
2274
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2275
-
2276
- ret = hclge_set_mta_filter_mode(hdev,
2277
- hdev->mta_mac_sel_type,
2278
- hdev->enable_mta);
2279
- if (ret) {
2280
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2281
- ret);
2282
- return ret;
2283
- }
2284
-
2285
- for (i = 0; i < hdev->num_alloc_vport; i++) {
2286
- vport = &hdev->vport[i];
2287
- vport->accept_mta_mc = false;
2288
-
2289
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
2290
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
2291
- if (ret) {
2292
- dev_err(&hdev->pdev->dev,
2293
- "set mta filter mode fail ret=%d\n", ret);
2738
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2739
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2740
+ if (ret)
22942741 return ret;
2295
- }
22962742 }
22972743
2298
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2744
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
22992745 if (ret) {
2300
- dev_err(&hdev->pdev->dev,
2301
- "set default mac_vlan_mask fail ret=%d\n", ret);
2746
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
23022747 return ret;
23032748 }
23042749
2305
- if (netdev)
2306
- mtu = netdev->mtu;
2307
- else
2308
- mtu = ETH_DATA_LEN;
2750
+ ret = hclge_set_default_loopback(hdev);
2751
+ if (ret)
2752
+ return ret;
23092753
2310
- ret = hclge_set_mtu(handle, mtu);
2754
+ ret = hclge_buffer_alloc(hdev);
23112755 if (ret)
23122756 dev_err(&hdev->pdev->dev,
2313
- "set mtu failed ret=%d\n", ret);
2757
+ "allocate buffer fail, ret=%d\n", ret);
23142758
23152759 return ret;
23162760 }
23172761
23182762 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
23192763 {
2320
- if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2321
- schedule_work(&hdev->mbx_service_task);
2764
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2765
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2766
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2767
+ hclge_wq, &hdev->service_task, 0);
23222768 }
23232769
23242770 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
23252771 {
2326
- if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2327
- schedule_work(&hdev->rst_service_task);
2772
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2773
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2774
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2775
+ hclge_wq, &hdev->service_task, 0);
23282776 }
23292777
2330
-static void hclge_task_schedule(struct hclge_dev *hdev)
2778
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
23312779 {
2332
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2333
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2334
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2335
- (void)schedule_work(&hdev->service_task);
2780
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2781
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2782
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2783
+ hclge_wq, &hdev->service_task,
2784
+ delay_time);
23362785 }
23372786
2338
-static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2787
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
23392788 {
23402789 struct hclge_link_status_cmd *req;
23412790 struct hclge_desc desc;
2342
- int link_status;
23432791 int ret;
23442792
23452793 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
....@@ -2351,92 +2799,200 @@
23512799 }
23522800
23532801 req = (struct hclge_link_status_cmd *)desc.data;
2354
- link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2802
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2803
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
23552804
2356
- return !!link_status;
2805
+ return 0;
23572806 }
23582807
2359
-static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2808
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
23602809 {
2361
- int mac_state;
2362
- int link_stat;
2810
+ struct phy_device *phydev = hdev->hw.mac.phydev;
2811
+
2812
+ *link_status = HCLGE_LINK_STATUS_DOWN;
23632813
23642814 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
23652815 return 0;
23662816
2367
- mac_state = hclge_get_mac_link_status(hdev);
2817
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2818
+ return 0;
23682819
2369
- if (hdev->hw.mac.phydev) {
2370
- if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2371
- link_stat = mac_state &
2372
- hdev->hw.mac.phydev->link;
2373
- else
2374
- link_stat = 0;
2375
-
2376
- } else {
2377
- link_stat = mac_state;
2378
- }
2379
-
2380
- return !!link_stat;
2820
+ return hclge_get_mac_link_status(hdev, link_status);
23812821 }
23822822
23832823 static void hclge_update_link_status(struct hclge_dev *hdev)
23842824 {
2825
+ struct hnae3_client *rclient = hdev->roce_client;
23852826 struct hnae3_client *client = hdev->nic_client;
2827
+ struct hnae3_handle *rhandle;
23862828 struct hnae3_handle *handle;
23872829 int state;
2830
+ int ret;
23882831 int i;
23892832
23902833 if (!client)
23912834 return;
2392
- state = hclge_get_mac_phy_link(hdev);
2835
+
2836
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2837
+ return;
2838
+
2839
+ ret = hclge_get_mac_phy_link(hdev, &state);
2840
+ if (ret) {
2841
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2842
+ return;
2843
+ }
2844
+
23932845 if (state != hdev->hw.mac.link) {
23942846 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
23952847 handle = &hdev->vport[i].nic;
23962848 client->ops->link_status_change(handle, state);
2849
+ hclge_config_mac_tnl_int(hdev, state);
2850
+ rhandle = &hdev->vport[i].roce;
2851
+ if (rclient && rclient->ops->link_status_change)
2852
+ rclient->ops->link_status_change(rhandle,
2853
+ state);
23972854 }
23982855 hdev->hw.mac.link = state;
23992856 }
2857
+
2858
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
24002859 }
24012860
2402
-static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2861
+static void hclge_update_port_capability(struct hclge_mac *mac)
24032862 {
2404
- struct hclge_mac mac = hdev->hw.mac;
2405
- u8 duplex;
2406
- int speed;
2863
+ /* update fec ability by speed */
2864
+ hclge_convert_setting_fec(mac);
2865
+
2866
+ /* firmware can not identify back plane type, the media type
2867
+ * read from configuration can help deal it
2868
+ */
2869
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2870
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2871
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
2872
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
2874
+
2875
+ if (mac->support_autoneg) {
2876
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2877
+ linkmode_copy(mac->advertising, mac->supported);
2878
+ } else {
2879
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2880
+ mac->supported);
2881
+ linkmode_zero(mac->advertising);
2882
+ }
2883
+}
2884
+
2885
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2886
+{
2887
+ struct hclge_sfp_info_cmd *resp;
2888
+ struct hclge_desc desc;
24072889 int ret;
24082890
2409
- /* get the speed and duplex as autoneg'result from mac cmd when phy
2410
- * doesn't exit.
2411
- */
2412
- if (mac.phydev || !mac.autoneg)
2413
- return 0;
2414
-
2415
- ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2416
- if (ret) {
2417
- dev_err(&hdev->pdev->dev,
2418
- "mac autoneg/speed/duplex query failed %d\n", ret);
2891
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2892
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
2893
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2894
+ if (ret == -EOPNOTSUPP) {
2895
+ dev_warn(&hdev->pdev->dev,
2896
+ "IMP do not support get SFP speed %d\n", ret);
2897
+ return ret;
2898
+ } else if (ret) {
2899
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
24192900 return ret;
24202901 }
24212902
2422
- if ((mac.speed != speed) || (mac.duplex != duplex)) {
2423
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2424
- if (ret) {
2425
- dev_err(&hdev->pdev->dev,
2426
- "mac speed/duplex config failed %d\n", ret);
2427
- return ret;
2428
- }
2903
+ *speed = le32_to_cpu(resp->speed);
2904
+
2905
+ return 0;
2906
+}
2907
+
2908
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2909
+{
2910
+ struct hclge_sfp_info_cmd *resp;
2911
+ struct hclge_desc desc;
2912
+ int ret;
2913
+
2914
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2915
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
2916
+
2917
+ resp->query_type = QUERY_ACTIVE_SPEED;
2918
+
2919
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2920
+ if (ret == -EOPNOTSUPP) {
2921
+ dev_warn(&hdev->pdev->dev,
2922
+ "IMP does not support get SFP info %d\n", ret);
2923
+ return ret;
2924
+ } else if (ret) {
2925
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2926
+ return ret;
2927
+ }
2928
+
2929
+ /* In some case, mac speed get from IMP may be 0, it shouldn't be
2930
+ * set to mac->speed.
2931
+ */
2932
+ if (!le32_to_cpu(resp->speed))
2933
+ return 0;
2934
+
2935
+ mac->speed = le32_to_cpu(resp->speed);
2936
+ /* if resp->speed_ability is 0, it means it's an old version
2937
+ * firmware, do not update these params
2938
+ */
2939
+ if (resp->speed_ability) {
2940
+ mac->module_type = le32_to_cpu(resp->module_type);
2941
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
2942
+ mac->autoneg = resp->autoneg;
2943
+ mac->support_autoneg = resp->autoneg_ability;
2944
+ mac->speed_type = QUERY_ACTIVE_SPEED;
2945
+ if (!resp->active_fec)
2946
+ mac->fec_mode = 0;
2947
+ else
2948
+ mac->fec_mode = BIT(resp->active_fec);
2949
+ } else {
2950
+ mac->speed_type = QUERY_SFP_SPEED;
24292951 }
24302952
24312953 return 0;
24322954 }
24332955
2434
-static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2956
+static int hclge_update_port_info(struct hclge_dev *hdev)
24352957 {
2436
- struct hclge_vport *vport = hclge_get_vport(handle);
2437
- struct hclge_dev *hdev = vport->back;
2958
+ struct hclge_mac *mac = &hdev->hw.mac;
2959
+ int speed = HCLGE_MAC_SPEED_UNKNOWN;
2960
+ int ret;
24382961
2439
- return hclge_update_speed_duplex(hdev);
2962
+ /* get the port info from SFP cmd if not copper port */
2963
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2964
+ return 0;
2965
+
2966
+ /* if IMP does not support get SFP/qSFP info, return directly */
2967
+ if (!hdev->support_sfp_query)
2968
+ return 0;
2969
+
2970
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2971
+ ret = hclge_get_sfp_info(hdev, mac);
2972
+ else
2973
+ ret = hclge_get_sfp_speed(hdev, &speed);
2974
+
2975
+ if (ret == -EOPNOTSUPP) {
2976
+ hdev->support_sfp_query = false;
2977
+ return ret;
2978
+ } else if (ret) {
2979
+ return ret;
2980
+ }
2981
+
2982
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2983
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2984
+ hclge_update_port_capability(mac);
2985
+ return 0;
2986
+ }
2987
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2988
+ HCLGE_MAC_FULL);
2989
+ } else {
2990
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2991
+ return 0; /* do nothing if no SFP */
2992
+
2993
+ /* must config full duplex for SFP */
2994
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2995
+ }
24402996 }
24412997
24422998 static int hclge_get_status(struct hnae3_handle *handle)
....@@ -2449,59 +3005,103 @@
24493005 return hdev->hw.mac.link;
24503006 }
24513007
2452
-static void hclge_service_timer(struct timer_list *t)
3008
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
24533009 {
2454
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
3010
+ if (!pci_num_vf(hdev->pdev)) {
3011
+ dev_err(&hdev->pdev->dev,
3012
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3013
+ return NULL;
3014
+ }
24553015
2456
- mod_timer(&hdev->service_timer, jiffies + HZ);
2457
- hdev->hw_stats.stats_timer++;
2458
- hclge_task_schedule(hdev);
3016
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3017
+ dev_err(&hdev->pdev->dev,
3018
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
3019
+ vf, pci_num_vf(hdev->pdev));
3020
+ return NULL;
3021
+ }
3022
+
3023
+ /* VF start from 1 in vport */
3024
+ vf += HCLGE_VF_VPORT_START_NUM;
3025
+ return &hdev->vport[vf];
24593026 }
24603027
2461
-static void hclge_service_complete(struct hclge_dev *hdev)
3028
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3029
+ struct ifla_vf_info *ivf)
24623030 {
2463
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
3031
+ struct hclge_vport *vport = hclge_get_vport(handle);
3032
+ struct hclge_dev *hdev = vport->back;
24643033
2465
- /* Flush memory before next watchdog */
2466
- smp_mb__before_atomic();
2467
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3034
+ vport = hclge_get_vf_vport(hdev, vf);
3035
+ if (!vport)
3036
+ return -EINVAL;
3037
+
3038
+ ivf->vf = vf;
3039
+ ivf->linkstate = vport->vf_info.link_state;
3040
+ ivf->spoofchk = vport->vf_info.spoofchk;
3041
+ ivf->trusted = vport->vf_info.trusted;
3042
+ ivf->min_tx_rate = 0;
3043
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3044
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3045
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3046
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3047
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
3048
+
3049
+ return 0;
3050
+}
3051
+
3052
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3053
+ int link_state)
3054
+{
3055
+ struct hclge_vport *vport = hclge_get_vport(handle);
3056
+ struct hclge_dev *hdev = vport->back;
3057
+
3058
+ vport = hclge_get_vf_vport(hdev, vf);
3059
+ if (!vport)
3060
+ return -EINVAL;
3061
+
3062
+ vport->vf_info.link_state = link_state;
3063
+
3064
+ return 0;
24683065 }
24693066
24703067 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
24713068 {
2472
- u32 rst_src_reg;
2473
- u32 cmdq_src_reg;
3069
+ u32 cmdq_src_reg, msix_src_reg;
24743070
24753071 /* fetch the events from their corresponding regs */
2476
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
24773072 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3073
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
24783074
24793075 /* Assumption: If by any chance reset and mailbox events are reported
24803076 * together then we will only process reset event in this go and will
24813077 * defer the processing of the mailbox events. Since, we would have not
24823078 * cleared RX CMDQ event this time we would receive again another
24833079 * interrupt from H/W just for the mailbox.
3080
+ *
3081
+ * check for vector0 reset event sources
24843082 */
3083
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3084
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3085
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3086
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3087
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3088
+ hdev->rst_stats.imp_rst_cnt++;
3089
+ return HCLGE_VECTOR0_EVENT_RST;
3090
+ }
24853091
2486
- /* check for vector0 reset event sources */
2487
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3092
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3093
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
24883094 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
24893095 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
24903096 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3097
+ hdev->rst_stats.global_rst_cnt++;
24913098 return HCLGE_VECTOR0_EVENT_RST;
24923099 }
24933100
2494
- if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2495
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2496
- set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2497
- *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2498
- return HCLGE_VECTOR0_EVENT_RST;
2499
- }
2500
-
2501
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2502
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2503
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2504
- return HCLGE_VECTOR0_EVENT_RST;
3101
+ /* check for vector0 msix event source */
3102
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3103
+ *clearval = msix_src_reg;
3104
+ return HCLGE_VECTOR0_EVENT_ERR;
25053105 }
25063106
25073107 /* check for vector0 mailbox(=CMDQ RX) event source */
....@@ -2510,6 +3110,12 @@
25103110 *clearval = cmdq_src_reg;
25113111 return HCLGE_VECTOR0_EVENT_MBX;
25123112 }
3113
+
3114
+ /* print other vector0 event source */
3115
+ dev_info(&hdev->pdev->dev,
3116
+ "CMDQ INT status:0x%x, other INT status:0x%x\n",
3117
+ cmdq_src_reg, msix_src_reg);
3118
+ *clearval = msix_src_reg;
25133119
25143120 return HCLGE_VECTOR0_EVENT_OTHER;
25153121 }
....@@ -2523,6 +3129,8 @@
25233129 break;
25243130 case HCLGE_VECTOR0_EVENT_MBX:
25253131 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3132
+ break;
3133
+ default:
25263134 break;
25273135 }
25283136 }
....@@ -2544,14 +3152,27 @@
25443152 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
25453153 {
25463154 struct hclge_dev *hdev = data;
3155
+ u32 clearval = 0;
25473156 u32 event_cause;
2548
- u32 clearval;
25493157
25503158 hclge_enable_vector(&hdev->misc_vector, false);
25513159 event_cause = hclge_check_event_cause(hdev, &clearval);
25523160
25533161 /* vector 0 interrupt is shared with reset and mailbox source events.*/
25543162 switch (event_cause) {
3163
+ case HCLGE_VECTOR0_EVENT_ERR:
3164
+ /* we do not know what type of reset is required now. This could
3165
+ * only be decided after we fetch the type of errors which
3166
+ * caused this event. Therefore, we will do below for now:
3167
+ * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3168
+ * have defered type of reset to be used.
3169
+ * 2. Schedule the reset serivce task.
3170
+ * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3171
+ * will fetch the correct type of reset. This would be done
3172
+ * by first decoding the types of errors.
3173
+ */
3174
+ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3175
+ fallthrough;
25553176 case HCLGE_VECTOR0_EVENT_RST:
25563177 hclge_reset_task_schedule(hdev);
25573178 break;
....@@ -2573,9 +3194,15 @@
25733194 break;
25743195 }
25753196
2576
- /* clear the source of interrupt if it is not cause by reset */
2577
- if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2578
- hclge_clear_event_cause(hdev, event_cause, clearval);
3197
+ hclge_clear_event_cause(hdev, event_cause, clearval);
3198
+
3199
+ /* Enable interrupt if it is not cause by reset. And when
3200
+ * clearval equal to 0, it means interrupt status may be
3201
+ * cleared by hardware before driver reads status register.
3202
+ * For this case, vector0 interrupt also should be enabled.
3203
+ */
3204
+ if (!clearval ||
3205
+ event_cause == HCLGE_VECTOR0_EVENT_MBX) {
25793206 hclge_enable_vector(&hdev->misc_vector, true);
25803207 }
25813208
....@@ -2608,6 +3235,36 @@
26083235 hdev->num_msi_used += 1;
26093236 }
26103237
3238
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3239
+ const cpumask_t *mask)
3240
+{
3241
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3242
+ affinity_notify);
3243
+
3244
+ cpumask_copy(&hdev->affinity_mask, mask);
3245
+}
3246
+
3247
+static void hclge_irq_affinity_release(struct kref *ref)
3248
+{
3249
+}
3250
+
3251
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3252
+{
3253
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3254
+ &hdev->affinity_mask);
3255
+
3256
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3257
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
3258
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3259
+ &hdev->affinity_notify);
3260
+}
3261
+
3262
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3263
+{
3264
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3265
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3266
+}
3267
+
26113268 static int hclge_misc_irq_init(struct hclge_dev *hdev)
26123269 {
26133270 int ret;
....@@ -2615,8 +3272,10 @@
26153272 hclge_get_misc_vector(hdev);
26163273
26173274 /* this would be explicitly freed in the end */
3275
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3276
+ HCLGE_NAME, pci_name(hdev->pdev));
26183277 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2619
- 0, "hclge_misc", hdev);
3278
+ 0, hdev->misc_vector.name, hdev);
26203279 if (ret) {
26213280 hclge_free_vector(hdev, 0);
26223281 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
....@@ -2632,11 +3291,14 @@
26323291 hclge_free_vector(hdev, 0);
26333292 }
26343293
2635
-static int hclge_notify_client(struct hclge_dev *hdev,
2636
- enum hnae3_reset_notify_type type)
3294
+int hclge_notify_client(struct hclge_dev *hdev,
3295
+ enum hnae3_reset_notify_type type)
26373296 {
26383297 struct hnae3_client *client = hdev->nic_client;
26393298 u16 i;
3299
+
3300
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3301
+ return 0;
26403302
26413303 if (!client->ops->reset_notify)
26423304 return -EOPNOTSUPP;
....@@ -2646,28 +3308,60 @@
26463308 int ret;
26473309
26483310 ret = client->ops->reset_notify(handle, type);
2649
- if (ret)
3311
+ if (ret) {
3312
+ dev_err(&hdev->pdev->dev,
3313
+ "notify nic client failed %d(%d)\n", type, ret);
26503314 return ret;
3315
+ }
26513316 }
26523317
26533318 return 0;
26543319 }
26553320
3321
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
3322
+ enum hnae3_reset_notify_type type)
3323
+{
3324
+ struct hnae3_client *client = hdev->roce_client;
3325
+ int ret;
3326
+ u16 i;
3327
+
3328
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3329
+ return 0;
3330
+
3331
+ if (!client->ops->reset_notify)
3332
+ return -EOPNOTSUPP;
3333
+
3334
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3335
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
3336
+
3337
+ ret = client->ops->reset_notify(handle, type);
3338
+ if (ret) {
3339
+ dev_err(&hdev->pdev->dev,
3340
+ "notify roce client failed %d(%d)",
3341
+ type, ret);
3342
+ return ret;
3343
+ }
3344
+ }
3345
+
3346
+ return ret;
3347
+}
3348
+
26563349 static int hclge_reset_wait(struct hclge_dev *hdev)
26573350 {
26583351 #define HCLGE_RESET_WATI_MS 100
2659
-#define HCLGE_RESET_WAIT_CNT 5
3352
+#define HCLGE_RESET_WAIT_CNT 350
3353
+
26603354 u32 val, reg, reg_bit;
26613355 u32 cnt = 0;
26623356
26633357 switch (hdev->reset_type) {
3358
+ case HNAE3_IMP_RESET:
3359
+ reg = HCLGE_GLOBAL_RESET_REG;
3360
+ reg_bit = HCLGE_IMP_RESET_BIT;
3361
+ break;
26643362 case HNAE3_GLOBAL_RESET:
26653363 reg = HCLGE_GLOBAL_RESET_REG;
26663364 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2667
- break;
2668
- case HNAE3_CORE_RESET:
2669
- reg = HCLGE_GLOBAL_RESET_REG;
2670
- reg_bit = HCLGE_CORE_RESET_BIT;
26713365 break;
26723366 case HNAE3_FUNC_RESET:
26733367 reg = HCLGE_FUN_RST_ING;
....@@ -2696,6 +3390,134 @@
26963390 return 0;
26973391 }
26983392
3393
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3394
+{
3395
+ struct hclge_vf_rst_cmd *req;
3396
+ struct hclge_desc desc;
3397
+
3398
+ req = (struct hclge_vf_rst_cmd *)desc.data;
3399
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3400
+ req->dest_vfid = func_id;
3401
+
3402
+ if (reset)
3403
+ req->vf_rst = 0x1;
3404
+
3405
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
3406
+}
3407
+
3408
+static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3409
+{
3410
+ int i;
3411
+
3412
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3413
+ struct hclge_vport *vport = &hdev->vport[i];
3414
+ int ret;
3415
+
3416
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
3417
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3418
+ if (ret) {
3419
+ dev_err(&hdev->pdev->dev,
3420
+ "set vf(%u) rst failed %d!\n",
3421
+ vport->vport_id, ret);
3422
+ return ret;
3423
+ }
3424
+
3425
+ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3426
+ continue;
3427
+
3428
+ /* Inform VF to process the reset.
3429
+ * hclge_inform_reset_assert_to_vf may fail if VF
3430
+ * driver is not loaded.
3431
+ */
3432
+ ret = hclge_inform_reset_assert_to_vf(vport);
3433
+ if (ret)
3434
+ dev_warn(&hdev->pdev->dev,
3435
+ "inform reset to vf(%u) failed %d!\n",
3436
+ vport->vport_id, ret);
3437
+ }
3438
+
3439
+ return 0;
3440
+}
3441
+
3442
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3443
+{
3444
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3445
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3446
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3447
+ return;
3448
+
3449
+ hclge_mbx_handler(hdev);
3450
+
3451
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3452
+}
3453
+
3454
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3455
+{
3456
+ struct hclge_pf_rst_sync_cmd *req;
3457
+ struct hclge_desc desc;
3458
+ int cnt = 0;
3459
+ int ret;
3460
+
3461
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3462
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3463
+
3464
+ do {
3465
+ /* vf need to down netdev by mbx during PF or FLR reset */
3466
+ hclge_mailbox_service_task(hdev);
3467
+
3468
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3469
+ /* for compatible with old firmware, wait
3470
+ * 100 ms for VF to stop IO
3471
+ */
3472
+ if (ret == -EOPNOTSUPP) {
3473
+ msleep(HCLGE_RESET_SYNC_TIME);
3474
+ return;
3475
+ } else if (ret) {
3476
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3477
+ ret);
3478
+ return;
3479
+ } else if (req->all_vf_ready) {
3480
+ return;
3481
+ }
3482
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
3483
+ hclge_cmd_reuse_desc(&desc, true);
3484
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3485
+
3486
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3487
+}
3488
+
3489
+void hclge_report_hw_error(struct hclge_dev *hdev,
3490
+ enum hnae3_hw_error_type type)
3491
+{
3492
+ struct hnae3_client *client = hdev->nic_client;
3493
+ u16 i;
3494
+
3495
+ if (!client || !client->ops->process_hw_error ||
3496
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3497
+ return;
3498
+
3499
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3500
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
3501
+}
3502
+
3503
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
3504
+{
3505
+ u32 reg_val;
3506
+
3507
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3508
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3509
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3510
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3511
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3512
+ }
3513
+
3514
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3515
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3516
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3517
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3518
+ }
3519
+}
3520
+
26993521 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
27003522 {
27013523 struct hclge_desc desc;
....@@ -2716,56 +3538,87 @@
27163538
27173539 static void hclge_do_reset(struct hclge_dev *hdev)
27183540 {
3541
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
27193542 struct pci_dev *pdev = hdev->pdev;
27203543 u32 val;
27213544
3545
+ if (hclge_get_hw_reset_stat(handle)) {
3546
+ dev_info(&pdev->dev, "hardware reset not finish\n");
3547
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3548
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3549
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3550
+ return;
3551
+ }
3552
+
27223553 switch (hdev->reset_type) {
27233554 case HNAE3_GLOBAL_RESET:
3555
+ dev_info(&pdev->dev, "global reset requested\n");
27243556 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
27253557 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
27263558 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2727
- dev_info(&pdev->dev, "Global Reset requested\n");
2728
- break;
2729
- case HNAE3_CORE_RESET:
2730
- val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2731
- hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2732
- hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2733
- dev_info(&pdev->dev, "Core Reset requested\n");
27343559 break;
27353560 case HNAE3_FUNC_RESET:
2736
- dev_info(&pdev->dev, "PF Reset requested\n");
2737
- hclge_func_reset_cmd(hdev, 0);
3561
+ dev_info(&pdev->dev, "PF reset requested\n");
27383562 /* schedule again to check later */
27393563 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
27403564 hclge_reset_task_schedule(hdev);
27413565 break;
27423566 default:
27433567 dev_warn(&pdev->dev,
2744
- "Unsupported reset type: %d\n", hdev->reset_type);
3568
+ "unsupported reset type: %d\n", hdev->reset_type);
27453569 break;
27463570 }
27473571 }
27483572
2749
-static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3573
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
27503574 unsigned long *addr)
27513575 {
27523576 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3577
+ struct hclge_dev *hdev = ae_dev->priv;
3578
+
3579
+ /* first, resolve any unknown reset type to the known type(s) */
3580
+ if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3581
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3582
+ HCLGE_MISC_VECTOR_INT_STS);
3583
+ /* we will intentionally ignore any errors from this function
3584
+ * as we will end up in *some* reset request in any case
3585
+ */
3586
+ if (hclge_handle_hw_msix_error(hdev, addr))
3587
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3588
+ msix_sts_reg);
3589
+
3590
+ clear_bit(HNAE3_UNKNOWN_RESET, addr);
3591
+ /* We defered the clearing of the error event which caused
3592
+ * interrupt since it was not posssible to do that in
3593
+ * interrupt context (and this is the reason we introduced
3594
+ * new UNKNOWN reset type). Now, the errors have been
3595
+ * handled and cleared in hardware we can safely enable
3596
+ * interrupts. This is an exception to the norm.
3597
+ */
3598
+ hclge_enable_vector(&hdev->misc_vector, true);
3599
+ }
27533600
27543601 /* return the highest priority reset level amongst all */
2755
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
2756
- rst_level = HNAE3_GLOBAL_RESET;
2757
- else if (test_bit(HNAE3_CORE_RESET, addr))
2758
- rst_level = HNAE3_CORE_RESET;
2759
- else if (test_bit(HNAE3_IMP_RESET, addr))
3602
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
27603603 rst_level = HNAE3_IMP_RESET;
2761
- else if (test_bit(HNAE3_FUNC_RESET, addr))
3604
+ clear_bit(HNAE3_IMP_RESET, addr);
3605
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
3606
+ clear_bit(HNAE3_FUNC_RESET, addr);
3607
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3608
+ rst_level = HNAE3_GLOBAL_RESET;
3609
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
3610
+ clear_bit(HNAE3_FUNC_RESET, addr);
3611
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
27623612 rst_level = HNAE3_FUNC_RESET;
3613
+ clear_bit(HNAE3_FUNC_RESET, addr);
3614
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3615
+ rst_level = HNAE3_FLR_RESET;
3616
+ clear_bit(HNAE3_FLR_RESET, addr);
3617
+ }
27633618
2764
- /* now, clear all other resets */
2765
- clear_bit(HNAE3_GLOBAL_RESET, addr);
2766
- clear_bit(HNAE3_CORE_RESET, addr);
2767
- clear_bit(HNAE3_IMP_RESET, addr);
2768
- clear_bit(HNAE3_FUNC_RESET, addr);
3619
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
3620
+ rst_level < hdev->reset_type)
3621
+ return HNAE3_NONE_RESET;
27693622
27703623 return rst_level;
27713624 }
....@@ -2781,9 +3634,6 @@
27813634 case HNAE3_GLOBAL_RESET:
27823635 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
27833636 break;
2784
- case HNAE3_CORE_RESET:
2785
- clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2786
- break;
27873637 default:
27883638 break;
27893639 }
....@@ -2791,45 +3641,302 @@
27913641 if (!clearval)
27923642 return;
27933643
2794
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3644
+ /* For revision 0x20, the reset interrupt source
3645
+ * can only be cleared after hardware reset done
3646
+ */
3647
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3648
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3649
+ clearval);
3650
+
27953651 hclge_enable_vector(&hdev->misc_vector, true);
3652
+}
3653
+
3654
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3655
+{
3656
+ u32 reg_val;
3657
+
3658
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3659
+ if (enable)
3660
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
3661
+ else
3662
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3663
+
3664
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3665
+}
3666
+
3667
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3668
+{
3669
+ int ret;
3670
+
3671
+ ret = hclge_set_all_vf_rst(hdev, true);
3672
+ if (ret)
3673
+ return ret;
3674
+
3675
+ hclge_func_reset_sync_vf(hdev);
3676
+
3677
+ return 0;
3678
+}
3679
+
3680
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3681
+{
3682
+ u32 reg_val;
3683
+ int ret = 0;
3684
+
3685
+ switch (hdev->reset_type) {
3686
+ case HNAE3_FUNC_RESET:
3687
+ ret = hclge_func_reset_notify_vf(hdev);
3688
+ if (ret)
3689
+ return ret;
3690
+
3691
+ ret = hclge_func_reset_cmd(hdev, 0);
3692
+ if (ret) {
3693
+ dev_err(&hdev->pdev->dev,
3694
+ "asserting function reset fail %d!\n", ret);
3695
+ return ret;
3696
+ }
3697
+
3698
+ /* After performaning pf reset, it is not necessary to do the
3699
+ * mailbox handling or send any command to firmware, because
3700
+ * any mailbox handling or command to firmware is only valid
3701
+ * after hclge_cmd_init is called.
3702
+ */
3703
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3704
+ hdev->rst_stats.pf_rst_cnt++;
3705
+ break;
3706
+ case HNAE3_FLR_RESET:
3707
+ ret = hclge_func_reset_notify_vf(hdev);
3708
+ if (ret)
3709
+ return ret;
3710
+ break;
3711
+ case HNAE3_IMP_RESET:
3712
+ hclge_handle_imp_error(hdev);
3713
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3714
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3715
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3716
+ break;
3717
+ default:
3718
+ break;
3719
+ }
3720
+
3721
+ /* inform hardware that preparatory work is done */
3722
+ msleep(HCLGE_RESET_SYNC_TIME);
3723
+ hclge_reset_handshake(hdev, true);
3724
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3725
+
3726
+ return ret;
3727
+}
3728
+
3729
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3730
+{
3731
+#define MAX_RESET_FAIL_CNT 5
3732
+
3733
+ if (hdev->reset_pending) {
3734
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3735
+ hdev->reset_pending);
3736
+ return true;
3737
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3738
+ HCLGE_RESET_INT_M) {
3739
+ dev_info(&hdev->pdev->dev,
3740
+ "reset failed because new reset interrupt\n");
3741
+ hclge_clear_reset_cause(hdev);
3742
+ return false;
3743
+ } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3744
+ hdev->rst_stats.reset_fail_cnt++;
3745
+ set_bit(hdev->reset_type, &hdev->reset_pending);
3746
+ dev_info(&hdev->pdev->dev,
3747
+ "re-schedule reset task(%u)\n",
3748
+ hdev->rst_stats.reset_fail_cnt);
3749
+ return true;
3750
+ }
3751
+
3752
+ hclge_clear_reset_cause(hdev);
3753
+
3754
+ /* recover the handshake status when reset fail */
3755
+ hclge_reset_handshake(hdev, true);
3756
+
3757
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
3758
+
3759
+ hclge_dbg_dump_rst_info(hdev);
3760
+
3761
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3762
+
3763
+ return false;
3764
+}
3765
+
3766
+static int hclge_set_rst_done(struct hclge_dev *hdev)
3767
+{
3768
+ struct hclge_pf_rst_done_cmd *req;
3769
+ struct hclge_desc desc;
3770
+ int ret;
3771
+
3772
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
3773
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3774
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3775
+
3776
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3777
+ /* To be compatible with the old firmware, which does not support
3778
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3779
+ * return success
3780
+ */
3781
+ if (ret == -EOPNOTSUPP) {
3782
+ dev_warn(&hdev->pdev->dev,
3783
+ "current firmware does not support command(0x%x)!\n",
3784
+ HCLGE_OPC_PF_RST_DONE);
3785
+ return 0;
3786
+ } else if (ret) {
3787
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3788
+ ret);
3789
+ }
3790
+
3791
+ return ret;
3792
+}
3793
+
3794
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3795
+{
3796
+ int ret = 0;
3797
+
3798
+ switch (hdev->reset_type) {
3799
+ case HNAE3_FUNC_RESET:
3800
+ case HNAE3_FLR_RESET:
3801
+ ret = hclge_set_all_vf_rst(hdev, false);
3802
+ break;
3803
+ case HNAE3_GLOBAL_RESET:
3804
+ case HNAE3_IMP_RESET:
3805
+ ret = hclge_set_rst_done(hdev);
3806
+ break;
3807
+ default:
3808
+ break;
3809
+ }
3810
+
3811
+ /* clear up the handshake status after re-initialize done */
3812
+ hclge_reset_handshake(hdev, false);
3813
+
3814
+ return ret;
3815
+}
3816
+
3817
+static int hclge_reset_stack(struct hclge_dev *hdev)
3818
+{
3819
+ int ret;
3820
+
3821
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3822
+ if (ret)
3823
+ return ret;
3824
+
3825
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
3826
+ if (ret)
3827
+ return ret;
3828
+
3829
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3830
+}
3831
+
3832
+static int hclge_reset_prepare(struct hclge_dev *hdev)
3833
+{
3834
+ int ret;
3835
+
3836
+ hdev->rst_stats.reset_cnt++;
3837
+ /* perform reset of the stack & ae device for a client */
3838
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3839
+ if (ret)
3840
+ return ret;
3841
+
3842
+ rtnl_lock();
3843
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3844
+ rtnl_unlock();
3845
+ if (ret)
3846
+ return ret;
3847
+
3848
+ return hclge_reset_prepare_wait(hdev);
3849
+}
3850
+
3851
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
3852
+{
3853
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3854
+ enum hnae3_reset_type reset_level;
3855
+ int ret;
3856
+
3857
+ hdev->rst_stats.hw_reset_done_cnt++;
3858
+
3859
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3860
+ if (ret)
3861
+ return ret;
3862
+
3863
+ rtnl_lock();
3864
+ ret = hclge_reset_stack(hdev);
3865
+ rtnl_unlock();
3866
+ if (ret)
3867
+ return ret;
3868
+
3869
+ hclge_clear_reset_cause(hdev);
3870
+
3871
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3872
+ /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3873
+ * times
3874
+ */
3875
+ if (ret &&
3876
+ hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3877
+ return ret;
3878
+
3879
+ ret = hclge_reset_prepare_up(hdev);
3880
+ if (ret)
3881
+ return ret;
3882
+
3883
+ rtnl_lock();
3884
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3885
+ rtnl_unlock();
3886
+ if (ret)
3887
+ return ret;
3888
+
3889
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3890
+ if (ret)
3891
+ return ret;
3892
+
3893
+ hdev->last_reset_time = jiffies;
3894
+ hdev->rst_stats.reset_fail_cnt = 0;
3895
+ hdev->rst_stats.reset_done_cnt++;
3896
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3897
+
3898
+ /* if default_reset_request has a higher level reset request,
3899
+ * it should be handled as soon as possible. since some errors
3900
+ * need this kind of reset to fix.
3901
+ */
3902
+ reset_level = hclge_get_reset_level(ae_dev,
3903
+ &hdev->default_reset_request);
3904
+ if (reset_level != HNAE3_NONE_RESET)
3905
+ set_bit(reset_level, &hdev->reset_request);
3906
+
3907
+ return 0;
27963908 }
27973909
27983910 static void hclge_reset(struct hclge_dev *hdev)
27993911 {
2800
- struct hnae3_handle *handle;
3912
+ if (hclge_reset_prepare(hdev))
3913
+ goto err_reset;
28013914
2802
- /* perform reset of the stack & ae device for a client */
2803
- handle = &hdev->vport[0].nic;
2804
- rtnl_lock();
2805
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2806
- rtnl_unlock();
3915
+ if (hclge_reset_wait(hdev))
3916
+ goto err_reset;
28073917
2808
- if (!hclge_reset_wait(hdev)) {
2809
- rtnl_lock();
2810
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2811
- hclge_reset_ae_dev(hdev->ae_dev);
2812
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3918
+ if (hclge_reset_rebuild(hdev))
3919
+ goto err_reset;
28133920
2814
- hclge_clear_reset_cause(hdev);
2815
- } else {
2816
- rtnl_lock();
2817
- /* schedule again to check pending resets later */
2818
- set_bit(hdev->reset_type, &hdev->reset_pending);
3921
+ return;
3922
+
3923
+err_reset:
3924
+ if (hclge_reset_err_handle(hdev))
28193925 hclge_reset_task_schedule(hdev);
2820
- }
2821
-
2822
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2823
- handle->last_reset_time = jiffies;
2824
- rtnl_unlock();
28253926 }
28263927
2827
-static void hclge_reset_event(struct hnae3_handle *handle)
3928
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
28283929 {
2829
- struct hclge_vport *vport = hclge_get_vport(handle);
2830
- struct hclge_dev *hdev = vport->back;
3930
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3931
+ struct hclge_dev *hdev = ae_dev->priv;
28313932
2832
- /* check if this is a new reset request and we are not here just because
3933
+ /* We might end up getting called broadly because of 2 below cases:
3934
+ * 1. Recoverable error was conveyed through APEI and only way to bring
3935
+ * normalcy is to reset.
3936
+ * 2. A new reset request from the stack due to timeout
3937
+ *
3938
+ * For the first case,error event might not have ae handle available.
3939
+ * check if this is a new reset request and we are not here just because
28333940 * last reset attempt did not succeed and watchdog hit us again. We will
28343941 * know this if last reset request did not occur very recently (watchdog
28353942 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
....@@ -2838,24 +3945,59 @@
28383945 * want to make sure we throttle the reset request. Therefore, we will
28393946 * not allow it again before 3*HZ times.
28403947 */
2841
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
2842
- return;
2843
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2844
- handle->reset_level = HNAE3_FUNC_RESET;
3948
+ if (!handle)
3949
+ handle = &hdev->vport[0].nic;
28453950
2846
- dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
2847
- handle->reset_level);
3951
+ if (time_before(jiffies, (hdev->last_reset_time +
3952
+ HCLGE_RESET_INTERVAL))) {
3953
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3954
+ return;
3955
+ } else if (hdev->default_reset_request) {
3956
+ hdev->reset_level =
3957
+ hclge_get_reset_level(ae_dev,
3958
+ &hdev->default_reset_request);
3959
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3960
+ hdev->reset_level = HNAE3_FUNC_RESET;
3961
+ }
3962
+
3963
+ dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3964
+ hdev->reset_level);
28483965
28493966 /* request reset & schedule reset task */
2850
- set_bit(handle->reset_level, &hdev->reset_request);
3967
+ set_bit(hdev->reset_level, &hdev->reset_request);
28513968 hclge_reset_task_schedule(hdev);
28523969
2853
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
2854
- handle->reset_level++;
3970
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3971
+ hdev->reset_level++;
3972
+}
3973
+
3974
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3975
+ enum hnae3_reset_type rst_type)
3976
+{
3977
+ struct hclge_dev *hdev = ae_dev->priv;
3978
+
3979
+ set_bit(rst_type, &hdev->default_reset_request);
3980
+}
3981
+
3982
+static void hclge_reset_timer(struct timer_list *t)
3983
+{
3984
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3985
+
3986
+ /* if default_reset_request has no value, it means that this reset
3987
+ * request has already be handled, so just return here
3988
+ */
3989
+ if (!hdev->default_reset_request)
3990
+ return;
3991
+
3992
+ dev_info(&hdev->pdev->dev,
3993
+ "triggering reset in reset timer\n");
3994
+ hclge_reset_event(hdev->pdev, NULL);
28553995 }
28563996
28573997 static void hclge_reset_subtask(struct hclge_dev *hdev)
28583998 {
3999
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4000
+
28594001 /* check if there is any ongoing reset in the hardware. This status can
28604002 * be checked from reset_pending. If there is then, we need to wait for
28614003 * hardware to complete reset.
....@@ -2865,61 +4007,111 @@
28654007 * b. else, we can come back later to check this status so re-sched
28664008 * now.
28674009 */
2868
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
4010
+ hdev->last_reset_time = jiffies;
4011
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
28694012 if (hdev->reset_type != HNAE3_NONE_RESET)
28704013 hclge_reset(hdev);
28714014
28724015 /* check if we got any *new* reset requests to be honored */
2873
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
4016
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
28744017 if (hdev->reset_type != HNAE3_NONE_RESET)
28754018 hclge_do_reset(hdev);
28764019
28774020 hdev->reset_type = HNAE3_NONE_RESET;
28784021 }
28794022
2880
-static void hclge_reset_service_task(struct work_struct *work)
4023
+static void hclge_reset_service_task(struct hclge_dev *hdev)
28814024 {
2882
- struct hclge_dev *hdev =
2883
- container_of(work, struct hclge_dev, rst_service_task);
2884
-
2885
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4025
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
28864026 return;
28874027
2888
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
4028
+ down(&hdev->reset_sem);
4029
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
28894030
28904031 hclge_reset_subtask(hdev);
28914032
28924033 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4034
+ up(&hdev->reset_sem);
28934035 }
28944036
2895
-static void hclge_mailbox_service_task(struct work_struct *work)
4037
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
28964038 {
2897
- struct hclge_dev *hdev =
2898
- container_of(work, struct hclge_dev, mbx_service_task);
4039
+ int i;
28994040
2900
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
4041
+ /* start from vport 1 for PF is always alive */
4042
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
4043
+ struct hclge_vport *vport = &hdev->vport[i];
4044
+
4045
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4046
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4047
+
4048
+ /* If vf is not alive, set to default value */
4049
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4050
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4051
+ }
4052
+}
4053
+
4054
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
4055
+{
4056
+ unsigned long delta = round_jiffies_relative(HZ);
4057
+
4058
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
29014059 return;
29024060
2903
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
4061
+ /* Always handle the link updating to make sure link state is
4062
+ * updated when it is triggered by mbx.
4063
+ */
4064
+ hclge_update_link_status(hdev);
4065
+ hclge_sync_mac_table(hdev);
4066
+ hclge_sync_promisc_mode(hdev);
29044067
2905
- hclge_mbx_handler(hdev);
4068
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4069
+ delta = jiffies - hdev->last_serv_processed;
29064070
2907
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
4071
+ if (delta < round_jiffies_relative(HZ)) {
4072
+ delta = round_jiffies_relative(HZ) - delta;
4073
+ goto out;
4074
+ }
4075
+ }
4076
+
4077
+ hdev->serv_processed_cnt++;
4078
+ hclge_update_vport_alive(hdev);
4079
+
4080
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4081
+ hdev->last_serv_processed = jiffies;
4082
+ goto out;
4083
+ }
4084
+
4085
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4086
+ hclge_update_stats_for_all(hdev);
4087
+
4088
+ hclge_update_port_info(hdev);
4089
+ hclge_sync_vlan_filter(hdev);
4090
+
4091
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4092
+ hclge_rfs_filter_expire(hdev);
4093
+
4094
+ hdev->last_serv_processed = jiffies;
4095
+
4096
+out:
4097
+ hclge_task_schedule(hdev, delta);
29084098 }
29094099
29104100 static void hclge_service_task(struct work_struct *work)
29114101 {
29124102 struct hclge_dev *hdev =
2913
- container_of(work, struct hclge_dev, service_task);
4103
+ container_of(work, struct hclge_dev, service_task.work);
29144104
2915
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2916
- hclge_update_stats_for_all(hdev);
2917
- hdev->hw_stats.stats_timer = 0;
2918
- }
4105
+ hclge_reset_service_task(hdev);
4106
+ hclge_mailbox_service_task(hdev);
4107
+ hclge_periodic_service_task(hdev);
29194108
2920
- hclge_update_speed_duplex(hdev);
2921
- hclge_update_link_status(hdev);
2922
- hclge_service_complete(hdev);
4109
+ /* Handle reset and mbx again in case periodical task delays the
4110
+ * handling by calling hclge_task_schedule() in
4111
+ * hclge_periodic_service_task().
4112
+ */
4113
+ hclge_reset_service_task(hdev);
4114
+ hclge_mailbox_service_task(hdev);
29234115 }
29244116
29254117 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
....@@ -2942,6 +4134,7 @@
29424134 int alloc = 0;
29434135 int i, j;
29444136
4137
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
29454138 vector_num = min(hdev->num_msi_left, vector_num);
29464139
29474140 for (j = 0; j < vector_num; j++) {
....@@ -2989,7 +4182,7 @@
29894182 vector_id = hclge_get_vector_index(hdev, vector);
29904183 if (vector_id < 0) {
29914184 dev_err(&hdev->pdev->dev,
2992
- "Get vector index fail. vector_id =%d\n", vector_id);
4185
+ "Get vector index fail. vector = %d\n", vector);
29934186 return vector_id;
29944187 }
29954188
....@@ -3012,29 +4205,28 @@
30124205 const u8 hfunc, const u8 *key)
30134206 {
30144207 struct hclge_rss_config_cmd *req;
4208
+ unsigned int key_offset = 0;
30154209 struct hclge_desc desc;
3016
- int key_offset;
4210
+ int key_counts;
30174211 int key_size;
30184212 int ret;
30194213
4214
+ key_counts = HCLGE_RSS_KEY_SIZE;
30204215 req = (struct hclge_rss_config_cmd *)desc.data;
30214216
3022
- for (key_offset = 0; key_offset < 3; key_offset++) {
4217
+ while (key_counts) {
30234218 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
30244219 false);
30254220
30264221 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
30274222 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
30284223
3029
- if (key_offset == 2)
3030
- key_size =
3031
- HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3032
- else
3033
- key_size = HCLGE_RSS_HASH_KEY_NUM;
3034
-
4224
+ key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
30354225 memcpy(req->hash_key,
30364226 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
30374227
4228
+ key_counts -= key_size;
4229
+ key_offset++;
30384230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
30394231 if (ret) {
30404232 dev_err(&hdev->pdev->dev,
....@@ -3109,6 +4301,22 @@
31094301 return ret;
31104302 }
31114303
4304
+static void hclge_get_rss_type(struct hclge_vport *vport)
4305
+{
4306
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
4307
+ vport->rss_tuple_sets.ipv4_udp_en ||
4308
+ vport->rss_tuple_sets.ipv4_sctp_en ||
4309
+ vport->rss_tuple_sets.ipv6_tcp_en ||
4310
+ vport->rss_tuple_sets.ipv6_udp_en ||
4311
+ vport->rss_tuple_sets.ipv6_sctp_en)
4312
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4313
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4314
+ vport->rss_tuple_sets.ipv6_fragment_en)
4315
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4316
+ else
4317
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4318
+}
4319
+
31124320 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
31134321 {
31144322 struct hclge_rss_input_tuple_cmd *req;
....@@ -3128,6 +4336,7 @@
31284336 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
31294337 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
31304338 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4339
+ hclge_get_rss_type(&hdev->vport[0]);
31314340 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
31324341 if (ret)
31334342 dev_err(&hdev->pdev->dev,
....@@ -3142,8 +4351,19 @@
31424351 int i;
31434352
31444353 /* Get hash algorithm */
3145
- if (hfunc)
3146
- *hfunc = vport->rss_algo;
4354
+ if (hfunc) {
4355
+ switch (vport->rss_algo) {
4356
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4357
+ *hfunc = ETH_RSS_HASH_TOP;
4358
+ break;
4359
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
4360
+ *hfunc = ETH_RSS_HASH_XOR;
4361
+ break;
4362
+ default:
4363
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
4364
+ break;
4365
+ }
4366
+ }
31474367
31484368 /* Get the RSS Key required by the user */
31494369 if (key)
....@@ -3157,6 +4377,24 @@
31574377 return 0;
31584378 }
31594379
4380
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4381
+ u8 *hash_algo)
4382
+{
4383
+ switch (hfunc) {
4384
+ case ETH_RSS_HASH_TOP:
4385
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4386
+ return 0;
4387
+ case ETH_RSS_HASH_XOR:
4388
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4389
+ return 0;
4390
+ case ETH_RSS_HASH_NO_CHANGE:
4391
+ *hash_algo = vport->rss_algo;
4392
+ return 0;
4393
+ default:
4394
+ return -EINVAL;
4395
+ }
4396
+}
4397
+
31604398 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
31614399 const u8 *key, const u8 hfunc)
31624400 {
....@@ -3165,22 +4403,27 @@
31654403 u8 hash_algo;
31664404 int ret, i;
31674405
4406
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4407
+ if (ret) {
4408
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4409
+ return ret;
4410
+ }
4411
+
31684412 /* Set the RSS Hash Key if specififed by the user */
31694413 if (key) {
3170
-
3171
- if (hfunc == ETH_RSS_HASH_TOP ||
3172
- hfunc == ETH_RSS_HASH_NO_CHANGE)
3173
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3174
- else
3175
- return -EINVAL;
31764414 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
31774415 if (ret)
31784416 return ret;
31794417
31804418 /* Update the shadow RSS key with user specified qids */
31814419 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3182
- vport->rss_algo = hash_algo;
4420
+ } else {
4421
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
4422
+ vport->rss_hash_key);
4423
+ if (ret)
4424
+ return ret;
31834425 }
4426
+ vport->rss_algo = hash_algo;
31844427
31854428 /* Update the shadow RSS table with user specified qids */
31864429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
....@@ -3259,8 +4502,8 @@
32594502 req->ipv4_sctp_en = tuple_sets;
32604503 break;
32614504 case SCTP_V6_FLOW:
3262
- if ((nfc->data & RXH_L4_B_0_1) ||
3263
- (nfc->data & RXH_L4_B_2_3))
4505
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4506
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
32644507 return -EINVAL;
32654508
32664509 req->ipv6_sctp_en = tuple_sets;
....@@ -3290,6 +4533,7 @@
32904533 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
32914534 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
32924535 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4536
+ hclge_get_rss_type(vport);
32934537 return 0;
32944538 }
32954539
....@@ -3356,13 +4600,14 @@
33564600 struct hclge_vport *vport = hdev->vport;
33574601 u8 *rss_indir = vport[0].rss_indirection_tbl;
33584602 u16 rss_size = vport[0].alloc_rss_size;
4603
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4604
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
33594605 u8 *key = vport[0].rss_hash_key;
33604606 u8 hfunc = vport[0].rss_algo;
3361
- u16 tc_offset[HCLGE_MAX_TC_NUM];
33624607 u16 tc_valid[HCLGE_MAX_TC_NUM];
3363
- u16 tc_size[HCLGE_MAX_TC_NUM];
33644608 u16 roundup_size;
3365
- int i, ret;
4609
+ unsigned int i;
4610
+ int ret;
33664611
33674612 ret = hclge_set_rss_indir_table(hdev, rss_indir);
33684613 if (ret)
....@@ -3382,7 +4627,7 @@
33824627 */
33834628 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
33844629 dev_err(&hdev->pdev->dev,
3385
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4630
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
33864631 rss_size);
33874632 return -EINVAL;
33884633 }
....@@ -3418,8 +4663,11 @@
34184663
34194664 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
34204665 {
4666
+ int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
34214667 struct hclge_vport *vport = hdev->vport;
3422
- int i;
4668
+
4669
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4670
+ rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
34234671
34244672 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
34254673 vport[i].rss_tuple_sets.ipv4_tcp_en =
....@@ -3435,13 +4683,16 @@
34354683 vport[i].rss_tuple_sets.ipv6_udp_en =
34364684 HCLGE_RSS_INPUT_TUPLE_OTHER;
34374685 vport[i].rss_tuple_sets.ipv6_sctp_en =
4686
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4687
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
34384688 HCLGE_RSS_INPUT_TUPLE_SCTP;
34394689 vport[i].rss_tuple_sets.ipv6_fragment_en =
34404690 HCLGE_RSS_INPUT_TUPLE_OTHER;
34414691
3442
- vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4692
+ vport[i].rss_algo = rss_algo;
34434693
3444
- netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
4694
+ memcpy(vport[i].rss_hash_key, hclge_hash_key,
4695
+ HCLGE_RSS_KEY_SIZE);
34454696 }
34464697
34474698 hclge_rss_indir_init_cfg(hdev);
....@@ -3454,8 +4705,8 @@
34544705 struct hclge_dev *hdev = vport->back;
34554706 struct hnae3_ring_chain_node *node;
34564707 struct hclge_desc desc;
3457
- struct hclge_ctrl_vector_chain_cmd *req
3458
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4708
+ struct hclge_ctrl_vector_chain_cmd *req =
4709
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
34594710 enum hclge_cmd_status status;
34604711 enum hclge_opcode_type op;
34614712 u16 tqp_type_and_id;
....@@ -3513,8 +4764,7 @@
35134764 return 0;
35144765 }
35154766
3516
-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3517
- int vector,
4767
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
35184768 struct hnae3_ring_chain_node *ring_chain)
35194769 {
35204770 struct hclge_vport *vport = hclge_get_vport(handle);
....@@ -3524,15 +4774,14 @@
35244774 vector_id = hclge_get_vector_index(hdev, vector);
35254775 if (vector_id < 0) {
35264776 dev_err(&hdev->pdev->dev,
3527
- "Get vector index fail. vector_id =%d\n", vector_id);
4777
+ "failed to get vector index. vector=%d\n", vector);
35284778 return vector_id;
35294779 }
35304780
35314781 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
35324782 }
35334783
3534
-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3535
- int vector,
4784
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
35364785 struct hnae3_ring_chain_node *ring_chain)
35374786 {
35384787 struct hclge_vport *vport = hclge_get_vport(handle);
....@@ -3553,14 +4802,13 @@
35534802 if (ret)
35544803 dev_err(&handle->pdev->dev,
35554804 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3556
- vector_id,
3557
- ret);
4805
+ vector_id, ret);
35584806
35594807 return ret;
35604808 }
35614809
3562
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3563
- struct hclge_promisc_param *param)
4810
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4811
+ struct hclge_promisc_param *param)
35644812 {
35654813 struct hclge_promisc_cfg_cmd *req;
35664814 struct hclge_desc desc;
....@@ -3582,13 +4830,15 @@
35824830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
35834831 if (ret)
35844832 dev_err(&hdev->pdev->dev,
3585
- "Set promisc mode fail, status is %d.\n", ret);
4833
+ "failed to set vport %d promisc mode, ret = %d.\n",
4834
+ param->vf_id, ret);
35864835
35874836 return ret;
35884837 }
35894838
3590
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3591
- bool en_mc, bool en_bc, int vport_id)
4839
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4840
+ bool en_uc, bool en_mc, bool en_bc,
4841
+ int vport_id)
35924842 {
35934843 if (!param)
35944844 return;
....@@ -3603,16 +4853,1707 @@
36034853 param->vf_id = vport_id;
36044854 }
36054855
3606
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3607
- bool en_mc_pmc)
4856
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4857
+ bool en_mc_pmc, bool en_bc_pmc)
36084858 {
3609
- struct hclge_vport *vport = hclge_get_vport(handle);
36104859 struct hclge_dev *hdev = vport->back;
36114860 struct hclge_promisc_param param;
36124861
3613
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
4862
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
36144863 vport->vport_id);
3615
- hclge_cmd_set_promisc_mode(hdev, &param);
4864
+ return hclge_cmd_set_promisc_mode(hdev, &param);
4865
+}
4866
+
4867
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4868
+ bool en_mc_pmc)
4869
+{
4870
+ struct hclge_vport *vport = hclge_get_vport(handle);
4871
+ struct hclge_dev *hdev = vport->back;
4872
+ bool en_bc_pmc = true;
4873
+
4874
+ /* For device whose version below V2, if broadcast promisc enabled,
4875
+ * vlan filter is always bypassed. So broadcast promisc should be
4876
+ * disabled until user enable promisc mode
4877
+ */
4878
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4879
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4880
+
4881
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4882
+ en_bc_pmc);
4883
+}
4884
+
4885
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4886
+{
4887
+ struct hclge_vport *vport = hclge_get_vport(handle);
4888
+ struct hclge_dev *hdev = vport->back;
4889
+
4890
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4891
+}
4892
+
4893
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4894
+{
4895
+ struct hclge_get_fd_mode_cmd *req;
4896
+ struct hclge_desc desc;
4897
+ int ret;
4898
+
4899
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4900
+
4901
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
4902
+
4903
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904
+ if (ret) {
4905
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4906
+ return ret;
4907
+ }
4908
+
4909
+ *fd_mode = req->mode;
4910
+
4911
+ return ret;
4912
+}
4913
+
4914
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4915
+ u32 *stage1_entry_num,
4916
+ u32 *stage2_entry_num,
4917
+ u16 *stage1_counter_num,
4918
+ u16 *stage2_counter_num)
4919
+{
4920
+ struct hclge_get_fd_allocation_cmd *req;
4921
+ struct hclge_desc desc;
4922
+ int ret;
4923
+
4924
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4925
+
4926
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4927
+
4928
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4929
+ if (ret) {
4930
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4931
+ ret);
4932
+ return ret;
4933
+ }
4934
+
4935
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4936
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4937
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4938
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4939
+
4940
+ return ret;
4941
+}
4942
+
4943
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4944
+ enum HCLGE_FD_STAGE stage_num)
4945
+{
4946
+ struct hclge_set_fd_key_config_cmd *req;
4947
+ struct hclge_fd_key_cfg *stage;
4948
+ struct hclge_desc desc;
4949
+ int ret;
4950
+
4951
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4952
+
4953
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4954
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
4955
+ req->stage = stage_num;
4956
+ req->key_select = stage->key_sel;
4957
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4958
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4959
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4960
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4961
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4962
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4963
+
4964
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965
+ if (ret)
4966
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4967
+
4968
+ return ret;
4969
+}
4970
+
4971
+static int hclge_init_fd_config(struct hclge_dev *hdev)
4972
+{
4973
+#define LOW_2_WORDS 0x03
4974
+ struct hclge_fd_key_cfg *key_cfg;
4975
+ int ret;
4976
+
4977
+ if (!hnae3_dev_fd_supported(hdev))
4978
+ return 0;
4979
+
4980
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4981
+ if (ret)
4982
+ return ret;
4983
+
4984
+ switch (hdev->fd_cfg.fd_mode) {
4985
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4986
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4987
+ break;
4988
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4989
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4990
+ break;
4991
+ default:
4992
+ dev_err(&hdev->pdev->dev,
4993
+ "Unsupported flow director mode %u\n",
4994
+ hdev->fd_cfg.fd_mode);
4995
+ return -EOPNOTSUPP;
4996
+ }
4997
+
4998
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4999
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
5000
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5001
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5002
+ key_cfg->outer_sipv6_word_en = 0;
5003
+ key_cfg->outer_dipv6_word_en = 0;
5004
+
5005
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5006
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5007
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5008
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5009
+
5010
+ /* If use max 400bit key, we can support tuples for ether type */
5011
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5012
+ key_cfg->tuple_active |=
5013
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5014
+
5015
+ /* roce_type is used to filter roce frames
5016
+ * dst_vport is used to specify the rule
5017
+ */
5018
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5019
+
5020
+ ret = hclge_get_fd_allocation(hdev,
5021
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5022
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5023
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5024
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5025
+ if (ret)
5026
+ return ret;
5027
+
5028
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5029
+}
5030
+
5031
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5032
+ int loc, u8 *key, bool is_add)
5033
+{
5034
+ struct hclge_fd_tcam_config_1_cmd *req1;
5035
+ struct hclge_fd_tcam_config_2_cmd *req2;
5036
+ struct hclge_fd_tcam_config_3_cmd *req3;
5037
+ struct hclge_desc desc[3];
5038
+ int ret;
5039
+
5040
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5041
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5042
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5043
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5044
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5045
+
5046
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5047
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5048
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5049
+
5050
+ req1->stage = stage;
5051
+ req1->xy_sel = sel_x ? 1 : 0;
5052
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5053
+ req1->index = cpu_to_le32(loc);
5054
+ req1->entry_vld = sel_x ? is_add : 0;
5055
+
5056
+ if (key) {
5057
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5058
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5059
+ sizeof(req2->tcam_data));
5060
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5061
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5062
+ }
5063
+
5064
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
5065
+ if (ret)
5066
+ dev_err(&hdev->pdev->dev,
5067
+ "config tcam key fail, ret=%d\n",
5068
+ ret);
5069
+
5070
+ return ret;
5071
+}
5072
+
5073
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5074
+ struct hclge_fd_ad_data *action)
5075
+{
5076
+ struct hclge_fd_ad_config_cmd *req;
5077
+ struct hclge_desc desc;
5078
+ u64 ad_data = 0;
5079
+ int ret;
5080
+
5081
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5082
+
5083
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
5084
+ req->index = cpu_to_le32(loc);
5085
+ req->stage = stage;
5086
+
5087
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5088
+ action->write_rule_id_to_bd);
5089
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5090
+ action->rule_id);
5091
+ ad_data <<= 32;
5092
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5093
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5094
+ action->forward_to_direct_queue);
5095
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5096
+ action->queue_id);
5097
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5098
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5099
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5100
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5101
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5102
+ action->counter_id);
5103
+
5104
+ req->ad_data = cpu_to_le64(ad_data);
5105
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5106
+ if (ret)
5107
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5108
+
5109
+ return ret;
5110
+}
5111
+
5112
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5113
+ struct hclge_fd_rule *rule)
5114
+{
5115
+ u16 tmp_x_s, tmp_y_s;
5116
+ u32 tmp_x_l, tmp_y_l;
5117
+ int i;
5118
+
5119
+ if (rule->unused_tuple & tuple_bit)
5120
+ return true;
5121
+
5122
+ switch (tuple_bit) {
5123
+ case BIT(INNER_DST_MAC):
5124
+ for (i = 0; i < ETH_ALEN; i++) {
5125
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5126
+ rule->tuples_mask.dst_mac[i]);
5127
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5128
+ rule->tuples_mask.dst_mac[i]);
5129
+ }
5130
+
5131
+ return true;
5132
+ case BIT(INNER_SRC_MAC):
5133
+ for (i = 0; i < ETH_ALEN; i++) {
5134
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5135
+ rule->tuples_mask.src_mac[i]);
5136
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5137
+ rule->tuples_mask.src_mac[i]);
5138
+ }
5139
+
5140
+ return true;
5141
+ case BIT(INNER_VLAN_TAG_FST):
5142
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5143
+ rule->tuples_mask.vlan_tag1);
5144
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5145
+ rule->tuples_mask.vlan_tag1);
5146
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5147
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5148
+
5149
+ return true;
5150
+ case BIT(INNER_ETH_TYPE):
5151
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
5152
+ rule->tuples_mask.ether_proto);
5153
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
5154
+ rule->tuples_mask.ether_proto);
5155
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5156
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5157
+
5158
+ return true;
5159
+ case BIT(INNER_IP_TOS):
5160
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5161
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5162
+
5163
+ return true;
5164
+ case BIT(INNER_IP_PROTO):
5165
+ calc_x(*key_x, rule->tuples.ip_proto,
5166
+ rule->tuples_mask.ip_proto);
5167
+ calc_y(*key_y, rule->tuples.ip_proto,
5168
+ rule->tuples_mask.ip_proto);
5169
+
5170
+ return true;
5171
+ case BIT(INNER_SRC_IP):
5172
+ calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5173
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
5174
+ calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5175
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
5176
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5177
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5178
+
5179
+ return true;
5180
+ case BIT(INNER_DST_IP):
5181
+ calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5182
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
5183
+ calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5184
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
5185
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5186
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5187
+
5188
+ return true;
5189
+ case BIT(INNER_SRC_PORT):
5190
+ calc_x(tmp_x_s, rule->tuples.src_port,
5191
+ rule->tuples_mask.src_port);
5192
+ calc_y(tmp_y_s, rule->tuples.src_port,
5193
+ rule->tuples_mask.src_port);
5194
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5195
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5196
+
5197
+ return true;
5198
+ case BIT(INNER_DST_PORT):
5199
+ calc_x(tmp_x_s, rule->tuples.dst_port,
5200
+ rule->tuples_mask.dst_port);
5201
+ calc_y(tmp_y_s, rule->tuples.dst_port,
5202
+ rule->tuples_mask.dst_port);
5203
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5204
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5205
+
5206
+ return true;
5207
+ default:
5208
+ return false;
5209
+ }
5210
+}
5211
+
5212
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5213
+ u8 vf_id, u8 network_port_id)
5214
+{
5215
+ u32 port_number = 0;
5216
+
5217
+ if (port_type == HOST_PORT) {
5218
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5219
+ pf_id);
5220
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5221
+ vf_id);
5222
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5223
+ } else {
5224
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5225
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
5226
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5227
+ }
5228
+
5229
+ return port_number;
5230
+}
5231
+
5232
/* Build the meta data portion of an FD TCAM key.
 *
 * Walks every meta data field enabled in @key_cfg, packs the active
 * fields (packet type, destination vport) into @meta_data from bit 0
 * upwards, then converts the packed word into the TCAM x/y encoding
 * and left-aligns the used bits (MSB first) into @key_x/@key_y.
 */
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		/* width in bits of this meta data field */
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			/* flag the entry as matching NIC (non-RoCE) packets */
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			/* NOTE(review): GENMASK(cur_pos + tuple_size, cur_pos)
			 * spans tuple_size + 1 bits, one more than the field
			 * width — confirm against the hardware key layout.
			 */
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	/* x/y TCAM encoding with a full 32-bit mask */
	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	/* left-align the cur_pos used bits at the MSB end of the word */
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
5269
+
5270
+/* A complete key is combined with meta data key and tuple key.
5271
+ * Meta data key is stored at the MSB region, and tuple key is stored at
5272
+ * the LSB region, unused bits will be filled 0.
5273
+ */
5274
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5275
+ struct hclge_fd_rule *rule)
5276
+{
5277
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5278
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5279
+ u8 *cur_key_x, *cur_key_y;
5280
+ u8 meta_data_region;
5281
+ u8 tuple_size;
5282
+ int ret;
5283
+ u32 i;
5284
+
5285
+ memset(key_x, 0, sizeof(key_x));
5286
+ memset(key_y, 0, sizeof(key_y));
5287
+ cur_key_x = key_x;
5288
+ cur_key_y = key_y;
5289
+
5290
+ for (i = 0 ; i < MAX_TUPLE; i++) {
5291
+ bool tuple_valid;
5292
+ u32 check_tuple;
5293
+
5294
+ tuple_size = tuple_key_info[i].key_length / 8;
5295
+ check_tuple = key_cfg->tuple_active & BIT(i);
5296
+
5297
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5298
+ cur_key_y, rule);
5299
+ if (tuple_valid) {
5300
+ cur_key_x += tuple_size;
5301
+ cur_key_y += tuple_size;
5302
+ }
5303
+ }
5304
+
5305
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5306
+ MAX_META_DATA_LENGTH / 8;
5307
+
5308
+ hclge_fd_convert_meta_data(key_cfg,
5309
+ (__le32 *)(key_x + meta_data_region),
5310
+ (__le32 *)(key_y + meta_data_region),
5311
+ rule);
5312
+
5313
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5314
+ true);
5315
+ if (ret) {
5316
+ dev_err(&hdev->pdev->dev,
5317
+ "fd key_y config fail, loc=%u, ret=%d\n",
5318
+ rule->queue_id, ret);
5319
+ return ret;
5320
+ }
5321
+
5322
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5323
+ true);
5324
+ if (ret)
5325
+ dev_err(&hdev->pdev->dev,
5326
+ "fd key_x config fail, loc=%u, ret=%d\n",
5327
+ rule->queue_id, ret);
5328
+ return ret;
5329
+}
5330
+
5331
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5332
+ struct hclge_fd_rule *rule)
5333
+{
5334
+ struct hclge_fd_ad_data ad_data;
5335
+
5336
+ ad_data.ad_id = rule->location;
5337
+
5338
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5339
+ ad_data.drop_packet = true;
5340
+ ad_data.forward_to_direct_queue = false;
5341
+ ad_data.queue_id = 0;
5342
+ } else {
5343
+ ad_data.drop_packet = false;
5344
+ ad_data.forward_to_direct_queue = true;
5345
+ ad_data.queue_id = rule->queue_id;
5346
+ }
5347
+
5348
+ ad_data.use_counter = false;
5349
+ ad_data.counter_id = 0;
5350
+
5351
+ ad_data.use_next_stage = false;
5352
+ ad_data.next_input_key = 0;
5353
+
5354
+ ad_data.write_rule_id_to_bd = true;
5355
+ ad_data.rule_id = rule->location;
5356
+
5357
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5358
+}
5359
+
5360
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5361
+ u32 *unused_tuple)
5362
+{
5363
+ if (!spec || !unused_tuple)
5364
+ return -EINVAL;
5365
+
5366
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5367
+
5368
+ if (!spec->ip4src)
5369
+ *unused_tuple |= BIT(INNER_SRC_IP);
5370
+
5371
+ if (!spec->ip4dst)
5372
+ *unused_tuple |= BIT(INNER_DST_IP);
5373
+
5374
+ if (!spec->psrc)
5375
+ *unused_tuple |= BIT(INNER_SRC_PORT);
5376
+
5377
+ if (!spec->pdst)
5378
+ *unused_tuple |= BIT(INNER_DST_PORT);
5379
+
5380
+ if (!spec->tos)
5381
+ *unused_tuple |= BIT(INNER_IP_TOS);
5382
+
5383
+ return 0;
5384
+}
5385
+
5386
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5387
+ u32 *unused_tuple)
5388
+{
5389
+ if (!spec || !unused_tuple)
5390
+ return -EINVAL;
5391
+
5392
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5393
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5394
+
5395
+ if (!spec->ip4src)
5396
+ *unused_tuple |= BIT(INNER_SRC_IP);
5397
+
5398
+ if (!spec->ip4dst)
5399
+ *unused_tuple |= BIT(INNER_DST_IP);
5400
+
5401
+ if (!spec->tos)
5402
+ *unused_tuple |= BIT(INNER_IP_TOS);
5403
+
5404
+ if (!spec->proto)
5405
+ *unused_tuple |= BIT(INNER_IP_PROTO);
5406
+
5407
+ if (spec->l4_4_bytes)
5408
+ return -EOPNOTSUPP;
5409
+
5410
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
5411
+ return -EOPNOTSUPP;
5412
+
5413
+ return 0;
5414
+}
5415
+
5416
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5417
+ u32 *unused_tuple)
5418
+{
5419
+ if (!spec || !unused_tuple)
5420
+ return -EINVAL;
5421
+
5422
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5423
+ BIT(INNER_IP_TOS);
5424
+
5425
+ /* check whether src/dst ip address used */
5426
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
5427
+ !spec->ip6src[2] && !spec->ip6src[3])
5428
+ *unused_tuple |= BIT(INNER_SRC_IP);
5429
+
5430
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5431
+ !spec->ip6dst[2] && !spec->ip6dst[3])
5432
+ *unused_tuple |= BIT(INNER_DST_IP);
5433
+
5434
+ if (!spec->psrc)
5435
+ *unused_tuple |= BIT(INNER_SRC_PORT);
5436
+
5437
+ if (!spec->pdst)
5438
+ *unused_tuple |= BIT(INNER_DST_PORT);
5439
+
5440
+ if (spec->tclass)
5441
+ return -EOPNOTSUPP;
5442
+
5443
+ return 0;
5444
+}
5445
+
5446
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5447
+ u32 *unused_tuple)
5448
+{
5449
+ if (!spec || !unused_tuple)
5450
+ return -EINVAL;
5451
+
5452
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5453
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5454
+
5455
+ /* check whether src/dst ip address used */
5456
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
5457
+ !spec->ip6src[2] && !spec->ip6src[3])
5458
+ *unused_tuple |= BIT(INNER_SRC_IP);
5459
+
5460
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5461
+ !spec->ip6dst[2] && !spec->ip6dst[3])
5462
+ *unused_tuple |= BIT(INNER_DST_IP);
5463
+
5464
+ if (!spec->l4_proto)
5465
+ *unused_tuple |= BIT(INNER_IP_PROTO);
5466
+
5467
+ if (spec->tclass)
5468
+ return -EOPNOTSUPP;
5469
+
5470
+ if (spec->l4_4_bytes)
5471
+ return -EOPNOTSUPP;
5472
+
5473
+ return 0;
5474
+}
5475
+
5476
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5477
+{
5478
+ if (!spec || !unused_tuple)
5479
+ return -EINVAL;
5480
+
5481
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5482
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5483
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5484
+
5485
+ if (is_zero_ether_addr(spec->h_source))
5486
+ *unused_tuple |= BIT(INNER_SRC_MAC);
5487
+
5488
+ if (is_zero_ether_addr(spec->h_dest))
5489
+ *unused_tuple |= BIT(INNER_DST_MAC);
5490
+
5491
+ if (!spec->h_proto)
5492
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
5493
+
5494
+ return 0;
5495
+}
5496
+
5497
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5498
+ struct ethtool_rx_flow_spec *fs,
5499
+ u32 *unused_tuple)
5500
+{
5501
+ if (fs->flow_type & FLOW_EXT) {
5502
+ if (fs->h_ext.vlan_etype) {
5503
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5504
+ return -EOPNOTSUPP;
5505
+ }
5506
+
5507
+ if (!fs->h_ext.vlan_tci)
5508
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5509
+
5510
+ if (fs->m_ext.vlan_tci &&
5511
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5512
+ dev_err(&hdev->pdev->dev,
5513
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5514
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5515
+ return -EINVAL;
5516
+ }
5517
+ } else {
5518
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5519
+ }
5520
+
5521
+ if (fs->flow_type & FLOW_MAC_EXT) {
5522
+ if (hdev->fd_cfg.fd_mode !=
5523
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5524
+ dev_err(&hdev->pdev->dev,
5525
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
5526
+ return -EOPNOTSUPP;
5527
+ }
5528
+
5529
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
5530
+ *unused_tuple |= BIT(INNER_DST_MAC);
5531
+ else
5532
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
5533
+ }
5534
+
5535
+ return 0;
5536
+}
5537
+
5538
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
5539
+ struct ethtool_rx_flow_spec *fs,
5540
+ u32 *unused_tuple)
5541
+{
5542
+ u32 flow_type;
5543
+ int ret;
5544
+
5545
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5546
+ dev_err(&hdev->pdev->dev,
5547
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5548
+ fs->location,
5549
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5550
+ return -EINVAL;
5551
+ }
5552
+
5553
+ if ((fs->flow_type & FLOW_EXT) &&
5554
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5555
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5556
+ return -EOPNOTSUPP;
5557
+ }
5558
+
5559
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5560
+ switch (flow_type) {
5561
+ case SCTP_V4_FLOW:
5562
+ case TCP_V4_FLOW:
5563
+ case UDP_V4_FLOW:
5564
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5565
+ unused_tuple);
5566
+ break;
5567
+ case IP_USER_FLOW:
5568
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5569
+ unused_tuple);
5570
+ break;
5571
+ case SCTP_V6_FLOW:
5572
+ case TCP_V6_FLOW:
5573
+ case UDP_V6_FLOW:
5574
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5575
+ unused_tuple);
5576
+ break;
5577
+ case IPV6_USER_FLOW:
5578
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5579
+ unused_tuple);
5580
+ break;
5581
+ case ETHER_FLOW:
5582
+ if (hdev->fd_cfg.fd_mode !=
5583
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5584
+ dev_err(&hdev->pdev->dev,
5585
+ "ETHER_FLOW is not supported in current fd mode!\n");
5586
+ return -EOPNOTSUPP;
5587
+ }
5588
+
5589
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5590
+ unused_tuple);
5591
+ break;
5592
+ default:
5593
+ dev_err(&hdev->pdev->dev,
5594
+ "unsupported protocol type, protocol type = %#x\n",
5595
+ flow_type);
5596
+ return -EOPNOTSUPP;
5597
+ }
5598
+
5599
+ if (ret) {
5600
+ dev_err(&hdev->pdev->dev,
5601
+ "failed to check flow union tuple, ret = %d\n",
5602
+ ret);
5603
+ return ret;
5604
+ }
5605
+
5606
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5607
+}
5608
+
5609
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5610
+{
5611
+ struct hclge_fd_rule *rule = NULL;
5612
+ struct hlist_node *node2;
5613
+
5614
+ spin_lock_bh(&hdev->fd_rule_lock);
5615
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5616
+ if (rule->location >= location)
5617
+ break;
5618
+ }
5619
+
5620
+ spin_unlock_bh(&hdev->fd_rule_lock);
5621
+
5622
+ return rule && rule->location == location;
5623
+}
5624
+
5625
/* make sure being called after lock up with fd_rule_lock */
/* Insert (@is_add) or remove a rule at @location in the software rule
 * list, which is kept sorted by location. Also maintains fd_bmap,
 * hclge_fd_rule_num and fd_active_type. Returns 0 or -EINVAL when
 * adding without a rule / deleting a nonexistent location.
 */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	/* find the insertion point (parent) and any rule already
	 * occupying @location; loop leaves rule == NULL if none reached
	 */
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		/* an existing rule at this location is always removed,
		 * whether we are deleting or replacing it
		 */
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %u is inexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	/* insert after @parent to keep the list sorted by location */
	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
5676
+
5677
/* Translate an ethtool flow spec (@fs) into the driver's internal
 * tuple/tuple-mask representation in @rule.
 *
 * Multi-byte fields are converted from big-endian wire order to host
 * order. For IPv4 flows only src_ip[IPV4_INDEX]/dst_ip[IPV4_INDEX] are
 * filled; IPv6 flows fill all IPV6_SIZE words. Returns -EOPNOTSUPP for
 * unknown flow types.
 */
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		/* L3/L4 IPv4 flows always match ethertype IPv4 exactly */
		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	/* the L4 protocol is implied by the flow type for TCP/UDP/SCTP */
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		/* FLOW_MAC_EXT overrides any dst MAC set by ETHER_FLOW */
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
5829
+
5830
+/* make sure being called after lock up with fd_rule_lock */
5831
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
5832
+ struct hclge_fd_rule *rule)
5833
+{
5834
+ int ret;
5835
+
5836
+ if (!rule) {
5837
+ dev_err(&hdev->pdev->dev,
5838
+ "The flow director rule is NULL\n");
5839
+ return -EINVAL;
5840
+ }
5841
+
5842
+ /* it will never fail here, so needn't to check return value */
5843
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5844
+
5845
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5846
+ if (ret)
5847
+ goto clear_rule;
5848
+
5849
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5850
+ if (ret)
5851
+ goto clear_rule;
5852
+
5853
+ return 0;
5854
+
5855
+clear_rule:
5856
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5857
+ return ret;
5858
+}
5859
+
5860
/* ethtool entry point for adding a flow director rule.
 *
 * Validates the spec, resolves the destination vport/queue from the
 * ring cookie, allocates and fills a rule, then installs it under
 * fd_rule_lock (clearing any aRFS rules first). On success ownership
 * of the allocated rule passes to the rule list; on any failure it is
 * freed here or by the rollback in hclge_fd_config_rule().
 */
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"flow table director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret)
		return ret;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		/* decode target VF and ring from the ethtool ring cookie */
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) > max vf num (%u)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		/* vf == 0 targets the PF's own vport */
		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflict, when user configure rule by ethtool,
	 * we need to clear all arfs rules
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
5948
+
5949
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
5950
+ struct ethtool_rxnfc *cmd)
5951
+{
5952
+ struct hclge_vport *vport = hclge_get_vport(handle);
5953
+ struct hclge_dev *hdev = vport->back;
5954
+ struct ethtool_rx_flow_spec *fs;
5955
+ int ret;
5956
+
5957
+ if (!hnae3_dev_fd_supported(hdev))
5958
+ return -EOPNOTSUPP;
5959
+
5960
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5961
+
5962
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5963
+ return -EINVAL;
5964
+
5965
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
5966
+ dev_err(&hdev->pdev->dev,
5967
+ "Delete fail, rule %u is inexistent\n", fs->location);
5968
+ return -ENOENT;
5969
+ }
5970
+
5971
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5972
+ NULL, false);
5973
+ if (ret)
5974
+ return ret;
5975
+
5976
+ spin_lock_bh(&hdev->fd_rule_lock);
5977
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5978
+
5979
+ spin_unlock_bh(&hdev->fd_rule_lock);
5980
+
5981
+ return ret;
5982
+}
5983
+
5984
+/* make sure being called after lock up with fd_rule_lock */
5985
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5986
+ bool clear_list)
5987
+{
5988
+ struct hclge_vport *vport = hclge_get_vport(handle);
5989
+ struct hclge_dev *hdev = vport->back;
5990
+ struct hclge_fd_rule *rule;
5991
+ struct hlist_node *node;
5992
+ u16 location;
5993
+
5994
+ if (!hnae3_dev_fd_supported(hdev))
5995
+ return;
5996
+
5997
+ for_each_set_bit(location, hdev->fd_bmap,
5998
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5999
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6000
+ NULL, false);
6001
+
6002
+ if (clear_list) {
6003
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6004
+ rule_node) {
6005
+ hlist_del(&rule->rule_node);
6006
+ kfree(rule);
6007
+ }
6008
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6009
+ hdev->hclge_fd_rule_num = 0;
6010
+ bitmap_zero(hdev->fd_bmap,
6011
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6012
+ }
6013
+}
6014
+
6015
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6016
+{
6017
+ struct hclge_vport *vport = hclge_get_vport(handle);
6018
+ struct hclge_dev *hdev = vport->back;
6019
+ struct hclge_fd_rule *rule;
6020
+ struct hlist_node *node;
6021
+ int ret;
6022
+
6023
+ /* Return ok here, because reset error handling will check this
6024
+ * return value. If error is returned here, the reset process will
6025
+ * fail.
6026
+ */
6027
+ if (!hnae3_dev_fd_supported(hdev))
6028
+ return 0;
6029
+
6030
+ /* if fd is disabled, should not restore it when reset */
6031
+ if (!hdev->fd_en)
6032
+ return 0;
6033
+
6034
+ spin_lock_bh(&hdev->fd_rule_lock);
6035
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6036
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6037
+ if (!ret)
6038
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6039
+
6040
+ if (ret) {
6041
+ dev_warn(&hdev->pdev->dev,
6042
+ "Restore rule %u failed, remove it\n",
6043
+ rule->location);
6044
+ clear_bit(rule->location, hdev->fd_bmap);
6045
+ hlist_del(&rule->rule_node);
6046
+ kfree(rule);
6047
+ hdev->hclge_fd_rule_num--;
6048
+ }
6049
+ }
6050
+
6051
+ if (hdev->hclge_fd_rule_num)
6052
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6053
+
6054
+ spin_unlock_bh(&hdev->fd_rule_lock);
6055
+
6056
+ return 0;
6057
+}
6058
+
6059
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6060
+ struct ethtool_rxnfc *cmd)
6061
+{
6062
+ struct hclge_vport *vport = hclge_get_vport(handle);
6063
+ struct hclge_dev *hdev = vport->back;
6064
+
6065
+ if (!hnae3_dev_fd_supported(hdev))
6066
+ return -EOPNOTSUPP;
6067
+
6068
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
6069
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070
+
6071
+ return 0;
6072
+}
6073
+
6074
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6075
+ struct ethtool_tcpip4_spec *spec,
6076
+ struct ethtool_tcpip4_spec *spec_mask)
6077
+{
6078
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6079
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6080
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6081
+
6082
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6083
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6084
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6085
+
6086
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
6087
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6088
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
6089
+
6090
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6091
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6092
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6093
+
6094
+ spec->tos = rule->tuples.ip_tos;
6095
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6096
+ 0 : rule->tuples_mask.ip_tos;
6097
+}
6098
+
6099
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6100
+ struct ethtool_usrip4_spec *spec,
6101
+ struct ethtool_usrip4_spec *spec_mask)
6102
+{
6103
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6104
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6105
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6106
+
6107
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6108
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6109
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6110
+
6111
+ spec->tos = rule->tuples.ip_tos;
6112
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6113
+ 0 : rule->tuples_mask.ip_tos;
6114
+
6115
+ spec->proto = rule->tuples.ip_proto;
6116
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6117
+ 0 : rule->tuples_mask.ip_proto;
6118
+
6119
+ spec->ip_ver = ETH_RX_NFC_IP4;
6120
+}
6121
+
6122
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6123
+ struct ethtool_tcpip6_spec *spec,
6124
+ struct ethtool_tcpip6_spec *spec_mask)
6125
+{
6126
+ cpu_to_be32_array(spec->ip6src,
6127
+ rule->tuples.src_ip, IPV6_SIZE);
6128
+ cpu_to_be32_array(spec->ip6dst,
6129
+ rule->tuples.dst_ip, IPV6_SIZE);
6130
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
6131
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6132
+ else
6133
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6134
+ IPV6_SIZE);
6135
+
6136
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
6137
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6138
+ else
6139
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6140
+ IPV6_SIZE);
6141
+
6142
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
6143
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6144
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
6145
+
6146
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6147
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6148
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6149
+}
6150
+
6151
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6152
+ struct ethtool_usrip6_spec *spec,
6153
+ struct ethtool_usrip6_spec *spec_mask)
6154
+{
6155
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6156
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6157
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
6158
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6159
+ else
6160
+ cpu_to_be32_array(spec_mask->ip6src,
6161
+ rule->tuples_mask.src_ip, IPV6_SIZE);
6162
+
6163
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
6164
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6165
+ else
6166
+ cpu_to_be32_array(spec_mask->ip6dst,
6167
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
6168
+
6169
+ spec->l4_proto = rule->tuples.ip_proto;
6170
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6171
+ 0 : rule->tuples_mask.ip_proto;
6172
+}
6173
+
6174
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6175
+ struct ethhdr *spec,
6176
+ struct ethhdr *spec_mask)
6177
+{
6178
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6179
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6180
+
6181
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6182
+ eth_zero_addr(spec_mask->h_source);
6183
+ else
6184
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6185
+
6186
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
6187
+ eth_zero_addr(spec_mask->h_dest);
6188
+ else
6189
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6190
+
6191
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6192
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6193
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6194
+}
6195
+
6196
/* Fill the FLOW_EXT (VLAN tag) and FLOW_MAC_EXT (dest MAC) extension
 * fields of an ethtool flow spec from a flow director rule. Only fields
 * whose extension flag is set in fs->flow_type are written.
 */
static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
				  struct hclge_fd_rule *rule)
{
	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
	}
}
6215
+
6216
/* ethtool ETHTOOL_GRXCLSRULE handler: look up the flow director rule at
 * cmd->fs.location and translate it back into an ethtool flow spec.
 *
 * Return: 0 on success, -EOPNOTSUPP if the device has no flow director,
 * -ENOENT if no rule exists at the requested location.
 */
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	/* hold fd_rule_lock across the whole lookup + copy so the rule
	 * cannot be deleted underneath us
	 */
	spin_lock_bh(&hdev->fd_rule_lock);

	/* rule_list is kept sorted by location, so stop at the first
	 * entry with location >= fs->location
	 */
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of fd rule has been checked before adding in to rule
	 * list. As other flow types have been handled, it must be ETHER_FLOW
	 * for the default case
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		/* encode queue id in the low bits and VF id in the high
		 * bits of ring_cookie, per the ethtool ring/VF convention
		 */
		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
6292
+
6293
/* ethtool ETHTOOL_GRXCLSRLALL handler: report the locations of all
 * configured flow director rules into rule_locs and the total rule
 * capacity into cmd->data.
 *
 * Return: 0 on success, -EOPNOTSUPP without flow director support,
 * -EMSGSIZE if more rules exist than cmd->rule_cnt allows.
 */
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		/* caller's buffer holds only cmd->rule_cnt entries */
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
6325
+
6326
/* Extract the fields used by aRFS rule matching (ether proto, L4 proto,
 * dst port, src/dst IP) from dissected flow keys into CPU-endian tuples.
 * For IPv4 only index 3 of the IP arrays is filled; the rest stay zero
 * from the caller's zero-initialized tuples.
 */
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	/* shorthands for the deeply nested in6 address words */
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}
6348
+
6349
+/* traverse all rules, check whether an existed rule has the same tuples */
6350
+static struct hclge_fd_rule *
6351
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6352
+ const struct hclge_fd_rule_tuples *tuples)
6353
+{
6354
+ struct hclge_fd_rule *rule = NULL;
6355
+ struct hlist_node *node;
6356
+
6357
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6358
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6359
+ return rule;
6360
+ }
6361
+
6362
+ return NULL;
6363
+}
6364
+
6365
/* Initialize an aRFS-originated flow director rule from the extracted
 * tuples: only dst MAC/port/IP-proto/IPs participate in matching, the
 * rest are marked unused; the tuple mask is set to exact-match (all 1s).
 */
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	/* aRFS only installs TCP/UDP flows over IPv4 or IPv6 */
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
6388
+
6389
/* ndo_rx_flow_steer backend: install or retarget a flow director rule
 * for an aRFS-steered flow.
 *
 * Return: the rule location (>= 0) on success, or a negative errno
 * (-EOPNOTSUPP, -ENOSPC, -ENOMEM, or a firmware error).
 */
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	/* when there is already fd rule existed add by user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check is there flow director filter existed for this flow,
	 * if not, create a new filter for it;
	 * if filter exist with different queue id, modify the filter;
	 * if filter exist with same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		/* GFP_ATOMIC: allocating under a BH spinlock */
		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	/* same destination queue: the existing rule already covers it */
	if (rule->queue_id == queue_id)
		return rule->location;

	/* retarget the existing rule; roll back queue_id on HW failure */
	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
6463
+
6464
/* Periodically reap aRFS rules whose flows have expired according to
 * rps_may_expire_flow(). Expired rules are unlinked under the lock and
 * collected on a local list, then removed from the TCAM and freed after
 * the lock is dropped (TCAM removal issues firmware commands).
 */
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	/* only reap when every installed rule came from aRFS */
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}
6495
+
6496
/* Remove every aRFS-installed flow director rule.
 * Caller must already hold hdev->fd_rule_lock.
 */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_dev *hdev = hclge_get_vport(handle)->back;

	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
		return;

	hclge_del_all_fd_entries(handle, true);
#endif
}
6507
+
6508
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6509
+{
6510
+ struct hclge_vport *vport = hclge_get_vport(handle);
6511
+ struct hclge_dev *hdev = vport->back;
6512
+
6513
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6514
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6515
+}
6516
+
6517
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6518
+{
6519
+ struct hclge_vport *vport = hclge_get_vport(handle);
6520
+ struct hclge_dev *hdev = vport->back;
6521
+
6522
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6523
+}
6524
+
6525
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6526
+{
6527
+ struct hclge_vport *vport = hclge_get_vport(handle);
6528
+ struct hclge_dev *hdev = vport->back;
6529
+
6530
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6531
+}
6532
+
6533
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6534
+{
6535
+ struct hclge_vport *vport = hclge_get_vport(handle);
6536
+ struct hclge_dev *hdev = vport->back;
6537
+
6538
+ return hdev->rst_stats.hw_reset_done_cnt;
6539
+}
6540
+
6541
/* Enable or disable the flow director. Disabling removes all rules
 * (clearing the rule list entirely only when they were aRFS-installed);
 * enabling restores the previously stored user rules.
 */
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	/* aRFS rules are transient, so drop the list itself on disable */
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

	if (!enable) {
		spin_lock_bh(&hdev->fd_rule_lock);
		hclge_del_all_fd_entries(handle, clear);
		spin_unlock_bh(&hdev->fd_rule_lock);
	} else {
		hclge_restore_fd_entries(handle);
	}
}
36176558
36186559 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
....@@ -3624,20 +6565,20 @@
36246565 int ret;
36256566
36266567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3627
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3628
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3629
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3630
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3631
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3632
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3633
- hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3634
- hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3635
- hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3636
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3637
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3638
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3639
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3640
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
6568
+
6569
+ if (enable) {
6570
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6571
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6572
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6573
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6574
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6575
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6576
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6577
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6578
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6579
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6580
+ }
6581
+
36416582 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
36426583
36436584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -3646,7 +6587,100 @@
36466587 "mac enable fail, ret =%d.\n", ret);
36476588 }
36486589
3649
-static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
6590
/* Read-modify-write the MAC/VLAN switch parameter of the given function:
 * only the bits selected by @param_mask are replaced with @switch_param.
 *
 * Return: 0 on success or the firmware command error code.
 */
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}
6625
+
6626
/* Poll the PHY until its link state equals @link_ret, giving up after
 * HCLGE_PHY_LINK_STATUS_NUM polls. Best-effort: errors and timeout are
 * not reported to the caller.
 */
static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}
6649
+
6650
/* Poll the MAC link status until it equals @link_ret.
 *
 * Return: 0 when the expected state is reached, a firmware error from
 * the status query, or -EBUSY after HCLGE_MAC_LINK_STATUS_NUM polls.
 */
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
	return -EBUSY;
}
6669
+
6670
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6671
+ bool is_phy)
6672
+{
6673
+ int link_ret;
6674
+
6675
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6676
+
6677
+ if (is_phy)
6678
+ hclge_phy_link_status_wait(hdev, link_ret);
6679
+
6680
+ return hclge_mac_link_status_wait(hdev, link_ret);
6681
+}
6682
+
6683
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
36506684 {
36516685 struct hclge_config_mac_mode_cmd *req;
36526686 struct hclge_desc desc;
....@@ -3666,8 +6700,6 @@
36666700 /* 2 Then setup the loopback flag */
36676701 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
36686702 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
3669
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
3670
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
36716703
36726704 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
36736705
....@@ -3682,22 +6714,38 @@
36826714 return ret;
36836715 }
36846716
3685
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
6717
+static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6718
+ enum hnae3_loop loop_mode)
36866719 {
36876720 #define HCLGE_SERDES_RETRY_MS 10
36886721 #define HCLGE_SERDES_RETRY_NUM 100
6722
+
36896723 struct hclge_serdes_lb_cmd *req;
36906724 struct hclge_desc desc;
36916725 int ret, i = 0;
6726
+ u8 loop_mode_b;
36926727
3693
- req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
6728
+ req = (struct hclge_serdes_lb_cmd *)desc.data;
36946729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
36956730
6731
+ switch (loop_mode) {
6732
+ case HNAE3_LOOP_SERIAL_SERDES:
6733
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6734
+ break;
6735
+ case HNAE3_LOOP_PARALLEL_SERDES:
6736
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6737
+ break;
6738
+ default:
6739
+ dev_err(&hdev->pdev->dev,
6740
+ "unsupported serdes loopback mode %d\n", loop_mode);
6741
+ return -ENOTSUPP;
6742
+ }
6743
+
36966744 if (en) {
3697
- req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
3698
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6745
+ req->enable = loop_mode_b;
6746
+ req->mask = loop_mode_b;
36996747 } else {
3700
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6748
+ req->mask = loop_mode_b;
37016749 }
37026750
37036751 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
....@@ -3727,12 +6775,87 @@
37276775 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
37286776 return -EIO;
37296777 }
3730
-
3731
- hclge_cfg_mac_mode(hdev, en);
3732
- return 0;
6778
+ return ret;
37336779 }
37346780
3735
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6781
/* Enable/disable serdes loopback, reconfigure the MAC accordingly, then
 * wait for the MAC link to reflect the new state.
 *
 * Return: 0 on success or a negative error from configuration/waiting.
 */
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	/* is_phy=false: only the MAC link state matters for serdes mode */
	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}
6799
+
6800
/* Put the PHY into loopback mode. The suspend/resume cycle beforehand
 * resets the PHY into a known state before phy_loopback() is applied.
 *
 * Return: 0 on success or a negative error from the PHY layer.
 */
static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}
6817
+
6818
/* Take the PHY out of loopback mode and suspend it again.
 *
 * Return: 0 on success or a negative error from the PHY layer.
 */
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}
6829
+
6830
/* Enable/disable PHY loopback, reconfigure the MAC to match, and wait
 * for both PHY and MAC link state to settle.
 *
 * Return: 0 on success, -ENOTSUPP when no PHY is attached, or a
 * negative error from the PHY layer / link wait.
 */
static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev)
		return -ENOTSUPP;

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	/* is_phy=true: wait on the PHY first, then the MAC */
	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}
6857
+
6858
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
37366859 int stream_id, bool enable)
37376860 {
37386861 struct hclge_desc desc;
....@@ -3743,7 +6866,8 @@
37436866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
37446867 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
37456868 req->stream_id = cpu_to_le16(stream_id);
3746
- req->enable |= enable << HCLGE_TQP_ENABLE_B;
6869
+ if (enable)
6870
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
37476871
37486872 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
37496873 if (ret)
....@@ -3756,15 +6880,34 @@
37566880 enum hnae3_loop loop_mode, bool en)
37576881 {
37586882 struct hclge_vport *vport = hclge_get_vport(handle);
6883
+ struct hnae3_knic_private_info *kinfo;
37596884 struct hclge_dev *hdev = vport->back;
37606885 int i, ret;
37616886
6887
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6888
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6889
+ * the same, the packets are looped back in the SSU. If SSU loopback
6890
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6891
+ */
6892
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6893
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6894
+
6895
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6896
+ HCLGE_SWITCH_ALW_LPBK_MASK);
6897
+ if (ret)
6898
+ return ret;
6899
+ }
6900
+
37626901 switch (loop_mode) {
3763
- case HNAE3_MAC_INTER_LOOP_MAC:
3764
- ret = hclge_set_mac_loopback(hdev, en);
6902
+ case HNAE3_LOOP_APP:
6903
+ ret = hclge_set_app_loopback(hdev, en);
37656904 break;
3766
- case HNAE3_MAC_INTER_LOOP_SERDES:
3767
- ret = hclge_set_serdes_loopback(hdev, en);
6905
+ case HNAE3_LOOP_SERIAL_SERDES:
6906
+ case HNAE3_LOOP_PARALLEL_SERDES:
6907
+ ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6908
+ break;
6909
+ case HNAE3_LOOP_PHY:
6910
+ ret = hclge_set_phy_loopback(hdev, en);
37686911 break;
37696912 default:
37706913 ret = -ENOTSUPP;
....@@ -3773,7 +6916,11 @@
37736916 break;
37746917 }
37756918
3776
- for (i = 0; i < vport->alloc_tqps; i++) {
6919
+ if (ret)
6920
+ return ret;
6921
+
6922
+ kinfo = &vport->nic.kinfo;
6923
+ for (i = 0; i < kinfo->num_tqps; i++) {
37776924 ret = hclge_tqp_enable(hdev, i, 0, en);
37786925 if (ret)
37796926 return ret;
....@@ -3782,17 +6929,65 @@
37826929 return 0;
37836930 }
37846931
6932
/* Disable every loopback mode (app, serial serdes, parallel serdes) to
 * restore the default datapath.
 *
 * Return: 0 on success or the first configuration error encountered.
 */
static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_serdes_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}
6947
+
37856948 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
37866949 {
37876950 struct hclge_vport *vport = hclge_get_vport(handle);
6951
+ struct hnae3_knic_private_info *kinfo;
37886952 struct hnae3_queue *queue;
37896953 struct hclge_tqp *tqp;
37906954 int i;
37916955
3792
- for (i = 0; i < vport->alloc_tqps; i++) {
6956
+ kinfo = &vport->nic.kinfo;
6957
+ for (i = 0; i < kinfo->num_tqps; i++) {
37936958 queue = handle->kinfo.tqp[i];
37946959 tqp = container_of(queue, struct hclge_tqp, q);
37956960 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6961
+ }
6962
+}
6963
+
6964
/* Busy-wait (with tiny sleeps) until any in-flight link update in the
 * service task finishes: stop when LINK_UPDATING clears, the service
 * task makes progress (serv_processed_cnt changes), or the poll budget
 * is exhausted.
 */
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}
+}
6976
+
6977
/* Start or stop the periodic service task. Stopping sets the DOWN flag
 * (so the task skips link updates) and waits out any update already in
 * flight rather than cancelling the task itself.
 */
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}
37986993
....@@ -3800,15 +6995,10 @@
38006995 {
38016996 struct hclge_vport *vport = hclge_get_vport(handle);
38026997 struct hclge_dev *hdev = vport->back;
3803
- int i;
3804
-
3805
- for (i = 0; i < vport->alloc_tqps; i++)
3806
- hclge_tqp_enable(hdev, i, 0, true);
38076998
38086999 /* mac enable */
38097000 hclge_cfg_mac_mode(hdev, true);
38107001 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3811
- mod_timer(&hdev->service_timer, jiffies + HZ);
38127002 hdev->hw.mac.link = 0;
38137003
38147004 /* reset tqp stats */
....@@ -3826,18 +7016,25 @@
38267016 int i;
38277017
38287018 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7019
+ spin_lock_bh(&hdev->fd_rule_lock);
7020
+ hclge_clear_arfs_rules(handle);
7021
+ spin_unlock_bh(&hdev->fd_rule_lock);
38297022
3830
- del_timer_sync(&hdev->service_timer);
3831
- cancel_work_sync(&hdev->service_task);
3832
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3833
-
3834
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
7023
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
7024
+ * so it only need to stop phy here.
7025
+ */
7026
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7027
+ hdev->reset_type != HNAE3_FUNC_RESET &&
7028
+ hdev->reset_type != HNAE3_FLR_RESET) {
38357029 hclge_mac_stop_phy(hdev);
7030
+ hclge_update_link_status(hdev);
38367031 return;
38377032 }
38387033
3839
- for (i = 0; i < vport->alloc_tqps; i++)
3840
- hclge_tqp_enable(hdev, i, 0, false);
7034
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
7035
+ hclge_reset_tqp(handle, i);
7036
+
7037
+ hclge_config_mac_tnl_int(hdev, false);
38417038
38427039 /* Mac disable */
38437040 hclge_cfg_mac_mode(hdev, false);
....@@ -3846,9 +7043,47 @@
38467043
38477044 /* reset tqp stats */
38487045 hclge_reset_tqp_stats(handle);
3849
- del_timer_sync(&hdev->service_timer);
3850
- cancel_work_sync(&hdev->service_task);
38517046 hclge_update_link_status(hdev);
7047
+}
7048
+
7049
/* Mark a vport alive and, if its tables were invalidated by a reset
 * (its bit is set in vport_config_block), restore them: per-VF MAC and
 * VLAN tables for VFs, the full HW table set for the PF (vport 0).
 *
 * Return: always 0.
 */
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}
7069
+
7070
/* Mark a vport no longer alive. */
void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}
7074
+
7075
/* Client-start hook: bring up the vport behind this handle. */
static int hclge_client_start(struct hnae3_handle *handle)
{
	return hclge_vport_start(hclge_get_vport(handle));
}
7081
+
7082
/* Client-stop hook: take down the vport behind this handle. */
static void hclge_client_stop(struct hnae3_handle *handle)
{
	hclge_vport_stop(hclge_get_vport(handle));
}
38537088
38547089 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
....@@ -3856,74 +7091,70 @@
38567091 enum hclge_mac_vlan_tbl_opcode op)
38577092 {
38587093 struct hclge_dev *hdev = vport->back;
3859
- int return_status = -EIO;
38607094
38617095 if (cmdq_resp) {
38627096 dev_err(&hdev->pdev->dev,
3863
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
7097
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
38647098 cmdq_resp);
38657099 return -EIO;
38667100 }
38677101
38687102 if (op == HCLGE_MAC_VLAN_ADD) {
3869
- if ((!resp_code) || (resp_code == 1)) {
3870
- return_status = 0;
3871
- } else if (resp_code == 2) {
3872
- return_status = -ENOSPC;
3873
- dev_err(&hdev->pdev->dev,
3874
- "add mac addr failed for uc_overflow.\n");
3875
- } else if (resp_code == 3) {
3876
- return_status = -ENOSPC;
3877
- dev_err(&hdev->pdev->dev,
3878
- "add mac addr failed for mc_overflow.\n");
3879
- } else {
3880
- dev_err(&hdev->pdev->dev,
3881
- "add mac addr failed for undefined, code=%d.\n",
3882
- resp_code);
3883
- }
7103
+ if (!resp_code || resp_code == 1)
7104
+ return 0;
7105
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7106
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
7107
+ return -ENOSPC;
7108
+
7109
+ dev_err(&hdev->pdev->dev,
7110
+ "add mac addr failed for undefined, code=%u.\n",
7111
+ resp_code);
7112
+ return -EIO;
38847113 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
38857114 if (!resp_code) {
3886
- return_status = 0;
7115
+ return 0;
38877116 } else if (resp_code == 1) {
3888
- return_status = -ENOENT;
38897117 dev_dbg(&hdev->pdev->dev,
38907118 "remove mac addr failed for miss.\n");
3891
- } else {
3892
- dev_err(&hdev->pdev->dev,
3893
- "remove mac addr failed for undefined, code=%d.\n",
3894
- resp_code);
7119
+ return -ENOENT;
38957120 }
7121
+
7122
+ dev_err(&hdev->pdev->dev,
7123
+ "remove mac addr failed for undefined, code=%u.\n",
7124
+ resp_code);
7125
+ return -EIO;
38967126 } else if (op == HCLGE_MAC_VLAN_LKUP) {
38977127 if (!resp_code) {
3898
- return_status = 0;
7128
+ return 0;
38997129 } else if (resp_code == 1) {
3900
- return_status = -ENOENT;
39017130 dev_dbg(&hdev->pdev->dev,
39027131 "lookup mac addr failed for miss.\n");
3903
- } else {
3904
- dev_err(&hdev->pdev->dev,
3905
- "lookup mac addr failed for undefined, code=%d.\n",
3906
- resp_code);
7132
+ return -ENOENT;
39077133 }
3908
- } else {
3909
- return_status = -EINVAL;
7134
+
39107135 dev_err(&hdev->pdev->dev,
3911
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3912
- op);
7136
+ "lookup mac addr failed for undefined, code=%u.\n",
7137
+ resp_code);
7138
+ return -EIO;
39137139 }
39147140
3915
- return return_status;
7141
+ dev_err(&hdev->pdev->dev,
7142
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7143
+
7144
+ return -EINVAL;
39167145 }
39177146
39187147 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
39197148 {
3920
- int word_num;
3921
- int bit_num;
7149
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7150
+
7151
+ unsigned int word_num;
7152
+ unsigned int bit_num;
39227153
39237154 if (vfid > 255 || vfid < 0)
39247155 return -EIO;
39257156
3926
- if (vfid >= 0 && vfid <= 191) {
7157
+ if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
39277158 word_num = vfid / 32;
39287159 bit_num = vfid % 32;
39297160 if (clr)
....@@ -3931,7 +7162,7 @@
39317162 else
39327163 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
39337164 } else {
3934
- word_num = (vfid - 192) / 32;
7165
+ word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
39357166 bit_num = vfid % 32;
39367167 if (clr)
39377168 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
....@@ -3957,183 +7188,21 @@
39577188 }
39587189
39597190 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3960
- const u8 *addr)
7191
+ const u8 *addr, bool is_mc)
39617192 {
39627193 const unsigned char *mac_addr = addr;
39637194 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
39647195 (mac_addr[0]) | (mac_addr[1] << 8);
39657196 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
39667197
7198
+ hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7199
+ if (is_mc) {
7200
+ hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7201
+ hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7202
+ }
7203
+
39677204 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
39687205 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3969
-}
3970
-
3971
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3972
- const u8 *addr)
3973
-{
3974
- u16 high_val = addr[1] | (addr[0] << 8);
3975
- struct hclge_dev *hdev = vport->back;
3976
- u32 rsh = 4 - hdev->mta_mac_sel_type;
3977
- u16 ret_val = (high_val >> rsh) & 0xfff;
3978
-
3979
- return ret_val;
3980
-}
3981
-
3982
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3983
- enum hclge_mta_dmac_sel_type mta_mac_sel,
3984
- bool enable)
3985
-{
3986
- struct hclge_mta_filter_mode_cmd *req;
3987
- struct hclge_desc desc;
3988
- int ret;
3989
-
3990
- req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3991
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3992
-
3993
- hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3994
- enable);
3995
- hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3996
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3997
-
3998
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3999
- if (ret)
4000
- dev_err(&hdev->pdev->dev,
4001
- "Config mat filter mode failed for cmd_send, ret =%d.\n",
4002
- ret);
4003
-
4004
- return ret;
4005
-}
4006
-
4007
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
4008
- u8 func_id,
4009
- bool enable)
4010
-{
4011
- struct hclge_cfg_func_mta_filter_cmd *req;
4012
- struct hclge_desc desc;
4013
- int ret;
4014
-
4015
- req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
4016
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
4017
-
4018
- hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
4019
- enable);
4020
- req->function_id = func_id;
4021
-
4022
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4023
- if (ret)
4024
- dev_err(&hdev->pdev->dev,
4025
- "Config func_id enable failed for cmd_send, ret =%d.\n",
4026
- ret);
4027
-
4028
- return ret;
4029
-}
4030
-
4031
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
4032
- u16 idx,
4033
- bool enable)
4034
-{
4035
- struct hclge_dev *hdev = vport->back;
4036
- struct hclge_cfg_func_mta_item_cmd *req;
4037
- struct hclge_desc desc;
4038
- u16 item_idx = 0;
4039
- int ret;
4040
-
4041
- req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
4042
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
4043
- hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
4044
-
4045
- hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
4046
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
4047
- req->item_idx = cpu_to_le16(item_idx);
4048
-
4049
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4050
- if (ret) {
4051
- dev_err(&hdev->pdev->dev,
4052
- "Config mta table item failed for cmd_send, ret =%d.\n",
4053
- ret);
4054
- return ret;
4055
- }
4056
-
4057
- if (enable)
4058
- set_bit(idx, vport->mta_shadow);
4059
- else
4060
- clear_bit(idx, vport->mta_shadow);
4061
-
4062
- return 0;
4063
-}
4064
-
4065
-static int hclge_update_mta_status(struct hnae3_handle *handle)
4066
-{
4067
- unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
4068
- struct hclge_vport *vport = hclge_get_vport(handle);
4069
- struct net_device *netdev = handle->kinfo.netdev;
4070
- struct netdev_hw_addr *ha;
4071
- u16 tbl_idx;
4072
-
4073
- memset(mta_status, 0, sizeof(mta_status));
4074
-
4075
- /* update mta_status from mc addr list */
4076
- netdev_for_each_mc_addr(ha, netdev) {
4077
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
4078
- set_bit(tbl_idx, mta_status);
4079
- }
4080
-
4081
- return hclge_update_mta_status_common(vport, mta_status,
4082
- 0, HCLGE_MTA_TBL_SIZE, true);
4083
-}
4084
-
4085
-int hclge_update_mta_status_common(struct hclge_vport *vport,
4086
- unsigned long *status,
4087
- u16 idx,
4088
- u16 count,
4089
- bool update_filter)
4090
-{
4091
- struct hclge_dev *hdev = vport->back;
4092
- u16 update_max = idx + count;
4093
- u16 check_max;
4094
- int ret = 0;
4095
- bool used;
4096
- u16 i;
4097
-
4098
- /* setup mta check range */
4099
- if (update_filter) {
4100
- i = 0;
4101
- check_max = HCLGE_MTA_TBL_SIZE;
4102
- } else {
4103
- i = idx;
4104
- check_max = update_max;
4105
- }
4106
-
4107
- used = false;
4108
- /* check and update all mta item */
4109
- for (; i < check_max; i++) {
4110
- /* ignore unused item */
4111
- if (!test_bit(i, vport->mta_shadow))
4112
- continue;
4113
-
4114
- /* if i in update range then update it */
4115
- if (i >= idx && i < update_max)
4116
- if (!test_bit(i - idx, status))
4117
- hclge_set_mta_table_item(vport, i, false);
4118
-
4119
- if (!used && test_bit(i, vport->mta_shadow))
4120
- used = true;
4121
- }
4122
-
4123
- /* no longer use mta, disable it */
4124
- if (vport->accept_mta_mc && update_filter && !used) {
4125
- ret = hclge_cfg_func_mta_filter(hdev,
4126
- vport->vport_id,
4127
- false);
4128
- if (ret)
4129
- dev_err(&hdev->pdev->dev,
4130
- "disable func mta filter fail ret=%d\n",
4131
- ret);
4132
- else
4133
- vport->accept_mta_mc = false;
4134
- }
4135
-
4136
- return ret;
41377206 }
41387207
41397208 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
....@@ -4259,12 +7328,203 @@
42597328 return cfg_status;
42607329 }
42617330
7331
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7332
+ u16 *allocated_size)
7333
+{
7334
+ struct hclge_umv_spc_alc_cmd *req;
7335
+ struct hclge_desc desc;
7336
+ int ret;
7337
+
7338
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7339
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7340
+
7341
+ req->space_size = cpu_to_le32(space_size);
7342
+
7343
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7344
+ if (ret) {
7345
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7346
+ ret);
7347
+ return ret;
7348
+ }
7349
+
7350
+ *allocated_size = le32_to_cpu(desc.data[1]);
7351
+
7352
+ return 0;
7353
+}
7354
+
7355
+static int hclge_init_umv_space(struct hclge_dev *hdev)
7356
+{
7357
+ u16 allocated_size = 0;
7358
+ int ret;
7359
+
7360
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7361
+ if (ret)
7362
+ return ret;
7363
+
7364
+ if (allocated_size < hdev->wanted_umv_size)
7365
+ dev_warn(&hdev->pdev->dev,
7366
+ "failed to alloc umv space, want %u, get %u\n",
7367
+ hdev->wanted_umv_size, allocated_size);
7368
+
7369
+ hdev->max_umv_size = allocated_size;
7370
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7371
+ hdev->share_umv_size = hdev->priv_umv_size +
7372
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7373
+
7374
+ return 0;
7375
+}
7376
+
7377
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
7378
+{
7379
+ struct hclge_vport *vport;
7380
+ int i;
7381
+
7382
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
7383
+ vport = &hdev->vport[i];
7384
+ vport->used_umv_num = 0;
7385
+ }
7386
+
7387
+ mutex_lock(&hdev->vport_lock);
7388
+ hdev->share_umv_size = hdev->priv_umv_size +
7389
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7390
+ mutex_unlock(&hdev->vport_lock);
7391
+}
7392
+
7393
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7394
+{
7395
+ struct hclge_dev *hdev = vport->back;
7396
+ bool is_full;
7397
+
7398
+ if (need_lock)
7399
+ mutex_lock(&hdev->vport_lock);
7400
+
7401
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7402
+ hdev->share_umv_size == 0);
7403
+
7404
+ if (need_lock)
7405
+ mutex_unlock(&hdev->vport_lock);
7406
+
7407
+ return is_full;
7408
+}
7409
+
7410
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7411
+{
7412
+ struct hclge_dev *hdev = vport->back;
7413
+
7414
+ if (is_free) {
7415
+ if (vport->used_umv_num > hdev->priv_umv_size)
7416
+ hdev->share_umv_size++;
7417
+
7418
+ if (vport->used_umv_num > 0)
7419
+ vport->used_umv_num--;
7420
+ } else {
7421
+ if (vport->used_umv_num >= hdev->priv_umv_size &&
7422
+ hdev->share_umv_size > 0)
7423
+ hdev->share_umv_size--;
7424
+ vport->used_umv_num++;
7425
+ }
7426
+}
7427
+
7428
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7429
+ const u8 *mac_addr)
7430
+{
7431
+ struct hclge_mac_node *mac_node, *tmp;
7432
+
7433
+ list_for_each_entry_safe(mac_node, tmp, list, node)
7434
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7435
+ return mac_node;
7436
+
7437
+ return NULL;
7438
+}
7439
+
7440
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7441
+ enum HCLGE_MAC_NODE_STATE state)
7442
+{
7443
+ switch (state) {
7444
+ /* from set_rx_mode or tmp_add_list */
7445
+ case HCLGE_MAC_TO_ADD:
7446
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
7447
+ mac_node->state = HCLGE_MAC_ACTIVE;
7448
+ break;
7449
+ /* only from set_rx_mode */
7450
+ case HCLGE_MAC_TO_DEL:
7451
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
7452
+ list_del(&mac_node->node);
7453
+ kfree(mac_node);
7454
+ } else {
7455
+ mac_node->state = HCLGE_MAC_TO_DEL;
7456
+ }
7457
+ break;
7458
+ /* only from tmp_add_list, the mac_node->state won't be
7459
+ * ACTIVE.
7460
+ */
7461
+ case HCLGE_MAC_ACTIVE:
7462
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
7463
+ mac_node->state = HCLGE_MAC_ACTIVE;
7464
+
7465
+ break;
7466
+ }
7467
+}
7468
+
7469
+int hclge_update_mac_list(struct hclge_vport *vport,
7470
+ enum HCLGE_MAC_NODE_STATE state,
7471
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
7472
+ const unsigned char *addr)
7473
+{
7474
+ struct hclge_dev *hdev = vport->back;
7475
+ struct hclge_mac_node *mac_node;
7476
+ struct list_head *list;
7477
+
7478
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479
+ &vport->uc_mac_list : &vport->mc_mac_list;
7480
+
7481
+ spin_lock_bh(&vport->mac_list_lock);
7482
+
7483
+ /* if the mac addr is already in the mac list, no need to add a new
7484
+ * one into it, just check the mac addr state, convert it to a new
7485
+ * new state, or just remove it, or do nothing.
7486
+ */
7487
+ mac_node = hclge_find_mac_node(list, addr);
7488
+ if (mac_node) {
7489
+ hclge_update_mac_node(mac_node, state);
7490
+ spin_unlock_bh(&vport->mac_list_lock);
7491
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7492
+ return 0;
7493
+ }
7494
+
7495
+ /* if this address is never added, unnecessary to delete */
7496
+ if (state == HCLGE_MAC_TO_DEL) {
7497
+ spin_unlock_bh(&vport->mac_list_lock);
7498
+ dev_err(&hdev->pdev->dev,
7499
+ "failed to delete address %pM from mac list\n",
7500
+ addr);
7501
+ return -ENOENT;
7502
+ }
7503
+
7504
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7505
+ if (!mac_node) {
7506
+ spin_unlock_bh(&vport->mac_list_lock);
7507
+ return -ENOMEM;
7508
+ }
7509
+
7510
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7511
+
7512
+ mac_node->state = state;
7513
+ ether_addr_copy(mac_node->mac_addr, addr);
7514
+ list_add_tail(&mac_node->node, list);
7515
+
7516
+ spin_unlock_bh(&vport->mac_list_lock);
7517
+
7518
+ return 0;
7519
+}
7520
+
42627521 static int hclge_add_uc_addr(struct hnae3_handle *handle,
42637522 const unsigned char *addr)
42647523 {
42657524 struct hclge_vport *vport = hclge_get_vport(handle);
42667525
4267
- return hclge_add_uc_addr_common(vport, addr);
7526
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7527
+ addr);
42687528 }
42697529
42707530 int hclge_add_uc_addr_common(struct hclge_vport *vport,
....@@ -4282,41 +7542,47 @@
42827542 is_multicast_ether_addr(addr)) {
42837543 dev_err(&hdev->pdev->dev,
42847544 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4285
- addr,
4286
- is_zero_ether_addr(addr),
7545
+ addr, is_zero_ether_addr(addr),
42877546 is_broadcast_ether_addr(addr),
42887547 is_multicast_ether_addr(addr));
42897548 return -EINVAL;
42907549 }
42917550
42927551 memset(&req, 0, sizeof(req));
4293
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
42947552
42957553 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
42967554 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
42977555
42987556 req.egress_port = cpu_to_le16(egress_port);
42997557
4300
- hclge_prepare_mac_addr(&req, addr);
7558
+ hclge_prepare_mac_addr(&req, addr, false);
43017559
43027560 /* Lookup the mac address in the mac_vlan table, and add
43037561 * it if the entry is inexistent. Repeated unicast entry
43047562 * is not allowed in the mac vlan table.
43057563 */
43067564 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
4307
- if (ret == -ENOENT)
4308
- return hclge_add_mac_vlan_tbl(vport, &req, NULL);
7565
+ if (ret == -ENOENT) {
7566
+ mutex_lock(&hdev->vport_lock);
7567
+ if (!hclge_is_umv_space_full(vport, false)) {
7568
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7569
+ if (!ret)
7570
+ hclge_update_umv_space(vport, false);
7571
+ mutex_unlock(&hdev->vport_lock);
7572
+ return ret;
7573
+ }
7574
+ mutex_unlock(&hdev->vport_lock);
43097575
4310
- /* check if we just hit the duplicate */
4311
- if (!ret) {
4312
- dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
4313
- vport->vport_id, addr);
4314
- return 0;
7576
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7577
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7578
+ hdev->priv_umv_size);
7579
+
7580
+ return -ENOSPC;
43157581 }
43167582
4317
- dev_err(&hdev->pdev->dev,
4318
- "PF failed to add unicast entry(%pM) in the MAC table\n",
4319
- addr);
7583
+ /* check if we just hit the duplicate */
7584
+ if (!ret)
7585
+ return -EEXIST;
43207586
43217587 return ret;
43227588 }
....@@ -4326,7 +7592,8 @@
43267592 {
43277593 struct hclge_vport *vport = hclge_get_vport(handle);
43287594
4329
- return hclge_rm_uc_addr_common(vport, addr);
7595
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7596
+ addr);
43307597 }
43317598
43327599 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
....@@ -4340,17 +7607,21 @@
43407607 if (is_zero_ether_addr(addr) ||
43417608 is_broadcast_ether_addr(addr) ||
43427609 is_multicast_ether_addr(addr)) {
4343
- dev_dbg(&hdev->pdev->dev,
4344
- "Remove mac err! invalid mac:%pM.\n",
4345
- addr);
7610
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7611
+ addr);
43467612 return -EINVAL;
43477613 }
43487614
43497615 memset(&req, 0, sizeof(req));
4350
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
43517616 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4352
- hclge_prepare_mac_addr(&req, addr);
7617
+ hclge_prepare_mac_addr(&req, addr, false);
43537618 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7619
+ if (!ret || ret == -ENOENT) {
7620
+ mutex_lock(&hdev->vport_lock);
7621
+ hclge_update_umv_space(vport, true);
7622
+ mutex_unlock(&hdev->vport_lock);
7623
+ return 0;
7624
+ }
43547625
43557626 return ret;
43567627 }
....@@ -4360,7 +7631,8 @@
43607631 {
43617632 struct hclge_vport *vport = hclge_get_vport(handle);
43627633
4363
- return hclge_add_mc_addr_common(vport, addr);
7634
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7635
+ addr);
43647636 }
43657637
43667638 int hclge_add_mc_addr_common(struct hclge_vport *vport,
....@@ -4369,7 +7641,6 @@
43697641 struct hclge_dev *hdev = vport->back;
43707642 struct hclge_mac_vlan_tbl_entry_cmd req;
43717643 struct hclge_desc desc[3];
4372
- u16 tbl_idx;
43737644 int status;
43747645
43757646 /* mac addr check */
....@@ -4380,44 +7651,23 @@
43807651 return -EINVAL;
43817652 }
43827653 memset(&req, 0, sizeof(req));
4383
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4384
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4385
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4386
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4387
- hclge_prepare_mac_addr(&req, addr);
7654
+ hclge_prepare_mac_addr(&req, addr, true);
43887655 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4389
- if (!status) {
4390
- /* This mac addr exist, update VFID for it */
4391
- hclge_update_desc_vfid(desc, vport->vport_id, false);
4392
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4393
- } else {
7656
+ if (status) {
43947657 /* This mac addr do not exist, add new entry for it */
43957658 memset(desc[0].data, 0, sizeof(desc[0].data));
43967659 memset(desc[1].data, 0, sizeof(desc[0].data));
43977660 memset(desc[2].data, 0, sizeof(desc[0].data));
4398
- hclge_update_desc_vfid(desc, vport->vport_id, false);
4399
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
44007661 }
7662
+ status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7663
+ if (status)
7664
+ return status;
7665
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
44017666
4402
- /* If mc mac vlan table is full, use MTA table */
4403
- if (status == -ENOSPC) {
4404
- if (!vport->accept_mta_mc) {
4405
- status = hclge_cfg_func_mta_filter(hdev,
4406
- vport->vport_id,
4407
- true);
4408
- if (status) {
4409
- dev_err(&hdev->pdev->dev,
4410
- "set mta filter mode fail ret=%d\n",
4411
- status);
4412
- return status;
4413
- }
4414
- vport->accept_mta_mc = true;
4415
- }
4416
-
4417
- /* Set MTA table for this MAC address */
4418
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
4419
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
4420
- }
7667
+ /* if already overflow, not to print each time */
7668
+ if (status == -ENOSPC &&
7669
+ !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7670
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
44217671
44227672 return status;
44237673 }
....@@ -4427,7 +7677,8 @@
44277677 {
44287678 struct hclge_vport *vport = hclge_get_vport(handle);
44297679
4430
- return hclge_rm_mc_addr_common(vport, addr);
7680
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7681
+ addr);
44317682 }
44327683
44337684 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
....@@ -4447,15 +7698,13 @@
44477698 }
44487699
44497700 memset(&req, 0, sizeof(req));
4450
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4451
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4452
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4453
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4454
- hclge_prepare_mac_addr(&req, addr);
7701
+ hclge_prepare_mac_addr(&req, addr, true);
44557702 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
44567703 if (!status) {
44577704 /* This mac addr exist, remove this handle's VFID for it */
4458
- hclge_update_desc_vfid(desc, vport->vport_id, true);
7705
+ status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7706
+ if (status)
7707
+ return status;
44597708
44607709 if (hclge_is_all_function_id_zero(desc))
44617710 /* All the vfid is zero, so need to delete this entry */
....@@ -4464,17 +7713,361 @@
44647713 /* Not all the vfid is zero, update the vfid */
44657714 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
44667715
4467
- } else {
4468
- /* Maybe this mac address is in mta table, but it cannot be
4469
- * deleted here because an entry of mta represents an address
4470
- * range rather than a specific address. the delete action to
4471
- * all entries will take effect in update_mta_status called by
4472
- * hns3_nic_set_rx_mode.
4473
- */
7716
+ } else if (status == -ENOENT) {
44747717 status = 0;
44757718 }
44767719
44777720 return status;
7721
+}
7722
+
7723
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7724
+ struct list_head *list,
7725
+ int (*sync)(struct hclge_vport *,
7726
+ const unsigned char *))
7727
+{
7728
+ struct hclge_mac_node *mac_node, *tmp;
7729
+ int ret;
7730
+
7731
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
7732
+ ret = sync(vport, mac_node->mac_addr);
7733
+ if (!ret) {
7734
+ mac_node->state = HCLGE_MAC_ACTIVE;
7735
+ } else {
7736
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7737
+ &vport->state);
7738
+
7739
+ /* If one unicast mac address is existing in hardware,
7740
+ * we need to try whether other unicast mac addresses
7741
+ * are new addresses that can be added.
7742
+ */
7743
+ if (ret != -EEXIST)
7744
+ break;
7745
+ }
7746
+ }
7747
+}
7748
+
7749
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7750
+ struct list_head *list,
7751
+ int (*unsync)(struct hclge_vport *,
7752
+ const unsigned char *))
7753
+{
7754
+ struct hclge_mac_node *mac_node, *tmp;
7755
+ int ret;
7756
+
7757
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
7758
+ ret = unsync(vport, mac_node->mac_addr);
7759
+ if (!ret || ret == -ENOENT) {
7760
+ list_del(&mac_node->node);
7761
+ kfree(mac_node);
7762
+ } else {
7763
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7764
+ &vport->state);
7765
+ break;
7766
+ }
7767
+ }
7768
+}
7769
+
7770
+static bool hclge_sync_from_add_list(struct list_head *add_list,
7771
+ struct list_head *mac_list)
7772
+{
7773
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
7774
+ bool all_added = true;
7775
+
7776
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7777
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
7778
+ all_added = false;
7779
+
7780
+ /* if the mac address from tmp_add_list is not in the
7781
+ * uc/mc_mac_list, it means have received a TO_DEL request
7782
+ * during the time window of adding the mac address into mac
7783
+ * table. if mac_node state is ACTIVE, then change it to TO_DEL,
7784
+ * then it will be removed at next time. else it must be TO_ADD,
7785
+ * this address hasn't been added into mac table,
7786
+ * so just remove the mac node.
7787
+ */
7788
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7789
+ if (new_node) {
7790
+ hclge_update_mac_node(new_node, mac_node->state);
7791
+ list_del(&mac_node->node);
7792
+ kfree(mac_node);
7793
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7794
+ mac_node->state = HCLGE_MAC_TO_DEL;
7795
+ list_del(&mac_node->node);
7796
+ list_add_tail(&mac_node->node, mac_list);
7797
+ } else {
7798
+ list_del(&mac_node->node);
7799
+ kfree(mac_node);
7800
+ }
7801
+ }
7802
+
7803
+ return all_added;
7804
+}
7805
+
7806
+static void hclge_sync_from_del_list(struct list_head *del_list,
7807
+ struct list_head *mac_list)
7808
+{
7809
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
7810
+
7811
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7812
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7813
+ if (new_node) {
7814
+ /* If the mac addr exists in the mac list, it means
7815
+ * received a new TO_ADD request during the time window
7816
+ * of configuring the mac address. For the mac node
7817
+ * state is TO_ADD, and the address is already in the
7818
+ * in the hardware(due to delete fail), so we just need
7819
+ * to change the mac node state to ACTIVE.
7820
+ */
7821
+ new_node->state = HCLGE_MAC_ACTIVE;
7822
+ list_del(&mac_node->node);
7823
+ kfree(mac_node);
7824
+ } else {
7825
+ list_del(&mac_node->node);
7826
+ list_add_tail(&mac_node->node, mac_list);
7827
+ }
7828
+ }
7829
+}
7830
+
7831
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
7832
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
7833
+ bool is_all_added)
7834
+{
7835
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
7836
+ if (is_all_added)
7837
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7838
+ else
7839
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7840
+ } else {
7841
+ if (is_all_added)
7842
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7843
+ else
7844
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7845
+ }
7846
+}
7847
+
7848
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7849
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
7850
+{
7851
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
7852
+ struct list_head tmp_add_list, tmp_del_list;
7853
+ struct list_head *list;
7854
+ bool all_added;
7855
+
7856
+ INIT_LIST_HEAD(&tmp_add_list);
7857
+ INIT_LIST_HEAD(&tmp_del_list);
7858
+
7859
+ /* move the mac addr to the tmp_add_list and tmp_del_list, then
7860
+ * we can add/delete these mac addr outside the spin lock
7861
+ */
7862
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7863
+ &vport->uc_mac_list : &vport->mc_mac_list;
7864
+
7865
+ spin_lock_bh(&vport->mac_list_lock);
7866
+
7867
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
7868
+ switch (mac_node->state) {
7869
+ case HCLGE_MAC_TO_DEL:
7870
+ list_del(&mac_node->node);
7871
+ list_add_tail(&mac_node->node, &tmp_del_list);
7872
+ break;
7873
+ case HCLGE_MAC_TO_ADD:
7874
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7875
+ if (!new_node)
7876
+ goto stop_traverse;
7877
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7878
+ new_node->state = mac_node->state;
7879
+ list_add_tail(&new_node->node, &tmp_add_list);
7880
+ break;
7881
+ default:
7882
+ break;
7883
+ }
7884
+ }
7885
+
7886
+stop_traverse:
7887
+ spin_unlock_bh(&vport->mac_list_lock);
7888
+
7889
+ /* delete first, in order to get max mac table space for adding */
7890
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
7891
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7892
+ hclge_rm_uc_addr_common);
7893
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
7894
+ hclge_add_uc_addr_common);
7895
+ } else {
7896
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7897
+ hclge_rm_mc_addr_common);
7898
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
7899
+ hclge_add_mc_addr_common);
7900
+ }
7901
+
7902
+ /* if some mac addresses were added/deleted fail, move back to the
7903
+ * mac_list, and retry at next time.
7904
+ */
7905
+ spin_lock_bh(&vport->mac_list_lock);
7906
+
7907
+ hclge_sync_from_del_list(&tmp_del_list, list);
7908
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7909
+
7910
+ spin_unlock_bh(&vport->mac_list_lock);
7911
+
7912
+ hclge_update_overflow_flags(vport, mac_type, all_added);
7913
+}
7914
+
7915
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7916
+{
7917
+ struct hclge_dev *hdev = vport->back;
7918
+
7919
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
7920
+ return false;
7921
+
7922
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7923
+ return true;
7924
+
7925
+ return false;
7926
+}
7927
+
7928
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
7929
+{
7930
+ int i;
7931
+
7932
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
7933
+ struct hclge_vport *vport = &hdev->vport[i];
7934
+
7935
+ if (!hclge_need_sync_mac_table(vport))
7936
+ continue;
7937
+
7938
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7939
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7940
+ }
7941
+}
7942
+
7943
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7944
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
7945
+{
7946
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7947
+ struct hclge_mac_node *mac_cfg, *tmp;
7948
+ struct hclge_dev *hdev = vport->back;
7949
+ struct list_head tmp_del_list, *list;
7950
+ int ret;
7951
+
7952
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
7953
+ list = &vport->uc_mac_list;
7954
+ unsync = hclge_rm_uc_addr_common;
7955
+ } else {
7956
+ list = &vport->mc_mac_list;
7957
+ unsync = hclge_rm_mc_addr_common;
7958
+ }
7959
+
7960
+ INIT_LIST_HEAD(&tmp_del_list);
7961
+
7962
+ if (!is_del_list)
7963
+ set_bit(vport->vport_id, hdev->vport_config_block);
7964
+
7965
+ spin_lock_bh(&vport->mac_list_lock);
7966
+
7967
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7968
+ switch (mac_cfg->state) {
7969
+ case HCLGE_MAC_TO_DEL:
7970
+ case HCLGE_MAC_ACTIVE:
7971
+ list_del(&mac_cfg->node);
7972
+ list_add_tail(&mac_cfg->node, &tmp_del_list);
7973
+ break;
7974
+ case HCLGE_MAC_TO_ADD:
7975
+ if (is_del_list) {
7976
+ list_del(&mac_cfg->node);
7977
+ kfree(mac_cfg);
7978
+ }
7979
+ break;
7980
+ }
7981
+ }
7982
+
7983
+ spin_unlock_bh(&vport->mac_list_lock);
7984
+
7985
+ list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7986
+ ret = unsync(vport, mac_cfg->mac_addr);
7987
+ if (!ret || ret == -ENOENT) {
7988
+ /* clear all mac addr from hardware, but remain these
7989
+ * mac addr in the mac list, and restore them after
7990
+ * vf reset finished.
7991
+ */
7992
+ if (!is_del_list &&
7993
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
7994
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
7995
+ } else {
7996
+ list_del(&mac_cfg->node);
7997
+ kfree(mac_cfg);
7998
+ }
7999
+ } else if (is_del_list) {
8000
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
8001
+ }
8002
+ }
8003
+
8004
+ spin_lock_bh(&vport->mac_list_lock);
8005
+
8006
+ hclge_sync_from_del_list(&tmp_del_list, list);
8007
+
8008
+ spin_unlock_bh(&vport->mac_list_lock);
8009
+}
8010
+
8011
+/* remove all mac address when uninitailize */
8012
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8013
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
8014
+{
8015
+ struct hclge_mac_node *mac_node, *tmp;
8016
+ struct hclge_dev *hdev = vport->back;
8017
+ struct list_head tmp_del_list, *list;
8018
+
8019
+ INIT_LIST_HEAD(&tmp_del_list);
8020
+
8021
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8022
+ &vport->uc_mac_list : &vport->mc_mac_list;
8023
+
8024
+ spin_lock_bh(&vport->mac_list_lock);
8025
+
8026
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
8027
+ switch (mac_node->state) {
8028
+ case HCLGE_MAC_TO_DEL:
8029
+ case HCLGE_MAC_ACTIVE:
8030
+ list_del(&mac_node->node);
8031
+ list_add_tail(&mac_node->node, &tmp_del_list);
8032
+ break;
8033
+ case HCLGE_MAC_TO_ADD:
8034
+ list_del(&mac_node->node);
8035
+ kfree(mac_node);
8036
+ break;
8037
+ }
8038
+ }
8039
+
8040
+ spin_unlock_bh(&vport->mac_list_lock);
8041
+
8042
+ if (mac_type == HCLGE_MAC_ADDR_UC)
8043
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8044
+ hclge_rm_uc_addr_common);
8045
+ else
8046
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8047
+ hclge_rm_mc_addr_common);
8048
+
8049
+ if (!list_empty(&tmp_del_list))
8050
+ dev_warn(&hdev->pdev->dev,
8051
+ "uninit %s mac list for vport %u not completely.\n",
8052
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8053
+ vport->vport_id);
8054
+
8055
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8056
+ list_del(&mac_node->node);
8057
+ kfree(mac_node);
8058
+ }
8059
+}
8060
+
8061
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8062
+{
8063
+ struct hclge_vport *vport;
8064
+ int i;
8065
+
8066
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
8067
+ vport = &hdev->vport[i];
8068
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8069
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8070
+ }
44788071 }
44798072
44808073 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
....@@ -4489,7 +8082,7 @@
44898082
44908083 if (cmdq_resp) {
44918084 dev_err(&hdev->pdev->dev,
4492
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
8085
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
44938086 cmdq_resp);
44948087 return -EIO;
44958088 }
....@@ -4511,12 +8104,79 @@
45118104 break;
45128105 default:
45138106 dev_err(&hdev->pdev->dev,
4514
- "add mac ethertype failed for undefined, code=%d.\n",
8107
+ "add mac ethertype failed for undefined, code=%u.\n",
45158108 resp_code);
45168109 return_status = -EIO;
45178110 }
45188111
45198112 return return_status;
8113
+}
8114
+
8115
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8116
+ u8 *mac_addr)
8117
+{
8118
+ struct hclge_mac_vlan_tbl_entry_cmd req;
8119
+ struct hclge_dev *hdev = vport->back;
8120
+ struct hclge_desc desc;
8121
+ u16 egress_port = 0;
8122
+ int i;
8123
+
8124
+ if (is_zero_ether_addr(mac_addr))
8125
+ return false;
8126
+
8127
+ memset(&req, 0, sizeof(req));
8128
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8129
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8130
+ req.egress_port = cpu_to_le16(egress_port);
8131
+ hclge_prepare_mac_addr(&req, mac_addr, false);
8132
+
8133
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8134
+ return true;
8135
+
8136
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
8137
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8138
+ if (i != vf_idx &&
8139
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8140
+ return true;
8141
+
8142
+ return false;
8143
+}
8144
+
8145
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8146
+ u8 *mac_addr)
8147
+{
8148
+ struct hclge_vport *vport = hclge_get_vport(handle);
8149
+ struct hclge_dev *hdev = vport->back;
8150
+
8151
+ vport = hclge_get_vf_vport(hdev, vf);
8152
+ if (!vport)
8153
+ return -EINVAL;
8154
+
8155
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8156
+ dev_info(&hdev->pdev->dev,
8157
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
8158
+ mac_addr);
8159
+ return 0;
8160
+ }
8161
+
8162
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8163
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8164
+ mac_addr);
8165
+ return -EEXIST;
8166
+ }
8167
+
8168
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
8169
+
8170
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8171
+ dev_info(&hdev->pdev->dev,
8172
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8173
+ vf, mac_addr);
8174
+ return hclge_inform_reset_assert_to_vf(vport);
8175
+ }
8176
+
8177
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8178
+ vf, mac_addr);
8179
+ return 0;
45208180 }
45218181
45228182 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
....@@ -4570,12 +8230,57 @@
45708230 ether_addr_copy(p, hdev->hw.mac.mac_addr);
45718231 }
45728232
8233
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8234
+ const u8 *old_addr, const u8 *new_addr)
8235
+{
8236
+ struct list_head *list = &vport->uc_mac_list;
8237
+ struct hclge_mac_node *old_node, *new_node;
8238
+
8239
+ new_node = hclge_find_mac_node(list, new_addr);
8240
+ if (!new_node) {
8241
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8242
+ if (!new_node)
8243
+ return -ENOMEM;
8244
+
8245
+ new_node->state = HCLGE_MAC_TO_ADD;
8246
+ ether_addr_copy(new_node->mac_addr, new_addr);
8247
+ list_add(&new_node->node, list);
8248
+ } else {
8249
+ if (new_node->state == HCLGE_MAC_TO_DEL)
8250
+ new_node->state = HCLGE_MAC_ACTIVE;
8251
+
8252
+ /* make sure the new addr is in the list head, avoid dev
8253
+ * addr may be not re-added into mac table for the umv space
8254
+ * limitation after global/imp reset which will clear mac
8255
+ * table by hardware.
8256
+ */
8257
+ list_move(&new_node->node, list);
8258
+ }
8259
+
8260
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8261
+ old_node = hclge_find_mac_node(list, old_addr);
8262
+ if (old_node) {
8263
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
8264
+ list_del(&old_node->node);
8265
+ kfree(old_node);
8266
+ } else {
8267
+ old_node->state = HCLGE_MAC_TO_DEL;
8268
+ }
8269
+ }
8270
+ }
8271
+
8272
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8273
+
8274
+ return 0;
8275
+}
8276
+
45738277 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
45748278 bool is_first)
45758279 {
45768280 const unsigned char *new_addr = (const unsigned char *)p;
45778281 struct hclge_vport *vport = hclge_get_vport(handle);
45788282 struct hclge_dev *hdev = vport->back;
8283
+ unsigned char *old_addr = NULL;
45798284 int ret;
45808285
45818286 /* mac addr check */
....@@ -4583,58 +8288,86 @@
45838288 is_broadcast_ether_addr(new_addr) ||
45848289 is_multicast_ether_addr(new_addr)) {
45858290 dev_err(&hdev->pdev->dev,
4586
- "Change uc mac err! invalid mac:%p.\n",
8291
+ "change uc mac err! invalid mac: %pM.\n",
45878292 new_addr);
45888293 return -EINVAL;
4589
- }
4590
-
4591
- if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
4592
- dev_warn(&hdev->pdev->dev,
4593
- "remove old uc mac address fail.\n");
4594
-
4595
- ret = hclge_add_uc_addr(handle, new_addr);
4596
- if (ret) {
4597
- dev_err(&hdev->pdev->dev,
4598
- "add uc mac address fail, ret =%d.\n",
4599
- ret);
4600
-
4601
- if (!is_first &&
4602
- hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
4603
- dev_err(&hdev->pdev->dev,
4604
- "restore uc mac address fail.\n");
4605
-
4606
- return -EIO;
46078294 }
46088295
46098296 ret = hclge_pause_addr_cfg(hdev, new_addr);
46108297 if (ret) {
46118298 dev_err(&hdev->pdev->dev,
4612
- "configure mac pause address fail, ret =%d.\n",
8299
+ "failed to configure mac pause address, ret = %d\n",
46138300 ret);
4614
- return -EIO;
8301
+ return ret;
46158302 }
46168303
8304
+ if (!is_first)
8305
+ old_addr = hdev->hw.mac.mac_addr;
8306
+
8307
+ spin_lock_bh(&vport->mac_list_lock);
8308
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8309
+ if (ret) {
8310
+ dev_err(&hdev->pdev->dev,
8311
+ "failed to change the mac addr:%pM, ret = %d\n",
8312
+ new_addr, ret);
8313
+ spin_unlock_bh(&vport->mac_list_lock);
8314
+
8315
+ if (!is_first)
8316
+ hclge_pause_addr_cfg(hdev, old_addr);
8317
+
8318
+ return ret;
8319
+ }
8320
+ /* we must update dev addr with spin lock protect, preventing dev addr
8321
+ * being removed by set_rx_mode path.
8322
+ */
46178323 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8324
+ spin_unlock_bh(&vport->mac_list_lock);
8325
+
8326
+ hclge_task_schedule(hdev, 0);
46188327
46198328 return 0;
46208329 }
46218330
8331
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8332
+ int cmd)
8333
+{
8334
+ struct hclge_vport *vport = hclge_get_vport(handle);
8335
+ struct hclge_dev *hdev = vport->back;
8336
+
8337
+ if (!hdev->hw.mac.phydev)
8338
+ return -EOPNOTSUPP;
8339
+
8340
+ return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8341
+}
8342
+
46228343 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4623
- bool filter_en)
8344
+ u8 fe_type, bool filter_en, u8 vf_id)
46248345 {
46258346 struct hclge_vlan_filter_ctrl_cmd *req;
46268347 struct hclge_desc desc;
46278348 int ret;
46288349
4629
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4630
-
8350
+ /* read current vlan filter parameter */
8351
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
46318352 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
46328353 req->vlan_type = vlan_type;
4633
- req->vlan_fe = filter_en;
8354
+ req->vf_id = vf_id;
8355
+
8356
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8357
+ if (ret) {
8358
+ dev_err(&hdev->pdev->dev,
8359
+ "failed to get vlan filter config, ret = %d.\n", ret);
8360
+ return ret;
8361
+ }
8362
+
8363
+ /* modify and write new config parameter */
8364
+ hclge_cmd_reuse_desc(&desc, false);
8365
+ req->vlan_fe = filter_en ?
8366
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
46348367
46358368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
46368369 if (ret)
4637
- dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
8370
+ dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
46388371 ret);
46398372
46408373 return ret;
....@@ -4642,26 +8375,62 @@
46428375
46438376 #define HCLGE_FILTER_TYPE_VF 0
46448377 #define HCLGE_FILTER_TYPE_PORT 1
8378
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8379
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8380
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8381
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8382
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8383
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8384
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8385
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8386
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
46458387
46468388 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
46478389 {
46488390 struct hclge_vport *vport = hclge_get_vport(handle);
46498391 struct hclge_dev *hdev = vport->back;
46508392
4651
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
8393
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8394
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8395
+ HCLGE_FILTER_FE_EGRESS, enable, 0);
8396
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8397
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
8398
+ } else {
8399
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8400
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8401
+ 0);
8402
+ }
8403
+ if (enable)
8404
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
8405
+ else
8406
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
46528407 }
46538408
4654
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4655
- bool is_kill, u16 vlan, u8 qos,
8409
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8410
+ bool is_kill, u16 vlan,
46568411 __be16 proto)
46578412 {
4658
-#define HCLGE_MAX_VF_BYTES 16
8413
+ struct hclge_vport *vport = &hdev->vport[vfid];
46598414 struct hclge_vlan_filter_vf_cfg_cmd *req0;
46608415 struct hclge_vlan_filter_vf_cfg_cmd *req1;
46618416 struct hclge_desc desc[2];
46628417 u8 vf_byte_val;
46638418 u8 vf_byte_off;
46648419 int ret;
8420
+
8421
+ /* if vf vlan table is full, firmware will close vf vlan filter, it
8422
+ * is unable and unnecessary to add new vlan id to vf vlan filter.
8423
+ * If spoof check is enable, and vf vlan is full, it shouldn't add
8424
+ * new vlan, because tx packets with these vlan id will be dropped.
8425
+ */
8426
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8427
+ if (vport->vf_info.spoofchk && vlan) {
8428
+ dev_err(&hdev->pdev->dev,
8429
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8430
+ return -EPERM;
8431
+ }
8432
+ return 0;
8433
+ }
46658434
46668435 hclge_cmd_setup_basic_desc(&desc[0],
46678436 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
....@@ -4698,28 +8467,30 @@
46988467 return 0;
46998468
47008469 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8470
+ set_bit(vfid, hdev->vf_vlan_full);
47018471 dev_warn(&hdev->pdev->dev,
47028472 "vf vlan table is full, vf vlan filter is disabled\n");
47038473 return 0;
47048474 }
47058475
47068476 dev_err(&hdev->pdev->dev,
4707
- "Add vf vlan filter fail, ret =%d.\n",
8477
+ "Add vf vlan filter fail, ret =%u.\n",
47088478 req0->resp_code);
47098479 } else {
47108480 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
47118481 if (!req0->resp_code)
47128482 return 0;
47138483
4714
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
4715
- dev_warn(&hdev->pdev->dev,
4716
- "vlan %d filter is not in vf vlan table\n",
4717
- vlan);
8484
+ /* vf vlan filter is disabled when vf vlan table is full,
8485
+ * then new vlan id will not be added into vf vlan table.
8486
+ * Just return 0 without warning, avoid massive verbose
8487
+ * print logs when unload.
8488
+ */
8489
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
47188490 return 0;
4719
- }
47208491
47218492 dev_err(&hdev->pdev->dev,
4722
- "Kill vf vlan filter fail, ret =%d.\n",
8493
+ "Kill vf vlan filter fail, ret =%u.\n",
47238494 req0->resp_code);
47248495 }
47258496
....@@ -4738,9 +8509,10 @@
47388509
47398510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
47408511
4741
- vlan_offset_160 = vlan_id / 160;
4742
- vlan_offset_byte = (vlan_id % 160) / 8;
4743
- vlan_offset_byte_val = 1 << (vlan_id % 8);
8512
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8513
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8514
+ HCLGE_VLAN_BYTE_SIZE;
8515
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
47448516
47458517 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
47468518 req->vlan_offset = vlan_offset_160;
....@@ -4755,7 +8527,7 @@
47558527 }
47568528
47578529 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
4758
- u16 vport_id, u16 vlan_id, u8 qos,
8530
+ u16 vport_id, u16 vlan_id,
47598531 bool is_kill)
47608532 {
47618533 u16 vport_idx, vport_num = 0;
....@@ -4765,10 +8537,10 @@
47658537 return 0;
47668538
47678539 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
4768
- 0, proto);
8540
+ proto);
47698541 if (ret) {
47708542 dev_err(&hdev->pdev->dev,
4771
- "Set %d vport vlan filter config fail, ret =%d.\n",
8543
+ "Set %u vport vlan filter config fail, ret =%d.\n",
47728544 vport_id, ret);
47738545 return ret;
47748546 }
....@@ -4780,7 +8552,7 @@
47808552
47818553 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
47828554 dev_err(&hdev->pdev->dev,
4783
- "Add port vlan failed, vport %d is already in vlan %d\n",
8555
+ "Add port vlan failed, vport %u is already in vlan %u\n",
47848556 vport_id, vlan_id);
47858557 return -EINVAL;
47868558 }
....@@ -4788,7 +8560,7 @@
47888560 if (is_kill &&
47898561 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
47908562 dev_err(&hdev->pdev->dev,
4791
- "Delete port vlan failed, vport %d is not in vlan %d\n",
8563
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
47928564 vport_id, vlan_id);
47938565 return -EINVAL;
47948566 }
....@@ -4801,30 +8573,6 @@
48018573 is_kill);
48028574
48038575 return ret;
4804
-}
4805
-
4806
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
4807
- u16 vlan_id, bool is_kill)
4808
-{
4809
- struct hclge_vport *vport = hclge_get_vport(handle);
4810
- struct hclge_dev *hdev = vport->back;
4811
-
4812
- return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
4813
- 0, is_kill);
4814
-}
4815
-
4816
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4817
- u16 vlan, u8 qos, __be16 proto)
4818
-{
4819
- struct hclge_vport *vport = hclge_get_vport(handle);
4820
- struct hclge_dev *hdev = vport->back;
4821
-
4822
- if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4823
- return -EINVAL;
4824
- if (proto != htons(ETH_P_8021Q))
4825
- return -EPROTONOSUPPORT;
4826
-
4827
- return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
48288576 }
48298577
48308578 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
....@@ -4906,6 +8654,52 @@
49068654 return status;
49078655 }
49088656
8657
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8658
+ u16 port_base_vlan_state,
8659
+ u16 vlan_tag)
8660
+{
8661
+ int ret;
8662
+
8663
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8664
+ vport->txvlan_cfg.accept_tag1 = true;
8665
+ vport->txvlan_cfg.insert_tag1_en = false;
8666
+ vport->txvlan_cfg.default_tag1 = 0;
8667
+ } else {
8668
+ vport->txvlan_cfg.accept_tag1 = false;
8669
+ vport->txvlan_cfg.insert_tag1_en = true;
8670
+ vport->txvlan_cfg.default_tag1 = vlan_tag;
8671
+ }
8672
+
8673
+ vport->txvlan_cfg.accept_untag1 = true;
8674
+
8675
+ /* accept_tag2 and accept_untag2 are not supported on
8676
+ * pdev revision(0x20), new revision support them,
8677
+ * this two fields can not be configured by user.
8678
+ */
8679
+ vport->txvlan_cfg.accept_tag2 = true;
8680
+ vport->txvlan_cfg.accept_untag2 = true;
8681
+ vport->txvlan_cfg.insert_tag2_en = false;
8682
+ vport->txvlan_cfg.default_tag2 = 0;
8683
+
8684
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8685
+ vport->rxvlan_cfg.strip_tag1_en = false;
8686
+ vport->rxvlan_cfg.strip_tag2_en =
8687
+ vport->rxvlan_cfg.rx_vlan_offload_en;
8688
+ } else {
8689
+ vport->rxvlan_cfg.strip_tag1_en =
8690
+ vport->rxvlan_cfg.rx_vlan_offload_en;
8691
+ vport->rxvlan_cfg.strip_tag2_en = true;
8692
+ }
8693
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8694
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8695
+
8696
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
8697
+ if (ret)
8698
+ return ret;
8699
+
8700
+ return hclge_set_vlan_rx_offload_cfg(vport);
8701
+}
8702
+
49098703 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
49108704 {
49118705 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
....@@ -4934,7 +8728,7 @@
49348728
49358729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
49368730
4937
- tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
8731
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
49388732 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
49398733 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
49408734
....@@ -4951,18 +8745,38 @@
49518745 {
49528746 #define HCLGE_DEF_VLAN_TYPE 0x8100
49538747
4954
- struct hnae3_handle *handle;
8748
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
49558749 struct hclge_vport *vport;
49568750 int ret;
49578751 int i;
49588752
4959
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4960
- if (ret)
4961
- return ret;
8753
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8754
+ /* for revision 0x21, vf vlan filter is per function */
8755
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
8756
+ vport = &hdev->vport[i];
8757
+ ret = hclge_set_vlan_filter_ctrl(hdev,
8758
+ HCLGE_FILTER_TYPE_VF,
8759
+ HCLGE_FILTER_FE_EGRESS,
8760
+ true,
8761
+ vport->vport_id);
8762
+ if (ret)
8763
+ return ret;
8764
+ }
49628765
4963
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4964
- if (ret)
4965
- return ret;
8766
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8767
+ HCLGE_FILTER_FE_INGRESS, true,
8768
+ 0);
8769
+ if (ret)
8770
+ return ret;
8771
+ } else {
8772
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8773
+ HCLGE_FILTER_FE_EGRESS_V1_B,
8774
+ true, 0);
8775
+ if (ret)
8776
+ return ret;
8777
+ }
8778
+
8779
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
49668780
49678781 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
49688782 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
....@@ -4976,102 +8790,532 @@
49768790 return ret;
49778791
49788792 for (i = 0; i < hdev->num_alloc_vport; i++) {
8793
+ u16 vlan_tag;
8794
+
49798795 vport = &hdev->vport[i];
4980
- vport->txvlan_cfg.accept_tag1 = true;
4981
- vport->txvlan_cfg.accept_untag1 = true;
8796
+ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
49828797
4983
- /* accept_tag2 and accept_untag2 are not supported on
4984
- * pdev revision(0x20), new revision support them. The
4985
- * value of this two fields will not return error when driver
4986
- * send command to fireware in revision(0x20).
4987
- * This two fields can not configured by user.
4988
- */
4989
- vport->txvlan_cfg.accept_tag2 = true;
4990
- vport->txvlan_cfg.accept_untag2 = true;
4991
-
4992
- vport->txvlan_cfg.insert_tag1_en = false;
4993
- vport->txvlan_cfg.insert_tag2_en = false;
4994
- vport->txvlan_cfg.default_tag1 = 0;
4995
- vport->txvlan_cfg.default_tag2 = 0;
4996
-
4997
- ret = hclge_set_vlan_tx_offload_cfg(vport);
4998
- if (ret)
4999
- return ret;
5000
-
5001
- vport->rxvlan_cfg.strip_tag1_en = false;
5002
- vport->rxvlan_cfg.strip_tag2_en = true;
5003
- vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5004
- vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5005
-
5006
- ret = hclge_set_vlan_rx_offload_cfg(vport);
8798
+ ret = hclge_vlan_offload_cfg(vport,
8799
+ vport->port_base_vlan_cfg.state,
8800
+ vlan_tag);
50078801 if (ret)
50088802 return ret;
50098803 }
50108804
5011
- handle = &hdev->vport[0].nic;
50128805 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8806
+}
8807
+
8808
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8809
+ bool writen_to_tbl)
8810
+{
8811
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8812
+
8813
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8814
+ if (vlan->vlan_id == vlan_id)
8815
+ return;
8816
+
8817
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8818
+ if (!vlan)
8819
+ return;
8820
+
8821
+ vlan->hd_tbl_status = writen_to_tbl;
8822
+ vlan->vlan_id = vlan_id;
8823
+
8824
+ list_add_tail(&vlan->node, &vport->vlan_list);
8825
+}
8826
+
8827
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8828
+{
8829
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8830
+ struct hclge_dev *hdev = vport->back;
8831
+ int ret;
8832
+
8833
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8834
+ if (!vlan->hd_tbl_status) {
8835
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8836
+ vport->vport_id,
8837
+ vlan->vlan_id, false);
8838
+ if (ret) {
8839
+ dev_err(&hdev->pdev->dev,
8840
+ "restore vport vlan list failed, ret=%d\n",
8841
+ ret);
8842
+ return ret;
8843
+ }
8844
+ }
8845
+ vlan->hd_tbl_status = true;
8846
+ }
8847
+
8848
+ return 0;
8849
+}
8850
+
8851
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8852
+ bool is_write_tbl)
8853
+{
8854
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8855
+ struct hclge_dev *hdev = vport->back;
8856
+
8857
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8858
+ if (vlan->vlan_id == vlan_id) {
8859
+ if (is_write_tbl && vlan->hd_tbl_status)
8860
+ hclge_set_vlan_filter_hw(hdev,
8861
+ htons(ETH_P_8021Q),
8862
+ vport->vport_id,
8863
+ vlan_id,
8864
+ true);
8865
+
8866
+ list_del(&vlan->node);
8867
+ kfree(vlan);
8868
+ break;
8869
+ }
8870
+ }
8871
+}
8872
+
8873
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8874
+{
8875
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8876
+ struct hclge_dev *hdev = vport->back;
8877
+
8878
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8879
+ if (vlan->hd_tbl_status)
8880
+ hclge_set_vlan_filter_hw(hdev,
8881
+ htons(ETH_P_8021Q),
8882
+ vport->vport_id,
8883
+ vlan->vlan_id,
8884
+ true);
8885
+
8886
+ vlan->hd_tbl_status = false;
8887
+ if (is_del_list) {
8888
+ list_del(&vlan->node);
8889
+ kfree(vlan);
8890
+ }
8891
+ }
8892
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
8893
+}
8894
+
8895
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8896
+{
8897
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8898
+ struct hclge_vport *vport;
8899
+ int i;
8900
+
8901
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
8902
+ vport = &hdev->vport[i];
8903
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8904
+ list_del(&vlan->node);
8905
+ kfree(vlan);
8906
+ }
8907
+ }
8908
+}
8909
+
8910
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8911
+{
8912
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
8913
+ struct hclge_dev *hdev = vport->back;
8914
+ u16 vlan_proto;
8915
+ u16 vlan_id;
8916
+ u16 state;
8917
+ int ret;
8918
+
8919
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8920
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8921
+ state = vport->port_base_vlan_cfg.state;
8922
+
8923
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8924
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8925
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8926
+ vport->vport_id, vlan_id,
8927
+ false);
8928
+ return;
8929
+ }
8930
+
8931
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8932
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8933
+ vport->vport_id,
8934
+ vlan->vlan_id, false);
8935
+ if (ret)
8936
+ break;
8937
+ vlan->hd_tbl_status = true;
8938
+ }
8939
+}
8940
+
8941
+/* For global reset and imp reset, hardware will clear the mac table,
8942
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
8943
+ * can be restored in the service task after reset complete. Furtherly,
8944
+ * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
8945
+ * be restored after reset, so just remove these mac nodes from mac_list.
8946
+ */
8947
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
8948
+{
8949
+ struct hclge_mac_node *mac_node, *tmp;
8950
+
8951
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
8952
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
8953
+ mac_node->state = HCLGE_MAC_TO_ADD;
8954
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8955
+ list_del(&mac_node->node);
8956
+ kfree(mac_node);
8957
+ }
8958
+ }
8959
+}
8960
+
8961
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
8962
+{
8963
+ spin_lock_bh(&vport->mac_list_lock);
8964
+
8965
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8966
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8967
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8968
+
8969
+ spin_unlock_bh(&vport->mac_list_lock);
8970
+}
8971
+
8972
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
8973
+{
8974
+ struct hclge_vport *vport = &hdev->vport[0];
8975
+ struct hnae3_handle *handle = &vport->nic;
8976
+
8977
+ hclge_restore_mac_table_common(vport);
8978
+ hclge_restore_vport_vlan_table(vport);
8979
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8980
+
8981
+ hclge_restore_fd_entries(handle);
50138982 }
50148983
50158984 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
50168985 {
50178986 struct hclge_vport *vport = hclge_get_vport(handle);
50188987
5019
- vport->rxvlan_cfg.strip_tag1_en = false;
5020
- vport->rxvlan_cfg.strip_tag2_en = enable;
8988
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8989
+ vport->rxvlan_cfg.strip_tag1_en = false;
8990
+ vport->rxvlan_cfg.strip_tag2_en = enable;
8991
+ } else {
8992
+ vport->rxvlan_cfg.strip_tag1_en = enable;
8993
+ vport->rxvlan_cfg.strip_tag2_en = true;
8994
+ }
50218995 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
50228996 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8997
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
50238998
50248999 return hclge_set_vlan_rx_offload_cfg(vport);
50259000 }
50269001
5027
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
9002
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9003
+ u16 port_base_vlan_state,
9004
+ struct hclge_vlan_info *new_info,
9005
+ struct hclge_vlan_info *old_info)
9006
+{
9007
+ struct hclge_dev *hdev = vport->back;
9008
+ int ret;
9009
+
9010
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9011
+ hclge_rm_vport_all_vlan_table(vport, false);
9012
+ return hclge_set_vlan_filter_hw(hdev,
9013
+ htons(new_info->vlan_proto),
9014
+ vport->vport_id,
9015
+ new_info->vlan_tag,
9016
+ false);
9017
+ }
9018
+
9019
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9020
+ vport->vport_id, old_info->vlan_tag,
9021
+ true);
9022
+ if (ret)
9023
+ return ret;
9024
+
9025
+ return hclge_add_vport_all_vlan_table(vport);
9026
+}
9027
+
9028
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9029
+ struct hclge_vlan_info *vlan_info)
9030
+{
9031
+ struct hnae3_handle *nic = &vport->nic;
9032
+ struct hclge_vlan_info *old_vlan_info;
9033
+ struct hclge_dev *hdev = vport->back;
9034
+ int ret;
9035
+
9036
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9037
+
9038
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9039
+ if (ret)
9040
+ return ret;
9041
+
9042
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9043
+ /* add new VLAN tag */
9044
+ ret = hclge_set_vlan_filter_hw(hdev,
9045
+ htons(vlan_info->vlan_proto),
9046
+ vport->vport_id,
9047
+ vlan_info->vlan_tag,
9048
+ false);
9049
+ if (ret)
9050
+ return ret;
9051
+
9052
+ /* remove old VLAN tag */
9053
+ ret = hclge_set_vlan_filter_hw(hdev,
9054
+ htons(old_vlan_info->vlan_proto),
9055
+ vport->vport_id,
9056
+ old_vlan_info->vlan_tag,
9057
+ true);
9058
+ if (ret)
9059
+ return ret;
9060
+
9061
+ goto update;
9062
+ }
9063
+
9064
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9065
+ old_vlan_info);
9066
+ if (ret)
9067
+ return ret;
9068
+
9069
+ /* update state only when disable/enable port based VLAN */
9070
+ vport->port_base_vlan_cfg.state = state;
9071
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9072
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9073
+ else
9074
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9075
+
9076
+update:
9077
+ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9078
+ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9079
+ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9080
+
9081
+ return 0;
9082
+}
9083
+
9084
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9085
+ enum hnae3_port_base_vlan_state state,
9086
+ u16 vlan)
9087
+{
9088
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9089
+ if (!vlan)
9090
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9091
+ else
9092
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
9093
+ } else {
9094
+ if (!vlan)
9095
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
9096
+ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9097
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9098
+ else
9099
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
9100
+ }
9101
+}
9102
+
9103
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9104
+ u16 vlan, u8 qos, __be16 proto)
9105
+{
9106
+ struct hclge_vport *vport = hclge_get_vport(handle);
9107
+ struct hclge_dev *hdev = vport->back;
9108
+ struct hclge_vlan_info vlan_info;
9109
+ u16 state;
9110
+ int ret;
9111
+
9112
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9113
+ return -EOPNOTSUPP;
9114
+
9115
+ vport = hclge_get_vf_vport(hdev, vfid);
9116
+ if (!vport)
9117
+ return -EINVAL;
9118
+
9119
+ /* qos is a 3 bits value, so can not be bigger than 7 */
9120
+ if (vlan > VLAN_N_VID - 1 || qos > 7)
9121
+ return -EINVAL;
9122
+ if (proto != htons(ETH_P_8021Q))
9123
+ return -EPROTONOSUPPORT;
9124
+
9125
+ state = hclge_get_port_base_vlan_state(vport,
9126
+ vport->port_base_vlan_cfg.state,
9127
+ vlan);
9128
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9129
+ return 0;
9130
+
9131
+ vlan_info.vlan_tag = vlan;
9132
+ vlan_info.qos = qos;
9133
+ vlan_info.vlan_proto = ntohs(proto);
9134
+
9135
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9136
+ return hclge_update_port_base_vlan_cfg(vport, state,
9137
+ &vlan_info);
9138
+ } else {
9139
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9140
+ vport->vport_id, state,
9141
+ vlan, qos,
9142
+ ntohs(proto));
9143
+ return ret;
9144
+ }
9145
+}
9146
+
9147
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9148
+{
9149
+ struct hclge_vlan_info *vlan_info;
9150
+ struct hclge_vport *vport;
9151
+ int ret;
9152
+ int vf;
9153
+
9154
+ /* clear port base vlan for all vf */
9155
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9156
+ vport = &hdev->vport[vf];
9157
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9158
+
9159
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9160
+ vport->vport_id,
9161
+ vlan_info->vlan_tag, true);
9162
+ if (ret)
9163
+ dev_err(&hdev->pdev->dev,
9164
+ "failed to clear vf vlan for vf%d, ret = %d\n",
9165
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
9166
+ }
9167
+}
9168
+
9169
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9170
+ u16 vlan_id, bool is_kill)
9171
+{
9172
+ struct hclge_vport *vport = hclge_get_vport(handle);
9173
+ struct hclge_dev *hdev = vport->back;
9174
+ bool writen_to_tbl = false;
9175
+ int ret = 0;
9176
+
9177
+ /* When device is resetting or reset failed, firmware is unable to
9178
+ * handle mailbox. Just record the vlan id, and remove it after
9179
+ * reset finished.
9180
+ */
9181
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9182
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9183
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
9184
+ return -EBUSY;
9185
+ }
9186
+
9187
+ /* when port base vlan enabled, we use port base vlan as the vlan
9188
+ * filter entry. In this case, we don't update vlan filter table
9189
+ * when user add new vlan or remove exist vlan, just update the vport
9190
+ * vlan list. The vlan id in vlan list will be writen in vlan filter
9191
+ * table until port base vlan disabled
9192
+ */
9193
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9194
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9195
+ vlan_id, is_kill);
9196
+ writen_to_tbl = true;
9197
+ }
9198
+
9199
+ if (!ret) {
9200
+ if (!is_kill)
9201
+ hclge_add_vport_vlan_table(vport, vlan_id,
9202
+ writen_to_tbl);
9203
+ else if (is_kill && vlan_id != 0)
9204
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
9205
+ } else if (is_kill) {
9206
+ /* when remove hw vlan filter failed, record the vlan id,
9207
+ * and try to remove it from hw later, to be consistence
9208
+ * with stack
9209
+ */
9210
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
9211
+ }
9212
+ return ret;
9213
+}
9214
+
9215
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9216
+{
9217
+#define HCLGE_MAX_SYNC_COUNT 60
9218
+
9219
+ int i, ret, sync_cnt = 0;
9220
+ u16 vlan_id;
9221
+
9222
+ /* start from vport 1 for PF is always alive */
9223
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
9224
+ struct hclge_vport *vport = &hdev->vport[i];
9225
+
9226
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9227
+ VLAN_N_VID);
9228
+ while (vlan_id != VLAN_N_VID) {
9229
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9230
+ vport->vport_id, vlan_id,
9231
+ true);
9232
+ if (ret && ret != -EINVAL)
9233
+ return;
9234
+
9235
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9236
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
9237
+
9238
+ sync_cnt++;
9239
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9240
+ return;
9241
+
9242
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9243
+ VLAN_N_VID);
9244
+ }
9245
+ }
9246
+}
9247
+
9248
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
50289249 {
50299250 struct hclge_config_max_frm_size_cmd *req;
50309251 struct hclge_desc desc;
5031
- int max_frm_size;
5032
- int ret;
5033
-
5034
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5035
-
5036
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
5037
- max_frm_size > HCLGE_MAC_MAX_FRAME)
5038
- return -EINVAL;
5039
-
5040
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
50419252
50429253 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
50439254
50449255 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
5045
- req->max_frm_size = cpu_to_le16(max_frm_size);
9256
+ req->max_frm_size = cpu_to_le16(new_mps);
50469257 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
50479258
5048
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5049
- if (ret)
5050
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
5051
- else
5052
- hdev->mps = max_frm_size;
5053
-
5054
- return ret;
9259
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
50559260 }
50569261
50579262 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
50589263 {
50599264 struct hclge_vport *vport = hclge_get_vport(handle);
5060
- struct hclge_dev *hdev = vport->back;
5061
- int ret;
50629265
5063
- ret = hclge_set_mac_mtu(hdev, new_mtu);
9266
+ return hclge_set_vport_mtu(vport, new_mtu);
9267
+}
9268
+
9269
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9270
+{
9271
+ struct hclge_dev *hdev = vport->back;
9272
+ int i, max_frm_size, ret;
9273
+
9274
+ /* HW supprt 2 layer vlan */
9275
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9276
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9277
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
9278
+ return -EINVAL;
9279
+
9280
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9281
+ mutex_lock(&hdev->vport_lock);
9282
+ /* VF's mps must fit within hdev->mps */
9283
+ if (vport->vport_id && max_frm_size > hdev->mps) {
9284
+ mutex_unlock(&hdev->vport_lock);
9285
+ return -EINVAL;
9286
+ } else if (vport->vport_id) {
9287
+ vport->mps = max_frm_size;
9288
+ mutex_unlock(&hdev->vport_lock);
9289
+ return 0;
9290
+ }
9291
+
9292
+ /* PF's mps must be greater then VF's mps */
9293
+ for (i = 1; i < hdev->num_alloc_vport; i++)
9294
+ if (max_frm_size < hdev->vport[i].mps) {
9295
+ mutex_unlock(&hdev->vport_lock);
9296
+ return -EINVAL;
9297
+ }
9298
+
9299
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9300
+
9301
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
50649302 if (ret) {
50659303 dev_err(&hdev->pdev->dev,
50669304 "Change mtu fail, ret =%d\n", ret);
5067
- return ret;
9305
+ goto out;
50689306 }
9307
+
9308
+ hdev->mps = max_frm_size;
9309
+ vport->mps = max_frm_size;
50699310
50709311 ret = hclge_buffer_alloc(hdev);
50719312 if (ret)
50729313 dev_err(&hdev->pdev->dev,
50739314 "Allocate buffer fail, ret =%d\n", ret);
50749315
9316
+out:
9317
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9318
+ mutex_unlock(&hdev->vport_lock);
50759319 return ret;
50769320 }
50779321
....@@ -5086,7 +9330,8 @@
50869330
50879331 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
50889332 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5089
- hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
9333
+ if (enable)
9334
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
50909335
50919336 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
50929337 if (ret) {
....@@ -5119,8 +9364,7 @@
51199364 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
51209365 }
51219366
5122
-static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
5123
- u16 queue_id)
9367
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
51249368 {
51259369 struct hnae3_queue *queue;
51269370 struct hclge_tqp *tqp;
....@@ -5131,7 +9375,7 @@
51319375 return tqp->index;
51329376 }
51339377
5134
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9378
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
51359379 {
51369380 struct hclge_vport *vport = hclge_get_vport(handle);
51379381 struct hclge_dev *hdev = vport->back;
....@@ -5140,44 +9384,41 @@
51409384 u16 queue_gid;
51419385 int ret;
51429386
5143
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5144
- return;
5145
-
51469387 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
51479388
51489389 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
51499390 if (ret) {
5150
- dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
5151
- return;
9391
+ dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9392
+ return ret;
51529393 }
51539394
51549395 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
51559396 if (ret) {
5156
- dev_warn(&hdev->pdev->dev,
5157
- "Send reset tqp cmd fail, ret = %d\n", ret);
5158
- return;
9397
+ dev_err(&hdev->pdev->dev,
9398
+ "Send reset tqp cmd fail, ret = %d\n", ret);
9399
+ return ret;
51599400 }
51609401
5161
- reset_try_times = 0;
51629402 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5163
- /* Wait for tqp hw reset */
5164
- msleep(20);
51659403 reset_status = hclge_get_reset_status(hdev, queue_gid);
51669404 if (reset_status)
51679405 break;
9406
+
9407
+ /* Wait for tqp hw reset */
9408
+ usleep_range(1000, 1200);
51689409 }
51699410
51709411 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5171
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5172
- return;
9412
+ dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9413
+ return ret;
51739414 }
51749415
51759416 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5176
- if (ret) {
5177
- dev_warn(&hdev->pdev->dev,
5178
- "Deassert the soft reset fail, ret = %d\n", ret);
5179
- return;
5180
- }
9417
+ if (ret)
9418
+ dev_err(&hdev->pdev->dev,
9419
+ "Deassert the soft reset fail, ret = %d\n", ret);
9420
+
9421
+ return ret;
51819422 }
51829423
51839424 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
....@@ -5204,13 +9445,13 @@
52049445 return;
52059446 }
52069447
5207
- reset_try_times = 0;
52089448 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5209
- /* Wait for tqp hw reset */
5210
- msleep(20);
52119449 reset_status = hclge_get_reset_status(hdev, queue_gid);
52129450 if (reset_status)
52139451 break;
9452
+
9453
+ /* Wait for tqp hw reset */
9454
+ usleep_range(1000, 1200);
52149455 }
52159456
52169457 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
....@@ -5232,20 +9473,6 @@
52329473 return hdev->fw_version;
52339474 }
52349475
5235
-static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
5236
- u32 *flowctrl_adv)
5237
-{
5238
- struct hclge_vport *vport = hclge_get_vport(handle);
5239
- struct hclge_dev *hdev = vport->back;
5240
- struct phy_device *phydev = hdev->hw.mac.phydev;
5241
-
5242
- if (!phydev)
5243
- return;
5244
-
5245
- *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
5246
- (phydev->advertising & ADVERTISED_Asym_Pause);
5247
-}
5248
-
52499476 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
52509477 {
52519478 struct phy_device *phydev = hdev->hw.mac.phydev;
....@@ -5253,59 +9480,36 @@
52539480 if (!phydev)
52549481 return;
52559482
5256
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5257
-
5258
- if (rx_en)
5259
- phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5260
-
5261
- if (tx_en)
5262
- phydev->advertising ^= ADVERTISED_Asym_Pause;
9483
+ phy_set_asym_pause(phydev, rx_en, tx_en);
52639484 }
52649485
52659486 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
52669487 {
52679488 int ret;
52689489
5269
- if (rx_en && tx_en)
5270
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
5271
- else if (rx_en && !tx_en)
5272
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5273
- else if (!rx_en && tx_en)
5274
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5275
- else
5276
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
5277
-
52789490 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
52799491 return 0;
52809492
52819493 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5282
- if (ret) {
5283
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5284
- ret);
5285
- return ret;
5286
- }
9494
+ if (ret)
9495
+ dev_err(&hdev->pdev->dev,
9496
+ "configure pauseparam error, ret = %d.\n", ret);
52879497
5288
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5289
-
5290
- return 0;
9498
+ return ret;
52919499 }
52929500
52939501 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
52949502 {
52959503 struct phy_device *phydev = hdev->hw.mac.phydev;
52969504 u16 remote_advertising = 0;
5297
- u16 local_advertising = 0;
9505
+ u16 local_advertising;
52989506 u32 rx_pause, tx_pause;
52999507 u8 flowctl;
53009508
53019509 if (!phydev->link || !phydev->autoneg)
53029510 return 0;
53039511
5304
- if (phydev->advertising & ADVERTISED_Pause)
5305
- local_advertising = ADVERTISE_PAUSE_CAP;
5306
-
5307
- if (phydev->advertising & ADVERTISED_Asym_Pause)
5308
- local_advertising |= ADVERTISE_PAUSE_ASYM;
9512
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
53099513
53109514 if (phydev->pause)
53119515 remote_advertising = LPA_PAUSE_CAP;
....@@ -5331,8 +9535,9 @@
53319535 {
53329536 struct hclge_vport *vport = hclge_get_vport(handle);
53339537 struct hclge_dev *hdev = vport->back;
9538
+ struct phy_device *phydev = hdev->hw.mac.phydev;
53349539
5335
- *auto_neg = hclge_get_autoneg(handle);
9540
+ *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
53369541
53379542 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
53389543 *rx_en = 0;
....@@ -5355,6 +9560,21 @@
53559560 }
53569561 }
53579562
9563
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9564
+ u32 rx_en, u32 tx_en)
9565
+{
9566
+ if (rx_en && tx_en)
9567
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
9568
+ else if (rx_en && !tx_en)
9569
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9570
+ else if (!rx_en && tx_en)
9571
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9572
+ else
9573
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
9574
+
9575
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9576
+}
9577
+
53589578 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
53599579 u32 rx_en, u32 tx_en)
53609580 {
....@@ -5363,11 +9583,13 @@
53639583 struct phy_device *phydev = hdev->hw.mac.phydev;
53649584 u32 fc_autoneg;
53659585
5366
- fc_autoneg = hclge_get_autoneg(handle);
5367
- if (auto_neg != fc_autoneg) {
5368
- dev_info(&hdev->pdev->dev,
5369
- "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
5370
- return -EOPNOTSUPP;
9586
+ if (phydev) {
9587
+ fc_autoneg = hclge_get_autoneg(handle);
9588
+ if (auto_neg != fc_autoneg) {
9589
+ dev_info(&hdev->pdev->dev,
9590
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9591
+ return -EOPNOTSUPP;
9592
+ }
53719593 }
53729594
53739595 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
....@@ -5378,16 +9600,15 @@
53789600
53799601 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
53809602
5381
- if (!fc_autoneg)
9603
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9604
+
9605
+ if (!auto_neg)
53829606 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
53839607
5384
- /* Only support flow control negotiation for netdev with
5385
- * phy attached for now.
5386
- */
5387
- if (!phydev)
5388
- return -EOPNOTSUPP;
9608
+ if (phydev)
9609
+ return phy_start_aneg(phydev);
53899610
5390
- return phy_start_aneg(phydev);
9611
+ return -EOPNOTSUPP;
53919612 }
53929613
53939614 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
....@@ -5404,13 +9625,23 @@
54049625 *auto_neg = hdev->hw.mac.autoneg;
54059626 }
54069627
5407
-static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
9628
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9629
+ u8 *module_type)
54089630 {
54099631 struct hclge_vport *vport = hclge_get_vport(handle);
54109632 struct hclge_dev *hdev = vport->back;
54119633
9634
+ /* When nic is down, the service task is not running, doesn't update
9635
+ * the port information per second. Query the port information before
9636
+ * return the media type, ensure getting the correct media information.
9637
+ */
9638
+ hclge_update_port_info(hdev);
9639
+
54129640 if (media_type)
54139641 *media_type = hdev->hw.mac.media_type;
9642
+
9643
+ if (module_type)
9644
+ *module_type = hdev->hw.mac.module_type;
54149645 }
54159646
54169647 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
....@@ -5419,7 +9650,8 @@
54199650 struct hclge_vport *vport = hclge_get_vport(handle);
54209651 struct hclge_dev *hdev = vport->back;
54219652 struct phy_device *phydev = hdev->hw.mac.phydev;
5422
- int mdix_ctrl, mdix, retval, is_resolved;
9653
+ int mdix_ctrl, mdix, is_resolved;
9654
+ unsigned int retval;
54239655
54249656 if (!phydev) {
54259657 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
....@@ -5462,14 +9694,125 @@
54629694 *tp_mdix = ETH_TP_MDI;
54639695 }
54649696
5465
-static int hclge_init_instance_hw(struct hclge_dev *hdev)
9697
+static void hclge_info_show(struct hclge_dev *hdev)
54669698 {
5467
- return hclge_mac_connect_phy(hdev);
9699
+ struct device *dev = &hdev->pdev->dev;
9700
+
9701
+ dev_info(dev, "PF info begin:\n");
9702
+
9703
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9704
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9705
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9706
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9707
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9708
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9709
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9710
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9711
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9712
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9713
+ dev_info(dev, "This is %s PF\n",
9714
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9715
+ dev_info(dev, "DCB %s\n",
9716
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9717
+ dev_info(dev, "MQPRIO %s\n",
9718
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9719
+
9720
+ dev_info(dev, "PF info end.\n");
54689721 }
54699722
5470
-static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
9723
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9724
+ struct hclge_vport *vport)
54719725 {
5472
- hclge_mac_disconnect_phy(hdev);
9726
+ struct hnae3_client *client = vport->nic.client;
9727
+ struct hclge_dev *hdev = ae_dev->priv;
9728
+ int rst_cnt = hdev->rst_stats.reset_cnt;
9729
+ int ret;
9730
+
9731
+ ret = client->ops->init_instance(&vport->nic);
9732
+ if (ret)
9733
+ return ret;
9734
+
9735
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9736
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9737
+ rst_cnt != hdev->rst_stats.reset_cnt) {
9738
+ ret = -EBUSY;
9739
+ goto init_nic_err;
9740
+ }
9741
+
9742
+ /* Enable nic hw error interrupts */
9743
+ ret = hclge_config_nic_hw_error(hdev, true);
9744
+ if (ret) {
9745
+ dev_err(&ae_dev->pdev->dev,
9746
+ "fail(%d) to enable hw error interrupts\n", ret);
9747
+ goto init_nic_err;
9748
+ }
9749
+
9750
+ hnae3_set_client_init_flag(client, ae_dev, 1);
9751
+
9752
+ if (netif_msg_drv(&hdev->vport->nic))
9753
+ hclge_info_show(hdev);
9754
+
9755
+ return ret;
9756
+
9757
+init_nic_err:
9758
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9759
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9760
+ msleep(HCLGE_WAIT_RESET_DONE);
9761
+
9762
+ client->ops->uninit_instance(&vport->nic, 0);
9763
+
9764
+ return ret;
9765
+}
9766
+
9767
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9768
+ struct hclge_vport *vport)
9769
+{
9770
+ struct hclge_dev *hdev = ae_dev->priv;
9771
+ struct hnae3_client *client;
9772
+ int rst_cnt;
9773
+ int ret;
9774
+
9775
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9776
+ !hdev->nic_client)
9777
+ return 0;
9778
+
9779
+ client = hdev->roce_client;
9780
+ ret = hclge_init_roce_base_info(vport);
9781
+ if (ret)
9782
+ return ret;
9783
+
9784
+ rst_cnt = hdev->rst_stats.reset_cnt;
9785
+ ret = client->ops->init_instance(&vport->roce);
9786
+ if (ret)
9787
+ return ret;
9788
+
9789
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9790
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9791
+ rst_cnt != hdev->rst_stats.reset_cnt) {
9792
+ ret = -EBUSY;
9793
+ goto init_roce_err;
9794
+ }
9795
+
9796
+ /* Enable roce ras interrupts */
9797
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
9798
+ if (ret) {
9799
+ dev_err(&ae_dev->pdev->dev,
9800
+ "fail(%d) to enable roce ras interrupts\n", ret);
9801
+ goto init_roce_err;
9802
+ }
9803
+
9804
+ hnae3_set_client_init_flag(client, ae_dev, 1);
9805
+
9806
+ return 0;
9807
+
9808
+init_roce_err:
9809
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9810
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9811
+ msleep(HCLGE_WAIT_RESET_DONE);
9812
+
9813
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9814
+
9815
+ return ret;
54739816 }
54749817
54759818 static int hclge_init_client_instance(struct hnae3_client *client,
....@@ -5484,48 +9827,15 @@
54849827
54859828 switch (client->type) {
54869829 case HNAE3_CLIENT_KNIC:
5487
-
54889830 hdev->nic_client = client;
54899831 vport->nic.client = client;
5490
- ret = client->ops->init_instance(&vport->nic);
9832
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
54919833 if (ret)
54929834 goto clear_nic;
54939835
5494
- ret = hclge_init_instance_hw(hdev);
5495
- if (ret) {
5496
- client->ops->uninit_instance(&vport->nic,
5497
- 0);
5498
- goto clear_nic;
5499
- }
5500
-
5501
- hnae3_set_client_init_flag(client, ae_dev, 1);
5502
-
5503
- if (hdev->roce_client &&
5504
- hnae3_dev_roce_supported(hdev)) {
5505
- struct hnae3_client *rc = hdev->roce_client;
5506
-
5507
- ret = hclge_init_roce_base_info(vport);
5508
- if (ret)
5509
- goto clear_roce;
5510
-
5511
- ret = rc->ops->init_instance(&vport->roce);
5512
- if (ret)
5513
- goto clear_roce;
5514
-
5515
- hnae3_set_client_init_flag(hdev->roce_client,
5516
- ae_dev, 1);
5517
- }
5518
-
5519
- break;
5520
- case HNAE3_CLIENT_UNIC:
5521
- hdev->nic_client = client;
5522
- vport->nic.client = client;
5523
-
5524
- ret = client->ops->init_instance(&vport->nic);
9836
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
55259837 if (ret)
5526
- goto clear_nic;
5527
-
5528
- hnae3_set_client_init_flag(client, ae_dev, 1);
9838
+ goto clear_roce;
55299839
55309840 break;
55319841 case HNAE3_CLIENT_ROCE:
....@@ -5534,17 +9844,13 @@
55349844 vport->roce.client = client;
55359845 }
55369846
5537
- if (hdev->roce_client && hdev->nic_client) {
5538
- ret = hclge_init_roce_base_info(vport);
5539
- if (ret)
5540
- goto clear_roce;
9847
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
9848
+ if (ret)
9849
+ goto clear_roce;
55419850
5542
- ret = client->ops->init_instance(&vport->roce);
5543
- if (ret)
5544
- goto clear_roce;
5545
-
5546
- hnae3_set_client_init_flag(client, ae_dev, 1);
5547
- }
9851
+ break;
9852
+ default:
9853
+ return -EINVAL;
55489854 }
55499855 }
55509856
....@@ -5570,6 +9876,10 @@
55709876 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
55719877 vport = &hdev->vport[i];
55729878 if (hdev->roce_client) {
9879
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9880
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9881
+ msleep(HCLGE_WAIT_RESET_DONE);
9882
+
55739883 hdev->roce_client->ops->uninit_instance(&vport->roce,
55749884 0);
55759885 hdev->roce_client = NULL;
....@@ -5578,7 +9888,10 @@
55789888 if (client->type == HNAE3_CLIENT_ROCE)
55799889 return;
55809890 if (hdev->nic_client && client->ops->uninit_instance) {
5581
- hclge_uninit_instance_hw(hdev);
9891
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9892
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9893
+ msleep(HCLGE_WAIT_RESET_DONE);
9894
+
55829895 client->ops->uninit_instance(&vport->nic, 0);
55839896 hdev->nic_client = NULL;
55849897 vport->nic.client = NULL;
....@@ -5653,6 +9966,7 @@
56539966 set_bit(HCLGE_STATE_DOWN, &hdev->state);
56549967 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
56559968 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9969
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
56569970 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
56579971 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
56589972 }
....@@ -5660,15 +9974,102 @@
56609974 static void hclge_state_uninit(struct hclge_dev *hdev)
56619975 {
56629976 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9977
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
56639978
5664
- if (hdev->service_timer.function)
5665
- del_timer_sync(&hdev->service_timer);
5666
- if (hdev->service_task.func)
5667
- cancel_work_sync(&hdev->service_task);
5668
- if (hdev->rst_service_task.func)
5669
- cancel_work_sync(&hdev->rst_service_task);
5670
- if (hdev->mbx_service_task.func)
5671
- cancel_work_sync(&hdev->mbx_service_task);
9979
+ if (hdev->reset_timer.function)
9980
+ del_timer_sync(&hdev->reset_timer);
9981
+ if (hdev->service_task.work.func)
9982
+ cancel_delayed_work_sync(&hdev->service_task);
9983
+}
9984
+
9985
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9986
+{
9987
+#define HCLGE_FLR_RETRY_WAIT_MS 500
9988
+#define HCLGE_FLR_RETRY_CNT 5
9989
+
9990
+ struct hclge_dev *hdev = ae_dev->priv;
9991
+ int retry_cnt = 0;
9992
+ int ret;
9993
+
9994
+retry:
9995
+ down(&hdev->reset_sem);
9996
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9997
+ hdev->reset_type = HNAE3_FLR_RESET;
9998
+ ret = hclge_reset_prepare(hdev);
9999
+ if (ret || hdev->reset_pending) {
10000
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10001
+ ret);
10002
+ if (hdev->reset_pending ||
10003
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10004
+ dev_err(&hdev->pdev->dev,
10005
+ "reset_pending:0x%lx, retry_cnt:%d\n",
10006
+ hdev->reset_pending, retry_cnt);
10007
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10008
+ up(&hdev->reset_sem);
10009
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
10010
+ goto retry;
10011
+ }
10012
+ }
10013
+
10014
+ /* disable misc vector before FLR done */
10015
+ hclge_enable_vector(&hdev->misc_vector, false);
10016
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10017
+ hdev->rst_stats.flr_rst_cnt++;
10018
+}
10019
+
10020
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10021
+{
10022
+ struct hclge_dev *hdev = ae_dev->priv;
10023
+ int ret;
10024
+
10025
+ hclge_enable_vector(&hdev->misc_vector, true);
10026
+
10027
+ ret = hclge_reset_rebuild(hdev);
10028
+ if (ret)
10029
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10030
+
10031
+ hdev->reset_type = HNAE3_NONE_RESET;
10032
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10033
+ up(&hdev->reset_sem);
10034
+}
10035
+
10036
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10037
+{
10038
+ u16 i;
10039
+
10040
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10041
+ struct hclge_vport *vport = &hdev->vport[i];
10042
+ int ret;
10043
+
10044
+ /* Send cmd to clear VF's FUNC_RST_ING */
10045
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10046
+ if (ret)
10047
+ dev_warn(&hdev->pdev->dev,
10048
+ "clear vf(%u) rst failed %d!\n",
10049
+ vport->vport_id, ret);
10050
+ }
10051
+}
10052
+
10053
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10054
+{
10055
+ struct hclge_desc desc;
10056
+ int ret;
10057
+
10058
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10059
+
10060
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10061
+ /* This new command is only supported by new firmware, it will
10062
+ * fail with older firmware. Error value -EOPNOSUPP can only be
10063
+ * returned by older firmware running this command, to keep code
10064
+ * backward compatible we will override this value and return
10065
+ * success.
10066
+ */
10067
+ if (ret && ret != -EOPNOTSUPP) {
10068
+ dev_err(&hdev->pdev->dev,
10069
+ "failed to clear hw resource, ret = %d\n", ret);
10070
+ return ret;
10071
+ }
10072
+ return 0;
567210073 }
567310074
567410075 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -5678,37 +10079,47 @@
567810079 int ret;
567910080
568010081 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5681
- if (!hdev) {
5682
- ret = -ENOMEM;
5683
- goto out;
5684
- }
10082
+ if (!hdev)
10083
+ return -ENOMEM;
568510084
568610085 hdev->pdev = pdev;
568710086 hdev->ae_dev = ae_dev;
568810087 hdev->reset_type = HNAE3_NONE_RESET;
10088
+ hdev->reset_level = HNAE3_FUNC_RESET;
568910089 ae_dev->priv = hdev;
569010090
10091
+ /* HW supprt 2 layer vlan */
10092
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10093
+
10094
+ mutex_init(&hdev->vport_lock);
10095
+ spin_lock_init(&hdev->fd_rule_lock);
10096
+ sema_init(&hdev->reset_sem, 1);
10097
+
569110098 ret = hclge_pci_init(hdev);
5692
- if (ret) {
5693
- dev_err(&pdev->dev, "PCI init failed\n");
10099
+ if (ret)
569410100 goto out;
5695
- }
569610101
569710102 /* Firmware command queue initialize */
569810103 ret = hclge_cmd_queue_init(hdev);
5699
- if (ret) {
5700
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
10104
+ if (ret)
570110105 goto err_pci_uninit;
5702
- }
570310106
570410107 /* Firmware command initialize */
570510108 ret = hclge_cmd_init(hdev);
570610109 if (ret)
570710110 goto err_cmd_uninit;
570810111
10112
+ ret = hclge_clear_hw_resource(hdev);
10113
+ if (ret)
10114
+ goto err_cmd_uninit;
10115
+
570910116 ret = hclge_get_cap(hdev);
10117
+ if (ret)
10118
+ goto err_cmd_uninit;
10119
+
10120
+ ret = hclge_query_dev_specs(hdev);
571010121 if (ret) {
5711
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
10122
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
571210123 ret);
571310124 goto err_cmd_uninit;
571410125 }
....@@ -5726,12 +10137,8 @@
572610137 }
572710138
572810139 ret = hclge_misc_irq_init(hdev);
5729
- if (ret) {
5730
- dev_err(&pdev->dev,
5731
- "Misc IRQ(vector0) init error, ret = %d.\n",
5732
- ret);
10140
+ if (ret)
573310141 goto err_msi_uninit;
5734
- }
573510142
573610143 ret = hclge_alloc_tqps(hdev);
573710144 if (ret) {
....@@ -5740,25 +10147,22 @@
574010147 }
574110148
574210149 ret = hclge_alloc_vport(hdev);
5743
- if (ret) {
5744
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
10150
+ if (ret)
574510151 goto err_msi_irq_uninit;
5746
- }
574710152
574810153 ret = hclge_map_tqp(hdev);
5749
- if (ret) {
5750
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10154
+ if (ret)
575110155 goto err_msi_irq_uninit;
5752
- }
575310156
575410157 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
575510158 ret = hclge_mac_mdio_config(hdev);
5756
- if (ret) {
5757
- dev_err(&hdev->pdev->dev,
5758
- "mdio config fail ret=%d\n", ret);
10159
+ if (ret)
575910160 goto err_msi_irq_uninit;
5760
- }
576110161 }
10162
+
10163
+ ret = hclge_init_umv_space(hdev);
10164
+ if (ret)
10165
+ goto err_mdiobus_unreg;
576210166
576310167 ret = hclge_mac_init(hdev);
576410168 if (ret) {
....@@ -5771,6 +10175,10 @@
577110175 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
577210176 goto err_mdiobus_unreg;
577310177 }
10178
+
10179
+ ret = hclge_config_gro(hdev, true);
10180
+ if (ret)
10181
+ goto err_mdiobus_unreg;
577410182
577510183 ret = hclge_init_vlan_config(hdev);
577610184 if (ret) {
....@@ -5797,21 +10205,54 @@
579710205 goto err_mdiobus_unreg;
579810206 }
579910207
10208
+ ret = hclge_init_fd_config(hdev);
10209
+ if (ret) {
10210
+ dev_err(&pdev->dev,
10211
+ "fd table init fail, ret=%d\n", ret);
10212
+ goto err_mdiobus_unreg;
10213
+ }
10214
+
10215
+ INIT_KFIFO(hdev->mac_tnl_log);
10216
+
580010217 hclge_dcb_ops_set(hdev);
580110218
5802
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5803
- INIT_WORK(&hdev->service_task, hclge_service_task);
5804
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5805
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
10219
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10220
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10221
+
10222
+ /* Setup affinity after service timer setup because add_timer_on
10223
+ * is called in affinity notify.
10224
+ */
10225
+ hclge_misc_affinity_setup(hdev);
580610226
580710227 hclge_clear_all_event_cause(hdev);
10228
+ hclge_clear_resetting_state(hdev);
10229
+
10230
+ /* Log and clear the hw errors those already occurred */
10231
+ hclge_handle_all_hns_hw_errors(ae_dev);
10232
+
10233
+ /* request delayed reset for the error recovery because an immediate
10234
+ * global reset on a PF affecting pending initialization of other PFs
10235
+ */
10236
+ if (ae_dev->hw_err_reset_req) {
10237
+ enum hnae3_reset_type reset_level;
10238
+
10239
+ reset_level = hclge_get_reset_level(ae_dev,
10240
+ &ae_dev->hw_err_reset_req);
10241
+ hclge_set_def_reset_request(ae_dev, reset_level);
10242
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10243
+ }
580810244
580910245 /* Enable MISC vector(vector0) */
581010246 hclge_enable_vector(&hdev->misc_vector, true);
581110247
581210248 hclge_state_init(hdev);
10249
+ hdev->last_reset_time = jiffies;
581310250
5814
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
10251
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10252
+ HCLGE_DRIVER_NAME);
10253
+
10254
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10255
+
581510256 return 0;
581610257
581710258 err_mdiobus_unreg:
....@@ -5822,19 +10263,245 @@
582210263 err_msi_uninit:
582310264 pci_free_irq_vectors(pdev);
582410265 err_cmd_uninit:
5825
- hclge_destroy_cmd_queue(&hdev->hw);
10266
+ hclge_cmd_uninit(hdev);
582610267 err_pci_uninit:
582710268 pcim_iounmap(pdev, hdev->hw.io_base);
582810269 pci_clear_master(pdev);
582910270 pci_release_regions(pdev);
583010271 pci_disable_device(pdev);
583110272 out:
10273
+ mutex_destroy(&hdev->vport_lock);
583210274 return ret;
583310275 }
583410276
583510277 static void hclge_stats_clear(struct hclge_dev *hdev)
583610278 {
5837
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
10279
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10280
+}
10281
+
10282
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10283
+{
10284
+ return hclge_config_switch_param(hdev, vf, enable,
10285
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
10286
+}
10287
+
10288
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10289
+{
10290
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10291
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
10292
+ enable, vf);
10293
+}
10294
+
10295
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10296
+{
10297
+ int ret;
10298
+
10299
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10300
+ if (ret) {
10301
+ dev_err(&hdev->pdev->dev,
10302
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
10303
+ vf, enable ? "on" : "off", ret);
10304
+ return ret;
10305
+ }
10306
+
10307
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10308
+ if (ret)
10309
+ dev_err(&hdev->pdev->dev,
10310
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
10311
+ vf, enable ? "on" : "off", ret);
10312
+
10313
+ return ret;
10314
+}
10315
+
10316
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10317
+ bool enable)
10318
+{
10319
+ struct hclge_vport *vport = hclge_get_vport(handle);
10320
+ struct hclge_dev *hdev = vport->back;
10321
+ u32 new_spoofchk = enable ? 1 : 0;
10322
+ int ret;
10323
+
10324
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10325
+ return -EOPNOTSUPP;
10326
+
10327
+ vport = hclge_get_vf_vport(hdev, vf);
10328
+ if (!vport)
10329
+ return -EINVAL;
10330
+
10331
+ if (vport->vf_info.spoofchk == new_spoofchk)
10332
+ return 0;
10333
+
10334
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10335
+ dev_warn(&hdev->pdev->dev,
10336
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10337
+ vf);
10338
+ else if (enable && hclge_is_umv_space_full(vport, true))
10339
+ dev_warn(&hdev->pdev->dev,
10340
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10341
+ vf);
10342
+
10343
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10344
+ if (ret)
10345
+ return ret;
10346
+
10347
+ vport->vf_info.spoofchk = new_spoofchk;
10348
+ return 0;
10349
+}
10350
+
10351
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10352
+{
10353
+ struct hclge_vport *vport = hdev->vport;
10354
+ int ret;
10355
+ int i;
10356
+
10357
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10358
+ return 0;
10359
+
10360
+ /* resume the vf spoof check state after reset */
10361
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10362
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10363
+ vport->vf_info.spoofchk);
10364
+ if (ret)
10365
+ return ret;
10366
+
10367
+ vport++;
10368
+ }
10369
+
10370
+ return 0;
10371
+}
10372
+
10373
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10374
+{
10375
+ struct hclge_vport *vport = hclge_get_vport(handle);
10376
+ struct hclge_dev *hdev = vport->back;
10377
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10378
+ u32 new_trusted = enable ? 1 : 0;
10379
+ bool en_bc_pmc;
10380
+ int ret;
10381
+
10382
+ vport = hclge_get_vf_vport(hdev, vf);
10383
+ if (!vport)
10384
+ return -EINVAL;
10385
+
10386
+ if (vport->vf_info.trusted == new_trusted)
10387
+ return 0;
10388
+
10389
+ /* Disable promisc mode for VF if it is not trusted any more. */
10390
+ if (!enable && vport->vf_info.promisc_enable) {
10391
+ en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10392
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
10393
+ en_bc_pmc);
10394
+ if (ret)
10395
+ return ret;
10396
+ vport->vf_info.promisc_enable = 0;
10397
+ hclge_inform_vf_promisc_info(vport);
10398
+ }
10399
+
10400
+ vport->vf_info.trusted = new_trusted;
10401
+
10402
+ return 0;
10403
+}
10404
+
10405
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10406
+{
10407
+ int ret;
10408
+ int vf;
10409
+
10410
+ /* reset vf rate to default value */
10411
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10412
+ struct hclge_vport *vport = &hdev->vport[vf];
10413
+
10414
+ vport->vf_info.max_tx_rate = 0;
10415
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10416
+ if (ret)
10417
+ dev_err(&hdev->pdev->dev,
10418
+ "vf%d failed to reset to default, ret=%d\n",
10419
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
10420
+ }
10421
+}
10422
+
10423
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10424
+ int min_tx_rate, int max_tx_rate)
10425
+{
10426
+ if (min_tx_rate != 0 ||
10427
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10428
+ dev_err(&hdev->pdev->dev,
10429
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10430
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10431
+ return -EINVAL;
10432
+ }
10433
+
10434
+ return 0;
10435
+}
10436
+
10437
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10438
+ int min_tx_rate, int max_tx_rate, bool force)
10439
+{
10440
+ struct hclge_vport *vport = hclge_get_vport(handle);
10441
+ struct hclge_dev *hdev = vport->back;
10442
+ int ret;
10443
+
10444
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10445
+ if (ret)
10446
+ return ret;
10447
+
10448
+ vport = hclge_get_vf_vport(hdev, vf);
10449
+ if (!vport)
10450
+ return -EINVAL;
10451
+
10452
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10453
+ return 0;
10454
+
10455
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10456
+ if (ret)
10457
+ return ret;
10458
+
10459
+ vport->vf_info.max_tx_rate = max_tx_rate;
10460
+
10461
+ return 0;
10462
+}
10463
+
10464
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10465
+{
10466
+ struct hnae3_handle *handle = &hdev->vport->nic;
10467
+ struct hclge_vport *vport;
10468
+ int ret;
10469
+ int vf;
10470
+
10471
+ /* resume the vf max_tx_rate after reset */
10472
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10473
+ vport = hclge_get_vf_vport(hdev, vf);
10474
+ if (!vport)
10475
+ return -EINVAL;
10476
+
10477
+ /* zero means max rate, after reset, firmware already set it to
10478
+ * max rate, so just continue.
10479
+ */
10480
+ if (!vport->vf_info.max_tx_rate)
10481
+ continue;
10482
+
10483
+ ret = hclge_set_vf_rate(handle, vf, 0,
10484
+ vport->vf_info.max_tx_rate, true);
10485
+ if (ret) {
10486
+ dev_err(&hdev->pdev->dev,
10487
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
10488
+ vf, vport->vf_info.max_tx_rate, ret);
10489
+ return ret;
10490
+ }
10491
+ }
10492
+
10493
+ return 0;
10494
+}
10495
+
10496
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
10497
+{
10498
+ struct hclge_vport *vport = hdev->vport;
10499
+ int i;
10500
+
10501
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
10502
+ hclge_vport_stop(vport);
10503
+ vport++;
10504
+ }
583810505 }
583910506
584010507 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -5846,24 +10513,20 @@
584610513 set_bit(HCLGE_STATE_DOWN, &hdev->state);
584710514
584810515 hclge_stats_clear(hdev);
5849
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10516
+ /* NOTE: pf reset needn't to clear or restore pf and vf table entry.
10517
+ * so here should not clean table in memory.
10518
+ */
10519
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
10520
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
10521
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10522
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10523
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10524
+ hclge_reset_umv_space(hdev);
10525
+ }
585010526
585110527 ret = hclge_cmd_init(hdev);
585210528 if (ret) {
585310529 dev_err(&pdev->dev, "Cmd queue init failed\n");
5854
- return ret;
5855
- }
5856
-
5857
- ret = hclge_get_cap(hdev);
5858
- if (ret) {
5859
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5860
- ret);
5861
- return ret;
5862
- }
5863
-
5864
- ret = hclge_configure(hdev);
5865
- if (ret) {
5866
- dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
586710530 return ret;
586810531 }
586910532
....@@ -5885,13 +10548,17 @@
588510548 return ret;
588610549 }
588710550
10551
+ ret = hclge_config_gro(hdev, true);
10552
+ if (ret)
10553
+ return ret;
10554
+
588810555 ret = hclge_init_vlan_config(hdev);
588910556 if (ret) {
589010557 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
589110558 return ret;
589210559 }
589310560
5894
- ret = hclge_tm_init_hw(hdev);
10561
+ ret = hclge_tm_init_hw(hdev, true);
589510562 if (ret) {
589610563 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
589710564 return ret;
....@@ -5902,6 +10569,52 @@
590210569 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
590310570 return ret;
590410571 }
10572
+
10573
+ ret = init_mgr_tbl(hdev);
10574
+ if (ret) {
10575
+ dev_err(&pdev->dev,
10576
+ "failed to reinit manager table, ret = %d\n", ret);
10577
+ return ret;
10578
+ }
10579
+
10580
+ ret = hclge_init_fd_config(hdev);
10581
+ if (ret) {
10582
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10583
+ return ret;
10584
+ }
10585
+
10586
+ /* Log and clear the hw errors those already occurred */
10587
+ hclge_handle_all_hns_hw_errors(ae_dev);
10588
+
10589
+ /* Re-enable the hw error interrupts because
10590
+ * the interrupts get disabled on global reset.
10591
+ */
10592
+ ret = hclge_config_nic_hw_error(hdev, true);
10593
+ if (ret) {
10594
+ dev_err(&pdev->dev,
10595
+ "fail(%d) to re-enable NIC hw error interrupts\n",
10596
+ ret);
10597
+ return ret;
10598
+ }
10599
+
10600
+ if (hdev->roce_client) {
10601
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
10602
+ if (ret) {
10603
+ dev_err(&pdev->dev,
10604
+ "fail(%d) to re-enable roce ras interrupts\n",
10605
+ ret);
10606
+ return ret;
10607
+ }
10608
+ }
10609
+
10610
+ hclge_reset_vport_state(hdev);
10611
+ ret = hclge_reset_vport_spoofchk(hdev);
10612
+ if (ret)
10613
+ return ret;
10614
+
10615
+ ret = hclge_resume_vf_rate(hdev);
10616
+ if (ret)
10617
+ return ret;
590510618
590610619 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
590710620 HCLGE_DRIVER_NAME);
....@@ -5914,7 +10627,11 @@
591410627 struct hclge_dev *hdev = ae_dev->priv;
591510628 struct hclge_mac *mac = &hdev->hw.mac;
591610629
10630
+ hclge_reset_vf_rate(hdev);
10631
+ hclge_clear_vf_vlan(hdev);
10632
+ hclge_misc_affinity_teardown(hdev);
591710633 hclge_state_uninit(hdev);
10634
+ hclge_uninit_mac_table(hdev);
591810635
591910636 if (mac->phydev)
592010637 mdiobus_unregister(mac->mdio_bus);
....@@ -5923,9 +10640,16 @@
592310640 hclge_enable_vector(&hdev->misc_vector, false);
592410641 synchronize_irq(hdev->misc_vector.vector_irq);
592510642
5926
- hclge_destroy_cmd_queue(&hdev->hw);
10643
+ /* Disable all hw interrupts */
10644
+ hclge_config_mac_tnl_int(hdev, false);
10645
+ hclge_config_nic_hw_error(hdev, false);
10646
+ hclge_config_rocee_ras_interrupt(hdev, false);
10647
+
10648
+ hclge_cmd_uninit(hdev);
592710649 hclge_misc_irq_uninit(hdev);
592810650 hclge_pci_uninit(hdev);
10651
+ mutex_destroy(&hdev->vport_lock);
10652
+ hclge_uninit_vport_vlan_table(hdev);
592910653 ae_dev->priv = NULL;
593010654 }
593110655
....@@ -5949,72 +10673,36 @@
594910673 }
595010674
595110675 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5952
- u16 *free_tqps, u16 *max_rss_size)
10676
+ u16 *alloc_tqps, u16 *max_rss_size)
595310677 {
595410678 struct hclge_vport *vport = hclge_get_vport(handle);
595510679 struct hclge_dev *hdev = vport->back;
5956
- u16 temp_tqps = 0;
5957
- int i;
595810680
5959
- for (i = 0; i < hdev->num_tqps; i++) {
5960
- if (!hdev->htqp[i].alloced)
5961
- temp_tqps++;
5962
- }
5963
- *free_tqps = temp_tqps;
10681
+ *alloc_tqps = vport->alloc_tqps;
596410682 *max_rss_size = hdev->rss_size_max;
596510683 }
596610684
5967
-static void hclge_release_tqp(struct hclge_vport *vport)
5968
-{
5969
- struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5970
- struct hclge_dev *hdev = vport->back;
5971
- int i;
5972
-
5973
- for (i = 0; i < kinfo->num_tqps; i++) {
5974
- struct hclge_tqp *tqp =
5975
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
5976
-
5977
- tqp->q.handle = NULL;
5978
- tqp->q.tqp_index = 0;
5979
- tqp->alloced = false;
5980
- }
5981
-
5982
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5983
- kinfo->tqp = NULL;
5984
-}
5985
-
5986
-static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
10685
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10686
+ bool rxfh_configured)
598710687 {
598810688 struct hclge_vport *vport = hclge_get_vport(handle);
598910689 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10690
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
599010691 struct hclge_dev *hdev = vport->back;
5991
- int cur_rss_size = kinfo->rss_size;
5992
- int cur_tqps = kinfo->num_tqps;
5993
- u16 tc_offset[HCLGE_MAX_TC_NUM];
10692
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10693
+ u16 cur_rss_size = kinfo->rss_size;
10694
+ u16 cur_tqps = kinfo->num_tqps;
599410695 u16 tc_valid[HCLGE_MAX_TC_NUM];
5995
- u16 tc_size[HCLGE_MAX_TC_NUM];
599610696 u16 roundup_size;
599710697 u32 *rss_indir;
5998
- int ret, i;
10698
+ unsigned int i;
10699
+ int ret;
599910700
6000
- /* Free old tqps, and reallocate with new tqp number when nic setup */
6001
- hclge_release_tqp(vport);
10701
+ kinfo->req_rss_size = new_tqps_num;
600210702
6003
- ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
10703
+ ret = hclge_tm_vport_map_update(hdev);
600410704 if (ret) {
6005
- dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
6006
- return ret;
6007
- }
6008
-
6009
- ret = hclge_map_tqp_to_vport(hdev, vport);
6010
- if (ret) {
6011
- dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
6012
- return ret;
6013
- }
6014
-
6015
- ret = hclge_tm_schd_init(hdev);
6016
- if (ret) {
6017
- dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
10705
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
601810706 return ret;
601910707 }
602010708
....@@ -6035,6 +10723,10 @@
603510723 if (ret)
603610724 return ret;
603710725
10726
+ /* RSS indirection table has been configuared by user */
10727
+ if (rxfh_configured)
10728
+ goto out;
10729
+
603810730 /* Reinitializes the rss indirect table according to the new RSS size */
603910731 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
604010732 if (!rss_indir)
....@@ -6050,9 +10742,10 @@
605010742
605110743 kfree(rss_indir);
605210744
10745
+out:
605310746 if (!ret)
605410747 dev_info(&hdev->pdev->dev,
6055
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
10748
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
605610749 cur_rss_size, kinfo->rss_size,
605710750 cur_tqps, kinfo->rss_size * kinfo->num_tc);
605810751
....@@ -6088,10 +10781,12 @@
608810781 void *data)
608910782 {
609010783 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10784
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
609110785
609210786 struct hclge_desc *desc;
609310787 u32 *reg_val = data;
609410788 __le32 *desc_data;
10789
+ int nodata_num;
609510790 int cmd_num;
609610791 int i, k, n;
609710792 int ret;
....@@ -6099,7 +10794,9 @@
609910794 if (regs_num == 0)
610010795 return 0;
610110796
6102
- cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
10797
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10798
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10799
+ HCLGE_32_BIT_REG_RTN_DATANUM);
610310800 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
610410801 if (!desc)
610510802 return -ENOMEM;
....@@ -6116,7 +10813,7 @@
611610813 for (i = 0; i < cmd_num; i++) {
611710814 if (i == 0) {
611810815 desc_data = (__le32 *)(&desc[i].data[0]);
6119
- n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
10816
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
612010817 } else {
612110818 desc_data = (__le32 *)(&desc[i]);
612210819 n = HCLGE_32_BIT_REG_RTN_DATANUM;
....@@ -6138,10 +10835,12 @@
613810835 void *data)
613910836 {
614010837 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10838
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
614110839
614210840 struct hclge_desc *desc;
614310841 u64 *reg_val = data;
614410842 __le64 *desc_data;
10843
+ int nodata_len;
614510844 int cmd_num;
614610845 int i, k, n;
614710846 int ret;
....@@ -6149,7 +10848,9 @@
614910848 if (regs_num == 0)
615010849 return 0;
615110850
6152
- cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
10851
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10852
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10853
+ HCLGE_64_BIT_REG_RTN_DATANUM);
615310854 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
615410855 if (!desc)
615510856 return -ENOMEM;
....@@ -6166,7 +10867,7 @@
616610867 for (i = 0; i < cmd_num; i++) {
616710868 if (i == 0) {
616810869 desc_data = (__le64 *)(&desc[i].data[0]);
6169
- n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
10870
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
617010871 } else {
617110872 desc_data = (__le64 *)(&desc[i]);
617210873 n = HCLGE_64_BIT_REG_RTN_DATANUM;
....@@ -6184,30 +10885,288 @@
618410885 return 0;
618510886 }
618610887
10888
+#define MAX_SEPARATE_NUM 4
10889
+#define SEPARATOR_VALUE 0xFDFCFBFA
10890
+#define REG_NUM_PER_LINE 4
10891
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10892
+#define REG_SEPARATOR_LINE 1
10893
+#define REG_NUM_REMAIN_MASK 3
10894
+
10895
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10896
+{
10897
+ int i;
10898
+
10899
+ /* initialize command BD except the last one */
10900
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10901
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10902
+ true);
10903
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10904
+ }
10905
+
10906
+ /* initialize the last command BD */
10907
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10908
+
10909
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10910
+}
10911
+
10912
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10913
+ int *bd_num_list,
10914
+ u32 type_num)
10915
+{
10916
+ u32 entries_per_desc, desc_index, index, offset, i;
10917
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10918
+ int ret;
10919
+
10920
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
10921
+ if (ret) {
10922
+ dev_err(&hdev->pdev->dev,
10923
+ "Get dfx bd num fail, status is %d.\n", ret);
10924
+ return ret;
10925
+ }
10926
+
10927
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
10928
+ for (i = 0; i < type_num; i++) {
10929
+ offset = hclge_dfx_bd_offset_list[i];
10930
+ index = offset % entries_per_desc;
10931
+ desc_index = offset / entries_per_desc;
10932
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10933
+ }
10934
+
10935
+ return ret;
10936
+}
10937
+
10938
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10939
+ struct hclge_desc *desc_src, int bd_num,
10940
+ enum hclge_opcode_type cmd)
10941
+{
10942
+ struct hclge_desc *desc = desc_src;
10943
+ int i, ret;
10944
+
10945
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
10946
+ for (i = 0; i < bd_num - 1; i++) {
10947
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10948
+ desc++;
10949
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
10950
+ }
10951
+
10952
+ desc = desc_src;
10953
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10954
+ if (ret)
10955
+ dev_err(&hdev->pdev->dev,
10956
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10957
+ cmd, ret);
10958
+
10959
+ return ret;
10960
+}
10961
+
10962
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10963
+ void *data)
10964
+{
10965
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10966
+ struct hclge_desc *desc = desc_src;
10967
+ u32 *reg = data;
10968
+
10969
+ entries_per_desc = ARRAY_SIZE(desc->data);
10970
+ reg_num = entries_per_desc * bd_num;
10971
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10972
+ for (i = 0; i < reg_num; i++) {
10973
+ index = i % entries_per_desc;
10974
+ desc_index = i / entries_per_desc;
10975
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10976
+ }
10977
+ for (i = 0; i < separator_num; i++)
10978
+ *reg++ = SEPARATOR_VALUE;
10979
+
10980
+ return reg_num + separator_num;
10981
+}
10982
+
10983
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10984
+{
10985
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10986
+ int data_len_per_desc, bd_num, i;
10987
+ int *bd_num_list;
10988
+ u32 data_len;
10989
+ int ret;
10990
+
10991
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
10992
+ if (!bd_num_list)
10993
+ return -ENOMEM;
10994
+
10995
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10996
+ if (ret) {
10997
+ dev_err(&hdev->pdev->dev,
10998
+ "Get dfx reg bd num fail, status is %d.\n", ret);
10999
+ goto out;
11000
+ }
11001
+
11002
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
11003
+ *len = 0;
11004
+ for (i = 0; i < dfx_reg_type_num; i++) {
11005
+ bd_num = bd_num_list[i];
11006
+ data_len = data_len_per_desc * bd_num;
11007
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11008
+ }
11009
+
11010
+out:
11011
+ kfree(bd_num_list);
11012
+ return ret;
11013
+}
11014
+
11015
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11016
+{
11017
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11018
+ int bd_num, bd_num_max, buf_len, i;
11019
+ struct hclge_desc *desc_src;
11020
+ int *bd_num_list;
11021
+ u32 *reg = data;
11022
+ int ret;
11023
+
11024
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11025
+ if (!bd_num_list)
11026
+ return -ENOMEM;
11027
+
11028
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11029
+ if (ret) {
11030
+ dev_err(&hdev->pdev->dev,
11031
+ "Get dfx reg bd num fail, status is %d.\n", ret);
11032
+ goto out;
11033
+ }
11034
+
11035
+ bd_num_max = bd_num_list[0];
11036
+ for (i = 1; i < dfx_reg_type_num; i++)
11037
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11038
+
11039
+ buf_len = sizeof(*desc_src) * bd_num_max;
11040
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
11041
+ if (!desc_src) {
11042
+ ret = -ENOMEM;
11043
+ goto out;
11044
+ }
11045
+
11046
+ for (i = 0; i < dfx_reg_type_num; i++) {
11047
+ bd_num = bd_num_list[i];
11048
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11049
+ hclge_dfx_reg_opcode_list[i]);
11050
+ if (ret) {
11051
+ dev_err(&hdev->pdev->dev,
11052
+ "Get dfx reg fail, status is %d.\n", ret);
11053
+ break;
11054
+ }
11055
+
11056
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11057
+ }
11058
+
11059
+ kfree(desc_src);
11060
+out:
11061
+ kfree(bd_num_list);
11062
+ return ret;
11063
+}
11064
+
11065
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11066
+ struct hnae3_knic_private_info *kinfo)
11067
+{
11068
+#define HCLGE_RING_REG_OFFSET 0x200
11069
+#define HCLGE_RING_INT_REG_OFFSET 0x4
11070
+
11071
+ int i, j, reg_num, separator_num;
11072
+ int data_num_sum;
11073
+ u32 *reg = data;
11074
+
11075
+ /* fetching per-PF registers valus from PF PCIe register space */
11076
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11077
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11078
+ for (i = 0; i < reg_num; i++)
11079
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11080
+ for (i = 0; i < separator_num; i++)
11081
+ *reg++ = SEPARATOR_VALUE;
11082
+ data_num_sum = reg_num + separator_num;
11083
+
11084
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
11085
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11086
+ for (i = 0; i < reg_num; i++)
11087
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11088
+ for (i = 0; i < separator_num; i++)
11089
+ *reg++ = SEPARATOR_VALUE;
11090
+ data_num_sum += reg_num + separator_num;
11091
+
11092
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
11093
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11094
+ for (j = 0; j < kinfo->num_tqps; j++) {
11095
+ for (i = 0; i < reg_num; i++)
11096
+ *reg++ = hclge_read_dev(&hdev->hw,
11097
+ ring_reg_addr_list[i] +
11098
+ HCLGE_RING_REG_OFFSET * j);
11099
+ for (i = 0; i < separator_num; i++)
11100
+ *reg++ = SEPARATOR_VALUE;
11101
+ }
11102
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11103
+
11104
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11105
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11106
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
11107
+ for (i = 0; i < reg_num; i++)
11108
+ *reg++ = hclge_read_dev(&hdev->hw,
11109
+ tqp_intr_reg_addr_list[i] +
11110
+ HCLGE_RING_INT_REG_OFFSET * j);
11111
+ for (i = 0; i < separator_num; i++)
11112
+ *reg++ = SEPARATOR_VALUE;
11113
+ }
11114
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11115
+
11116
+ return data_num_sum;
11117
+}
11118
+
618711119 static int hclge_get_regs_len(struct hnae3_handle *handle)
618811120 {
11121
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11122
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
618911123 struct hclge_vport *vport = hclge_get_vport(handle);
619011124 struct hclge_dev *hdev = vport->back;
6191
- u32 regs_num_32_bit, regs_num_64_bit;
11125
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11126
+ int regs_lines_32_bit, regs_lines_64_bit;
619211127 int ret;
619311128
619411129 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
619511130 if (ret) {
619611131 dev_err(&hdev->pdev->dev,
619711132 "Get register number failed, ret = %d.\n", ret);
6198
- return -EOPNOTSUPP;
11133
+ return ret;
619911134 }
620011135
6201
- return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
11136
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11137
+ if (ret) {
11138
+ dev_err(&hdev->pdev->dev,
11139
+ "Get dfx reg len failed, ret = %d.\n", ret);
11140
+ return ret;
11141
+ }
11142
+
11143
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11144
+ REG_SEPARATOR_LINE;
11145
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11146
+ REG_SEPARATOR_LINE;
11147
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11148
+ REG_SEPARATOR_LINE;
11149
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11150
+ REG_SEPARATOR_LINE;
11151
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11152
+ REG_SEPARATOR_LINE;
11153
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11154
+ REG_SEPARATOR_LINE;
11155
+
11156
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11157
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11158
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
620211159 }
620311160
620411161 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
620511162 void *data)
620611163 {
11164
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
620711165 struct hclge_vport *vport = hclge_get_vport(handle);
620811166 struct hclge_dev *hdev = vport->back;
620911167 u32 regs_num_32_bit, regs_num_64_bit;
6210
- int ret;
11168
+ int i, reg_num, separator_num, ret;
11169
+ u32 *reg = data;
621111170
621211171 *version = hdev->fw_version;
621311172
....@@ -6218,19 +11177,36 @@
621811177 return;
621911178 }
622011179
6221
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
11180
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11181
+
11182
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
622211183 if (ret) {
622311184 dev_err(&hdev->pdev->dev,
622411185 "Get 32 bit register failed, ret = %d.\n", ret);
622511186 return;
622611187 }
11188
+ reg_num = regs_num_32_bit;
11189
+ reg += reg_num;
11190
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11191
+ for (i = 0; i < separator_num; i++)
11192
+ *reg++ = SEPARATOR_VALUE;
622711193
6228
- data = (u32 *)data + regs_num_32_bit;
6229
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
6230
- data);
6231
- if (ret)
11194
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11195
+ if (ret) {
623211196 dev_err(&hdev->pdev->dev,
623311197 "Get 64 bit register failed, ret = %d.\n", ret);
11198
+ return;
11199
+ }
11200
+ reg_num = regs_num_64_bit * 2;
11201
+ reg += reg_num;
11202
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11203
+ for (i = 0; i < separator_num; i++)
11204
+ *reg++ = SEPARATOR_VALUE;
11205
+
11206
+ ret = hclge_get_dfx_reg(hdev, reg);
11207
+ if (ret)
11208
+ dev_err(&hdev->pdev->dev,
11209
+ "Get dfx register failed, ret = %d.\n", ret);
623411210 }
623511211
623611212 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
....@@ -6290,30 +11266,144 @@
629011266 }
629111267 }
629211268
6293
-static void hclge_get_port_type(struct hnae3_handle *handle,
6294
- u8 *port_type)
11269
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
629511270 {
629611271 struct hclge_vport *vport = hclge_get_vport(handle);
629711272 struct hclge_dev *hdev = vport->back;
6298
- u8 media_type = hdev->hw.mac.media_type;
629911273
6300
- switch (media_type) {
6301
- case HNAE3_MEDIA_TYPE_FIBER:
6302
- *port_type = PORT_FIBRE;
6303
- break;
6304
- case HNAE3_MEDIA_TYPE_COPPER:
6305
- *port_type = PORT_TP;
6306
- break;
6307
- case HNAE3_MEDIA_TYPE_UNKNOWN:
6308
- default:
6309
- *port_type = PORT_OTHER;
6310
- break;
11274
+ return hclge_config_gro(hdev, enable);
11275
+}
11276
+
11277
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11278
+{
11279
+ struct hclge_vport *vport = &hdev->vport[0];
11280
+ struct hnae3_handle *handle = &vport->nic;
11281
+ u8 tmp_flags;
11282
+ int ret;
11283
+
11284
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11285
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11286
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
631111287 }
11288
+
11289
+ if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11290
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11291
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11292
+ tmp_flags & HNAE3_MPE);
11293
+ if (!ret) {
11294
+ clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11295
+ hclge_enable_vlan_filter(handle,
11296
+ tmp_flags & HNAE3_VLAN_FLTR);
11297
+ }
11298
+ }
11299
+}
11300
+
11301
+static bool hclge_module_existed(struct hclge_dev *hdev)
11302
+{
11303
+ struct hclge_desc desc;
11304
+ u32 existed;
11305
+ int ret;
11306
+
11307
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11308
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11309
+ if (ret) {
11310
+ dev_err(&hdev->pdev->dev,
11311
+ "failed to get SFP exist state, ret = %d\n", ret);
11312
+ return false;
11313
+ }
11314
+
11315
+ existed = le32_to_cpu(desc.data[0]);
11316
+
11317
+ return existed != 0;
11318
+}
11319
+
11320
+/* need 6 bds(total 140 bytes) in one reading
11321
+ * return the number of bytes actually read, 0 means read failed.
11322
+ */
11323
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11324
+ u32 len, u8 *data)
11325
+{
11326
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11327
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11328
+ u16 read_len;
11329
+ u16 copy_len;
11330
+ int ret;
11331
+ int i;
11332
+
11333
+ /* setup all 6 bds to read module eeprom info. */
11334
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11335
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11336
+ true);
11337
+
11338
+ /* bd0~bd4 need next flag */
11339
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11340
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11341
+ }
11342
+
11343
+ /* setup bd0, this bd contains offset and read length. */
11344
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11345
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11346
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11347
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
11348
+
11349
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
11350
+ if (ret) {
11351
+ dev_err(&hdev->pdev->dev,
11352
+ "failed to get SFP eeprom info, ret = %d\n", ret);
11353
+ return 0;
11354
+ }
11355
+
11356
+ /* copy sfp info from bd0 to out buffer. */
11357
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11358
+ memcpy(data, sfp_info_bd0->data, copy_len);
11359
+ read_len = copy_len;
11360
+
11361
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
11362
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11363
+ if (read_len >= len)
11364
+ return read_len;
11365
+
11366
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11367
+ memcpy(data + read_len, desc[i].data, copy_len);
11368
+ read_len += copy_len;
11369
+ }
11370
+
11371
+ return read_len;
11372
+}
11373
+
11374
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11375
+ u32 len, u8 *data)
11376
+{
11377
+ struct hclge_vport *vport = hclge_get_vport(handle);
11378
+ struct hclge_dev *hdev = vport->back;
11379
+ u32 read_len = 0;
11380
+ u16 data_len;
11381
+
11382
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11383
+ return -EOPNOTSUPP;
11384
+
11385
+ if (!hclge_module_existed(hdev))
11386
+ return -ENXIO;
11387
+
11388
+ while (read_len < len) {
11389
+ data_len = hclge_get_sfp_eeprom_info(hdev,
11390
+ offset + read_len,
11391
+ len - read_len,
11392
+ data + read_len);
11393
+ if (!data_len)
11394
+ return -EIO;
11395
+
11396
+ read_len += data_len;
11397
+ }
11398
+
11399
+ return 0;
631211400 }
631311401
/* hclge_ops: the PF driver's implementation of the hnae3_ae_ops
 * interface, wired up as function pointers consumed by the hns3
 * client layer. NOTE(review): this diff view omits lines at hunk
 * boundaries, so the member list here may be incomplete.
 */
631411402 static const struct hnae3_ae_ops hclge_ops = {
631511403 .init_ae_dev = hclge_init_ae_dev,
631611404 .uninit_ae_dev = hclge_uninit_ae_dev,
11405
+ .flr_prepare = hclge_flr_prepare,
11406
+ .flr_done = hclge_flr_done,
631711407 .init_client_instance = hclge_init_client_instance,
631811408 .uninit_client_instance = hclge_uninit_client_instance,
631911409 .map_ring_to_vector = hclge_map_ring_to_vector,
....@@ -6321,14 +11411,19 @@
632111411 .get_vector = hclge_get_vector,
632211412 .put_vector = hclge_put_vector,
632311413 .set_promisc_mode = hclge_set_promisc_mode,
11414
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
632411415 .set_loopback = hclge_set_loopback,
632511416 .start = hclge_ae_start,
632611417 .stop = hclge_ae_stop,
11418
+ .client_start = hclge_client_start,
11419
+ .client_stop = hclge_client_stop,
632711420 .get_status = hclge_get_status,
632811421 .get_ksettings_an_result = hclge_get_ksettings_an_result,
6329
- .update_speed_duplex_h = hclge_update_speed_duplex_h,
633011422 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
633111423 .get_media_type = hclge_get_media_type,
11424
+ .check_port_speed = hclge_check_port_speed,
11425
+ .get_fec = hclge_get_fec,
11426
+ .set_fec = hclge_set_fec,
633211427 .get_rss_key_size = hclge_get_rss_key_size,
633311428 .get_rss_indir_size = hclge_get_rss_indir_size,
633411429 .get_rss = hclge_get_rss,
....@@ -6338,18 +11433,21 @@
633811433 .get_tc_size = hclge_get_tc_size,
633911434 .get_mac_addr = hclge_get_mac_addr,
634011435 .set_mac_addr = hclge_set_mac_addr,
11436
+ .do_ioctl = hclge_do_ioctl,
634111437 .add_uc_addr = hclge_add_uc_addr,
634211438 .rm_uc_addr = hclge_rm_uc_addr,
634311439 .add_mc_addr = hclge_add_mc_addr,
634411440 .rm_mc_addr = hclge_rm_mc_addr,
6345
- .update_mta_status = hclge_update_mta_status,
634611441 .set_autoneg = hclge_set_autoneg,
634711442 .get_autoneg = hclge_get_autoneg,
11443
+ .restart_autoneg = hclge_restart_autoneg,
11444
+ .halt_autoneg = hclge_halt_autoneg,
634811445 .get_pauseparam = hclge_get_pauseparam,
634911446 .set_pauseparam = hclge_set_pauseparam,
635011447 .set_mtu = hclge_set_mtu,
635111448 .reset_queue = hclge_reset_tqp,
635211449 .get_stats = hclge_get_stats,
11450
+ .get_mac_stats = hclge_get_mac_stat,
635311451 .update_stats = hclge_update_stats,
635411452 .get_strings = hclge_get_strings,
635511453 .get_sset_count = hclge_get_sset_count,
....@@ -6360,15 +11458,41 @@
636011458 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
636111459 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
636211460 .reset_event = hclge_reset_event,
11461
+ .get_reset_level = hclge_get_reset_level,
11462
+ .set_default_reset_request = hclge_set_def_reset_request,
636311463 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
636411464 .set_channels = hclge_set_channels,
636511465 .get_channels = hclge_get_channels,
6366
- .get_flowctrl_adv = hclge_get_flowctrl_adv,
636711466 .get_regs_len = hclge_get_regs_len,
636811467 .get_regs = hclge_get_regs,
636911468 .set_led_id = hclge_set_led_id,
637011469 .get_link_mode = hclge_get_link_mode,
6371
- .get_port_type = hclge_get_port_type,
11470
+ .add_fd_entry = hclge_add_fd_entry,
11471
+ .del_fd_entry = hclge_del_fd_entry,
11472
+ .del_all_fd_entries = hclge_del_all_fd_entries,
11473
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11474
+ .get_fd_rule_info = hclge_get_fd_rule_info,
11475
+ .get_fd_all_rules = hclge_get_all_rules,
11476
+ .enable_fd = hclge_enable_fd,
11477
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11478
+ .dbg_run_cmd = hclge_dbg_run_cmd,
11479
+ .handle_hw_ras_error = hclge_handle_hw_ras_error,
11480
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
11481
+ .ae_dev_resetting = hclge_ae_dev_resetting,
11482
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11483
+ .set_gro_en = hclge_gro_en,
11484
+ .get_global_queue_id = hclge_covert_handle_qid_global,
11485
+ .set_timer_task = hclge_set_timer_task,
11486
+ .mac_connect_phy = hclge_mac_connect_phy,
11487
+ .mac_disconnect_phy = hclge_mac_disconnect_phy,
11488
+ .get_vf_config = hclge_get_vf_config,
11489
+ .set_vf_link_state = hclge_set_vf_link_state,
11490
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
11491
+ .set_vf_trust = hclge_set_vf_trust,
11492
+ .set_vf_rate = hclge_set_vf_rate,
11493
+ .set_vf_mac = hclge_set_vf_mac,
11494
+ .get_module_eeprom = hclge_get_module_eeprom,
11495
+ .get_cmdq_stat = hclge_get_cmdq_stat,
637211496 };
637311497
637411498 static struct hnae3_ae_algo ae_algo = {
....@@ -6380,6 +11504,12 @@
638011504 {
638111505 pr_info("%s is initializing\n", HCLGE_NAME);
638211506
11507
+ hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11508
+ if (!hclge_wq) {
11509
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11510
+ return -ENOMEM;
11511
+ }
11512
+
638311513 hnae3_register_ae_algo(&ae_algo);
638411514
638511515 return 0;
....@@ -6389,6 +11519,7 @@
638911519 {
639011520 hnae3_unregister_ae_algo_prepare(&ae_algo);
639111521 hnae3_unregister_ae_algo(&ae_algo);
11522
+ destroy_workqueue(hclge_wq);
639211523 }
639311524 module_init(hclge_init);
639411525 module_exit(hclge_exit);