forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-12-19
Commit: 9370bb92b2d16684ee45cf24e879c93c509162da
File:   kernel/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #include <linux/etherdevice.h>
+#include <linux/iopoll.h>
 #include <net/rtnetlink.h>
 #include "hclgevf_cmd.h"
 #include "hclgevf_main.h"
@@ -10,21 +11,84 @@
 
 #define HCLGEVF_NAME	"hclgevf"
 
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+#define HCLGEVF_RESET_MAX_FAIL_CNT	5
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
 static struct hnae3_ae_algo ae_algovf;
 
+static struct workqueue_struct *hclgevf_wq;
+
 static const struct pci_device_id ae_algovf_pci_tbl[] = {
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	/* required last entry */
 	{0, }
 };
 
+static const u8 hclgevf_hash_key[] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
 
-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
-	struct hnae3_handle *handle)
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
+					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
+					 HCLGEVF_CMDQ_TX_DEPTH_REG,
+					 HCLGEVF_CMDQ_TX_TAIL_REG,
+					 HCLGEVF_CMDQ_TX_HEAD_REG,
+					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
+					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
+					 HCLGEVF_CMDQ_RX_DEPTH_REG,
+					 HCLGEVF_CMDQ_RX_TAIL_REG,
+					 HCLGEVF_CMDQ_RX_HEAD_REG,
+					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
+					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
+					 HCLGEVF_CMDQ_INTR_EN_REG,
+					 HCLGEVF_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+					   HCLGEVF_RST_ING,
+					   HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+					 HCLGEVF_RING_RX_ADDR_H_REG,
+					 HCLGEVF_RING_RX_BD_NUM_REG,
+					 HCLGEVF_RING_RX_BD_LENGTH_REG,
+					 HCLGEVF_RING_RX_MERGE_EN_REG,
+					 HCLGEVF_RING_RX_TAIL_REG,
+					 HCLGEVF_RING_RX_HEAD_REG,
+					 HCLGEVF_RING_RX_FBD_NUM_REG,
+					 HCLGEVF_RING_RX_OFFSET_REG,
+					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
+					 HCLGEVF_RING_RX_STASH_REG,
+					 HCLGEVF_RING_RX_BD_ERR_REG,
+					 HCLGEVF_RING_TX_ADDR_L_REG,
+					 HCLGEVF_RING_TX_ADDR_H_REG,
+					 HCLGEVF_RING_TX_BD_NUM_REG,
+					 HCLGEVF_RING_TX_PRIORITY_REG,
+					 HCLGEVF_RING_TX_TC_REG,
+					 HCLGEVF_RING_TX_MERGE_EN_REG,
+					 HCLGEVF_RING_TX_TAIL_REG,
+					 HCLGEVF_RING_TX_HEAD_REG,
+					 HCLGEVF_RING_TX_FBD_NUM_REG,
+					 HCLGEVF_RING_TX_OFFSET_REG,
+					 HCLGEVF_RING_TX_EBD_NUM_REG,
+					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
+					 HCLGEVF_RING_TX_BD_ERR_REG,
+					 HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+					     HCLGEVF_TQP_INTR_GL0_REG,
+					     HCLGEVF_TQP_INTR_GL1_REG,
+					     HCLGEVF_TQP_INTR_GL2_REG,
+					     HCLGEVF_TQP_INTR_RL_REG};
+
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
 {
 	if (!handle->client)
 		return container_of(handle, struct hclgevf_dev, nic);
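Note on the new register tables: the four arrays added above are plain data enumerating command-queue, misc, per-ring and per-vector interrupt register offsets; the code that walks them (presumably an ethtool register-dump path) is outside this excerpt. A minimal user-space model of such a walk, with made-up offsets and a faked MMIO read:

#include <stdio.h>
#include <stdint.h>

/* Stand-in offsets; the real HCLGEVF_* values live in the driver headers. */
static const uint32_t cmdq_regs[] = { 0x27000, 0x27004, 0x27008 };

static uint32_t read_dev(uint32_t addr)
{
	return addr ^ 0xdeadbeef;	/* fake MMIO read for the sketch */
}

int main(void)
{
	uint32_t data[3];
	size_t i, n = sizeof(cmdq_regs) / sizeof(cmdq_regs[0]);

	/* copy each listed register into a caller-provided dump buffer */
	for (i = 0; i < n; i++)
		data[i] = read_dev(cmdq_regs[i]);

	for (i = 0; i < n; i++)
		printf("0x%05x = 0x%08x\n", (unsigned)cmdq_regs[i],
		       (unsigned)data[i]);
	return 0;
}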
@@ -36,16 +100,15 @@
 
 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
 {
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hnae3_queue *queue;
 	struct hclgevf_desc desc;
 	struct hclgevf_tqp *tqp;
 	int status;
 	int i;
 
-	for (i = 0; i < hdev->num_tqps; i++) {
-		queue = handle->kinfo.tqp[i];
-		tqp = container_of(queue, struct hclgevf_tqp, q);
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
 		hclgevf_cmd_setup_basic_desc(&desc,
 					     HCLGEVF_OPC_QUERY_RX_STATUS,
 					     true);
@@ -82,17 +145,16 @@
 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_tqp *tqp;
 	u64 *buff = data;
 	int i;
 
-	for (i = 0; i < hdev->num_tqps; i++) {
-		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
 	}
 	for (i = 0; i < kinfo->num_tqps; i++) {
-		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
 	}
 
@@ -101,29 +163,29 @@
 
 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 
-	return hdev->num_tqps * 2;
+	return kinfo->num_tqps * 2;
 }
 
 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	u8 *buff = data;
-	int i = 0;
+	int i;
 
-	for (i = 0; i < hdev->num_tqps; i++) {
-		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
-						       struct hclgevf_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+						       struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
 			 tqp->index);
 		buff += ETH_GSTRING_LEN;
 	}
 
-	for (i = 0; i < hdev->num_tqps; i++) {
-		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
-						       struct hclgevf_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+						       struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
 			 tqp->index);
 		buff += ETH_GSTRING_LEN;
 	}
@@ -168,13 +230,25 @@
 	hclgevf_tqps_get_stats(handle, data);
 }
 
+static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
+				   u8 subcode)
+{
+	if (msg) {
+		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
+		msg->code = code;
+		msg->subcode = subcode;
+	}
+}
+
 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
 {
+	struct hclge_vf_to_pf_msg send_msg;
 	u8 resp_msg;
 	int status;
 
-	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
-				      true, &resp_msg, sizeof(u8));
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+				      sizeof(resp_msg));
 	if (status) {
 		dev_err(&hdev->pdev->dev,
 			"VF request to get TC info from PF failed %d",
@@ -187,14 +261,42 @@
 	return 0;
 }
 
-static int hclge_get_queue_info(struct hclgevf_dev *hdev)
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
 {
-#define HCLGEVF_TQPS_RSS_INFO_LEN	8
+	struct hnae3_handle *nic = &hdev->nic;
+	struct hclge_vf_to_pf_msg send_msg;
+	u8 resp_msg;
+	int ret;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+				   sizeof(u8));
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get port based vlan state failed %d",
+			ret);
+		return ret;
+	}
+
+	nic->port_base_vlan_state = resp_msg;
+
+	return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN	6
+#define HCLGEVF_TQPS_ALLOC_OFFSET	0
+#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
+#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4
+
 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+	struct hclge_vf_to_pf_msg send_msg;
 	int status;
 
-	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
-				      true, resp_msg,
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
 				      HCLGEVF_TQPS_RSS_INFO_LEN);
 	if (status) {
 		dev_err(&hdev->pdev->dev,
@@ -203,10 +305,80 @@
 		return status;
 	}
 
-	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
-	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
-	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
-	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
+	       sizeof(u16));
+	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
+	       sizeof(u16));
+	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
+	       sizeof(u16));
+
+	return 0;
+}
+
+static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
+#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
+#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2
+
+	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+	struct hclge_vf_to_pf_msg send_msg;
+	int ret;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get tqp depth info from PF failed %d",
+			ret);
+		return ret;
+	}
+
+	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
+	       sizeof(u16));
+	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
+	       sizeof(u16));
+
+	return 0;
+}
+
+static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_vf_to_pf_msg send_msg;
+	u16 qid_in_pf = 0;
+	u8 resp_data[2];
+	int ret;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
+	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
+				   sizeof(resp_data));
+	if (!ret)
+		qid_in_pf = *(u16 *)resp_data;
+
+	return qid_in_pf;
+}
+
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+	struct hclge_vf_to_pf_msg send_msg;
+	u8 resp_msg[2];
+	int ret;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+				   sizeof(resp_msg));
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get the pf port media type failed %d",
+			ret);
+		return ret;
+	}
+
+	hdev->hw.mac.media_type = resp_msg[0];
+	hdev->hw.mac.module_type = resp_msg[1];
 
 	return 0;
 }
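Note: the PF's replies arrive as raw byte buffers, and the patch replaces magic indexes (0/2/4/6) with named offsets. Unaligned-safe decoding of such a reply, modeled in user space (field values are invented; the driver relies on a little-endian layout shared with the PF):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TQPS_ALLOC_OFFSET		0
#define TQPS_RSS_SIZE_OFFSET		2
#define TQPS_RX_BUFFER_LEN_OFFSET	4

int main(void)
{
	/* 6-byte response: three little-endian u16 fields */
	uint8_t resp[6] = { 0x08, 0x00, 0x04, 0x00, 0x00, 0x08 };
	uint16_t num_tqps, rss_size_max, rx_buf_len;

	/* memcpy rather than pointer casts: the buffer may be unaligned */
	memcpy(&num_tqps, &resp[TQPS_ALLOC_OFFSET], sizeof(uint16_t));
	memcpy(&rss_size_max, &resp[TQPS_RSS_SIZE_OFFSET], sizeof(uint16_t));
	memcpy(&rx_buf_len, &resp[TQPS_RX_BUFFER_LEN_OFFSET], sizeof(uint16_t));

	printf("tqps=%u rss=%u buf=%u\n", (unsigned)num_tqps,
	       (unsigned)rss_size_max, (unsigned)rx_buf_len);
	return 0;
}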
@@ -215,12 +387,6 @@
 {
 	struct hclgevf_tqp *tqp;
 	int i;
-
-	/* if this is on going reset then we need to re-allocate the TPQs
-	 * since we cannot assume we would get same number of TPQs back from PF
-	 */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		devm_kfree(&hdev->pdev->dev, hdev->htqp);
 
 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
 				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
@@ -235,7 +401,8 @@
 
 		tqp->q.ae_algo = &ae_algovf;
 		tqp->q.buf_size = hdev->rx_buf_len;
-		tqp->q.desc_num = hdev->num_desc;
+		tqp->q.tx_desc_num = hdev->num_tx_desc;
+		tqp->q.rx_desc_num = hdev->num_rx_desc;
 		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
 				 i * HCLGEVF_TQP_REG_SIZE;
 
@@ -250,11 +417,12 @@
 	struct hnae3_handle *nic = &hdev->nic;
 	struct hnae3_knic_private_info *kinfo;
 	u16 new_tqps = hdev->num_tqps;
-	int i;
+	unsigned int i;
 
 	kinfo = &nic->kinfo;
 	kinfo->num_tc = 0;
-	kinfo->num_desc = hdev->num_desc;
+	kinfo->num_tx_desc = hdev->num_tx_desc;
+	kinfo->num_rx_desc = hdev->num_rx_desc;
 	kinfo->rx_buf_len = hdev->rx_buf_len;
 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
 		if (hdev->hw_tc_map & BIT(i))
@@ -264,12 +432,6 @@
 		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
 	new_tqps = kinfo->rss_size * kinfo->num_tc;
 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
-
-	/* if this is on going reset then we need to re-allocate the hnae queues
-	 * as well since number of TPQs from PF might have changed.
-	 */
-	if (hclgevf_dev_ongoing_reset(hdev))
-		devm_kfree(&hdev->pdev->dev, kinfo->tqp);
 
 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
@@ -282,16 +444,23 @@
 		kinfo->tqp[i] = &hdev->htqp[i].q;
 	}
 
+	/* after init the max rss_size and tqps, adjust the default tqp numbers
+	 * and rss size with the actual vector numbers
+	 */
+	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+				kinfo->rss_size);
+
 	return 0;
 }
 
 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
 {
+	struct hclge_vf_to_pf_msg send_msg;
 	int status;
-	u8 resp_msg;
 
-	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
-				      0, false, &resp_msg, sizeof(u8));
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"VF failed to fetch link status(%d) from PF", status);
@@ -299,18 +468,42 @@
 
 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 {
+	struct hnae3_handle *rhandle = &hdev->roce;
 	struct hnae3_handle *handle = &hdev->nic;
+	struct hnae3_client *rclient;
 	struct hnae3_client *client;
 
+	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+		return;
+
 	client = handle->client;
+	rclient = hdev->roce_client;
 
 	link_state =
 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
 
 	if (link_state != hdev->hw.mac.link) {
 		client->ops->link_status_change(handle, !!link_state);
+		if (rclient && rclient->ops->link_status_change)
+			rclient->ops->link_status_change(rhandle, !!link_state);
 		hdev->hw.mac.link = link_state;
 	}
+
+	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
+}
+
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING	0
+#define HCLGEVF_SUPPORTED	1
+
+	struct hclge_vf_to_pf_msg send_msg;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
+	send_msg.data[0] = HCLGEVF_ADVERTISING;
+	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+	send_msg.data[0] = HCLGEVF_SUPPORTED;
+	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 }
 
 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -322,12 +515,6 @@
 	nic->pdev = hdev->pdev;
 	nic->numa_node_mask = hdev->numa_node_mask;
 	nic->flags |= HNAE3_SUPPORT_VF;
-
-	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
-		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
-			hdev->ae_dev->dev_type);
-		return -EINVAL;
-	}
 
 	ret = hclgevf_knic_setup(hdev);
 	if (ret)
@@ -357,6 +544,7 @@
 	int alloc = 0;
 	int i, j;
 
+	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
 	vector_num = min(hdev->num_msi_left, vector_num);
 
 	for (j = 0; j < vector_num; j++) {
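Note: both this hunk and the earlier hclgevf_knic_setup() change clamp resources by hdev->num_nic_msix - 1, reserving (presumably) one vector for non-queue use, before clamping again by what is still unallocated. The double clamp in numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_nic_msix = 17;	/* assumed budget for the example */
	unsigned int num_msi_left = 12;	/* vectors not yet handed out */
	unsigned int vector_num = 16;	/* caller's request */

	vector_num = min_u(num_nic_msix - 1, vector_num);	/* -> 16 */
	vector_num = min_u(num_msi_left, vector_num);		/* -> 12 */
	printf("grant %u vectors\n", vector_num);
	return 0;
}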
@@ -391,6 +579,46 @@
 		return i;
 
 	return -EINVAL;
+}
+
+static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
+				    const u8 hfunc, const u8 *key)
+{
+	struct hclgevf_rss_config_cmd *req;
+	unsigned int key_offset = 0;
+	struct hclgevf_desc desc;
+	int key_counts;
+	int key_size;
+	int ret;
+
+	key_counts = HCLGEVF_RSS_KEY_SIZE;
+	req = (struct hclgevf_rss_config_cmd *)desc.data;
+
+	while (key_counts) {
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+					     false);
+
+		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
+		req->hash_config |=
+			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
+
+		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
+		memcpy(req->hash_key,
+		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+
+		key_counts -= key_size;
+		key_offset++;
+		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"Configure RSS config fail, status = %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return 0;
 }
 
 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
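Note: hclgevf_set_rss_algo_key() writes the 40-byte RSS key in per-descriptor chunks. Judging by the 40-byte hclgevf_hash_key[] above and the old three-pass read loop removed later in this patch, HCLGEVF_RSS_HASH_KEY_NUM is 16, so the split is 16 + 16 + 8. A compilable model of that loop:

#include <stdio.h>
#include <string.h>

#define RSS_KEY_SIZE	40	/* matches hclgevf_hash_key[] above */
#define KEY_PER_DESC	16	/* assumed per-descriptor key payload */

int main(void)
{
	unsigned char key[RSS_KEY_SIZE] = { 0 };
	unsigned char desc_payload[KEY_PER_DESC];
	int remaining = RSS_KEY_SIZE, offset = 0;

	/* 40 bytes travel as 16 + 16 + 8 across three descriptors */
	while (remaining) {
		int chunk = remaining < KEY_PER_DESC ? remaining : KEY_PER_DESC;

		memcpy(desc_payload, key + offset * KEY_PER_DESC, chunk);
		printf("desc %d carries %d bytes\n", offset, chunk);
		remaining -= chunk;
		offset++;
	}
	return 0;
}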
@@ -442,8 +670,8 @@
 	u16 tc_size[HCLGEVF_MAX_TC_NUM];
 	struct hclgevf_desc desc;
 	u16 roundup_size;
+	unsigned int i;
 	int status;
-	int i;
 
 	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
 
@@ -473,51 +701,39 @@
 	return status;
 }
 
-static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
-				  u8 *key)
+/* for revision 0x20, vf shared the same rss config with pf */
+static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_config_cmd *req;
-	int lkup_times = key ? 3 : 1;
-	struct hclgevf_desc desc;
-	int key_offset;
-	int key_size;
-	int status;
+#define HCLGEVF_RSS_MBX_RESP_LEN	8
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+	struct hclge_vf_to_pf_msg send_msg;
+	u16 msg_num, hash_key_index;
+	u8 index;
+	int ret;
 
-	req = (struct hclgevf_rss_config_cmd *)desc.data;
-	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
-
-	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
-		hclgevf_cmd_setup_basic_desc(&desc,
-					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
-					     true);
-		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
-
-		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-		if (status) {
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
+	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+			HCLGEVF_RSS_MBX_RESP_LEN;
+	for (index = 0; index < msg_num; index++) {
+		send_msg.data[0] = index;
+		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+					   HCLGEVF_RSS_MBX_RESP_LEN);
+		if (ret) {
 			dev_err(&hdev->pdev->dev,
-				"failed to get hardware RSS cfg, status = %d\n",
-				status);
-			return status;
+				"VF get rss hash key from PF failed, ret=%d",
+				ret);
+			return ret;
 		}
 
-		if (key_offset == 2)
-			key_size =
-			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
+		if (index == msg_num - 1)
+			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+			       &resp_msg[0],
+			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
 		else
-			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
-		if (key)
-			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
-			       req->hash_key,
-			       key_size);
-	}
-
-	if (hash) {
-		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
-			*hash = ETH_RSS_HASH_TOP;
-		else
-			*hash = ETH_RSS_HASH_UNKNOWN;
+			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
 	}
 
 	return 0;
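Note: on revision 0x20 hardware the VF cannot read the shared RSS key directly, so hclgevf_get_rss_hash_key() fetches it over the mailbox in 8-byte pieces. The round-trip arithmetic — ceil(40/8) = 5 messages, with the last-chunk branch guarding key sizes that are not a multiple of 8 — modeled compilably:

#include <stdio.h>

#define RSS_KEY_SIZE	40
#define MBX_RESP_LEN	8	/* key bytes returned per mailbox response */

int main(void)
{
	/* ceiling division: (40 + 8 - 1) / 8 = 5 round trips */
	int msg_num = (RSS_KEY_SIZE + MBX_RESP_LEN - 1) / MBX_RESP_LEN;
	int index;

	for (index = 0; index < msg_num; index++) {
		int start = MBX_RESP_LEN * index;
		int len = (index == msg_num - 1) ?
			  RSS_KEY_SIZE - start : MBX_RESP_LEN;

		printf("msg %d: key[%d..%d]\n", index, start, start + len - 1);
	}
	return 0;
}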
@@ -528,21 +744,98 @@
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int i;
+	int i, ret;
+
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		/* Get hash algorithm */
+		if (hfunc) {
+			switch (rss_cfg->hash_algo) {
+			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
+				*hfunc = ETH_RSS_HASH_TOP;
+				break;
+			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
+				*hfunc = ETH_RSS_HASH_XOR;
+				break;
+			default:
+				*hfunc = ETH_RSS_HASH_UNKNOWN;
+				break;
+			}
+		}
+
+		/* Get the RSS Key required by the user */
+		if (key)
+			memcpy(key, rss_cfg->rss_hash_key,
+			       HCLGEVF_RSS_KEY_SIZE);
+	} else {
+		if (hfunc)
+			*hfunc = ETH_RSS_HASH_TOP;
+		if (key) {
+			ret = hclgevf_get_rss_hash_key(hdev);
+			if (ret)
+				return ret;
+			memcpy(key, rss_cfg->rss_hash_key,
+			       HCLGEVF_RSS_KEY_SIZE);
+		}
+	}
 
 	if (indir)
 		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 			indir[i] = rss_cfg->rss_indirection_tbl[i];
 
-	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+	return 0;
+}
+
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+				   u8 *hash_algo)
+{
+	switch (hfunc) {
+	case ETH_RSS_HASH_TOP:
+		*hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+		return 0;
+	case ETH_RSS_HASH_XOR:
+		*hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+		return 0;
+	case ETH_RSS_HASH_NO_CHANGE:
+		*hash_algo = hdev->rss_cfg.hash_algo;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
-			   const u8 *key, const u8 hfunc)
+			   const u8 *key, const u8 hfunc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int i;
+	u8 hash_algo;
+	int ret, i;
+
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+		ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+		if (ret)
+			return ret;
+
+		/* Set the RSS Hash Key if specififed by the user */
+		if (key) {
+			ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+			if (ret) {
+				dev_err(&hdev->pdev->dev,
+					"invalid hfunc type %u\n", hfunc);
+				return ret;
+			}
+
+			/* Update the shadow RSS key with user specified qids */
+			memcpy(rss_cfg->rss_hash_key, key,
+			       HCLGEVF_RSS_KEY_SIZE);
+		} else {
+			ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+						       rss_cfg->rss_hash_key);
+			if (ret)
+				return ret;
+		}
+		rss_cfg->hash_algo = hash_algo;
+	}
 
 	/* update the shadow RSS table with user specified qids */
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
550843
551844 /* update the hardware */
552845 return hclgevf_set_rss_indir_table(hdev);
846
+}
847
+
848
+static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
849
+{
850
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
851
+
852
+ if (nfc->data & RXH_L4_B_2_3)
853
+ hash_sets |= HCLGEVF_D_PORT_BIT;
854
+ else
855
+ hash_sets &= ~HCLGEVF_D_PORT_BIT;
856
+
857
+ if (nfc->data & RXH_IP_SRC)
858
+ hash_sets |= HCLGEVF_S_IP_BIT;
859
+ else
860
+ hash_sets &= ~HCLGEVF_S_IP_BIT;
861
+
862
+ if (nfc->data & RXH_IP_DST)
863
+ hash_sets |= HCLGEVF_D_IP_BIT;
864
+ else
865
+ hash_sets &= ~HCLGEVF_D_IP_BIT;
866
+
867
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
868
+ hash_sets |= HCLGEVF_V_TAG_BIT;
869
+
870
+ return hash_sets;
871
+}
872
+
873
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
874
+ struct ethtool_rxnfc *nfc)
875
+{
876
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
877
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
878
+ struct hclgevf_rss_input_tuple_cmd *req;
879
+ struct hclgevf_desc desc;
880
+ u8 tuple_sets;
881
+ int ret;
882
+
883
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
884
+ return -EOPNOTSUPP;
885
+
886
+ if (nfc->data &
887
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
888
+ return -EINVAL;
889
+
890
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
891
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
892
+
893
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
894
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
895
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
896
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
897
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
898
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
899
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
900
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
901
+
902
+ tuple_sets = hclgevf_get_rss_hash_bits(nfc);
903
+ switch (nfc->flow_type) {
904
+ case TCP_V4_FLOW:
905
+ req->ipv4_tcp_en = tuple_sets;
906
+ break;
907
+ case TCP_V6_FLOW:
908
+ req->ipv6_tcp_en = tuple_sets;
909
+ break;
910
+ case UDP_V4_FLOW:
911
+ req->ipv4_udp_en = tuple_sets;
912
+ break;
913
+ case UDP_V6_FLOW:
914
+ req->ipv6_udp_en = tuple_sets;
915
+ break;
916
+ case SCTP_V4_FLOW:
917
+ req->ipv4_sctp_en = tuple_sets;
918
+ break;
919
+ case SCTP_V6_FLOW:
920
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
921
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
922
+ return -EINVAL;
923
+
924
+ req->ipv6_sctp_en = tuple_sets;
925
+ break;
926
+ case IPV4_FLOW:
927
+ req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
928
+ break;
929
+ case IPV6_FLOW:
930
+ req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
931
+ break;
932
+ default:
933
+ return -EINVAL;
934
+ }
935
+
936
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
937
+ if (ret) {
938
+ dev_err(&hdev->pdev->dev,
939
+ "Set rss tuple fail, status = %d\n", ret);
940
+ return ret;
941
+ }
942
+
943
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
944
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
945
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
946
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
947
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
948
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
949
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
950
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
951
+ return 0;
952
+}
953
+
954
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
955
+ struct ethtool_rxnfc *nfc)
956
+{
957
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
958
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
959
+ u8 tuple_sets;
960
+
961
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
962
+ return -EOPNOTSUPP;
963
+
964
+ nfc->data = 0;
965
+
966
+ switch (nfc->flow_type) {
967
+ case TCP_V4_FLOW:
968
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
969
+ break;
970
+ case UDP_V4_FLOW:
971
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
972
+ break;
973
+ case TCP_V6_FLOW:
974
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
975
+ break;
976
+ case UDP_V6_FLOW:
977
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
978
+ break;
979
+ case SCTP_V4_FLOW:
980
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
981
+ break;
982
+ case SCTP_V6_FLOW:
983
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
984
+ break;
985
+ case IPV4_FLOW:
986
+ case IPV6_FLOW:
987
+ tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
988
+ break;
989
+ default:
990
+ return -EINVAL;
991
+ }
992
+
993
+ if (!tuple_sets)
994
+ return 0;
995
+
996
+ if (tuple_sets & HCLGEVF_D_PORT_BIT)
997
+ nfc->data |= RXH_L4_B_2_3;
998
+ if (tuple_sets & HCLGEVF_S_PORT_BIT)
999
+ nfc->data |= RXH_L4_B_0_1;
1000
+ if (tuple_sets & HCLGEVF_D_IP_BIT)
1001
+ nfc->data |= RXH_IP_DST;
1002
+ if (tuple_sets & HCLGEVF_S_IP_BIT)
1003
+ nfc->data |= RXH_IP_SRC;
1004
+
1005
+ return 0;
1006
+}
1007
+
1008
+static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
1009
+ struct hclgevf_rss_cfg *rss_cfg)
1010
+{
1011
+ struct hclgevf_rss_input_tuple_cmd *req;
1012
+ struct hclgevf_desc desc;
1013
+ int ret;
1014
+
1015
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
1016
+
1017
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
1018
+
1019
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
1020
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
1021
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
1022
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
1023
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
1024
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
1025
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
1026
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
1027
+
1028
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1029
+ if (ret)
1030
+ dev_err(&hdev->pdev->dev,
1031
+ "Configure rss input fail, status = %d\n", ret);
1032
+ return ret;
5531033 }
5541034
5551035 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
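Note: hclgevf_get_rss_hash_bits() translates ethtool's RXH_* request flags into the device's per-flow tuple bits, and hclgevf_get_rss_tuple() inverts the mapping. A compilable round-trip; the HCLGEVF_*_BIT values below are stand-ins, while the RXH_* values match the uapi ethtool ones:

#include <stdio.h>
#include <stdint.h>

#define S_PORT_BIT	0x01	/* stand-in device tuple bits */
#define D_PORT_BIT	0x02
#define S_IP_BIT	0x04
#define D_IP_BIT	0x08

#define RXH_IP_SRC	0x10	/* as in linux/ethtool.h */
#define RXH_IP_DST	0x20
#define RXH_L4_B_0_1	0x40
#define RXH_L4_B_2_3	0x80

static uint8_t rxh_to_tuple(uint64_t data)
{
	uint8_t t = 0;

	if (data & RXH_L4_B_0_1)
		t |= S_PORT_BIT;
	if (data & RXH_L4_B_2_3)
		t |= D_PORT_BIT;
	if (data & RXH_IP_SRC)
		t |= S_IP_BIT;
	if (data & RXH_IP_DST)
		t |= D_IP_BIT;
	return t;
}

int main(void)
{
	/* "hash on src/dst IP plus both port halves", as ethtool -N sets */
	unsigned t = rxh_to_tuple(RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3);

	printf("tuple=0x%02x\n", t);	/* 0x0f */
	return 0;
}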
@@ -565,45 +1045,32 @@
 				      struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_vf_to_pf_msg send_msg;
 	struct hnae3_ring_chain_node *node;
-	struct hclge_mbx_vf_to_pf_cmd *req;
-	struct hclgevf_desc desc;
-	int i = 0;
 	int status;
-	u8 type;
+	int i = 0;
 
-	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+	memset(&send_msg, 0, sizeof(send_msg));
+	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+	send_msg.vector_id = vector_id;
 
 	for (node = ring_chain; node; node = node->next) {
-		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
-				HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
-
-		if (i == 0) {
-			hclgevf_cmd_setup_basic_desc(&desc,
-						     HCLGEVF_OPC_MBX_VF_TO_PF,
-						     false);
-			type = en ?
-				HCLGE_MBX_MAP_RING_TO_VECTOR :
-				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
-			req->msg[0] = type;
-			req->msg[1] = vector_id;
-		}
-
-		req->msg[idx_offset] =
+		send_msg.param[i].ring_type =
 			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
-		req->msg[idx_offset + 1] = node->tqp_index;
-		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
-							   HNAE3_RING_GL_IDX_M,
-							   HNAE3_RING_GL_IDX_S);
+
+		send_msg.param[i].tqp_index = node->tqp_index;
+		send_msg.param[i].int_gl_index =
+			hnae3_get_field(node->int_gl_idx,
+					HNAE3_RING_GL_IDX_M,
+					HNAE3_RING_GL_IDX_S);
 
 		i++;
-		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
-		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
-		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
-		    !node->next) {
-			req->msg[2] = i;
+		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
+			send_msg.ring_num = i;
 
-			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
+						      NULL, 0);
 			if (status) {
 				dev_err(&hdev->pdev->dev,
 					"Map TQP fail, status is %d.\n",
@@ -611,11 +1078,6 @@
 				return status;
 			}
 			i = 0;
-			hclgevf_cmd_setup_basic_desc(&desc,
-						     HCLGEVF_OPC_MBX_VF_TO_PF,
-						     false);
-			req->msg[0] = type;
-			req->msg[1] = vector_id;
 		}
 	}
 
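Note: the rewritten ring-to-vector binding drops the hand-built HCLGEVF_OPC_MBX_VF_TO_PF descriptor and instead batches up to HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM chain entries per typed message, flushing when the message fills or the chain ends. A user-space model of that flush rhythm (a batch size of 4 is assumed for the demo):

#include <stdio.h>
#include <stddef.h>

#define MAX_RING_CHAIN_PARAM_NUM 4	/* assumed batch size */

struct node {
	int tqp_index;
	struct node *next;
};

static void map_chain(struct node *chain)
{
	int i = 0;

	/* pack entries; flush when the message is full or the chain ends */
	for (struct node *n = chain; n; n = n->next) {
		printf("  param[%d] = tqp %d\n", i, n->tqp_index);
		i++;
		if (i == MAX_RING_CHAIN_PARAM_NUM || !n->next) {
			printf("send message with ring_num=%d\n", i);
			i = 0;
		}
	}
}

int main(void)
{
	struct node n[6] = {
		{0, &n[1]}, {1, &n[2]}, {2, &n[3]},
		{3, &n[4]}, {4, &n[5]}, {5, NULL}
	};

	map_chain(&n[0]);	/* two messages: ring_num=4, then ring_num=2 */
	return 0;
}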
@@ -645,6 +1107,9 @@
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	int ret, vector_id;
+
+	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+		return 0;
 
 	vector_id = hclgevf_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
@@ -682,36 +1147,60 @@
 }
 
 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
-					bool en_uc_pmc, bool en_mc_pmc)
+					bool en_uc_pmc, bool en_mc_pmc,
+					bool en_bc_pmc)
 {
-	struct hclge_mbx_vf_to_pf_cmd *req;
-	struct hclgevf_desc desc;
-	int status;
+	struct hclge_vf_to_pf_msg send_msg;
+	int ret;
 
-	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+	memset(&send_msg, 0, sizeof(send_msg));
+	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
+	send_msg.en_bc = en_bc_pmc ? 1 : 0;
+	send_msg.en_uc = en_uc_pmc ? 1 : 0;
+	send_msg.en_mc = en_mc_pmc ? 1 : 0;
 
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
-	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
-	req->msg[1] = en_uc_pmc ? 1 : 0;
-	req->msg[2] = en_mc_pmc ? 1 : 0;
-
-	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
-	if (status)
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"Set promisc mode fail, status is %d.\n", status);
+			"Set promisc mode fail, status is %d.\n", ret);
 
-	return status;
+	return ret;
 }
 
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
-				     bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+				    bool en_mc_pmc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	bool en_bc_pmc;
+
+	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
+
+	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+					    en_bc_pmc);
+}
+
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
 }
 
-static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *handle = &hdev->nic;
+	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+	int ret;
+
+	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+		if (!ret)
+			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+	}
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
 			      int stream_id, bool enable)
 {
 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
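Note: promiscuous-mode handling becomes asynchronous here — hclgevf_request_update_promisc_mode() only raises HCLGEVF_STATE_PROMISC_CHANGED, and hclgevf_sync_promisc_mode() (run from the service task) applies the mode and clears the flag only on success, so a failed mailbox send is retried later. The pattern, with a plain bool standing in for the atomic bitops:

#include <stdbool.h>
#include <stdio.h>

static bool promisc_changed;

static void request_update_promisc_mode(void)
{
	promisc_changed = true;		/* cheap; safe from ndo_set_rx_mode */
}

static int apply_promisc_mode(void)
{
	return 0;	/* pretend the PF accepted the mailbox message */
}

static void sync_promisc_mode(void)
{
	/* clear the flag only on success so a failure is retried */
	if (promisc_changed && apply_promisc_mode() == 0)
		promisc_changed = false;
}

int main(void)
{
	request_update_promisc_mode();
	sync_promisc_mode();		/* periodic service-task work */
	printf("pending=%d\n", promisc_changed);
	return 0;
}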
@@ -724,7 +1213,8 @@
 				     false);
 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
 	req->stream_id = cpu_to_le16(stream_id);
-	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+	if (enable)
+		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
 
 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 	if (status)
@@ -734,152 +1224,51 @@
 	return status;
 }
 
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
-	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
-	return tqp->index;
-}
-
 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hnae3_queue *queue;
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclgevf_tqp *tqp;
 	int i;
 
-	for (i = 0; i < hdev->num_tqps; i++) {
-		queue = handle->kinfo.tqp[i];
-		tqp = container_of(queue, struct hclgevf_tqp, q);
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
 	}
 }
 
-static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
 {
-	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
-	int ret;
+	struct hclge_vf_to_pf_msg send_msg;
+	u8 host_mac[ETH_ALEN];
+	int status;
 
-	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
-				   NULL, 0, true, &resp_msg, sizeof(u8));
-
-	if (ret) {
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
+				      ETH_ALEN);
+	if (status) {
 		dev_err(&hdev->pdev->dev,
-			"Read mta type fail, ret=%d.\n", ret);
-		return ret;
+			"fail to get VF MAC from host %d", status);
+		return status;
 	}
 
-	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
-		dev_err(&hdev->pdev->dev,
-			"Read mta type invalid, resp=%d.\n", resp_msg);
-		return -EINVAL;
-	}
-
-	hdev->mta_mac_sel_type = resp_msg;
+	ether_addr_copy(p, host_mac);
 
 	return 0;
-}
-
-static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
-					     const u8 *addr)
-{
-	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
-	u16 high_val = addr[1] | (addr[0] << 8);
-
-	return (high_val >> rsh) & 0xfff;
-}
-
-static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
-					unsigned long *status)
-{
-#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
-#define HCLGEVF_MTA_STATUS_MSG_BITS \
-			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
-			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
-	u16 tbl_cnt;
-	u16 tbl_idx;
-	u8 msg_cnt;
-	u8 msg_idx;
-	int ret;
-
-	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
-			       HCLGEVF_MTA_STATUS_MSG_BITS);
-	tbl_idx = 0;
-	msg_idx = 0;
-	while (msg_cnt--) {
-		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
-		u8 *p = &msg[1];
-		u8 msg_ofs;
-		u8 msg_bit;
-
-		memset(msg, 0, sizeof(msg));
-
-		/* set index field */
-		msg[0] = 0x7F & msg_idx;
-
-		/* set end flag field */
-		if (msg_cnt == 0) {
-			msg[0] |= 0x80;
-			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
-		} else {
-			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
-		}
-
-		/* set status field */
-		msg_ofs = 0;
-		msg_bit = 0;
-		while (tbl_cnt--) {
-			if (test_bit(tbl_idx, status))
-				p[msg_ofs] |= BIT(msg_bit);
-
-			tbl_idx++;
-
-			msg_bit++;
-			if (msg_bit == BITS_PER_BYTE) {
-				msg_bit = 0;
-				msg_ofs++;
-			}
-		}
-
-		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
-					   msg, sizeof(msg), false, NULL, 0);
-		if (ret)
-			break;
-
-		msg_idx++;
-	}
-
-	return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
-	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct net_device *netdev = hdev->nic.kinfo.netdev;
-	struct netdev_hw_addr *ha;
-	u16 tbl_idx;
-
-	/* clear status */
-	memset(mta_status, 0, sizeof(mta_status));
-
-	/* update status from mc addr list */
-	netdev_for_each_mc_addr(ha, netdev) {
-		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
-		set_bit(tbl_idx, mta_status);
-	}
-
-	return hclgevf_do_update_mta_status(hdev, mta_status);
 }
 
 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 host_mac_addr[ETH_ALEN];
 
-	ether_addr_copy(p, hdev->hw.mac.mac_addr);
+	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+		return;
+
+	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+	if (hdev->has_pf_mac)
+		ether_addr_copy(p, host_mac_addr);
+	else
+		ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }
 
 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
@@ -887,114 +1276,424 @@
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+	struct hclge_vf_to_pf_msg send_msg;
 	u8 *new_mac_addr = (u8 *)p;
-	u8 msg_data[ETH_ALEN * 2];
-	u16 subcode;
 	int status;
 
-	ether_addr_copy(msg_data, new_mac_addr);
-	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
-
-	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
-			HCLGE_MBX_MAC_VLAN_UC_MODIFY;
-
-	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
-				      subcode, msg_data, ETH_ALEN * 2,
-				      true, NULL, 0);
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
+	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+	ether_addr_copy(send_msg.data, new_mac_addr);
+	if (is_first && !hdev->has_pf_mac)
+		eth_zero_addr(&send_msg.data[ETH_ALEN]);
+	else
+		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 	if (!status)
 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
 
 	return status;
 }
 
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node)
+		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+			return mac_node;
+
+	return NULL;
+}
+
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+				    enum HCLGEVF_MAC_NODE_STATE state)
+{
+	switch (state) {
+	/* from set_rx_mode or tmp_add_list */
+	case HCLGEVF_MAC_TO_ADD:
+		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+			mac_node->state = HCLGEVF_MAC_ACTIVE;
+		break;
+	/* only from set_rx_mode */
+	case HCLGEVF_MAC_TO_DEL:
+		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			mac_node->state = HCLGEVF_MAC_TO_DEL;
+		}
+		break;
+	/* only from tmp_add_list, the mac_node->state won't be
+	 * HCLGEVF_MAC_ACTIVE
+	 */
+	case HCLGEVF_MAC_ACTIVE:
+		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+			mac_node->state = HCLGEVF_MAC_ACTIVE;
+		break;
+	}
+}
+
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+				   enum HCLGEVF_MAC_NODE_STATE state,
+				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+				   const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_mac_addr_node *mac_node;
+	struct list_head *list;
+
+	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+	spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	/* if the mac addr is already in the mac list, no need to add a new
+	 * one into it, just check the mac addr state, convert it to a new
+	 * new state, or just remove it, or do nothing.
+	 */
+	mac_node = hclgevf_find_mac_node(list, addr);
+	if (mac_node) {
+		hclgevf_update_mac_node(mac_node, state);
+		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+		return 0;
+	}
+	/* if this address is never added, unnecessary to delete */
+	if (state == HCLGEVF_MAC_TO_DEL) {
+		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+		return -ENOENT;
+	}
+
+	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+	if (!mac_node) {
+		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+		return -ENOMEM;
+	}
+
+	mac_node->state = state;
+	ether_addr_copy(mac_node->mac_addr, addr);
+	list_add_tail(&mac_node->node, list);
+
+	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+	return 0;
+}
+
 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
 			       const unsigned char *addr)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
-				    HCLGE_MBX_MAC_VLAN_UC_ADD,
-				    addr, ETH_ALEN, false, NULL, 0);
+	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+				       HCLGEVF_MAC_ADDR_UC, addr);
 }
 
 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
 			      const unsigned char *addr)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
-				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
-				    addr, ETH_ALEN, false, NULL, 0);
+	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+				       HCLGEVF_MAC_ADDR_UC, addr);
 }
 
 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
 			       const unsigned char *addr)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-				    HCLGE_MBX_MAC_VLAN_MC_ADD,
-				    addr, ETH_ALEN, false, NULL, 0);
+	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+				       HCLGEVF_MAC_ADDR_MC, addr);
 }
 
 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
 			      const unsigned char *addr)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+				       HCLGEVF_MAC_ADDR_MC, addr);
+}
 
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
-				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
-				    addr, ETH_ALEN, false, NULL, 0);
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+				    struct hclgevf_mac_addr_node *mac_node,
+				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_vf_to_pf_msg send_msg;
+	u8 code, subcode;
+
+	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+		code = HCLGE_MBX_SET_UNICAST;
+		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+		else
+			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+	} else {
+		code = HCLGE_MBX_SET_MULTICAST;
+		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+		else
+			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+	}
+
+	hclgevf_build_send_msg(&send_msg, code, subcode);
+	ether_addr_copy(send_msg.data, mac_node->mac_addr);
+	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+				    struct list_head *list,
+				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp;
+	int ret;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"failed to configure mac %pM, state = %d, ret = %d\n",
+				mac_node->mac_addr, mac_node->state, ret);
+			return;
+		}
+		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+			mac_node->state = HCLGEVF_MAC_ACTIVE;
+		} else {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		}
+	}
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+				       struct list_head *mac_list)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, it means have received a TO_DEL request
+		 * during the time window of sending mac config request to PF
+		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
+		 * then it will be removed at next time. If is TO_ADD, it means
+		 * send TO_ADD request failed, so just remove the mac node.
+		 */
+		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			hclgevf_update_mac_node(new_node, mac_node->state);
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+			mac_node->state = HCLGEVF_MAC_TO_DEL;
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		} else {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		}
+	}
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+				       struct list_head *mac_list)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			/* If the mac addr is exist in the mac list, it means
+			 * received a new request TO_ADD during the time window
+			 * of sending mac addr configurrequest to PF, so just
+			 * change the mac state to ACTIVE.
+			 */
+			new_node->state = HCLGEVF_MAC_ACTIVE;
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		}
+	}
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		list_del(&mac_node->node);
+		kfree(mac_node);
+	}
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+	struct list_head tmp_add_list, tmp_del_list;
+	struct list_head *list;
+
+	INIT_LIST_HEAD(&tmp_add_list);
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addr to the tmp_add_list and tmp_del_list, then
+	 * we can add/delete these mac addr outside the spin lock
+	 */
+	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+	spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		switch (mac_node->state) {
+		case HCLGEVF_MAC_TO_DEL:
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, &tmp_del_list);
+			break;
+		case HCLGEVF_MAC_TO_ADD:
+			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+			if (!new_node)
+				goto stop_traverse;
+
+			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+			new_node->state = mac_node->state;
+			list_add_tail(&new_node->node, &tmp_add_list);
+			break;
+		default:
+			break;
+		}
+	}
+
+stop_traverse:
+	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+	/* delete first, in order to get max mac table space for adding */
+	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+	/* if some mac addresses were added/deleted fail, move back to the
+	 * mac_list, and retry at next time.
+	 */
+	spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	hclgevf_sync_from_del_list(&tmp_del_list, list);
+	hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+	spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
 }
 
 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 				   __be16 proto, u16 vlan_id,
 				   bool is_kill)
 {
-#define HCLGEVF_VLAN_MBX_MSG_LEN 5
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
+#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
+#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
 
-	if (vlan_id > 4095)
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_vf_to_pf_msg send_msg;
+	int ret;
+
+	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
 		return -EINVAL;
 
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
 
-	msg_data[0] = is_kill;
-	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
-	memcpy(&msg_data[3], &proto, sizeof(proto));
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
-				    HCLGE_MBX_VLAN_FILTER, msg_data,
-				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
+	 * reset finished.
+	 */
+	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+		return -EBUSY;
+	}
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+			       HCLGE_MBX_VLAN_FILTER);
+	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
+	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
+	       sizeof(vlan_id));
+	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
+	       sizeof(proto));
+	/* when remove hw vlan filter failed, record the vlan id,
+	 * and try to remove it from hw later, to be consistence
+	 * with stack.
+	 */
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+	if (is_kill && ret)
+		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+
+	return ret;
+}
+
+static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_SYNC_COUNT	60
+	struct hnae3_handle *handle = &hdev->nic;
+	int ret, sync_cnt = 0;
+	u16 vlan_id;
+
+	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+	while (vlan_id != VLAN_N_VID) {
+		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+					      vlan_id, true);
+		if (ret)
+			return;
+
+		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+		sync_cnt++;
+		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+			return;
+
+		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+	}
 }
 
 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	u8 msg_data;
+	struct hclge_vf_to_pf_msg send_msg;
 
-	msg_data = enable ? 1 : 0;
-	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
-				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
-				    1, false, NULL, 0);
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+			       HCLGE_MBX_VLAN_RX_OFF_CFG);
+	send_msg.data[0] = enable ? 1 : 0;
+	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 }
 
-static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	u8 msg_data[2];
+	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
-
-	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
 
 	/* disable vf queue before send queue reset msg to PF */
 	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
 	if (ret)
-		return;
+		return ret;
 
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
-			     2, true, NULL, 0);
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_vf_to_pf_msg send_msg;
+
+	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
+	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 }
 
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
....@@ -1002,42 +1701,97 @@
10021701 {
10031702 struct hnae3_client *client = hdev->nic_client;
10041703 struct hnae3_handle *handle = &hdev->nic;
1704
+ int ret;
1705
+
1706
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
1707
+ !client)
1708
+ return 0;
10051709
10061710 if (!client->ops->reset_notify)
10071711 return -EOPNOTSUPP;
10081712
1009
- return client->ops->reset_notify(handle, type);
1713
+ ret = client->ops->reset_notify(handle, type);
1714
+ if (ret)
1715
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
1716
+ type, ret);
1717
+
1718
+ return ret;
1719
+}
1720
+
1721
+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
1722
+ enum hnae3_reset_notify_type type)
1723
+{
1724
+ struct hnae3_client *client = hdev->roce_client;
1725
+ struct hnae3_handle *handle = &hdev->roce;
1726
+ int ret;
1727
+
1728
+ if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
1729
+ return 0;
1730
+
1731
+ if (!client->ops->reset_notify)
1732
+ return -EOPNOTSUPP;
1733
+
1734
+ ret = client->ops->reset_notify(handle, type);
1735
+ if (ret)
1736
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
1737
+ type, ret);
1738
+ return ret;
10101739 }
10111740
10121741 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
10131742 {
1014
-#define HCLGEVF_RESET_WAIT_MS 500
1015
-#define HCLGEVF_RESET_WAIT_CNT 20
1016
- u32 val, cnt = 0;
1743
+#define HCLGEVF_RESET_WAIT_US 20000
1744
+#define HCLGEVF_RESET_WAIT_CNT 2000
1745
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
1746
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
10171747
1018
- /* wait to check the hardware reset completion status */
1019
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1020
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
1021
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
1022
- msleep(HCLGEVF_RESET_WAIT_MS);
1023
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1024
- cnt++;
1025
- }
1748
+ u32 val;
1749
+ int ret;
1750
+
1751
+ if (hdev->reset_type == HNAE3_VF_RESET)
1752
+ ret = readl_poll_timeout(hdev->hw.io_base +
1753
+ HCLGEVF_VF_RST_ING, val,
1754
+ !(val & HCLGEVF_VF_RST_ING_BIT),
1755
+ HCLGEVF_RESET_WAIT_US,
1756
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
1757
+ else
1758
+ ret = readl_poll_timeout(hdev->hw.io_base +
1759
+ HCLGEVF_RST_ING, val,
1760
+ !(val & HCLGEVF_RST_ING_BITS),
1761
+ HCLGEVF_RESET_WAIT_US,
1762
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
10261763
10271764 /* hardware completion status should be available by this time */
1028
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
1029
- dev_warn(&hdev->pdev->dev,
1030
- "could'nt get reset done status from h/w, timeout!\n");
1031
- return -EBUSY;
1765
+ if (ret) {
1766
+ dev_err(&hdev->pdev->dev,
1767
+ "couldn't get reset done status from h/w, timeout!\n");
1768
+ return ret;
10321769 }
10331770
10341771 /* we will wait a bit more to let reset of the stack to complete. This
10351772 * might happen in case reset assertion was made by PF. Yes, this also
10361773 * means we might end up waiting bit more even for VF reset.
10371774 */
1038
- msleep(5000);
1775
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
1776
+ msleep(5000);
1777
+ else
1778
+ msleep(500);
10391779
10401780 return 0;
1781
+}
1782
+
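/* A hedged sketch of what the readl_poll_timeout() calls above do, per
 * <linux/iopoll.h>: re-read the register roughly every sleep_us until the
 * reset-in-progress bits clear or timeout_us elapses, returning 0 on
 * success and -ETIMEDOUT otherwise (the last value read stays in val).
 * The userspace analogue below is illustrative only; the real macro also
 * performs one final read after the deadline before giving up.
 */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int poll_until_cleared(uint32_t (*read_reg)(void), uint32_t busy_mask,
			      unsigned long sleep_us, unsigned long timeout_us)
{
	unsigned long waited_us = 0;

	for (;;) {
		if (!(read_reg() & busy_mask))	/* reset bits cleared */
			return 0;
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited_us += sleep_us;
	}
}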
1783
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1784
+{
1785
+ u32 reg_val;
1786
+
1787
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
1788
+ if (enable)
1789
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1790
+ else
1791
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1792
+
1793
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
1794
+ reg_val);
10411795 }
10421796
10431797 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
....@@ -1045,10 +1799,12 @@
10451799 int ret;
10461800
10471801 /* uninitialize the nic client */
1048
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1802
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1803
+ if (ret)
1804
+ return ret;
10491805
10501806 /* re-initialize the hclge device */
1051
- ret = hclgevf_init_hdev(hdev);
1807
+ ret = hclgevf_reset_hdev(hdev);
10521808 if (ret) {
10531809 dev_err(&hdev->pdev->dev,
10541810 "hclge device re-init failed, VF is disabled!\n");
....@@ -1056,82 +1812,289 @@
10561812 }
10571813
10581814 /* bring up the nic client again */
1059
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1815
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1816
+ if (ret)
1817
+ return ret;
1818
+
1819
+ /* clear handshake status with IMP */
1820
+ hclgevf_reset_handshake(hdev, false);
1821
+
1822
+ /* bring up the nic to enable TX/RX again */
1823
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1824
+}
1825
+
1826
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1827
+{
1828
+#define HCLGEVF_RESET_SYNC_TIME 100
1829
+
1830
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1831
+ struct hclge_vf_to_pf_msg send_msg;
1832
+ int ret;
1833
+
1834
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1835
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1836
+ if (ret) {
1837
+ dev_err(&hdev->pdev->dev,
1838
+ "failed to assert VF reset, ret = %d\n", ret);
1839
+ return ret;
1840
+ }
1841
+ hdev->rst_stats.vf_func_rst_cnt++;
1842
+ }
1843
+
1844
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1845
+ /* inform hardware that preparatory work is done */
1846
+ msleep(HCLGEVF_RESET_SYNC_TIME);
1847
+ hclgevf_reset_handshake(hdev, true);
1848
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1849
+ hdev->reset_type);
10601850
10611851 return 0;
10621852 }
10631853
1064
-static int hclgevf_reset(struct hclgevf_dev *hdev)
1854
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1855
+{
1856
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1857
+ hdev->rst_stats.vf_func_rst_cnt);
1858
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1859
+ hdev->rst_stats.flr_rst_cnt);
1860
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1861
+ hdev->rst_stats.vf_rst_cnt);
1862
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1863
+ hdev->rst_stats.rst_done_cnt);
1864
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1865
+ hdev->rst_stats.hw_rst_done_cnt);
1866
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
1867
+ hdev->rst_stats.rst_cnt);
1868
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1869
+ hdev->rst_stats.rst_fail_cnt);
1870
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1871
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1872
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1873
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
1874
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1875
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
1876
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1877
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1878
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1879
+}
1880
+
1881
+static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1882
+{
1883
+ /* recover handshake status with IMP when reset fails */
1884
+ hclgevf_reset_handshake(hdev, true);
1885
+ hdev->rst_stats.rst_fail_cnt++;
1886
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1887
+ hdev->rst_stats.rst_fail_cnt);
1888
+
1889
+ if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1890
+ set_bit(hdev->reset_type, &hdev->reset_pending);
1891
+
1892
+ if (hclgevf_is_reset_pending(hdev)) {
1893
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1894
+ hclgevf_reset_task_schedule(hdev);
1895
+ } else {
1896
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1897
+ hclgevf_dump_rst_info(hdev);
1898
+ }
1899
+}
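/* Worked example of the failure path above (an illustrative walk-through,
 * not driver code): with HCLGEVF_RESET_MAX_FAIL_CNT == 5, the first four
 * failures re-arm the same reset type in reset_pending and reschedule the
 * reset task; the fifth failure stops retrying and, if nothing else is
 * pending, latches HCLGEVF_STATE_RST_FAIL and dumps the reset statistics
 * instead.
 */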
1900
+
1901
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
10651902 {
10661903 int ret;
10671904
1905
+ hdev->rst_stats.rst_cnt++;
1906
+
1907
+ /* perform reset of the stack & ae device for a client */
1908
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
1909
+ if (ret)
1910
+ return ret;
1911
+
10681912 rtnl_lock();
1069
-
10701913 /* bring down the nic to stop any ongoing TX/RX */
1071
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1072
-
1914
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
10731915 rtnl_unlock();
1916
+ if (ret)
1917
+ return ret;
1918
+
1919
+ return hclgevf_reset_prepare_wait(hdev);
1920
+}
1921
+
1922
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1923
+{
1924
+ int ret;
1925
+
1926
+ hdev->rst_stats.hw_rst_done_cnt++;
1927
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1928
+ if (ret)
1929
+ return ret;
1930
+
1931
+ rtnl_lock();
1932
+ /* now, re-initialize the nic client and ae device */
1933
+ ret = hclgevf_reset_stack(hdev);
1934
+ rtnl_unlock();
1935
+ if (ret) {
1936
+ dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1937
+ return ret;
1938
+ }
1939
+
1940
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1941
+ /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
1942
+ * times
1943
+ */
1944
+ if (ret &&
1945
+ hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1946
+ return ret;
1947
+
1948
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1949
+ if (ret)
1950
+ return ret;
1951
+
1952
+ hdev->last_reset_time = jiffies;
1953
+ hdev->rst_stats.rst_done_cnt++;
1954
+ hdev->rst_stats.rst_fail_cnt = 0;
1955
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1956
+
1957
+ return 0;
1958
+}
1959
+
1960
+static void hclgevf_reset(struct hclgevf_dev *hdev)
1961
+{
1962
+ if (hclgevf_reset_prepare(hdev))
1963
+ goto err_reset;
10741964
10751965 /* check if VF could successfully fetch the hardware reset completion
10761966 * status from the hardware
10771967 */
1078
- ret = hclgevf_reset_wait(hdev);
1079
- if (ret) {
1968
+ if (hclgevf_reset_wait(hdev)) {
10801969 /* can't do much in this situation, will disable VF */
10811970 dev_err(&hdev->pdev->dev,
1082
- "VF failed(=%d) to fetch H/W reset completion status\n",
1083
- ret);
1084
-
1085
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
1086
- rtnl_lock();
1087
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1088
-
1089
- rtnl_unlock();
1090
- return ret;
1971
+ "failed to fetch H/W reset completion status\n");
1972
+ goto err_reset;
10911973 }
10921974
1093
- rtnl_lock();
1975
+ if (hclgevf_reset_rebuild(hdev))
1976
+ goto err_reset;
10941977
1095
- /* now, re-initialize the nic client and ae device*/
1096
- ret = hclgevf_reset_stack(hdev);
1097
- if (ret)
1098
- dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1978
+ return;
10991979
1100
- /* bring up the nic to enable TX/RX again */
1101
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1102
-
1103
- rtnl_unlock();
1104
-
1105
- return ret;
1980
+err_reset:
1981
+ hclgevf_reset_err_handle(hdev);
11061982 }
11071983
1108
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
1984
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1985
+ unsigned long *addr)
11091986 {
1110
- int status;
1111
- u8 respmsg;
1987
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
11121988
1113
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1114
- 0, false, &respmsg, sizeof(u8));
1115
- if (status)
1116
- dev_err(&hdev->pdev->dev,
1117
- "VF reset request to PF failed(=%d)\n", status);
1989
+ /* return the highest priority reset level amongst all */
1990
+ if (test_bit(HNAE3_VF_RESET, addr)) {
1991
+ rst_level = HNAE3_VF_RESET;
1992
+ clear_bit(HNAE3_VF_RESET, addr);
1993
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1994
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
1995
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1996
+ rst_level = HNAE3_VF_FULL_RESET;
1997
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
1998
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
1999
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
2000
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
2001
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
2002
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
2003
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
2004
+ rst_level = HNAE3_VF_FUNC_RESET;
2005
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
2006
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2007
+ rst_level = HNAE3_FLR_RESET;
2008
+ clear_bit(HNAE3_FLR_RESET, addr);
2009
+ }
11182010
1119
- return status;
2011
+ return rst_level;
11202012 }
11212013
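/* Worked example of the priority folding above (illustrative only): if
 * both HNAE3_VF_RESET and HNAE3_VF_FUNC_RESET are pending, the VF reset
 * wins and the subsumed function-reset bit is cleared as well, so the
 * lower-priority request is never replayed afterwards:
 *
 *	unsigned long pending = 0;
 *
 *	set_bit(HNAE3_VF_FUNC_RESET, &pending);
 *	set_bit(HNAE3_VF_RESET, &pending);
 *	level = hclgevf_get_reset_level(hdev, &pending);
 *	// level == HNAE3_VF_RESET, pending == 0
 */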
1122
-static void hclgevf_reset_event(struct hnae3_handle *handle)
2014
+static void hclgevf_reset_event(struct pci_dev *pdev,
2015
+ struct hnae3_handle *handle)
11232016 {
1124
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2017
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2018
+ struct hclgevf_dev *hdev = ae_dev->priv;
11252019
11262020 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
11272021
1128
- handle->reset_level = HNAE3_VF_RESET;
2022
+ if (hdev->default_reset_request)
2023
+ hdev->reset_level =
2024
+ hclgevf_get_reset_level(hdev,
2025
+ &hdev->default_reset_request);
2026
+ else
2027
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
11292028
11302029 /* reset of this VF requested */
11312030 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
11322031 hclgevf_reset_task_schedule(hdev);
11332032
1134
- handle->last_reset_time = jiffies;
2033
+ hdev->last_reset_time = jiffies;
2034
+}
2035
+
2036
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2037
+ enum hnae3_reset_type rst_type)
2038
+{
2039
+ struct hclgevf_dev *hdev = ae_dev->priv;
2040
+
2041
+ set_bit(rst_type, &hdev->default_reset_request);
2042
+}
2043
+
2044
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
2045
+{
2046
+ writel(en ? 1 : 0, vector->addr);
2047
+}
2048
+
2049
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
2050
+{
2051
+#define HCLGEVF_FLR_RETRY_WAIT_MS 500
2052
+#define HCLGEVF_FLR_RETRY_CNT 5
2053
+
2054
+ struct hclgevf_dev *hdev = ae_dev->priv;
2055
+ int retry_cnt = 0;
2056
+ int ret;
2057
+
2058
+retry:
2059
+ down(&hdev->reset_sem);
2060
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2061
+ hdev->reset_type = HNAE3_FLR_RESET;
2062
+ ret = hclgevf_reset_prepare(hdev);
2063
+ if (ret) {
2064
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
2065
+ ret);
2066
+ if (hdev->reset_pending ||
2067
+ retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
2068
+ dev_err(&hdev->pdev->dev,
2069
+ "reset_pending:0x%lx, retry_cnt:%d\n",
2070
+ hdev->reset_pending, retry_cnt);
2071
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2072
+ up(&hdev->reset_sem);
2073
+ msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
2074
+ goto retry;
2075
+ }
2076
+ }
2077
+
2078
+ /* disable misc vector before FLR done */
2079
+ hclgevf_enable_vector(&hdev->misc_vector, false);
2080
+ hdev->rst_stats.flr_rst_cnt++;
2081
+}
2082
+
2083
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
2084
+{
2085
+ struct hclgevf_dev *hdev = ae_dev->priv;
2086
+ int ret;
2087
+
2088
+ hclgevf_enable_vector(&hdev->misc_vector, true);
2089
+
2090
+ ret = hclgevf_reset_rebuild(hdev);
2091
+ if (ret)
2092
+ dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
2093
+ ret);
2094
+
2095
+ hdev->reset_type = HNAE3_NONE_RESET;
2096
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2097
+ up(&hdev->reset_sem);
11352098 }
11362099
11372100 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
....@@ -1158,75 +2121,56 @@
11582121
11592122 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
11602123 {
1161
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
1162
- !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
1163
- set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1164
- schedule_work(&hdev->rst_service_task);
1165
- }
2124
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2125
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
2126
+ &hdev->state))
2127
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
11662128 }
11672129
11682130 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
11692131 {
1170
- if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1171
- !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1172
- set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1173
- schedule_work(&hdev->mbx_service_task);
1174
- }
2132
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2133
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
2134
+ &hdev->state))
2135
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
11752136 }
11762137
1177
-static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
2138
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
2139
+ unsigned long delay)
11782140 {
1179
- if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
1180
- !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1181
- schedule_work(&hdev->service_task);
2141
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2142
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2143
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
11822144 }
11832145
1184
-static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
2146
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
11852147 {
1186
- /* if we have any pending mailbox event then schedule the mbx task */
1187
- if (hdev->mbx_event_pending)
1188
- hclgevf_mbx_task_schedule(hdev);
2148
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
11892149
1190
- if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1191
- hclgevf_reset_task_schedule(hdev);
1192
-}
1193
-
1194
-static void hclgevf_service_timer(struct timer_list *t)
1195
-{
1196
- struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1197
-
1198
- mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1199
-
1200
- hclgevf_task_schedule(hdev);
1201
-}
1202
-
1203
-static void hclgevf_reset_service_task(struct work_struct *work)
1204
-{
1205
- struct hclgevf_dev *hdev =
1206
- container_of(work, struct hclgevf_dev, rst_service_task);
1207
- int ret;
1208
-
1209
- if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2150
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
12102151 return;
12112152
1212
- clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
2153
+ down(&hdev->reset_sem);
2154
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
12132155
12142156 if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
12152157 &hdev->reset_state)) {
12162158 /* PF has intimated that it is about to reset the hardware.
1217
- * We now have to poll & check if harware has actually completed
1218
- * the reset sequence. On hardware reset completion, VF needs to
1219
- * reset the client and ae device.
2159
+ * We now have to poll & check if hardware has actually
2160
+ * completed the reset sequence. On hardware reset completion,
2161
+ * VF needs to reset the client and ae device.
12202162 */
12212163 hdev->reset_attempts = 0;
12222164
1223
- ret = hclgevf_reset(hdev);
1224
- if (ret)
1225
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
2165
+ hdev->last_reset_time = jiffies;
2166
+ hdev->reset_type =
2167
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
2168
+ if (hdev->reset_type != HNAE3_NONE_RESET)
2169
+ hclgevf_reset(hdev);
12262170 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
12272171 &hdev->reset_state)) {
12282172 /* we could be here when either of below happens:
1229
- * 1. reset was initiated due to watchdog timeout due to
2173
+ * 1. reset was initiated due to watchdog timeout caused by
12302174 * a. IMP was earlier reset and our TX got choked down and
12312175 * which resulted in watchdog reacting and inducing VF
12322176 * reset. This also means our cmdq would be unreliable.
....@@ -1240,65 +2184,125 @@
12402184 * 1b and 2. cases but we will not get any intimation about 1a
12412185 * from PF as cmdq would be in unreliable state i.e. mailbox
12422186 * communication between PF and VF would be broken.
1243
- */
1244
-
1245
- /* if we are never geting into pending state it means either:
2187
+ *
2188
+ * if we are never getting into pending state it means either:
12462189 * 1. PF is not receiving our request which could be due to IMP
12472190 * reset
12482191 * 2. PF is screwed
12492192 * We cannot do much for 2. but to check first we can try reset
12502193 * our PCIe + stack and see if it alleviates the problem.
12512194 */
1252
- if (hdev->reset_attempts > 3) {
2195
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
12532196 /* prepare for full reset of stack + pcie interface */
1254
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
2197
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
12552198
12562199 /* "defer" schedule the reset task again */
12572200 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
12582201 } else {
12592202 hdev->reset_attempts++;
12602203
1261
- /* request PF for resetting this VF via mailbox */
1262
- ret = hclgevf_do_reset(hdev);
1263
- if (ret)
1264
- dev_warn(&hdev->pdev->dev,
1265
- "VF rst fail, stack will call\n");
2204
+ set_bit(hdev->reset_level, &hdev->reset_pending);
2205
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
12662206 }
2207
+ hclgevf_reset_task_schedule(hdev);
12672208 }
12682209
2210
+ hdev->reset_type = HNAE3_NONE_RESET;
12692211 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2212
+ up(&hdev->reset_sem);
12702213 }
12712214
1272
-static void hclgevf_mailbox_service_task(struct work_struct *work)
2215
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
12732216 {
1274
- struct hclgevf_dev *hdev;
1275
-
1276
- hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
2217
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
2218
+ return;
12772219
12782220 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
12792221 return;
1280
-
1281
- clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
12822222
12832223 hclgevf_mbx_async_handler(hdev);
12842224
12852225 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
12862226 }
12872227
1288
-static void hclgevf_service_task(struct work_struct *work)
2228
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
12892229 {
1290
- struct hclgevf_dev *hdev;
2230
+ struct hclge_vf_to_pf_msg send_msg;
2231
+ int ret;
12912232
1292
- hdev = container_of(work, struct hclgevf_dev, service_task);
2233
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
2234
+ return;
2235
+
2236
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
2237
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2238
+ if (ret)
2239
+ dev_err(&hdev->pdev->dev,
2240
+ "VF sends keep alive cmd failed(=%d)\n", ret);
2241
+}
2242
+
2243
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
2244
+{
2245
+ unsigned long delta = round_jiffies_relative(HZ);
2246
+ struct hnae3_handle *handle = &hdev->nic;
2247
+
2248
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2249
+ return;
2250
+
2251
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
2252
+ delta = jiffies - hdev->last_serv_processed;
2253
+
2254
+ if (delta < round_jiffies_relative(HZ)) {
2255
+ delta = round_jiffies_relative(HZ) - delta;
2256
+ goto out;
2257
+ }
2258
+ }
2259
+
2260
+ hdev->serv_processed_cnt++;
2261
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
2262
+ hclgevf_keep_alive(hdev);
2263
+
2264
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
2265
+ hdev->last_serv_processed = jiffies;
2266
+ goto out;
2267
+ }
2268
+
2269
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
2270
+ hclgevf_tqps_update_stats(handle);
12932271
12942272 /* request the link status from the PF. PF would be able to tell VF
12952273 * about such updates in future so we might remove this later
12962274 */
12972275 hclgevf_request_link_info(hdev);
12982276
1299
- hclgevf_deferred_task_schedule(hdev);
2277
+ hclgevf_update_link_mode(hdev);
13002278
1301
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
2279
+ hclgevf_sync_vlan_filter(hdev);
2280
+
2281
+ hclgevf_sync_mac_table(hdev);
2282
+
2283
+ hclgevf_sync_promisc_mode(hdev);
2284
+
2285
+ hdev->last_serv_processed = jiffies;
2286
+
2287
+out:
2288
+ hclgevf_task_schedule(hdev, delta);
2289
+}
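/* Worked example of the throttling above (illustrative numbers, HZ=1000):
 * if the last pass ran 300 ms ago, time_is_after_jiffies(last + HZ) is
 * true and delta = 1000 - 300 = 700 ms, so the task merely re-arms for
 * the remaining 700 ms instead of doing a full pass more than about once
 * per second.
 */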
2290
+
2291
+static void hclgevf_service_task(struct work_struct *work)
2292
+{
2293
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
2294
+ service_task.work);
2295
+
2296
+ hclgevf_reset_service_task(hdev);
2297
+ hclgevf_mailbox_service_task(hdev);
2298
+ hclgevf_periodic_service_task(hdev);
2299
+
2300
+ /* Handle reset and mbx again in case the periodic task delays the
2301
+ * handling by calling hclgevf_task_schedule() in
2302
+ * hclgevf_periodic_service_task()
2303
+ */
2304
+ hclgevf_reset_service_task(hdev);
2305
+ hclgevf_mailbox_service_task(hdev);
13022306 }
13032307
13042308 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
....@@ -1306,45 +2310,81 @@
13062310 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
13072311 }
13082312
1309
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
2313
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2314
+ u32 *clearval)
13102315 {
1311
- u32 cmdq_src_reg;
2316
+ u32 val, cmdq_stat_reg, rst_ing_reg;
13122317
13132318 /* fetch the events from their corresponding regs */
1314
- cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1315
- HCLGEVF_VECTOR0_CMDQ_SRC_REG);
2319
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2320
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG);
13162321
1317
- /* check for vector0 mailbox(=CMDQ RX) event source */
1318
- if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1319
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1320
- *clearval = cmdq_src_reg;
1321
- return true;
2322
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2323
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2324
+ dev_info(&hdev->pdev->dev,
2325
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
2326
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
2327
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2328
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
2329
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2330
+ hdev->rst_stats.vf_rst_cnt++;
2331
+ /* set up VF hardware reset status; its PF will clear
2332
+ * this status once the PF has finished initializing.
2333
+ */
2334
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2335
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2336
+ val | HCLGEVF_VF_RST_ING_BIT);
2337
+ return HCLGEVF_VECTOR0_EVENT_RST;
13222338 }
13232339
1324
- dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
2340
+ /* check for vector0 mailbox(=CMDQ RX) event source */
2341
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2342
+ /* for revision 0x21, writing 0 to a bit of the clear register
2343
+ * clears the corresponding interrupt, while writing 1 keeps the
2344
+ * old value.
2345
+ * for revision 0x20, the clear register is a plain read & write
2346
+ * register, so we should just write 0 to the bit we are
2347
+ * handling, and keep the other bits as in cmdq_stat_reg.
2348
+ */
2349
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2350
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2351
+ else
2352
+ *clearval = cmdq_stat_reg &
2353
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
13252354
1326
- return false;
1327
-}
2355
+ return HCLGEVF_VECTOR0_EVENT_MBX;
2356
+ }
13282357
1329
-static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1330
-{
1331
- writel(en ? 1 : 0, vector->addr);
2358
+ /* print other vector0 event source */
2359
+ dev_info(&hdev->pdev->dev,
2360
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2361
+ cmdq_stat_reg);
2362
+
2363
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
13322364 }
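/* A self-contained sketch (not driver code) of the two clear-register
 * semantics handled in hclgevf_check_evt_cause() above. On V2 and later
 * hardware a written 0 clears a bit and a written 1 preserves it, so a
 * single event is acknowledged with ~(1U << bit); on older (0x20) hardware
 * the register is plain read/write, so the snapshot is written back with
 * only the handled bit zeroed. Function names are illustrative.
 */
#include <stdint.h>

static uint32_t clearval_v2_or_later(unsigned int bit)
{
	/* e.g. bit 1: ~(1U << 1) == 0xFFFFFFFD -- clears bit 1, keeps rest */
	return ~(1U << bit);
}

static uint32_t clearval_v1(uint32_t cmdq_stat_reg, unsigned int bit)
{
	/* write back the snapshot with just the handled bit cleared */
	return cmdq_stat_reg & ~(1U << bit);
}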
13332365
13342366 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
13352367 {
2368
+ enum hclgevf_evt_cause event_cause;
13362369 struct hclgevf_dev *hdev = data;
13372370 u32 clearval;
13382371
13392372 hclgevf_enable_vector(&hdev->misc_vector, false);
1340
- if (!hclgevf_check_event_cause(hdev, &clearval))
1341
- goto skip_sched;
2373
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2374
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2375
+ hclgevf_clear_event_cause(hdev, clearval);
13422376
1343
- hclgevf_mbx_handler(hdev);
2377
+ switch (event_cause) {
2378
+ case HCLGEVF_VECTOR0_EVENT_RST:
2379
+ hclgevf_reset_task_schedule(hdev);
2380
+ break;
2381
+ case HCLGEVF_VECTOR0_EVENT_MBX:
2382
+ hclgevf_mbx_handler(hdev);
2383
+ break;
2384
+ default:
2385
+ break;
2386
+ }
13442387
1345
- hclgevf_clear_event_cause(hdev, clearval);
1346
-
1347
-skip_sched:
13482388 hclgevf_enable_vector(&hdev->misc_vector, true);
13492389
13502390 return IRQ_HANDLED;
....@@ -1354,10 +2394,25 @@
13542394 {
13552395 int ret;
13562396
1357
- /* get queue configuration from PF */
1358
- ret = hclge_get_queue_info(hdev);
2397
+ /* get current port based vlan state from PF */
2398
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
13592399 if (ret)
13602400 return ret;
2401
+
2402
+ /* get queue configuration from PF */
2403
+ ret = hclgevf_get_queue_info(hdev);
2404
+ if (ret)
2405
+ return ret;
2406
+
2407
+ /* get queue depth info from PF */
2408
+ ret = hclgevf_get_queue_depth(hdev);
2409
+ if (ret)
2410
+ return ret;
2411
+
2412
+ ret = hclgevf_get_pf_media_type(hdev);
2413
+ if (ret)
2414
+ return ret;
2415
+
13612416 /* get tc configuration from PF */
13622417 return hclgevf_get_tc_info(hdev);
13632418 }
....@@ -1365,7 +2420,7 @@
13652420 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
13662421 {
13672422 struct pci_dev *pdev = ae_dev->pdev;
1368
- struct hclgevf_dev *hdev = ae_dev->priv;
2423
+ struct hclgevf_dev *hdev;
13692424
13702425 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
13712426 if (!hdev)
....@@ -1401,57 +2456,139 @@
14012456 return 0;
14022457 }
14032458
2459
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
2460
+{
2461
+ struct hclgevf_cfg_gro_status_cmd *req;
2462
+ struct hclgevf_desc desc;
2463
+ int ret;
2464
+
2465
+ if (!hnae3_dev_gro_supported(hdev))
2466
+ return 0;
2467
+
2468
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
2469
+ false);
2470
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2471
+
2472
+ req->gro_en = en ? 1 : 0;
2473
+
2474
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2475
+ if (ret)
2476
+ dev_err(&hdev->pdev->dev,
2477
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
2478
+
2479
+ return ret;
2480
+}
2481
+
2482
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
2483
+{
2484
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2485
+ struct hclgevf_rss_tuple_cfg *tuple_sets;
2486
+ u32 i;
2487
+
2488
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
2489
+ rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
2490
+ tuple_sets = &rss_cfg->rss_tuple_sets;
2491
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2492
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2493
+ memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
2494
+ HCLGEVF_RSS_KEY_SIZE);
2495
+
2496
+ tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2497
+ tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2498
+ tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2499
+ tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2500
+ tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2501
+ tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2502
+ tuple_sets->ipv6_sctp_en =
2503
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
2504
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
2505
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2506
+ tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2507
+ }
2508
+
2509
+ /* Initialize RSS indirect table */
2510
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
2511
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
2512
+}
2513
+
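/* Worked example of the default indirection table above (rss_size value
 * illustrative): with rss_size = 4, entry i receives i % 4, so hash
 * buckets map to queues as 0,1,2,3,0,1,2,3,... across the whole table,
 * spreading flows evenly over the allocated queues:
 *
 *	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 *		tbl[i] = i % 4;		// tbl[0..7] = 0,1,2,3,0,1,2,3
 */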
14042514 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
14052515 {
14062516 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1407
- int i, ret;
2517
+ int ret;
14082518
1409
- rss_cfg->rss_size = hdev->rss_size_max;
2519
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2520
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
2521
+ rss_cfg->rss_hash_key);
2522
+ if (ret)
2523
+ return ret;
14102524
1411
- /* Initialize RSS indirect table for each vport */
1412
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1413
- rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
2525
+ ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
2526
+ if (ret)
2527
+ return ret;
2528
+ }
14142529
14152530 ret = hclgevf_set_rss_indir_table(hdev);
14162531 if (ret)
14172532 return ret;
14182533
1419
- return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
2534
+ return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
14202535 }
14212536
14222537 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
14232538 {
1424
- /* other vlan config(like, VLAN TX/RX offload) would also be added
1425
- * here later
1426
- */
2539
+ struct hnae3_handle *nic = &hdev->nic;
2540
+ int ret;
2541
+
2542
+ ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2543
+ if (ret) {
2544
+ dev_err(&hdev->pdev->dev,
2545
+ "failed to enable rx vlan offload, ret = %d\n", ret);
2546
+ return ret;
2547
+ }
2548
+
14272549 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
14282550 false);
2551
+}
2552
+
2553
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2554
+{
2555
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
2556
+
2557
+ unsigned long last = hdev->serv_processed_cnt;
2558
+ int i = 0;
2559
+
2560
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2561
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2562
+ last == hdev->serv_processed_cnt)
2563
+ usleep_range(1, 1);
2564
+}
2565
+
2566
+static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2567
+{
2568
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2569
+
2570
+ if (enable) {
2571
+ hclgevf_task_schedule(hdev, 0);
2572
+ } else {
2573
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2574
+
2575
+ /* flush memory to make sure DOWN is seen by service task */
2576
+ smp_mb__before_atomic();
2577
+ hclgevf_flush_link_update(hdev);
2578
+ }
14292579 }
14302580
14312581 static int hclgevf_ae_start(struct hnae3_handle *handle)
14322582 {
14332583 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1434
- int i, queue_id;
14352584
1436
- for (i = 0; i < handle->kinfo.num_tqps; i++) {
1437
- /* ring enable */
1438
- queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1439
- if (queue_id < 0) {
1440
- dev_warn(&hdev->pdev->dev,
1441
- "Get invalid queue id, ignore it\n");
1442
- continue;
1443
- }
2585
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
14442586
1445
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
1446
- }
1447
-
1448
- /* reset tqp stats */
14492587 hclgevf_reset_tqp_stats(handle);
14502588
14512589 hclgevf_request_link_info(hdev);
14522590
1453
- clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1454
- mod_timer(&hdev->service_timer, jiffies + HZ);
2591
+ hclgevf_update_link_mode(hdev);
14552592
14562593 return 0;
14572594 }
....@@ -1459,50 +2596,63 @@
14592596 static void hclgevf_ae_stop(struct hnae3_handle *handle)
14602597 {
14612598 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1462
- int i, queue_id;
2599
+ int i;
14632600
14642601 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
14652602
1466
- for (i = 0; i < hdev->num_tqps; i++) {
1467
- /* Ring disable */
1468
- queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1469
- if (queue_id < 0) {
1470
- dev_warn(&hdev->pdev->dev,
1471
- "Get invalid queue id, ignore it\n");
1472
- continue;
1473
- }
2603
+ if (hdev->reset_type != HNAE3_VF_RESET)
2604
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
2605
+ if (hclgevf_reset_tqp(handle, i))
2606
+ break;
14742607
1475
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
1476
- }
1477
-
1478
- /* reset tqp stats */
14792608 hclgevf_reset_tqp_stats(handle);
1480
- del_timer_sync(&hdev->service_timer);
1481
- cancel_work_sync(&hdev->service_task);
1482
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
14832609 hclgevf_update_link_status(hdev, 0);
2610
+}
2611
+
2612
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2613
+{
2614
+#define HCLGEVF_STATE_ALIVE 1
2615
+#define HCLGEVF_STATE_NOT_ALIVE 0
2616
+
2617
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2618
+ struct hclge_vf_to_pf_msg send_msg;
2619
+
2620
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2621
+ send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2622
+ HCLGEVF_STATE_NOT_ALIVE;
2623
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2624
+}
2625
+
2626
+static int hclgevf_client_start(struct hnae3_handle *handle)
2627
+{
2628
+ return hclgevf_set_alive(handle, true);
2629
+}
2630
+
2631
+static void hclgevf_client_stop(struct hnae3_handle *handle)
2632
+{
2633
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2634
+ int ret;
2635
+
2636
+ ret = hclgevf_set_alive(handle, false);
2637
+ if (ret)
2638
+ dev_warn(&hdev->pdev->dev,
2639
+ "%s failed %d\n", __func__, ret);
14842640 }
14852641
14862642 static void hclgevf_state_init(struct hclgevf_dev *hdev)
14872643 {
1488
- /* if this is on going reset then skip this initialization */
1489
- if (hclgevf_dev_ongoing_reset(hdev))
1490
- return;
1491
-
1492
- /* setup tasks for the MBX */
1493
- INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
14942644 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
14952645 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2646
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
14962647
1497
- /* setup tasks for service timer */
1498
- timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1499
-
1500
- INIT_WORK(&hdev->service_task, hclgevf_service_task);
1501
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1502
-
1503
- INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
2648
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
15042649
15052650 mutex_init(&hdev->mbx_resp.mbx_mutex);
2651
+ sema_init(&hdev->reset_sem, 1);
2652
+
2653
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
2654
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2655
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
15062656
15072657 /* bring the device down */
15082658 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
....@@ -1511,15 +2661,10 @@
15112661 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
15122662 {
15132663 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2664
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
15142665
1515
- if (hdev->service_timer.function)
1516
- del_timer_sync(&hdev->service_timer);
1517
- if (hdev->service_task.func)
1518
- cancel_work_sync(&hdev->service_task);
1519
- if (hdev->mbx_service_task.func)
1520
- cancel_work_sync(&hdev->mbx_service_task);
1521
- if (hdev->rst_service_task.func)
1522
- cancel_work_sync(&hdev->rst_service_task);
2666
+ if (hdev->service_task.work.func)
2667
+ cancel_delayed_work_sync(&hdev->service_task);
15232668
15242669 mutex_destroy(&hdev->mbx_resp.mbx_mutex);
15252670 }
....@@ -1530,17 +2675,14 @@
15302675 int vectors;
15312676 int i;
15322677
1533
- /* if this is on going reset then skip this initialization */
1534
- if (hclgevf_dev_ongoing_reset(hdev))
1535
- return 0;
1536
-
1537
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
2678
+ if (hnae3_dev_roce_supported(hdev))
15382679 vectors = pci_alloc_irq_vectors(pdev,
15392680 hdev->roce_base_msix_offset + 1,
15402681 hdev->num_msi,
15412682 PCI_IRQ_MSIX);
15422683 else
1543
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2684
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2685
+ hdev->num_msi,
15442686 PCI_IRQ_MSI | PCI_IRQ_MSIX);
15452687
15462688 if (vectors < 0) {
....@@ -1551,11 +2693,12 @@
15512693 }
15522694 if (vectors < hdev->num_msi)
15532695 dev_warn(&hdev->pdev->dev,
1554
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2696
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
15552697 hdev->num_msi, vectors);
15562698
15572699 hdev->num_msi = vectors;
15582700 hdev->num_msi_left = vectors;
2701
+
15592702 hdev->base_msi_vector = pdev->irq;
15602703 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
15612704
....@@ -1572,6 +2715,7 @@
15722715 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
15732716 sizeof(int), GFP_KERNEL);
15742717 if (!hdev->vector_irq) {
2718
+ devm_kfree(&pdev->dev, hdev->vector_status);
15752719 pci_free_irq_vectors(pdev);
15762720 return -ENOMEM;
15772721 }
....@@ -1583,21 +2727,21 @@
15832727 {
15842728 struct pci_dev *pdev = hdev->pdev;
15852729
2730
+ devm_kfree(&pdev->dev, hdev->vector_status);
2731
+ devm_kfree(&pdev->dev, hdev->vector_irq);
15862732 pci_free_irq_vectors(pdev);
15872733 }
15882734
15892735 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
15902736 {
1591
- int ret = 0;
1592
-
1593
- /* if this is on going reset then skip this initialization */
1594
- if (hclgevf_dev_ongoing_reset(hdev))
1595
- return 0;
2737
+ int ret;
15962738
15972739 hclgevf_get_misc_vector(hdev);
15982740
2741
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2742
+ HCLGEVF_NAME, pci_name(hdev->pdev));
15992743 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1600
- 0, "hclgevf_cmd", hdev);
2744
+ 0, hdev->misc_vector.name, hdev);
16012745 if (ret) {
16022746 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
16032747 hdev->misc_vector.vector_irq);
....@@ -1621,6 +2765,75 @@
16212765 hclgevf_free_vector(hdev, 0);
16222766 }
16232767
2768
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
2769
+{
2770
+ struct device *dev = &hdev->pdev->dev;
2771
+
2772
+ dev_info(dev, "VF info begin:\n");
2773
+
2774
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2775
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2776
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2777
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2778
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2779
+ dev_info(dev, "PF media type of this VF: %u\n",
2780
+ hdev->hw.mac.media_type);
2781
+
2782
+ dev_info(dev, "VF info end.\n");
2783
+}
2784
+
2785
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2786
+ struct hnae3_client *client)
2787
+{
2788
+ struct hclgevf_dev *hdev = ae_dev->priv;
2789
+ int rst_cnt = hdev->rst_stats.rst_cnt;
2790
+ int ret;
2791
+
2792
+ ret = client->ops->init_instance(&hdev->nic);
2793
+ if (ret)
2794
+ return ret;
2795
+
2796
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2797
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2798
+ rst_cnt != hdev->rst_stats.rst_cnt) {
2799
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2800
+
2801
+ client->ops->uninit_instance(&hdev->nic, 0);
2802
+ return -EBUSY;
2803
+ }
2804
+
2805
+ hnae3_set_client_init_flag(client, ae_dev, 1);
2806
+
2807
+ if (netif_msg_drv(&hdev->nic))
2808
+ hclgevf_info_show(hdev);
2809
+
2810
+ return 0;
2811
+}
2812
+
2813
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2814
+ struct hnae3_client *client)
2815
+{
2816
+ struct hclgevf_dev *hdev = ae_dev->priv;
2817
+ int ret;
2818
+
2819
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2820
+ !hdev->nic_client)
2821
+ return 0;
2822
+
2823
+ ret = hclgevf_init_roce_base_info(hdev);
2824
+ if (ret)
2825
+ return ret;
2826
+
2827
+ ret = client->ops->init_instance(&hdev->roce);
2828
+ if (ret)
2829
+ return ret;
2830
+
2831
+ set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2832
+ hnae3_set_client_init_flag(client, ae_dev, 1);
2833
+
2834
+ return 0;
2835
+}
2836
+
16242837 static int hclgevf_init_client_instance(struct hnae3_client *client,
16252838 struct hnae3_ae_dev *ae_dev)
16262839 {
....@@ -1632,35 +2845,15 @@
16322845 hdev->nic_client = client;
16332846 hdev->nic.client = client;
16342847
1635
- ret = client->ops->init_instance(&hdev->nic);
2848
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
16362849 if (ret)
16372850 goto clear_nic;
16382851
1639
- hnae3_set_client_init_flag(client, ae_dev, 1);
1640
-
1641
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1642
- struct hnae3_client *rc = hdev->roce_client;
1643
-
1644
- ret = hclgevf_init_roce_base_info(hdev);
1645
- if (ret)
1646
- goto clear_roce;
1647
- ret = rc->ops->init_instance(&hdev->roce);
1648
- if (ret)
1649
- goto clear_roce;
1650
-
1651
- hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
1652
- 1);
1653
- }
1654
- break;
1655
- case HNAE3_CLIENT_UNIC:
1656
- hdev->nic_client = client;
1657
- hdev->nic.client = client;
1658
-
1659
- ret = client->ops->init_instance(&hdev->nic);
2852
+ ret = hclgevf_init_roce_client_instance(ae_dev,
2853
+ hdev->roce_client);
16602854 if (ret)
1661
- goto clear_nic;
2855
+ goto clear_roce;
16622856
1663
- hnae3_set_client_init_flag(client, ae_dev, 1);
16642857 break;
16652858 case HNAE3_CLIENT_ROCE:
16662859 if (hnae3_dev_roce_supported(hdev)) {
....@@ -1668,17 +2861,13 @@
16682861 hdev->roce.client = client;
16692862 }
16702863
1671
- if (hdev->roce_client && hdev->nic_client) {
1672
- ret = hclgevf_init_roce_base_info(hdev);
1673
- if (ret)
1674
- goto clear_roce;
2864
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
2865
+ if (ret)
2866
+ goto clear_roce;
16752867
1676
- ret = client->ops->init_instance(&hdev->roce);
1677
- if (ret)
1678
- goto clear_roce;
1679
- }
1680
-
1681
- hnae3_set_client_init_flag(client, ae_dev, 1);
2868
+ break;
2869
+ default:
2870
+ return -EINVAL;
16822871 }
16832872
16842873 return 0;
....@@ -1700,6 +2889,10 @@
17002889
17012890 /* un-init roce, if it exists */
17022891 if (hdev->roce_client) {
2892
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2893
+ msleep(HCLGEVF_WAIT_RESET_DONE);
2894
+ clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2895
+
17032896 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
17042897 hdev->roce_client = NULL;
17052898 hdev->roce.client = NULL;
....@@ -1708,6 +2901,10 @@
17082901 /* un-init nic/unic, if this was not called by roce client */
17092902 if (client->ops->uninit_instance && hdev->nic_client &&
17102903 client->type != HNAE3_CLIENT_ROCE) {
2904
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2905
+ msleep(HCLGEVF_WAIT_RESET_DONE);
2906
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2907
+
17112908 client->ops->uninit_instance(&hdev->nic, 0);
17122909 hdev->nic_client = NULL;
17132910 hdev->nic.client = NULL;
....@@ -1719,14 +2916,6 @@
17192916 struct pci_dev *pdev = hdev->pdev;
17202917 struct hclgevf_hw *hw;
17212918 int ret;
1722
-
1723
- /* check if we need to skip initialization of pci. This will happen if
1724
- * device is undergoing VF reset. Otherwise, we would need to
1725
- * re-initialize pci interface again i.e. when device is not going
1726
- * through *any* reset or actually undergoing full reset.
1727
- */
1728
- if (hclgevf_dev_ongoing_reset(hdev))
1729
- return 0;
17302919
17312920 ret = pci_enable_device(pdev);
17322921 if (ret) {
....@@ -1793,14 +2982,17 @@
17932982
17942983 req = (struct hclgevf_query_res_cmd *)desc.data;
17952984
1796
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
2985
+ if (hnae3_dev_roce_supported(hdev)) {
17972986 hdev->roce_base_msix_offset =
1798
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
2987
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
17992988 HCLGEVF_MSIX_OFT_ROCEE_M,
18002989 HCLGEVF_MSIX_OFT_ROCEE_S);
18012990 hdev->num_roce_msix =
1802
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
2991
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
18032992 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2993
+
2994
+ /* the nic's msix number always equals the roce's. */
2995
+ hdev->num_nic_msix = hdev->num_roce_msix;
18042996
18052997 /* VF should have NIC vectors and Roce vectors, NIC vectors
18062998 * are queued before Roce vectors. The offset is fixed to 64.
....@@ -1809,9 +3001,175 @@
18093001 hdev->roce_base_msix_offset;
18103002 } else {
18113003 hdev->num_msi =
1812
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
3004
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
18133005 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
3006
+
3007
+ hdev->num_nic_msix = hdev->num_msi;
18143008 }
3009
+
3010
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
3011
+ dev_err(&hdev->pdev->dev,
3012
+ "Just %u msi resources, not enough for vf(min:2).\n",
3013
+ hdev->num_nic_msix);
3014
+ return -EINVAL;
3015
+ }
3016
+
3017
+ return 0;
3018
+}
3019
+
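/* Worked example of the vector accounting above (numbers illustrative):
 * if firmware reports roce_base_msix_offset = 64 and num_roce_msix = 64,
 * then num_nic_msix = 64 and num_msi = 64 + 64 = 128, i.e. NIC vectors
 * occupy slots 0..63 and RoCE vectors slots 64..127, matching the fixed
 * offset mentioned in the comment above.
 */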
3020
+static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
3021
+{
3022
+#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
3023
+
3024
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3025
+
3026
+ ae_dev->dev_specs.max_non_tso_bd_num =
3027
+ HCLGEVF_MAX_NON_TSO_BD_NUM;
3028
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
3029
+ ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
3030
+}
3031
+
3032
+static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
3033
+ struct hclgevf_desc *desc)
3034
+{
3035
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3036
+ struct hclgevf_dev_specs_0_cmd *req0;
3037
+
3038
+ req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
3039
+
3040
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
3041
+ ae_dev->dev_specs.rss_ind_tbl_size =
3042
+ le16_to_cpu(req0->rss_ind_tbl_size);
3043
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
3044
+}
3045
+
3046
+static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
3047
+{
3048
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
3049
+
3050
+ if (!dev_specs->max_non_tso_bd_num)
3051
+ dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
3052
+ if (!dev_specs->rss_ind_tbl_size)
3053
+ dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
3054
+ if (!dev_specs->rss_key_size)
3055
+ dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
3056
+}
3057
+
3058
+static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
3059
+{
3060
+ struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
3061
+ int ret;
3062
+ int i;
3063
+
3064
+ /* set default specifications as devices lower than version V3 do not
3065
+ * support querying specifications from firmware.
3066
+ */
3067
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
3068
+ hclgevf_set_default_dev_specs(hdev);
3069
+ return 0;
3070
+ }
3071
+
3072
+ for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
3073
+ hclgevf_cmd_setup_basic_desc(&desc[i],
3074
+ HCLGEVF_OPC_QUERY_DEV_SPECS, true);
3075
+ desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
3076
+ }
3077
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
3078
+ true);
3079
+
3080
+ ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
3081
+ if (ret)
3082
+ return ret;
3083
+
3084
+ hclgevf_parse_dev_specs(hdev, desc);
3085
+ hclgevf_check_dev_specs(hdev);
3086
+
3087
+ return 0;
3088
+}
3089
+
3090
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
3091
+{
3092
+ struct pci_dev *pdev = hdev->pdev;
3093
+ int ret = 0;
3094
+
3095
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
3096
+ hdev->reset_type == HNAE3_FLR_RESET) &&
3097
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3098
+ hclgevf_misc_irq_uninit(hdev);
3099
+ hclgevf_uninit_msi(hdev);
3100
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3101
+ }
3102
+
3103
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3104
+ pci_set_master(pdev);
3105
+ ret = hclgevf_init_msi(hdev);
3106
+ if (ret) {
3107
+ dev_err(&pdev->dev,
3108
+ "failed(%d) to init MSI/MSI-X\n", ret);
3109
+ return ret;
3110
+ }
3111
+
3112
+ ret = hclgevf_misc_irq_init(hdev);
3113
+ if (ret) {
3114
+ hclgevf_uninit_msi(hdev);
3115
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
3116
+ ret);
3117
+ return ret;
3118
+ }
3119
+
3120
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3121
+ }
3122
+
3123
+ return ret;
3124
+}
3125
+
3126
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
3127
+{
3128
+ struct hclge_vf_to_pf_msg send_msg;
3129
+
3130
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
3131
+ HCLGE_MBX_VPORT_LIST_CLEAR);
3132
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3133
+}
3134
+
3135
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
3136
+{
3137
+ struct pci_dev *pdev = hdev->pdev;
3138
+ int ret;
3139
+
3140
+ ret = hclgevf_pci_reset(hdev);
3141
+ if (ret) {
3142
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
3143
+ return ret;
3144
+ }
3145
+
3146
+ ret = hclgevf_cmd_init(hdev);
3147
+ if (ret) {
3148
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
3149
+ return ret;
3150
+ }
3151
+
3152
+ ret = hclgevf_rss_init_hw(hdev);
3153
+ if (ret) {
3154
+ dev_err(&hdev->pdev->dev,
3155
+ "failed(%d) to initialize RSS\n", ret);
3156
+ return ret;
3157
+ }
3158
+
3159
+ ret = hclgevf_config_gro(hdev, true);
3160
+ if (ret)
3161
+ return ret;
3162
+
3163
+ ret = hclgevf_init_vlan_config(hdev);
3164
+ if (ret) {
3165
+ dev_err(&hdev->pdev->dev,
3166
+ "failed(%d) to initialize VLAN config\n", ret);
3167
+ return ret;
3168
+ }
3169
+
3170
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
3171
+
3172
+ dev_info(&hdev->pdev->dev, "Reset done\n");
18153173
18163174 return 0;
18173175 }
....@@ -1821,17 +3179,13 @@
18213179 struct pci_dev *pdev = hdev->pdev;
18223180 int ret;
18233181
1824
- /* check if device is on-going full reset(i.e. pcie as well) */
1825
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
1826
- dev_warn(&pdev->dev, "device is going full reset\n");
1827
- hclgevf_uninit_hdev(hdev);
1828
- }
1829
-
18303182 ret = hclgevf_pci_init(hdev);
1831
- if (ret) {
1832
- dev_err(&pdev->dev, "PCI initialization failed\n");
3183
+ if (ret)
18333184 return ret;
1834
- }
3185
+
3186
+ ret = hclgevf_cmd_queue_init(hdev);
3187
+ if (ret)
3188
+ goto err_cmd_queue_init;
18353189
18363190 ret = hclgevf_cmd_init(hdev);
18373191 if (ret)
....@@ -1839,26 +3193,31 @@
18393193
18403194 /* Get vf resource */
18413195 ret = hclgevf_query_vf_resource(hdev);
3196
+ if (ret)
3197
+ goto err_cmd_init;
3198
+
3199
+ ret = hclgevf_query_dev_specs(hdev);
18423200 if (ret) {
1843
- dev_err(&hdev->pdev->dev,
1844
- "Query vf status error, ret = %d.\n", ret);
1845
- goto err_query_vf;
3201
+ dev_err(&pdev->dev,
3202
+ "failed to query dev specifications, ret = %d\n", ret);
3203
+ goto err_cmd_init;
18463204 }
18473205
18483206 ret = hclgevf_init_msi(hdev);
18493207 if (ret) {
18503208 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1851
- goto err_query_vf;
3209
+ goto err_cmd_init;
18523210 }
18533211
18543212 hclgevf_state_init(hdev);
3213
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
3214
+ hdev->reset_type = HNAE3_NONE_RESET;
18553215
18563216 ret = hclgevf_misc_irq_init(hdev);
1857
- if (ret) {
1858
- dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1859
- ret);
3217
+ if (ret)
18603218 goto err_misc_irq_init;
1861
- }
3219
+
3220
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
18623221
18633222 ret = hclgevf_configure(hdev);
18643223 if (ret) {
....@@ -1873,24 +3232,28 @@
18733232 }
18743233
18753234 ret = hclgevf_set_handle_info(hdev);
1876
- if (ret) {
1877
- dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
3235
+ if (ret)
18783236 goto err_config;
1879
- }
18803237
1881
- /* Initialize mta type for this VF */
1882
- ret = hclgevf_cfg_func_mta_type(hdev);
1883
- if (ret) {
1884
- dev_err(&hdev->pdev->dev,
1885
- "failed(%d) to initialize MTA type\n", ret);
3238
+ ret = hclgevf_config_gro(hdev, true);
3239
+ if (ret)
18863240 goto err_config;
1887
- }
18883241
18893242 /* Initialize RSS for this VF */
3243
+ hclgevf_rss_init_cfg(hdev);
18903244 ret = hclgevf_rss_init_hw(hdev);
18913245 if (ret) {
18923246 dev_err(&hdev->pdev->dev,
18933247 "failed(%d) to initialize RSS\n", ret);
3248
+ goto err_config;
3249
+ }
3250
+
3251
+ /* ensure vf tbl list is empty before init */
3252
+ ret = hclgevf_clear_vport_list(hdev);
3253
+ if (ret) {
3254
+ dev_err(&pdev->dev,
3255
+ "failed to clear tbl list configuration, ret = %d.\n",
3256
+ ret);
18943257 goto err_config;
18953258 }
18963259
....@@ -1901,7 +3264,11 @@
19013264 goto err_config;
19023265 }
19033266
1904
- pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
3267
+ hdev->last_reset_time = jiffies;
3268
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3269
+ HCLGEVF_DRIVER_NAME);
3270
+
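+ /* kick off the periodic service task, first run roughly one second from now */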
3271
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
19053272
19063273 return 0;
19073274
....@@ -1910,20 +3277,31 @@
19103277 err_misc_irq_init:
19113278 hclgevf_state_uninit(hdev);
19123279 hclgevf_uninit_msi(hdev);
1913
-err_query_vf:
1914
- hclgevf_cmd_uninit(hdev);
19153280 err_cmd_init:
3281
+ hclgevf_cmd_uninit(hdev);
3282
+err_cmd_queue_init:
19163283 hclgevf_pci_uninit(hdev);
3284
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
19173285 return ret;
19183286 }
19193287
19203288 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
19213289 {
3290
+ struct hclge_vf_to_pf_msg send_msg;
3291
+
19223292 hclgevf_state_uninit(hdev);
1923
- hclgevf_misc_irq_uninit(hdev);
3293
+
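+ /* tell the PF this VF is going down so it can release the vport's resources */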
3294
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3295
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3296
+
3297
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3298
+ hclgevf_misc_irq_uninit(hdev);
3299
+ hclgevf_uninit_msi(hdev);
3300
+ }
3301
+
19243302 hclgevf_cmd_uninit(hdev);
1925
- hclgevf_uninit_msi(hdev);
19263303 hclgevf_pci_uninit(hdev);
3304
+ hclgevf_uninit_mac_list(hdev);
19273305 }
19283306
19293307 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -1938,10 +3316,12 @@
19383316 }
19393317
19403318 ret = hclgevf_init_hdev(ae_dev->priv);
1941
- if (ret)
3319
+ if (ret) {
19423320 dev_err(&pdev->dev, "hclge device initialization failed\n");
3321
+ return ret;
3322
+ }
19433323
1944
- return ret;
3324
+ return 0;
19453325 }
19463326
19473327 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
....@@ -1983,12 +3363,85 @@
19833363 }
19843364
19853365 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1986
- u16 *free_tqps, u16 *max_rss_size)
3366
+ u16 *alloc_tqps, u16 *max_rss_size)
19873367 {
19883368 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
19893369
1990
- *free_tqps = 0;
3370
+ *alloc_tqps = hdev->num_tqps;
19913371 *max_rss_size = hdev->rss_size_max;
3372
+}
3373
+
3374
+static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3375
+ u32 new_tqps_num)
3376
+{
3377
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3378
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3379
+ u16 max_rss_size;
3380
+
3381
+ kinfo->req_rss_size = new_tqps_num;
3382
+
3383
+ max_rss_size = min_t(u16, hdev->rss_size_max,
3384
+ hdev->num_tqps / kinfo->num_tc);
3385
+
3386
+ /* Use the user's configuration when it is not larger than
3387
+ * max_rss_size; otherwise, use the maximum specification value.
3388
+ */
3389
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3390
+ kinfo->req_rss_size <= max_rss_size)
3391
+ kinfo->rss_size = kinfo->req_rss_size;
3392
+ else if (kinfo->rss_size > max_rss_size ||
3393
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3394
+ kinfo->rss_size = max_rss_size;
3395
+
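+ /* rss_size queues are used per TC, so the total in-use TQPs scale with num_tc */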
3396
+ kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
3397
+}
3398
+
3399
+static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3400
+ bool rxfh_configured)
3401
+{
3402
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3403
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3404
+ u16 cur_rss_size = kinfo->rss_size;
3405
+ u16 cur_tqps = kinfo->num_tqps;
3406
+ u32 *rss_indir;
3407
+ unsigned int i;
3408
+ int ret;
3409
+
3410
+ hclgevf_update_rss_size(handle, new_tqps_num);
3411
+
3412
+ ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
3413
+ if (ret)
3414
+ return ret;
3415
+
3416
+ /* RSS indirection table has been configured by user */
3417
+ if (rxfh_configured)
3418
+ goto out;
3419
+
3420
+ /* Reinitialize the RSS indirection table according to the new RSS size */
3421
+ rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3422
+ if (!rss_indir)
3423
+ return -ENOMEM;
3424
+
3425
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
3426
+ rss_indir[i] = i % kinfo->rss_size;
3427
+
3428
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
3429
+
3430
+ ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3431
+ if (ret)
3432
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3433
+ ret);
3434
+
3435
+ kfree(rss_indir);
3436
+
3437
+out:
3438
+ if (!ret)
3439
+ dev_info(&hdev->pdev->dev,
3440
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3441
+ cur_rss_size, kinfo->rss_size,
3442
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
3443
+
3444
+ return ret;
19923445 }
19933446
19943447 static int hclgevf_get_status(struct hnae3_handle *handle)
....@@ -2019,26 +3472,190 @@
20193472 hdev->hw.mac.duplex = duplex;
20203473 }
20213474
3475
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3476
+{
3477
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3478
+
3479
+ return hclgevf_config_gro(hdev, enable);
3480
+}
3481
+
3482
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3483
+ u8 *module_type)
3484
+{
3485
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3486
+
3487
+ if (media_type)
3488
+ *media_type = hdev->hw.mac.media_type;
3489
+
3490
+ if (module_type)
3491
+ *module_type = hdev->hw.mac.module_type;
3492
+}
3493
+
3494
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3495
+{
3496
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3497
+
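+ /* a nonzero HCLGEVF_RST_ING value means a hardware reset is still in progress */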
3498
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3499
+}
3500
+
3501
+static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3502
+{
3503
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3504
+
3505
+ return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
3506
+}
3507
+
3508
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3509
+{
3510
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3511
+
3512
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3513
+}
3514
+
3515
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3516
+{
3517
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3518
+
3519
+ return hdev->rst_stats.hw_rst_done_cnt;
3520
+}
3521
+
3522
+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3523
+ unsigned long *supported,
3524
+ unsigned long *advertising)
3525
+{
3526
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3527
+
3528
+ *supported = hdev->hw.mac.supported;
3529
+ *advertising = hdev->hw.mac.advertising;
3530
+}
3531
+
3532
+#define MAX_SEPARATE_NUM 4
3533
+#define SEPARATOR_VALUE 0xFFFFFFFF
3534
+#define REG_NUM_PER_LINE 4
3535
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
3536
+
3537
+static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3538
+{
3539
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3540
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3541
+
3542
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3543
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3544
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3545
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3546
+
3547
+ return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3548
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3549
+}
3550
+
3551
+static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3552
+ void *data)
3553
+{
3554
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3555
+ int i, j, reg_um, separator_num;
3556
+ u32 *reg = data;
3557
+
3558
+ *version = hdev->fw_version;
3559
+
3560
+ /* fetch per-VF register values from the VF PCIe register space */
3561
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
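+ /* pad each register group out to the next full line with SEPARATOR_VALUE words */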
3562
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3563
+ for (i = 0; i < reg_um; i++)
3564
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3565
+ for (i = 0; i < separator_num; i++)
3566
+ *reg++ = SEPARATOR_VALUE;
3567
+
3568
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
3569
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3570
+ for (i = 0; i < reg_um; i++)
3571
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3572
+ for (i = 0; i < separator_num; i++)
3573
+ *reg++ = SEPARATOR_VALUE;
3574
+
3575
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
3576
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
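+ /* each TQP's ring registers are laid out at a 0x200 byte stride */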
3577
+ for (j = 0; j < hdev->num_tqps; j++) {
3578
+ for (i = 0; i < reg_um; i++)
3579
+ *reg++ = hclgevf_read_dev(&hdev->hw,
3580
+ ring_reg_addr_list[i] +
3581
+ 0x200 * j);
3582
+ for (i = 0; i < separator_num; i++)
3583
+ *reg++ = SEPARATOR_VALUE;
3584
+ }
3585
+
3586
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
3587
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
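+ /* vector 0 is the misc interrupt; per-vector TQP interrupt registers are 4 bytes apart */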
3588
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
3589
+ for (i = 0; i < reg_um; i++)
3590
+ *reg++ = hclgevf_read_dev(&hdev->hw,
3591
+ tqp_intr_reg_addr_list[i] +
3592
+ 4 * j);
3593
+ for (i = 0; i < separator_num; i++)
3594
+ *reg++ = SEPARATOR_VALUE;
3595
+ }
3596
+}
3597
+
3598
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3599
+ u8 *port_base_vlan_info, u8 data_size)
3600
+{
3601
+ struct hnae3_handle *nic = &hdev->nic;
3602
+ struct hclge_vf_to_pf_msg send_msg;
3603
+ int ret;
3604
+
3605
+ rtnl_lock();
3606
+
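+ /* leave the vlan state alone while a reset is in progress or has failed */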
3607
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3608
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3609
+ dev_warn(&hdev->pdev->dev,
3610
+ "is resetting when updating port based vlan info\n");
3611
+ rtnl_unlock();
3612
+ return;
3613
+ }
3614
+
3615
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3616
+ if (ret) {
3617
+ rtnl_unlock();
3618
+ return;
3619
+ }
3620
+
3621
+ /* send msg to PF and wait for it to update the port based vlan info */
3622
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3623
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
3624
+ memcpy(send_msg.data, port_base_vlan_info, data_size);
3625
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3626
+ if (!ret) {
3627
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3628
+ nic->port_base_vlan_state = state;
3629
+ else
3630
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3631
+ }
3632
+
3633
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3634
+ rtnl_unlock();
3635
+}
3636
+
20223637 static const struct hnae3_ae_ops hclgevf_ops = {
20233638 .init_ae_dev = hclgevf_init_ae_dev,
20243639 .uninit_ae_dev = hclgevf_uninit_ae_dev,
3640
+ .flr_prepare = hclgevf_flr_prepare,
3641
+ .flr_done = hclgevf_flr_done,
20253642 .init_client_instance = hclgevf_init_client_instance,
20263643 .uninit_client_instance = hclgevf_uninit_client_instance,
20273644 .start = hclgevf_ae_start,
20283645 .stop = hclgevf_ae_stop,
3646
+ .client_start = hclgevf_client_start,
3647
+ .client_stop = hclgevf_client_stop,
20293648 .map_ring_to_vector = hclgevf_map_ring_to_vector,
20303649 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
20313650 .get_vector = hclgevf_get_vector,
20323651 .put_vector = hclgevf_put_vector,
20333652 .reset_queue = hclgevf_reset_tqp,
2034
- .set_promisc_mode = hclgevf_set_promisc_mode,
20353653 .get_mac_addr = hclgevf_get_mac_addr,
20363654 .set_mac_addr = hclgevf_set_mac_addr,
20373655 .add_uc_addr = hclgevf_add_uc_addr,
20383656 .rm_uc_addr = hclgevf_rm_uc_addr,
20393657 .add_mc_addr = hclgevf_add_mc_addr,
20403658 .rm_mc_addr = hclgevf_rm_mc_addr,
2041
- .update_mta_status = hclgevf_update_mta_status,
20423659 .get_stats = hclgevf_get_stats,
20433660 .update_stats = hclgevf_update_stats,
20443661 .get_strings = hclgevf_get_strings,
....@@ -2047,15 +3664,33 @@
20473664 .get_rss_indir_size = hclgevf_get_rss_indir_size,
20483665 .get_rss = hclgevf_get_rss,
20493666 .set_rss = hclgevf_set_rss,
3667
+ .get_rss_tuple = hclgevf_get_rss_tuple,
3668
+ .set_rss_tuple = hclgevf_set_rss_tuple,
20503669 .get_tc_size = hclgevf_get_tc_size,
20513670 .get_fw_version = hclgevf_get_fw_version,
20523671 .set_vlan_filter = hclgevf_set_vlan_filter,
20533672 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
20543673 .reset_event = hclgevf_reset_event,
3674
+ .set_default_reset_request = hclgevf_set_def_reset_request,
3675
+ .set_channels = hclgevf_set_channels,
20553676 .get_channels = hclgevf_get_channels,
20563677 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3678
+ .get_regs_len = hclgevf_get_regs_len,
3679
+ .get_regs = hclgevf_get_regs,
20573680 .get_status = hclgevf_get_status,
20583681 .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3682
+ .get_media_type = hclgevf_get_media_type,
3683
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3684
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
3685
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3686
+ .set_gro_en = hclgevf_gro_en,
3687
+ .set_mtu = hclgevf_set_mtu,
3688
+ .get_global_queue_id = hclgevf_get_qid_global,
3689
+ .set_timer_task = hclgevf_set_timer_task,
3690
+ .get_link_mode = hclgevf_get_link_mode,
3691
+ .set_promisc_mode = hclgevf_set_promisc_mode,
3692
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3693
+ .get_cmdq_stat = hclgevf_get_cmdq_stat,
20593694 };
20603695
20613696 static struct hnae3_ae_algo ae_algovf = {
....@@ -2067,6 +3702,12 @@
20673702 {
20683703 pr_info("%s is initializing\n", HCLGEVF_NAME);
20693704
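+ /* dedicated workqueue on which the VF service task is scheduled */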
3705
+ hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
3706
+ if (!hclgevf_wq) {
3707
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
3708
+ return -ENOMEM;
3709
+ }
3710
+
20703711 hnae3_register_ae_algo(&ae_algovf);
20713712
20723713 return 0;
....@@ -2075,6 +3716,7 @@
20753716 static void hclgevf_exit(void)
20763717 {
20773718 hnae3_unregister_ae_algo(&ae_algovf);
3719
+ destroy_workqueue(hclgevf_wq);
20783720 }
20793721 module_init(hclgevf_init);
20803722 module_exit(hclgevf_exit);