forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -11,9 +11,6 @@
 #include "hclgevf_main.h"
 #include "hnae3.h"
 
-#define hclgevf_is_csq(ring)	((ring)->flag & HCLGEVF_TYPE_CSQ)
-#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
-	DMA_TO_DEVICE : DMA_FROM_DEVICE)
 #define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)
 
 static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
@@ -27,26 +24,39 @@
 	return ring->desc_num - used - 1;
 }
 
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+					   int head)
+{
+	int ntu = ring->next_to_use;
+	int ntc = ring->next_to_clean;
+
+	if (ntu > ntc)
+		return head >= ntc && head <= ntu;
+
+	return head >= ntc || head <= ntu;
+}
+
 static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
 {
+	struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
 	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
-	u16 ntc = csq->next_to_clean;
-	struct hclgevf_desc *desc;
-	int clean = 0;
+	int clean;
 	u32 head;
 
-	desc = &csq->desc[ntc];
 	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-	while (head != ntc) {
-		memset(desc, 0, sizeof(*desc));
-		ntc++;
-		if (ntc == csq->desc_num)
-			ntc = 0;
-		desc = &csq->desc[ntc];
-		clean++;
-	}
-	csq->next_to_clean = ntc;
+	rmb(); /* Make sure head is ready before touch any data */
 
+	if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
+			 csq->next_to_use, csq->next_to_clean);
+		dev_warn(&hdev->pdev->dev,
+			 "Disabling any further commands to IMP firmware\n");
+		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+		return -EIO;
+	}
+
+	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+	csq->next_to_clean = head;
 	return clean;
 }
 
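The rewritten hclgevf_cmd_csq_clean() in the hunk above drops the per-descriptor memset loop in favour of a range check on the hardware head pointer followed by a modular subtraction. Below is a minimal standalone C sketch of that wrap-around arithmetic; the demo_ring type and the head_is_valid()/clean_count() helpers are illustrative names, not part of the driver or the patch.

/* Standalone illustration of the ring-buffer logic used by the new
 * hclgevf_cmd_csq_clean(): a head index reported by hardware is trusted only
 * if it lies between the software next_to_clean and next_to_use indices
 * (allowing for wrap-around), and the cleaned count is computed modulo the
 * ring size.
 */
#include <stdio.h>

struct demo_ring {
	int desc_num;      /* ring size */
	int next_to_use;   /* software producer index */
	int next_to_clean; /* software consumer index */
};

/* mirrors hclgevf_is_valid_csq_clean_head() */
static int head_is_valid(const struct demo_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)                     /* region does not wrap */
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu; /* region wraps past the end */
}

/* mirrors clean = (head - next_to_clean + desc_num) % desc_num */
static int clean_count(const struct demo_ring *ring, int head)
{
	return (head - ring->next_to_clean + ring->desc_num) % ring->desc_num;
}

int main(void)
{
	struct demo_ring ring = { .desc_num = 1024,
				  .next_to_use = 10, .next_to_clean = 1000 };
	int head = 5; /* hardware head has wrapped past the end of the ring */

	if (head_is_valid(&ring, head))
		printf("head %d valid, cleans %d descriptors\n",
		       head, clean_count(&ring, head)); /* cleans 29 */
	return 0;
}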
@@ -61,7 +71,7 @@
 
 static bool hclgevf_is_special_opcode(u16 opcode)
 {
-	u16 spec_opcode[] = {0x30, 0x31, 0x32};
+	static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -72,13 +82,51 @@
 	return false;
 }
 
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_dev *hdev = ring->dev;
+	struct hclgevf_hw *hw = &hdev->hw;
+	u32 reg_val;
+
+	if (ring->flag == HCLGEVF_TYPE_CSQ) {
+		reg_val = lower_32_bits(ring->desc_dma_addr);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = upper_32_bits(ring->desc_dma_addr);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+		reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+		reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	} else {
+		reg_val = lower_32_bits(ring->desc_dma_addr);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = upper_32_bits(ring->desc_dma_addr);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+	}
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_cmd_config_regs(&hw->cmq.csq);
+	hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
 static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -96,61 +144,23 @@
 	}
 }
 
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
-				  struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
 {
 	struct hclgevf_hw *hw = &hdev->hw;
-	int ring_type = ring->flag;
-	u32 reg_val;
+	struct hclgevf_cmq_ring *ring =
+		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
 	int ret;
 
-	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-	spin_lock_init(&ring->lock);
-	ring->next_to_clean = 0;
-	ring->next_to_use = 0;
 	ring->dev = hdev;
+	ring->flag = ring_type;
 
 	/* allocate CSQ/CRQ descriptor */
 	ret = hclgevf_alloc_cmd_desc(ring);
-	if (ret) {
+	if (ret)
 		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
 			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-		return ret;
-	}
 
-	/* initialize the hardware registers with csq/crq dma-address,
-	 * descriptor number, head & tail pointers
-	 */
-	switch (ring_type) {
-	case HCLGEVF_TYPE_CSQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
-		break;
-	case HCLGEVF_TYPE_CRQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-		break;
-	}
-
-	return 0;
+	return ret;
 }
 
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -166,6 +176,38 @@
 	desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
 }
 
+static int hclgevf_cmd_convert_err_code(u16 desc_ret)
+{
+	switch (desc_ret) {
+	case HCLGEVF_CMD_EXEC_SUCCESS:
+		return 0;
+	case HCLGEVF_CMD_NO_AUTH:
+		return -EPERM;
+	case HCLGEVF_CMD_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case HCLGEVF_CMD_QUEUE_FULL:
+		return -EXFULL;
+	case HCLGEVF_CMD_NEXT_ERR:
+		return -ENOSR;
+	case HCLGEVF_CMD_UNEXE_ERR:
+		return -ENOTBLK;
+	case HCLGEVF_CMD_PARA_ERR:
+		return -EINVAL;
+	case HCLGEVF_CMD_RESULT_ERR:
+		return -ERANGE;
+	case HCLGEVF_CMD_TIMEOUT:
+		return -ETIME;
+	case HCLGEVF_CMD_HILINK_ERR:
+		return -ENOLINK;
+	case HCLGEVF_CMD_QUEUE_ILLEGAL:
+		return -ENXIO;
+	case HCLGEVF_CMD_INVALID:
+		return -EBADR;
+	default:
+		return -EIO;
+	}
+}
+
 /* hclgevf_cmd_send - send command to command queue
  * @hw: pointer to the hw struct
  * @desc: prefilled descriptor for describing the command
@@ -177,6 +219,7 @@
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 {
 	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
 	struct hclgevf_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
@@ -188,7 +231,17 @@
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
+	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
 	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclgevf_read_dev(hw,
+						      HCLGEVF_NIC_CSQ_HEAD_REG);
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
@@ -237,11 +290,7 @@
 			else
 				retval = le16_to_cpu(desc[0].retval);
 
-			if ((enum hclgevf_cmd_return_status)retval ==
-			    HCLGEVF_CMD_EXEC_SUCCESS)
-				status = 0;
-			else
-				status = -EIO;
+			status = hclgevf_cmd_convert_err_code(retval);
 			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
 			ntc++;
 			handle++;
@@ -251,23 +300,47 @@
 	}
 
 	if (!complete)
-		status = -EAGAIN;
+		status = -EBADE;
 
 	/* Clean the command send queue */
 	handle = hclgevf_cmd_csq_clean(hw);
-	if (handle != num) {
+	if (handle != num)
 		dev_warn(&hdev->pdev->dev,
 			 "cleaned %d, need to clean %d\n", handle, num);
-	}
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
 	return status;
 }
 
-static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
-					      u32 *version)
+static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
+				     struct hclgevf_query_version_cmd *cmd)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+	u32 caps;
+
+	caps = __le32_to_cpu(cmd->caps[0]);
+
+	if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
+		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
+		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
+		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 	struct hclgevf_query_version_cmd *resp;
 	struct hclgevf_desc desc;
 	int status;
@@ -275,66 +348,141 @@
 	resp = (struct hclgevf_query_version_cmd *)desc.data;
 
 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
-	status = hclgevf_cmd_send(hw, &desc, 1);
-	if (!status)
-		*version = le32_to_cpu(resp->firmware);
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		return status;
+
+	hdev->fw_version = le32_to_cpu(resp->firmware);
+
+	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+			      HNAE3_PCI_REVISION_BIT_SIZE;
+	ae_dev->dev_version |= hdev->pdev->revision;
+
+	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+		hclgevf_set_default_capability(hdev);
+
+	hclgevf_parse_capability(hdev, resp);
 
 	return status;
 }
 
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
 {
-	u32 version;
 	int ret;
 
-	/* setup Tx write back timeout */
-	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+	/* Setup the lock for command queue */
+	spin_lock_init(&hdev->hw.cmq.csq.lock);
+	spin_lock_init(&hdev->hw.cmq.crq.lock);
 
-	/* setup queue CSQ/CRQ rings */
-	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CSQ ring\n", ret);
+			"CSQ ring setup error %d\n", ret);
 		return ret;
 	}
 
-	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CRQ ring\n", ret);
+			"CRQ ring setup error %d\n", ret);
 		goto err_csq;
 	}
+
+	return 0;
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+	int ret;
+
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock(&hdev->hw.cmq.crq.lock);
 
 	/* initialize the pointers of async rx queue of mailbox */
 	hdev->arq.hdev = hdev;
 	hdev->arq.head = 0;
 	hdev->arq.tail = 0;
-	hdev->arq.count = 0;
+	atomic_set(&hdev->arq.count, 0);
+	hdev->hw.cmq.csq.next_to_clean = 0;
+	hdev->hw.cmq.csq.next_to_use = 0;
+	hdev->hw.cmq.crq.next_to_clean = 0;
+	hdev->hw.cmq.crq.next_to_use = 0;
 
-	/* get firmware version */
-	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+	hclgevf_cmd_init_regs(&hdev->hw);
+
+	spin_unlock(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is new reset pending, because the higher level
+	 * reset may happen when lower level reset is being processed.
	 */
+	if (hclgevf_is_reset_pending(hdev)) {
+		ret = -EBUSY;
+		goto err_cmd_init;
+	}
+
+	/* get version and device capabilities */
+	ret = hclgevf_cmd_query_version_and_capability(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"failed(%d) to query firmware version\n", ret);
-		goto err_crq;
+			"failed to query version and capabilities, ret = %d\n", ret);
+		goto err_cmd_init;
 	}
-	hdev->fw_version = version;
 
-	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+				 HNAE3_FW_VERSION_BYTE3_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+				 HNAE3_FW_VERSION_BYTE2_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+				 HNAE3_FW_VERSION_BYTE1_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+				 HNAE3_FW_VERSION_BYTE0_SHIFT));
 
 	return 0;
-err_crq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+err_cmd_init:
+	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
 
 	return ret;
 }
 
+static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+}
+
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+	/* wait to ensure that the firmware completes the possible left
+	 * over commands.
+	 */
+	msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock(&hdev->hw.cmq.crq.lock);
+	hclgevf_cmd_uninit_regs(&hdev->hw);
+	spin_unlock(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
 	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
 	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
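
The new version print in hclgevf_cmd_init() above splits hdev->fw_version into four dotted fields with hnae3_get_field(). Below is a small standalone C sketch of that decomposition, assuming one byte per field (the actual HNAE3_FW_VERSION_BYTE*_MASK/SHIFT definitions live in hnae3.h); FW_VERSION_BYTE_MASK/SHIFT and get_field() are illustrative names only.

/* Standalone illustration of the mask-and-shift extraction behind the
 * "%lu.%lu.%lu.%lu" firmware version print.
 */
#include <stdio.h>
#include <stdint.h>

#define FW_VERSION_BYTE_MASK(n)		(0xffu << ((n) * 8))
#define FW_VERSION_BYTE_SHIFT(n)	((n) * 8)

/* same idea as hnae3_get_field(): isolate a field, then shift it down */
static unsigned int get_field(uint32_t value, uint32_t mask, unsigned int shift)
{
	return (value & mask) >> shift;
}

int main(void)
{
	uint32_t fw_version = 0x01020304; /* example value reported by firmware */

	printf("The firmware version is %u.%u.%u.%u\n",
	       get_field(fw_version, FW_VERSION_BYTE_MASK(3), FW_VERSION_BYTE_SHIFT(3)),
	       get_field(fw_version, FW_VERSION_BYTE_MASK(2), FW_VERSION_BYTE_SHIFT(2)),
	       get_field(fw_version, FW_VERSION_BYTE_MASK(1), FW_VERSION_BYTE_SHIFT(1)),
	       get_field(fw_version, FW_VERSION_BYTE_MASK(0), FW_VERSION_BYTE_SHIFT(0)));
	/* prints: The firmware version is 1.2.3.4 */
	return 0;
}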