hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -11,8 +11,6 @@
 #include "hnae3.h"
 #include "hclge_main.h"
 
-#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-
 #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
 
 static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -39,9 +37,8 @@
 {
 	int size = ring->desc_num * sizeof(struct hclge_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -104,15 +101,17 @@
 	dma_addr_t dma = ring->desc_dma_addr;
 	struct hclge_dev *hdev = ring->dev;
 	struct hclge_hw *hw = &hdev->hw;
+	u32 reg_val;
 
 	if (ring->ring_type == HCLGE_TYPE_CSQ) {
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
 				lower_32_bits(dma));
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
 				upper_32_bits(dma));
-		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
-				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
-				HCLGE_NIC_CMQ_ENABLE);
+		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+		reg_val &= HCLGE_NIC_SW_RST_RDY;
+		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
 	} else {
@@ -121,8 +120,7 @@
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
 				upper_32_bits(dma));
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
-				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
-				HCLGE_NIC_CMQ_ENABLE);
+				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
 	}
@@ -145,7 +143,7 @@
 	rmb(); /* Make sure head is ready before touch any data */
 
 	if (!is_valid_csq_clean_head(csq, head)) {
-		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
 			 csq->next_to_use, csq->next_to_clean);
 		dev_warn(&hdev->pdev->dev,
 			 "Disabling any further commands to IMP firmware\n");
@@ -171,8 +169,16 @@
 	/* these commands have several descriptors,
 	 * and use the first one to save opcode and return value
 	 */
-	u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
-			      HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
+	u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
+			     HCLGE_OPC_STATS_32_BIT,
+			     HCLGE_OPC_STATS_MAC,
+			     HCLGE_OPC_STATS_MAC_ALL,
+			     HCLGE_OPC_QUERY_32_BIT_REG,
+			     HCLGE_OPC_QUERY_64_BIT_REG,
+			     HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+			     HCLGE_QUERY_CLEAR_PF_RAS_INT,
+			     HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+			     HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -181,6 +187,61 @@
 	}
 
 	return false;
+}
+
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+	switch (desc_ret) {
+	case HCLGE_CMD_EXEC_SUCCESS:
+		return 0;
+	case HCLGE_CMD_NO_AUTH:
+		return -EPERM;
+	case HCLGE_CMD_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case HCLGE_CMD_QUEUE_FULL:
+		return -EXFULL;
+	case HCLGE_CMD_NEXT_ERR:
+		return -ENOSR;
+	case HCLGE_CMD_UNEXE_ERR:
+		return -ENOTBLK;
+	case HCLGE_CMD_PARA_ERR:
+		return -EINVAL;
+	case HCLGE_CMD_RESULT_ERR:
+		return -ERANGE;
+	case HCLGE_CMD_TIMEOUT:
+		return -ETIME;
+	case HCLGE_CMD_HILINK_ERR:
+		return -ENOLINK;
+	case HCLGE_CMD_QUEUE_ILLEGAL:
+		return -ENXIO;
+	case HCLGE_CMD_INVALID:
+		return -EBADR;
+	default:
+		return -EIO;
+	}
+}
+
+static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
+				  int num, int ntc)
+{
+	u16 opcode, desc_ret;
+	int handle;
+
+	opcode = le16_to_cpu(desc[0].opcode);
+	for (handle = 0; handle < num; handle++) {
+		desc[handle] = hw->cmq.csq.desc[ntc];
+		ntc++;
+		if (ntc >= hw->cmq.csq.desc_num)
+			ntc = 0;
+	}
+	if (likely(!hclge_is_special_opcode(opcode)))
+		desc_ret = le16_to_cpu(desc[num - 1].retval);
+	else
+		desc_ret = le16_to_cpu(desc[0].retval);
+
+	hw->cmq.last_status = desc_ret;
+
+	return hclge_cmd_convert_err_code(desc_ret);
 }
 
 /**
@@ -195,18 +256,26 @@
 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	struct hclge_cmq_ring *csq = &hw->cmq.csq;
 	struct hclge_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
 	int handle = 0;
-	int retval = 0;
-	u16 opcode, desc_ret;
+	int retval;
 	int ntc;
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclge_ring_space(&hw->cmq.csq) ||
-	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	if (num > hclge_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
@@ -216,12 +285,11 @@
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	opcode = le16_to_cpu(desc[0].opcode);
 	while (handle < num) {
 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
 		*desc_to_use = desc[handle];
 		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
 			hw->cmq.csq.next_to_use = 0;
 		handle++;
 	}
@@ -244,33 +312,10 @@
 		} while (timeout < hw->cmq.tx_timeout);
 	}
 
-	if (!complete) {
-		retval = -EAGAIN;
-	} else {
-		handle = 0;
-		while (handle < num) {
-			/* Get the result of hardware write back */
-			desc_to_use = &hw->cmq.csq.desc[ntc];
-			desc[handle] = *desc_to_use;
-
-			if (likely(!hclge_is_special_opcode(opcode)))
-				desc_ret = le16_to_cpu(desc[handle].retval);
-			else
-				desc_ret = le16_to_cpu(desc[0].retval);
-
-			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
-				retval = 0;
-			else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
-				retval = -EOPNOTSUPP;
-			else
-				retval = -EIO;
-			hw->cmq.last_status = desc_ret;
-			ntc++;
-			handle++;
-			if (ntc == hw->cmq.csq.desc_num)
-				ntc = 0;
-		}
-	}
+	if (!complete)
+		retval = -EBADE;
+	else
+		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
 
 	/* Clean the command send queue */
 	handle = hclge_cmd_csq_clean(hw);
@@ -285,9 +330,37 @@
 	return retval;
 }
 
-static enum hclge_cmd_status hclge_cmd_query_firmware_version(
-	struct hclge_hw *hw, u32 *version)
+static void hclge_set_default_capability(struct hclge_dev *hdev)
 {
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclge_parse_capability(struct hclge_dev *hdev,
+				   struct hclge_query_version_cmd *cmd)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+	u32 caps;
+
+	caps = __le32_to_cpu(cmd->caps[0]);
+
+	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
+		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
+		set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
+		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+	if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
+		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static enum hclge_cmd_status
+hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 	struct hclge_query_version_cmd *resp;
 	struct hclge_desc desc;
 	int ret;
@@ -295,9 +368,20 @@
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
 	resp = (struct hclge_query_version_cmd *)desc.data;
 
-	ret = hclge_cmd_send(hw, &desc, 1);
-	if (!ret)
-		*version = le32_to_cpu(resp->firmware);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	hdev->fw_version = le32_to_cpu(resp->firmware);
+
+	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+			      HNAE3_PCI_REVISION_BIT_SIZE;
+	ae_dev->dev_version |= hdev->pdev->revision;
+
+	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+		hclge_set_default_capability(hdev);
+
+	hclge_parse_capability(hdev, resp);
 
 	return ret;
 }
@@ -338,13 +422,29 @@
 	return ret;
 }
 
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+	struct hclge_firmware_compat_cmd *req;
+	struct hclge_desc desc;
+	u32 compat = 0;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+	req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+	req->compat = cpu_to_le32(compat);
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
 int hclge_cmd_init(struct hclge_dev *hdev)
 {
-	u32 version;
 	int ret;
 
 	spin_lock_bh(&hdev->hw.cmq.csq.lock);
-	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+	spin_lock(&hdev->hw.cmq.crq.lock);
 	hdev->hw.cmq.csq.next_to_clean = 0;
 	hdev->hw.cmq.csq.next_to_use = 0;
 	hdev->hw.cmq.crq.next_to_clean = 0;
@@ -352,33 +452,86 @@
 	hdev->hw.cmq.crq.next_to_use = 0;
 
 	hclge_cmd_init_regs(&hdev->hw);
-	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 
-	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock(&hdev->hw.cmq.crq.lock);
 	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
-	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
+	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is new reset pending, because the higher level
+	 * reset may happen when lower level reset is being processed.
+	 */
+	if ((hclge_is_reset_pending(hdev))) {
+		dev_err(&hdev->pdev->dev,
+			"failed to init cmd since reset %#lx pending\n",
+			hdev->reset_pending);
+		ret = -EBUSY;
+		goto err_cmd_init;
+	}
+
+	/* get version and device capabilities */
+	ret = hclge_cmd_query_version_and_capability(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"firmware version query failed %d\n", ret);
-		return ret;
+			"failed to query version and capabilities, ret = %d\n",
+			ret);
+		goto err_cmd_init;
 	}
-	hdev->fw_version = version;
 
-	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+				 HNAE3_FW_VERSION_BYTE3_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+				 HNAE3_FW_VERSION_BYTE1_SHIFT),
+		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+				 HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+	/* ask the firmware to enable some features, driver can work without
+	 * it.
+	 */
+	ret = hclge_firmware_compat_config(hdev);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "Firmware compatible features not enabled(%d).\n",
+			 ret);
 
 	return 0;
+
+err_cmd_init:
+	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+	return ret;
 }
 
-static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 {
-	spin_lock(&ring->lock);
-	hclge_free_cmd_desc(ring);
-	spin_unlock(&ring->lock);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
 }
 
-void hclge_destroy_cmd_queue(struct hclge_hw *hw)
+void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
-	hclge_destroy_queue(&hw->cmq.csq);
-	hclge_destroy_queue(&hw->cmq.crq);
+	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+	/* wait to ensure that the firmware completes the possible left
+	 * over commands.
	 */
+	msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock(&hdev->hw.cmq.crq.lock);
+	hclge_cmd_uninit_regs(&hdev->hw);
+	spin_unlock(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
 }