...
 #define HCLGE_SHAPER_BS_U_DEF 5
 #define HCLGE_SHAPER_BS_S_DEF 20
 
-#define HCLGE_ETHER_MAX_RATE 100000
-
 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
  * @ir: Rate to be config, its unit is Mbps
  * @shaper_level: the shaper level. eg: port, pg, priority, queueset
- * @ir_b: IR_B parameter of IR shaper
- * @ir_u: IR_U parameter of IR shaper
- * @ir_s: IR_S parameter of IR shaper
+ * @ir_para: parameters of IR shaper
+ * @max_tm_rate: max tm rate is available to config
  *
  * the formula:
  *
...
  * @return: 0: calculate sucessful, negative: fail
  */
 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
-				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
+				  struct hclge_shaper_ir_para *ir_para,
+				  u32 max_tm_rate)
 {
-	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+#define DIVISOR_CLK (1000 * 8)
+#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+
+	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
 		6 * 256, /* Prioriy level */
 		6 * 32, /* Prioriy group level */
 		6 * 8, /* Port level */
 		6 * 256 /* Qset level */
 	};
-	u8 ir_u_calc = 0, ir_s_calc = 0;
+	u8 ir_u_calc = 0;
+	u8 ir_s_calc = 0;
 	u32 ir_calc;
 	u32 tick;
 
 	/* Calc tick */
 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
-	    ir > HCLGE_ETHER_MAX_RATE)
+	    ir > max_tm_rate)
 		return -EINVAL;
 
 	tick = tick_array[shaper_level];
...
 	 * ir_calc = ---------------- * 1000
 	 *		tick * 1
 	 */
-	ir_calc = (1008000 + (tick >> 1) - 1) / tick;
+	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
 
 	if (ir_calc == ir) {
-		*ir_b = 126;
-		*ir_u = 0;
-		*ir_s = 0;
+		ir_para->ir_b = 126;
+		ir_para->ir_u = 0;
+		ir_para->ir_s = 0;
 
 		return 0;
 	} else if (ir_calc > ir) {
 		/* Increasing the denominator to select ir_s value */
-		while (ir_calc > ir) {
+		while (ir_calc >= ir && ir) {
 			ir_s_calc++;
-			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
+			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
 		}
 
-		if (ir_calc == ir)
-			*ir_b = 126;
-		else
-			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
+		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
 	} else {
 		/* Increasing the numerator to select ir_u value */
 		u32 numerator;
 
 		while (ir_calc < ir) {
 			ir_u_calc++;
-			numerator = 1008000 * (1 << ir_u_calc);
+			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
 			ir_calc = (numerator + (tick >> 1)) / tick;
 		}
 
 		if (ir_calc == ir) {
-			*ir_b = 126;
+			ir_para->ir_b = 126;
 		} else {
-			u32 denominator = (8000 * (1 << --ir_u_calc));
-			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
+			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
+			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+					denominator;
 		}
 	}
 
-	*ir_u = ir_u_calc;
-	*ir_s = ir_s_calc;
+	ir_para->ir_u = ir_u_calc;
+	ir_para->ir_s = ir_s_calc;
 
 	return 0;
 }
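
[Worked example, not part of the patch] To make the parameter search above concrete: assuming the rate formula from the comment block, rate = ir_b * 2^ir_u * 8 * 1000 / (tick * 2^ir_s) Mbps with tick taken from tick_array, a 10000 Mbps target at port level (tick = 6 * 8) ends with ir_s = 2, ir_u = 0 and ir_b = (10000 * 48 * 4 + 4000) / 8000 = 240, which maps back to exactly 10000 Mbps. A minimal standalone C sketch that checks this:

#include <stdio.h>

/* Illustrative only: recover the rate (in Mbps) encoded by an
 * ir_b/ir_u/ir_s tuple, using the formula from the comment above.
 * DIVISOR_CLK in the patch is 1000 * 8, i.e. the "* 8 * 1000" factor here.
 */
static unsigned int shaper_rate_mbps(unsigned int tick, unsigned int ir_b,
				     unsigned int ir_u, unsigned int ir_s)
{
	return ir_b * (1u << ir_u) * 8u * 1000u / (tick * (1u << ir_s));
}

int main(void)
{
	unsigned int tick = 6 * 8;	/* port level entry of tick_array */

	/* For ir = 10000 Mbps the search above picks ir_s = 2, ir_u = 0 and
	 * ir_b = (10000 * 48 * 4 + 4000) / 8000 = 240.
	 */
	printf("%u Mbps\n", shaper_rate_mbps(tick, 240, 0, 2)); /* 10000 */
	return 0;
}
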
...
 	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
 		return -EINVAL;
 
-	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
+	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
 		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
-		if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
-			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-		else
-			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
 	}
+
+	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
 
 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
 	if (ret)
...
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
-				  u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+			   u8 pfc_bitmap)
 {
 	struct hclge_desc desc;
-	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;
+	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
 
...
 	struct hclge_cfg_pause_param_cmd *pause_param;
 	struct hclge_desc desc;
 
-	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
 
 	ether_addr_copy(pause_param->mac_addr, addr);
+	ether_addr_copy(pause_param->mac_addr_extra, addr);
 	pause_param->pause_trans_gap = pause_trans_gap;
 	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
 
...
 	u8 trans_gap;
 	int ret;
 
-	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
 
...
 	trans_gap = pause_param->pause_trans_gap;
 	trans_time = le16_to_cpu(pause_param->pause_trans_time);
 
-	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
-				     trans_time);
+	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
 }
 
 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
...
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
-				    enum hclge_shap_bucket bucket, u8 pg_id,
-				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
+static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
+				      u8 bs_b, u8 bs_s)
 {
-	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
-	enum hclge_opcode_type opcode;
-	struct hclge_desc desc;
 	u32 shapping_para = 0;
-
-	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
-		 HCLGE_OPC_TM_PG_C_SHAPPING;
-	hclge_cmd_setup_basic_desc(&desc, opcode, false);
-
-	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
-
-	shap_cfg_cmd->pg_id = pg_id;
 
 	hclge_tm_set_field(shapping_para, IR_B, ir_b);
 	hclge_tm_set_field(shapping_para, IR_U, ir_u);
 	hclge_tm_set_field(shapping_para, IR_S, ir_s);
 	hclge_tm_set_field(shapping_para, BS_B, bs_b);
 	hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+	return shapping_para;
+}
+
+static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
+				    enum hclge_shap_bucket bucket, u8 pg_id,
+				    u32 shapping_para)
+{
+	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+	enum hclge_opcode_type opcode;
+	struct hclge_desc desc;
+
+	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
+		 HCLGE_OPC_TM_PG_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, opcode, false);
+
+	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+
+	shap_cfg_cmd->pg_id = pg_id;
 
 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
 
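
[Illustrative sketch, not part of the patch] The refactor above makes hclge_tm_get_shapping_para() build the 32-bit shaping word once, so callers can pass the packed value to the per-level *_shapping_cfg() helpers for both the CIR and PIR buckets. The sketch below only shows the packing idea; the shift and width values are placeholders, not the device's register layout, which is whatever hclge_tm_set_field() uses in the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Minimal sketch: each shaper field (IR_B, IR_U, IR_S, BS_B, BS_S) lives
 * in its own bit range of one 32-bit word. Shifts/widths here are
 * illustrative placeholders only.
 */
static uint32_t pack_field(uint32_t word, unsigned int shift,
			   unsigned int width, uint32_t val)
{
	uint32_t mask = ((1u << width) - 1u) << shift;

	return (word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t para = 0;

	para = pack_field(para, 0, 8, 126);	/* IR_B-like field */
	para = pack_field(para, 8, 4, 0);	/* IR_U-like field */
	para = pack_field(para, 12, 4, 0);	/* IR_S-like field */
	para = pack_field(para, 16, 5, 5);	/* BS_B-like field */
	para = pack_field(para, 21, 5, 20);	/* BS_S-like field */
	printf("packed shaping word: 0x%08x\n", (unsigned int)para);
	return 0;
}
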
...
 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
+	struct hclge_shaper_ir_para ir_para;
 	struct hclge_desc desc;
-	u32 shapping_para = 0;
-	u8 ir_u, ir_b, ir_s;
+	u32 shapping_para;
 	int ret;
 
-	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
-				     HCLGE_SHAPER_LVL_PORT,
-				     &ir_b, &ir_u, &ir_s);
+	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+				     &ir_para,
+				     hdev->ae_dev->dev_specs.max_tm_rate);
 	if (ret)
 		return ret;
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
 
-	hclge_tm_set_field(shapping_para, IR_B, ir_b);
-	hclge_tm_set_field(shapping_para, IR_U, ir_u);
-	hclge_tm_set_field(shapping_para, IR_S, ir_s);
-	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
-	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
+	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+						   ir_para.ir_s,
+						   HCLGE_SHAPER_BS_U_DEF,
+						   HCLGE_SHAPER_BS_S_DEF);
 
 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
 
...
 
 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
 				     enum hclge_shap_bucket bucket, u8 pri_id,
-				     u8 ir_b, u8 ir_u, u8 ir_s,
-				     u8 bs_b, u8 bs_s)
+				     u32 shapping_para)
 {
 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
 	enum hclge_opcode_type opcode;
 	struct hclge_desc desc;
-	u32 shapping_para = 0;
 
 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
-		HCLGE_OPC_TM_PRI_C_SHAPPING;
+		 HCLGE_OPC_TM_PRI_C_SHAPPING;
 
 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
 
 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
 
 	shap_cfg_cmd->pri_id = pri_id;
-
-	hclge_tm_set_field(shapping_para, IR_B, ir_b);
-	hclge_tm_set_field(shapping_para, IR_U, ir_u);
-	hclge_tm_set_field(shapping_para, IR_S, ir_s);
-	hclge_tm_set_field(shapping_para, BS_B, bs_b);
-	hclge_tm_set_field(shapping_para, BS_S, bs_s);
 
 	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
 
...
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+	struct hclge_shaper_ir_para ir_para;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	u32 shaper_para;
+	int ret, i;
+
+	if (!max_tx_rate)
+		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+
+	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+				     &ir_para,
+				     hdev->ae_dev->dev_specs.max_tm_rate);
+	if (ret)
+		return ret;
+
+	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+						 ir_para.ir_s,
+						 HCLGE_SHAPER_BS_U_DEF,
+						 HCLGE_SHAPER_BS_S_DEF);
+
+	for (i = 0; i < kinfo->num_tc; i++) {
+		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+					   false);
+
+		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+				vport->vport_id, shap_cfg_cmd->qs_id,
+				max_tx_rate, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
 	struct hclge_dev *hdev = vport->back;
+	u16 max_rss_size;
 	u8 i;
 
-	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
-	kinfo->num_tc =
-		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
-	kinfo->rss_size
-		= min_t(u16, hdev->rss_size_max,
-			kinfo->num_tqps / kinfo->num_tc);
-	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
+	/* TC configuration is shared by PF/VF in one port, only allow
+	 * one tc for VF for simplicity. VF's vport_id is non zero.
+	 */
+	kinfo->num_tc = vport->vport_id ? 1 :
+			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+	vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
+			   (vport->vport_id ? (vport->vport_id - 1) : 0);
+
+	max_rss_size = min_t(u16, hdev->rss_size_max,
+			     vport->alloc_tqps / kinfo->num_tc);
+
+	/* Set to user value, no larger than max_rss_size. */
+	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+	    kinfo->req_rss_size <= max_rss_size) {
+		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
+			 kinfo->rss_size, kinfo->req_rss_size);
+		kinfo->rss_size = kinfo->req_rss_size;
+	} else if (kinfo->rss_size > max_rss_size ||
+		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+		/* if user not set rss, the rss_size should compare with the
+		 * valid msi numbers to ensure one to one map between tqp and
+		 * irq as default.
+		 */
+		if (!kinfo->req_rss_size)
+			max_rss_size = min_t(u16, max_rss_size,
+					     (hdev->num_nic_msi - 1) /
+					     kinfo->num_tc);
+
+		/* Set to the maximum specification value (max_rss_size). */
+		kinfo->rss_size = max_rss_size;
+	}
+
+	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
 	vport->dwrr = 100; /* 100 percent as init */
 	vport->alloc_rss_size = kinfo->rss_size;
+	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
 
-	for (i = 0; i < kinfo->num_tc; i++) {
-		if (hdev->hw_tc_map & BIT(i)) {
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
+		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
 			kinfo->tc_info[i].enable = true;
 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
...
 	}
 
 	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
-	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
+	       sizeof_field(struct hnae3_knic_private_info, prio_tc));
 }
 
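
[Illustrative sketch, not part of the patch] The new num_tc/qs_offset/rss_size selection above is easier to follow with numbers. The standalone C sketch below mirrors it in simplified form (it drops the comparison against the currently configured rss_size and the dev_info print); HNAE3_MAX_TC is taken as 8 to match the driver, and the device values in main() are made up.

#include <stdio.h>

#define HNAE3_MAX_TC 8	/* matches the driver's definition */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Simplified mirror of hclge_tm_vport_tc_info_update(): VFs get a single
 * TC and their qsets start after the PF's HNAE3_MAX_TC qsets; rss_size is
 * capped by rss_size_max, by tqps-per-TC and (when the user did not ask
 * for a specific size) by the usable NIC MSI-X vectors.
 */
static void vport_tc_info(unsigned int vport_id, unsigned int alloc_tqps,
			  unsigned int num_tc, unsigned int rss_size_max,
			  unsigned int req_rss_size, unsigned int num_nic_msi)
{
	unsigned int kinfo_num_tc = vport_id ? 1 : min_u(alloc_tqps, num_tc);
	unsigned int qs_offset = vport_id ? HNAE3_MAX_TC + (vport_id - 1) : 0;
	unsigned int max_rss = min_u(rss_size_max, alloc_tqps / kinfo_num_tc);
	unsigned int rss_size;

	if (req_rss_size && req_rss_size <= max_rss) {
		rss_size = req_rss_size;
	} else {
		if (!req_rss_size)
			max_rss = min_u(max_rss,
					(num_nic_msi - 1) / kinfo_num_tc);
		rss_size = max_rss;
	}

	printf("vport %u: num_tc=%u qs_offset=%u rss_size=%u num_tqps=%u\n",
	       vport_id, kinfo_num_tc, qs_offset, rss_size,
	       kinfo_num_tc * rss_size);
}

int main(void)
{
	/* PF (vport 0): 4 TCs, 64 tqps, rss_size_max 16, 33 NIC MSI-X
	 * vectors, no user-requested RSS size -> rss_size = min(16, 8) = 8.
	 */
	vport_tc_info(0, 64, 4, 16, 0, 33);
	/* VF 1 (vport 1): one TC, qsets start at HNAE3_MAX_TC. */
	vport_tc_info(1, 4, 4, 16, 0, 5);
	return 0;
}
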
 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
...
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
 		hdev->tm_info.prio_tc[i] =
 			(i >= hdev->tm_info.num_tc) ? 0 : i;
-
-	/* DCB is enabled if we have more than 1 TC */
-	if (hdev->tm_info.num_tc > 1)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }
 
 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 {
+#define BW_PERCENT 100
+#define DEFAULT_BW_WEIGHT 1
+
 	u8 i;
 
 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
 		int k;
 
-		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
+		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
 
 		hdev->tm_info.pg_info[i].pg_id = i;
 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
 
-		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
+		hdev->tm_info.pg_info[i].bw_limit =
+			hdev->ae_dev->dev_specs.max_tm_rate;
 
 		if (i != 0)
 			continue;
 
 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
 		for (k = 0; k < hdev->tm_info.num_tc; k++)
-			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
+			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+		for (; k < HNAE3_MAX_TC; k++)
+			hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
 	}
 }
 
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 {
-	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
 			dev_warn(&hdev->pdev->dev,
-				 "DCB is disable, but last mode is FC_PFC\n");
+				 "Only 1 tc used, but last mode is FC_PFC\n");
 
 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
...
 	}
 }
 
-static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
 {
-	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
-	    (hdev->tm_info.num_pg != 1))
-		return -EINVAL;
+	if (!hdev->tm_info.pfc_en) {
+		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+		return;
+	}
 
+	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+	}
+}
+
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+		hclge_update_fc_mode(hdev);
+	else
+		hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
+{
 	hclge_tm_pg_info_init(hdev);
 
 	hclge_tm_tc_info_init(hdev);
 
 	hclge_tm_vport_info_update(hdev);
 
-	hclge_pfc_info_init(hdev);
-
-	return 0;
+	hclge_tm_pfc_info_update(hdev);
 }
 
 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
...
 
 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
 {
-	u8 ir_u, ir_b, ir_s;
+	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+	struct hclge_shaper_ir_para ir_para;
+	u32 shaper_para;
 	int ret;
 	u32 i;
 
...
 	/* Pg to pri */
 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
 		/* Calc shaper para */
-		ret = hclge_shaper_para_calc(
-					hdev->tm_info.pg_info[i].bw_limit,
-					HCLGE_SHAPER_LVL_PG,
-					&ir_b, &ir_u, &ir_s);
+		ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
+					     HCLGE_SHAPER_LVL_PG,
+					     &ir_para, max_tm_rate);
 		if (ret)
 			return ret;
 
+		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+							 HCLGE_SHAPER_BS_U_DEF,
+							 HCLGE_SHAPER_BS_S_DEF);
 		ret = hclge_tm_pg_shapping_cfg(hdev,
 					       HCLGE_TM_SHAP_C_BUCKET, i,
-					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
-					       HCLGE_SHAPER_BS_S_DEF);
+					       shaper_para);
 		if (ret)
 			return ret;
 
+		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+							 ir_para.ir_u,
+							 ir_para.ir_s,
+							 HCLGE_SHAPER_BS_U_DEF,
+							 HCLGE_SHAPER_BS_S_DEF);
 		ret = hclge_tm_pg_shapping_cfg(hdev,
 					       HCLGE_TM_SHAP_P_BUCKET, i,
-					       ir_b, ir_u, ir_s,
-					       HCLGE_SHAPER_BS_U_DEF,
-					       HCLGE_SHAPER_BS_S_DEF);
+					       shaper_para);
 		if (ret)
 			return ret;
 	}
...
 	/* pg to prio */
 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
 		/* Cfg dwrr */
-		ret = hclge_tm_pg_weight_cfg(hdev, i,
-					     hdev->tm_info.pg_dwrr[i]);
+		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
 		if (ret)
 			return ret;
 	}
...
 
 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
 		/* Cfg qs -> pri mapping, one by one mapping */
-		for (k = 0; k < hdev->num_alloc_vport; k++)
-			for (i = 0; i < hdev->tm_info.num_tc; i++) {
+		for (k = 0; k < hdev->num_alloc_vport; k++) {
+			struct hnae3_knic_private_info *kinfo =
+				&vport[k].nic.kinfo;
+
+			for (i = 0; i < kinfo->num_tc; i++) {
 				ret = hclge_tm_qs_to_pri_map_cfg(
 					hdev, vport[k].qs_offset + i, i);
 				if (ret)
 					return ret;
 			}
+		}
 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
 		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
 		for (k = 0; k < hdev->num_alloc_vport; k++)
...
 
 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
 {
-	u8 ir_u, ir_b, ir_s;
+	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+	struct hclge_shaper_ir_para ir_para;
+	u32 shaper_para;
 	int ret;
 	u32 i;
 
 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
-		ret = hclge_shaper_para_calc(
-					hdev->tm_info.tc_info[i].bw_limit,
-					HCLGE_SHAPER_LVL_PRI,
-					&ir_b, &ir_u, &ir_s);
+		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+					     HCLGE_SHAPER_LVL_PRI,
+					     &ir_para, max_tm_rate);
 		if (ret)
 			return ret;
 
-		ret = hclge_tm_pri_shapping_cfg(
-			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
-			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
-			HCLGE_SHAPER_BS_S_DEF);
+		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+							 HCLGE_SHAPER_BS_U_DEF,
+							 HCLGE_SHAPER_BS_S_DEF);
+		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
+						shaper_para);
 		if (ret)
 			return ret;
 
-		ret = hclge_tm_pri_shapping_cfg(
-			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
-			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
-			HCLGE_SHAPER_BS_S_DEF);
+		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+							 ir_para.ir_u,
+							 ir_para.ir_s,
+							 HCLGE_SHAPER_BS_U_DEF,
+							 HCLGE_SHAPER_BS_S_DEF);
+		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
+						shaper_para);
 		if (ret)
 			return ret;
 	}
...
 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
 {
 	struct hclge_dev *hdev = vport->back;
-	u8 ir_u, ir_b, ir_s;
+	struct hclge_shaper_ir_para ir_para;
+	u32 shaper_para;
 	int ret;
 
 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
-				     &ir_b, &ir_u, &ir_s);
+				     &ir_para,
+				     hdev->ae_dev->dev_specs.max_tm_rate);
 	if (ret)
 		return ret;
 
+	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+						 HCLGE_SHAPER_BS_U_DEF,
+						 HCLGE_SHAPER_BS_S_DEF);
 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
-					vport->vport_id,
-					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
-					HCLGE_SHAPER_BS_S_DEF);
+					vport->vport_id, shaper_para);
 	if (ret)
 		return ret;
 
+	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+						 ir_para.ir_s,
+						 HCLGE_SHAPER_BS_U_DEF,
+						 HCLGE_SHAPER_BS_S_DEF);
 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
-					vport->vport_id,
-					ir_b, ir_u, ir_s,
-					HCLGE_SHAPER_BS_U_DEF,
-					HCLGE_SHAPER_BS_S_DEF);
+					vport->vport_id, shaper_para);
 	if (ret)
 		return ret;
 
...
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
 	struct hclge_dev *hdev = vport->back;
-	u8 ir_u, ir_b, ir_s;
+	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+	struct hclge_shaper_ir_para ir_para;
 	u32 i;
 	int ret;
 
 	for (i = 0; i < kinfo->num_tc; i++) {
-		ret = hclge_shaper_para_calc(
-					hdev->tm_info.tc_info[i].bw_limit,
-					HCLGE_SHAPER_LVL_QSET,
-					&ir_b, &ir_u, &ir_s);
+		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+					     HCLGE_SHAPER_LVL_QSET,
+					     &ir_para, max_tm_rate);
 		if (ret)
 			return ret;
 	}
...
 	return 0;
 }
 
+static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
+{
+#define DEFAULT_TC_OFFSET 14
+
+	struct hclge_ets_tc_weight_cmd *ets_weight;
+	struct hclge_desc desc;
+	unsigned int i;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
+	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+	for (i = 0; i < HNAE3_MAX_TC; i++) {
+		struct hclge_pg_info *pg_info;
+
+		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
+	}
+
+	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
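
[Illustrative sketch, not part of the patch] Combined with the BW_PERCENT/DEFAULT_BW_WEIGHT initialisation in hclge_tm_pg_info_init() above, the weight table that hclge_tm_ets_tc_dwrr_cfg() copies into the HCLGE_OPC_ETS_TC_WEIGHT descriptor carries 100 for each TC in use and 1 for the unused ones. A small standalone sketch of that fill pattern:

#include <stdio.h>

#define HNAE3_MAX_TC		8
#define BW_PERCENT		100	/* weight for TCs in use */
#define DEFAULT_BW_WEIGHT	1	/* weight for unused TCs */

int main(void)
{
	unsigned int num_tc = 4;	/* example: 4 TCs enabled */
	unsigned int tc_weight[HNAE3_MAX_TC];
	unsigned int i;

	/* Same fill pattern as hclge_tm_pg_info_init() uses for pg 0. */
	for (i = 0; i < num_tc; i++)
		tc_weight[i] = BW_PERCENT;
	for (; i < HNAE3_MAX_TC; i++)
		tc_weight[i] = DEFAULT_BW_WEIGHT;

	/* This is the table the new command carries (weight_offset = 14
	 * in the patch).
	 */
	for (i = 0; i < HNAE3_MAX_TC; i++)
		printf("tc%u weight %u\n", i, tc_weight[i]);
	return 0;
}
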
 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
...
 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
 		if (ret)
 			return ret;
+
+		if (!hnae3_dev_dcb_supported(hdev))
+			return 0;
+
+		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
+		if (ret == -EOPNOTSUPP) {
+			dev_warn(&hdev->pdev->dev,
+				 "fw %08x does't support ets tc weight cmd\n",
+				 hdev->fw_version);
+			ret = 0;
+		}
+
+		return ret;
 	} else {
 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
 		if (ret)
...
 	return 0;
 }
 
-int hclge_tm_map_cfg(struct hclge_dev *hdev)
+static int hclge_tm_map_cfg(struct hclge_dev *hdev)
 {
 	int ret;
 
...
 	return 0;
 }
 
-int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
 {
 	int ret;
 
...
 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
 }
 
-static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
 {
 	int ret;
 
...
 	struct hclge_mac *mac = &hdev->hw.mac;
 
 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
-			HCLGE_DEFAULT_PAUSE_TRANS_GAP,
-			HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
 }
 
 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
...
 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
 }
 
-int hclge_pause_setup_hw(struct hclge_dev *hdev)
+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
 {
 	int ret;
-	u8 i;
+	int i;
+
+	for (i = 0; i < hdev->tm_info.num_tc; i++) {
+		ret = hclge_bp_setup_hw(hdev, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
+{
+	int ret;
 
 	ret = hclge_pause_param_setup_hw(hdev);
 	if (ret)
...
 	if (!hnae3_dev_dcb_supported(hdev))
 		return 0;
 
-	/* When MAC is GE Mode, hdev does not support pfc setting */
+	/* GE MAC does not support PFC, when driver is initializing and MAC
+	 * is in GE Mode, ignore the error here, otherwise initialization
+	 * will fail.
+	 */
 	ret = hclge_pfc_setup_hw(hdev);
-	if (ret)
-		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
-
-	for (i = 0; i < hdev->tm_info.num_tc; i++) {
-		ret = hclge_bp_setup_hw(hdev, i);
-		if (ret)
-			return ret;
+	if (init && ret == -EOPNOTSUPP)
+		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+	else if (ret) {
+		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
+			ret);
+		return ret;
 	}
 
-	return 0;
+	return hclge_tm_bp_setup(hdev);
 }
 
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
 {
 	struct hclge_vport *vport = hdev->vport;
 	struct hnae3_knic_private_info *kinfo;
 	u32 i, k;
 
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-		if (prio_tc[i] >= hdev->tm_info.num_tc)
-			return -EINVAL;
 		hdev->tm_info.prio_tc[i] = prio_tc[i];
 
 		for (k = 0; k < hdev->num_alloc_vport; k++) {
...
 			kinfo->prio_tc[i] = prio_tc[i];
 		}
 	}
-	return 0;
 }
 
 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 {
-	u8 i, bit_map = 0;
+	u8 bit_map = 0;
+	u8 i;
 
 	hdev->tm_info.num_tc = num_tc;
 
...
 	hclge_tm_schd_info_init(hdev);
 }
 
-int hclge_tm_init_hw(struct hclge_dev *hdev)
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
 	int ret;
 
...
 	if (ret)
 		return ret;
 
-	ret = hclge_pause_setup_hw(hdev);
+	ret = hclge_pause_setup_hw(hdev, init);
 	if (ret)
 		return ret;
 
...
 
 int hclge_tm_schd_init(struct hclge_dev *hdev)
 {
-	int ret;
-
 	/* fc_mode is HCLGE_FC_FULL on reset */
 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
 
-	ret = hclge_tm_schd_info_init(hdev);
+	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+	    hdev->tm_info.num_pg != 1)
+		return -EINVAL;
+
+	hclge_tm_schd_info_init(hdev);
+
+	return hclge_tm_init_hw(hdev, true);
+}
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int ret;
+
+	hclge_tm_vport_tc_info_update(vport);
+
+	ret = hclge_vport_q_to_qs_map(hdev, vport);
 	if (ret)
 		return ret;
 
-	return hclge_tm_init_hw(hdev);
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
+		return 0;
+
+	return hclge_tm_bp_setup(hdev);
 }