.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 |  * Huawei HiNIC PCI Express Linux driver
3 | 4 |  * Copyright(c) 2017 Huawei Technologies Co., Ltd
4 | | - *
5 | | - * This program is free software; you can redistribute it and/or modify it
6 | | - * under the terms and conditions of the GNU General Public License,
7 | | - * version 2, as published by the Free Software Foundation.
8 | | - *
9 | | - * This program is distributed in the hope it will be useful, but WITHOUT
10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 | | - * for more details.
13 | | - *
14 | 5 |  */
15 | 6 |
16 | 7 | #include <linux/kernel.h>
.. | ..
70 | 61 | #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
71 | 62 | #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
72 | 63 |
73 | | -#define TX_MAX_MSS_DEFAULT 0x3E00
74 | | -
75 | 64 | enum sq_wqe_type {
76 | 65 | 	SQ_NORMAL_WQE = 0,
77 | 66 | };
.. | ..
119 | 108 | 	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
120 | 109 | 	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
121 | 110 |
122 | | -	wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
| 111 | +	/* If only one page, use 0-level CLA */
| 112 | +	if (wq->num_q_pages == 1)
| 113 | +		wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
| 114 | +	else
| 115 | +		wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
| 116 | +
123 | 117 | 	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
124 | 118 | 	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
125 | 119 |
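The hi/lo split in this hunk follows the usual kernel pattern for handing a 64-bit address (or PFN) to hardware that stores it as two 32-bit fields, using the upper_32_bits()/lower_32_bits() helpers from <linux/kernel.h>. A minimal sketch of that pattern; the descriptor layout and function name below are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical descriptor that stores a 64-bit DMA address as two u32s */
struct example_desc {
	u32 addr_hi;
	u32 addr_lo;
};

static void example_desc_set_addr(struct example_desc *desc, dma_addr_t addr)
{
	/* upper_32_bits() uses a double shift so it is also safe when
	 * dma_addr_t is only 32 bits wide on the build target.
	 */
	desc->addr_hi = upper_32_bits(addr);
	desc->addr_lo = lower_32_bits(addr);
}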
.. | ..
338 | 332 | 		goto err_cqe_dma_arr_alloc;
339 | 333 |
340 | 334 | 	for (i = 0; i < wq->q_depth; i++) {
341 | | -		rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
342 | | -						 sizeof(*rq->cqe[i]),
343 | | -						 &rq->cqe_dma[i], GFP_KERNEL);
| 335 | +		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
| 336 | +						sizeof(*rq->cqe[i]),
| 337 | +						&rq->cqe_dma[i], GFP_KERNEL);
344 | 338 | 		if (!rq->cqe[i])
345 | 339 | 			goto err_cqe_alloc;
346 | 340 | 	}
.. | ..
417 | 411 |
418 | 412 | 	/* HW requirements: Must be at least 32 bit */
419 | 413 | 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
420 | | -	rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
421 | | -					       &rq->pi_dma_addr, GFP_KERNEL);
| 414 | +	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
| 415 | +					      &rq->pi_dma_addr, GFP_KERNEL);
422 | 416 | 	if (!rq->pi_virt_addr) {
423 | 417 | 		dev_err(&pdev->dev, "Failed to allocate PI address\n");
424 | 418 | 		err = -ENOMEM;
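Both allocation hunks above are the mechanical dma_zalloc_coherent() to dma_alloc_coherent() conversion: dma_alloc_coherent() already returns zeroed memory, which is why the zalloc wrapper was retired from the kernel. A minimal sketch of the alloc/free pairing; the device pointer, buffer size, and function names are placeholders:

#include <linux/dma-mapping.h>

/* Illustrative coherent-buffer allocation; dev and buf_size are placeholders */
static void *example_alloc_coherent(struct device *dev, size_t buf_size,
				    dma_addr_t *dma_addr)
{
	/* The returned memory is already zeroed; no explicit memset() needed */
	return dma_alloc_coherent(dev, buf_size, dma_addr, GFP_KERNEL);
}

static void example_free_coherent(struct device *dev, size_t buf_size,
				  void *vaddr, dma_addr_t dma_addr)
{
	dma_free_coherent(dev, buf_size, vaddr, dma_addr);
}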
.. | ..
494 | 488 | 			  HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
495 | 489 | 			  HINIC_SQ_CTRL_SET(ctrl_size, LEN);
496 | 490 |
497 | | -	ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
498 | | -					     QUEUE_INFO_MSS);
| 491 | +	ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
| 492 | +					     QUEUE_INFO_MSS) |
| 493 | +			   HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
499 | 494 | }
500 | 495 |
501 | 496 | static void sq_prepare_task(struct hinic_sq_task *task)
502 | 497 | {
503 | | -	task->pkt_info0 =
504 | | -		HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
505 | | -		HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
506 | | -		HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
507 | | -					INNER_L3TYPE) |
508 | | -		HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
509 | | -					VLAN_OFFLOAD) |
510 | | -		HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
511 | | -
512 | | -	task->pkt_info1 =
513 | | -		HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
514 | | -		HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
515 | | -		HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
516 | | -
517 | | -	task->pkt_info2 =
518 | | -		HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
519 | | -		HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
520 | | -		HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
521 | | -					TUNNEL_L4TYPE) |
522 | | -		HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
523 | | -					OUTER_L3TYPE);
| 498 | +	task->pkt_info0 = 0;
| 499 | +	task->pkt_info1 = 0;
| 500 | +	task->pkt_info2 = 0;
524 | 501 |
525 | 502 | 	task->ufo_v6_identify = 0;
526 | 503 |
527 | 504 | 	task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
528 | 505 |
529 | 506 | 	task->zero_pad = 0;
| 507 | +}
| 508 | +
| 509 | +void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
| 510 | +{
| 511 | +	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
| 512 | +}
| 513 | +
| 514 | +void hinic_task_set_outter_l3(struct hinic_sq_task *task,
| 515 | +			      enum hinic_l3_offload_type l3_type,
| 516 | +			      u32 network_len)
| 517 | +{
| 518 | +	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
| 519 | +			   HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
| 520 | +}
| 521 | +
| 522 | +void hinic_task_set_inner_l3(struct hinic_sq_task *task,
| 523 | +			     enum hinic_l3_offload_type l3_type,
| 524 | +			     u32 network_len)
| 525 | +{
| 526 | +	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
| 527 | +	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
| 528 | +}
| 529 | +
| 530 | +void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
| 531 | +			      enum hinic_l4_tunnel_type l4_type,
| 532 | +			      u32 tunnel_len)
| 533 | +{
| 534 | +	task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
| 535 | +			   HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
| 536 | +}
| 537 | +
| 538 | +void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
| 539 | +			   enum hinic_l4_offload_type l4_offload,
| 540 | +			   u32 l4_len, u32 offset)
| 541 | +{
| 542 | +	u32 tcp_udp_cs = 0, sctp = 0;
| 543 | +	u32 mss = HINIC_MSS_DEFAULT;
| 544 | +
| 545 | +	if (l4_offload == TCP_OFFLOAD_ENABLE ||
| 546 | +	    l4_offload == UDP_OFFLOAD_ENABLE)
| 547 | +		tcp_udp_cs = 1;
| 548 | +	else if (l4_offload == SCTP_OFFLOAD_ENABLE)
| 549 | +		sctp = 1;
| 550 | +
| 551 | +	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
| 552 | +	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
| 553 | +
| 554 | +	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
| 555 | +		       HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
| 556 | +		       HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
| 557 | +
| 558 | +	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
| 559 | +	*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
| 560 | +}
| 561 | +
| 562 | +void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
| 563 | +			    enum hinic_l4_offload_type l4_offload,
| 564 | +			    u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
| 565 | +{
| 566 | +	u32 tso = 0, ufo = 0;
| 567 | +
| 568 | +	if (l4_offload == TCP_OFFLOAD_ENABLE)
| 569 | +		tso = 1;
| 570 | +	else if (l4_offload == UDP_OFFLOAD_ENABLE)
| 571 | +		ufo = 1;
| 572 | +
| 573 | +	task->ufo_v6_identify = ip_ident;
| 574 | +
| 575 | +	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
| 576 | +	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
| 577 | +	task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
| 578 | +
| 579 | +	*queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
| 580 | +		       HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
| 581 | +		       HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
| 582 | +		       HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
| 583 | +
| 584 | +	/* set MSS value */
| 585 | +	*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
| 586 | +	*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
530 | 587 | }
531 | 588 |
532 | 589 | /**
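The helpers added above replace the old one-shot defaults in sq_prepare_task() with per-packet offload setup that the transmit path fills in as needed. A hedged sketch of how a caller might chain them for a non-tunnelled packet with TX checksum offload; the types, the TCP_OFFLOAD_ENABLE value, and the helper names come from the hunk above, while example_prepare_csum() and its length/offset parameters are illustrative stand-ins, not the driver's actual hinic_tx.c code:

/* Illustrative sketch only: intended call order of the new task/queue_info
 * helpers for a non-tunnelled frame.  The l3_type value and the units of
 * the length/offset arguments are placeholders.
 */
static void example_prepare_csum(struct hinic_sq_task *task, u32 *queue_info,
				 enum hinic_l3_offload_type l3_type,
				 u32 l2hdr_len, u32 l3_len, u32 l4_len,
				 u32 payload_offset)
{
	hinic_task_set_l2hdr(task, l2hdr_len);

	/* inner == outer for a non-tunnelled packet */
	hinic_task_set_inner_l3(task, l3_type, l3_len);

	/* TCP_OFFLOAD_ENABLE is one of the enum hinic_l4_offload_type
	 * values tested inside hinic_set_cs_inner_l4() above.
	 */
	hinic_set_cs_inner_l4(task, queue_info, TCP_OFFLOAD_ENABLE,
			      l4_len, payload_offset);
}

For TSO, hinic_set_tso_inner_l4() would be called in place of hinic_set_cs_inner_l4(), passing the per-packet MSS instead of relying on HINIC_MSS_DEFAULT.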
.. | ..
586 | 643 |
587 | 644 | 	/* increment prod_idx to the next */
588 | 645 | 	prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
| 646 | +	prod_idx = SQ_MASKED_IDX(sq, prod_idx);
589 | 647 |
590 | 648 | 	wmb();	/* Write all before the doorbell */
591 | 649 |
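The added line keeps the producer index inside the ring before it is written to the doorbell: SQ_MASKED_IDX() (defined near the top of this file) ANDs the index with the work queue's mask, which for a power-of-two ring is equivalent to a modulo by the queue depth. A small standalone sketch of the same wrap-around arithmetic, with illustrative names:

/* Illustrative ring-index wrap: with a power-of-two q_depth,
 * "idx & (q_depth - 1)" is the same as "idx % q_depth".
 */
static inline u16 example_masked_idx(u16 idx, u16 q_depth)
{
	return idx & (q_depth - 1);
}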
.. | ..
613 | 671 | }
614 | 672 |
615 | 673 | /**
| 674 | + * hinic_sq_return_wqe - return the wqe to the sq
| 675 | + * @sq: send queue
| 676 | + * @wqe_size: the size of the wqe
| 677 | + **/
| 678 | +void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
| 679 | +{
| 680 | +	hinic_return_wqe(sq->wq, wqe_size);
| 681 | +}
| 682 | +
| 683 | +/**
616 | 684 |  * hinic_sq_write_wqe - write the wqe to the sq
617 | 685 |  * @sq: send queue
618 | 686 |  * @prod_idx: pi of the wqe
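hinic_sq_return_wqe() is a thin wrapper that hands a just-reserved WQE back to the work queue, which is the shape of a transmit error path. A hedged sketch of that pattern: hinic_sq_return_wqe() is from the hunk above, the hinic_sq_get_wqe() call and its signature are assumed from the rest of this driver, and example_reserve_and_fill() / example_fill_wqe() are hypothetical:

/* Illustrative error-path use of the new helper: reserve a WQE, try to set
 * it up, and return it to the SQ if setup fails so no slot is leaked.
 */
static int example_reserve_and_fill(struct hinic_sq *sq, unsigned int wqe_size)
{
	struct hinic_sq_wqe *sq_wqe;
	u16 prod_idx;

	/* assumed reserve helper from this driver; not part of the hunk above */
	sq_wqe = hinic_sq_get_wqe(sq, wqe_size, &prod_idx);
	if (!sq_wqe)
		return -EBUSY;

	if (example_fill_wqe(sq_wqe) < 0) {
		/* setup failed: give the reserved WQE back */
		hinic_sq_return_wqe(sq, wqe_size);
		return -EFAULT;
	}

	return 0;
}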
---|