forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/qib/qib_rc.c
@@ -45,12 +45,7 @@
 	u32 len;
 
 	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
-	ss->sge = wqe->sg_list[0];
-	ss->sg_list = wqe->sg_list + 1;
-	ss->num_sge = wqe->wr.num_sge;
-	ss->total_len = wqe->length;
-	rvt_skip_sge(ss, len, false);
-	return wqe->length - len;
+	return rvt_restart_sge(ss, wqe, len);
 }
 
 /**
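
Note: rvt_restart_sge() now carries the open-coded restart above; assuming the upstream rdmavt header (include/rdma/rdmavt_qp.h), the inline is essentially the six removed statements:

    static inline u32 rvt_restart_sge(struct rvt_sge_state *ss,
                                      struct rvt_swqe *wqe, u32 len)
    {
            ss->sge = wqe->sg_list[0];
            ss->sg_list = wqe->sg_list + 1;
            ss->num_sge = wqe->wr.num_sge;
            ss->total_len = wqe->length;
            rvt_skip_sge(ss, len, false);
            return wqe->length - len;
    }
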
@@ -88,7 +83,7 @@
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(ATOMIC_ACKNOWLEDGE):
 		/*
 		 * We can increment the tail pointer now that the last
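
Note: fallthrough is the kernel's pseudo-keyword replacement for /* FALLTHROUGH */ comments; roughly, from include/linux/compiler_attributes.h:

    #if __has_attribute(__fallthrough__)
    # define fallthrough    __attribute__((__fallthrough__))
    #else
    # define fallthrough    do {} while (0)  /* fallthrough */
    #endif

It lets -Wimplicit-fallthrough verify intentional switch fall-through at the compiler level instead of relying on comment parsing; the same conversion applies to every fall-through hunk below.
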
@@ -97,7 +92,7 @@
 		 */
 		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
 			qp->s_tail_ack_queue = 0;
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(SEND_ONLY):
 	case OP(ACKNOWLEDGE):
 		/* Check for no next entry in the queue. */
@@ -154,7 +149,7 @@
 
 	case OP(RDMA_READ_RESPONSE_FIRST):
 		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(RDMA_READ_RESPONSE_MIDDLE):
 		qp->s_cur_sge = &qp->s_ack_rdma_sge;
 		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
@@ -254,7 +249,7 @@
 			goto bail;
 		}
 		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+		rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
 			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
 		/* will get called again */
 		goto done;
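
Note: rvt_send_complete() is the rdmavt replacement for the driver-local qib_send_complete(); assuming the exported rdmavt API, the prototype is:

    /* drivers/infiniband/sw/rdmavt/qp.c */
    void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                           enum ib_wc_status status);

It completes the given send WQE with the supplied status (posting a send CQ entry when one is required), which is what the per-driver helper did before this series; the later qib_send_complete() hunks are the same substitution.
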
@@ -318,11 +313,8 @@
 		case IB_WR_SEND:
 		case IB_WR_SEND_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+			if (!rvt_rc_credit_avail(qp, wqe))
 				goto bail;
-			}
 			if (len > pmtu) {
 				qp->s_state = OP(SEND_FIRST);
 				len = pmtu;
@@ -349,11 +341,8 @@
 			goto no_flow_control;
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+			if (!rvt_rc_credit_avail(qp, wqe))
 				goto bail;
-			}
 no_flow_control:
 			ohdr->u.rc.reth.vaddr =
 				cpu_to_be64(wqe->rdma_wr.remote_addr);
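
Note: both credit checks above collapse into rvt_rc_credit_avail(). A minimal sketch, assuming the inline mirrors the removed logic (the upstream helper in include/rdma/rdmavt_qp.h may additionally bump a per-port wait counter):

    /* true if a send credit is available; otherwise flag the QP as
     * waiting for SSN credit so the caller can bail */
    static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
    {
            if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
                rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                    qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                    return false;
            }
            return true;
    }
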
@@ -482,10 +471,10 @@
 		 * See qib_restart_rc().
 		 */
 		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(SEND_FIRST):
 		qp->s_state = OP(SEND_MIDDLE);
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(SEND_MIDDLE):
 		bth2 = qp->s_psn++ & QIB_PSN_MASK;
 		ss = &qp->s_sge;
@@ -521,10 +510,10 @@
 		 * See qib_restart_rc().
 		 */
 		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(RDMA_WRITE_FIRST):
 		qp->s_state = OP(RDMA_WRITE_MIDDLE);
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(RDMA_WRITE_MIDDLE):
 		bth2 = qp->s_psn++ & QIB_PSN_MASK;
 		ss = &qp->s_sge;
@@ -838,7 +827,7 @@
 			qib_migrate_qp(qp);
 			qp->s_retry = qp->s_retry_cnt;
 		} else if (qp->s_last == qp->s_acked) {
-			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+			rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
 			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 			return;
 		} else /* XXX need to handle delayed completion */
@@ -926,20 +915,11 @@
 		rvt_add_retry_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
-		u32 s_last;
-
 		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
 		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
-		s_last = qp->s_last;
-		if (++s_last >= qp->s_size)
-			s_last = 0;
-		qp->s_last = s_last;
-		/* see post_send() */
-		barrier();
-		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp,
+		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
@@ -977,21 +957,12 @@
 	 * is finished.
 	 */
 	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
-	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
-		u32 s_last;
-
-		rvt_put_swqe(wqe);
-		s_last = qp->s_last;
-		if (++s_last >= qp->s_size)
-			s_last = 0;
-		qp->s_last = s_last;
-		/* see post_send() */
-		barrier();
-		rvt_qp_swqe_complete(qp,
+	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
+		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
-	} else
+	else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
 	qp->s_retry = qp->s_retry_cnt;
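
Note: rvt_qp_complete_swqe() folds the open-coded sequence (advance s_last, barrier(), rvt_put_swqe(), rvt_qp_swqe_complete()) into one rdmavt inline. A condensed sketch, assuming it behaves like the code it replaces; the real helper in include/rdma/rdmavt_qp.h also handles reserved WQEs and tracing:

    static inline u32 rvt_qp_complete_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                           enum ib_wc_opcode opcode,
                                           enum ib_wc_status status)
    {
            /* decide on and capture completion data before s_last moves,
             * since the slot may be reused by the poster afterwards */
            bool signal = !(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
                          (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
                          status != IB_WC_SUCCESS;
            u64 wr_id = wqe->wr.wr_id;
            u32 byte_len = wqe->length;
            u32 last;

            rvt_put_swqe(wqe);              /* drop the WQE's MR references */
            last = qp->s_last + 1;
            if (last >= qp->s_size)
                    last = 0;
            /* replaces the open-coded barrier(); pairs with post_send() */
            smp_store_release(&qp->s_last, last);

            if (signal) {
                    struct ib_wc wc = {
                            .wr_id = wr_id, .status = status,
                            .opcode = opcode, .qp = &qp->ibqp,
                            .byte_len = byte_len,
                    };

                    rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
                                 status != IB_WC_SUCCESS);
            }
            return last;
    }
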
@@ -1221,7 +1192,7 @@
 			ibp->rvp.n_other_naks++;
 class_b:
 			if (qp->s_last == qp->s_acked) {
-				qib_send_complete(qp, wqe, status);
+				rvt_send_complete(qp, wqe, status);
 				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 			}
 			break;
@@ -1425,7 +1396,8 @@
 		qp->s_rdma_read_len -= pmtu;
 		update_last_psn(qp, psn);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
-		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+			     data, pmtu, false, false);
 		goto bail;
 
 	case OP(RDMA_READ_RESPONSE_ONLY):
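
Note: rvt_copy_sge() takes over from qib_copy_sge(); assuming the rdmavt declaration, the old int release argument becomes two bools and the QP is passed so rdmavt can apply its own copy heuristics:

    /* include/rdma/rdmavt_qp.h; defined in drivers/infiniband/sw/rdmavt/qp.c */
    void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
                      void *data, u32 length,
                      bool release,          /* drop MR references as SGEs complete */
                      bool copy_last);       /* hint that this is the final copy */

The RDMA READ response paths here pass release=false (the read SGE references are dropped later), while the receive paths further down pass release=true, matching the old qib_copy_sge(..., 1) calls.
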
@@ -1471,7 +1443,8 @@
 		if (unlikely(tlen != qp->s_rdma_read_len))
 			goto ack_len_err;
 		aeth = be32_to_cpu(ohdr->u.aeth);
-		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+			     data, tlen, false, false);
 		WARN_ON(qp->s_rdma_read_sge.num_sge);
 		(void) do_rc_ack(qp, aeth, psn,
 				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1490,7 +1463,7 @@
 	status = IB_WC_LOC_LEN_ERR;
 ack_err:
 	if (qp->s_last == qp->s_acked) {
-		qib_send_complete(qp, wqe, status);
+		rvt_send_complete(qp, wqe, status);
 		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 	}
 ack_done:
@@ -1834,7 +1807,7 @@
 		if (!ret)
 			goto rnr_nak;
 		qp->r_rcv_len = 0;
-		/* FALLTHROUGH */
+		fallthrough;
 	case OP(SEND_MIDDLE):
 	case OP(RDMA_WRITE_MIDDLE):
 send_middle:
@@ -1844,7 +1817,7 @@
 		qp->r_rcv_len += pmtu;
 		if (unlikely(qp->r_rcv_len > qp->r_len))
 			goto nack_inv;
-		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
 		break;
 
 	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -1866,7 +1839,7 @@
 		qp->r_rcv_len = 0;
 		if (opcode == OP(SEND_ONLY))
 			goto no_immediate_data;
-		/* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
+		fallthrough;	/* for SEND_ONLY_WITH_IMMEDIATE */
 	case OP(SEND_LAST_WITH_IMMEDIATE):
 send_last_imm:
 		wc.ex.imm_data = ohdr->u.imm_data;
@@ -1890,7 +1863,7 @@
 		wc.byte_len = tlen + qp->r_rcv_len;
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto nack_inv;
-		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
 		rvt_put_ss(&qp->r_sge);
 		qp->r_msn++;
 		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
@@ -1912,8 +1885,7 @@
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-			     ib_bth_is_solicited(ohdr));
+		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
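
Note: rvt_recv_cq() is a thin rdmavt wrapper for posting a receive completion; a minimal sketch, assuming it simply targets qp->ibqp.recv_cq as the removed call did (the upstream inline may also handle CQ overflow by moving the QP to the error state):

    static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
                                   bool solicited)
    {
            rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), wc, solicited);
    }
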