2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/include/rdma/rdmavt_qp.h
@@ -1,57 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2016 - 2020 Intel Corporation.
+ */
+
 #ifndef DEF_RDMAVT_INCQP_H
 #define DEF_RDMAVT_INCQP_H
-
-/*
- * Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in
- *   the documentation and/or other materials provided with the
- *   distribution.
- * - Neither the name of Intel Corporation nor the names of its
- *   contributors may be used to endorse or promote products derived
- *   from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
 
 #include <rdma/rdma_vt.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdmavt_cq.h>
+#include <rdma/rvt-abi.h>
 /*
  * Atomic bit definitions for r_aflags.
  */
@@ -66,6 +25,33 @@
 #define RVT_R_RSP_NAK 0x04
 #define RVT_R_RSP_SEND 0x08
 #define RVT_R_COMM_EST 0x10
+
+/*
+ * If a packet's QP[23:16] bits match this value, then it is
+ * a PSM packet and the hardware will expect a KDETH header
+ * following the BTH.
+ */
+#define RVT_KDETH_QP_PREFIX 0x80
+#define RVT_KDETH_QP_SUFFIX 0xffff
+#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
+#define RVT_KDETH_QP_PREFIX_SHIFT 16
+#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
+                                RVT_KDETH_QP_PREFIX_SHIFT)
+#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
+
+/*
+ * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
+ * prefix value, then it is an AIP packet with a DETH containing the entropy
+ * value in byte 4 following the BTH.
+ */
+#define RVT_AIP_QP_PREFIX 0x81
+#define RVT_AIP_QP_SUFFIX 0xffff
+#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
+#define RVT_AIP_QP_PREFIX_SHIFT 16
+#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
+                              RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
 
 /*
  * Bit definitions for s_flags.
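
Both prefixes select on bits 23:16 of the 24-bit destination QPN, so a receive path can classify a QPN with a single mask-and-compare against the *_QP_BASE values. A minimal sketch of that check (the helper below is illustrative, not part of this file):

    /* Illustrative helper, not part of this file: classify a QPN as AIP
     * by comparing bits 23:16 against the AIP prefix. */
    static inline bool qpn_is_aip(u32 qpn)
    {
            return (qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE;
    }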
@@ -83,7 +69,6 @@
  * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
  *                  next send completion entry not via send DMA
  * RVT_S_WAIT_PIO - waiting for a send buffer to be available
- * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
  * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
  * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
  * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
@@ -157,6 +142,22 @@
 #define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
 #define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
 
+/**
+ * rvt_ud_wr - IB UD work plus AH cache
+ * @wr: valid IB work request
+ * @attr: pointer to an allocated AH attribute
+ *
+ * Special case the UD WR so we can keep track of the AH attributes.
+ *
+ * NOTE: This data structure is strictly ordered wr then attr. I.e. the attr
+ * MUST come after wr. The ib_ud_wr is sized and copied in rvt_post_one_wr.
+ * The copy assumes that wr is first.
+ */
+struct rvt_ud_wr {
+        struct ib_ud_wr wr;
+        struct rdma_ah_attr *attr;
+};
+
 /*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
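
The ordering NOTE above is load-bearing: the post path copies only the sizeof(struct ib_ud_wr) prefix of the union, so a pointer cached after that prefix survives the copy. A rough sketch of the pattern (illustrative helper, not the actual rvt_post_one_wr code; ud_wr() is the ib_send_wr downcast helper from <rdma/ib_verbs.h>):

    /* Illustrative sketch, not the actual rvt_post_one_wr code. */
    static void cache_ud_wr(struct rvt_swqe *wqe,
                            const struct ib_send_wr *ibwr,
                            struct rdma_ah_attr *attr)
    {
            /* Copy only the ib_ud_wr prefix of the union ... */
            memcpy(&wqe->ud_wr.wr, ud_wr(ibwr), sizeof(wqe->ud_wr.wr));
            /* ... then set the cached attr, which lives after that prefix. */
            wqe->ud_wr.attr = attr;
    }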
@@ -165,7 +166,7 @@
 struct rvt_swqe {
         union {
                 struct ib_send_wr wr;   /* don't use wr.sg_list */
-                struct ib_ud_wr ud_wr;
+                struct rvt_ud_wr ud_wr;
                 struct ib_reg_wr reg_wr;
                 struct ib_rdma_wr rdma_wr;
                 struct ib_atomic_wr atomic_wr;
@@ -174,55 +175,112 @@
         u32 lpsn;               /* last packet sequence number */
         u32 ssn;                /* send sequence number */
         u32 length;             /* total length of data in sg_list */
-        struct rvt_sge sg_list[0];
+        void *priv;             /* driver dependent field */
+        struct rvt_sge sg_list[];
 };
 
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+/**
+ * struct rvt_krwq - kernel struct receive work request
+ * @p_lock: lock to protect producer of the kernel buffer
+ * @head: index of next entry to fill
+ * @c_lock: lock to protect consumer of the kernel buffer
+ * @tail: index of next entry to pull
+ * @count: approximate count of total receive entries posted
+ * @wq: the receive work queue entries
+ *
+ * This structure is used to contain the head pointer,
+ * tail pointer and receive work queue entries for kernel
+ * mode users.
  */
-struct rvt_rwqe {
-        u64 wr_id;
-        u8 num_sge;
-        struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct rvt_rwq {
+struct rvt_krwq {
+        spinlock_t p_lock;      /* protect producer */
         u32 head;               /* new work requests posted to the head */
+
+        /* protect consumer */
+        spinlock_t c_lock ____cacheline_aligned_in_smp;
         u32 tail;               /* receives pull requests from here. */
-        struct rvt_rwqe wq[0];
+        u32 count;              /* approx count of receive entries posted */
+        struct rvt_rwqe *curr_wq;
+        struct rvt_rwqe wq[];
 };
+
+/**
+ * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
+ * @swqe: valid Send WQE
+ */
+static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
+{
+        return ibah_to_rvtah(swqe->ud_wr.wr.ah);
+}
+
+/**
+ * rvt_get_swqe_ah_attr - Return the cached ah attribute information
+ * @swqe: valid Send WQE
+ */
+static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
+{
+        return swqe->ud_wr.attr;
+}
+
+/**
+ * rvt_get_swqe_remote_qpn - Access the remote QPN value
+ * @swqe: valid Send WQE
+ */
+static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
+{
+        return swqe->ud_wr.wr.remote_qpn;
+}
+
+/**
+ * rvt_get_swqe_remote_qkey - Access the remote qkey value
+ * @swqe: valid Send WQE
+ */
+static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
+{
+        return swqe->ud_wr.wr.remote_qkey;
+}
+
+/**
+ * rvt_get_swqe_pkey_index - Access the pkey index
+ * @swqe: valid Send WQE
+ */
+static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
+{
+        return swqe->ud_wr.wr.pkey_index;
+}
 
 struct rvt_rq {
         struct rvt_rwq *wq;
+        struct rvt_krwq *kwq;
         u32 size;               /* size of RWQE array */
         u8 max_sge;
         /* protect changes in this struct */
         spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
-/*
- * This structure is used by rvt_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
+/**
+ * rvt_get_rq_count - count the number of receive work queue entries
+ * in the circular buffer
+ * @rq: the receive queue
+ * @head: head index of the circular buffer
+ * @tail: tail index of the circular buffer
+ *
+ * Return - total number of entries in the Receive Queue
  */
-struct rvt_mmap_info {
-        struct list_head pending_mmaps;
-        struct ib_ucontext *context;
-        void *obj;
-        __u64 offset;
-        struct kref ref;
-        unsigned size;
-};
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+        u32 count = head - tail;
+
+        if ((s32)count < 0)
+                count += rq->size;
+        return count;
+}
 
 /*
  * This structure holds the information that the send tasklet needs
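
rvt_get_rq_count() handles ring wraparound: head and tail are indices in [0, rq->size), so when the producer has wrapped past the consumer, head - tail underflows; the (s32) cast turns it negative and adding rq->size recovers the true count (e.g. size = 256, head = 3, tail = 250 gives 3 - 250 -> -247 -> 9 entries). A hedged usage sketch (the helper is hypothetical; real callers serialize on the kwq locks):

    /* Hypothetical helper; real callers hold kwq->c_lock or kwq->p_lock. */
    static bool rq_has_entries(struct rvt_rq *rq)
    {
            struct rvt_krwq *kwq = rq->kwq;

            return rvt_get_rq_count(rq, READ_ONCE(kwq->head), kwq->tail) != 0;
    }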
@@ -235,6 +293,7 @@
         u32 lpsn;
         u8 opcode;
         u8 sent;
+        void *priv;
 };
 
 #define RC_QP_SCALING_INTERVAL 5
@@ -244,6 +303,7 @@
 #define RVT_OPERATION_ATOMIC_SGE 0x00000004
 #define RVT_OPERATION_LOCAL 0x00000008
 #define RVT_OPERATION_USE_RESERVE 0x00000010
+#define RVT_OPERATION_IGN_RNR_CNT 0x00000020
 
 #define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
@@ -373,6 +433,7 @@
         u8 s_rnr_retry;         /* requester RNR retry counter */
         u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
         u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
+        u8 s_acked_ack_queue;   /* index into s_ack_queue[] */
 
         struct rvt_sge_state s_ack_rdma_sge;
         struct timer_list s_timer;
@@ -383,7 +444,7 @@
         /*
          * This sge list MUST be last. Do not add anything below here.
          */
-        struct rvt_sge r_sg_list[0] /* verified SGEs */
+        struct rvt_sge r_sg_list[] /* verified SGEs */
                 ____cacheline_aligned_in_smp;
 };
 
@@ -394,6 +455,16 @@
         /* send signal when number of RWQEs < limit */
         u32 limit;
 };
+
+static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
+{
+        return container_of(ibsrq, struct rvt_srq, ibsrq);
+}
+
+static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
+{
+        return container_of(ibqp, struct rvt_qp, ibqp);
+}
 
 #define RVT_QPN_MAX BIT(24)
 #define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@@ -473,7 +544,7 @@
 static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
 {
         return (struct rvt_rwqe *)
-                ((char *)rq->wq->wq +
+                ((char *)rq->kwq->curr_wq +
                  (sizeof(struct rvt_rwqe) +
                   rq->max_sge * sizeof(struct ib_sge)) * n);
 }
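
Each RWQE is followed inline by max_sge scatter/gather entries, so the element stride is only known at run time and plain array indexing of curr_wq would be wrong; rvt_get_rwqe_ptr() does the byte arithmetic instead. The stride it multiplies by is just (illustrative helper, not part of this file):

    /* Illustrative: the run-time stride used by rvt_get_rwqe_ptr(). */
    static inline size_t rwqe_stride(const struct rvt_rq *rq)
    {
            return sizeof(struct rvt_rwqe) +
                   rq->max_sge * sizeof(struct ib_sge);
    }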
@@ -541,7 +612,7 @@
 /**
  * rvt_qp_wqe_unreserve - clean reserved operation
  * @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
  *
  * This decrements the reserve use count.
  *
@@ -553,11 +624,9 @@
  * the compiler does not juggle the order of the s_last
  * ring index and the decrementing of s_reserved_used.
  */
-static inline void rvt_qp_wqe_unreserve(
-        struct rvt_qp *qp,
-        struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
 {
-        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+        if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                 atomic_dec(&qp->s_reserved_used);
                 /* insure no compiler re-order up to s_last change */
                 smp_mb__after_atomic();
@@ -565,42 +634,6 @@
 }
 
 extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
-
-/**
- * rvt_qp_swqe_complete() - insert send completion
- * @qp - the qp
- * @wqe - the send wqe
- * @status - completion status
- *
- * Insert a send completion into the completion
- * queue if the qp indicates it should be done.
- *
- * See IBTA 10.7.3.1 for info on completion
- * control.
- */
-static inline void rvt_qp_swqe_complete(
-        struct rvt_qp *qp,
-        struct rvt_swqe *wqe,
-        enum ib_wc_opcode opcode,
-        enum ib_wc_status status)
-{
-        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
-                return;
-        if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-            status != IB_WC_SUCCESS) {
-                struct ib_wc wc;
-
-                memset(&wc, 0, sizeof(wc));
-                wc.wr_id = wqe->wr.wr_id;
-                wc.status = status;
-                wc.opcode = opcode;
-                wc.qp = &qp->ibqp;
-                wc.byte_len = wqe->length;
-                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
-                             status != IB_WC_SUCCESS);
-        }
-}
 
 /*
  * Compare the lower 24 bits of the msn values.
@@ -611,24 +644,14 @@
         return (((int)a) - ((int)b)) << 8;
 }
 
-/**
- * rvt_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
 __be32 rvt_compute_aeth(struct rvt_qp *qp);
 
-/**
- * rvt_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
 void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
 
+u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
+
 /**
+ * rvt_div_round_up_mtu - round up divide
  * @qp - the qp pair
  * @len - the length
  *
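
The body of rvt_div_round_up_mtu() falls outside this hunk; going by its name and the qp's pmtu/log_pmtu fields, a round-up divide by a power-of-two path MTU presumably reduces to an add-and-shift, along these lines:

    /* Sketch only: assumes qp->log_pmtu == log2(qp->pmtu), i.e. a
     * power-of-two path MTU, so the round-up divide becomes a shift. */
    static inline u32 div_round_up_mtu_sketch(struct rvt_qp *qp, u32 len)
    {
            return (len + qp->pmtu - 1) >> qp->log_pmtu;
    }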
@@ -664,19 +687,206 @@
         return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
 }
 
+/**
+ * rvt_lookup_qpn - return the QP with the given QPN
+ * @rdi: the rvt device info
+ * @rvp: the ibport
+ * @qpn: the QP number to look up
+ *
+ * The caller must hold the rcu_read_lock(), and keep the lock until
+ * the returned qp is no longer in use.
+ */
+static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
+                                            struct rvt_ibport *rvp,
+                                            u32 qpn) __must_hold(RCU)
+{
+        struct rvt_qp *qp = NULL;
+
+        if (unlikely(qpn <= 1)) {
+                qp = rcu_dereference(rvp->qp[qpn]);
+        } else {
+                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
+
+                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
+                     qp = rcu_dereference(qp->next))
+                        if (qp->ibqp.qp_num == qpn)
+                                break;
+        }
+        return qp;
+}
+
+/**
+ * rvt_mod_retry_timer_ext - mod a retry timer
+ * @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
+ *
+ * Modify a potentially already running retry timer.
+ */
+static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
+{
+        struct ib_qp *ibqp = &qp->ibqp;
+        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+        lockdep_assert_held(&qp->s_lock);
+        qp->s_flags |= RVT_S_TIMER;
+        /* 4.096 usec. * (1 << qp->timeout) */
+        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
+                  (qp->timeout_jiffies << shift));
+}
+
+static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
+{
+        rvt_mod_retry_timer_ext(qp, 0);
+}
+
+/**
+ * rvt_put_qp_swqe - drop refs held by swqe
+ * @qp: the send qp
+ * @wqe: the send wqe
+ *
+ * This drops any references held by the swqe
+ */
+static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+        rvt_put_swqe(wqe);
+        if (qp->allowed_ops == IB_OPCODE_UD)
+                rdma_destroy_ah_attr(wqe->ud_wr.attr);
+}
+
+/**
+ * rvt_qp_swqe_incr - increment ring index
+ * @qp: the qp
+ * @val: the starting value
+ *
+ * Return: the new value wrapping as appropriate
+ */
+static inline u32
+rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
+{
+        if (++val >= qp->s_size)
+                val = 0;
+        return val;
+}
+
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
+
+/**
+ * rvt_recv_cq - add a new entry to completion queue
+ *      by receive queue
+ * @qp: the qp with the receive queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper for the rvt_cq_enter() call from the receive
+ * queue. If rvt_cq_enter() returns false, it means the cq is full
+ * and the qp is put into error state.
+ */
+static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
+                               bool solicited)
+{
+        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
+
+        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
+/**
+ * rvt_send_cq - add a new entry to completion queue
+ *      by send queue
+ * @qp: the qp with the send queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper for the rvt_cq_enter() call from the send
+ * queue. If rvt_cq_enter() returns false, it means the cq is full
+ * and the qp is put into error state.
+ */
+static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
+                               bool solicited)
+{
+        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
+
+        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
+/**
+ * rvt_qp_complete_swqe - insert send completion
+ * @qp - the qp
+ * @wqe - the send wqe
+ * @opcode - wc operation (driver dependent)
+ * @status - completion status
+ *
+ * Update the s_last information, and then insert a send completion
+ * into the completion queue if the qp indicates it should be done.
+ *
+ * See IBTA 10.7.3.1 for info on completion
+ * control.
+ *
+ * Return: new last
+ */
+static inline u32
+rvt_qp_complete_swqe(struct rvt_qp *qp,
+                     struct rvt_swqe *wqe,
+                     enum ib_wc_opcode opcode,
+                     enum ib_wc_status status)
+{
+        bool need_completion;
+        u64 wr_id;
+        u32 byte_len, last;
+        int flags = wqe->wr.send_flags;
+
+        rvt_qp_wqe_unreserve(qp, flags);
+        rvt_put_qp_swqe(qp, wqe);
+
+        need_completion =
+                !(flags & RVT_SEND_RESERVE_USED) &&
+                (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+                 (flags & IB_SEND_SIGNALED) ||
+                 status != IB_WC_SUCCESS);
+        if (need_completion) {
+                wr_id = wqe->wr.wr_id;
+                byte_len = wqe->length;
+                /* above fields required before writing s_last */
+        }
+        last = rvt_qp_swqe_incr(qp, qp->s_last);
+        /* see rvt_qp_is_avail() */
+        smp_store_release(&qp->s_last, last);
+        if (need_completion) {
+                struct ib_wc w = {
+                        .wr_id = wr_id,
+                        .status = status,
+                        .opcode = opcode,
+                        .qp = &qp->ibqp,
+                        .byte_len = byte_len,
+                };
+                rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
+        }
+        return last;
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
 void rvt_comm_est(struct rvt_qp *qp);
-int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
 unsigned long rvt_rnr_tbl_to_usec(u32 index);
 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
 void rvt_del_timers_sync(struct rvt_qp *qp);
 void rvt_stop_rc_timers(struct rvt_qp *qp);
-void rvt_add_retry_timer(struct rvt_qp *qp);
+void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
+static inline void rvt_add_retry_timer(struct rvt_qp *qp)
+{
+        rvt_add_retry_timer_ext(qp, 0);
+}
+
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+                  void *data, u32 length,
+                  bool release, bool copy_last);
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+                       enum ib_wc_status status);
+void rvt_ruc_loopback(struct rvt_qp *qp);
 
 /**
  * struct rvt_qp_iter - the iterator for QPs
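
As the rvt_lookup_qpn() comment requires, the lookup and every use of the returned qp must sit inside one RCU read-side critical section. A sketch of the calling pattern (the receive-path wrapper itself is hypothetical):

    /* Hypothetical wrapper; only the RCU bracketing is the point. */
    static void deliver_to_qpn(struct rvt_dev_info *rdi,
                               struct rvt_ibport *rvp, u32 qpn)
    {
            struct rvt_qp *qp;

            rcu_read_lock();
            qp = rvt_lookup_qpn(rdi, rvp, qpn);
            if (qp) {
                    /* ... process against qp while still under RCU ... */
            }
            rcu_read_unlock();      /* qp must not be touched after this */
    }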
@@ -700,6 +910,88 @@
         int n;
 };
 
+/**
+ * ib_cq_tail - Return tail index of cq buffer
+ * @send_cq - The cq for send
+ *
+ * This is called in qp_iter_print to get the tail
+ * of the cq buffer.
+ */
+static inline u32 ib_cq_tail(struct ib_cq *send_cq)
+{
+        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+        return ibcq_to_rvtcq(send_cq)->ip ?
+               RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
+               ibcq_to_rvtcq(send_cq)->kqueue->tail;
+}
+
+/**
+ * ib_cq_head - Return head index of cq buffer
+ * @send_cq - The cq for send
+ *
+ * This is called in qp_iter_print to get the head
+ * of the cq buffer.
+ */
+static inline u32 ib_cq_head(struct ib_cq *send_cq)
+{
+        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+        return ibcq_to_rvtcq(send_cq)->ip ?
+               RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
+               ibcq_to_rvtcq(send_cq)->kqueue->head;
+}
+
+/**
+ * rvt_free_rq - free memory allocated for rvt_rq struct
+ * @rq: request queue data structure
+ *
+ * This function should only be called if the rvt_mmap_info()
+ * has not succeeded.
+ */
+static inline void rvt_free_rq(struct rvt_rq *rq)
+{
+        kvfree(rq->kwq);
+        rq->kwq = NULL;
+        vfree(rq->wq);
+        rq->wq = NULL;
+}
+
+/**
+ * rvt_to_iport - Get the ibport pointer
+ * @qp: the qp pointer
+ *
+ * This function returns the ibport pointer from the qp pointer.
+ */
+static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
+{
+        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+        return rdi->ports[qp->port_num - 1];
+}
+
+/**
+ * rvt_rc_credit_avail - Check if there are enough RC credits for the request
+ * @qp: the qp
+ * @wqe: the request
+ *
+ * This function returns false when there are not enough credits for the given
+ * request and true otherwise.
+ */
+static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+        lockdep_assert_held(&qp->s_lock);
+        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+                struct rvt_ibport *rvp = rvt_to_iport(qp);
+
+                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+                rvp->n_rc_crwaits++;
+                return false;
+        }
+        return true;
+}
+
 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
                                      u64 v,
                                      void (*cb)(struct rvt_qp *qp, u64 v));
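
rvt_rc_credit_avail() is built to gate an RC send engine: when the next WQE's SSN is past the last credited SSN (qp->s_lsn), the qp flags itself RVT_S_WAIT_SSN_CREDIT, counts the stall in n_rc_crwaits, and the engine backs off until a credit update arrives. A hedged sketch of a call site (the surrounding function is hypothetical, condensed from a typical driver send engine):

    /* Hypothetical, condensed send-engine fragment; qp->s_lock is held,
     * as the lockdep assertion in rvt_rc_credit_avail() requires. */
    static bool try_send_next_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
    {
            lockdep_assert_held(&qp->s_lock);
            if (!rvt_rc_credit_avail(qp, wqe))
                    return false;   /* qp now waits for a credit update */
            /* ... build and queue the packet described by wqe ... */
            return true;
    }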