2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/infiniband/hw/hfi1/ruc.c
@@ -156,338 +156,6 @@
 }

 /**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the send engine, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ruc_loopback(struct rvt_qp *sqp)
-{
-	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
-	struct rvt_qp *qp;
-	struct rvt_swqe *wqe;
-	struct rvt_sge *sge;
-	unsigned long flags;
-	struct ib_wc wc;
-	u64 sdata;
-	atomic64_t *maddr;
-	enum ib_wc_status send_status;
-	bool release;
-	int ret;
-	bool copy_last = false;
-	int local_ops = 0;
-
-	rcu_read_lock();
-
-	/*
-	 * Note that we check the responder QP state after
-	 * checking the requester's state.
-	 */
-	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
-			    sqp->remote_qpn);
-
-	spin_lock_irqsave(&sqp->s_lock, flags);
-
-	/* Return if we are already busy processing a work request. */
-	if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
-	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-		goto unlock;
-
-	sqp->s_flags |= RVT_S_BUSY;
-
-again:
-	if (sqp->s_last == READ_ONCE(sqp->s_head))
-		goto clr_busy;
-	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
-	/* Return if it is not OK to start a new work request. */
-	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
-		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
-			goto clr_busy;
-		/* We are in the error state, flush the work request. */
-		send_status = IB_WC_WR_FLUSH_ERR;
-		goto flush_send;
-	}
-
-	/*
-	 * We can rely on the entry not changing without the s_lock
-	 * being held until we update s_last.
-	 * We increment s_cur to indicate s_last is in progress.
-	 */
-	if (sqp->s_last == sqp->s_cur) {
-		if (++sqp->s_cur >= sqp->s_size)
-			sqp->s_cur = 0;
-	}
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-
-	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
-	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
-		ibp->rvp.n_pkt_drops++;
-		/*
-		 * For RC, the requester would timeout and retry so
-		 * shortcut the timeouts and just signal too many retries.
-		 */
-		if (sqp->ibqp.qp_type == IB_QPT_RC)
-			send_status = IB_WC_RETRY_EXC_ERR;
-		else
-			send_status = IB_WC_SUCCESS;
-		goto serr;
-	}
-
-	memset(&wc, 0, sizeof(wc));
-	send_status = IB_WC_SUCCESS;
-
-	release = true;
-	sqp->s_sge.sge = wqe->sg_list[0];
-	sqp->s_sge.sg_list = wqe->sg_list + 1;
-	sqp->s_sge.num_sge = wqe->wr.num_sge;
-	sqp->s_len = wqe->length;
-	switch (wqe->wr.opcode) {
-	case IB_WR_REG_MR:
-		goto send_comp;
-
-	case IB_WR_LOCAL_INV:
-		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
-			if (rvt_invalidate_rkey(sqp,
-						wqe->wr.ex.invalidate_rkey))
-				send_status = IB_WC_LOC_PROT_ERR;
-			local_ops = 1;
-		}
-		goto send_comp;
-
-	case IB_WR_SEND_WITH_INV:
-		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
-			wc.wc_flags = IB_WC_WITH_INVALIDATE;
-			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
-		}
-		goto send;
-
-	case IB_WR_SEND_WITH_IMM:
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		/* FALLTHROUGH */
-	case IB_WR_SEND:
-send:
-		ret = rvt_get_rwqe(qp, false);
-		if (ret < 0)
-			goto op_err;
-		if (!ret)
-			goto rnr_nak;
-		if (wqe->length > qp->r_len)
-			goto inv_err;
-		break;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		ret = rvt_get_rwqe(qp, true);
-		if (ret < 0)
-			goto op_err;
-		if (!ret)
-			goto rnr_nak;
-		/* skip copy_last set and qp_access_flags recheck */
-		goto do_write;
-	case IB_WR_RDMA_WRITE:
-		copy_last = rvt_is_user_qp(qp);
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-do_write:
-		if (wqe->length == 0)
-			break;
-		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
-					  wqe->rdma_wr.remote_addr,
-					  wqe->rdma_wr.rkey,
-					  IB_ACCESS_REMOTE_WRITE)))
-			goto acc_err;
-		qp->r_sge.sg_list = NULL;
-		qp->r_sge.num_sge = 1;
-		qp->r_sge.total_len = wqe->length;
-		break;
-
-	case IB_WR_RDMA_READ:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-			goto inv_err;
-		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
-					  wqe->rdma_wr.remote_addr,
-					  wqe->rdma_wr.rkey,
-					  IB_ACCESS_REMOTE_READ)))
-			goto acc_err;
-		release = false;
-		sqp->s_sge.sg_list = NULL;
-		sqp->s_sge.num_sge = 1;
-		qp->r_sge.sge = wqe->sg_list[0];
-		qp->r_sge.sg_list = wqe->sg_list + 1;
-		qp->r_sge.num_sge = wqe->wr.num_sge;
-		qp->r_sge.total_len = wqe->length;
-		break;
-
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
-			goto inv_err;
-		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
-					  wqe->atomic_wr.remote_addr,
-					  wqe->atomic_wr.rkey,
-					  IB_ACCESS_REMOTE_ATOMIC)))
-			goto acc_err;
-		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
-		sdata = wqe->atomic_wr.compare_add;
-		*(u64 *)sqp->s_sge.sge.vaddr =
-			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-			(u64)atomic64_add_return(sdata, maddr) - sdata :
-			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
-				     sdata, wqe->atomic_wr.swap);
-		rvt_put_mr(qp->r_sge.sge.mr);
-		qp->r_sge.num_sge = 0;
-		goto send_comp;
-
-	default:
-		send_status = IB_WC_LOC_QP_OP_ERR;
-		goto serr;
-	}
-
-	sge = &sqp->s_sge.sge;
-	while (sqp->s_len) {
-		u32 len = sqp->s_len;
-
-		if (len > sge->length)
-			len = sge->length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		WARN_ON_ONCE(len == 0);
-		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (!release)
-				rvt_put_mr(sge->mr);
-			if (--sqp->s_sge.num_sge)
-				*sge = *sqp->s_sge.sg_list++;
-		} else if (sge->length == 0 && sge->mr->lkey) {
-			if (++sge->n >= RVT_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		sqp->s_len -= len;
-	}
-	if (release)
-		rvt_put_ss(&qp->r_sge);
-
-	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
-		goto send_comp;
-
-	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-	else
-		wc.opcode = IB_WC_RECV;
-	wc.wr_id = qp->r_wr_id;
-	wc.status = IB_WC_SUCCESS;
-	wc.byte_len = wqe->length;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = qp->remote_qpn;
-	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
-	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
-	wc.port_num = 1;
-	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	ibp->rvp.n_loop_pkts++;
-flush_send:
-	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-	hfi1_send_complete(sqp, wqe, send_status);
-	if (local_ops) {
-		atomic_dec(&sqp->local_ops_pending);
-		local_ops = 0;
-	}
-	goto again;
-
-rnr_nak:
-	/* Handle RNR NAK */
-	if (qp->ibqp.qp_type == IB_QPT_UC)
-		goto send_comp;
-	ibp->rvp.n_rnr_naks++;
-	/*
-	 * Note: we don't need the s_lock held since the BUSY flag
-	 * makes this single threaded.
-	 */
-	if (sqp->s_rnr_retry == 0) {
-		send_status = IB_WC_RNR_RETRY_EXC_ERR;
-		goto serr;
-	}
-	if (sqp->s_rnr_retry_cnt < 7)
-		sqp->s_rnr_retry--;
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
-		goto clr_busy;
-	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
-			       IB_AETH_CREDIT_SHIFT);
-	goto clr_busy;
-
-op_err:
-	send_status = IB_WC_REM_OP_ERR;
-	wc.status = IB_WC_LOC_QP_OP_ERR;
-	goto err;
-
-inv_err:
-	send_status =
-		sqp->ibqp.qp_type == IB_QPT_RC ?
-			IB_WC_REM_INV_REQ_ERR :
-			IB_WC_SUCCESS;
-	wc.status = IB_WC_LOC_QP_OP_ERR;
-	goto err;
-
-acc_err:
-	send_status = IB_WC_REM_ACCESS_ERR;
-	wc.status = IB_WC_LOC_PROT_ERR;
-err:
-	/* responder goes to error state */
-	rvt_rc_error(qp, wc.status);
-
-serr:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	hfi1_send_complete(sqp, wqe, send_status);
-	if (sqp->ibqp.qp_type == IB_QPT_RC) {
-		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
-		sqp->s_flags &= ~RVT_S_BUSY;
-		spin_unlock_irqrestore(&sqp->s_lock, flags);
-		if (lastwqe) {
-			struct ib_event ev;
-
-			ev.device = sqp->ibqp.device;
-			ev.element.qp = &sqp->ibqp;
-			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
-		}
-		goto done;
-	}
-clr_busy:
-	sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
-	rcu_read_unlock();
-}
-
-/**
  * hfi1_make_grh - construct a GRH header
  * @ibp: a pointer to the IB port
  * @hdr: a pointer to the GRH header being constructed
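Note on the hunk above: hfi1's private UC/RC loopback handler is deleted, and the hfi1_do_send() hunks further down retarget its call sites to rvt_ruc_loopback(), so this logic now lives in rdmavt rather than in the driver. A minimal sketch of the dispatch after this change, reusing the dlid/LMC test that appears verbatim below (the ppd and loopback locals stand in for the real hfi1_do_send() context and are not part of this diff):

	u32 dlid = rdma_ah_get_dlid(&qp->remote_ah_attr);

	/* A destination LID inside our LMC mask is the local port: loop back. */
	if (!loopback && ((dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid)) {
		rvt_ruc_loopback(qp);	/* execute the WQE against the peer QP in memory */
		return;
	}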
@@ -582,7 +250,6 @@
 			  struct ib_other_headers *ohdr,
 			  u32 bth0, u32 bth1, u32 bth2)
 {
-	bth1 |= qp->remote_qpn;
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(bth1);
 	ohdr->bth[2] = cpu_to_be32(bth2);
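The single removed line above is behavioral, not cosmetic: the low-level BTH writer no longer folds the remote QPN into bth1, so callers must pass bth1 with the QPN (and any request-specific bits) already set. A sketch of the expected caller-side pattern, where extra_bth1_bits is a hypothetical placeholder for such bits:

	u32 bth1 = qp->remote_qpn | extra_bth1_bits;	/* extra_bth1_bits is hypothetical */

	hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);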
@@ -604,13 +271,13 @@
  */
 static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 					    struct ib_other_headers *ohdr,
-					    u32 bth0, u32 bth2, int middle,
+					    u32 bth0, u32 bth1, u32 bth2,
+					    int middle,
 					    struct hfi1_pkt_state *ps)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct hfi1_ibport *ibp = ps->ibp;
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	u32 bth1 = 0;
 	u32 slid;
 	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
 	u8 l4 = OPA_16B_L4_IB_LOCAL;
@@ -692,12 +359,12 @@
  */
 static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 					   struct ib_other_headers *ohdr,
-					   u32 bth0, u32 bth2, int middle,
+					   u32 bth0, u32 bth1, u32 bth2,
+					   int middle,
 					   struct hfi1_pkt_state *ps)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct hfi1_ibport *ibp = ps->ibp;
-	u32 bth1 = 0;
 	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
 	u16 lrh0 = HFI1_LRH_BTH;
 	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
@@ -747,7 +414,7 @@

 typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
 				  struct ib_other_headers *ohdr,
-				  u32 bth0, u32 bth2, int middle,
+				  u32 bth0, u32 bth1, u32 bth2, int middle,
 				  struct hfi1_pkt_state *ps);

 /* We support only two types - 9B and 16B for now */
@@ -757,7 +424,7 @@
 };

 void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
-			  u32 bth0, u32 bth2, int middle,
+			  u32 bth0, u32 bth1, u32 bth2, int middle,
 			  struct hfi1_pkt_state *ps)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
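The four hunks above thread the new bth1 argument through the 16B and 9B builders, the hfi1_make_ruc_hdr typedef, and the exported hfi1_make_ruc_header() wrapper, replacing the "u32 bth1 = 0" local each builder used to start from. The dispatch itself keeps its shape; per the hunk that follows, a call through the table now reads:

	struct hfi1_qp_priv *priv = qp->priv;

	/* priv->hdr_type selects the 9B or 16B builder from the table. */
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
					    ps);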
@@ -778,18 +445,21 @@
 	priv->s_ahg->ahgidx = 0;

 	/* Make the appropriate header */
-	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
+	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
+					    ps);
 }

 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */

 /**
- * schedule_send_yield - test for a yield required for QP send engine
+ * hfi1_schedule_send_yield - test for a yield required for QP
+ * send engine
  * @timeout: Final time for timeout slice for jiffies
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
+ * @tid: true if it is the tid leg
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs, if so an additional work entry is queued. At this
@@ -797,8 +467,8 @@
 * returns true if a yield is required, otherwise, false
 * is returned.
 */
-static bool schedule_send_yield(struct rvt_qp *qp,
-				struct hfi1_pkt_state *ps)
+bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+			      bool tid)
 {
 	ps->pkts_sent = true;

@@ -806,8 +476,24 @@
 	if (!ps->in_thread ||
 	    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
 		spin_lock_irqsave(&qp->s_lock, ps->flags);
-		qp->s_flags &= ~RVT_S_BUSY;
-		hfi1_schedule_send(qp);
+		if (!tid) {
+			qp->s_flags &= ~RVT_S_BUSY;
+			hfi1_schedule_send(qp);
+		} else {
+			struct hfi1_qp_priv *priv = qp->priv;
+
+			if (priv->s_flags &
+			    HFI1_S_TID_BUSY_SET) {
+				qp->s_flags &= ~RVT_S_BUSY;
+				priv->s_flags &=
+					~(HFI1_S_TID_BUSY_SET |
+					  RVT_S_BUSY);
+			} else {
+				priv->s_flags &= ~RVT_S_BUSY;
+			}
+			hfi1_schedule_tid_send(qp);
+		}
+
 		spin_unlock_irqrestore(&qp->s_lock, ps->flags);
 		this_cpu_inc(*ps->ppd->dd->send_schedule);
 		trace_hfi1_rc_expired_time_slice(qp, true);
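schedule_send_yield() is renamed, exported, and taught about a second scheduling leg: with tid set, it clears the busy state tracked in priv->s_flags (including the HFI1_S_TID_BUSY_SET case, where RVT_S_BUSY was mirrored into the QP flags) and requeues through hfi1_schedule_tid_send() instead of hfi1_schedule_send(). A hedged usage sketch for a caller on the tid leg; the actual tid call sites are presumably in the TID RDMA send code, not in this file:

	/* In a TID RDMA packet loop: yield when the time slice expires. */
	if (hfi1_schedule_send_yield(qp, &ps, true))
		return;	/* the tid work item was requeued; drop out of the loop */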
@@ -830,15 +516,15 @@

 void _hfi1_do_send(struct work_struct *work)
 {
-	struct iowait *wait = container_of(work, struct iowait, iowork);
-	struct rvt_qp *qp = iowait_to_qp(wait);
+	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+	struct rvt_qp *qp = iowait_to_qp(w->iow);

 	hfi1_do_send(qp, true);
 }

 /**
 * hfi1_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
@@ -855,6 +541,7 @@
 	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
 	ps.ppd = ppd_from_ibp(ps.ibp);
 	ps.in_thread = in_thread;
+	ps.wait = iowait_get_ib_work(&priv->s_iowait);

 	trace_hfi1_rc_do_send(qp, in_thread);

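Both hunks above belong to the iowait rework: _hfi1_do_send() now resolves the QP through the embedded struct iowait_work (w->iow pointing back to the parent iowait), and hfi1_do_send() caches the IB-leg work structure in ps.wait. The cached pointer is consumed by the txreq lookup further down; taking the two added lines from this diff together:

	ps.wait = iowait_get_ib_work(&priv->s_iowait);
	/* ... later, in the send loop ... */
	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);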
@@ -863,7 +550,7 @@
 		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
 				   ~((1 << ps.ppd->lmc) - 1)) ==
 				  ps.ppd->lid)) {
-			ruc_loopback(qp);
+			rvt_ruc_loopback(qp);
 			return;
 		}
 		make_req = hfi1_make_rc_req;
@@ -873,7 +560,7 @@
 		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
 				   ~((1 << ps.ppd->lmc) - 1)) ==
 				  ps.ppd->lid)) {
-			ruc_loopback(qp);
+			rvt_ruc_loopback(qp);
 			return;
 		}
 		make_req = hfi1_make_uc_req;
@@ -888,6 +575,8 @@

 	/* Return if we are already busy processing a work request. */
 	if (!hfi1_send_ok(qp)) {
+		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
 		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 		return;
 	}
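When the send engine cannot run because the QP is waiting on I/O, the added lines latch IOWAIT_PENDING_IB on the iowait before bailing out, so the eventual resource wakeup knows the IB (non-TID) leg still has work queued. A sketch of the presumed consumer on the wakeup side, assuming a matching iowait_flag_set() accessor exists alongside iowait_set_flag():

	/* On resource wakeup: restart only the legs marked pending. */
	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB))
		hfi1_schedule_send(qp);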
@@ -901,10 +590,12 @@
 	ps.pkts_sent = false;

 	/* ensure a pre-built packet is handled */
-	ps.s_txreq = get_waiting_verbs_txreq(qp);
+	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
 	do {
 		/* Check for a constructed packet to be sent. */
 		if (ps.s_txreq) {
+			if (priv->s_flags & HFI1_S_TID_BUSY_SET)
+				qp->s_flags |= RVT_S_BUSY;
 			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 			/*
 			 * If the packet cannot be sent now, return and
@@ -912,8 +603,9 @@
 			 */
 			if (hfi1_verbs_send(qp, &ps))
 				return;
+
 			/* allow other tasks to run */
-			if (schedule_send_yield(qp, &ps))
+			if (hfi1_schedule_send_yield(qp, &ps, false))
 				return;

 			spin_lock_irqsave(&qp->s_lock, ps.flags);
@@ -921,45 +613,4 @@
 	} while (make_req(qp, &ps));
 	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
 	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
-}
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-			enum ib_wc_status status)
-{
-	u32 old_last, last;
-
-	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-		return;
-
-	last = qp->s_last;
-	old_last = last;
-	trace_hfi1_qp_send_completion(qp, wqe, last);
-	if (++last >= qp->s_size)
-		last = 0;
-	trace_hfi1_qp_send_completion(qp, wqe, last);
-	qp->s_last = last;
-	/* See post_send() */
-	barrier();
-	rvt_put_swqe(wqe);
-	if (qp->ibqp.qp_type == IB_QPT_UD ||
-	    qp->ibqp.qp_type == IB_QPT_SMI ||
-	    qp->ibqp.qp_type == IB_QPT_GSI)
-		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
-	rvt_qp_swqe_complete(qp,
-			     wqe,
-			     ib_hfi1_wc_opcode[wqe->wr.opcode],
-			     status);
-
-	if (qp->s_acked == old_last)
-		qp->s_acked = last;
-	if (qp->s_cur == old_last)
-		qp->s_cur = last;
-	if (qp->s_tail == old_last)
-		qp->s_tail = last;
-	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-		qp->s_draining = 0;
 }
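The final hunk deletes hfi1_send_complete(), which advanced s_last past the finished SWQE, dropped the SWQE and UD AH references, emitted the completion via rvt_qp_swqe_complete(), and trailed s_acked/s_cur/s_tail along behind. Consistent with the rvt_ruc_loopback() switch above, this reads as part of moving common send-side logic into rdmavt; assuming the rdmavt counterpart is rvt_send_complete() with the same shape, a converted call site would look like:

	/* was: hfi1_send_complete(qp, wqe, IB_WC_SUCCESS); */
	rvt_send_complete(qp, wqe, IB_WC_SUCCESS);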