2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/qib/qib_ruc.c
@@ -171,312 +171,6 @@
 }
 
 /**
- * qib_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from qib_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void qib_ruc_loopback(struct rvt_qp *sqp)
-{
-	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
-	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-	struct qib_devdata *dd = ppd->dd;
-	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
-	struct rvt_qp *qp;
-	struct rvt_swqe *wqe;
-	struct rvt_sge *sge;
-	unsigned long flags;
-	struct ib_wc wc;
-	u64 sdata;
-	atomic64_t *maddr;
-	enum ib_wc_status send_status;
-	int release;
-	int ret;
-
-	rcu_read_lock();
-	/*
-	 * Note that we check the responder QP state after
-	 * checking the requester's state.
-	 */
-	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
-	if (!qp)
-		goto done;
-
-	spin_lock_irqsave(&sqp->s_lock, flags);
-
-	/* Return if we are already busy processing a work request. */
-	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
-	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-		goto unlock;
-
-	sqp->s_flags |= RVT_S_BUSY;
-
-again:
-	if (sqp->s_last == READ_ONCE(sqp->s_head))
-		goto clr_busy;
-	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
-	/* Return if it is not OK to start a new work request. */
-	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
-		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
-			goto clr_busy;
-		/* We are in the error state, flush the work request. */
-		send_status = IB_WC_WR_FLUSH_ERR;
-		goto flush_send;
-	}
-
-	/*
-	 * We can rely on the entry not changing without the s_lock
-	 * being held until we update s_last.
-	 * We increment s_cur to indicate s_last is in progress.
-	 */
-	if (sqp->s_last == sqp->s_cur) {
-		if (++sqp->s_cur >= sqp->s_size)
-			sqp->s_cur = 0;
-	}
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-
-	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
-	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
-		ibp->rvp.n_pkt_drops++;
-		/*
-		 * For RC, the requester would timeout and retry so
-		 * shortcut the timeouts and just signal too many retries.
-		 */
-		if (sqp->ibqp.qp_type == IB_QPT_RC)
-			send_status = IB_WC_RETRY_EXC_ERR;
-		else
-			send_status = IB_WC_SUCCESS;
-		goto serr;
-	}
-
-	memset(&wc, 0, sizeof(wc));
-	send_status = IB_WC_SUCCESS;
-
-	release = 1;
-	sqp->s_sge.sge = wqe->sg_list[0];
-	sqp->s_sge.sg_list = wqe->sg_list + 1;
-	sqp->s_sge.num_sge = wqe->wr.num_sge;
-	sqp->s_len = wqe->length;
-	switch (wqe->wr.opcode) {
-	case IB_WR_SEND_WITH_IMM:
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		/* FALLTHROUGH */
-	case IB_WR_SEND:
-		ret = rvt_get_rwqe(qp, false);
-		if (ret < 0)
-			goto op_err;
-		if (!ret)
-			goto rnr_nak;
-		if (wqe->length > qp->r_len)
-			goto inv_err;
-		break;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-		wc.wc_flags = IB_WC_WITH_IMM;
-		wc.ex.imm_data = wqe->wr.ex.imm_data;
-		ret = rvt_get_rwqe(qp, true);
-		if (ret < 0)
-			goto op_err;
-		if (!ret)
-			goto rnr_nak;
-		/* FALLTHROUGH */
-	case IB_WR_RDMA_WRITE:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-			goto inv_err;
-		if (wqe->length == 0)
-			break;
-		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
-					  wqe->rdma_wr.remote_addr,
-					  wqe->rdma_wr.rkey,
-					  IB_ACCESS_REMOTE_WRITE)))
-			goto acc_err;
-		qp->r_sge.sg_list = NULL;
-		qp->r_sge.num_sge = 1;
-		qp->r_sge.total_len = wqe->length;
-		break;
-
-	case IB_WR_RDMA_READ:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-			goto inv_err;
-		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
-					  wqe->rdma_wr.remote_addr,
-					  wqe->rdma_wr.rkey,
-					  IB_ACCESS_REMOTE_READ)))
-			goto acc_err;
-		release = 0;
-		sqp->s_sge.sg_list = NULL;
-		sqp->s_sge.num_sge = 1;
-		qp->r_sge.sge = wqe->sg_list[0];
-		qp->r_sge.sg_list = wqe->sg_list + 1;
-		qp->r_sge.num_sge = wqe->wr.num_sge;
-		qp->r_sge.total_len = wqe->length;
-		break;
-
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
-			goto inv_err;
-		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
-					  wqe->atomic_wr.remote_addr,
-					  wqe->atomic_wr.rkey,
-					  IB_ACCESS_REMOTE_ATOMIC)))
-			goto acc_err;
-		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-		sdata = wqe->atomic_wr.compare_add;
-		*(u64 *) sqp->s_sge.sge.vaddr =
-			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-			(u64) atomic64_add_return(sdata, maddr) - sdata :
-			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-				      sdata, wqe->atomic_wr.swap);
-		rvt_put_mr(qp->r_sge.sge.mr);
-		qp->r_sge.num_sge = 0;
-		goto send_comp;
-
-	default:
-		send_status = IB_WC_LOC_QP_OP_ERR;
-		goto serr;
-	}
-
-	sge = &sqp->s_sge.sge;
-	while (sqp->s_len) {
-		u32 len = sqp->s_len;
-
-		if (len > sge->length)
-			len = sge->length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (!release)
-				rvt_put_mr(sge->mr);
-			if (--sqp->s_sge.num_sge)
-				*sge = *sqp->s_sge.sg_list++;
-		} else if (sge->length == 0 && sge->mr->lkey) {
-			if (++sge->n >= RVT_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		sqp->s_len -= len;
-	}
-	if (release)
-		rvt_put_ss(&qp->r_sge);
-
-	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
-		goto send_comp;
-
-	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-	else
-		wc.opcode = IB_WC_RECV;
-	wc.wr_id = qp->r_wr_id;
-	wc.status = IB_WC_SUCCESS;
-	wc.byte_len = wqe->length;
-	wc.qp = &qp->ibqp;
-	wc.src_qp = qp->remote_qpn;
-	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
-	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
-	wc.port_num = 1;
-	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	ibp->rvp.n_loop_pkts++;
-flush_send:
-	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-	qib_send_complete(sqp, wqe, send_status);
-	goto again;
-
-rnr_nak:
-	/* Handle RNR NAK */
-	if (qp->ibqp.qp_type == IB_QPT_UC)
-		goto send_comp;
-	ibp->rvp.n_rnr_naks++;
-	/*
-	 * Note: we don't need the s_lock held since the BUSY flag
-	 * makes this single threaded.
-	 */
-	if (sqp->s_rnr_retry == 0) {
-		send_status = IB_WC_RNR_RETRY_EXC_ERR;
-		goto serr;
-	}
-	if (sqp->s_rnr_retry_cnt < 7)
-		sqp->s_rnr_retry--;
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
-		goto clr_busy;
-	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
-				IB_AETH_CREDIT_SHIFT);
-	goto clr_busy;
-
-op_err:
-	send_status = IB_WC_REM_OP_ERR;
-	wc.status = IB_WC_LOC_QP_OP_ERR;
-	goto err;
-
-inv_err:
-	send_status =
-		sqp->ibqp.qp_type == IB_QPT_RC ?
-			IB_WC_REM_INV_REQ_ERR :
-			IB_WC_SUCCESS;
-	wc.status = IB_WC_LOC_QP_OP_ERR;
-	goto err;
-
-acc_err:
-	send_status = IB_WC_REM_ACCESS_ERR;
-	wc.status = IB_WC_LOC_PROT_ERR;
-err:
-	/* responder goes to error state */
-	rvt_rc_error(qp, wc.status);
-
-serr:
-	spin_lock_irqsave(&sqp->s_lock, flags);
-	qib_send_complete(sqp, wqe, send_status);
-	if (sqp->ibqp.qp_type == IB_QPT_RC) {
-		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
-		sqp->s_flags &= ~RVT_S_BUSY;
-		spin_unlock_irqrestore(&sqp->s_lock, flags);
-		if (lastwqe) {
-			struct ib_event ev;
-
-			ev.device = sqp->ibqp.device;
-			ev.element.qp = &sqp->ibqp;
-			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
-		}
-		goto done;
-	}
-clr_busy:
-	sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
-	spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
-	rcu_read_unlock();
-}
-
-/**
  * qib_make_grh - construct a GRH header
  * @ibp: a pointer to the IB port
  * @hdr: a pointer to the GRH header being constructed
@@ -578,7 +272,7 @@
 	     qp->ibqp.qp_type == IB_QPT_UC) &&
 	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
 	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
-		qib_ruc_loopback(qp);
+		rvt_ruc_loopback(qp);
 		return;
 	}
 
@@ -617,43 +311,4 @@
 	} while (make_req(qp, &flags));
 
 	spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-		       enum ib_wc_status status)
-{
-	u32 old_last, last;
-
-	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-		return;
-
-	last = qp->s_last;
-	old_last = last;
-	if (++last >= qp->s_size)
-		last = 0;
-	qp->s_last = last;
-	/* See post_send() */
-	barrier();
-	rvt_put_swqe(wqe);
-	if (qp->ibqp.qp_type == IB_QPT_UD ||
-	    qp->ibqp.qp_type == IB_QPT_SMI ||
-	    qp->ibqp.qp_type == IB_QPT_GSI)
-		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
-	rvt_qp_swqe_complete(qp,
-			     wqe,
-			     ib_qib_wc_opcode[wqe->wr.opcode],
-			     status);
-
-	if (qp->s_acked == old_last)
-		qp->s_acked = last;
-	if (qp->s_cur == old_last)
-		qp->s_cur = last;
-	if (qp->s_tail == old_last)
-		qp->s_tail = last;
-	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-		qp->s_draining = 0;
 }
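
The atomic case removed in the first hunk implements both IB atomic opcodes so that the value written back to the requester (through sqp->s_sge.sge.vaddr) is the value the target held before the operation. Below is a minimal user-space C11 sketch of those semantics, using stdatomic in place of the kernel's atomic64_add_return()/cmpxchg(); the function names and the standalone main() are illustrative, not kernel API.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* IB_WR_ATOMIC_FETCH_AND_ADD: add compare_add, return the prior value.
 * atomic64_add_return() yields the *new* value, which is why the removed
 * code subtracts sdata afterwards; atomic_fetch_add() returns the old
 * value directly. */
static uint64_t fetch_and_add(_Atomic uint64_t *target, uint64_t compare_add)
{
	return atomic_fetch_add(target, compare_add);
}

/* IB_WR_ATOMIC_CMP_AND_SWP: store "swap" iff the target equals
 * compare_add; either way, return the prior value, exactly as cmpxchg()
 * does in the removed code. */
static uint64_t cmp_and_swp(_Atomic uint64_t *target, uint64_t compare_add,
			    uint64_t swap)
{
	uint64_t old = compare_add;

	/* On failure, atomic_compare_exchange_strong() writes the current
	 * value into "old"; on success, "old" already holds it. */
	atomic_compare_exchange_strong(target, &old, swap);
	return old;
}

int main(void)
{
	_Atomic uint64_t mem = 5;

	assert(fetch_and_add(&mem, 3) == 5 && mem == 8);
	assert(cmp_and_swp(&mem, 8, 42) == 8 && mem == 42);	/* swap happens */
	assert(cmp_and_swp(&mem, 8, 99) == 42 && mem == 42);	/* no swap */
	return 0;
}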
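
The call-site hunk keeps the existing loopback fast path: a send is looped back in software (now via the rdmavt helper rvt_ruc_loopback()) when the destination LID, with its low LMC bits masked off, equals the port's base LID. A small standalone sketch of that test follows; dlid_is_local() and its parameters are hypothetical stand-ins for rdma_ah_get_dlid(&qp->remote_ah_attr), ppd->lid and ppd->lmc.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* With LID mask control (LMC), a port answers 2^lmc consecutive LIDs
 * starting at its base LID, so clearing the low lmc bits of the
 * destination LID recovers the base LID being addressed. */
static bool dlid_is_local(uint16_t dlid, uint16_t base_lid, uint8_t lmc)
{
	return (uint16_t)(dlid & ~((1u << lmc) - 1)) == base_lid;
}

int main(void)
{
	/* Base LID 0x10 with lmc = 2 covers LIDs 0x10..0x13. */
	assert(dlid_is_local(0x12, 0x10, 2));
	assert(!dlid_is_local(0x14, 0x10, 2));
	return 0;
}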
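
The last hunk drops qib_send_complete(), whose core is a circular send-queue advance: completing the oldest entry moves s_last forward with wraparound at s_size, and any cursor still pointing at the completed slot (s_acked, s_cur, s_tail) is dragged along. A minimal sketch of just that index arithmetic, assuming a stand-in struct rather than the kernel's struct rvt_qp:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the send-queue cursors kept in struct rvt_qp. */
struct sq_cursors {
	uint32_t s_size;	/* number of slots in the send queue */
	uint32_t s_last;	/* oldest work request not yet completed */
	uint32_t s_acked;	/* oldest work request not yet ACKed (RC) */
	uint32_t s_cur;		/* next work request to process */
	uint32_t s_tail;	/* next slot post_send() will fill */
};

static void sq_complete_one(struct sq_cursors *sq)
{
	uint32_t old_last = sq->s_last;
	uint32_t last = old_last;

	/* Advance with wraparound, as in "if (++last >= qp->s_size)". */
	if (++last >= sq->s_size)
		last = 0;
	sq->s_last = last;

	/* Cursors left pointing at the completed slot move with it. */
	if (sq->s_acked == old_last)
		sq->s_acked = last;
	if (sq->s_cur == old_last)
		sq->s_cur = last;
	if (sq->s_tail == old_last)
		sq->s_tail = last;
}

int main(void)
{
	struct sq_cursors sq = { .s_size = 4, .s_last = 3,
				 .s_acked = 3, .s_cur = 0, .s_tail = 1 };

	sq_complete_one(&sq);
	assert(sq.s_last == 0 && sq.s_acked == 0);	/* wrapped and dragged */
	assert(sq.s_cur == 0 && sq.s_tail == 1);	/* untouched */
	return 0;
}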