```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
```
```diff
@@ -66,7 +66,7 @@
 static void flush_tx_list(struct rvt_qp *qp);
 static int iowait_sleep(
         struct sdma_engine *sde,
-        struct iowait *wait,
+        struct iowait_work *wait,
         struct sdma_txreq *stx,
         unsigned int seq,
         bool pkts_sent);
```
```diff
@@ -132,23 +132,41 @@
         .qpt_support = BIT(IB_QPT_RC),
 },
 
+[IB_WR_OPFN] = {
+        .length = sizeof(struct ib_atomic_wr),
+        .qpt_support = BIT(IB_QPT_RC),
+        .flags = RVT_OPERATION_USE_RESERVE,
+},
+
+[IB_WR_TID_RDMA_WRITE] = {
+        .length = sizeof(struct ib_rdma_wr),
+        .qpt_support = BIT(IB_QPT_RC),
+        .flags = RVT_OPERATION_IGN_RNR_CNT,
+},
+
 };
 
-static void flush_tx_list(struct rvt_qp *qp)
+static void flush_list_head(struct list_head *l)
 {
-        struct hfi1_qp_priv *priv = qp->priv;
-
-        while (!list_empty(&priv->s_iowait.tx_head)) {
+        while (!list_empty(l)) {
                 struct sdma_txreq *tx;
 
                 tx = list_first_entry(
-                        &priv->s_iowait.tx_head,
+                        l,
                         struct sdma_txreq,
                         list);
                 list_del_init(&tx->list);
                 hfi1_put_txreq(
                         container_of(tx, struct verbs_txreq, txreq));
         }
+}
+
+static void flush_tx_list(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+
+        flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
+        flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
+}
 
 static void flush_iowait(struct rvt_qp *qp)
```
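
The refactor above splits the per-QP flush into a generic list-drain helper so both the IB and the new TID RDMA work structures can share it. A minimal userspace model of the same pattern, using a hypothetical `txreq` node type in place of the kernel's `list_head`/`container_of` machinery:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an sdma_txreq queued on a tx_head list. */
struct txreq {
        int id;
        struct txreq *next;
};

/* Drain one list, releasing each entry (models flush_list_head()). */
static void flush_list_head(struct txreq **head)
{
        while (*head) {
                struct txreq *tx = *head;

                *head = tx->next;
                printf("releasing txreq %d\n", tx->id);
                free(tx);
        }
}

int main(void)
{
        struct txreq *ib_head = NULL, *tid_head = NULL;

        for (int i = 0; i < 3; i++) {
                struct txreq *tx = malloc(sizeof(*tx));

                tx->id = i;
                tx->next = ib_head;
                ib_head = tx;
        }
        /* Models flush_tx_list(): one helper, two per-QP work lists. */
        flush_list_head(&ib_head);
        flush_list_head(&tid_head);
        return 0;
}
```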
```diff
@@ -168,15 +186,6 @@
         write_sequnlock_irqrestore(lock, flags);
 }
 
-static inline int opa_mtu_enum_to_int(int mtu)
-{
-        switch (mtu) {
-        case OPA_MTU_8192: return 8192;
-        case OPA_MTU_10240: return 10240;
-        default: return -1;
-        }
-}
-
 /**
  * This function is what we would push to the core layer if we wanted to be a
  * "first class citizen". Instead we hide this here and rely on Verbs ULPs
```
```diff
@@ -184,15 +193,10 @@
  */
 static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
-        int val;
-
         /* Constraining 10KB packets to 8KB packets */
         if (mtu == (enum ib_mtu)OPA_MTU_10240)
-                mtu = OPA_MTU_8192;
-        val = opa_mtu_enum_to_int((int)mtu);
-        if (val > 0)
-                return val;
-        return ib_mtu_enum_to_int(mtu);
+                mtu = (enum ib_mtu)OPA_MTU_8192;
+        return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
```
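
The dropped local helper only knew the two OPA-specific encodings; the replacement leans on a shared `opa_mtu` decoder. A standalone sketch of the resulting clamp-then-decode behavior; the enum values follow the IB/OPA MTU encodings as I understand them and should be treated as illustrative:

```c
#include <stdio.h>

/* IB MTU encodings with the OPA extensions appended (illustrative). */
enum mtu_code {
        MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096,
        MTU_8192 = 7, MTU_10240 = 8,
};

static int mtu_enum_to_int(enum mtu_code mtu)
{
        switch (mtu) {
        case MTU_256:   return 256;
        case MTU_512:   return 512;
        case MTU_1024:  return 1024;
        case MTU_2048:  return 2048;
        case MTU_4096:  return 4096;
        case MTU_8192:  return 8192;
        case MTU_10240: return 10240;
        default:        return -1;
        }
}

/* Models verbs_mtu_enum_to_int(): 10KB packets are constrained to 8KB. */
static int verbs_mtu_to_int(enum mtu_code mtu)
{
        if (mtu == MTU_10240)
                mtu = MTU_8192;
        return mtu_enum_to_int(mtu);
}

int main(void)
{
        printf("%d\n", verbs_mtu_to_int(MTU_10240)); /* prints 8192 */
        return 0;
}
```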
```diff
@@ -279,41 +283,58 @@
                 priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
                 qp_set_16b(qp);
         }
+
+        opfn_qp_init(qp, attr, attr_mask);
 }
 
 /**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
  * @qp - The qp
  * @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled.
  *
- * validate wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 *
 */
-int hfi1_check_send_wqe(struct rvt_qp *qp,
-                        struct rvt_swqe *wqe)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 {
         struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
         struct rvt_ah *ah;
+        struct hfi1_pportdata *ppd;
+        struct hfi1_devdata *dd;
 
         switch (qp->ibqp.qp_type) {
         case IB_QPT_RC:
+                hfi1_setup_tid_rdma_wqe(qp, wqe);
+                fallthrough;
         case IB_QPT_UC:
                 if (wqe->length > 0x80000000U)
                         return -EINVAL;
+                if (wqe->length > qp->pmtu)
+                        *call_send = false;
                 break;
         case IB_QPT_SMI:
-                ah = ibah_to_rvtah(wqe->ud_wr.ah);
-                if (wqe->length > (1 << ah->log_pmtu))
+                /*
+                 * SM packets should exclusively use VL15 and their SL is
+                 * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
+                 * is created, SL is 0 in most cases and as a result some
+                 * fields (vl and pmtu) in ah may not be set correctly,
+                 * depending on the SL2SC and SC2VL tables at the time.
+                 */
+                ppd = ppd_from_ibp(ibp);
+                dd = dd_from_ppd(ppd);
+                if (wqe->length > dd->vld[15].mtu)
                         return -EINVAL;
                 break;
         case IB_QPT_GSI:
         case IB_QPT_UD:
-                ah = ibah_to_rvtah(wqe->ud_wr.ah);
+                ah = rvt_get_swqe_ah(wqe);
                 if (wqe->length > (1 << ah->log_pmtu))
                         return -EINVAL;
                 if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
```
```diff
@@ -321,7 +342,14 @@
         default:
                 break;
         }
-        return wqe->length <= piothreshold;
+
+        /*
+         * System latency between send and schedule is large enough that
+         * forcing call_send to true for piothreshold packets is necessary.
+         */
+        if (wqe->length <= piothreshold)
+                *call_send = true;
+        return 0;
 }
 
 /**
```
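
hfi1_setup_wqe now doubles as the place where the driver decides whether rdmavt should invoke the send engine directly or schedule it. A minimal sketch of just that decision, with the QP fields passed as plain parameters; the ordering mirrors the hunks above, where the pmtu check can veto a direct send and the piothreshold check then forces one:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Models the *call_send logic in hfi1_setup_wqe(): payloads above the
 * path MTU are handed to the scheduled send engine, while anything at
 * or below the PIO threshold is sent from the posting context, since
 * the send-to-schedule latency would dominate for small packets.
 */
static bool setup_wqe_call_send(unsigned int length, unsigned int pmtu,
                                unsigned int piothreshold, bool call_send)
{
        if (length > pmtu)
                call_send = false;
        if (length <= piothreshold)
                call_send = true;
        return call_send;
}

int main(void)
{
        /* 8192-byte message, 4096 pmtu, 256-byte PIO threshold. */
        printf("%d\n", setup_wqe_call_send(8192, 4096, 256, true));  /* 0 */
        printf("%d\n", setup_wqe_call_send(128, 4096, 256, false));  /* 1 */
        return 0;
}
```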
```diff
@@ -333,36 +361,37 @@
  * It is only used in the post send, which doesn't hold
  * the s_lock.
  */
-void _hfi1_schedule_send(struct rvt_qp *qp)
+bool _hfi1_schedule_send(struct rvt_qp *qp)
 {
         struct hfi1_qp_priv *priv = qp->priv;
         struct hfi1_ibport *ibp =
                 to_iport(qp->ibqp.device, qp->port_num);
         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+        struct hfi1_devdata *dd = ppd->dd;
 
-        iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
-                        priv->s_sde ?
-                        priv->s_sde->cpu :
-                        cpumask_first(cpumask_of_node(dd->node)));
+        if (dd->flags & HFI1_SHUTDOWN)
+                return true;
+
+        return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+                               priv->s_sde ?
+                               priv->s_sde->cpu :
+                               cpumask_first(cpumask_of_node(dd->node)));
 }
 
 static void qp_pio_drain(struct rvt_qp *qp)
 {
-        struct hfi1_ibdev *dev;
         struct hfi1_qp_priv *priv = qp->priv;
 
         if (!priv->s_sendcontext)
                 return;
-        dev = to_idev(qp->ibqp.device);
         while (iowait_pio_pending(&priv->s_iowait)) {
-                write_seqlock_irq(&dev->iowait_lock);
+                write_seqlock_irq(&priv->s_sendcontext->waitlock);
                 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
-                write_sequnlock_irq(&dev->iowait_lock);
+                write_sequnlock_irq(&priv->s_sendcontext->waitlock);
                 iowait_pio_drain(&priv->s_iowait);
-                write_seqlock_irq(&dev->iowait_lock);
+                write_seqlock_irq(&priv->s_sendcontext->waitlock);
                 hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
-                write_sequnlock_irq(&dev->iowait_lock);
+                write_sequnlock_irq(&priv->s_sendcontext->waitlock);
         }
 }
 
```
```diff
@@ -372,12 +401,37 @@
  *
  * This schedules qp progress and caller should hold
  * the s_lock.
+ * @return true if the first leg is scheduled;
+ * false if the first leg is not scheduled.
  */
-void hfi1_schedule_send(struct rvt_qp *qp)
+bool hfi1_schedule_send(struct rvt_qp *qp)
 {
         lockdep_assert_held(&qp->s_lock);
-        if (hfi1_send_ok(qp))
+        if (hfi1_send_ok(qp)) {
                 _hfi1_schedule_send(qp);
+                return true;
+        }
+        if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+                iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+                                IOWAIT_PENDING_IB);
+        return false;
+}
+
+static void hfi1_qp_schedule(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+        bool ret;
+
+        if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
+                ret = hfi1_schedule_send(qp);
+                if (ret)
+                        iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+        }
+        if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
+                ret = hfi1_schedule_tid_send(qp);
+                if (ret)
+                        iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+        }
 }
 
 void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
```
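
The bool return and the new hfi1_qp_schedule() form a latch-and-retry pair: when a leg cannot run, a pending bit is recorded in the iowait, and the wakeup path retries each leg and clears its bit only if the retry actually scheduled work. A userspace model of that pattern, assuming a simplified send-ok predicate (the real check also involves HFI1_S_ANY_WAIT_IO and the second leg):

```c
#include <stdbool.h>
#include <stdio.h>

#define IOWAIT_PENDING_IB  (1u << 0)
#define IOWAIT_PENDING_TID (1u << 1)

/* Toy QP: 'send_ok' stands in for hfi1_send_ok(), which checks s_flags. */
struct toy_qp {
        unsigned int iowait_flags;
        bool send_ok;
};

/* Models hfi1_schedule_send(): latch a pending bit when blocked. */
static bool toy_schedule_send(struct toy_qp *qp)
{
        if (qp->send_ok)
                return true;                    /* first leg queued */
        qp->iowait_flags |= IOWAIT_PENDING_IB;
        return false;                           /* caller sees nothing ran */
}

/* Models hfi1_qp_schedule(): retry latched legs, clear only on success. */
static void toy_qp_schedule(struct toy_qp *qp)
{
        if (qp->iowait_flags & IOWAIT_PENDING_IB) {
                if (toy_schedule_send(qp))
                        qp->iowait_flags &= ~IOWAIT_PENDING_IB;
        }
        /* The TID leg follows the same pattern with IOWAIT_PENDING_TID. */
}

int main(void)
{
        struct toy_qp qp = { .iowait_flags = 0, .send_ok = false };

        toy_schedule_send(&qp);   /* blocked: PENDING_IB latched */
        qp.send_ok = true;        /* wait condition lifted (wakeup) */
        toy_qp_schedule(&qp);     /* retried and cleared */
        printf("pending=%#x\n", qp.iowait_flags); /* pending=0 */
        return 0;
}
```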
```diff
@@ -388,16 +442,41 @@
         if (qp->s_flags & flag) {
                 qp->s_flags &= ~flag;
                 trace_hfi1_qpwakeup(qp, flag);
-                hfi1_schedule_send(qp);
+                hfi1_qp_schedule(qp);
         }
         spin_unlock_irqrestore(&qp->s_lock, flags);
         /* Notify hfi1_destroy_qp() if it is waiting. */
         rvt_put_qp(qp);
 }
 
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+
+        if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
+                qp->s_flags &= ~RVT_S_BUSY;
+                /*
+                 * If we are sending a first-leg packet from the second leg,
+                 * we need to clear the busy flag from priv->s_flags to
+                 * avoid a race condition when the qp wakes up before
+                 * the call to hfi1_verbs_send() returns to the second
+                 * leg. In that case, the second leg will terminate without
+                 * being re-scheduled, resulting in failure to send TID RDMA
+                 * WRITE DATA and TID RDMA ACK packets.
+                 */
+                if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
+                        priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
+                                           RVT_S_BUSY);
+                        iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
+                }
+        } else {
+                priv->s_flags &= ~RVT_S_BUSY;
+        }
+}
+
 static int iowait_sleep(
         struct sdma_engine *sde,
-        struct iowait *wait,
+        struct iowait_work *wait,
         struct sdma_txreq *stx,
         uint seq,
         bool pkts_sent)
```
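
hfi1_qp_unbusy() replaces the bare `qp->s_flags &= ~RVT_S_BUSY` because two work items can now own the busy bit. When the second (TID) leg had marked itself busy on behalf of a first-leg send (HFI1_S_TID_BUSY_SET), the unbusy path must clear both flags and re-latch the TID pending bit so the second leg is rescheduled instead of terminating quietly. A minimal flag model of that hand-off (bit values are illustrative, not the kernel's):

```c
#include <stdbool.h>

#define S_BUSY        (1u << 0)
#define TID_BUSY_SET  (1u << 1)
#define PENDING_TID   (1u << 2)

struct toy_unbusy {
        unsigned int qp_flags;    /* models qp->s_flags */
        unsigned int priv_flags;  /* models priv->s_flags */
        unsigned int iow_flags;   /* models the iowait pending flags */
};

/* Models hfi1_qp_unbusy(); 'ib_leg' says which work item is finishing. */
static void toy_qp_unbusy(struct toy_unbusy *m, bool ib_leg)
{
        if (ib_leg) {
                m->qp_flags &= ~S_BUSY;
                /*
                 * The TID leg borrowed the busy bit for a first-leg send:
                 * clear its copy too and re-latch so it is rescheduled.
                 */
                if (m->priv_flags & TID_BUSY_SET) {
                        m->priv_flags &= ~(TID_BUSY_SET | S_BUSY);
                        m->iow_flags |= PENDING_TID;
                }
        } else {
                m->priv_flags &= ~S_BUSY;
        }
}
```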
```diff
@@ -407,7 +486,6 @@
         struct hfi1_qp_priv *priv;
         unsigned long flags;
         int ret = 0;
-        struct hfi1_ibdev *dev;
 
         qp = tx->qp;
         priv = qp->priv;
```
```diff
@@ -420,9 +498,8 @@
                  * buffer and undoing the side effects of the copy.
                  */
                 /* Make a common routine? */
-                dev = &sde->dd->verbs_dev;
                 list_add_tail(&stx->list, &wait->tx_head);
-                write_seqlock(&dev->iowait_lock);
+                write_seqlock(&sde->waitlock);
                 if (sdma_progress(sde, seq, stx))
                         goto eagain;
                 if (list_empty(&priv->s_iowait.list)) {
```
```diff
@@ -431,14 +508,15 @@
 
                         ibp->rvp.n_dmawait++;
                         qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+                        iowait_get_priority(&priv->s_iowait);
                         iowait_queue(pkts_sent, &priv->s_iowait,
                                      &sde->dmawait);
-                        priv->s_iowait.lock = &dev->iowait_lock;
+                        priv->s_iowait.lock = &sde->waitlock;
                         trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                         rvt_get_qp(qp);
                 }
-                write_sequnlock(&dev->iowait_lock);
-                qp->s_flags &= ~RVT_S_BUSY;
+                write_sequnlock(&sde->waitlock);
+                hfi1_qp_unbusy(qp, wait);
                 spin_unlock_irqrestore(&qp->s_lock, flags);
                 ret = -EBUSY;
```
```diff
@@ -447,7 +525,7 @@
         }
         return ret;
 eagain:
-        write_sequnlock(&dev->iowait_lock);
+        write_sequnlock(&sde->waitlock);
         spin_unlock_irqrestore(&qp->s_lock, flags);
         list_del_init(&stx->list);
         return -EAGAIN;
```
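
A thread running through this hunk and the qp_pio_drain hunk above: the device-wide dev->iowait_lock is gone, and sleepers now take the lock owned by the resource they are actually waiting on (sde->waitlock here, the send context's waitlock in the PIO path), so QPs blocked on different engines no longer contend. A toy illustration of the narrowed lock scope, using hypothetical stand-in types:

```c
#include <pthread.h>
#include <stdio.h>

/*
 * Each engine now protects its own wait list; previously one global
 * lock covered every engine. Names model sde->waitlock and sde->dmawait.
 */
struct toy_engine {
        pthread_mutex_t waitlock;
        int n_waiters;
};

static void toy_queue_waiter(struct toy_engine *sde)
{
        pthread_mutex_lock(&sde->waitlock);   /* engine-local, not global */
        sde->n_waiters++;
        pthread_mutex_unlock(&sde->waitlock);
}

int main(void)
{
        struct toy_engine e0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct toy_engine e1 = { PTHREAD_MUTEX_INITIALIZER, 0 };

        toy_queue_waiter(&e0);  /* touches only e0's lock */
        toy_queue_waiter(&e1);  /* independent of e0 */
        printf("%d %d\n", e0.n_waiters, e1.n_waiters);
        return 0;
}
```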
```diff
@@ -478,6 +556,17 @@
                 hfi1_schedule_send(qp);
         }
         spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+static void hfi1_init_priority(struct iowait *w)
+{
+        struct rvt_qp *qp = iowait_to_qp(w);
+        struct hfi1_qp_priv *priv = qp->priv;
+
+        if (qp->s_flags & RVT_S_ACK_PENDING)
+                w->priority++;
+        if (priv->s_flags & RVT_S_ACK_PENDING)
+                w->priority++;
 }
 
 /**
```
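
This priority hook (consumed via iowait_get_priority() in the sleep path above and wired up in qp_priv_alloc below) lets waiters holding pending ACKs drain ahead of plain requesters; both the QP's s_flags and the driver-private s_flags can contribute a level. A standalone model, with an illustrative flag bit:

```c
#include <stdio.h>

#define ACK_PENDING (1u << 5)  /* illustrative bit, not the kernel value */

struct toy_iowait {
        unsigned int priority;
};

/* Models hfi1_init_priority(): each pending-ACK source adds one level. */
static void toy_init_priority(struct toy_iowait *w,
                              unsigned int s_flags, unsigned int priv_flags)
{
        if (s_flags & ACK_PENDING)
                w->priority++;
        if (priv_flags & ACK_PENDING)
                w->priority++;
}

int main(void)
{
        struct toy_iowait w = { 0 };

        toy_init_priority(&w, ACK_PENDING, ACK_PENDING);
        printf("priority=%u\n", w.priority); /* priority=2 */
        return 0;
}
```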
602 | 691 | sde ? sde->this_idx : 0, |
---|
603 | 692 | send_context, |
---|
604 | 693 | send_context ? send_context->sw_index : 0, |
---|
605 | | - ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head, |
---|
606 | | - ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail, |
---|
| 694 | + ib_cq_head(qp->ibqp.send_cq), |
---|
| 695 | + ib_cq_tail(qp->ibqp.send_cq), |
---|
607 | 696 | qp->pid, |
---|
608 | 697 | qp->s_state, |
---|
609 | 698 | qp->s_ack_state, |
---|
```diff
@@ -637,8 +726,12 @@
                     &priv->s_iowait,
                     1,
                     _hfi1_do_send,
+                    _hfi1_do_tid_send,
                     iowait_sleep,
                     iowait_wakeup,
-                    iowait_sdma_drained);
+                    iowait_sdma_drained,
+                    hfi1_init_priority);
+        /* Init to a value to start the running average correctly */
+        priv->s_running_pkt_size = piothreshold / 2;
         return priv;
 }
```
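
Seeding s_running_pkt_size at piothreshold / 2 keeps the first PIO-vs-SDMA decisions neutral until real samples arrive. A sketch assuming an equal-weight running update (the driver updates this field elsewhere on each send; the exact form there may differ):

```c
#include <stdio.h>

/* Assumed equal-weight running update: avg' = (avg + sample) / 2. */
static unsigned int running_update(unsigned int avg, unsigned int sample)
{
        return (avg + sample) / 2;
}

int main(void)
{
        unsigned int piothreshold = 256;     /* example threshold value */
        unsigned int avg = piothreshold / 2; /* the seed above */

        avg = running_update(avg, 64);
        avg = running_update(avg, 8192);
        printf("avg=%u\n", avg);
        return 0;
}
```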
645 | 738 | |
---|
.. | .. |
---|
647 | 740 | { |
---|
648 | 741 | struct hfi1_qp_priv *priv = qp->priv; |
---|
649 | 742 | |
---|
| 743 | + hfi1_qp_priv_tid_free(rdi, qp); |
---|
650 | 744 | kfree(priv->s_ahg); |
---|
651 | 745 | kfree(priv); |
---|
652 | 746 | } |
---|
```diff
@@ -680,19 +774,24 @@
 {
         lockdep_assert_held(&qp->s_lock);
         flush_iowait(qp);
+        hfi1_tid_rdma_flush_wait(qp);
 }
 
 void stop_send_queue(struct rvt_qp *qp)
 {
         struct hfi1_qp_priv *priv = qp->priv;
 
-        cancel_work_sync(&priv->s_iowait.iowork);
+        iowait_cancel_work(&priv->s_iowait);
+        if (cancel_work_sync(&priv->tid_rdma.trigger_work))
+                rvt_put_qp(qp);
 }
 
 void quiesce_qp(struct rvt_qp *qp)
 {
         struct hfi1_qp_priv *priv = qp->priv;
 
+        hfi1_del_tid_reap_timer(qp);
+        hfi1_del_tid_retry_timer(qp);
         iowait_sdma_drain(&priv->s_iowait);
         qp_pio_drain(qp);
         flush_tx_list(qp);
```
```diff
@@ -700,8 +799,13 @@
 
 void notify_qp_reset(struct rvt_qp *qp)
 {
+        hfi1_qp_kern_exp_rcv_clear_all(qp);
         qp->r_adefered = 0;
         clear_ahg(qp);
+
+        /* Clear any OPFN state */
+        if (qp->ibqp.qp_type == IB_QPT_RC)
+                opfn_conn_error(qp);
 }
 
 /*
```
```diff
@@ -783,8 +887,11 @@
         if (lock) {
                 write_seqlock(lock);
                 if (!list_empty(&priv->s_iowait.list) &&
-                    !(qp->s_flags & RVT_S_BUSY)) {
+                    !(qp->s_flags & RVT_S_BUSY) &&
+                    !(priv->s_flags & RVT_S_BUSY)) {
                         qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+                        iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+                        iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
                         list_del_init(&priv->s_iowait.list);
                         priv->s_iowait.lock = NULL;
                         rvt_put_qp(qp);
```
```diff
@@ -792,7 +899,8 @@
                 write_sequnlock(lock);
         }
 
-        if (!(qp->s_flags & RVT_S_BUSY)) {
+        if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
+                qp->s_hdrwords = 0;
                 if (qp->s_rdma_mr) {
                         rvt_put_mr(qp->s_rdma_mr);
                         qp->s_rdma_mr = NULL;
```
---|