2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/infiniband/hw/cxgb4/ev.c
@@ -123,15 +123,15 @@
 	struct c4iw_qp *qhp;
 	u32 cqid;
 
-	spin_lock_irq(&dev->lock);
-	qhp = get_qhp(dev, CQE_QPID(err_cqe));
+	xa_lock_irq(&dev->qps);
+	qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
 	if (!qhp) {
 		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 		       CQE_QPID(err_cqe),
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock_irq(&dev->lock);
+		xa_unlock_irq(&dev->qps);
 		goto out;
 	}
 
@@ -146,13 +146,13 @@
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock_irq(&dev->lock);
+		xa_unlock_irq(&dev->qps);
 		goto out;
 	}
 
 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock_irq(&dev->lock);
+	xa_unlock_irq(&dev->qps);
 
 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
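
The two hunks above replace the device-wide spin_lock_irq(&dev->lock) plus get_qhp() lookup with xa_load() under the XArray's own IRQ-disabling lock, keeping the existing rule of taking the QP/CQ references before the lock is dropped. The insertion side of the conversion is not part of this excerpt; the sketch below only illustrates how a QP could be published into dev->qps so that the xa_load() above finds it. publish_qp() is a hypothetical helper, not code from this patch.

/*
 * Hypothetical insertion-side counterpart (not in this patch): publish a
 * QP into dev->qps so the xa_load() readers above can find it.  Assumes
 * dev->qps is a struct xarray and that iw_cxgb4.h declares struct
 * c4iw_dev and struct c4iw_qp.
 */
#include <linux/xarray.h>

#include "iw_cxgb4.h"

static int publish_qp(struct c4iw_dev *dev, u32 qpid, struct c4iw_qp *qhp)
{
	/*
	 * xa_store_irq() takes xa_lock_irq() internally, so it serializes
	 * against the xa_lock_irq()/xa_load() readers in the hunks above.
	 */
	void *old = xa_store_irq(&dev->qps, qpid, qhp, GFP_KERNEL);

	return xa_err(old);	/* 0 on success, negative errno on failure */
}

The reverse operation, xa_erase_irq(&dev->qps, qpid), would remove the QP from the array when it is destroyed.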
@@ -225,11 +225,11 @@
 	struct c4iw_cq *chp;
 	unsigned long flag;
 
-	spin_lock_irqsave(&dev->lock, flag);
-	chp = get_chp(dev, qid);
+	xa_lock_irqsave(&dev->cqs, flag);
+	chp = xa_load(&dev->cqs, qid);
 	if (chp) {
 		atomic_inc(&chp->refcnt);
-		spin_unlock_irqrestore(&dev->lock, flag);
+		xa_unlock_irqrestore(&dev->cqs, flag);
 		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
@@ -238,7 +238,7 @@
 		wake_up(&chp->wait);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
-		spin_unlock_irqrestore(&dev->lock, flag);
+		xa_unlock_irqrestore(&dev->cqs, flag);
 	}
 	return 0;
 }
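
For the xa_lock_irq() and xa_lock_irqsave() calls above to be valid, dev->qps and dev->cqs have to be XArrays initialized for IRQ-context locking during device setup; that part of the conversion is outside this excerpt. A plausible initialization, given as an assumption about the rest of the patch rather than as its actual code, looks like this:

#include <linux/xarray.h>

#include "iw_cxgb4.h"	/* assumed to declare struct c4iw_dev with qps/cqs members */

/* Hypothetical helper name; the real patch may well do this inline. */
static void c4iw_init_xarrays(struct c4iw_dev *dev)
{
	/*
	 * XA_FLAGS_LOCK_IRQ records that the lock is taken with interrupts
	 * disabled, so when the XArray has to drop and re-take it internally
	 * (e.g. to allocate nodes) it uses the matching _irq variants.
	 */
	xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&dev->cqs, XA_FLAGS_LOCK_IRQ);
}

On teardown, entries would typically be removed with xa_erase_irq() and any leftover state released with xa_destroy().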