| .. | .. |
|---|
| 35 | 35 | #include <linux/platform_device.h> |
|---|
| 36 | 36 | #include <rdma/ib_addr.h> |
|---|
| 37 | 37 | #include <rdma/ib_umem.h> |
|---|
| 38 | +#include <rdma/uverbs_ioctl.h> |
|---|
| 38 | 39 | #include "hns_roce_common.h" |
|---|
| 39 | 40 | #include "hns_roce_device.h" |
|---|
| 40 | 41 | #include "hns_roce_hem.h" |
|---|
| 41 | 42 | #include <rdma/hns-abi.h> |
|---|
| 42 | 43 | |
|---|
| 43 | | -#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS) |
|---|
| 44 | +static void flush_work_handle(struct work_struct *work) |
|---|
| 45 | +{ |
|---|
| 46 | + struct hns_roce_work *flush_work = container_of(work, |
|---|
| 47 | + struct hns_roce_work, work); |
|---|
| 48 | + struct hns_roce_qp *hr_qp = container_of(flush_work, |
|---|
| 49 | + struct hns_roce_qp, flush_work); |
|---|
| 50 | + struct device *dev = flush_work->hr_dev->dev; |
|---|
| 51 | + struct ib_qp_attr attr; |
|---|
| 52 | + int attr_mask; |
|---|
| 53 | + int ret; |
|---|
| 54 | + |
|---|
| 55 | + attr_mask = IB_QP_STATE; |
|---|
| 56 | + attr.qp_state = IB_QPS_ERR; |
|---|
| 57 | + |
|---|
| 58 | + if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { |
|---|
| 59 | + ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); |
|---|
| 60 | + if (ret) |
|---|
| 61 | + dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", |
|---|
| 62 | + ret); |
|---|
| 63 | + } |
|---|
| 64 | + |
|---|
| 65 | + /* |
|---|
| 66 | + * make sure the QP destroy path is signalled that the flush has |
|---|
| 67 | + * completed, so it can now safely go ahead and destroy the QP |
|---|
| 68 | + */ |
|---|
| 69 | + if (atomic_dec_and_test(&hr_qp->refcount)) |
|---|
| 70 | + complete(&hr_qp->free); |
|---|
| 71 | +} |
|---|
| 72 | + |
|---|
| 73 | +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 74 | +{ |
|---|
| 75 | + struct hns_roce_work *flush_work = &hr_qp->flush_work; |
|---|
| 76 | + |
|---|
| 77 | + flush_work->hr_dev = hr_dev; |
|---|
| 78 | + INIT_WORK(&flush_work->work, flush_work_handle); |
|---|
| 79 | + atomic_inc(&hr_qp->refcount); |
|---|
| 80 | + queue_work(hr_dev->irq_workq, &flush_work->work); |
|---|
| 81 | +} |
|---|
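
The flush handler above shows a pattern used throughout this patch: the trigger side takes a reference on the QP before queueing the work and the handler drops it, so QP destruction (which waits on `hr_qp->free`) can never race with an in-flight flush, while the `HNS_ROCE_FLUSH_FLAG` bit collapses repeated triggers into a single queued work. A minimal sketch of the same pattern, with illustrative names (`obj` and `OBJ_FLUSH_PENDING` are not the driver's):

```c
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/completion.h>

struct obj {
	struct work_struct work;
	atomic_t refcount;      /* the destroy path waits for this to hit 0 */
	struct completion free; /* initialised at object creation */
	unsigned long flags;
};
#define OBJ_FLUSH_PENDING 0

static void obj_flush_handler(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	if (test_and_clear_bit(OBJ_FLUSH_PENDING, &o->flags)) {
		/* perform the deferred state transition here */
	}

	/* drop the reference taken when the work was queued */
	if (atomic_dec_and_test(&o->refcount))
		complete(&o->free);
}

static void obj_schedule_flush(struct workqueue_struct *wq, struct obj *o)
{
	/* collapse concurrent triggers into one queued work */
	if (test_and_set_bit(OBJ_FLUSH_PENDING, &o->flags))
		return;

	atomic_inc(&o->refcount); /* pin the object for the handler */
	INIT_WORK(&o->work, obj_flush_handler);
	queue_work(wq, &o->work);
}
```
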
| 44 | 82 | |
|---|
| 45 | 83 | void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) |
|---|
| 46 | 84 | { |
|---|
| 47 | | - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 48 | 85 | struct device *dev = hr_dev->dev; |
|---|
| 49 | 86 | struct hns_roce_qp *qp; |
|---|
| 50 | 87 | |
|---|
| 51 | | - spin_lock(&qp_table->lock); |
|---|
| 52 | | - |
|---|
| 88 | + xa_lock(&hr_dev->qp_table_xa); |
|---|
| 53 | 89 | qp = __hns_roce_qp_lookup(hr_dev, qpn); |
|---|
| 54 | 90 | if (qp) |
|---|
| 55 | 91 | atomic_inc(&qp->refcount); |
|---|
| 56 | | - |
|---|
| 57 | | - spin_unlock(&qp_table->lock); |
|---|
| 92 | + xa_unlock(&hr_dev->qp_table_xa); |
|---|
| 58 | 93 | |
|---|
| 59 | 94 | if (!qp) { |
|---|
| 60 | 95 | dev_warn(dev, "Async event for bogus QP %08x\n", qpn); |
|---|
| 61 | 96 | return; |
|---|
| 97 | + } |
|---|
| 98 | + |
|---|
| 99 | + if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && |
|---|
| 100 | + (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || |
|---|
| 101 | + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || |
|---|
| 102 | + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) { |
|---|
| 103 | + qp->state = IB_QPS_ERR; |
|---|
| 104 | + if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) |
|---|
| 105 | + init_flush_work(hr_dev, qp); |
|---|
| 62 | 106 | } |
|---|
| 63 | 107 | |
|---|
| 64 | 108 | qp->event(qp, (enum hns_roce_event)event_type); |
|---|
| .. | .. |
|---|
| 66 | 110 | if (atomic_dec_and_test(&qp->refcount)) |
|---|
| 67 | 111 | complete(&qp->free); |
|---|
| 68 | 112 | } |
|---|
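
The radix tree guarded by a separate `qp_table->lock` is replaced by an XArray, whose built-in lock gives the same lookup-then-pin idiom with less boilerplate. The idiom in isolation (a sketch; `qp_xa` and `obj` are illustrative, not the driver's types):

```c
#include <linux/xarray.h>
#include <linux/atomic.h>

struct obj {
	atomic_t refcount;
};

static DEFINE_XARRAY(qp_xa);

/* look up an entry and pin it before the xarray lock is dropped */
static struct obj *obj_get(unsigned long id)
{
	struct obj *o;

	xa_lock(&qp_xa);
	o = xa_load(&qp_xa, id);
	if (o)
		atomic_inc(&o->refcount);
	xa_unlock(&qp_xa);

	return o;
}
```
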
| 69 | | -EXPORT_SYMBOL_GPL(hns_roce_qp_event); |
|---|
| 70 | 113 | |
|---|
| 71 | 114 | static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, |
|---|
| 72 | 115 | enum hns_roce_event type) |
|---|
| 73 | 116 | { |
|---|
| 74 | | - struct ib_event event; |
|---|
| 75 | 117 | struct ib_qp *ibqp = &hr_qp->ibqp; |
|---|
| 118 | + struct ib_event event; |
|---|
| 76 | 119 | |
|---|
| 77 | 120 | if (ibqp->event_handler) { |
|---|
| 78 | 121 | event.device = ibqp->device; |
|---|
| .. | .. |
|---|
| 111 | 154 | } |
|---|
| 112 | 155 | } |
|---|
| 113 | 156 | |
|---|
| 114 | | -static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, |
|---|
| 115 | | - int align, unsigned long *base) |
|---|
| 157 | +static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 116 | 158 | { |
|---|
| 117 | | - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 159 | + unsigned long num = 0; |
|---|
| 160 | + int ret; |
|---|
| 118 | 161 | |
|---|
| 119 | | - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, |
|---|
| 120 | | - base) ? |
|---|
| 121 | | - -ENOMEM : |
|---|
| 122 | | - 0; |
|---|
| 162 | + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { |
|---|
| 163 | + /* in v1 hardware, the GSI QP number is fixed per physical port */ |
|---|
| 164 | + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) |
|---|
| 165 | + num = HNS_ROCE_MAX_PORTS + |
|---|
| 166 | + hr_dev->iboe.phy_port[hr_qp->port]; |
|---|
| 167 | + else |
|---|
| 168 | + num = 1; |
|---|
| 169 | + |
|---|
| 170 | + hr_qp->doorbell_qpn = 1; |
|---|
| 171 | + } else { |
|---|
| 172 | + ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap, |
|---|
| 173 | + 1, 1, &num); |
|---|
| 174 | + if (ret) { |
|---|
| 175 | + ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n"); |
|---|
| 176 | + return -ENOMEM; |
|---|
| 177 | + } |
|---|
| 178 | + |
|---|
| 179 | + hr_qp->doorbell_qpn = (u32)num; |
|---|
| 180 | + } |
|---|
| 181 | + |
|---|
| 182 | + hr_qp->qpn = num; |
|---|
| 183 | + |
|---|
| 184 | + return 0; |
|---|
| 123 | 185 | } |
|---|
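
`alloc_qpn()` now folds QPN selection into one place: GSI QPs get a fixed number (derived from the physical port on v1, `1` otherwise), while ordinary QPs draw from the driver's bitmap allocator. The allocator itself is driver-internal; a toy equivalent built on the kernel bitmap API looks roughly like this (illustrative 1024-entry table; the real `hns_roce_bitmap_alloc_range()` also handles counts, alignment and a round-robin hint):

```c
#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define TOY_IDS 1024

static DEFINE_SPINLOCK(toy_lock);
static unsigned long toy_table[BITS_TO_LONGS(TOY_IDS)];

static int toy_alloc_id(unsigned long *out)
{
	unsigned long id;

	spin_lock(&toy_lock);
	id = find_first_zero_bit(toy_table, TOY_IDS);
	if (id < TOY_IDS)
		__set_bit(id, toy_table);
	spin_unlock(&toy_lock);

	if (id >= TOY_IDS)
		return -ENOMEM;

	*out = id;
	return 0;
}
```
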
| 124 | 186 | |
|---|
| 125 | 187 | enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) |
|---|
| .. | .. |
|---|
| 141 | 203 | return HNS_ROCE_QP_NUM_STATE; |
|---|
| 142 | 204 | } |
|---|
| 143 | 205 | } |
|---|
| 144 | | -EXPORT_SYMBOL_GPL(to_hns_roce_state); |
|---|
| 145 | 206 | |
|---|
| 146 | | -static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, |
|---|
| 147 | | - struct hns_roce_qp *hr_qp) |
|---|
| 207 | +static void add_qp_to_list(struct hns_roce_dev *hr_dev, |
|---|
| 208 | + struct hns_roce_qp *hr_qp, |
|---|
| 209 | + struct ib_cq *send_cq, struct ib_cq *recv_cq) |
|---|
| 148 | 210 | { |
|---|
| 149 | | - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 211 | + struct hns_roce_cq *hr_send_cq, *hr_recv_cq; |
|---|
| 212 | + unsigned long flags; |
|---|
| 213 | + |
|---|
| 214 | + hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; |
|---|
| 215 | + hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; |
|---|
| 216 | + |
|---|
| 217 | + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); |
|---|
| 218 | + hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); |
|---|
| 219 | + |
|---|
| 220 | + list_add_tail(&hr_qp->node, &hr_dev->qp_list); |
|---|
| 221 | + if (hr_send_cq) |
|---|
| 222 | + list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); |
|---|
| 223 | + if (hr_recv_cq) |
|---|
| 224 | + list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); |
|---|
| 225 | + |
|---|
| 226 | + hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); |
|---|
| 227 | + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); |
|---|
| 228 | +} |
|---|
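
`add_qp_to_list()` nests three locks: the device-wide `qp_list_lock` on the outside, then both CQ locks via `hns_roce_lock_cqs()`. Taking two peer locks without deadlocking requires a fixed acquisition order; a common sketch of that discipline (hedged — the driver's helper may order by CQ number rather than by address, and must also handle send CQ == recv CQ):

```c
#include <linux/spinlock.h>

static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
	} else if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}
```
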
| 229 | + |
|---|
| 230 | +static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, |
|---|
| 231 | + struct hns_roce_qp *hr_qp, |
|---|
| 232 | + struct ib_qp_init_attr *init_attr) |
|---|
| 233 | +{ |
|---|
| 234 | + struct xarray *xa = &hr_dev->qp_table_xa; |
|---|
| 150 | 235 | int ret; |
|---|
| 151 | 236 | |
|---|
| 152 | | - if (!qpn) |
|---|
| 237 | + if (!hr_qp->qpn) |
|---|
| 153 | 238 | return -EINVAL; |
|---|
| 154 | 239 | |
|---|
| 155 | | - hr_qp->qpn = qpn; |
|---|
| 156 | | - |
|---|
| 157 | | - spin_lock_irq(&qp_table->lock); |
|---|
| 158 | | - ret = radix_tree_insert(&hr_dev->qp_table_tree, |
|---|
| 159 | | - hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); |
|---|
| 160 | | - spin_unlock_irq(&qp_table->lock); |
|---|
| 161 | | - if (ret) { |
|---|
| 162 | | - dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n"); |
|---|
| 163 | | - goto err_put_irrl; |
|---|
| 164 | | - } |
|---|
| 165 | | - |
|---|
| 166 | | - atomic_set(&hr_qp->refcount, 1); |
|---|
| 167 | | - init_completion(&hr_qp->free); |
|---|
| 168 | | - |
|---|
| 169 | | - return 0; |
|---|
| 170 | | - |
|---|
| 171 | | -err_put_irrl: |
|---|
| 240 | + ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); |
|---|
| 241 | + if (ret) |
|---|
| 242 | + dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); |
|---|
| 243 | + else |
|---|
| 244 | + /* add QP to device's QP list for software CQ handling */ |
|---|
| 245 | + add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, |
|---|
| 246 | + init_attr->recv_cq); |
|---|
| 172 | 247 | |
|---|
| 173 | 248 | return ret; |
|---|
| 174 | 249 | } |
|---|
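
`xa_store_irq()` returns either the previous entry or an error encoded as a pointer, so the result is funnelled through `xa_err()` to recover a plain errno; the matching irq-safe erase appears below in `hns_roce_qp_remove()`. Both halves condensed (illustrative wrapper names):

```c
#include <linux/xarray.h>

static int obj_store(struct xarray *xa, unsigned long id, void *obj)
{
	/* xa_err() turns an encoded error pointer into 0 or -errno */
	return xa_err(xa_store_irq(xa, id, obj, GFP_KERNEL));
}

static void obj_erase(struct xarray *xa, unsigned long id)
{
	unsigned long flags;

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, id); /* __-prefixed variant: lock already held */
	xa_unlock_irqrestore(xa, flags);
}
```
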
| 175 | 250 | |
|---|
| 176 | | -static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, |
|---|
| 177 | | - struct hns_roce_qp *hr_qp) |
|---|
| 251 | +static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 178 | 252 | { |
|---|
| 179 | 253 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 180 | 254 | struct device *dev = hr_dev->dev; |
|---|
| 181 | 255 | int ret; |
|---|
| 182 | 256 | |
|---|
| 183 | | - if (!qpn) |
|---|
| 257 | + if (!hr_qp->qpn) |
|---|
| 184 | 258 | return -EINVAL; |
|---|
| 185 | 259 | |
|---|
| 186 | | - hr_qp->qpn = qpn; |
|---|
| 260 | + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ |
|---|
| 261 | + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && |
|---|
| 262 | + hr_dev->hw_rev == HNS_ROCE_HW_VER1) |
|---|
| 263 | + return 0; |
|---|
| 187 | 264 | |
|---|
| 188 | 265 | /* Alloc memory for QPC */ |
|---|
| 189 | 266 | ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); |
|---|
| 190 | 267 | if (ret) { |
|---|
| 191 | | - dev_err(dev, "QPC table get failed\n"); |
|---|
| 268 | + dev_err(dev, "Failed to get QPC table\n"); |
|---|
| 192 | 269 | goto err_out; |
|---|
| 193 | 270 | } |
|---|
| 194 | 271 | |
|---|
| 195 | 272 | /* Alloc memory for IRRL */ |
|---|
| 196 | 273 | ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); |
|---|
| 197 | 274 | if (ret) { |
|---|
| 198 | | - dev_err(dev, "IRRL table get failed\n"); |
|---|
| 275 | + dev_err(dev, "Failed to get IRRL table\n"); |
|---|
| 199 | 276 | goto err_put_qp; |
|---|
| 200 | 277 | } |
|---|
| 201 | 278 | |
|---|
| .. | .. |
|---|
| 204 | 281 | ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, |
|---|
| 205 | 282 | hr_qp->qpn); |
|---|
| 206 | 283 | if (ret) { |
|---|
| 207 | | - dev_err(dev, "TRRL table get failed\n"); |
|---|
| 284 | + dev_err(dev, "Failed to get TRRL table\n"); |
|---|
| 208 | 285 | goto err_put_irrl; |
|---|
| 209 | 286 | } |
|---|
| 210 | 287 | } |
|---|
| 211 | 288 | |
|---|
| 212 | | - spin_lock_irq(&qp_table->lock); |
|---|
| 213 | | - ret = radix_tree_insert(&hr_dev->qp_table_tree, |
|---|
| 214 | | - hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); |
|---|
| 215 | | - spin_unlock_irq(&qp_table->lock); |
|---|
| 216 | | - if (ret) { |
|---|
| 217 | | - dev_err(dev, "QPC radix_tree_insert failed\n"); |
|---|
| 218 | | - goto err_put_trrl; |
|---|
| 289 | + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { |
|---|
| 290 | + /* Alloc memory for SCC CTX */ |
|---|
| 291 | + ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, |
|---|
| 292 | + hr_qp->qpn); |
|---|
| 293 | + if (ret) { |
|---|
| 294 | + dev_err(dev, "Failed to get SCC CTX table\n"); |
|---|
| 295 | + goto err_put_trrl; |
|---|
| 296 | + } |
|---|
| 219 | 297 | } |
|---|
| 220 | | - |
|---|
| 221 | | - atomic_set(&hr_qp->refcount, 1); |
|---|
| 222 | | - init_completion(&hr_qp->free); |
|---|
| 223 | 298 | |
|---|
| 224 | 299 | return 0; |
|---|
| 225 | 300 | |
|---|
| .. | .. |
|---|
| 239 | 314 | |
|---|
| 240 | 315 | void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 241 | 316 | { |
|---|
| 242 | | - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 317 | + struct xarray *xa = &hr_dev->qp_table_xa; |
|---|
| 243 | 318 | unsigned long flags; |
|---|
| 244 | 319 | |
|---|
| 245 | | - spin_lock_irqsave(&qp_table->lock, flags); |
|---|
| 246 | | - radix_tree_delete(&hr_dev->qp_table_tree, |
|---|
| 247 | | - hr_qp->qpn & (hr_dev->caps.num_qps - 1)); |
|---|
| 248 | | - spin_unlock_irqrestore(&qp_table->lock, flags); |
|---|
| 249 | | -} |
|---|
| 250 | | -EXPORT_SYMBOL_GPL(hns_roce_qp_remove); |
|---|
| 320 | + list_del(&hr_qp->node); |
|---|
| 321 | + list_del(&hr_qp->sq_node); |
|---|
| 322 | + list_del(&hr_qp->rq_node); |
|---|
| 251 | 323 | |
|---|
| 252 | | -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 324 | + xa_lock_irqsave(xa, flags); |
|---|
| 325 | + __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1)); |
|---|
| 326 | + xa_unlock_irqrestore(xa, flags); |
|---|
| 327 | +} |
|---|
| 328 | + |
|---|
| 329 | +static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 253 | 330 | { |
|---|
| 254 | 331 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 255 | 332 | |
|---|
| 256 | | - if (atomic_dec_and_test(&hr_qp->refcount)) |
|---|
| 257 | | - complete(&hr_qp->free); |
|---|
| 258 | | - wait_for_completion(&hr_qp->free); |
|---|
| 259 | | - |
|---|
| 260 | | - if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { |
|---|
| 261 | | - if (hr_dev->caps.trrl_entry_sz) |
|---|
| 262 | | - hns_roce_table_put(hr_dev, &qp_table->trrl_table, |
|---|
| 263 | | - hr_qp->qpn); |
|---|
| 264 | | - hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); |
|---|
| 265 | | - } |
|---|
| 266 | | -} |
|---|
| 267 | | -EXPORT_SYMBOL_GPL(hns_roce_qp_free); |
|---|
| 268 | | - |
|---|
| 269 | | -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, |
|---|
| 270 | | - int cnt) |
|---|
| 271 | | -{ |
|---|
| 272 | | - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 273 | | - |
|---|
| 274 | | - if (base_qpn < SQP_NUM) |
|---|
| 333 | + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ |
|---|
| 334 | + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && |
|---|
| 335 | + hr_dev->hw_rev == HNS_ROCE_HW_VER1) |
|---|
| 275 | 336 | return; |
|---|
| 276 | 337 | |
|---|
| 277 | | - hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR); |
|---|
| 338 | + if (hr_dev->caps.trrl_entry_sz) |
|---|
| 339 | + hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); |
|---|
| 340 | + hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); |
|---|
| 278 | 341 | } |
|---|
| 279 | | -EXPORT_SYMBOL_GPL(hns_roce_release_range_qp); |
|---|
| 280 | 342 | |
|---|
| 281 | | -static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, |
|---|
| 282 | | - struct ib_qp_cap *cap, int is_user, int has_srq, |
|---|
| 283 | | - struct hns_roce_qp *hr_qp) |
|---|
| 343 | +static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 284 | 344 | { |
|---|
| 285 | | - struct device *dev = hr_dev->dev; |
|---|
| 286 | | - u32 max_cnt; |
|---|
| 345 | + struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
|---|
| 346 | + |
|---|
| 347 | + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) |
|---|
| 348 | + return; |
|---|
| 349 | + |
|---|
| 350 | + if (hr_qp->qpn < hr_dev->caps.reserved_qps) |
|---|
| 351 | + return; |
|---|
| 352 | + |
|---|
| 353 | + hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); |
|---|
| 354 | +} |
|---|
| 355 | + |
|---|
| 356 | +static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, |
|---|
| 357 | + struct hns_roce_qp *hr_qp, int has_rq) |
|---|
| 358 | +{ |
|---|
| 359 | + u32 cnt; |
|---|
| 360 | + |
|---|
| 361 | + /* If an SRQ exists, zero the RQ-related attributes */ |
|---|
| 362 | + if (!has_rq) { |
|---|
| 363 | + hr_qp->rq.wqe_cnt = 0; |
|---|
| 364 | + hr_qp->rq.max_gs = 0; |
|---|
| 365 | + hr_qp->rq_inl_buf.wqe_cnt = 0; |
|---|
| 366 | + cap->max_recv_wr = 0; |
|---|
| 367 | + cap->max_recv_sge = 0; |
|---|
| 368 | + |
|---|
| 369 | + return 0; |
|---|
| 370 | + } |
|---|
| 287 | 371 | |
|---|
| 288 | 372 | /* Check the validity of QP support capacity */ |
|---|
| 289 | | - if (cap->max_recv_wr > hr_dev->caps.max_wqes || |
|---|
| 373 | + if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || |
|---|
| 290 | 374 | cap->max_recv_sge > hr_dev->caps.max_rq_sg) { |
|---|
| 291 | | - dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n", |
|---|
| 292 | | - cap->max_recv_wr, cap->max_recv_sge); |
|---|
| 375 | + ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n", |
|---|
| 376 | + cap->max_recv_wr, cap->max_recv_sge); |
|---|
| 293 | 377 | return -EINVAL; |
|---|
| 294 | 378 | } |
|---|
| 295 | 379 | |
|---|
| 296 | | - /* If srq exit, set zero for relative number of rq */ |
|---|
| 297 | | - if (has_srq) { |
|---|
| 298 | | - if (cap->max_recv_wr) { |
|---|
| 299 | | - dev_dbg(dev, "srq no need config max_recv_wr\n"); |
|---|
| 300 | | - return -EINVAL; |
|---|
| 301 | | - } |
|---|
| 302 | | - |
|---|
| 303 | | - hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0; |
|---|
| 304 | | - } else { |
|---|
| 305 | | - if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) { |
|---|
| 306 | | - dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n"); |
|---|
| 307 | | - return -EINVAL; |
|---|
| 308 | | - } |
|---|
| 309 | | - |
|---|
| 310 | | - if (hr_dev->caps.min_wqes) |
|---|
| 311 | | - max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes); |
|---|
| 312 | | - else |
|---|
| 313 | | - max_cnt = cap->max_recv_wr; |
|---|
| 314 | | - |
|---|
| 315 | | - hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt); |
|---|
| 316 | | - |
|---|
| 317 | | - if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) { |
|---|
| 318 | | - dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n"); |
|---|
| 319 | | - return -EINVAL; |
|---|
| 320 | | - } |
|---|
| 321 | | - |
|---|
| 322 | | - max_cnt = max(1U, cap->max_recv_sge); |
|---|
| 323 | | - hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt); |
|---|
| 324 | | - if (hr_dev->caps.max_rq_sg <= 2) |
|---|
| 325 | | - hr_qp->rq.wqe_shift = |
|---|
| 326 | | - ilog2(hr_dev->caps.max_rq_desc_sz); |
|---|
| 327 | | - else |
|---|
| 328 | | - hr_qp->rq.wqe_shift = |
|---|
| 329 | | - ilog2(hr_dev->caps.max_rq_desc_sz |
|---|
| 330 | | - * hr_qp->rq.max_gs); |
|---|
| 380 | + cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); |
|---|
| 381 | + if (cnt > hr_dev->caps.max_wqes) { |
|---|
| 382 | + ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", |
|---|
| 383 | + cap->max_recv_wr); |
|---|
| 384 | + return -EINVAL; |
|---|
| 331 | 385 | } |
|---|
| 332 | 386 | |
|---|
| 333 | | - cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt; |
|---|
| 387 | + hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); |
|---|
| 388 | + |
|---|
| 389 | + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * |
|---|
| 390 | + hr_qp->rq.max_gs); |
|---|
| 391 | + |
|---|
| 392 | + hr_qp->rq.wqe_cnt = cnt; |
|---|
| 393 | + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) |
|---|
| 394 | + hr_qp->rq_inl_buf.wqe_cnt = cnt; |
|---|
| 395 | + else |
|---|
| 396 | + hr_qp->rq_inl_buf.wqe_cnt = 0; |
|---|
| 397 | + |
|---|
| 398 | + cap->max_recv_wr = cnt; |
|---|
| 334 | 399 | cap->max_recv_sge = hr_qp->rq.max_gs; |
|---|
| 335 | 400 | |
|---|
| 336 | 401 | return 0; |
|---|
| 337 | 402 | } |
|---|
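
The rewritten `set_rq_size()` reduces the old branching to three steps: clamp the requested depth up to the hardware minimum and round it to a power of two, round the SGE count likewise, and derive the per-WQE stride as a log2 shift. Worked through with illustrative caps (not taken from any real device):

```c
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* depth: roundup_pow_of_two(max(100, 64)) = 128 entries */
static u32 example_rq_wqe_shift(void)
{
	u32 max_recv_sge = 3, max_rq_desc_sz = 16;
	u32 max_gs = roundup_pow_of_two(max(1U, max_recv_sge)); /* = 4 */

	/* ilog2(16 * 4) = 6, i.e. each RQ WQE occupies 64 bytes */
	return ilog2(max_rq_desc_sz * max_gs);
}
```
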
| 338 | 403 | |
|---|
| 339 | | -static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, |
|---|
| 340 | | - struct ib_qp_cap *cap, |
|---|
| 341 | | - struct hns_roce_qp *hr_qp, |
|---|
| 342 | | - struct hns_roce_ib_create_qp *ucmd) |
|---|
| 404 | +static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, |
|---|
| 405 | + struct hns_roce_qp *hr_qp, |
|---|
| 406 | + struct ib_qp_cap *cap) |
|---|
| 407 | +{ |
|---|
| 408 | + u32 cnt; |
|---|
| 409 | + |
|---|
| 410 | + cnt = max(1U, cap->max_send_sge); |
|---|
| 411 | + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { |
|---|
| 412 | + hr_qp->sq.max_gs = roundup_pow_of_two(cnt); |
|---|
| 413 | + hr_qp->sge.sge_cnt = 0; |
|---|
| 414 | + |
|---|
| 415 | + return 0; |
|---|
| 416 | + } |
|---|
| 417 | + |
|---|
| 418 | + hr_qp->sq.max_gs = cnt; |
|---|
| 419 | + |
|---|
| 420 | + /* UD SQ WQEs place all of their SGEs in the extended SGE space */ |
|---|
| 421 | + if (hr_qp->ibqp.qp_type == IB_QPT_GSI || |
|---|
| 422 | + hr_qp->ibqp.qp_type == IB_QPT_UD) { |
|---|
| 423 | + cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs); |
|---|
| 424 | + } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) { |
|---|
| 425 | + cnt = roundup_pow_of_two(sq_wqe_cnt * |
|---|
| 426 | + (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE)); |
|---|
| 427 | + } else { |
|---|
| 428 | + cnt = 0; |
|---|
| 429 | + } |
|---|
| 430 | + |
|---|
| 431 | + hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; |
|---|
| 432 | + |
|---|
| 433 | + /* If the number of extended SGEs is not zero, they must occupy |
|---|
| 434 | + * at least one HNS_HW_PAGE_SIZE of buffer space. |
|---|
| 435 | + */ |
|---|
| 436 | + hr_qp->sge.sge_cnt = cnt ? |
|---|
| 437 | + max(cnt, (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE) : 0; |
|---|
| 438 | + |
|---|
| 439 | + return 0; |
|---|
| 440 | +} |
|---|
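
For RC QPs only the SGEs beyond the `HNS_ROCE_SGE_IN_WQE` slots inside the WQE itself spill into the extended area, while UD/GSI WQEs place every SGE there, and a non-empty extended area is padded up to a full hardware page. Worked numbers, assuming the driver's usual 16-byte SGE (`HNS_ROCE_SGE_SHIFT` = 4), two in-WQE SGE slots and a 4 KB hardware page:

```c
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* RC QP with sq_wqe_cnt = 128 and max_send_sge = 3 */
static u32 example_ext_sge_cnt(void)
{
	u32 cnt = roundup_pow_of_two(128 * (3 - 2)); /* = 128 */
	u32 page_floor = 4096 / 16;                  /* = 256 SGEs per page */

	return cnt ? max(cnt, page_floor) : 0;       /* = 256 */
}
```
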
| 441 | + |
|---|
| 442 | +static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, |
|---|
| 443 | + struct ib_qp_cap *cap, |
|---|
| 444 | + struct hns_roce_ib_create_qp *ucmd) |
|---|
| 343 | 445 | { |
|---|
| 344 | 446 | u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); |
|---|
| 345 | 447 | u8 max_sq_stride = ilog2(roundup_sq_stride); |
|---|
| 346 | | - u32 page_size; |
|---|
| 347 | | - u32 max_cnt; |
|---|
| 348 | 448 | |
|---|
| 349 | 449 | /* Sanity check SQ size before proceeding */ |
|---|
| 350 | | - if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || |
|---|
| 351 | | - ucmd->log_sq_stride > max_sq_stride || |
|---|
| 352 | | - ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { |
|---|
| 353 | | - dev_err(hr_dev->dev, "check SQ size error!\n"); |
|---|
| 450 | + if (ucmd->log_sq_stride > max_sq_stride || |
|---|
| 451 | + ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { |
|---|
| 452 | + ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); |
|---|
| 354 | 453 | return -EINVAL; |
|---|
| 355 | 454 | } |
|---|
| 356 | 455 | |
|---|
| 357 | 456 | if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
|---|
| 358 | | - dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n", |
|---|
| 359 | | - cap->max_send_sge); |
|---|
| 457 | + ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", |
|---|
| 458 | + cap->max_send_sge); |
|---|
| 360 | 459 | return -EINVAL; |
|---|
| 361 | | - } |
|---|
| 362 | | - |
|---|
| 363 | | - hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; |
|---|
| 364 | | - hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
|---|
| 365 | | - |
|---|
| 366 | | - max_cnt = max(1U, cap->max_send_sge); |
|---|
| 367 | | - if (hr_dev->caps.max_sq_sg <= 2) |
|---|
| 368 | | - hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); |
|---|
| 369 | | - else |
|---|
| 370 | | - hr_qp->sq.max_gs = max_cnt; |
|---|
| 371 | | - |
|---|
| 372 | | - if (hr_qp->sq.max_gs > 2) |
|---|
| 373 | | - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * |
|---|
| 374 | | - (hr_qp->sq.max_gs - 2)); |
|---|
| 375 | | - |
|---|
| 376 | | - if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) { |
|---|
| 377 | | - if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { |
|---|
| 378 | | - dev_err(hr_dev->dev, |
|---|
| 379 | | - "The extended sge cnt error! sge_cnt=%d\n", |
|---|
| 380 | | - hr_qp->sge.sge_cnt); |
|---|
| 381 | | - return -EINVAL; |
|---|
| 382 | | - } |
|---|
| 383 | | - } |
|---|
| 384 | | - |
|---|
| 385 | | - hr_qp->sge.sge_shift = 4; |
|---|
| 386 | | - |
|---|
| 387 | | - /* Get buf size, SQ and RQ are aligned to page_szie */ |
|---|
| 388 | | - if (hr_dev->caps.max_sq_sg <= 2) { |
|---|
| 389 | | - hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << |
|---|
| 390 | | - hr_qp->rq.wqe_shift), PAGE_SIZE) + |
|---|
| 391 | | - HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << |
|---|
| 392 | | - hr_qp->sq.wqe_shift), PAGE_SIZE); |
|---|
| 393 | | - |
|---|
| 394 | | - hr_qp->sq.offset = 0; |
|---|
| 395 | | - hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << |
|---|
| 396 | | - hr_qp->sq.wqe_shift), PAGE_SIZE); |
|---|
| 397 | | - } else { |
|---|
| 398 | | - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); |
|---|
| 399 | | - hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << |
|---|
| 400 | | - hr_qp->rq.wqe_shift), page_size) + |
|---|
| 401 | | - HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << |
|---|
| 402 | | - hr_qp->sge.sge_shift), page_size) + |
|---|
| 403 | | - HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << |
|---|
| 404 | | - hr_qp->sq.wqe_shift), page_size); |
|---|
| 405 | | - |
|---|
| 406 | | - hr_qp->sq.offset = 0; |
|---|
| 407 | | - if (hr_qp->sge.sge_cnt) { |
|---|
| 408 | | - hr_qp->sge.offset = HNS_ROCE_ALOGN_UP( |
|---|
| 409 | | - (hr_qp->sq.wqe_cnt << |
|---|
| 410 | | - hr_qp->sq.wqe_shift), |
|---|
| 411 | | - page_size); |
|---|
| 412 | | - hr_qp->rq.offset = hr_qp->sge.offset + |
|---|
| 413 | | - HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << |
|---|
| 414 | | - hr_qp->sge.sge_shift), |
|---|
| 415 | | - page_size); |
|---|
| 416 | | - } else { |
|---|
| 417 | | - hr_qp->rq.offset = HNS_ROCE_ALOGN_UP( |
|---|
| 418 | | - (hr_qp->sq.wqe_cnt << |
|---|
| 419 | | - hr_qp->sq.wqe_shift), |
|---|
| 420 | | - page_size); |
|---|
| 421 | | - } |
|---|
| 422 | 460 | } |
|---|
| 423 | 461 | |
|---|
| 424 | 462 | return 0; |
|---|
| 425 | 463 | } |
|---|
| 426 | 464 | |
|---|
| 427 | | -static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, |
|---|
| 428 | | - struct ib_qp_cap *cap, |
|---|
| 429 | | - struct hns_roce_qp *hr_qp) |
|---|
| 465 | +static int set_user_sq_size(struct hns_roce_dev *hr_dev, |
|---|
| 466 | + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, |
|---|
| 467 | + struct hns_roce_ib_create_qp *ucmd) |
|---|
| 430 | 468 | { |
|---|
| 431 | | - struct device *dev = hr_dev->dev; |
|---|
| 432 | | - u32 page_size; |
|---|
| 433 | | - u32 max_cnt; |
|---|
| 434 | | - int size; |
|---|
| 469 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 470 | + u32 cnt = 0; |
|---|
| 471 | + int ret; |
|---|
| 435 | 472 | |
|---|
| 436 | | - if (cap->max_send_wr > hr_dev->caps.max_wqes || |
|---|
| 437 | | - cap->max_send_sge > hr_dev->caps.max_sq_sg || |
|---|
| 438 | | - cap->max_inline_data > hr_dev->caps.max_sq_inline) { |
|---|
| 439 | | - dev_err(dev, "SQ WR or sge or inline data error!\n"); |
|---|
| 473 | + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || |
|---|
| 474 | + cnt > hr_dev->caps.max_wqes) |
|---|
| 475 | + return -EINVAL; |
|---|
| 476 | + |
|---|
| 477 | + ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); |
|---|
| 478 | + if (ret) { |
|---|
| 479 | + ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n", |
|---|
| 480 | + ret); |
|---|
| 481 | + return ret; |
|---|
| 482 | + } |
|---|
| 483 | + |
|---|
| 484 | + ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap); |
|---|
| 485 | + if (ret) |
|---|
| 486 | + return ret; |
|---|
| 487 | + |
|---|
| 488 | + hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
|---|
| 489 | + hr_qp->sq.wqe_cnt = cnt; |
|---|
| 490 | + |
|---|
| 491 | + return 0; |
|---|
| 492 | +} |
|---|
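
Note the `check_shl_overflow()` guard: `log_sq_bb_count` comes straight from userspace, so an absurd exponent must not be allowed to wrap `1 << n` into a small (or zero) WQE count. The idiom in isolation (a sketch with an illustrative helper):

```c
#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/types.h>

static int checked_depth(u8 log_count, u32 max_wqes, u32 *cnt)
{
	/* true (reject) if 1 << log_count does not fit in *cnt */
	if (check_shl_overflow(1, log_count, cnt) || *cnt > max_wqes)
		return -EINVAL;

	return 0;
}
```
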
| 493 | + |
|---|
| 494 | +static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, |
|---|
| 495 | + struct hns_roce_qp *hr_qp, |
|---|
| 496 | + struct hns_roce_buf_attr *buf_attr) |
|---|
| 497 | +{ |
|---|
| 498 | + int buf_size; |
|---|
| 499 | + int idx = 0; |
|---|
| 500 | + |
|---|
| 501 | + hr_qp->buff_size = 0; |
|---|
| 502 | + |
|---|
| 503 | + /* SQ WQE */ |
|---|
| 504 | + hr_qp->sq.offset = 0; |
|---|
| 505 | + buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, |
|---|
| 506 | + hr_qp->sq.wqe_shift); |
|---|
| 507 | + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
|---|
| 508 | + buf_attr->region[idx].size = buf_size; |
|---|
| 509 | + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; |
|---|
| 510 | + idx++; |
|---|
| 511 | + hr_qp->buff_size += buf_size; |
|---|
| 512 | + } |
|---|
| 513 | + |
|---|
| 514 | + /* extend SGE WQE in SQ */ |
|---|
| 515 | + hr_qp->sge.offset = hr_qp->buff_size; |
|---|
| 516 | + buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, |
|---|
| 517 | + hr_qp->sge.sge_shift); |
|---|
| 518 | + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
|---|
| 519 | + buf_attr->region[idx].size = buf_size; |
|---|
| 520 | + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; |
|---|
| 521 | + idx++; |
|---|
| 522 | + hr_qp->buff_size += buf_size; |
|---|
| 523 | + } |
|---|
| 524 | + |
|---|
| 525 | + /* RQ WQE */ |
|---|
| 526 | + hr_qp->rq.offset = hr_qp->buff_size; |
|---|
| 527 | + buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, |
|---|
| 528 | + hr_qp->rq.wqe_shift); |
|---|
| 529 | + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
|---|
| 530 | + buf_attr->region[idx].size = buf_size; |
|---|
| 531 | + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; |
|---|
| 532 | + idx++; |
|---|
| 533 | + hr_qp->buff_size += buf_size; |
|---|
| 534 | + } |
|---|
| 535 | + |
|---|
| 536 | + if (hr_qp->buff_size < 1) |
|---|
| 537 | + return -EINVAL; |
|---|
| 538 | + |
|---|
| 539 | + buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
|---|
| 540 | + buf_attr->fixed_page = true; |
|---|
| 541 | + buf_attr->region_count = idx; |
|---|
| 542 | + |
|---|
| 543 | + return 0; |
|---|
| 544 | +} |
|---|
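
`set_wqe_buf_attr()` describes the whole QP buffer to the new MTR layer as up to three page-aligned regions laid out back to back — SQ, extended SGE, then RQ — each carrying its own hop number for multi-hop addressing. With the example sizes worked out above (128 64-byte SQ WQEs, 256 16-byte extended SGEs, 128 64-byte RQ WQEs) and assuming `to_hr_hem_entries_size()` aligns each region to the 4 KB hardware page, the resulting layout would be:

```c
/* region 0: SQ       offset 0 KB   size 128 * 64 B = 8 KB  (wqe_sq_hop_num)
 * region 1: ext SGE  offset 8 KB   size 256 * 16 B = 4 KB  (wqe_sge_hop_num)
 * region 2: RQ       offset 12 KB  size 128 * 64 B = 8 KB  (wqe_rq_hop_num)
 * hr_qp->buff_size = 20 KB, region_count = 3
 */
```
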
| 545 | + |
|---|
| 546 | +static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, |
|---|
| 547 | + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) |
|---|
| 548 | +{ |
|---|
| 549 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 550 | + u32 cnt; |
|---|
| 551 | + int ret; |
|---|
| 552 | + |
|---|
| 553 | + if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || |
|---|
| 554 | + cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
|---|
| 555 | + ibdev_err(ibdev, |
|---|
| 556 | + "failed to check SQ WR or SGE num, ret = %d.\n", |
|---|
| 557 | + -EINVAL); |
|---|
| 558 | + return -EINVAL; |
|---|
| 559 | + } |
|---|
| 560 | + |
|---|
| 561 | + cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); |
|---|
| 562 | + if (cnt > hr_dev->caps.max_wqes) { |
|---|
| 563 | + ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n", |
|---|
| 564 | + cnt); |
|---|
| 440 | 565 | return -EINVAL; |
|---|
| 441 | 566 | } |
|---|
| 442 | 567 | |
|---|
| 443 | 568 | hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); |
|---|
| 444 | | - hr_qp->sq_max_wqes_per_wr = 1; |
|---|
| 445 | | - hr_qp->sq_spare_wqes = 0; |
|---|
| 569 | + hr_qp->sq.wqe_cnt = cnt; |
|---|
| 446 | 570 | |
|---|
| 447 | | - if (hr_dev->caps.min_wqes) |
|---|
| 448 | | - max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); |
|---|
| 449 | | - else |
|---|
| 450 | | - max_cnt = cap->max_send_wr; |
|---|
| 571 | + ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap); |
|---|
| 572 | + if (ret) |
|---|
| 573 | + return ret; |
|---|
| 451 | 574 | |
|---|
| 452 | | - hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt); |
|---|
| 453 | | - if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) { |
|---|
| 454 | | - dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n"); |
|---|
| 455 | | - return -EINVAL; |
|---|
| 456 | | - } |
|---|
| 457 | | - |
|---|
| 458 | | - /* Get data_seg numbers */ |
|---|
| 459 | | - max_cnt = max(1U, cap->max_send_sge); |
|---|
| 460 | | - if (hr_dev->caps.max_sq_sg <= 2) |
|---|
| 461 | | - hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); |
|---|
| 462 | | - else |
|---|
| 463 | | - hr_qp->sq.max_gs = max_cnt; |
|---|
| 464 | | - |
|---|
| 465 | | - if (hr_qp->sq.max_gs > 2) { |
|---|
| 466 | | - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * |
|---|
| 467 | | - (hr_qp->sq.max_gs - 2)); |
|---|
| 468 | | - hr_qp->sge.sge_shift = 4; |
|---|
| 469 | | - } |
|---|
| 470 | | - |
|---|
| 471 | | - /* ud sqwqe's sge use extend sge */ |
|---|
| 472 | | - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) { |
|---|
| 473 | | - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * |
|---|
| 474 | | - hr_qp->sq.max_gs); |
|---|
| 475 | | - hr_qp->sge.sge_shift = 4; |
|---|
| 476 | | - } |
|---|
| 477 | | - |
|---|
| 478 | | - if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) { |
|---|
| 479 | | - if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) { |
|---|
| 480 | | - dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n", |
|---|
| 481 | | - hr_qp->sge.sge_cnt); |
|---|
| 482 | | - return -EINVAL; |
|---|
| 483 | | - } |
|---|
| 484 | | - } |
|---|
| 485 | | - |
|---|
| 486 | | - /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ |
|---|
| 487 | | - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); |
|---|
| 488 | | - hr_qp->sq.offset = 0; |
|---|
| 489 | | - size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, |
|---|
| 490 | | - page_size); |
|---|
| 491 | | - |
|---|
| 492 | | - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) { |
|---|
| 493 | | - hr_qp->sge.offset = size; |
|---|
| 494 | | - size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt << |
|---|
| 495 | | - hr_qp->sge.sge_shift, page_size); |
|---|
| 496 | | - } |
|---|
| 497 | | - |
|---|
| 498 | | - hr_qp->rq.offset = size; |
|---|
| 499 | | - size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), |
|---|
| 500 | | - page_size); |
|---|
| 501 | | - hr_qp->buff_size = size; |
|---|
| 502 | | - |
|---|
| 503 | | - /* Get wr and sge number which send */ |
|---|
| 504 | | - cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt; |
|---|
| 575 | + /* sync the adjusted kernel QP parameters back to the caller */ |
|---|
| 576 | + cap->max_send_wr = cnt; |
|---|
| 505 | 577 | cap->max_send_sge = hr_qp->sq.max_gs; |
|---|
| 506 | | - |
|---|
| 507 | | - /* We don't support inline sends for kernel QPs (yet) */ |
|---|
| 508 | | - cap->max_inline_data = 0; |
|---|
| 509 | 578 | |
|---|
| 510 | 579 | return 0; |
|---|
| 511 | 580 | } |
|---|
| .. | .. |
|---|
| 528 | 597 | return 1; |
|---|
| 529 | 598 | } |
|---|
| 530 | 599 | |
|---|
| 531 | | -static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, |
|---|
| 532 | | - struct ib_pd *ib_pd, |
|---|
| 533 | | - struct ib_qp_init_attr *init_attr, |
|---|
| 534 | | - struct ib_udata *udata, unsigned long sqpn, |
|---|
| 535 | | - struct hns_roce_qp *hr_qp) |
|---|
| 600 | +static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp, |
|---|
| 601 | + struct ib_qp_init_attr *init_attr) |
|---|
| 536 | 602 | { |
|---|
| 537 | | - struct device *dev = hr_dev->dev; |
|---|
| 538 | | - struct hns_roce_ib_create_qp ucmd; |
|---|
| 539 | | - struct hns_roce_ib_create_qp_resp resp = {}; |
|---|
| 540 | | - unsigned long qpn = 0; |
|---|
| 541 | | - int ret = 0; |
|---|
| 542 | | - u32 page_shift; |
|---|
| 543 | | - u32 npages; |
|---|
| 603 | + u32 max_recv_sge = init_attr->cap.max_recv_sge; |
|---|
| 604 | + u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt; |
|---|
| 605 | + struct hns_roce_rinl_wqe *wqe_list; |
|---|
| 544 | 606 | int i; |
|---|
| 545 | 607 | |
|---|
| 546 | | - mutex_init(&hr_qp->mutex); |
|---|
| 547 | | - spin_lock_init(&hr_qp->sq.lock); |
|---|
| 548 | | - spin_lock_init(&hr_qp->rq.lock); |
|---|
| 608 | + /* allocate recv inline buf */ |
|---|
| 609 | + wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), |
|---|
| 610 | + GFP_KERNEL); |
|---|
| 549 | 611 | |
|---|
| 550 | | - hr_qp->state = IB_QPS_RESET; |
|---|
| 612 | + if (!wqe_list) |
|---|
| 613 | + goto err; |
|---|
| 551 | 614 | |
|---|
| 552 | | - hr_qp->ibqp.qp_type = init_attr->qp_type; |
|---|
| 615 | + /* Allocate one contiguous buffer for all the inline SGEs we need */ |
|---|
| 616 | + wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge * |
|---|
| 617 | + sizeof(struct hns_roce_rinl_sge)), |
|---|
| 618 | + GFP_KERNEL); |
|---|
| 619 | + if (!wqe_list[0].sg_list) |
|---|
| 620 | + goto err_wqe_list; |
|---|
| 553 | 621 | |
|---|
| 554 | | - if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
|---|
| 555 | | - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); |
|---|
| 556 | | - else |
|---|
| 557 | | - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); |
|---|
| 622 | + /* Point each inline WQE at its slice of the shared sg_list buffer */ |
|---|
| 623 | + for (i = 1; i < wqe_cnt; i++) |
|---|
| 624 | + wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge]; |
|---|
| 558 | 625 | |
|---|
| 559 | | - ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject, |
|---|
| 560 | | - !!init_attr->srq, hr_qp); |
|---|
| 561 | | - if (ret) { |
|---|
| 562 | | - dev_err(dev, "hns_roce_set_rq_size failed\n"); |
|---|
| 563 | | - goto err_out; |
|---|
| 564 | | - } |
|---|
| 626 | + hr_qp->rq_inl_buf.wqe_list = wqe_list; |
|---|
| 565 | 627 | |
|---|
| 566 | | - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { |
|---|
| 567 | | - /* allocate recv inline buf */ |
|---|
| 568 | | - hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, |
|---|
| 569 | | - sizeof(struct hns_roce_rinl_wqe), |
|---|
| 570 | | - GFP_KERNEL); |
|---|
| 571 | | - if (!hr_qp->rq_inl_buf.wqe_list) { |
|---|
| 572 | | - ret = -ENOMEM; |
|---|
| 573 | | - goto err_out; |
|---|
| 574 | | - } |
|---|
| 628 | + return 0; |
|---|
| 575 | 629 | |
|---|
| 576 | | - hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; |
|---|
| 630 | +err_wqe_list: |
|---|
| 631 | + kfree(wqe_list); |
|---|
| 577 | 632 | |
|---|
| 578 | | - /* Firstly, allocate a list of sge space buffer */ |
|---|
| 579 | | - hr_qp->rq_inl_buf.wqe_list[0].sg_list = |
|---|
| 580 | | - kcalloc(hr_qp->rq_inl_buf.wqe_cnt, |
|---|
| 581 | | - init_attr->cap.max_recv_sge * |
|---|
| 582 | | - sizeof(struct hns_roce_rinl_sge), |
|---|
| 583 | | - GFP_KERNEL); |
|---|
| 584 | | - if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { |
|---|
| 585 | | - ret = -ENOMEM; |
|---|
| 586 | | - goto err_wqe_list; |
|---|
| 587 | | - } |
|---|
| 633 | +err: |
|---|
| 634 | + return -ENOMEM; |
|---|
| 635 | +} |
|---|
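
`alloc_rq_inline_buf()` makes one allocation for the WQE array and a single second allocation backing every WQE's `sg_list`, then points each WQE at its slice — turning N+1 allocations into two. The pattern in miniature (illustrative types, not the driver's):

```c
#include <linux/slab.h>
#include <linux/types.h>

struct toy_sge { u32 len; u64 addr; };
struct toy_wqe { struct toy_sge *sg_list; };

static int toy_alloc_slices(struct toy_wqe *wqes, u32 wqe_cnt,
			    u32 sge_per_wqe)
{
	struct toy_sge *pool;
	u32 i;

	/* one backing allocation for all per-WQE SGE lists */
	pool = kcalloc((size_t)wqe_cnt * sge_per_wqe, sizeof(*pool),
		       GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	for (i = 0; i < wqe_cnt; i++)
		wqes[i].sg_list = &pool[i * sge_per_wqe];

	return 0; /* free later with kfree(wqes[0].sg_list) */
}
```
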
| 588 | 636 | |
|---|
| 589 | | - for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) |
|---|
| 590 | | - /* Secondly, reallocate the buffer */ |
|---|
| 591 | | - hr_qp->rq_inl_buf.wqe_list[i].sg_list = |
|---|
| 592 | | - &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * |
|---|
| 593 | | - init_attr->cap.max_recv_sge]; |
|---|
| 594 | | - } |
|---|
| 637 | +static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) |
|---|
| 638 | +{ |
|---|
| 639 | + if (hr_qp->rq_inl_buf.wqe_list) |
|---|
| 640 | + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); |
|---|
| 641 | + kfree(hr_qp->rq_inl_buf.wqe_list); |
|---|
| 642 | +} |
|---|
| 595 | 643 | |
|---|
| 596 | | - if (ib_pd->uobject) { |
|---|
| 597 | | - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { |
|---|
| 598 | | - dev_err(dev, "ib_copy_from_udata error for create qp\n"); |
|---|
| 599 | | - ret = -EFAULT; |
|---|
| 600 | | - goto err_rq_sge_list; |
|---|
| 601 | | - } |
|---|
| 644 | +static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
|---|
| 645 | + struct ib_qp_init_attr *init_attr, |
|---|
| 646 | + struct ib_udata *udata, unsigned long addr) |
|---|
| 647 | +{ |
|---|
| 648 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 649 | + struct hns_roce_buf_attr buf_attr = {}; |
|---|
| 650 | + int ret; |
|---|
| 602 | 651 | |
|---|
| 603 | | - ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, |
|---|
| 604 | | - &ucmd); |
|---|
| 652 | + if (!udata && hr_qp->rq_inl_buf.wqe_cnt) { |
|---|
| 653 | + ret = alloc_rq_inline_buf(hr_qp, init_attr); |
|---|
| 605 | 654 | if (ret) { |
|---|
| 606 | | - dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); |
|---|
| 607 | | - goto err_rq_sge_list; |
|---|
| 608 | | - } |
|---|
| 609 | | - |
|---|
| 610 | | - hr_qp->umem = ib_umem_get(ib_pd->uobject->context, |
|---|
| 611 | | - ucmd.buf_addr, hr_qp->buff_size, 0, |
|---|
| 612 | | - 0); |
|---|
| 613 | | - if (IS_ERR(hr_qp->umem)) { |
|---|
| 614 | | - dev_err(dev, "ib_umem_get error for create qp\n"); |
|---|
| 615 | | - ret = PTR_ERR(hr_qp->umem); |
|---|
| 616 | | - goto err_rq_sge_list; |
|---|
| 617 | | - } |
|---|
| 618 | | - |
|---|
| 619 | | - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; |
|---|
| 620 | | - if (hr_dev->caps.mtt_buf_pg_sz) { |
|---|
| 621 | | - npages = (ib_umem_page_count(hr_qp->umem) + |
|---|
| 622 | | - (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) / |
|---|
| 623 | | - (1 << hr_dev->caps.mtt_buf_pg_sz); |
|---|
| 624 | | - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
|---|
| 625 | | - ret = hns_roce_mtt_init(hr_dev, npages, |
|---|
| 626 | | - page_shift, |
|---|
| 627 | | - &hr_qp->mtt); |
|---|
| 628 | | - } else { |
|---|
| 629 | | - ret = hns_roce_mtt_init(hr_dev, |
|---|
| 630 | | - ib_umem_page_count(hr_qp->umem), |
|---|
| 631 | | - hr_qp->umem->page_shift, |
|---|
| 632 | | - &hr_qp->mtt); |
|---|
| 633 | | - } |
|---|
| 634 | | - if (ret) { |
|---|
| 635 | | - dev_err(dev, "hns_roce_mtt_init error for create qp\n"); |
|---|
| 636 | | - goto err_buf; |
|---|
| 637 | | - } |
|---|
| 638 | | - |
|---|
| 639 | | - ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt, |
|---|
| 640 | | - hr_qp->umem); |
|---|
| 641 | | - if (ret) { |
|---|
| 642 | | - dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n"); |
|---|
| 643 | | - goto err_mtt; |
|---|
| 644 | | - } |
|---|
| 645 | | - |
|---|
| 646 | | - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && |
|---|
| 647 | | - (udata->inlen >= sizeof(ucmd)) && |
|---|
| 648 | | - (udata->outlen >= sizeof(resp)) && |
|---|
| 649 | | - hns_roce_qp_has_sq(init_attr)) { |
|---|
| 650 | | - ret = hns_roce_db_map_user( |
|---|
| 651 | | - to_hr_ucontext(ib_pd->uobject->context), |
|---|
| 652 | | - ucmd.sdb_addr, &hr_qp->sdb); |
|---|
| 653 | | - if (ret) { |
|---|
| 654 | | - dev_err(dev, "sq record doorbell map failed!\n"); |
|---|
| 655 | | - goto err_mtt; |
|---|
| 656 | | - } |
|---|
| 657 | | - |
|---|
| 658 | | - /* indicate kernel supports sq record db */ |
|---|
| 659 | | - resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB; |
|---|
| 660 | | - hr_qp->sdb_en = 1; |
|---|
| 661 | | - } |
|---|
| 662 | | - |
|---|
| 663 | | - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && |
|---|
| 664 | | - (udata->outlen >= sizeof(resp)) && |
|---|
| 665 | | - hns_roce_qp_has_rq(init_attr)) { |
|---|
| 666 | | - ret = hns_roce_db_map_user( |
|---|
| 667 | | - to_hr_ucontext(ib_pd->uobject->context), |
|---|
| 668 | | - ucmd.db_addr, &hr_qp->rdb); |
|---|
| 669 | | - if (ret) { |
|---|
| 670 | | - dev_err(dev, "rq record doorbell map failed!\n"); |
|---|
| 671 | | - goto err_sq_dbmap; |
|---|
| 672 | | - } |
|---|
| 655 | + ibdev_err(ibdev, |
|---|
| 656 | + "failed to alloc inline buf, ret = %d.\n", |
|---|
| 657 | + ret); |
|---|
| 658 | + return ret; |
|---|
| 673 | 659 | } |
|---|
| 674 | 660 | } else { |
|---|
| 675 | | - if (init_attr->create_flags & |
|---|
| 676 | | - IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { |
|---|
| 677 | | - dev_err(dev, "init_attr->create_flags error!\n"); |
|---|
| 678 | | - ret = -EINVAL; |
|---|
| 679 | | - goto err_rq_sge_list; |
|---|
| 661 | + hr_qp->rq_inl_buf.wqe_list = NULL; |
|---|
| 662 | + } |
|---|
| 663 | + |
|---|
| 664 | + ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); |
|---|
| 665 | + if (ret) { |
|---|
| 666 | + ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret); |
|---|
| 667 | + goto err_inline; |
|---|
| 668 | + } |
|---|
| 669 | + ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, |
|---|
| 670 | + HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, |
|---|
| 671 | + udata, addr); |
|---|
| 672 | + if (ret) { |
|---|
| 673 | + ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); |
|---|
| 674 | + goto err_inline; |
|---|
| 675 | + } |
|---|
| 676 | + |
|---|
| 677 | + return 0; |
|---|
| 678 | +err_inline: |
|---|
| 679 | + free_rq_inline_buf(hr_qp); |
|---|
| 680 | + |
|---|
| 681 | + return ret; |
|---|
| 682 | +} |
|---|
| 683 | + |
|---|
| 684 | +static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
|---|
| 685 | +{ |
|---|
| 686 | + hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); |
|---|
| 687 | + free_rq_inline_buf(hr_qp); |
|---|
| 688 | +} |
|---|
| 689 | + |
|---|
| 690 | +static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, |
|---|
| 691 | + struct ib_qp_init_attr *init_attr, |
|---|
| 692 | + struct ib_udata *udata, |
|---|
| 693 | + struct hns_roce_ib_create_qp_resp *resp, |
|---|
| 694 | + struct hns_roce_ib_create_qp *ucmd) |
|---|
| 695 | +{ |
|---|
| 696 | + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && |
|---|
| 697 | + udata->outlen >= offsetofend(typeof(*resp), cap_flags) && |
|---|
| 698 | + hns_roce_qp_has_sq(init_attr) && |
|---|
| 699 | + udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); |
|---|
| 700 | +} |
|---|
| 701 | + |
|---|
| 702 | +static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, |
|---|
| 703 | + struct ib_qp_init_attr *init_attr, |
|---|
| 704 | + struct ib_udata *udata, |
|---|
| 705 | + struct hns_roce_ib_create_qp_resp *resp) |
|---|
| 706 | +{ |
|---|
| 707 | + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && |
|---|
| 708 | + udata->outlen >= offsetofend(typeof(*resp), cap_flags) && |
|---|
| 709 | + hns_roce_qp_has_rq(init_attr)); |
|---|
| 710 | +} |
|---|
| 711 | + |
|---|
| 712 | +static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, |
|---|
| 713 | + struct ib_qp_init_attr *init_attr) |
|---|
| 714 | +{ |
|---|
| 715 | + return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && |
|---|
| 716 | + hns_roce_qp_has_rq(init_attr)); |
|---|
| 717 | +} |
|---|
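
The `user_qp_has_sdb()`/`user_qp_has_rdb()` helpers gate each optional doorbell on `offsetofend()` checks against `udata->inlen`/`outlen`, so applications built against an older ABI — whose `ucmd`/`resp` structures end before the newer fields — keep working unchanged. The check in isolation (illustrative struct layout):

```c
#include <linux/stddef.h>
#include <linux/types.h>

struct toy_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__aligned_u64 sdb_addr; /* added in a later ABI revision */
};

/* only trust sdb_addr if the caller's struct is long enough to hold it */
static bool toy_udata_has_sdb(size_t inlen)
{
	return inlen >= offsetofend(struct toy_create_qp, sdb_addr);
}
```
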
| 718 | + |
|---|
| 719 | +static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
|---|
| 720 | + struct ib_qp_init_attr *init_attr, |
|---|
| 721 | + struct ib_udata *udata, |
|---|
| 722 | + struct hns_roce_ib_create_qp *ucmd, |
|---|
| 723 | + struct hns_roce_ib_create_qp_resp *resp) |
|---|
| 724 | +{ |
|---|
| 725 | + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( |
|---|
| 726 | + udata, struct hns_roce_ucontext, ibucontext); |
|---|
| 727 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 728 | + int ret; |
|---|
| 729 | + |
|---|
| 730 | + if (udata) { |
|---|
| 731 | + if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { |
|---|
| 732 | + ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr, |
|---|
| 733 | + &hr_qp->sdb); |
|---|
| 734 | + if (ret) { |
|---|
| 735 | + ibdev_err(ibdev, |
|---|
| 736 | + "failed to map user SQ doorbell, ret = %d.\n", |
|---|
| 737 | + ret); |
|---|
| 738 | + goto err_out; |
|---|
| 739 | + } |
|---|
| 740 | + hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; |
|---|
| 680 | 741 | } |
|---|
| 681 | 742 | |
|---|
| 682 | | - if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { |
|---|
| 683 | | - dev_err(dev, "init_attr->create_flags error!\n"); |
|---|
| 684 | | - ret = -EINVAL; |
|---|
| 685 | | - goto err_rq_sge_list; |
|---|
| 743 | + if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { |
|---|
| 744 | + ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr, |
|---|
| 745 | + &hr_qp->rdb); |
|---|
| 746 | + if (ret) { |
|---|
| 747 | + ibdev_err(ibdev, |
|---|
| 748 | + "failed to map user RQ doorbell, ret = %d.\n", |
|---|
| 749 | + ret); |
|---|
| 750 | + goto err_sdb; |
|---|
| 751 | + } |
|---|
| 752 | + hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; |
|---|
| 686 | 753 | } |
|---|
| 687 | | - |
|---|
| 688 | | - /* Set SQ size */ |
|---|
| 689 | | - ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, |
|---|
| 690 | | - hr_qp); |
|---|
| 691 | | - if (ret) { |
|---|
| 692 | | - dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); |
|---|
| 693 | | - goto err_rq_sge_list; |
|---|
| 694 | | - } |
|---|
| 695 | | - |
|---|
| 754 | + } else { |
|---|
| 696 | 755 | /* QP doorbell register address */ |
|---|
| 697 | 756 | hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + |
|---|
| 698 | 757 | DB_REG_OFFSET * hr_dev->priv_uar.index; |
|---|
| 699 | 758 | hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + |
|---|
| 700 | 759 | DB_REG_OFFSET * hr_dev->priv_uar.index; |
|---|
| 701 | 760 | |
|---|
| 702 | | - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && |
|---|
| 703 | | - hns_roce_qp_has_rq(init_attr)) { |
|---|
| 761 | + if (kernel_qp_has_rdb(hr_dev, init_attr)) { |
|---|
| 704 | 762 | ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); |
|---|
| 705 | 763 | if (ret) { |
|---|
| 706 | | - dev_err(dev, "rq record doorbell alloc failed!\n"); |
|---|
| 707 | | - goto err_rq_sge_list; |
|---|
| 764 | + ibdev_err(ibdev, |
|---|
| 765 | + "failed to alloc kernel RQ doorbell, ret = %d.\n", |
|---|
| 766 | + ret); |
|---|
| 767 | + goto err_out; |
|---|
| 708 | 768 | } |
|---|
| 709 | 769 | *hr_qp->rdb.db_record = 0; |
|---|
| 710 | | - hr_qp->rdb_en = 1; |
|---|
| 711 | | - } |
|---|
| 712 | | - |
|---|
| 713 | | - /* Allocate QP buf */ |
|---|
| 714 | | - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
|---|
| 715 | | - if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, |
|---|
| 716 | | - (1 << page_shift) * 2, |
|---|
| 717 | | - &hr_qp->hr_buf, page_shift)) { |
|---|
| 718 | | - dev_err(dev, "hns_roce_buf_alloc error!\n"); |
|---|
| 719 | | - ret = -ENOMEM; |
|---|
| 720 | | - goto err_db; |
|---|
| 721 | | - } |
|---|
| 722 | | - |
|---|
| 723 | | - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; |
|---|
| 724 | | - /* Write MTT */ |
|---|
| 725 | | - ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages, |
|---|
| 726 | | - hr_qp->hr_buf.page_shift, &hr_qp->mtt); |
|---|
| 727 | | - if (ret) { |
|---|
| 728 | | - dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n"); |
|---|
| 729 | | - goto err_buf; |
|---|
| 730 | | - } |
|---|
| 731 | | - |
|---|
| 732 | | - ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt, |
|---|
| 733 | | - &hr_qp->hr_buf); |
|---|
| 734 | | - if (ret) { |
|---|
| 735 | | - dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n"); |
|---|
| 736 | | - goto err_mtt; |
|---|
| 737 | | - } |
|---|
| 738 | | - |
|---|
| 739 | | - hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64), |
|---|
| 740 | | - GFP_KERNEL); |
|---|
| 741 | | - hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64), |
|---|
| 742 | | - GFP_KERNEL); |
|---|
| 743 | | - if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) { |
|---|
| 744 | | - ret = -ENOMEM; |
|---|
| 745 | | - goto err_wrid; |
|---|
| 770 | + hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; |
|---|
| 746 | 771 | } |
|---|
| 747 | 772 | } |
|---|
| 748 | 773 | |
|---|
| 749 | | - if (sqpn) { |
|---|
| 750 | | - qpn = sqpn; |
|---|
| 774 | + return 0; |
|---|
| 775 | +err_sdb: |
|---|
| 776 | + if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) |
|---|
| 777 | + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
|---|
| 778 | +err_out: |
|---|
| 779 | + return ret; |
|---|
| 780 | +} |
|---|
| 781 | + |
|---|
| 782 | +static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
|---|
| 783 | + struct ib_udata *udata) |
|---|
| 784 | +{ |
|---|
| 785 | + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( |
|---|
| 786 | + udata, struct hns_roce_ucontext, ibucontext); |
|---|
| 787 | + |
|---|
| 788 | + if (udata) { |
|---|
| 789 | + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
|---|
| 790 | + hns_roce_db_unmap_user(uctx, &hr_qp->rdb); |
|---|
| 791 | + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) |
|---|
| 792 | + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
|---|
| 751 | 793 | } else { |
|---|
| 752 | | - /* Get QPN */ |
|---|
| 753 | | - ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn); |
|---|
| 754 | | - if (ret) { |
|---|
| 755 | | - dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n"); |
|---|
| 756 | | - goto err_wrid; |
|---|
| 794 | + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
|---|
| 795 | + hns_roce_free_db(hr_dev, &hr_qp->rdb); |
|---|
| 796 | + } |
|---|
| 797 | +} |
|---|
| 798 | + |
|---|
| 799 | +static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, |
|---|
| 800 | + struct hns_roce_qp *hr_qp) |
|---|
| 801 | +{ |
|---|
| 802 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 803 | + u64 *sq_wrid = NULL; |
|---|
| 804 | + u64 *rq_wrid = NULL; |
|---|
| 805 | + int ret; |
|---|
| 806 | + |
|---|
| 807 | + sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); |
|---|
| 808 | + if (ZERO_OR_NULL_PTR(sq_wrid)) { |
|---|
| 809 | + ibdev_err(ibdev, "failed to alloc SQ wrid.\n"); |
|---|
| 810 | + return -ENOMEM; |
|---|
| 811 | + } |
|---|
| 812 | + |
|---|
| 813 | + if (hr_qp->rq.wqe_cnt) { |
|---|
| 814 | + rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); |
|---|
| 815 | + if (ZERO_OR_NULL_PTR(rq_wrid)) { |
|---|
| 816 | + ibdev_err(ibdev, "failed to alloc RQ wrid.\n"); |
|---|
| 817 | + ret = -ENOMEM; |
|---|
| 818 | + goto err_sq; |
|---|
| 757 | 819 | } |
|---|
| 758 | 820 | } |
|---|
| 759 | 821 | |
|---|
| 760 | | - if (init_attr->qp_type == IB_QPT_GSI && |
|---|
| 761 | | - hr_dev->hw_rev == HNS_ROCE_HW_VER1) { |
|---|
| 762 | | - /* In v1 engine, GSI QP context in RoCE engine's register */ |
|---|
| 763 | | - ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); |
|---|
| 764 | | - if (ret) { |
|---|
| 765 | | - dev_err(dev, "hns_roce_qp_alloc failed!\n"); |
|---|
| 766 | | - goto err_qpn; |
|---|
| 767 | | - } |
|---|
| 768 | | - } else { |
|---|
| 769 | | - ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp); |
|---|
| 770 | | - if (ret) { |
|---|
| 771 | | - dev_err(dev, "hns_roce_qp_alloc failed!\n"); |
|---|
| 772 | | - goto err_qpn; |
|---|
| 773 | | - } |
|---|
| 774 | | - } |
|---|
| 822 | + hr_qp->sq.wrid = sq_wrid; |
|---|
| 823 | + hr_qp->rq.wrid = rq_wrid; |
|---|
| 824 | + return 0; |
|---|
| 825 | +err_sq: |
|---|
| 826 | + kfree(sq_wrid); |
|---|
| 775 | 827 | |
|---|
| 776 | | - if (sqpn) |
|---|
| 777 | | - hr_qp->doorbell_qpn = 1; |
|---|
| 828 | + return ret; |
|---|
| 829 | +} |
|---|
| 830 | + |
|---|
| 831 | +static void free_kernel_wrid(struct hns_roce_qp *hr_qp) |
|---|
| 832 | +{ |
|---|
| 833 | + kfree(hr_qp->rq.wrid); |
|---|
| 834 | + kfree(hr_qp->sq.wrid); |
|---|
| 835 | +} |
|---|
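
`alloc_kernel_wrid()` tests the `kcalloc()` results with `ZERO_OR_NULL_PTR()` rather than a plain NULL check: a zero-element `kcalloc()` returns the non-NULL `ZERO_SIZE_PTR` sentinel, which must equally be treated as "nothing usable was allocated". Condensed:

```c
#include <linux/slab.h>
#include <linux/types.h>

static u64 *toy_alloc_wrid(u32 wqe_cnt)
{
	u64 *wrid = kcalloc(wqe_cnt, sizeof(u64), GFP_KERNEL);

	/* catches both NULL and ZERO_SIZE_PTR (wqe_cnt == 0) */
	if (ZERO_OR_NULL_PTR(wrid))
		return NULL;

	return wrid;
}
```
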
| 836 | + |
|---|
| 837 | +static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
|---|
| 838 | + struct ib_qp_init_attr *init_attr, |
|---|
| 839 | + struct ib_udata *udata, |
|---|
| 840 | + struct hns_roce_ib_create_qp *ucmd) |
|---|
| 841 | +{ |
|---|
| 842 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 843 | + int ret; |
|---|
| 844 | + |
|---|
| 845 | + hr_qp->ibqp.qp_type = init_attr->qp_type; |
|---|
| 846 | + |
|---|
| 847 | + if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) |
|---|
| 848 | + init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; |
|---|
| 849 | + |
|---|
| 850 | + hr_qp->max_inline_data = init_attr->cap.max_inline_data; |
|---|
| 851 | + |
|---|
| 852 | + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
|---|
| 853 | + hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; |
|---|
| 778 | 854 | else |
|---|
| 779 | | - hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); |
|---|
| 855 | + hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; |
|---|
| 780 | 856 | |
|---|
| 781 | | - if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) && |
|---|
| 782 | | - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) { |
|---|
| 783 | | - |
|---|
| 784 | | - /* indicate kernel supports rq record db */ |
|---|
| 785 | | - resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; |
|---|
| 786 | | - ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
|---|
| 787 | | - if (ret) |
|---|
| 788 | | - goto err_qp; |
|---|
| 789 | | - |
|---|
| 790 | | - hr_qp->rdb_en = 1; |
|---|
| 857 | + ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, |
|---|
| 858 | + hns_roce_qp_has_rq(init_attr)); |
|---|
| 859 | + if (ret) { |
|---|
| 860 | + ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n", |
|---|
| 861 | + ret); |
|---|
| 862 | + return ret; |
|---|
| 791 | 863 | } |
|---|
| 864 | + |
|---|
| 865 | + if (udata) { |
|---|
| 866 | + ret = ib_copy_from_udata(ucmd, udata, |
|---|
| 867 | + min(udata->inlen, sizeof(*ucmd))); |
|---|
| 868 | + if (ret) { |
|---|
| 869 | + ibdev_err(ibdev, |
|---|
| 870 | + "failed to copy QP ucmd, ret = %d\n", ret); |
|---|
| 871 | + return ret; |
|---|
| 872 | + } |
|---|
| 873 | + |
|---|
| 874 | + ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); |
|---|
| 875 | + if (ret) |
|---|
| 876 | + ibdev_err(ibdev, |
|---|
| 877 | + "failed to set user SQ size, ret = %d.\n", |
|---|
| 878 | + ret); |
|---|
| 879 | + } else { |
|---|
| 880 | + if (init_attr->create_flags & |
|---|
| 881 | + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { |
|---|
| 882 | + ibdev_err(ibdev, "Failed to check multicast loopback\n"); |
|---|
| 883 | + return -EINVAL; |
|---|
| 884 | + } |
|---|
| 885 | + |
|---|
| 886 | + if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { |
|---|
| 887 | + ibdev_err(ibdev, "Failed to check ipoib ud lso\n"); |
|---|
| 888 | + return -EINVAL; |
|---|
| 889 | + } |
|---|
| 890 | + |
|---|
| 891 | + ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); |
|---|
| 892 | + if (ret) |
|---|
| 893 | + ibdev_err(ibdev, |
|---|
| 894 | + "failed to set kernel SQ size, ret = %d.\n", |
|---|
| 895 | + ret); |
|---|
| 896 | + } |
|---|
| 897 | + |
|---|
| 898 | + return ret; |
|---|
| 899 | +} |
|---|
| 900 | + |
|---|
| 901 | +static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, |
|---|
| 902 | + struct ib_pd *ib_pd, |
|---|
| 903 | + struct ib_qp_init_attr *init_attr, |
|---|
| 904 | + struct ib_udata *udata, |
|---|
| 905 | + struct hns_roce_qp *hr_qp) |
|---|
| 906 | +{ |
|---|
| 907 | + struct hns_roce_ib_create_qp_resp resp = {}; |
|---|
| 908 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 909 | + struct hns_roce_ib_create_qp ucmd; |
|---|
| 910 | + int ret; |
|---|
| 911 | + |
|---|
| 912 | + mutex_init(&hr_qp->mutex); |
|---|
| 913 | + spin_lock_init(&hr_qp->sq.lock); |
|---|
| 914 | + spin_lock_init(&hr_qp->rq.lock); |
|---|
| 915 | + |
|---|
| 916 | + hr_qp->state = IB_QPS_RESET; |
|---|
| 917 | + hr_qp->flush_flag = 0; |
|---|
| 918 | + |
|---|
| 919 | + ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); |
|---|
| 920 | + if (ret) { |
|---|
| 921 | + ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret); |
|---|
| 922 | + return ret; |
|---|
| 923 | + } |
|---|
| 924 | + |
|---|
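| | + /* only kernel QPs keep WR ID arrays in the driver; user QPs track them in userspace */
|---|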
| 925 | + if (!udata) { |
|---|
| 926 | + ret = alloc_kernel_wrid(hr_dev, hr_qp); |
|---|
| 927 | + if (ret) { |
|---|
| 928 | + ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n", |
|---|
| 929 | + ret); |
|---|
| 930 | + return ret; |
|---|
| 931 | + } |
|---|
| 932 | + } |
|---|
| 933 | + |
|---|
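| | + /* allocate doorbell, buffer, QPN and context in order; the error labels below unwind in reverse */
|---|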
| 934 | + ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); |
|---|
| 935 | + if (ret) { |
|---|
| 936 | + ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n", |
|---|
| 937 | + ret); |
|---|
| 938 | + goto err_wrid; |
|---|
| 939 | + } |
|---|
| 940 | + |
|---|
| 941 | + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); |
|---|
| 942 | + if (ret) { |
|---|
| 943 | + ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret); |
|---|
| 944 | + goto err_db; |
|---|
| 945 | + } |
|---|
| 946 | + |
|---|
| 947 | + ret = alloc_qpn(hr_dev, hr_qp); |
|---|
| 948 | + if (ret) { |
|---|
| 949 | + ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret); |
|---|
| 950 | + goto err_buf; |
|---|
| 951 | + } |
|---|
| 952 | + |
|---|
| 953 | + ret = alloc_qpc(hr_dev, hr_qp); |
|---|
| 954 | + if (ret) { |
|---|
| 955 | + ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n", |
|---|
| 956 | + ret); |
|---|
| 957 | + goto err_qpn; |
|---|
| 958 | + } |
|---|
| 959 | + |
|---|
| 960 | + ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); |
|---|
| 961 | + if (ret) { |
|---|
| 962 | + ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret); |
|---|
| 963 | + goto err_qpc; |
|---|
| 964 | + } |
|---|
| 965 | + |
|---|
| 966 | + if (udata) { |
|---|
| 967 | + resp.cap_flags = hr_qp->en_flags; |
|---|
| 968 | + ret = ib_copy_to_udata(udata, &resp, |
|---|
| 969 | + min(udata->outlen, sizeof(resp))); |
|---|
| 970 | + if (ret) { |
|---|
| 971 | + ibdev_err(ibdev, "copy qp resp failed!\n"); |
|---|
| 972 | + goto err_store; |
|---|
| 973 | + } |
|---|
| 974 | + } |
|---|
| 975 | + |
|---|
| 976 | + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { |
|---|
| 977 | + ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); |
|---|
| 978 | + if (ret) |
|---|
| 979 | + goto err_store; |
|---|
| 980 | + } |
|---|
| 981 | + |
|---|
| 982 | + hr_qp->ibqp.qp_num = hr_qp->qpn; |
|---|
| 792 | 983 | hr_qp->event = hns_roce_ib_qp_event; |
|---|
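| | + /* initial reference, dropped in hns_roce_qp_destroy(); event and flush handlers take extra ones */
|---|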
| 984 | + atomic_set(&hr_qp->refcount, 1); |
|---|
| 985 | + init_completion(&hr_qp->free); |
|---|
| 793 | 986 | |
|---|
| 794 | 987 | return 0; |
|---|
| 795 | 988 | |
|---|
| 796 | | -err_qp: |
|---|
| 797 | | - if (init_attr->qp_type == IB_QPT_GSI && |
|---|
| 798 | | - hr_dev->hw_rev == HNS_ROCE_HW_VER1) |
|---|
| 799 | | - hns_roce_qp_remove(hr_dev, hr_qp); |
|---|
| 800 | | - else |
|---|
| 801 | | - hns_roce_qp_free(hr_dev, hr_qp); |
|---|
| 802 | | - |
|---|
| 989 | +err_store: |
|---|
| 990 | + hns_roce_qp_remove(hr_dev, hr_qp); |
|---|
| 991 | +err_qpc: |
|---|
| 992 | + free_qpc(hr_dev, hr_qp); |
|---|
| 803 | 993 | err_qpn: |
|---|
| 804 | | - if (!sqpn) |
|---|
| 805 | | - hns_roce_release_range_qp(hr_dev, qpn, 1); |
|---|
| 806 | | - |
|---|
| 807 | | -err_wrid: |
|---|
| 808 | | - if (ib_pd->uobject) { |
|---|
| 809 | | - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && |
|---|
| 810 | | - (udata->outlen >= sizeof(resp)) && |
|---|
| 811 | | - hns_roce_qp_has_rq(init_attr)) |
|---|
| 812 | | - hns_roce_db_unmap_user( |
|---|
| 813 | | - to_hr_ucontext(ib_pd->uobject->context), |
|---|
| 814 | | - &hr_qp->rdb); |
|---|
| 815 | | - } else { |
|---|
| 816 | | - kfree(hr_qp->sq.wrid); |
|---|
| 817 | | - kfree(hr_qp->rq.wrid); |
|---|
| 818 | | - } |
|---|
| 819 | | - |
|---|
| 820 | | -err_sq_dbmap: |
|---|
| 821 | | - if (ib_pd->uobject) |
|---|
| 822 | | - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && |
|---|
| 823 | | - (udata->inlen >= sizeof(ucmd)) && |
|---|
| 824 | | - (udata->outlen >= sizeof(resp)) && |
|---|
| 825 | | - hns_roce_qp_has_sq(init_attr)) |
|---|
| 826 | | - hns_roce_db_unmap_user( |
|---|
| 827 | | - to_hr_ucontext(ib_pd->uobject->context), |
|---|
| 828 | | - &hr_qp->sdb); |
|---|
| 829 | | - |
|---|
| 830 | | -err_mtt: |
|---|
| 831 | | - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); |
|---|
| 832 | | - |
|---|
| 994 | + free_qpn(hr_dev, hr_qp); |
|---|
| 833 | 995 | err_buf: |
|---|
| 834 | | - if (ib_pd->uobject) |
|---|
| 835 | | - ib_umem_release(hr_qp->umem); |
|---|
| 836 | | - else |
|---|
| 837 | | - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); |
|---|
| 838 | | - |
|---|
| 996 | + free_qp_buf(hr_dev, hr_qp); |
|---|
| 839 | 997 | err_db: |
|---|
| 840 | | - if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) && |
|---|
| 841 | | - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) |
|---|
| 842 | | - hns_roce_free_db(hr_dev, &hr_qp->rdb); |
|---|
| 843 | | - |
|---|
| 844 | | -err_rq_sge_list: |
|---|
| 845 | | - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) |
|---|
| 846 | | - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); |
|---|
| 847 | | - |
|---|
| 848 | | -err_wqe_list: |
|---|
| 849 | | - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) |
|---|
| 850 | | - kfree(hr_qp->rq_inl_buf.wqe_list); |
|---|
| 851 | | - |
|---|
| 852 | | -err_out: |
|---|
| 998 | + free_qp_db(hr_dev, hr_qp, udata); |
|---|
| 999 | +err_wrid: |
|---|
| 1000 | + free_kernel_wrid(hr_qp); |
|---|
| 853 | 1001 | return ret; |
|---|
| 1002 | +} |
|---|
| 1003 | + |
|---|
| 1004 | +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
|---|
| 1005 | + struct ib_udata *udata) |
|---|
| 1006 | +{ |
|---|
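| | + /* drop the creation reference and wait until all event/flush handlers have released theirs */
|---|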
| 1007 | + if (atomic_dec_and_test(&hr_qp->refcount)) |
|---|
| 1008 | + complete(&hr_qp->free); |
|---|
| 1009 | + wait_for_completion(&hr_qp->free); |
|---|
| 1010 | + |
|---|
| 1011 | + free_qpc(hr_dev, hr_qp); |
|---|
| 1012 | + free_qpn(hr_dev, hr_qp); |
|---|
| 1013 | + free_qp_buf(hr_dev, hr_qp); |
|---|
| 1014 | + free_kernel_wrid(hr_qp); |
|---|
| 1015 | + free_qp_db(hr_dev, hr_qp, udata); |
|---|
| 1016 | + |
|---|
| 1017 | + kfree(hr_qp); |
|---|
| 854 | 1018 | } |
|---|
| 855 | 1019 | |
|---|
| 856 | 1020 | struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, |
|---|
| .. | .. |
|---|
| 858 | 1022 | struct ib_udata *udata) |
|---|
| 859 | 1023 | { |
|---|
| 860 | 1024 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); |
|---|
| 861 | | - struct device *dev = hr_dev->dev; |
|---|
| 862 | | - struct hns_roce_sqp *hr_sqp; |
|---|
| 1025 | + struct ib_device *ibdev = &hr_dev->ib_dev; |
|---|
| 863 | 1026 | struct hns_roce_qp *hr_qp; |
|---|
| 864 | 1027 | int ret; |
|---|
| 865 | 1028 | |
|---|
| 866 | 1029 | switch (init_attr->qp_type) { |
|---|
| 867 | | - case IB_QPT_RC: { |
|---|
| 868 | | - hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); |
|---|
| 869 | | - if (!hr_qp) |
|---|
| 870 | | - return ERR_PTR(-ENOMEM); |
|---|
| 871 | | - |
|---|
| 872 | | - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, |
|---|
| 873 | | - hr_qp); |
|---|
| 874 | | - if (ret) { |
|---|
| 875 | | - dev_err(dev, "Create RC QP failed\n"); |
|---|
| 876 | | - kfree(hr_qp); |
|---|
| 877 | | - return ERR_PTR(ret); |
|---|
| 878 | | - } |
|---|
| 879 | | - |
|---|
| 880 | | - hr_qp->ibqp.qp_num = hr_qp->qpn; |
|---|
| 881 | | - |
|---|
| 1030 | + case IB_QPT_RC: |
|---|
| 1031 | + case IB_QPT_GSI: |
|---|
| 882 | 1032 | break; |
|---|
| 1033 | + default: |
|---|
| 1034 | + ibdev_err(ibdev, "not support QP type %d\n", |
|---|
| 1035 | + init_attr->qp_type); |
|---|
| 1036 | + return ERR_PTR(-EOPNOTSUPP); |
|---|
| 883 | 1037 | } |
|---|
| 884 | | - case IB_QPT_GSI: { |
|---|
| 885 | | - /* Userspace is not allowed to create special QPs: */ |
|---|
| 886 | | - if (pd->uobject) { |
|---|
| 887 | | - dev_err(dev, "not support usr space GSI\n"); |
|---|
| 888 | | - return ERR_PTR(-EINVAL); |
|---|
| 889 | | - } |
|---|
| 890 | 1038 | |
|---|
| 891 | | - hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL); |
|---|
| 892 | | - if (!hr_sqp) |
|---|
| 893 | | - return ERR_PTR(-ENOMEM); |
|---|
| 1039 | + hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); |
|---|
| 1040 | + if (!hr_qp) |
|---|
| 1041 | + return ERR_PTR(-ENOMEM); |
|---|
| 894 | 1042 | |
|---|
| 895 | | - hr_qp = &hr_sqp->hr_qp; |
|---|
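| | + /* a GSI QP serves a single port; record its logical and physical port numbers */
|---|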
| 1043 | + if (init_attr->qp_type == IB_QPT_GSI) { |
|---|
| 896 | 1044 | hr_qp->port = init_attr->port_num - 1; |
|---|
| 897 | 1045 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; |
|---|
| 898 | | - |
|---|
| 899 | | - /* when hw version is v1, the sqpn is allocated */ |
|---|
| 900 | | - if (hr_dev->caps.max_sq_sg <= 2) |
|---|
| 901 | | - hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + |
|---|
| 902 | | - hr_dev->iboe.phy_port[hr_qp->port]; |
|---|
| 903 | | - else |
|---|
| 904 | | - hr_qp->ibqp.qp_num = 1; |
|---|
| 905 | | - |
|---|
| 906 | | - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, |
|---|
| 907 | | - hr_qp->ibqp.qp_num, hr_qp); |
|---|
| 908 | | - if (ret) { |
|---|
| 909 | | - dev_err(dev, "Create GSI QP failed!\n"); |
|---|
| 910 | | - kfree(hr_sqp); |
|---|
| 911 | | - return ERR_PTR(ret); |
|---|
| 912 | | - } |
|---|
| 913 | | - |
|---|
| 914 | | - break; |
|---|
| 915 | | - } |
|---|
| 916 | | - default:{ |
|---|
| 917 | | - dev_err(dev, "not support QP type %d\n", init_attr->qp_type); |
|---|
| 918 | | - return ERR_PTR(-EINVAL); |
|---|
| 919 | | - } |
|---|
| 920 | 1046 | } |
|---|
| 921 | 1047 | |
|---|
| 1048 | + ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); |
|---|
| 1049 | + if (ret) { |
|---|
| 1050 | + ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", |
|---|
| 1051 | + init_attr->qp_type, ret); |
|---|
| 1052 | + ibdev_err(ibdev, "Create GSI QP failed!\n"); |
|---|
| 1053 | + kfree(hr_qp); |
|---|
| 1054 | + return ERR_PTR(ret); |
|---|
| 1055 | + } |
|---|
| 922 | 1056 | return &hr_qp->ibqp; |
|---|
| 923 | 1057 | } |
|---|
| 924 | | -EXPORT_SYMBOL_GPL(hns_roce_create_qp); |
|---|
| 925 | 1058 | |
|---|
| 926 | 1059 | int to_hr_qp_type(int qp_type) |
|---|
| 927 | 1060 | { |
|---|
| .. | .. |
|---|
| 940 | 1073 | |
|---|
| 941 | 1074 | return transport_type; |
|---|
| 942 | 1075 | } |
|---|
| 943 | | -EXPORT_SYMBOL_GPL(to_hr_qp_type); |
|---|
| 1076 | + |
|---|
| 1077 | +static int check_mtu_validate(struct hns_roce_dev *hr_dev, |
|---|
| 1078 | + struct hns_roce_qp *hr_qp, |
|---|
| 1079 | + struct ib_qp_attr *attr, int attr_mask) |
|---|
| 1080 | +{ |
|---|
| 1081 | + enum ib_mtu active_mtu; |
|---|
| 1082 | + int p; |
|---|
| 1083 | + |
|---|
| 1084 | + p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
|---|
| 1085 | + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); |
|---|
| 1086 | + |
|---|
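| | + /* path_mtu must be at least IB_MTU_256 and must not exceed the active netdev MTU or the device cap */
|---|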
| 1087 | + if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && |
|---|
| 1088 | + attr->path_mtu > hr_dev->caps.max_mtu) || |
|---|
| 1089 | + attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { |
|---|
| 1090 | + ibdev_err(&hr_dev->ib_dev, |
|---|
| 1091 | + "attr path_mtu(%d)invalid while modify qp", |
|---|
| 1092 | + attr->path_mtu); |
|---|
| 1093 | + return -EINVAL; |
|---|
| 1094 | + } |
|---|
| 1095 | + |
|---|
| 1096 | + return 0; |
|---|
| 1097 | +} |
|---|
| 1098 | + |
|---|
| 1099 | +static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
|---|
| 1100 | + int attr_mask) |
|---|
| 1101 | +{ |
|---|
| 1102 | + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
|---|
| 1103 | + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
|---|
| 1104 | + int p; |
|---|
| 1105 | + |
|---|
| 1106 | + if ((attr_mask & IB_QP_PORT) && |
|---|
| 1107 | + (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { |
|---|
| 1108 | + ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", |
|---|
| 1109 | + attr->port_num); |
|---|
| 1110 | + return -EINVAL; |
|---|
| 1111 | + } |
|---|
| 1112 | + |
|---|
| 1113 | + if (attr_mask & IB_QP_PKEY_INDEX) { |
|---|
| 1114 | + p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
|---|
| 1115 | + if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { |
|---|
| 1116 | + ibdev_err(&hr_dev->ib_dev, |
|---|
| 1117 | + "invalid attr, pkey_index = %u.\n", |
|---|
| 1118 | + attr->pkey_index); |
|---|
| 1119 | + return -EINVAL; |
|---|
| 1120 | + } |
|---|
| 1121 | + } |
|---|
| 1122 | + |
|---|
| 1123 | + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
|---|
| 1124 | + attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { |
|---|
| 1125 | + ibdev_err(&hr_dev->ib_dev, |
|---|
| 1126 | + "invalid attr, max_rd_atomic = %u.\n", |
|---|
| 1127 | + attr->max_rd_atomic); |
|---|
| 1128 | + return -EINVAL; |
|---|
| 1129 | + } |
|---|
| 1130 | + |
|---|
| 1131 | + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && |
|---|
| 1132 | + attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { |
|---|
| 1133 | + ibdev_err(&hr_dev->ib_dev, |
|---|
| 1134 | + "invalid attr, max_dest_rd_atomic = %u.\n", |
|---|
| 1135 | + attr->max_dest_rd_atomic); |
|---|
| 1136 | + return -EINVAL; |
|---|
| 1137 | + } |
|---|
| 1138 | + |
|---|
| 1139 | + if (attr_mask & IB_QP_PATH_MTU) |
|---|
| 1140 | + return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); |
|---|
| 1141 | + |
|---|
| 1142 | + return 0; |
|---|
| 1143 | +} |
|---|
| 944 | 1144 | |
|---|
| 945 | 1145 | int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
|---|
| 946 | 1146 | int attr_mask, struct ib_udata *udata) |
|---|
| .. | .. |
|---|
| 948 | 1148 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
|---|
| 949 | 1149 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
|---|
| 950 | 1150 | enum ib_qp_state cur_state, new_state; |
|---|
| 951 | | - struct device *dev = hr_dev->dev; |
|---|
| 952 | 1151 | int ret = -EINVAL; |
|---|
| 953 | | - int p; |
|---|
| 954 | | - enum ib_mtu active_mtu; |
|---|
| 955 | 1152 | |
|---|
| 956 | 1153 | mutex_lock(&hr_qp->mutex); |
|---|
| 957 | 1154 | |
|---|
| 958 | | - cur_state = attr_mask & IB_QP_CUR_STATE ? |
|---|
| 959 | | - attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; |
|---|
| 960 | | - new_state = attr_mask & IB_QP_STATE ? |
|---|
| 961 | | - attr->qp_state : cur_state; |
|---|
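| | + /* reject the request if the caller's view of the current state is stale */
|---|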
| 1155 | + if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) |
|---|
| 1156 | + goto out; |
|---|
| 1157 | + |
|---|
| 1158 | + cur_state = hr_qp->state; |
|---|
| 1159 | + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; |
|---|
| 962 | 1160 | |
|---|
| 963 | 1161 | if (ibqp->uobject && |
|---|
| 964 | 1162 | (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { |
|---|
| 965 | | - if (hr_qp->sdb_en == 1) { |
|---|
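| | + /* with record doorbells, read back the current producer heads before flushing to the error state */
|---|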
| 1163 | + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { |
|---|
| 966 | 1164 | hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); |
|---|
| 967 | | - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); |
|---|
| 1165 | + |
|---|
| 1166 | + if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
|---|
| 1167 | + hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); |
|---|
| 968 | 1168 | } else { |
|---|
| 969 | | - dev_warn(dev, "flush cqe is not supported in userspace!\n"); |
|---|
| 1169 | + ibdev_warn(&hr_dev->ib_dev, |
|---|
| 1170 | + "flush cqe is not supported in userspace!\n"); |
|---|
| 970 | 1171 | goto out; |
|---|
| 971 | 1172 | } |
|---|
| 972 | 1173 | } |
|---|
| 973 | 1174 | |
|---|
| 974 | | - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, |
|---|
| 975 | | - IB_LINK_LAYER_ETHERNET)) { |
|---|
| 976 | | - dev_err(dev, "ib_modify_qp_is_ok failed\n"); |
|---|
| 1175 | + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, |
|---|
| 1176 | + attr_mask)) { |
|---|
| 1177 | + ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); |
|---|
| 977 | 1178 | goto out; |
|---|
| 978 | 1179 | } |
|---|
| 979 | 1180 | |
|---|
| 980 | | - if ((attr_mask & IB_QP_PORT) && |
|---|
| 981 | | - (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { |
|---|
| 982 | | - dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", |
|---|
| 983 | | - attr->port_num); |
|---|
| 1181 | + ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); |
|---|
| 1182 | + if (ret) |
|---|
| 984 | 1183 | goto out; |
|---|
| 985 | | - } |
|---|
| 986 | | - |
|---|
| 987 | | - if (attr_mask & IB_QP_PKEY_INDEX) { |
|---|
| 988 | | - p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
|---|
| 989 | | - if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { |
|---|
| 990 | | - dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", |
|---|
| 991 | | - attr->pkey_index); |
|---|
| 992 | | - goto out; |
|---|
| 993 | | - } |
|---|
| 994 | | - } |
|---|
| 995 | | - |
|---|
| 996 | | - if (attr_mask & IB_QP_PATH_MTU) { |
|---|
| 997 | | - p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
|---|
| 998 | | - active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); |
|---|
| 999 | | - |
|---|
| 1000 | | - if ((hr_dev->caps.max_mtu == IB_MTU_4096 && |
|---|
| 1001 | | - attr->path_mtu > IB_MTU_4096) || |
|---|
| 1002 | | - (hr_dev->caps.max_mtu == IB_MTU_2048 && |
|---|
| 1003 | | - attr->path_mtu > IB_MTU_2048) || |
|---|
| 1004 | | - attr->path_mtu < IB_MTU_256 || |
|---|
| 1005 | | - attr->path_mtu > active_mtu) { |
|---|
| 1006 | | - dev_err(dev, "attr path_mtu(%d)invalid while modify qp", |
|---|
| 1007 | | - attr->path_mtu); |
|---|
| 1008 | | - goto out; |
|---|
| 1009 | | - } |
|---|
| 1010 | | - } |
|---|
| 1011 | | - |
|---|
| 1012 | | - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
|---|
| 1013 | | - attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { |
|---|
| 1014 | | - dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", |
|---|
| 1015 | | - attr->max_rd_atomic); |
|---|
| 1016 | | - goto out; |
|---|
| 1017 | | - } |
|---|
| 1018 | | - |
|---|
| 1019 | | - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && |
|---|
| 1020 | | - attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { |
|---|
| 1021 | | - dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", |
|---|
| 1022 | | - attr->max_dest_rd_atomic); |
|---|
| 1023 | | - goto out; |
|---|
| 1024 | | - } |
|---|
| 1025 | 1184 | |
|---|
| 1026 | 1185 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { |
|---|
| 1027 | | - if (hr_dev->caps.min_wqes) { |
|---|
| 1186 | + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { |
|---|
| 1028 | 1187 | ret = -EPERM; |
|---|
| 1029 | | - dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, |
|---|
| 1030 | | - new_state); |
|---|
| 1188 | + ibdev_err(&hr_dev->ib_dev, |
|---|
| 1189 | + "RST2RST state is not supported\n"); |
|---|
| 1031 | 1190 | } else { |
|---|
| 1032 | 1191 | ret = 0; |
|---|
| 1033 | 1192 | } |
|---|
| .. | .. |
|---|
| 1047 | 1206 | void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) |
|---|
| 1048 | 1207 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) |
|---|
| 1049 | 1208 | { |
|---|
| 1050 | | - if (send_cq == recv_cq) { |
|---|
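| | + /* __acquire()/__release() only annotate for sparse; distinct CQs are locked in ascending CQN order to avoid ABBA deadlock */
|---|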
| 1209 | + if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
|---|
| 1210 | + __acquire(&send_cq->lock); |
|---|
| 1211 | + __acquire(&recv_cq->lock); |
|---|
| 1212 | + } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { |
|---|
| 1213 | + spin_lock_irq(&send_cq->lock); |
|---|
| 1214 | + __acquire(&recv_cq->lock); |
|---|
| 1215 | + } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { |
|---|
| 1216 | + spin_lock_irq(&recv_cq->lock); |
|---|
| 1217 | + __acquire(&send_cq->lock); |
|---|
| 1218 | + } else if (send_cq == recv_cq) { |
|---|
| 1051 | 1219 | spin_lock_irq(&send_cq->lock); |
|---|
| 1052 | 1220 | __acquire(&recv_cq->lock); |
|---|
| 1053 | 1221 | } else if (send_cq->cqn < recv_cq->cqn) { |
|---|
| .. | .. |
|---|
| 1058 | 1226 | spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); |
|---|
| 1059 | 1227 | } |
|---|
| 1060 | 1228 | } |
|---|
| 1061 | | -EXPORT_SYMBOL_GPL(hns_roce_lock_cqs); |
|---|
| 1062 | 1229 | |
|---|
| 1063 | 1230 | void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, |
|---|
| 1064 | 1231 | struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) |
|---|
| 1065 | 1232 | __releases(&recv_cq->lock) |
|---|
| 1066 | 1233 | { |
|---|
| 1067 | | - if (send_cq == recv_cq) { |
|---|
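| | + /* release in the reverse of the order taken in hns_roce_lock_cqs() */
|---|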
| 1234 | + if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
|---|
| 1235 | + __release(&recv_cq->lock); |
|---|
| 1236 | + __release(&send_cq->lock); |
|---|
| 1237 | + } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { |
|---|
| 1238 | + __release(&recv_cq->lock); |
|---|
| 1239 | + spin_unlock(&send_cq->lock); |
|---|
| 1240 | + } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { |
|---|
| 1241 | + __release(&send_cq->lock); |
|---|
| 1242 | + spin_unlock(&recv_cq->lock); |
|---|
| 1243 | + } else if (send_cq == recv_cq) { |
|---|
| 1068 | 1244 | __release(&recv_cq->lock); |
|---|
| 1069 | 1245 | spin_unlock_irq(&send_cq->lock); |
|---|
| 1070 | 1246 | } else if (send_cq->cqn < recv_cq->cqn) { |
|---|
| .. | .. |
|---|
| 1075 | 1251 | spin_unlock_irq(&recv_cq->lock); |
|---|
| 1076 | 1252 | } |
|---|
| 1077 | 1253 | } |
|---|
| 1078 | | -EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs); |
|---|
| 1079 | 1254 | |
|---|
| 1080 | | -static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) |
|---|
| 1255 | +static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) |
|---|
| 1081 | 1256 | { |
|---|
| 1082 | | - |
|---|
| 1083 | | - return hns_roce_buf_offset(&hr_qp->hr_buf, offset); |
|---|
| 1257 | + return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); |
|---|
| 1084 | 1258 | } |
|---|
| 1085 | 1259 | |
|---|
| 1086 | | -void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1260 | +void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1087 | 1261 | { |
|---|
| 1088 | 1262 | return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); |
|---|
| 1089 | 1263 | } |
|---|
| 1090 | | -EXPORT_SYMBOL_GPL(get_recv_wqe); |
|---|
| 1091 | 1264 | |
|---|
| 1092 | | -void *get_send_wqe(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1265 | +void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1093 | 1266 | { |
|---|
| 1094 | 1267 | return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); |
|---|
| 1095 | 1268 | } |
|---|
| 1096 | | -EXPORT_SYMBOL_GPL(get_send_wqe); |
|---|
| 1097 | 1269 | |
|---|
| 1098 | | -void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1270 | +void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n) |
|---|
| 1099 | 1271 | { |
|---|
| 1100 | | - return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset + |
|---|
| 1101 | | - (n << hr_qp->sge.sge_shift)); |
|---|
| 1272 | + return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); |
|---|
| 1102 | 1273 | } |
|---|
| 1103 | | -EXPORT_SYMBOL_GPL(get_send_extend_sge); |
|---|
| 1104 | 1274 | |
|---|
| 1105 | 1275 | bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, |
|---|
| 1106 | 1276 | struct ib_cq *ib_cq) |
|---|
| .. | .. |
|---|
| 1109 | 1279 | u32 cur; |
|---|
| 1110 | 1280 | |
|---|
| 1111 | 1281 | cur = hr_wq->head - hr_wq->tail; |
|---|
| 1112 | | - if (likely(cur + nreq < hr_wq->max_post)) |
|---|
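| | + /* head and tail are free-running counters, so the unsigned subtraction above is wraparound-safe */
|---|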
| 1282 | + if (likely(cur + nreq < hr_wq->wqe_cnt)) |
|---|
| 1113 | 1283 | return false; |
|---|
| 1114 | 1284 | |
|---|
| 1115 | 1285 | hr_cq = to_hr_cq(ib_cq); |
|---|
| .. | .. |
|---|
| 1117 | 1287 | cur = hr_wq->head - hr_wq->tail; |
|---|
| 1118 | 1288 | spin_unlock(&hr_cq->lock); |
|---|
| 1119 | 1289 | |
|---|
| 1120 | | - return cur + nreq >= hr_wq->max_post; |
|---|
| 1290 | + return cur + nreq >= hr_wq->wqe_cnt; |
|---|
| 1121 | 1291 | } |
|---|
| 1122 | | -EXPORT_SYMBOL_GPL(hns_roce_wq_overflow); |
|---|
| 1123 | 1292 | |
|---|
| 1124 | 1293 | int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) |
|---|
| 1125 | 1294 | { |
|---|
| .. | .. |
|---|
| 1128 | 1297 | int reserved_from_bot; |
|---|
| 1129 | 1298 | int ret; |
|---|
| 1130 | 1299 | |
|---|
| 1131 | | - spin_lock_init(&qp_table->lock); |
|---|
| 1132 | | - INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC); |
|---|
| 1300 | + mutex_init(&qp_table->scc_mutex); |
|---|
| 1301 | + xa_init(&hr_dev->qp_table_xa); |
|---|
| 1133 | 1302 | |
|---|
| 1134 | | - /* In hw v1, a port include two SQP, six ports total 12 */ |
|---|
| 1135 | | - if (hr_dev->caps.max_sq_sg <= 2) |
|---|
| 1136 | | - reserved_from_bot = SQP_NUM; |
|---|
| 1137 | | - else |
|---|
| 1138 | | - reserved_from_bot = hr_dev->caps.reserved_qps; |
|---|
| 1303 | + reserved_from_bot = hr_dev->caps.reserved_qps; |
|---|
| 1139 | 1304 | |
|---|
| 1140 | 1305 | ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, |
|---|
| 1141 | 1306 | hr_dev->caps.num_qps - 1, reserved_from_bot, |
|---|