@@ -32,108 +32,47 @@
 
 #include <linux/platform_device.h>
 #include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
 #include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
-	struct ib_cq *ibcq = &hr_cq->ib_cq;
-
-	ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
-				 enum hns_roce_event event_type)
-{
-	struct hns_roce_dev *hr_dev;
-	struct ib_event event;
-	struct ib_cq *ibcq;
-
-	ibcq = &hr_cq->ib_cq;
-	hr_dev = to_hr_dev(ibcq->device);
-
-	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(hr_dev->dev,
-			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
-			event_type, hr_cq->cqn);
-		return;
-	}
-
-	if (ibcq->event_handler) {
-		event.device = ibcq->device;
-		event.event = IB_EVENT_CQ_ERR;
-		event.element.cq = ibcq;
-		ibcq->event_handler(&event, ibcq->cq_context);
-	}
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
-			     struct hns_roce_cmd_mailbox *mailbox,
-			     unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-				 HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
-			     struct hns_roce_mtt *hr_mtt,
-			     struct hns_roce_uar *hr_uar,
-			     struct hns_roce_cq *hr_cq, int vector)
-{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_cmd_mailbox *mailbox;
-	struct hns_roce_hem_table *mtt_table;
 	struct hns_roce_cq_table *cq_table;
-	struct device *dev = hr_dev->dev;
+	u64 mtts[MTT_MIN_COUNT] = { 0 };
 	dma_addr_t dma_handle;
-	u64 *mtts;
 	int ret;
 
+	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+				&dma_handle);
+	if (!ret) {
+		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
+		return -EINVAL;
+	}
+
 	cq_table = &hr_dev->cq_table;
-
-	/* Get the physical address of cq buf */
-	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
-	else
-		mtt_table = &hr_dev->mr_table.mtt_table;
-
-	mtts = hns_roce_table_find(hr_dev, mtt_table,
-				   hr_mtt->first_seg, &dma_handle);
-	if (!mtts) {
-		dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
-		return -EINVAL;
-	}
-
-	if (vector >= hr_dev->caps.num_comp_vectors) {
-		dev_err(dev, "CQ alloc.Invalid vector.\n");
-		return -EINVAL;
-	}
-	hr_cq->vector = vector;
-
 	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
-	if (ret == -1) {
-		dev_err(dev, "CQ alloc.Failed to alloc index.\n");
-		return -ENOMEM;
+	if (ret) {
+		ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
+		return ret;
 	}
 
 	/* Get CQC memory HEM(Hardware Entry Memory) table */
 	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
+			  hr_cq->cqn, ret);
		goto err_out;
 	}
 
-	/* The cq insert radix tree */
-	spin_lock_irq(&cq_table->lock);
-	/* Radix_tree: The associated pointer and long integer key value like */
-	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
-	spin_unlock_irq(&cq_table->lock);
+	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
+		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
 		goto err_put;
 	}
 
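
The hunk above swaps the driver's cqn-to-CQ lookup structure: the spinlock-protected radix tree becomes an xarray, which handles locking internally and allows lock-free RCU lookups. A minimal self-contained sketch of the same store/lookup/erase pattern (the demo_* names are illustrative, not part of the driver):

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_cq_xa);	/* replaces tree + spinlock */

static int demo_store(unsigned long cqn, void *cq)
{
	/* xa_store() returns the old entry, or an errno encoded as a
	 * pointer on failure; xa_err() folds both into a plain int. */
	return xa_err(xa_store(&demo_cq_xa, cqn, cq, GFP_KERNEL));
}

static void *demo_lookup(unsigned long cqn)
{
	/* RCU-protected lookup; no caller-side lock is required */
	return xa_load(&demo_cq_xa, cqn);
}

static void demo_erase(unsigned long cqn)
{
	xa_erase(&demo_cq_xa, cqn);
}
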
@@ -141,33 +80,32 @@
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox)) {
 		ret = PTR_ERR(mailbox);
-		goto err_radix;
+		goto err_xa;
 	}
 
-	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
-			      nent, vector);
+	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
 
 	/* Send mailbox to hw */
-	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+				HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
-		dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
-		goto err_radix;
+		ibdev_err(ibdev,
+			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+			  hr_cq->cqn, ret);
+		goto err_xa;
 	}
 
 	hr_cq->cons_index = 0;
 	hr_cq->arm_sn = 1;
-	hr_cq->uar = hr_uar;
 
 	atomic_set(&hr_cq->refcount, 1);
 	init_completion(&hr_cq->free);
 
 	return 0;
 
-err_radix:
-	spin_lock_irq(&cq_table->lock);
-	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
+err_xa:
+	xa_erase(&cq_table->array, hr_cq->cqn);
 
 err_put:
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
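
alloc_cqc() above follows the kernel's usual goto-based unwind: each error label releases exactly what was acquired before the failing step, in reverse order (xarray entry, then HEM table entry, then bitmap index). A generic sketch of the idiom, with hypothetical acquire/release helpers standing in for the driver calls:

/* hypothetical helpers, assumed to exist only for this sketch */
int acquire_first(void);
int acquire_second(void);
int acquire_third(void);
void release_first(void);
void release_second(void);

static int demo_alloc(void)
{
	int ret;

	ret = acquire_first();		/* e.g. the bitmap index */
	if (ret)
		return ret;

	ret = acquire_second();		/* e.g. the HEM table entry */
	if (ret)
		goto err_first;

	ret = acquire_third();		/* e.g. the xarray slot */
	if (ret)
		goto err_second;

	return 0;

err_second:
	release_second();
err_first:
	release_first();
	return ret;
}
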
@@ -177,25 +115,20 @@
 	return ret;
 }
 
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
-			     struct hns_roce_cmd_mailbox *mailbox,
-			     unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
-				 HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
 	int ret;
 
-	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+				HNS_ROCE_CMD_DESTROY_CQC,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
 	if (ret)
-		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
+
+	xa_erase(&cq_table->array, hr_cq->cqn);
 
 	/* Waiting interrupt process procedure carried out */
 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -205,12 +138,7 @@
 		complete(&hr_cq->free);
 	wait_for_completion(&hr_cq->free);
 
-	spin_lock_irq(&cq_table->lock);
-	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
-	spin_unlock_irq(&cq_table->lock);
-
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
-EXPORT_SYMBOL_GPL(hns_roce_free_cq);
 
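
Together with the previous hunk, free_cqc() serializes teardown against in-flight event handlers using a refcount paired with a completion: the CQ is created holding one reference, handlers take and drop references around their work, and the destroyer drops the initial reference and then blocks until the last user signals the completion. A reduced sketch of that pattern (demo_obj is a made-up type standing in for hns_roce_cq):

#include <linux/atomic.h>
#include <linux/completion.h>

struct demo_obj {
	atomic_t refcount;
	struct completion free;
};

static void demo_init(struct demo_obj *obj)
{
	atomic_set(&obj->refcount, 1);	/* the owner's reference */
	init_completion(&obj->free);
}

static void demo_put(struct demo_obj *obj)
{
	/* last reference gone: wake the waiting destroyer */
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);
}

static void demo_destroy(struct demo_obj *obj)
{
	demo_put(obj);			 /* drop the owner's reference */
	wait_for_completion(&obj->free); /* wait out event handlers */
}
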
@@ -217,48 +145,63 @@
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
-				   struct ib_ucontext *context,
-				   struct hns_roce_cq_buf *buf,
-				   struct ib_umem **umem, u64 buf_addr, int cqe)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+			struct ib_udata *udata, unsigned long addr)
 {
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_buf_attr buf_attr = {};
 	int ret;
-	u32 page_shift;
-	u32 npages;
 
-	*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
-			    IB_ACCESS_LOCAL_WRITE, 1);
-	if (IS_ERR(*umem))
-		return PTR_ERR(*umem);
+	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
+	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+	buf_attr.region_count = 1;
+	buf_attr.fixed_page = true;
 
-	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
-	else
-		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+				  udata, addr);
+	if (ret)
+		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
 
-	if (hr_dev->caps.cqe_buf_pg_sz) {
-		npages = (ib_umem_page_count(*umem) +
-			  (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
-			 (1 << hr_dev->caps.cqe_buf_pg_sz);
-		page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
-		ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
-					&buf->hr_mtt);
+	return ret;
+}
+
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
+}
+
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		       struct ib_udata *udata, unsigned long addr,
+		       struct hns_roce_ib_create_cq_resp *resp)
+{
+	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+	struct hns_roce_ucontext *uctx;
+	int err;
+
+	if (udata) {
+		if (has_db &&
+		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+			uctx = rdma_udata_to_drv_context(udata,
+					struct hns_roce_ucontext, ibucontext);
+			err = hns_roce_db_map_user(uctx, udata, addr,
+						   &hr_cq->db);
+			if (err)
+				return err;
+			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+		}
 	} else {
-		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-					(*umem)->page_shift,
-					&buf->hr_mtt);
+		if (has_db) {
+			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+			if (err)
+				return err;
+			hr_cq->set_ci_db = hr_cq->db.db_record;
+			*hr_cq->set_ci_db = 0;
+			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+		}
+		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+				 DB_REG_OFFSET * hr_dev->priv_uar.index;
 	}
-	if (ret)
-		goto err_buf;
-
-	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
-	if (ret)
-		goto err_mtt;
 
 	return 0;
-
-err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
-
-err_buf:
-	ib_umem_release(*umem);
-	return ret;
 }
 
@@ -265,45 +208,34 @@
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq_buf *buf, u32 nent)
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		       struct ib_udata *udata)
 {
-	int ret;
-	u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
+	struct hns_roce_ucontext *uctx;
 
-	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
-				 (1 << page_shift) * 2, &buf->hr_buf,
-				 page_shift);
-	if (ret)
-		goto out;
+	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
+		return;
 
-	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
-	else
-		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
-	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
-				buf->hr_buf.page_shift, &buf->hr_mtt);
-	if (ret)
-		goto err_buf;
-
-	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
-	if (ret)
-		goto err_mtt;
-
-	return 0;
-
-err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
-
-err_buf:
-	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
-			  &buf->hr_buf);
-out:
-	return ret;
+	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
+	if (udata) {
+		uctx = rdma_udata_to_drv_context(udata,
+						 struct hns_roce_ucontext,
+						 ibucontext);
+		hns_roce_db_unmap_user(uctx, &hr_cq->db);
+	} else {
+		hns_roce_free_db(hr_dev, &hr_cq->db);
+	}
 }
 
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq_buf *buf, int cqe)
+static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+			 struct hns_roce_ib_create_cq *ucmd)
 {
-	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
-			  &buf->hr_buf);
+	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+	if (udata) {
+		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
+			hr_cq->cqe_size = ucmd->cqe_size;
+		else
+			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
+	} else {
+		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+	}
 }
 
@@ -310,144 +242,92 @@
-struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
-				    const struct ib_cq_init_attr *attr,
-				    struct ib_ucontext *context,
-				    struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+		       struct ib_udata *udata)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = hr_dev->dev;
-	struct hns_roce_ib_create_cq ucmd;
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_ib_create_cq_resp resp = {};
-	struct hns_roce_cq *hr_cq = NULL;
-	struct hns_roce_uar *uar = NULL;
+	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_ib_create_cq ucmd = {};
 	int vector = attr->comp_vector;
-	int cq_entries = attr->cqe;
+	u32 cq_entries = attr->cqe;
 	int ret;
 
 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
-		dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
-			cq_entries, hr_dev->caps.max_cqes);
-		return ERR_PTR(-EINVAL);
+		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
+			  cq_entries, hr_dev->caps.max_cqes);
+		return -EINVAL;
 	}
 
-	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
-	if (!hr_cq)
-		return ERR_PTR(-ENOMEM);
+	if (vector >= hr_dev->caps.num_comp_vectors) {
+		ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
+			  vector, hr_dev->caps.num_comp_vectors);
+		return -EINVAL;
+	}
 
-	if (hr_dev->caps.min_cqes)
-		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
-
-	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
-	hr_cq->ib_cq.cqe = cq_entries - 1;
+	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+	cq_entries = roundup_pow_of_two(cq_entries);
+	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+	hr_cq->cq_depth = cq_entries;
+	hr_cq->vector = vector;
 	spin_lock_init(&hr_cq->lock);
+	INIT_LIST_HEAD(&hr_cq->sq_list);
+	INIT_LIST_HEAD(&hr_cq->rq_list);
 
-	if (context) {
-		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
-			dev_err(dev, "Failed to copy_from_udata.\n");
-			ret = -EFAULT;
-			goto err_cq;
-		}
-
-		/* Get user space address, write it into mtt table */
-		ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
-					      &hr_cq->umem, ucmd.buf_addr,
-					      cq_entries);
+	if (udata) {
+		ret = ib_copy_from_udata(&ucmd, udata,
+					 min(udata->inlen, sizeof(ucmd)));
 		if (ret) {
-			dev_err(dev, "Failed to get_cq_umem.\n");
-			goto err_cq;
+			ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
+				  ret);
+			return ret;
 		}
-
-		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
-		    (udata->outlen >= sizeof(resp))) {
-			ret = hns_roce_db_map_user(to_hr_ucontext(context),
-						   ucmd.db_addr, &hr_cq->db);
-			if (ret) {
-				dev_err(dev, "cq record doorbell map failed!\n");
-				goto err_mtt;
-			}
-			hr_cq->db_en = 1;
-			resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
-		}
-
-		/* Get user space parameters */
-		uar = &to_hr_ucontext(context)->uar;
-	} else {
-		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
-			ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
-			if (ret)
-				goto err_cq;
-
-			hr_cq->set_ci_db = hr_cq->db.db_record;
-			*hr_cq->set_ci_db = 0;
-			hr_cq->db_en = 1;
-		}
-
-		/* Init mmt table and write buff address to mtt table */
-		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
-					       cq_entries);
-		if (ret) {
-			dev_err(dev, "Failed to alloc_cq_buf.\n");
-			goto err_db;
-		}
-
-		uar = &hr_dev->priv_uar;
-		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
-				 DB_REG_OFFSET * uar->index;
 	}
 
-	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
-				hr_cq, vector);
+	set_cqe_size(hr_cq, udata, &ucmd);
+
+	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
 	if (ret) {
-		dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
-		goto err_dbmap;
+		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
+	if (ret) {
+		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
+		goto err_cq_buf;
+	}
+
+	ret = alloc_cqc(hr_dev, hr_cq);
+	if (ret) {
+		ibdev_err(ibdev,
+			  "failed to alloc CQ context, ret = %d.\n", ret);
+		goto err_cq_db;
 	}
 
 	/*
 	 * For the QP created by kernel space, tptr value should be initialized
 	 * to zero; For the QP created by user space, it will cause synchronous
-	 * problems if tptr is set to zero here, so we initialze it in user
+	 * problems if tptr is set to zero here, so we initialize it in user
 	 * space.
 	 */
-	if (!context && hr_cq->tptr_addr)
+	if (!udata && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;
 
-	/* Get created cq handler and carry out event */
-	hr_cq->comp = hns_roce_ib_cq_comp;
-	hr_cq->event = hns_roce_ib_cq_event;
-	hr_cq->cq_depth = cq_entries;
-
-	if (context) {
+	if (udata) {
 		resp.cqn = hr_cq->cqn;
-		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+		ret = ib_copy_to_udata(udata, &resp,
+				       min(udata->outlen, sizeof(resp)));
 		if (ret)
 			goto err_cqc;
 	}
 
-	return &hr_cq->ib_cq;
+	return 0;
 
 err_cqc:
-	hns_roce_free_cq(hr_dev, hr_cq);
-
-err_dbmap:
-	if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
-	    (udata->outlen >= sizeof(resp)))
-		hns_roce_db_unmap_user(to_hr_ucontext(context),
-				       &hr_cq->db);
-
-err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-	if (context)
-		ib_umem_release(hr_cq->umem);
-	else
-		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
-					hr_cq->ib_cq.cqe);
-
-err_db:
-	if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
-		hns_roce_free_db(hr_dev, &hr_cq->db);
-
-err_cq:
-	kfree(hr_cq);
-	return ERR_PTR(ret);
+	free_cqc(hr_dev, hr_cq);
+err_cq_db:
+	free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+	free_cq_buf(hr_dev, hr_cq);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
 
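
hns_roce_create_cq() above copies only min(udata->inlen, sizeof(ucmd)) in and min(udata->outlen, sizeof(resp)) out, while set_cqe_size() and alloc_cq_db() probe optional fields with offsetofend(), so binaries built against a shorter command or response struct keep working. A sketch of that extensible-ABI idiom; the demo_ucmd layout and its field names are made up for illustration:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

struct demo_ucmd {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32 cqe_size;		/* field added in a newer ABI revision */
	__u32 reserved;
};

static int demo_parse_ucmd(struct ib_udata *udata, struct demo_ucmd *ucmd)
{
	int ret;

	/* zero first so fields old userspace never sent read as 0 */
	memset(ucmd, 0, sizeof(*ucmd));
	ret = ib_copy_from_udata(ucmd, udata,
				 min(udata->inlen, sizeof(*ucmd)));
	if (ret)
		return ret;

	/* honour the newer field only if userspace actually sent it */
	if (udata->inlen < offsetofend(struct demo_ucmd, cqe_size))
		ucmd->cqe_size = 0;	/* caller then applies a default */

	return 0;
}
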
@@ -454,34 +334,14 @@
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
-	int ret = 0;
 
-	if (hr_dev->hw->destroy_cq) {
-		ret = hr_dev->hw->destroy_cq(ib_cq);
-	} else {
-		hns_roce_free_cq(hr_dev, hr_cq);
-		hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	if (hr_dev->hw->destroy_cq)
+		hr_dev->hw->destroy_cq(ib_cq, udata);
 
-		if (ib_cq->uobject) {
-			ib_umem_release(hr_cq->umem);
-
-			if (hr_cq->db_en == 1)
-				hns_roce_db_unmap_user(
-					to_hr_ucontext(ib_cq->uobject->context),
-					&hr_cq->db);
-		} else {
-			/* Free the buff of stored cq */
-			hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
-						ib_cq->cqe);
-			if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
-				hns_roce_free_db(hr_dev, &hr_cq->db);
-		}
-
-		kfree(hr_cq);
-	}
-
-	return ret;
+	free_cq_buf(hr_dev, hr_cq);
+	free_cq_db(hr_dev, hr_cq, udata);
+	free_cqc(hr_dev, hr_cq);
+	return 0;
 }
-EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
 
@@ -488,17 +348,19 @@
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_cq *ibcq;
 
-	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
-			       cqn & (hr_dev->caps.num_cqs - 1));
-	if (!cq) {
-		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+			 cqn);
 		return;
 	}
 
-	++cq->arm_sn;
-	cq->comp(cq);
+	++hr_cq->arm_sn;
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->comp_handler)
+		ibcq->comp_handler(ibcq, ibcq->cq_context);
 }
-EXPORT_SYMBOL_GPL(hns_roce_cq_completion);
 
@@ -505,32 +367,44 @@
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
-	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_event event;
+	struct ib_cq *ibcq;
 
-	cq = radix_tree_lookup(&cq_table->tree,
-			       cqn & (hr_dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-
-	if (!cq) {
-		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
 		return;
 	}
 
-	cq->event(cq, (enum hns_roce_event)event_type);
+	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+			event_type, cqn);
+		return;
+	}
 
+	atomic_inc(&hr_cq->refcount);
+
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->event_handler) {
+		event.device = ibcq->device;
+		event.element.cq = ibcq;
+		event.event = IB_EVENT_CQ_ERR;
+		ibcq->event_handler(&event, ibcq->cq_context);
+	}
+
+	if (atomic_dec_and_test(&hr_cq->refcount))
+		complete(&hr_cq->free);
 }
-EXPORT_SYMBOL_GPL(hns_roce_cq_event);
 
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 
-	spin_lock_init(&cq_table->lock);
-	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+	xa_init(&cq_table->array);
 
 	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
 				    hr_dev->caps.num_cqs - 1,
---|