--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -57,17 +57,16 @@
 {
 	return gid_index * hr_dev->caps.num_ports + port;
 }
-EXPORT_SYMBOL_GPL(hns_get_gid_index);
 
 static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
 	u8 phy_port;
 	u32 i = 0;
 
-	if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
+	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;
 
-	for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
+	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];
 
	phy_port = hr_dev->iboe.phy_port[port];
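The open-coded compare and copy above is equivalent to the etherdevice helpers; a minimal sketch of the same logic (illustrative only, not what the patch itself does):

```c
#include <linux/etherdevice.h>

/* Sketch: ether_addr_equal()/ether_addr_copy() express the same
 * ETH_ALEN-sized compare-and-copy without the open-coded loop.
 * Note both helpers assume u16-aligned buffers. */
static int set_mac_sketch(struct hns_roce_dev *hr_dev, u8 port, const u8 *addr)
{
	if (ether_addr_equal(hr_dev->dev_addr[port], addr))
		return 0;

	ether_addr_copy(hr_dev->dev_addr[port], addr);
	return 1;	/* address changed */
}
```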
@@ -78,17 +77,12 @@
 {
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
-	unsigned long flags;
	int ret;
 
	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;
 
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
-
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
 
	return ret;
 }
@@ -96,19 +90,14 @@
 static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
 {
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
-	struct ib_gid_attr zattr = { };
+	struct ib_gid_attr zattr = {};
	u8 port = attr->port_num - 1;
-	unsigned long flags;
	int ret;
 
	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;
 
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
-
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
 
	return ret;
 }
@@ -122,7 +111,7 @@
 
	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
-		dev_err(dev, "port(%d) can't find netdev\n", port);
+		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}
 
@@ -152,8 +141,8 @@
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
-	u8 port = 0;
-	int ret = 0;
+	int ret;
+	u8 port;
 
	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;
| 196 | 185 | |
|---|
| 197 | 186 | memset(props, 0, sizeof(*props)); |
|---|
| 198 | 187 | |
|---|
| 188 | + props->fw_ver = hr_dev->caps.fw_ver; |
|---|
| 199 | 189 | props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid); |
|---|
| 200 | 190 | props->max_mr_size = (u64)(~(0ULL)); |
|---|
| 201 | 191 | props->page_size_cap = hr_dev->caps.page_size_cap; |
|---|
| .. | .. |
|---|
| 215 | 205 | props->max_pd = hr_dev->caps.num_pds; |
|---|
| 216 | 206 | props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma; |
|---|
| 217 | 207 | props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma; |
|---|
| 218 | | - props->atomic_cap = IB_ATOMIC_NONE; |
|---|
| 208 | + props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ? |
|---|
| 209 | + IB_ATOMIC_HCA : IB_ATOMIC_NONE; |
|---|
| 219 | 210 | props->max_pkeys = 1; |
|---|
| 220 | 211 | props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; |
|---|
| 212 | + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { |
|---|
| 213 | + props->max_srq = hr_dev->caps.num_srqs; |
|---|
| 214 | + props->max_srq_wr = hr_dev->caps.max_srq_wrs; |
|---|
| 215 | + props->max_srq_sge = hr_dev->caps.max_srq_sges; |
|---|
| 216 | + } |
|---|
| 217 | + |
|---|
| 218 | + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { |
|---|
| 219 | + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
|---|
| 220 | + props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA; |
|---|
| 221 | + } |
|---|
| 221 | 222 | |
|---|
| 222 | 223 | return 0; |
|---|
| 223 | | -} |
|---|
| 224 | | - |
|---|
| 225 | | -static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, |
|---|
| 226 | | - u8 port_num) |
|---|
| 227 | | -{ |
|---|
| 228 | | - struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); |
|---|
| 229 | | - struct net_device *ndev; |
|---|
| 230 | | - |
|---|
| 231 | | - if (port_num < 1 || port_num > hr_dev->caps.num_ports) |
|---|
| 232 | | - return NULL; |
|---|
| 233 | | - |
|---|
| 234 | | - rcu_read_lock(); |
|---|
| 235 | | - |
|---|
| 236 | | - ndev = hr_dev->iboe.netdevs[port_num - 1]; |
|---|
| 237 | | - if (ndev) |
|---|
| 238 | | - dev_hold(ndev); |
|---|
| 239 | | - |
|---|
| 240 | | - rcu_read_unlock(); |
|---|
| 241 | | - return ndev; |
|---|
| 242 | 224 | } |
|---|
| 243 | 225 | |
|---|
| 244 | 226 | static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, |
|---|
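The new attributes are visible through the normal query path; a minimal user-space probe using libibverbs (illustrative sketch; device selection and error handling are elided for brevity):

```c
#include <stdio.h>
#include <infiniband/verbs.h>

/* Sketch: read back the attributes hns_roce_query_device() now fills in. */
int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx = ibv_open_device(list[0]);
	struct ibv_device_attr attr;

	if (!ibv_query_device(ctx, &attr))
		printf("fw %s atomic %d max_srq %d max_srq_wr %d max_srq_sge %d\n",
		       attr.fw_ver, attr.atomic_cap, attr.max_srq,
		       attr.max_srq_wr, attr.max_srq_sge);

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}
```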
@@ -251,7 +233,6 @@
	enum ib_mtu mtu;
	u8 port;
 
-	assert(port_num > 0);
	port = port_num - 1;
 
	/* props being zeroed by the caller, avoid zeroing it here */
@@ -271,15 +252,18 @@
	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-		dev_err(dev, "find netdev %d failed!\r\n", port);
+		dev_err(dev, "Find netdev %u failed!\n", port);
		return -EINVAL;
	}
 
	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
-	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
-			IB_PORT_ACTIVE : IB_PORT_DOWN;
-	props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
+		       IB_PORT_ACTIVE :
+		       IB_PORT_DOWN;
+	props->phys_state = props->state == IB_PORT_ACTIVE ?
+			    IB_PORT_PHYS_STATE_LINK_UP :
+			    IB_PORT_PHYS_STATE_DISABLED;
 
	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
 
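For reference, the named constants that replace the magic 5 and 3 come from the ib_port_phys_state enum in include/rdma/ib_verbs.h, which encodes the same IBTA physical-state values the driver used to hard-code:

```c
/* From include/rdma/ib_verbs.h: the named constants keep the same
 * on-the-wire values the driver previously open-coded. */
enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};
```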
@@ -320,107 +304,49 @@
	return 0;
 }
 
-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
-				struct ib_port_modify *props)
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+				   struct ib_udata *udata)
 {
-	return 0;
-}
-
-static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
-						   struct ib_udata *udata)
-{
-	int ret = 0;
-	struct hns_roce_ucontext *context;
+	int ret;
+	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
 
	if (!hr_dev->active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 
	resp.qp_tab_size = hr_dev->caps.num_qps;
-
-	context = kmalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
 
	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;
 
-	INIT_LIST_HEAD(&context->vma_list);
-	mutex_init(&context->vma_list_mutex);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}
 
-	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	resp.cqe_size = hr_dev->caps.cqe_sz;
+
+	ret = ib_copy_to_udata(udata, &resp,
+			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;
 
-	return &context->ibucontext;
+	return 0;
 
 error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);
 
 error_fail_uar_alloc:
-	kfree(context);
-
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 
	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
-	kfree(context);
-
-	return 0;
-}
-
-static void hns_roce_vma_open(struct vm_area_struct *vma)
-{
-	vma->vm_ops = NULL;
-}
-
-static void hns_roce_vma_close(struct vm_area_struct *vma)
-{
-	struct hns_roce_vma_data *vma_data;
-
-	vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
-	vma_data->vma = NULL;
-	mutex_lock(vma_data->vma_list_mutex);
-	list_del(&vma_data->list);
-	mutex_unlock(vma_data->vma_list_mutex);
-	kfree(vma_data);
-}
-
-static const struct vm_operations_struct hns_roce_vm_ops = {
-	.open = hns_roce_vma_open,
-	.close = hns_roce_vma_close,
-};
-
-static int hns_roce_set_vma_data(struct vm_area_struct *vma,
-				 struct hns_roce_ucontext *context)
-{
-	struct list_head *vma_head = &context->vma_list;
-	struct hns_roce_vma_data *vma_data;
-
-	vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
-	if (!vma_data)
-		return -ENOMEM;
-
-	vma_data->vma = vma;
-	vma_data->vma_list_mutex = &context->vma_list_mutex;
-	vma->vm_private_data = vma_data;
-	vma->vm_ops = &hns_roce_vm_ops;
-
-	mutex_lock(&context->vma_list_mutex);
-	list_add(&vma_data->list, vma_head);
-	mutex_unlock(&context->vma_list_mutex);
-
-	return 0;
 }
 
 static int hns_roce_mmap(struct ib_ucontext *context,
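Note the allocation-model change in the hunk above: the context is no longer kmalloc'ed by the driver but embedded in a core-allocated object (see INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext) in the ops table further down), so the handlers only recover their wrapper. The presumed shape of the helper, defined in the driver's header:

```c
/* With core-allocated objects, recovering the driver wrapper is a
 * plain container_of() on the embedded ib_ucontext. */
static inline struct hns_roce_ucontext
		*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}
```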
@@ -428,27 +354,31 @@
 {
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
 
-	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
-		return -EINVAL;
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_hr_ucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_device(vma->vm_page_prot),
+					 NULL);
 
-	if (vma->vm_pgoff == 0) {
-		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_hr_ucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-	} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
-		   hr_dev->tptr_size) {
-		/* vm_pgoff: 1 -- TPTR */
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
-				       hr_dev->tptr_size,
-				       vma->vm_page_prot))
-			return -EAGAIN;
-	} else
-		return -EINVAL;
+	/* vm_pgoff: 1 -- TPTR */
+	case 1:
+		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+			return -EINVAL;
+		/*
+		 * FIXME: using io_remap_pfn_range on the dma address returned
+		 * by dma_alloc_coherent is totally wrong.
+		 */
+		return rdma_user_mmap_io(context, vma,
+					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+					 hr_dev->tptr_size,
+					 vma->vm_page_prot,
+					 NULL);
 
-	return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
+	default:
+		return -EINVAL;
+	}
 }
 
 static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
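rdma_user_mmap_io() now does the vma bookkeeping (and the zapping on disassociation) that the hand-rolled vma list used to provide. From user space the offsets are unchanged: vm_pgoff is in units of pages, so a provider library would map the two regions roughly like this (hypothetical sketch; cmd_fd stands for the opened uverbs file descriptor):

```c
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical provider-side sketch of the two mappings handled above:
 * page offset 0 is the UAR doorbell page, page offset 1 the TPTR page. */
static void *map_uar_and_tptr(int cmd_fd, void **tptr)
{
	long page = sysconf(_SC_PAGESIZE);
	void *uar = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
			 cmd_fd, 0);		/* vm_pgoff == 0 */

	*tptr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
		     cmd_fd, 1 * page);		/* vm_pgoff == 1 */
	return uar;
}
```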
@@ -474,21 +404,6 @@
 
 static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
-	struct hns_roce_vma_data *vma_data, *n;
-	struct vm_area_struct *vma;
-
-	mutex_lock(&context->vma_list_mutex);
-	list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
-		vma = vma_data->vma;
-		zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
-		vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
-		vma->vm_ops = NULL;
-		list_del(&vma_data->list);
-		kfree(vma_data);
-	}
-	mutex_unlock(&context->vma_list_mutex);
 }
 
 static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
@@ -500,28 +415,87 @@
	ib_unregister_device(&hr_dev->ib_dev);
 }
 
+static const struct ib_device_ops hns_roce_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_HNS,
+	.uverbs_abi_ver = 1,
+	.uverbs_no_driver_id_binding = 1,
+
+	.add_gid = hns_roce_add_gid,
+	.alloc_pd = hns_roce_alloc_pd,
+	.alloc_ucontext = hns_roce_alloc_ucontext,
+	.create_ah = hns_roce_create_ah,
+	.create_cq = hns_roce_create_cq,
+	.create_qp = hns_roce_create_qp,
+	.dealloc_pd = hns_roce_dealloc_pd,
+	.dealloc_ucontext = hns_roce_dealloc_ucontext,
+	.del_gid = hns_roce_del_gid,
+	.dereg_mr = hns_roce_dereg_mr,
+	.destroy_ah = hns_roce_destroy_ah,
+	.destroy_cq = hns_roce_destroy_cq,
+	.disassociate_ucontext = hns_roce_disassociate_ucontext,
+	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+	.get_dma_mr = hns_roce_get_dma_mr,
+	.get_link_layer = hns_roce_get_link_layer,
+	.get_port_immutable = hns_roce_port_immutable,
+	.mmap = hns_roce_mmap,
+	.modify_device = hns_roce_modify_device,
+	.modify_qp = hns_roce_modify_qp,
+	.query_ah = hns_roce_query_ah,
+	.query_device = hns_roce_query_device,
+	.query_pkey = hns_roce_query_pkey,
+	.query_port = hns_roce_query_port,
+	.reg_user_mr = hns_roce_reg_user_mr,
+
+	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
+	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
+	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops hns_roce_dev_mr_ops = {
+	.rereg_user_mr = hns_roce_rereg_user_mr,
+};
+
+static const struct ib_device_ops hns_roce_dev_mw_ops = {
+	.alloc_mw = hns_roce_alloc_mw,
+	.dealloc_mw = hns_roce_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
+};
+
+static const struct ib_device_ops hns_roce_dev_frmr_ops = {
+	.alloc_mr = hns_roce_alloc_mr,
+	.map_mr_sg = hns_roce_map_mr_sg,
+};
+
+static const struct ib_device_ops hns_roce_dev_srq_ops = {
+	.create_srq = hns_roce_create_srq,
+	.destroy_srq = hns_roce_destroy_srq,
+
+	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
+};
+
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 {
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
+	unsigned int i;
 
	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);
 
	ib_dev = &hr_dev->ib_dev;
-	strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
 
-	ib_dev->owner			= THIS_MODULE;
-	ib_dev->node_type		= RDMA_NODE_IB_CA;
-	ib_dev->dev.parent		= dev;
+	ib_dev->node_type = RDMA_NODE_IB_CA;
+	ib_dev->dev.parent = dev;
 
-	ib_dev->phys_port_cnt		= hr_dev->caps.num_ports;
-	ib_dev->local_dma_lkey		= hr_dev->caps.reserved_lkey;
-	ib_dev->num_comp_vectors	= hr_dev->caps.num_comp_vectors;
-	ib_dev->uverbs_abi_ver		= 1;
-	ib_dev->uverbs_cmd_mask		=
+	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
@@ -537,62 +511,48 @@
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
 
-	ib_dev->uverbs_ex_cmd_mask |=
-		(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
-
-	/* HCA||device||port */
-	ib_dev->modify_device		= hns_roce_modify_device;
-	ib_dev->query_device		= hns_roce_query_device;
-	ib_dev->query_port		= hns_roce_query_port;
-	ib_dev->modify_port		= hns_roce_modify_port;
-	ib_dev->get_link_layer		= hns_roce_get_link_layer;
-	ib_dev->get_netdev		= hns_roce_get_netdev;
-	ib_dev->add_gid			= hns_roce_add_gid;
-	ib_dev->del_gid			= hns_roce_del_gid;
-	ib_dev->query_pkey		= hns_roce_query_pkey;
-	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
-	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
-	ib_dev->mmap			= hns_roce_mmap;
-
-	/* PD */
-	ib_dev->alloc_pd		= hns_roce_alloc_pd;
-	ib_dev->dealloc_pd		= hns_roce_dealloc_pd;
-
-	/* AH */
-	ib_dev->create_ah		= hns_roce_create_ah;
-	ib_dev->query_ah		= hns_roce_query_ah;
-	ib_dev->destroy_ah		= hns_roce_destroy_ah;
-
-	/* QP */
-	ib_dev->create_qp		= hns_roce_create_qp;
-	ib_dev->modify_qp		= hns_roce_modify_qp;
-	ib_dev->query_qp		= hr_dev->hw->query_qp;
-	ib_dev->destroy_qp		= hr_dev->hw->destroy_qp;
-	ib_dev->post_send		= hr_dev->hw->post_send;
-	ib_dev->post_recv		= hr_dev->hw->post_recv;
-
-	/* CQ */
-	ib_dev->create_cq		= hns_roce_ib_create_cq;
-	ib_dev->modify_cq		= hr_dev->hw->modify_cq;
-	ib_dev->destroy_cq		= hns_roce_ib_destroy_cq;
-	ib_dev->req_notify_cq		= hr_dev->hw->req_notify_cq;
-	ib_dev->poll_cq			= hr_dev->hw->poll_cq;
-
-	/* MR */
-	ib_dev->get_dma_mr		= hns_roce_get_dma_mr;
-	ib_dev->reg_user_mr		= hns_roce_reg_user_mr;
-	ib_dev->dereg_mr		= hns_roce_dereg_mr;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
-		ib_dev->rereg_user_mr	= hns_roce_rereg_user_mr;
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
+		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}
 
-	/* OTHERS */
-	ib_dev->get_port_immutable	= hns_roce_port_immutable;
-	ib_dev->disassociate_ucontext	= hns_roce_disassociate_ucontext;
+	/* MW */
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
+		ib_dev->uverbs_cmd_mask |=
+					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
+					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
+	}
 
-	ib_dev->driver_id = RDMA_DRIVER_HNS;
-	ret = ib_register_device(ib_dev, NULL);
+	/* FRMR */
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
+		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
+
+	/* SRQ */
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+		ib_dev->uverbs_cmd_mask |=
+				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
+				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
+				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
+		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
+	}
+
+	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
+	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+	for (i = 0; i < hr_dev->caps.num_ports; i++) {
+		if (!hr_dev->iboe.netdevs[i])
+			continue;
+
+		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
+					   i + 1);
+		if (ret)
+			return ret;
+	}
+	dma_set_max_seg_size(dev, UINT_MAX);
+	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
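The ordering of the ib_set_device_ops() calls matters: the core helper only fills slots that are still unset, so hr_dev->hw->hns_roce_dev_ops is applied before the generic hns_roce_dev_ops, and for any callback both tables define, the hw-specific version is kept. A reduced model of that first-write-wins copy, patterned on the SET_DEVICE_OP macro in drivers/infiniband/core/device.c:

```c
/* Reduced model of ib_set_device_ops()'s per-field copy: a callback is
 * taken only when the source table provides one and the destination
 * slot is still empty, so the first table to set a slot wins. */
#define SET_DEVICE_OP(dst, src, name)					\
	do {								\
		if ((src)->name && !(dst)->name)			\
			(dst)->name = (src)->name;			\
	} while (0)
```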
@@ -622,38 +582,19 @@
 
 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
-	int ret;
	struct device *dev = hr_dev->dev;
-
-	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
-				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
-				      hr_dev->caps.num_mtt_segs, 1);
-	if (ret) {
-		dev_err(dev, "Failed to init MTT context memory, aborting.\n");
-		return ret;
-	}
-
-	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
-		ret = hns_roce_init_hem_table(hr_dev,
-					      &hr_dev->mr_table.mtt_cqe_table,
-					      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
-					      hr_dev->caps.num_cqe_segs, 1);
-		if (ret) {
-			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
-			goto err_unmap_cqe;
-		}
-	}
+	int ret;
 
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
-		goto err_unmap_mtt;
+		return ret;
	}
 
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
-				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
@@ -679,7 +620,7 @@
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev,
-			 "Failed to init trrl_table memory, aborting.\n");
+			"Failed to init trrl_table memory, aborting.\n");
		goto err_unmap_irrl;
	}
 }
@@ -692,7 +633,71 @@
		goto err_unmap_trrl;
	}
 
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
+					      HEM_TYPE_SRQC,
+					      hr_dev->caps.srqc_entry_sz,
+					      hr_dev->caps.num_srqs, 1);
+		if (ret) {
+			dev_err(dev,
+				"Failed to init SRQ context memory, aborting.\n");
+			goto err_unmap_cq;
+		}
+	}
+
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+		ret = hns_roce_init_hem_table(hr_dev,
+					      &hr_dev->qp_table.sccc_table,
+					      HEM_TYPE_SCCC,
+					      hr_dev->caps.sccc_sz,
+					      hr_dev->caps.num_qps, 1);
+		if (ret) {
+			dev_err(dev,
+				"Failed to init SCC context memory, aborting.\n");
+			goto err_unmap_srq;
+		}
+	}
+
+	if (hr_dev->caps.qpc_timer_entry_sz) {
+		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
+					      HEM_TYPE_QPC_TIMER,
+					      hr_dev->caps.qpc_timer_entry_sz,
+					      hr_dev->caps.num_qpc_timer, 1);
+		if (ret) {
+			dev_err(dev,
+				"Failed to init QPC timer memory, aborting.\n");
+			goto err_unmap_ctx;
+		}
+	}
+
+	if (hr_dev->caps.cqc_timer_entry_sz) {
+		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
+					      HEM_TYPE_CQC_TIMER,
+					      hr_dev->caps.cqc_timer_entry_sz,
+					      hr_dev->caps.num_cqc_timer, 1);
+		if (ret) {
+			dev_err(dev,
+				"Failed to init CQC timer memory, aborting.\n");
+			goto err_unmap_qpc_timer;
+		}
+	}
+
	return 0;
+
+err_unmap_qpc_timer:
+	if (hr_dev->caps.qpc_timer_entry_sz)
+		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
+
+err_unmap_ctx:
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->qp_table.sccc_table);
+err_unmap_srq:
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
+
+err_unmap_cq:
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
 
 err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
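The error ladder keeps the usual kernel shape: each err_* label undoes only the steps that had already succeeded, in reverse order, and conditional setup gets a matching conditional teardown. A self-contained miniature of the pattern:

```c
#include <stdlib.h>

/* Miniature of the unwind ladder above: later failures jump to labels
 * that release earlier successes, newest first. */
static int init_two(void **a, void **b)
{
	*a = malloc(64);
	if (!*a)
		return -1;

	*b = malloc(64);
	if (!*b)
		goto err_free_a;

	return 0;

err_free_a:
	free(*a);
	*a = NULL;
	return -1;
}
```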
@@ -708,14 +713,6 @@
 err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
 
-err_unmap_mtt:
-	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		hns_roce_cleanup_hem_table(hr_dev,
-					   &hr_dev->mr_table.mtt_cqe_table);
-
-err_unmap_cqe:
-	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
-
	return ret;
 }
 
@@ -726,8 +723,8 @@
  */
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
-	int ret;
	struct device *dev = hr_dev->dev;
+	int ret;
 
	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -773,7 +770,19 @@
		goto err_cq_table_free;
	}
 
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+		ret = hns_roce_init_srq_table(hr_dev);
+		if (ret) {
+			dev_err(dev,
+				"Failed to init share receive queue table.\n");
+			goto err_qp_table_free;
+		}
+	}
+
	return 0;
+
+err_qp_table_free:
+	hns_roce_cleanup_qp_table(hr_dev);
 
 err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);
@@ -792,10 +801,54 @@
	return ret;
 }
 
+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hr_cq->lock, flags);
+	if (cq->comp_handler) {
+		if (!hr_cq->is_armed) {
+			hr_cq->is_armed = 1;
+			list_add_tail(&hr_cq->node, cq_list);
+		}
+	}
+	spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_qp *hr_qp;
+	struct hns_roce_cq *hr_cq;
+	struct list_head cq_list;
+	unsigned long flags_qp;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&cq_list);
+
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+		if (hr_qp->sq.tail != hr_qp->sq.head)
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+	}
+
+	list_for_each_entry(hr_cq, &cq_list, node)
+		hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
-	int ret;
	struct device *dev = hr_dev->dev;
+	int ret;
 
	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
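hns_roce_handle_device_err() is meant for the hw backends: walking qp_list under qp_list_lock, it arms every CQ that still has unpolled work and then fires their completion handlers so consumers can drain after a fatal error. A hypothetical caller sketch (function name and reset hook are assumptions, not the real v2 code):

```c
/* Hypothetical sketch of how a hw backend could use the helper when
 * the function is reset: fail new verbs first, then flush armed CQs. */
static void hns_roce_hw_fatal_err(struct hns_roce_dev *hr_dev)
{
	hr_dev->active = false;			/* reject new ucontexts */
	hns_roce_handle_device_err(hr_dev);	/* wake CQs with pending work */
}
```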
@@ -826,6 +879,7 @@
		goto error_failed_cmd_init;
	}
 
+	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
@@ -835,8 +889,9 @@
	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
-			dev_err(dev, "Switch to event-driven cmd failed!\n");
-			goto error_failed_use_event;
+			dev_warn(dev,
+				 "Cmd event mode failed, set back to poll!\n");
+			hns_roce_cmd_use_polling(hr_dev);
		}
	}
 
@@ -860,6 +915,9 @@
		}
	}
 
+	INIT_LIST_HEAD(&hr_dev->qp_list);
+	spin_lock_init(&hr_dev->qp_list_lock);
+
	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;
@@ -879,8 +937,6 @@
 error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
-
-error_failed_use_event:
	hr_dev->hw->cleanup_eq(hr_dev);
 
 error_failed_eq_table:
@@ -898,7 +954,6 @@
 
	return ret;
 }
-EXPORT_SYMBOL_GPL(hns_roce_init);
 
 void hns_roce_exit(struct hns_roce_dev *hr_dev)
 {
@@ -919,7 +974,6 @@
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
 }
-EXPORT_SYMBOL_GPL(hns_roce_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
|---|