| .. | .. |
|---|
| 50 | 50 | #include <rdma/ib_cache.h> |
|---|
| 51 | 51 | #include <rdma/ib_addr.h> |
|---|
| 52 | 52 | #include <rdma/rw.h> |
|---|
| 53 | +#include <rdma/lag.h> |
|---|
| 53 | 54 | |
|---|
| 54 | 55 | #include "core_priv.h" |
|---|
| 56 | +#include <trace/events/rdma_core.h> |
|---|
| 55 | 57 | |
|---|
| 56 | 58 | static int ib_resolve_eth_dmac(struct ib_device *device, |
|---|
| 57 | 59 | struct rdma_ah_attr *ah_attr); |
|---|
| .. | .. |
|---|
| 141 | 143 | case IB_RATE_100_GBPS: return 40; |
|---|
| 142 | 144 | case IB_RATE_200_GBPS: return 80; |
|---|
| 143 | 145 | case IB_RATE_300_GBPS: return 120; |
|---|
| 146 | + case IB_RATE_28_GBPS: return 11; |
|---|
| 147 | + case IB_RATE_50_GBPS: return 20; |
|---|
| 148 | + case IB_RATE_400_GBPS: return 160; |
|---|
| 149 | + case IB_RATE_600_GBPS: return 240; |
|---|
| 144 | 150 | default: return -1; |
|---|
| 145 | 151 | } |
|---|
| 146 | 152 | } |
|---|
| .. | .. |
|---|
| 166 | 172 | case 40: return IB_RATE_100_GBPS; |
|---|
| 167 | 173 | case 80: return IB_RATE_200_GBPS; |
|---|
| 168 | 174 | case 120: return IB_RATE_300_GBPS; |
|---|
| 175 | + case 11: return IB_RATE_28_GBPS; |
|---|
| 176 | + case 20: return IB_RATE_50_GBPS; |
|---|
| 177 | + case 160: return IB_RATE_400_GBPS; |
|---|
| 178 | + case 240: return IB_RATE_600_GBPS; |
|---|
| 169 | 179 | default: return IB_RATE_PORT_CURRENT; |
|---|
| 170 | 180 | } |
|---|
| 171 | 181 | } |
|---|
| .. | .. |
|---|
| 191 | 201 | case IB_RATE_100_GBPS: return 103125; |
|---|
| 192 | 202 | case IB_RATE_200_GBPS: return 206250; |
|---|
| 193 | 203 | case IB_RATE_300_GBPS: return 309375; |
|---|
| 204 | + case IB_RATE_28_GBPS: return 28125; |
|---|
| 205 | + case IB_RATE_50_GBPS: return 53125; |
|---|
| 206 | + case IB_RATE_400_GBPS: return 425000; |
|---|
| 207 | + case IB_RATE_600_GBPS: return 637500; |
|---|
| 194 | 208 | default: return -1; |
|---|
| 195 | 209 | } |
|---|
| 196 | 210 | } |
|---|
| 197 | 211 | EXPORT_SYMBOL(ib_rate_to_mbps); |
|---|
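A note on the new entries: the multiplier tables use the nominal rate divided by the 2.5 Gb/s IB base (28/2.5 truncates to 11, 400/2.5 = 160), while ib_rate_to_mbps() reports the effective signaling rate (53125 for 50 Gb/s, 425000 for 400 Gb/s). A minimal sketch of the intended round-trip, assuming kernel context with <rdma/ib_verbs.h>; the harness itself is illustrative and not part of the patch:

```c
#include <rdma/ib_verbs.h>

/* Illustrative check that the three tables above stay consistent for one
 * of the newly added speeds. */
static void rate_table_example(void)
{
	enum ib_rate rate = IB_RATE_400_GBPS;
	int mult = ib_rate_to_mult(rate);	/* 400 / 2.5 = 160 */

	WARN_ON(mult_to_ib_rate(mult) != rate);	/* 160 maps back to 400G */
	pr_info("wire rate: %d Mb/s\n", ib_rate_to_mbps(rate)); /* 425000 */
}
```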
| 198 | 212 | |
|---|
| 199 | 213 | __attribute_const__ enum rdma_transport_type |
|---|
| 200 | | -rdma_node_get_transport(enum rdma_node_type node_type) |
|---|
| 214 | +rdma_node_get_transport(unsigned int node_type) |
|---|
| 201 | 215 | { |
|---|
| 202 | 216 | |
|---|
| 203 | 217 | if (node_type == RDMA_NODE_USNIC) |
|---|
| .. | .. |
|---|
| 206 | 220 | return RDMA_TRANSPORT_USNIC_UDP; |
|---|
| 207 | 221 | if (node_type == RDMA_NODE_RNIC) |
|---|
| 208 | 222 | return RDMA_TRANSPORT_IWARP; |
|---|
| 223 | + if (node_type == RDMA_NODE_UNSPECIFIED) |
|---|
| 224 | + return RDMA_TRANSPORT_UNSPECIFIED; |
|---|
| 209 | 225 | |
|---|
| 210 | 226 | return RDMA_TRANSPORT_IB; |
|---|
| 211 | 227 | } |
|---|
| .. | .. |
|---|
| 214 | 230 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) |
|---|
| 215 | 231 | { |
|---|
| 216 | 232 | enum rdma_transport_type lt; |
|---|
| 217 | | - if (device->get_link_layer) |
|---|
| 218 | | - return device->get_link_layer(device, port_num); |
|---|
| 233 | + if (device->ops.get_link_layer) |
|---|
| 234 | + return device->ops.get_link_layer(device, port_num); |
|---|
| 219 | 235 | |
|---|
| 220 | 236 | lt = rdma_node_get_transport(device->node_type); |
|---|
| 221 | 237 | if (lt == RDMA_TRANSPORT_IB) |
|---|
| .. | .. |
|---|
| 230 | 246 | /** |
|---|
| 231 | 247 | * ib_alloc_pd - Allocates an unused protection domain. |
|---|
| 232 | 248 | * @device: The device on which to allocate the protection domain. |
|---|
| 249 | + * @flags: protection domain flags |
|---|
| 250 | + * @caller: caller's build-time module name |
|---|
| 233 | 251 | * |
|---|
| 234 | 252 | * A protection domain object provides an association between QPs, shared |
|---|
| 235 | 253 | * receive queues, address handles, memory regions, and memory windows. |
|---|
| .. | .. |
|---|
| 242 | 260 | { |
|---|
| 243 | 261 | struct ib_pd *pd; |
|---|
| 244 | 262 | int mr_access_flags = 0; |
|---|
| 263 | + int ret; |
|---|
| 245 | 264 | |
|---|
| 246 | | - pd = device->alloc_pd(device, NULL, NULL); |
|---|
| 247 | | - if (IS_ERR(pd)) |
|---|
| 248 | | - return pd; |
|---|
| 265 | + pd = rdma_zalloc_drv_obj(device, ib_pd); |
|---|
| 266 | + if (!pd) |
|---|
| 267 | + return ERR_PTR(-ENOMEM); |
|---|
| 249 | 268 | |
|---|
| 250 | 269 | pd->device = device; |
|---|
| 251 | 270 | pd->uobject = NULL; |
|---|
| 252 | 271 | pd->__internal_mr = NULL; |
|---|
| 253 | 272 | atomic_set(&pd->usecnt, 0); |
|---|
| 254 | 273 | pd->flags = flags; |
|---|
| 274 | + |
|---|
| 275 | + rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); |
|---|
| 276 | + rdma_restrack_set_name(&pd->res, caller); |
|---|
| 277 | + |
|---|
| 278 | + ret = device->ops.alloc_pd(pd, NULL); |
|---|
| 279 | + if (ret) { |
|---|
| 280 | + rdma_restrack_put(&pd->res); |
|---|
| 281 | + kfree(pd); |
|---|
| 282 | + return ERR_PTR(ret); |
|---|
| 283 | + } |
|---|
| 284 | + rdma_restrack_add(&pd->res); |
|---|
| 255 | 285 | |
|---|
| 256 | 286 | if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) |
|---|
| 257 | 287 | pd->local_dma_lkey = device->local_dma_lkey; |
|---|
| .. | .. |
|---|
| 263 | 293 | mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; |
|---|
| 264 | 294 | } |
|---|
| 265 | 295 | |
|---|
| 266 | | - pd->res.type = RDMA_RESTRACK_PD; |
|---|
| 267 | | - pd->res.kern_name = caller; |
|---|
| 268 | | - rdma_restrack_add(&pd->res); |
|---|
| 269 | | - |
|---|
| 270 | 296 | if (mr_access_flags) { |
|---|
| 271 | 297 | struct ib_mr *mr; |
|---|
| 272 | 298 | |
|---|
| 273 | | - mr = pd->device->get_dma_mr(pd, mr_access_flags); |
|---|
| 299 | + mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); |
|---|
| 274 | 300 | if (IS_ERR(mr)) { |
|---|
| 275 | 301 | ib_dealloc_pd(pd); |
|---|
| 276 | 302 | return ERR_CAST(mr); |
|---|
| .. | .. |
|---|
| 278 | 304 | |
|---|
| 279 | 305 | mr->device = pd->device; |
|---|
| 280 | 306 | mr->pd = pd; |
|---|
| 307 | + mr->type = IB_MR_TYPE_DMA; |
|---|
| 281 | 308 | mr->uobject = NULL; |
|---|
| 282 | 309 | mr->need_inval = false; |
|---|
| 283 | 310 | |
|---|
| .. | .. |
|---|
| 295 | 322 | EXPORT_SYMBOL(__ib_alloc_pd); |
|---|
| 296 | 323 | |
|---|
| 297 | 324 | /** |
|---|
| 298 | | - * ib_dealloc_pd - Deallocates a protection domain. |
|---|
| 325 | + * ib_dealloc_pd_user - Deallocates a protection domain. |
|---|
| 299 | 326 | * @pd: The protection domain to deallocate. |
|---|
| 327 | + * @udata: Valid user data or NULL for kernel object |
|---|
| 300 | 328 | * |
|---|
| 301 | 329 | * It is an error to call this function while any resources in the pd still |
|---|
| 302 | 330 | * exist. The caller is responsible to synchronously destroy them and |
|---|
| 303 | 331 | * guarantee no new allocations will happen. |
|---|
| 304 | 332 | */ |
|---|
| 305 | | -void ib_dealloc_pd(struct ib_pd *pd) |
|---|
| 333 | +int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata) |
|---|
| 306 | 334 | { |
|---|
| 307 | 335 | int ret; |
|---|
| 308 | 336 | |
|---|
| 309 | 337 | if (pd->__internal_mr) { |
|---|
| 310 | | - ret = pd->device->dereg_mr(pd->__internal_mr); |
|---|
| 338 | + ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); |
|---|
| 311 | 339 | WARN_ON(ret); |
|---|
| 312 | 340 | pd->__internal_mr = NULL; |
|---|
| 313 | 341 | } |
|---|
| .. | .. |
|---|
| 316 | 344 | requires the caller to guarantee we can't race here. */ |
|---|
| 317 | 345 | WARN_ON(atomic_read(&pd->usecnt)); |
|---|
| 318 | 346 | |
|---|
| 347 | + ret = pd->device->ops.dealloc_pd(pd, udata); |
|---|
| 348 | + if (ret) |
|---|
| 349 | + return ret; |
|---|
| 350 | + |
|---|
| 319 | 351 | rdma_restrack_del(&pd->res); |
|---|
| 320 | | - /* Making delalloc_pd a void return is a WIP, no driver should return |
|---|
| 321 | | - an error here. */ |
|---|
| 322 | | - ret = pd->device->dealloc_pd(pd); |
|---|
| 323 | | - WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); |
|---|
| 352 | + kfree(pd); |
|---|
| 353 | + return ret; |
|---|
| 324 | 354 | } |
|---|
| 325 | | -EXPORT_SYMBOL(ib_dealloc_pd); |
|---|
| 355 | +EXPORT_SYMBOL(ib_dealloc_pd_user); |
|---|
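At the call sites nothing changes for kernel users: the ib_alloc_pd() macro still expands to __ib_alloc_pd() with KBUILD_MODNAME as the restrack name, and the ib_dealloc_pd() wrapper supplies a NULL udata. A minimal sketch, assuming `dev` is a registered ib_device:

```c
/* Kernel PD lifetime after the allocation rework; error handling is kept
 * minimal for brevity. */
static int pd_example(struct ib_device *dev)
{
	struct ib_pd *pd = ib_alloc_pd(dev, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	/* ... create CQs/QPs/MRs against pd ... */
	ib_dealloc_pd(pd);	/* kernel wrapper, passes udata == NULL */
	return 0;
}
```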
| 326 | 356 | |
|---|
| 327 | 357 | /* Address handles */ |
|---|
| 328 | 358 | |
|---|
| .. | .. |
|---|
| 475 | 505 | |
|---|
| 476 | 506 | static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, |
|---|
| 477 | 507 | struct rdma_ah_attr *ah_attr, |
|---|
| 478 | | - struct ib_udata *udata) |
|---|
| 508 | + u32 flags, |
|---|
| 509 | + struct ib_udata *udata, |
|---|
| 510 | + struct net_device *xmit_slave) |
|---|
| 479 | 511 | { |
|---|
| 512 | + struct rdma_ah_init_attr init_attr = {}; |
|---|
| 513 | + struct ib_device *device = pd->device; |
|---|
| 480 | 514 | struct ib_ah *ah; |
|---|
| 515 | + int ret; |
|---|
| 481 | 516 | |
|---|
| 482 | | - if (!pd->device->create_ah) |
|---|
| 517 | + might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE); |
|---|
| 518 | + |
|---|
| 519 | + if (!device->ops.create_ah) |
|---|
| 483 | 520 | return ERR_PTR(-EOPNOTSUPP); |
|---|
| 484 | 521 | |
|---|
| 485 | | - ah = pd->device->create_ah(pd, ah_attr, udata); |
|---|
| 522 | + ah = rdma_zalloc_drv_obj_gfp( |
|---|
| 523 | + device, ib_ah, |
|---|
| 524 | + (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC); |
|---|
| 525 | + if (!ah) |
|---|
| 526 | + return ERR_PTR(-ENOMEM); |
|---|
| 486 | 527 | |
|---|
| 487 | | - if (!IS_ERR(ah)) { |
|---|
| 488 | | - ah->device = pd->device; |
|---|
| 489 | | - ah->pd = pd; |
|---|
| 490 | | - ah->uobject = NULL; |
|---|
| 491 | | - ah->type = ah_attr->type; |
|---|
| 492 | | - ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); |
|---|
| 528 | + ah->device = device; |
|---|
| 529 | + ah->pd = pd; |
|---|
| 530 | + ah->type = ah_attr->type; |
|---|
| 531 | + ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); |
|---|
| 532 | + init_attr.ah_attr = ah_attr; |
|---|
| 533 | + init_attr.flags = flags; |
|---|
| 534 | + init_attr.xmit_slave = xmit_slave; |
|---|
| 493 | 535 | |
|---|
| 494 | | - atomic_inc(&pd->usecnt); |
|---|
| 536 | + ret = device->ops.create_ah(ah, &init_attr, udata); |
|---|
| 537 | + if (ret) { |
|---|
| 538 | + if (ah->sgid_attr) |
|---|
| 539 | + rdma_put_gid_attr(ah->sgid_attr); |
|---|
| 540 | + kfree(ah); |
|---|
| 541 | + return ERR_PTR(ret); |
|---|
| 495 | 542 | } |
|---|
| 496 | 543 | |
|---|
| 544 | + atomic_inc(&pd->usecnt); |
|---|
| 497 | 545 | return ah; |
|---|
| 498 | 546 | } |
|---|
| 499 | 547 | |
|---|
| .. | .. |
|---|
| 502 | 550 | * given address vector. |
|---|
| 503 | 551 | * @pd: The protection domain associated with the address handle. |
|---|
| 504 | 552 | * @ah_attr: The attributes of the address vector. |
|---|
| 553 | + * @flags: Create address handle flags (see enum rdma_create_ah_flags). |
|---|
| 505 | 554 | * |
|---|
| 506 | 555 | * It returns 0 on success and returns appropriate error code on error. |
|---|
| 507 | 556 | * The address handle is used to reference a local or global destination |
|---|
| 508 | 557 | * in all UD QP post sends. |
|---|
| 509 | 558 | */ |
|---|
| 510 | | -struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr) |
|---|
| 559 | +struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
|---|
| 560 | + u32 flags) |
|---|
| 511 | 561 | { |
|---|
| 512 | 562 | const struct ib_gid_attr *old_sgid_attr; |
|---|
| 563 | + struct net_device *slave; |
|---|
| 513 | 564 | struct ib_ah *ah; |
|---|
| 514 | 565 | int ret; |
|---|
| 515 | 566 | |
|---|
| 516 | 567 | ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); |
|---|
| 517 | 568 | if (ret) |
|---|
| 518 | 569 | return ERR_PTR(ret); |
|---|
| 519 | | - |
|---|
| 520 | | - ah = _rdma_create_ah(pd, ah_attr, NULL); |
|---|
| 521 | | - |
|---|
| 570 | + slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, |
|---|
| 571 | + (flags & RDMA_CREATE_AH_SLEEPABLE) ? |
|---|
| 572 | + GFP_KERNEL : GFP_ATOMIC); |
|---|
| 573 | + if (IS_ERR(slave)) { |
|---|
| 574 | + rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); |
|---|
| 575 | + return (void *)slave; |
|---|
| 576 | + } |
|---|
| 577 | + ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave); |
|---|
| 578 | + rdma_lag_put_ah_roce_slave(slave); |
|---|
| 522 | 579 | rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); |
|---|
| 523 | 580 | return ah; |
|---|
| 524 | 581 | } |
|---|
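The new flags argument lets atomic contexts (e.g. CM MAD handlers) forbid sleeping: when RDMA_CREATE_AH_SLEEPABLE is absent, both the object allocation and the LAG slave lookup use GFP_ATOMIC. A hedged sketch of the sleepable path, assuming `pd` and a fully resolved `ah_attr` already exist:

```c
/* Sketch: create an AH from process context where sleeping is allowed. */
static struct ib_ah *ah_example(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	return rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE);
}
```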
| .. | .. |
|---|
| 557 | 614 | } |
|---|
| 558 | 615 | } |
|---|
| 559 | 616 | |
|---|
| 560 | | - ah = _rdma_create_ah(pd, ah_attr, udata); |
|---|
| 617 | + ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, |
|---|
| 618 | + udata, NULL); |
|---|
| 561 | 619 | |
|---|
| 562 | 620 | out: |
|---|
| 563 | 621 | rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); |
|---|
| .. | .. |
|---|
| 628 | 686 | void *context) |
|---|
| 629 | 687 | { |
|---|
| 630 | 688 | struct find_gid_index_context *ctx = context; |
|---|
| 689 | + u16 vlan_id = 0xffff; |
|---|
| 690 | + int ret; |
|---|
| 631 | 691 | |
|---|
| 632 | 692 | if (ctx->gid_type != gid_attr->gid_type) |
|---|
| 633 | 693 | return false; |
|---|
| 634 | 694 | |
|---|
| 635 | | - if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) || |
|---|
| 636 | | - (is_vlan_dev(gid_attr->ndev) && |
|---|
| 637 | | - vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)) |
|---|
| 695 | + ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); |
|---|
| 696 | + if (ret) |
|---|
| 638 | 697 | return false; |
|---|
| 639 | 698 | |
|---|
| 640 | | - return true; |
|---|
| 699 | + return ctx->vlan_id == vlan_id; |
|---|
| 641 | 700 | } |
|---|
| 642 | 701 | |
|---|
| 643 | 702 | static const struct ib_gid_attr * |
|---|
| .. | .. |
|---|
| 676 | 735 | (struct in6_addr *)dgid); |
|---|
| 677 | 736 | return 0; |
|---|
| 678 | 737 | } else if (net_type == RDMA_NETWORK_IPV6 || |
|---|
| 679 | | - net_type == RDMA_NETWORK_IB) { |
|---|
| 738 | + net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) { |
|---|
| 680 | 739 | *dgid = hdr->ibgrh.dgid; |
|---|
| 681 | 740 | *sgid = hdr->ibgrh.sgid; |
|---|
| 682 | 741 | return 0; |
|---|
| .. | .. |
|---|
| 710 | 769 | |
|---|
| 711 | 770 | ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid, |
|---|
| 712 | 771 | ah_attr->roce.dmac, |
|---|
| 713 | | - sgid_attr->ndev, &hop_limit); |
|---|
| 772 | + sgid_attr, &hop_limit); |
|---|
| 714 | 773 | |
|---|
| 715 | 774 | grh->hop_limit = hop_limit; |
|---|
| 716 | 775 | return ret; |
|---|
| .. | .. |
|---|
| 869 | 928 | if (ret) |
|---|
| 870 | 929 | return ERR_PTR(ret); |
|---|
| 871 | 930 | |
|---|
| 872 | | - ah = rdma_create_ah(pd, &ah_attr); |
|---|
| 931 | + ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE); |
|---|
| 873 | 932 | |
|---|
| 874 | 933 | rdma_destroy_ah_attr(&ah_attr); |
|---|
| 875 | 934 | return ah; |
|---|
| .. | .. |
|---|
| 888 | 947 | if (ret) |
|---|
| 889 | 948 | return ret; |
|---|
| 890 | 949 | |
|---|
| 891 | | - ret = ah->device->modify_ah ? |
|---|
| 892 | | - ah->device->modify_ah(ah, ah_attr) : |
|---|
| 950 | + ret = ah->device->ops.modify_ah ? |
|---|
| 951 | + ah->device->ops.modify_ah(ah, ah_attr) : |
|---|
| 893 | 952 | -EOPNOTSUPP; |
|---|
| 894 | 953 | |
|---|
| 895 | 954 | ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr); |
|---|
| .. | .. |
|---|
| 902 | 961 | { |
|---|
| 903 | 962 | ah_attr->grh.sgid_attr = NULL; |
|---|
| 904 | 963 | |
|---|
| 905 | | - return ah->device->query_ah ? |
|---|
| 906 | | - ah->device->query_ah(ah, ah_attr) : |
|---|
| 964 | + return ah->device->ops.query_ah ? |
|---|
| 965 | + ah->device->ops.query_ah(ah, ah_attr) : |
|---|
| 907 | 966 | -EOPNOTSUPP; |
|---|
| 908 | 967 | } |
|---|
| 909 | 968 | EXPORT_SYMBOL(rdma_query_ah); |
|---|
| 910 | 969 | |
|---|
| 911 | | -int rdma_destroy_ah(struct ib_ah *ah) |
|---|
| 970 | +int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata) |
|---|
| 912 | 971 | { |
|---|
| 913 | 972 | const struct ib_gid_attr *sgid_attr = ah->sgid_attr; |
|---|
| 914 | 973 | struct ib_pd *pd; |
|---|
| 915 | 974 | int ret; |
|---|
| 916 | 975 | |
|---|
| 917 | | - pd = ah->pd; |
|---|
| 918 | | - ret = ah->device->destroy_ah(ah); |
|---|
| 919 | | - if (!ret) { |
|---|
| 920 | | - atomic_dec(&pd->usecnt); |
|---|
| 921 | | - if (sgid_attr) |
|---|
| 922 | | - rdma_put_gid_attr(sgid_attr); |
|---|
| 923 | | - } |
|---|
| 976 | + might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE); |
|---|
| 924 | 977 | |
|---|
| 978 | + pd = ah->pd; |
|---|
| 979 | + |
|---|
| 980 | + ret = ah->device->ops.destroy_ah(ah, flags); |
|---|
| 981 | + if (ret) |
|---|
| 982 | + return ret; |
|---|
| 983 | + |
|---|
| 984 | + atomic_dec(&pd->usecnt); |
|---|
| 985 | + if (sgid_attr) |
|---|
| 986 | + rdma_put_gid_attr(sgid_attr); |
|---|
| 987 | + |
|---|
| 988 | + kfree(ah); |
|---|
| 925 | 989 | return ret; |
|---|
| 926 | 990 | } |
|---|
| 927 | | -EXPORT_SYMBOL(rdma_destroy_ah); |
|---|
| 991 | +EXPORT_SYMBOL(rdma_destroy_ah_user); |
|---|
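The destroy side mirrors create: kernel callers use the rdma_destroy_ah() wrapper, which passes the same sleepability flags and a NULL udata. Conceptually the wrapper in <rdma/ib_verbs.h> amounts to the following (a sketch, shown for context):

```c
static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return rdma_destroy_ah_user(ah, flags, NULL);
}
```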
| 928 | 992 | |
|---|
| 929 | 993 | /* Shared receive queues */ |
|---|
| 930 | 994 | |
|---|
| 931 | | -struct ib_srq *ib_create_srq(struct ib_pd *pd, |
|---|
| 932 | | - struct ib_srq_init_attr *srq_init_attr) |
|---|
| 995 | +/** |
|---|
| 996 | + * ib_create_srq_user - Creates a SRQ associated with the specified protection |
|---|
| 997 | + * domain. |
|---|
| 998 | + * @pd: The protection domain associated with the SRQ. |
|---|
| 999 | + * @srq_init_attr: A list of initial attributes required to create the |
|---|
| 1000 | + * SRQ. If SRQ creation succeeds, then the attributes are updated to |
|---|
| 1001 | + * the actual capabilities of the created SRQ. |
|---|
| 1002 | + * @uobject: uobject pointer if this is not a kernel SRQ |
|---|
| 1003 | + * @udata: udata pointer if this is not a kernel SRQ |
|---|
| 1004 | + * |
|---|
| 1005 | + * srq_attr->max_wr and srq_attr->max_sge are read to determine the |
|---|
| 1006 | + * requested size of the SRQ, and set to the actual values allocated |
|---|
| 1007 | + * on return. If ib_create_srq() succeeds, then max_wr and max_sge |
|---|
| 1008 | + * will always be at least as large as the requested values. |
|---|
| 1009 | + */ |
|---|
| 1010 | +struct ib_srq *ib_create_srq_user(struct ib_pd *pd, |
|---|
| 1011 | + struct ib_srq_init_attr *srq_init_attr, |
|---|
| 1012 | + struct ib_usrq_object *uobject, |
|---|
| 1013 | + struct ib_udata *udata) |
|---|
| 933 | 1014 | { |
|---|
| 934 | 1015 | struct ib_srq *srq; |
|---|
| 1016 | + int ret; |
|---|
| 935 | 1017 | |
|---|
| 936 | | - if (!pd->device->create_srq) |
|---|
| 937 | | - return ERR_PTR(-EOPNOTSUPP); |
|---|
| 1018 | + srq = rdma_zalloc_drv_obj(pd->device, ib_srq); |
|---|
| 1019 | + if (!srq) |
|---|
| 1020 | + return ERR_PTR(-ENOMEM); |
|---|
| 938 | 1021 | |
|---|
| 939 | | - srq = pd->device->create_srq(pd, srq_init_attr, NULL); |
|---|
| 1022 | + srq->device = pd->device; |
|---|
| 1023 | + srq->pd = pd; |
|---|
| 1024 | + srq->event_handler = srq_init_attr->event_handler; |
|---|
| 1025 | + srq->srq_context = srq_init_attr->srq_context; |
|---|
| 1026 | + srq->srq_type = srq_init_attr->srq_type; |
|---|
| 1027 | + srq->uobject = uobject; |
|---|
| 940 | 1028 | |
|---|
| 941 | | - if (!IS_ERR(srq)) { |
|---|
| 942 | | - srq->device = pd->device; |
|---|
| 943 | | - srq->pd = pd; |
|---|
| 944 | | - srq->uobject = NULL; |
|---|
| 945 | | - srq->event_handler = srq_init_attr->event_handler; |
|---|
| 946 | | - srq->srq_context = srq_init_attr->srq_context; |
|---|
| 947 | | - srq->srq_type = srq_init_attr->srq_type; |
|---|
| 948 | | - if (ib_srq_has_cq(srq->srq_type)) { |
|---|
| 949 | | - srq->ext.cq = srq_init_attr->ext.cq; |
|---|
| 950 | | - atomic_inc(&srq->ext.cq->usecnt); |
|---|
| 951 | | - } |
|---|
| 952 | | - if (srq->srq_type == IB_SRQT_XRC) { |
|---|
| 953 | | - srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; |
|---|
| 954 | | - atomic_inc(&srq->ext.xrc.xrcd->usecnt); |
|---|
| 955 | | - } |
|---|
| 956 | | - atomic_inc(&pd->usecnt); |
|---|
| 957 | | - atomic_set(&srq->usecnt, 0); |
|---|
| 1029 | + if (ib_srq_has_cq(srq->srq_type)) { |
|---|
| 1030 | + srq->ext.cq = srq_init_attr->ext.cq; |
|---|
| 1031 | + atomic_inc(&srq->ext.cq->usecnt); |
|---|
| 1032 | + } |
|---|
| 1033 | + if (srq->srq_type == IB_SRQT_XRC) { |
|---|
| 1034 | + srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; |
|---|
| 1035 | + atomic_inc(&srq->ext.xrc.xrcd->usecnt); |
|---|
| 1036 | + } |
|---|
| 1037 | + atomic_inc(&pd->usecnt); |
|---|
| 1038 | + |
|---|
| 1039 | + ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); |
|---|
| 1040 | + if (ret) { |
|---|
| 1041 | + atomic_dec(&srq->pd->usecnt); |
|---|
| 1042 | + if (srq->srq_type == IB_SRQT_XRC) |
|---|
| 1043 | + atomic_dec(&srq->ext.xrc.xrcd->usecnt); |
|---|
| 1044 | + if (ib_srq_has_cq(srq->srq_type)) |
|---|
| 1045 | + atomic_dec(&srq->ext.cq->usecnt); |
|---|
| 1046 | + kfree(srq); |
|---|
| 1047 | + return ERR_PTR(ret); |
|---|
| 958 | 1048 | } |
|---|
| 959 | 1049 | |
|---|
| 960 | 1050 | return srq; |
|---|
| 961 | 1051 | } |
|---|
| 962 | | -EXPORT_SYMBOL(ib_create_srq); |
|---|
| 1052 | +EXPORT_SYMBOL(ib_create_srq_user); |
|---|
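Kernel ULPs keep calling the short-form ib_create_srq(), which forwards NULL for both uobject and udata. A minimal sketch with arbitrary sizes, assuming a valid `pd`:

```c
/* Sketch: basic kernel SRQ; on success init_attr.attr reflects the
 * capabilities actually granted by the device. */
static struct ib_srq *srq_example(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr		= { .max_wr = 128, .max_sge = 2 },
		.srq_type	= IB_SRQT_BASIC,
	};

	return ib_create_srq(pd, &init_attr);
}
```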
| 963 | 1053 | |
|---|
| 964 | 1054 | int ib_modify_srq(struct ib_srq *srq, |
|---|
| 965 | 1055 | struct ib_srq_attr *srq_attr, |
|---|
| 966 | 1056 | enum ib_srq_attr_mask srq_attr_mask) |
|---|
| 967 | 1057 | { |
|---|
| 968 | | - return srq->device->modify_srq ? |
|---|
| 969 | | - srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : |
|---|
| 970 | | - -EOPNOTSUPP; |
|---|
| 1058 | + return srq->device->ops.modify_srq ? |
|---|
| 1059 | + srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, |
|---|
| 1060 | + NULL) : -EOPNOTSUPP; |
|---|
| 971 | 1061 | } |
|---|
| 972 | 1062 | EXPORT_SYMBOL(ib_modify_srq); |
|---|
| 973 | 1063 | |
|---|
| 974 | 1064 | int ib_query_srq(struct ib_srq *srq, |
|---|
| 975 | 1065 | struct ib_srq_attr *srq_attr) |
|---|
| 976 | 1066 | { |
|---|
| 977 | | - return srq->device->query_srq ? |
|---|
| 978 | | - srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP; |
|---|
| 1067 | + return srq->device->ops.query_srq ? |
|---|
| 1068 | + srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; |
|---|
| 979 | 1069 | } |
|---|
| 980 | 1070 | EXPORT_SYMBOL(ib_query_srq); |
|---|
| 981 | 1071 | |
|---|
| 982 | | -int ib_destroy_srq(struct ib_srq *srq) |
|---|
| 1072 | +int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) |
|---|
| 983 | 1073 | { |
|---|
| 984 | | - struct ib_pd *pd; |
|---|
| 985 | | - enum ib_srq_type srq_type; |
|---|
| 986 | | - struct ib_xrcd *uninitialized_var(xrcd); |
|---|
| 987 | | - struct ib_cq *uninitialized_var(cq); |
|---|
| 988 | 1074 | int ret; |
|---|
| 989 | 1075 | |
|---|
| 990 | 1076 | if (atomic_read(&srq->usecnt)) |
|---|
| 991 | 1077 | return -EBUSY; |
|---|
| 992 | 1078 | |
|---|
| 993 | | - pd = srq->pd; |
|---|
| 994 | | - srq_type = srq->srq_type; |
|---|
| 995 | | - if (ib_srq_has_cq(srq_type)) |
|---|
| 996 | | - cq = srq->ext.cq; |
|---|
| 997 | | - if (srq_type == IB_SRQT_XRC) |
|---|
| 998 | | - xrcd = srq->ext.xrc.xrcd; |
|---|
| 1079 | + ret = srq->device->ops.destroy_srq(srq, udata); |
|---|
| 1080 | + if (ret) |
|---|
| 1081 | + return ret; |
|---|
| 999 | 1082 | |
|---|
| 1000 | | - ret = srq->device->destroy_srq(srq); |
|---|
| 1001 | | - if (!ret) { |
|---|
| 1002 | | - atomic_dec(&pd->usecnt); |
|---|
| 1003 | | - if (srq_type == IB_SRQT_XRC) |
|---|
| 1004 | | - atomic_dec(&xrcd->usecnt); |
|---|
| 1005 | | - if (ib_srq_has_cq(srq_type)) |
|---|
| 1006 | | - atomic_dec(&cq->usecnt); |
|---|
| 1007 | | - } |
|---|
| 1083 | + atomic_dec(&srq->pd->usecnt); |
|---|
| 1084 | + if (srq->srq_type == IB_SRQT_XRC) |
|---|
| 1085 | + atomic_dec(&srq->ext.xrc.xrcd->usecnt); |
|---|
| 1086 | + if (ib_srq_has_cq(srq->srq_type)) |
|---|
| 1087 | + atomic_dec(&srq->ext.cq->usecnt); |
|---|
| 1088 | + kfree(srq); |
|---|
| 1008 | 1089 | |
|---|
| 1009 | 1090 | return ret; |
|---|
| 1010 | 1091 | } |
|---|
| 1011 | | -EXPORT_SYMBOL(ib_destroy_srq); |
|---|
| 1092 | +EXPORT_SYMBOL(ib_destroy_srq_user); |
|---|
| 1012 | 1093 | |
|---|
| 1013 | 1094 | /* Queue pairs */ |
|---|
| 1014 | 1095 | |
|---|
| .. | .. |
|---|
| 1017 | 1098 | struct ib_qp *qp = context; |
|---|
| 1018 | 1099 | unsigned long flags; |
|---|
| 1019 | 1100 | |
|---|
| 1020 | | - spin_lock_irqsave(&qp->device->event_handler_lock, flags); |
|---|
| 1101 | + spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); |
|---|
| 1021 | 1102 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) |
|---|
| 1022 | 1103 | if (event->element.qp->event_handler) |
|---|
| 1023 | 1104 | event->element.qp->event_handler(event, event->element.qp->qp_context); |
|---|
| 1024 | | - spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); |
|---|
| 1025 | | -} |
|---|
| 1026 | | - |
|---|
| 1027 | | -static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) |
|---|
| 1028 | | -{ |
|---|
| 1029 | | - mutex_lock(&xrcd->tgt_qp_mutex); |
|---|
| 1030 | | - list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); |
|---|
| 1031 | | - mutex_unlock(&xrcd->tgt_qp_mutex); |
|---|
| 1105 | + spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); |
|---|
| 1032 | 1106 | } |
|---|
| 1033 | 1107 | |
|---|
| 1034 | 1108 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, |
|---|
| .. | .. |
|---|
| 1058 | 1132 | qp->qp_num = real_qp->qp_num; |
|---|
| 1059 | 1133 | qp->qp_type = real_qp->qp_type; |
|---|
| 1060 | 1134 | |
|---|
| 1061 | | - spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); |
|---|
| 1135 | + spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); |
|---|
| 1062 | 1136 | list_add(&qp->open_list, &real_qp->open_list); |
|---|
| 1063 | | - spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); |
|---|
| 1137 | + spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); |
|---|
| 1064 | 1138 | |
|---|
| 1065 | 1139 | return qp; |
|---|
| 1066 | 1140 | } |
|---|
| .. | .. |
|---|
| 1073 | 1147 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) |
|---|
| 1074 | 1148 | return ERR_PTR(-EINVAL); |
|---|
| 1075 | 1149 | |
|---|
| 1076 | | - qp = ERR_PTR(-EINVAL); |
|---|
| 1077 | | - mutex_lock(&xrcd->tgt_qp_mutex); |
|---|
| 1078 | | - list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { |
|---|
| 1079 | | - if (real_qp->qp_num == qp_open_attr->qp_num) { |
|---|
| 1080 | | - qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, |
|---|
| 1081 | | - qp_open_attr->qp_context); |
|---|
| 1082 | | - break; |
|---|
| 1083 | | - } |
|---|
| 1150 | + down_read(&xrcd->tgt_qps_rwsem); |
|---|
| 1151 | + real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); |
|---|
| 1152 | + if (!real_qp) { |
|---|
| 1153 | + up_read(&xrcd->tgt_qps_rwsem); |
|---|
| 1154 | + return ERR_PTR(-EINVAL); |
|---|
| 1084 | 1155 | } |
|---|
| 1085 | | - mutex_unlock(&xrcd->tgt_qp_mutex); |
|---|
| 1156 | + qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, |
|---|
| 1157 | + qp_open_attr->qp_context); |
|---|
| 1158 | + up_read(&xrcd->tgt_qps_rwsem); |
|---|
| 1086 | 1159 | return qp; |
|---|
| 1087 | 1160 | } |
|---|
| 1088 | 1161 | EXPORT_SYMBOL(ib_open_qp); |
|---|
| 1089 | 1162 | |
|---|
| 1090 | | -static struct ib_qp *create_xrc_qp(struct ib_qp *qp, |
|---|
| 1091 | | - struct ib_qp_init_attr *qp_init_attr) |
|---|
| 1163 | +static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, |
|---|
| 1164 | + struct ib_qp_init_attr *qp_init_attr) |
|---|
| 1092 | 1165 | { |
|---|
| 1093 | 1166 | struct ib_qp *real_qp = qp; |
|---|
| 1167 | + int err; |
|---|
| 1094 | 1168 | |
|---|
| 1095 | 1169 | qp->event_handler = __ib_shared_qp_event_handler; |
|---|
| 1096 | 1170 | qp->qp_context = qp; |
|---|
| .. | .. |
|---|
| 1106 | 1180 | if (IS_ERR(qp)) |
|---|
| 1107 | 1181 | return qp; |
|---|
| 1108 | 1182 | |
|---|
| 1109 | | - __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); |
|---|
| 1183 | + err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, |
|---|
| 1184 | + real_qp, GFP_KERNEL)); |
|---|
| 1185 | + if (err) { |
|---|
| 1186 | + ib_close_qp(qp); |
|---|
| 1187 | + return ERR_PTR(err); |
|---|
| 1188 | + } |
|---|
| 1110 | 1189 | return qp; |
|---|
| 1111 | 1190 | } |
|---|
| 1112 | 1191 | |
|---|
| 1192 | +/** |
|---|
| 1193 | + * ib_create_qp - Creates a kernel QP associated with the specified protection |
|---|
| 1194 | + * domain. |
|---|
| 1195 | + * @pd: The protection domain associated with the QP. |
|---|
| 1196 | + * @qp_init_attr: A list of initial attributes required to create the |
|---|
| 1197 | + * QP. If QP creation succeeds, then the attributes are updated to |
|---|
| 1198 | + * the actual capabilities of the created QP. |
|---|
| 1199 | + * |
|---|
| 1200 | + * NOTE: for user qp use ib_create_qp_user with valid udata! |
|---|
| 1201 | + */ |
|---|
| 1113 | 1202 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
|---|
| 1114 | 1203 | struct ib_qp_init_attr *qp_init_attr) |
|---|
| 1115 | 1204 | { |
|---|
| .. | .. |
|---|
| 1121 | 1210 | (qp_init_attr->recv_cq || |
|---|
| 1122 | 1211 | qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || |
|---|
| 1123 | 1212 | qp_init_attr->cap.max_recv_sge)) |
|---|
| 1213 | + return ERR_PTR(-EINVAL); |
|---|
| 1214 | + |
|---|
| 1215 | + if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) && |
|---|
| 1216 | + !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER)) |
|---|
| 1124 | 1217 | return ERR_PTR(-EINVAL); |
|---|
| 1125 | 1218 | |
|---|
| 1126 | 1219 | /* |
|---|
| .. | .. |
|---|
| 1140 | 1233 | if (ret) |
|---|
| 1141 | 1234 | goto err; |
|---|
| 1142 | 1235 | |
|---|
| 1143 | | - qp->real_qp = qp; |
|---|
| 1144 | | - qp->qp_type = qp_init_attr->qp_type; |
|---|
| 1145 | | - qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; |
|---|
| 1146 | | - |
|---|
| 1147 | | - atomic_set(&qp->usecnt, 0); |
|---|
| 1148 | | - qp->mrs_used = 0; |
|---|
| 1149 | | - spin_lock_init(&qp->mr_lock); |
|---|
| 1150 | | - INIT_LIST_HEAD(&qp->rdma_mrs); |
|---|
| 1151 | | - INIT_LIST_HEAD(&qp->sig_mrs); |
|---|
| 1152 | | - qp->port = 0; |
|---|
| 1153 | | - |
|---|
| 1154 | 1236 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { |
|---|
| 1155 | | - struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr); |
|---|
| 1237 | + struct ib_qp *xrc_qp = |
|---|
| 1238 | + create_xrc_qp_user(qp, qp_init_attr); |
|---|
| 1156 | 1239 | |
|---|
| 1157 | 1240 | if (IS_ERR(xrc_qp)) { |
|---|
| 1158 | 1241 | ret = PTR_ERR(xrc_qp); |
|---|
| .. | .. |
|---|
| 1198 | 1281 | qp->max_write_sge = qp_init_attr->cap.max_send_sge; |
|---|
| 1199 | 1282 | qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, |
|---|
| 1200 | 1283 | device->attrs.max_sge_rd); |
|---|
| 1284 | + if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) |
|---|
| 1285 | + qp->integrity_en = true; |
|---|
| 1201 | 1286 | |
|---|
| 1202 | 1287 | return qp; |
|---|
| 1203 | 1288 | |
|---|
| .. | .. |
|---|
| 1516 | 1601 | }; |
|---|
| 1517 | 1602 | |
|---|
| 1518 | 1603 | bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
|---|
| 1519 | | - enum ib_qp_type type, enum ib_qp_attr_mask mask, |
|---|
| 1520 | | - enum rdma_link_layer ll) |
|---|
| 1604 | + enum ib_qp_type type, enum ib_qp_attr_mask mask) |
|---|
| 1521 | 1605 | { |
|---|
| 1522 | 1606 | enum ib_qp_attr_mask req_param, opt_param; |
|---|
| 1523 | 1607 | |
|---|
| .. | .. |
|---|
| 1591 | 1675 | const struct ib_gid_attr *old_sgid_attr_alt_av; |
|---|
| 1592 | 1676 | int ret; |
|---|
| 1593 | 1677 | |
|---|
| 1678 | + attr->xmit_slave = NULL; |
|---|
| 1594 | 1679 | if (attr_mask & IB_QP_AV) { |
|---|
| 1595 | 1680 | ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr, |
|---|
| 1596 | 1681 | &old_sgid_attr_av); |
|---|
| 1597 | 1682 | if (ret) |
|---|
| 1598 | 1683 | return ret; |
|---|
| 1684 | + |
|---|
| 1685 | + if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && |
|---|
| 1686 | + is_qp_type_connected(qp)) { |
|---|
| 1687 | + struct net_device *slave; |
|---|
| 1688 | + |
|---|
| 1689 | + /* |
|---|
| 1690 | + * If the user provided the qp_attr then we have to |
|---|
| 1691 | + * resolve it. Kernel users have to provide already |
|---|
| 1692 | + * resolved rdma_ah_attr's. |
|---|
| 1693 | + */ |
|---|
| 1694 | + if (udata) { |
|---|
| 1695 | + ret = ib_resolve_eth_dmac(qp->device, |
|---|
| 1696 | + &attr->ah_attr); |
|---|
| 1697 | + if (ret) |
|---|
| 1698 | + goto out_av; |
|---|
| 1699 | + } |
|---|
| 1700 | + slave = rdma_lag_get_ah_roce_slave(qp->device, |
|---|
| 1701 | + &attr->ah_attr, |
|---|
| 1702 | + GFP_KERNEL); |
|---|
| 1703 | + if (IS_ERR(slave)) { |
|---|
| 1704 | + ret = PTR_ERR(slave); |
|---|
| 1705 | + goto out_av; |
|---|
| 1706 | + } |
|---|
| 1707 | + attr->xmit_slave = slave; |
|---|
| 1708 | + } |
|---|
| 1599 | 1709 | } |
|---|
| 1600 | 1710 | if (attr_mask & IB_QP_ALT_PATH) { |
|---|
| 1601 | 1711 | /* |
|---|
| .. | .. |
|---|
| 1622 | 1732 | } |
|---|
| 1623 | 1733 | } |
|---|
| 1624 | 1734 | |
|---|
| 1625 | | - /* |
|---|
| 1626 | | - * If the user provided the qp_attr then we have to resolve it. Kernel |
|---|
| 1627 | | - * users have to provide already resolved rdma_ah_attr's |
|---|
| 1628 | | - */ |
|---|
| 1629 | | - if (udata && (attr_mask & IB_QP_AV) && |
|---|
| 1630 | | - attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && |
|---|
| 1631 | | - is_qp_type_connected(qp)) { |
|---|
| 1632 | | - ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); |
|---|
| 1633 | | - if (ret) |
|---|
| 1634 | | - goto out; |
|---|
| 1635 | | - } |
|---|
| 1636 | | - |
|---|
| 1637 | 1735 | if (rdma_ib_or_roce(qp->device, port)) { |
|---|
| 1638 | 1736 | if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { |
|---|
| 1639 | | - pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n", |
|---|
| 1640 | | - __func__, qp->device->name); |
|---|
| 1737 | + dev_warn(&qp->device->dev, |
|---|
| 1738 | + "%s rq_psn overflow, masking to 24 bits\n", |
|---|
| 1739 | + __func__); |
|---|
| 1641 | 1740 | attr->rq_psn &= 0xffffff; |
|---|
| 1642 | 1741 | } |
|---|
| 1643 | 1742 | |
|---|
| 1644 | 1743 | if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { |
|---|
| 1645 | | - pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n", |
|---|
| 1646 | | - __func__, qp->device->name); |
|---|
| 1744 | + dev_warn(&qp->device->dev, |
|---|
| 1745 | + " %s sq_psn overflow, masking to 24 bits\n", |
|---|
| 1746 | + __func__); |
|---|
| 1647 | 1747 | attr->sq_psn &= 0xffffff; |
|---|
| 1648 | 1748 | } |
|---|
| 1649 | 1749 | } |
|---|
| 1750 | + |
|---|
| 1751 | + /* |
|---|
| 1752 | + * Bind this qp to a counter automatically based on the rdma counter |
|---|
| 1753 | + * rules. This is only set in RST2INIT when a port is specified. |
|---|
| 1754 | + */ |
|---|
| 1755 | + if (!qp->counter && (attr_mask & IB_QP_PORT) && |
|---|
| 1756 | + ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT)) |
|---|
| 1757 | + rdma_counter_bind_qp_auto(qp, attr->port_num); |
|---|
| 1650 | 1758 | |
|---|
| 1651 | 1759 | ret = ib_security_modify_qp(qp, attr, attr_mask, udata); |
|---|
| 1652 | 1760 | if (ret) |
|---|
| .. | .. |
|---|
| 1665 | 1773 | if (attr_mask & IB_QP_ALT_PATH) |
|---|
| 1666 | 1774 | rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av); |
|---|
| 1667 | 1775 | out_av: |
|---|
| 1668 | | - if (attr_mask & IB_QP_AV) |
|---|
| 1776 | + if (attr_mask & IB_QP_AV) { |
|---|
| 1777 | + rdma_lag_put_ah_roce_slave(attr->xmit_slave); |
|---|
| 1669 | 1778 | rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av); |
|---|
| 1779 | + } |
|---|
| 1670 | 1780 | return ret; |
|---|
| 1671 | 1781 | } |
|---|
| 1672 | 1782 | |
|---|
| .. | .. |
|---|
| 1688 | 1798 | } |
|---|
| 1689 | 1799 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
|---|
| 1690 | 1800 | |
|---|
| 1691 | | -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) |
|---|
| 1801 | +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width) |
|---|
| 1692 | 1802 | { |
|---|
| 1693 | 1803 | int rc; |
|---|
| 1694 | 1804 | u32 netdev_speed; |
|---|
| .. | .. |
|---|
| 1698 | 1808 | if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) |
|---|
| 1699 | 1809 | return -EINVAL; |
|---|
| 1700 | 1810 | |
|---|
| 1701 | | - if (!dev->get_netdev) |
|---|
| 1702 | | - return -EOPNOTSUPP; |
|---|
| 1703 | | - |
|---|
| 1704 | | - netdev = dev->get_netdev(dev, port_num); |
|---|
| 1811 | + netdev = ib_device_get_netdev(dev, port_num); |
|---|
| 1705 | 1812 | if (!netdev) |
|---|
| 1706 | 1813 | return -ENODEV; |
|---|
| 1707 | 1814 | |
|---|
| .. | .. |
|---|
| 1759 | 1866 | qp_attr->ah_attr.grh.sgid_attr = NULL; |
|---|
| 1760 | 1867 | qp_attr->alt_ah_attr.grh.sgid_attr = NULL; |
|---|
| 1761 | 1868 | |
|---|
| 1762 | | - return qp->device->query_qp ? |
|---|
| 1763 | | - qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : |
|---|
| 1764 | | - -EOPNOTSUPP; |
|---|
| 1869 | + return qp->device->ops.query_qp ? |
|---|
| 1870 | + qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, |
|---|
| 1871 | + qp_init_attr) : -EOPNOTSUPP; |
|---|
| 1765 | 1872 | } |
|---|
| 1766 | 1873 | EXPORT_SYMBOL(ib_query_qp); |
|---|
| 1767 | 1874 | |
|---|
| .. | .. |
|---|
| 1774 | 1881 | if (real_qp == qp) |
|---|
| 1775 | 1882 | return -EINVAL; |
|---|
| 1776 | 1883 | |
|---|
| 1777 | | - spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); |
|---|
| 1884 | + spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); |
|---|
| 1778 | 1885 | list_del(&qp->open_list); |
|---|
| 1779 | | - spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); |
|---|
| 1886 | + spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); |
|---|
| 1780 | 1887 | |
|---|
| 1781 | 1888 | atomic_dec(&real_qp->usecnt); |
|---|
| 1782 | 1889 | if (qp->qp_sec) |
|---|
| .. | .. |
|---|
| 1795 | 1902 | |
|---|
| 1796 | 1903 | real_qp = qp->real_qp; |
|---|
| 1797 | 1904 | xrcd = real_qp->xrcd; |
|---|
| 1798 | | - |
|---|
| 1799 | | - mutex_lock(&xrcd->tgt_qp_mutex); |
|---|
| 1905 | + down_write(&xrcd->tgt_qps_rwsem); |
|---|
| 1800 | 1906 | ib_close_qp(qp); |
|---|
| 1801 | 1907 | if (atomic_read(&real_qp->usecnt) == 0) |
|---|
| 1802 | | - list_del(&real_qp->xrcd_list); |
|---|
| 1908 | + xa_erase(&xrcd->tgt_qps, real_qp->qp_num); |
|---|
| 1803 | 1909 | else |
|---|
| 1804 | 1910 | real_qp = NULL; |
|---|
| 1805 | | - mutex_unlock(&xrcd->tgt_qp_mutex); |
|---|
| 1911 | + up_write(&xrcd->tgt_qps_rwsem); |
|---|
| 1806 | 1912 | |
|---|
| 1807 | 1913 | if (real_qp) { |
|---|
| 1808 | 1914 | ret = ib_destroy_qp(real_qp); |
|---|
| 1809 | 1915 | if (!ret) |
|---|
| 1810 | 1916 | atomic_dec(&xrcd->usecnt); |
|---|
| 1811 | | - else |
|---|
| 1812 | | - __ib_insert_xrcd_qp(xrcd, real_qp); |
|---|
| 1813 | 1917 | } |
|---|
| 1814 | 1918 | |
|---|
| 1815 | 1919 | return 0; |
|---|
| 1816 | 1920 | } |
|---|
| 1817 | 1921 | |
|---|
| 1818 | | -int ib_destroy_qp(struct ib_qp *qp) |
|---|
| 1922 | +int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata) |
|---|
| 1819 | 1923 | { |
|---|
| 1820 | 1924 | const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; |
|---|
| 1821 | 1925 | const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; |
|---|
| .. | .. |
|---|
| 1846 | 1950 | if (!qp->uobject) |
|---|
| 1847 | 1951 | rdma_rw_cleanup_mrs(qp); |
|---|
| 1848 | 1952 | |
|---|
| 1953 | + rdma_counter_unbind_qp(qp, true); |
|---|
| 1849 | 1954 | rdma_restrack_del(&qp->res); |
|---|
| 1850 | | - ret = qp->device->destroy_qp(qp); |
|---|
| 1955 | + ret = qp->device->ops.destroy_qp(qp, udata); |
|---|
| 1851 | 1956 | if (!ret) { |
|---|
| 1852 | 1957 | if (alt_path_sgid_attr) |
|---|
| 1853 | 1958 | rdma_put_gid_attr(alt_path_sgid_attr); |
|---|
| .. | .. |
|---|
| 1872 | 1977 | |
|---|
| 1873 | 1978 | return ret; |
|---|
| 1874 | 1979 | } |
|---|
| 1875 | | -EXPORT_SYMBOL(ib_destroy_qp); |
|---|
| 1980 | +EXPORT_SYMBOL(ib_destroy_qp_user); |
|---|
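As elsewhere in this series, kernel callers keep the short name; only uverbs reaches ib_destroy_qp_user() with a non-NULL udata. Conceptually the kernel-side wrapper is just (a sketch, shown for context):

```c
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}
```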
| 1876 | 1981 | |
|---|
| 1877 | 1982 | /* Completion queues */ |
|---|
| 1878 | 1983 | |
|---|
| .. | .. |
|---|
| 1884 | 1989 | const char *caller) |
|---|
| 1885 | 1990 | { |
|---|
| 1886 | 1991 | struct ib_cq *cq; |
|---|
| 1992 | + int ret; |
|---|
| 1887 | 1993 | |
|---|
| 1888 | | - cq = device->create_cq(device, cq_attr, NULL, NULL); |
|---|
| 1994 | + cq = rdma_zalloc_drv_obj(device, ib_cq); |
|---|
| 1995 | + if (!cq) |
|---|
| 1996 | + return ERR_PTR(-ENOMEM); |
|---|
| 1889 | 1997 | |
|---|
| 1890 | | - if (!IS_ERR(cq)) { |
|---|
| 1891 | | - cq->device = device; |
|---|
| 1892 | | - cq->uobject = NULL; |
|---|
| 1893 | | - cq->comp_handler = comp_handler; |
|---|
| 1894 | | - cq->event_handler = event_handler; |
|---|
| 1895 | | - cq->cq_context = cq_context; |
|---|
| 1896 | | - atomic_set(&cq->usecnt, 0); |
|---|
| 1897 | | - cq->res.type = RDMA_RESTRACK_CQ; |
|---|
| 1898 | | - cq->res.kern_name = caller; |
|---|
| 1899 | | - rdma_restrack_add(&cq->res); |
|---|
| 1998 | + cq->device = device; |
|---|
| 1999 | + cq->uobject = NULL; |
|---|
| 2000 | + cq->comp_handler = comp_handler; |
|---|
| 2001 | + cq->event_handler = event_handler; |
|---|
| 2002 | + cq->cq_context = cq_context; |
|---|
| 2003 | + atomic_set(&cq->usecnt, 0); |
|---|
| 2004 | + |
|---|
| 2005 | + rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); |
|---|
| 2006 | + rdma_restrack_set_name(&cq->res, caller); |
|---|
| 2007 | + |
|---|
| 2008 | + ret = device->ops.create_cq(cq, cq_attr, NULL); |
|---|
| 2009 | + if (ret) { |
|---|
| 2010 | + rdma_restrack_put(&cq->res); |
|---|
| 2011 | + kfree(cq); |
|---|
| 2012 | + return ERR_PTR(ret); |
|---|
| 1900 | 2013 | } |
|---|
| 1901 | 2014 | |
|---|
| 2015 | + rdma_restrack_add(&cq->res); |
|---|
| 1902 | 2016 | return cq; |
|---|
| 1903 | 2017 | } |
|---|
| 1904 | 2018 | EXPORT_SYMBOL(__ib_create_cq); |
|---|
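Kernel CQ creation still goes through the ib_create_cq() macro, which supplies KBUILD_MODNAME for restrack. A minimal polling-mode sketch, assuming `dev` is a valid device; depth and vector are arbitrary:

```c
/* Sketch: no completion/event handlers, so the caller is expected to
 * poll with ib_poll_cq(). */
static struct ib_cq *cq_example(struct ib_device *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };

	return ib_create_cq(dev, NULL, NULL, NULL, &cq_attr);
}
```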
| 1905 | 2019 | |
|---|
| 1906 | 2020 | int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
|---|
| 1907 | 2021 | { |
|---|
| 1908 | | - return cq->device->modify_cq ? |
|---|
| 1909 | | - cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP; |
|---|
| 2022 | + if (cq->shared) |
|---|
| 2023 | + return -EOPNOTSUPP; |
|---|
| 2024 | + |
|---|
| 2025 | + return cq->device->ops.modify_cq ? |
|---|
| 2026 | + cq->device->ops.modify_cq(cq, cq_count, |
|---|
| 2027 | + cq_period) : -EOPNOTSUPP; |
|---|
| 1910 | 2028 | } |
|---|
| 1911 | 2029 | EXPORT_SYMBOL(rdma_set_cq_moderation); |
|---|
| 1912 | 2030 | |
|---|
| 1913 | | -int ib_destroy_cq(struct ib_cq *cq) |
|---|
| 2031 | +int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) |
|---|
| 1914 | 2032 | { |
|---|
| 2033 | + int ret; |
|---|
| 2034 | + |
|---|
| 2035 | + if (WARN_ON_ONCE(cq->shared)) |
|---|
| 2036 | + return -EOPNOTSUPP; |
|---|
| 2037 | + |
|---|
| 1915 | 2038 | if (atomic_read(&cq->usecnt)) |
|---|
| 1916 | 2039 | return -EBUSY; |
|---|
| 1917 | 2040 | |
|---|
| 2041 | + ret = cq->device->ops.destroy_cq(cq, udata); |
|---|
| 2042 | + if (ret) |
|---|
| 2043 | + return ret; |
|---|
| 2044 | + |
|---|
| 1918 | 2045 | rdma_restrack_del(&cq->res); |
|---|
| 1919 | | - return cq->device->destroy_cq(cq); |
|---|
| 2046 | + kfree(cq); |
|---|
| 2047 | + return ret; |
|---|
| 1920 | 2048 | } |
|---|
| 1921 | | -EXPORT_SYMBOL(ib_destroy_cq); |
|---|
| 2049 | +EXPORT_SYMBOL(ib_destroy_cq_user); |
|---|
| 1922 | 2050 | |
|---|
| 1923 | 2051 | int ib_resize_cq(struct ib_cq *cq, int cqe) |
|---|
| 1924 | 2052 | { |
|---|
| 1925 | | - return cq->device->resize_cq ? |
|---|
| 1926 | | - cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; |
|---|
| 2053 | + if (cq->shared) |
|---|
| 2054 | + return -EOPNOTSUPP; |
|---|
| 2055 | + |
|---|
| 2056 | + return cq->device->ops.resize_cq ? |
|---|
| 2057 | + cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; |
|---|
| 1927 | 2058 | } |
|---|
| 1928 | 2059 | EXPORT_SYMBOL(ib_resize_cq); |
|---|
| 1929 | 2060 | |
|---|
| 1930 | 2061 | /* Memory regions */ |
|---|
| 1931 | 2062 | |
|---|
| 1932 | | -int ib_dereg_mr(struct ib_mr *mr) |
|---|
| 2063 | +struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
|---|
| 2064 | + u64 virt_addr, int access_flags) |
|---|
| 2065 | +{ |
|---|
| 2066 | + struct ib_mr *mr; |
|---|
| 2067 | + |
|---|
| 2068 | + if (access_flags & IB_ACCESS_ON_DEMAND) { |
|---|
| 2069 | + if (!(pd->device->attrs.device_cap_flags & |
|---|
| 2070 | + IB_DEVICE_ON_DEMAND_PAGING)) { |
|---|
| 2071 | + pr_debug("ODP support not available\n"); |
|---|
| 2072 | + return ERR_PTR(-EINVAL); |
|---|
| 2073 | + } |
|---|
| 2074 | + } |
|---|
| 2075 | + |
|---|
| 2076 | + mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, |
|---|
| 2077 | + access_flags, NULL); |
|---|
| 2078 | + |
|---|
| 2079 | + if (IS_ERR(mr)) |
|---|
| 2080 | + return mr; |
|---|
| 2081 | + |
|---|
| 2082 | + mr->device = pd->device; |
|---|
| 2083 | + mr->type = IB_MR_TYPE_USER; |
|---|
| 2084 | + mr->pd = pd; |
|---|
| 2085 | + mr->dm = NULL; |
|---|
| 2086 | + atomic_inc(&pd->usecnt); |
|---|
| 2087 | + mr->iova = virt_addr; |
|---|
| 2088 | + mr->length = length; |
|---|
| 2089 | + |
|---|
| 2090 | + rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); |
|---|
| 2091 | + rdma_restrack_parent_name(&mr->res, &pd->res); |
|---|
| 2092 | + rdma_restrack_add(&mr->res); |
|---|
| 2093 | + |
|---|
| 2094 | + return mr; |
|---|
| 2095 | +} |
|---|
| 2096 | +EXPORT_SYMBOL(ib_reg_user_mr); |
|---|
| 2097 | + |
|---|
| 2098 | +int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, |
|---|
| 2099 | + u32 flags, struct ib_sge *sg_list, u32 num_sge) |
|---|
| 2100 | +{ |
|---|
| 2101 | + if (!pd->device->ops.advise_mr) |
|---|
| 2102 | + return -EOPNOTSUPP; |
|---|
| 2103 | + |
|---|
| 2104 | + if (!num_sge) |
|---|
| 2105 | + return 0; |
|---|
| 2106 | + |
|---|
| 2107 | + return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, |
|---|
| 2108 | + NULL); |
|---|
| 2109 | +} |
|---|
| 2110 | +EXPORT_SYMBOL(ib_advise_mr); |
|---|
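ib_advise_mr() exposes the ODP prefetch hints to kernel callers; the advice and flag constants come from the uverbs uapi. A hedged example, assuming an ODP-registered `mr` on `pd` and a hypothetical virtual address `io_virt` inside the registered range:

```c
/* Sketch: prefault one page of an ODP MR for write access. */
static int advise_example(struct ib_pd *pd, struct ib_mr *mr, u64 io_virt)
{
	struct ib_sge sge = {
		.addr	= io_virt,
		.length	= 4096,
		.lkey	= mr->lkey,
	};

	return ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			    IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
}
```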
| 2111 | + |
|---|
| 2112 | +int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) |
|---|
| 1933 | 2113 | { |
|---|
| 1934 | 2114 | struct ib_pd *pd = mr->pd; |
|---|
| 1935 | 2115 | struct ib_dm *dm = mr->dm; |
|---|
| 2116 | + struct ib_sig_attrs *sig_attrs = mr->sig_attrs; |
|---|
| 1936 | 2117 | int ret; |
|---|
| 1937 | 2118 | |
|---|
| 2119 | + trace_mr_dereg(mr); |
|---|
| 1938 | 2120 | rdma_restrack_del(&mr->res); |
|---|
| 1939 | | - ret = mr->device->dereg_mr(mr); |
|---|
| 2121 | + ret = mr->device->ops.dereg_mr(mr, udata); |
|---|
| 1940 | 2122 | if (!ret) { |
|---|
| 1941 | 2123 | atomic_dec(&pd->usecnt); |
|---|
| 1942 | 2124 | if (dm) |
|---|
| 1943 | 2125 | atomic_dec(&dm->usecnt); |
|---|
| 2126 | + kfree(sig_attrs); |
|---|
| 1944 | 2127 | } |
|---|
| 1945 | 2128 | |
|---|
| 1946 | 2129 | return ret; |
|---|
| 1947 | 2130 | } |
|---|
| 1948 | | -EXPORT_SYMBOL(ib_dereg_mr); |
|---|
| 2131 | +EXPORT_SYMBOL(ib_dereg_mr_user); |
|---|
| 1949 | 2132 | |
|---|
| 1950 | 2133 | /** |
|---|
| 1951 | 2134 | * ib_alloc_mr() - Allocates a memory region |
|---|
| .. | .. |
|---|
| 1959 | 2142 | * max_num_sg * used_page_size. |
|---|
| 1960 | 2143 | * |
|---|
| 1961 | 2144 | */ |
|---|
| 1962 | | -struct ib_mr *ib_alloc_mr(struct ib_pd *pd, |
|---|
| 1963 | | - enum ib_mr_type mr_type, |
|---|
| 2145 | +struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
|---|
| 1964 | 2146 | u32 max_num_sg) |
|---|
| 1965 | 2147 | { |
|---|
| 1966 | 2148 | struct ib_mr *mr; |
|---|
| 1967 | 2149 | |
|---|
| 1968 | | - if (!pd->device->alloc_mr) |
|---|
| 1969 | | - return ERR_PTR(-EOPNOTSUPP); |
|---|
| 1970 | | - |
|---|
| 1971 | | - mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); |
|---|
| 1972 | | - if (!IS_ERR(mr)) { |
|---|
| 1973 | | - mr->device = pd->device; |
|---|
| 1974 | | - mr->pd = pd; |
|---|
| 1975 | | - mr->dm = NULL; |
|---|
| 1976 | | - mr->uobject = NULL; |
|---|
| 1977 | | - atomic_inc(&pd->usecnt); |
|---|
| 1978 | | - mr->need_inval = false; |
|---|
| 1979 | | - mr->res.type = RDMA_RESTRACK_MR; |
|---|
| 1980 | | - rdma_restrack_add(&mr->res); |
|---|
| 2150 | + if (!pd->device->ops.alloc_mr) { |
|---|
| 2151 | + mr = ERR_PTR(-EOPNOTSUPP); |
|---|
| 2152 | + goto out; |
|---|
| 1981 | 2153 | } |
|---|
| 1982 | 2154 | |
|---|
| 2155 | + if (mr_type == IB_MR_TYPE_INTEGRITY) { |
|---|
| 2156 | + WARN_ON_ONCE(1); |
|---|
| 2157 | + mr = ERR_PTR(-EINVAL); |
|---|
| 2158 | + goto out; |
|---|
| 2159 | + } |
|---|
| 2160 | + |
|---|
| 2161 | + mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); |
|---|
| 2162 | + if (IS_ERR(mr)) |
|---|
| 2163 | + goto out; |
|---|
| 2164 | + |
|---|
| 2165 | + mr->device = pd->device; |
|---|
| 2166 | + mr->pd = pd; |
|---|
| 2167 | + mr->dm = NULL; |
|---|
| 2168 | + mr->uobject = NULL; |
|---|
| 2169 | + atomic_inc(&pd->usecnt); |
|---|
| 2170 | + mr->need_inval = false; |
|---|
| 2171 | + mr->type = mr_type; |
|---|
| 2172 | + mr->sig_attrs = NULL; |
|---|
| 2173 | + |
|---|
| 2174 | + rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); |
|---|
| 2175 | + rdma_restrack_parent_name(&mr->res, &pd->res); |
|---|
| 2176 | + rdma_restrack_add(&mr->res); |
|---|
| 2177 | +out: |
|---|
| 2178 | + trace_mr_alloc(pd, mr_type, max_num_sg, mr); |
|---|
| 1983 | 2179 | return mr; |
|---|
| 1984 | 2180 | } |
|---|
| 1985 | 2181 | EXPORT_SYMBOL(ib_alloc_mr); |
|---|
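ib_alloc_mr() now stamps mr->type and refuses IB_MR_TYPE_INTEGRITY, which must use the dedicated allocator below. The usual fast-registration flow, sketched with an arbitrary page budget and an assumed `pd`:

```c
/* Sketch: the common FRWR pattern against the reworked allocator. */
static int frwr_example(struct ib_pd *pd)
{
	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);

	if (IS_ERR(mr))
		return PTR_ERR(mr);
	/* ... ib_map_mr_sg() + an IB_WR_REG_MR work request ... */
	return ib_dereg_mr(mr);	/* wrapper for ib_dereg_mr_user(mr, NULL) */
}
```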
| 1986 | 2182 | |
|---|
| 1987 | | -/* "Fast" memory regions */ |
|---|
| 1988 | | - |
|---|
| 1989 | | -struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, |
|---|
| 1990 | | - int mr_access_flags, |
|---|
| 1991 | | - struct ib_fmr_attr *fmr_attr) |
|---|
| 2183 | +/** |
|---|
| 2184 | + * ib_alloc_mr_integrity() - Allocates an integrity memory region |
|---|
| 2185 | + * @pd: protection domain associated with the region |
|---|
| 2186 | + * @max_num_data_sg: maximum data sg entries available for registration |
|---|
| 2187 | + * @max_num_meta_sg: maximum metadata sg entries available for |
|---|
| 2188 | + * registration |
|---|
| 2189 | + * |
|---|
| 2190 | + * Notes: |
|---|
| 2191 | + * Memory registration page/sg lists must not exceed max_num_data_sg, |
|---|
| 2192 | + * also the integrity page/sg lists must not exceed max_num_meta_sg. |
|---|
| 2193 | + * |
|---|
| 2194 | + */ |
|---|
| 2195 | +struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, |
|---|
| 2196 | + u32 max_num_data_sg, |
|---|
| 2197 | + u32 max_num_meta_sg) |
|---|
| 1992 | 2198 | { |
|---|
| 1993 | | - struct ib_fmr *fmr; |
|---|
| 2199 | + struct ib_mr *mr; |
|---|
| 2200 | + struct ib_sig_attrs *sig_attrs; |
|---|
| 1994 | 2201 | |
|---|
| 1995 | | - if (!pd->device->alloc_fmr) |
|---|
| 1996 | | - return ERR_PTR(-EOPNOTSUPP); |
|---|
| 1997 | | - |
|---|
| 1998 | | - fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); |
|---|
| 1999 | | - if (!IS_ERR(fmr)) { |
|---|
| 2000 | | - fmr->device = pd->device; |
|---|
| 2001 | | - fmr->pd = pd; |
|---|
| 2002 | | - atomic_inc(&pd->usecnt); |
|---|
| 2202 | + if (!pd->device->ops.alloc_mr_integrity || |
|---|
| 2203 | + !pd->device->ops.map_mr_sg_pi) { |
|---|
| 2204 | + mr = ERR_PTR(-EOPNOTSUPP); |
|---|
| 2205 | + goto out; |
|---|
| 2003 | 2206 | } |
|---|
| 2004 | 2207 | |
|---|
| 2005 | | - return fmr; |
|---|
| 2208 | + if (!max_num_meta_sg) { |
|---|
| 2209 | + mr = ERR_PTR(-EINVAL); |
|---|
| 2210 | + goto out; |
|---|
| 2211 | + } |
|---|
| 2212 | + |
|---|
| 2213 | + sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL); |
|---|
| 2214 | + if (!sig_attrs) { |
|---|
| 2215 | + mr = ERR_PTR(-ENOMEM); |
|---|
| 2216 | + goto out; |
|---|
| 2217 | + } |
|---|
| 2218 | + |
|---|
| 2219 | + mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, |
|---|
| 2220 | + max_num_meta_sg); |
|---|
| 2221 | + if (IS_ERR(mr)) { |
|---|
| 2222 | + kfree(sig_attrs); |
|---|
| 2223 | + goto out; |
|---|
| 2224 | + } |
|---|
| 2225 | + |
|---|
| 2226 | + mr->device = pd->device; |
|---|
| 2227 | + mr->pd = pd; |
|---|
| 2228 | + mr->dm = NULL; |
|---|
| 2229 | + mr->uobject = NULL; |
|---|
| 2230 | + atomic_inc(&pd->usecnt); |
|---|
| 2231 | + mr->need_inval = false; |
|---|
| 2232 | + mr->type = IB_MR_TYPE_INTEGRITY; |
|---|
| 2233 | + mr->sig_attrs = sig_attrs; |
|---|
| 2234 | + |
|---|
| 2235 | + rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); |
|---|
| 2236 | + rdma_restrack_parent_name(&mr->res, &pd->res); |
|---|
| 2237 | + rdma_restrack_add(&mr->res); |
|---|
| 2238 | +out: |
|---|
| 2239 | + trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr); |
|---|
| 2240 | + return mr; |
|---|
| 2006 | 2241 | } |
|---|
| 2007 | | -EXPORT_SYMBOL(ib_alloc_fmr); |
|---|
| 2008 | | - |
|---|
| 2009 | | -int ib_unmap_fmr(struct list_head *fmr_list) |
|---|
| 2010 | | -{ |
|---|
| 2011 | | - struct ib_fmr *fmr; |
|---|
| 2012 | | - |
|---|
| 2013 | | - if (list_empty(fmr_list)) |
|---|
| 2014 | | - return 0; |
|---|
| 2015 | | - |
|---|
| 2016 | | - fmr = list_entry(fmr_list->next, struct ib_fmr, list); |
|---|
| 2017 | | - return fmr->device->unmap_fmr(fmr_list); |
|---|
| 2018 | | -} |
|---|
| 2019 | | -EXPORT_SYMBOL(ib_unmap_fmr); |
|---|
| 2020 | | - |
|---|
| 2021 | | -int ib_dealloc_fmr(struct ib_fmr *fmr) |
|---|
| 2022 | | -{ |
|---|
| 2023 | | - struct ib_pd *pd; |
|---|
| 2024 | | - int ret; |
|---|
| 2025 | | - |
|---|
| 2026 | | - pd = fmr->pd; |
|---|
| 2027 | | - ret = fmr->device->dealloc_fmr(fmr); |
|---|
| 2028 | | - if (!ret) |
|---|
| 2029 | | - atomic_dec(&pd->usecnt); |
|---|
| 2030 | | - |
|---|
| 2031 | | - return ret; |
|---|
| 2032 | | -} |
|---|
| 2033 | | -EXPORT_SYMBOL(ib_dealloc_fmr); |
|---|
| 2242 | +EXPORT_SYMBOL(ib_alloc_mr_integrity); |
|---|
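The integrity allocator pairs data and metadata capacity and attaches a core-owned ib_sig_attrs, which ib_dereg_mr_user() frees (see the kfree(sig_attrs) above). A hedged sketch for a PI-capable device; the sizes are arbitrary:

```c
/* Sketch: integrity MR lifetime; sig_attrs is owned by the core. */
static int pi_mr_example(struct ib_pd *pd)
{
	struct ib_mr *sig_mr = ib_alloc_mr_integrity(pd, 32, 32);

	if (IS_ERR(sig_mr))
		return PTR_ERR(sig_mr);
	/* fill sig_mr->sig_attrs (e.g. T10-DIF domains) before posting an
	 * IB_WR_REG_MR_INTEGRITY work request */
	return ib_dereg_mr(sig_mr);	/* also frees sig_attrs */
}
```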
| 2034 | 2243 | |
|---|
| 2035 | 2244 | /* Multicast groups */ |
|---|
| 2036 | 2245 | |
|---|
| .. | .. |
|---|
| 2076 | 2285 | { |
|---|
| 2077 | 2286 | int ret; |
|---|
| 2078 | 2287 | |
|---|
| 2079 | | - if (!qp->device->attach_mcast) |
|---|
| 2288 | + if (!qp->device->ops.attach_mcast) |
|---|
| 2080 | 2289 | return -EOPNOTSUPP; |
|---|
| 2081 | 2290 | |
|---|
| 2082 | 2291 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || |
|---|
| 2083 | 2292 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
|---|
| 2084 | 2293 | return -EINVAL; |
|---|
| 2085 | 2294 | |
|---|
| 2086 | | - ret = qp->device->attach_mcast(qp, gid, lid); |
|---|
| 2295 | + ret = qp->device->ops.attach_mcast(qp, gid, lid); |
|---|
| 2087 | 2296 | if (!ret) |
|---|
| 2088 | 2297 | atomic_inc(&qp->usecnt); |
|---|
| 2089 | 2298 | return ret; |
|---|
| .. | .. |
|---|
| 2094 | 2303 | { |
|---|
| 2095 | 2304 | int ret; |
|---|
| 2096 | 2305 | |
|---|
| 2097 | | - if (!qp->device->detach_mcast) |
|---|
| 2306 | + if (!qp->device->ops.detach_mcast) |
|---|
| 2098 | 2307 | return -EOPNOTSUPP; |
|---|
| 2099 | 2308 | |
|---|
| 2100 | 2309 | if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || |
|---|
| 2101 | 2310 | qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) |
|---|
| 2102 | 2311 | return -EINVAL; |
|---|
| 2103 | 2312 | |
|---|
| 2104 | | - ret = qp->device->detach_mcast(qp, gid, lid); |
|---|
| 2313 | + ret = qp->device->ops.detach_mcast(qp, gid, lid); |
|---|
| 2105 | 2314 | if (!ret) |
|---|
| 2106 | 2315 | atomic_dec(&qp->usecnt); |
|---|
| 2107 | 2316 | return ret; |
|---|
| 2108 | 2317 | } |
|---|
| 2109 | 2318 | EXPORT_SYMBOL(ib_detach_mcast); |
|---|
| 2110 | 2319 | |
|---|
| 2111 | | -struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) |
|---|
| 2320 | +/** |
|---|
| 2321 | + * ib_alloc_xrcd_user - Allocates an XRC domain. |
|---|
| 2322 | + * @device: The device on which to allocate the XRC domain. |
|---|
| 2323 | + * @inode: inode to connect XRCD |
|---|
| 2324 | + * @udata: Valid user data or NULL for kernel object |
|---|
| 2325 | + */ |
|---|
| 2326 | +struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, |
|---|
| 2327 | + struct inode *inode, struct ib_udata *udata) |
|---|
| 2112 | 2328 | { |
|---|
| 2113 | 2329 | struct ib_xrcd *xrcd; |
|---|
| 2330 | + int ret; |
|---|
| 2114 | 2331 | |
|---|
| 2115 | | - if (!device->alloc_xrcd) |
|---|
| 2332 | + if (!device->ops.alloc_xrcd) |
|---|
| 2116 | 2333 | return ERR_PTR(-EOPNOTSUPP); |
|---|
| 2117 | 2334 | |
|---|
| 2118 | | - xrcd = device->alloc_xrcd(device, NULL, NULL); |
|---|
| 2119 | | - if (!IS_ERR(xrcd)) { |
|---|
| 2120 | | - xrcd->device = device; |
|---|
| 2121 | | - xrcd->inode = NULL; |
|---|
| 2122 | | - atomic_set(&xrcd->usecnt, 0); |
|---|
| 2123 | | - mutex_init(&xrcd->tgt_qp_mutex); |
|---|
| 2124 | | - INIT_LIST_HEAD(&xrcd->tgt_qp_list); |
|---|
| 2125 | | - } |
|---|
| 2335 | + xrcd = rdma_zalloc_drv_obj(device, ib_xrcd); |
|---|
| 2336 | + if (!xrcd) |
|---|
| 2337 | + return ERR_PTR(-ENOMEM); |
|---|
| 2126 | 2338 | |
|---|
| 2339 | + xrcd->device = device; |
|---|
| 2340 | + xrcd->inode = inode; |
|---|
| 2341 | + atomic_set(&xrcd->usecnt, 0); |
|---|
| 2342 | + init_rwsem(&xrcd->tgt_qps_rwsem); |
|---|
| 2343 | + xa_init(&xrcd->tgt_qps); |
|---|
| 2344 | + |
|---|
| 2345 | + ret = device->ops.alloc_xrcd(xrcd, udata); |
|---|
| 2346 | + if (ret) |
|---|
| 2347 | + goto err; |
|---|
| 2127 | 2348 | return xrcd; |
|---|
| 2349 | +err: |
|---|
| 2350 | + kfree(xrcd); |
|---|
| 2351 | + return ERR_PTR(ret); |
|---|
| 2128 | 2352 | } |
|---|
| 2129 | | -EXPORT_SYMBOL(__ib_alloc_xrcd); |
|---|
| 2353 | +EXPORT_SYMBOL(ib_alloc_xrcd_user); |
|---|
| 2130 | 2354 | |
|---|
| 2131 | | -int ib_dealloc_xrcd(struct ib_xrcd *xrcd) |
|---|
| 2355 | +/** |
|---|
| 2356 | + * ib_dealloc_xrcd_user - Deallocates an XRC domain. |
|---|
| 2357 | + * @xrcd: The XRC domain to deallocate. |
|---|
| 2358 | + * @udata: Valid user data or NULL for a kernel object |
|---|
| 2359 | + */ |
|---|
| 2360 | +int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) |
|---|
| 2132 | 2361 | { |
|---|
| 2133 | | - struct ib_qp *qp; |
|---|
| 2134 | 2362 | int ret; |
|---|
| 2135 | 2363 | |
|---|
| 2136 | 2364 | if (atomic_read(&xrcd->usecnt)) |
|---|
| 2137 | 2365 | return -EBUSY; |
|---|
| 2138 | 2366 | |
|---|
| 2139 | | - while (!list_empty(&xrcd->tgt_qp_list)) { |
|---|
| 2140 | | - qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); |
|---|
| 2141 | | - ret = ib_destroy_qp(qp); |
|---|
| 2142 | | - if (ret) |
|---|
| 2143 | | - return ret; |
|---|
| 2144 | | - } |
|---|
| 2145 | | - |
|---|
| 2146 | | - return xrcd->device->dealloc_xrcd(xrcd); |
|---|
| 2367 | + WARN_ON(!xa_empty(&xrcd->tgt_qps)); |
|---|
| 2368 | + ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); |
|---|
| 2369 | + if (ret) |
|---|
| 2370 | + return ret; |
|---|
| 2371 | + kfree(xrcd); |
|---|
| 2372 | + return ret; |
|---|
| 2147 | 2373 | } |
|---|
| 2148 | | -EXPORT_SYMBOL(ib_dealloc_xrcd); |
|---|
| 2374 | +EXPORT_SYMBOL(ib_dealloc_xrcd_user); |
|---|
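A minimal kernel-side sketch of the new XRCD lifetime, assuming (per the kernel-doc above) that a NULL inode and udata denote a kernel-owned object; `example_xrcd_round_trip` is hypothetical.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: allocate and free a kernel-owned XRC domain. */
static int example_xrcd_round_trip(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	xrcd = ib_alloc_xrcd_user(device, NULL, NULL);
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/* ... create XRC INI/TGT QPs that reference xrcd ... */

	/* Returns -EBUSY while any QP still holds a reference. */
	return ib_dealloc_xrcd_user(xrcd, NULL);
}
```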
| 2149 | 2375 | |
|---|
| 2150 | 2376 | /** |
|---|
| 2151 | 2377 | * ib_create_wq - Creates a WQ associated with the specified protection |
|---|
| .. | .. |
|---|
| 2166 | 2392 | { |
|---|
| 2167 | 2393 | struct ib_wq *wq; |
|---|
| 2168 | 2394 | |
|---|
| 2169 | | - if (!pd->device->create_wq) |
|---|
| 2395 | + if (!pd->device->ops.create_wq) |
|---|
| 2170 | 2396 | return ERR_PTR(-EOPNOTSUPP); |
|---|
| 2171 | 2397 | |
|---|
| 2172 | | - wq = pd->device->create_wq(pd, wq_attr, NULL); |
|---|
| 2398 | + wq = pd->device->ops.create_wq(pd, wq_attr, NULL); |
|---|
| 2173 | 2399 | if (!IS_ERR(wq)) { |
|---|
| 2174 | 2400 | wq->event_handler = wq_attr->event_handler; |
|---|
| 2175 | 2401 | wq->wq_context = wq_attr->wq_context; |
|---|
| .. | .. |
|---|
| 2187 | 2413 | EXPORT_SYMBOL(ib_create_wq); |
|---|
| 2188 | 2414 | |
|---|
| 2189 | 2415 | /** |
|---|
| 2190 | | - * ib_destroy_wq - Destroys the specified WQ. |
|---|
| 2416 | + * ib_destroy_wq_user - Destroys the specified user WQ. |
|---|
| 2191 | 2417 | * @wq: The WQ to destroy. |
|---|
| 2418 | + * @udata: Valid user data |
|---|
| 2192 | 2419 | */ |
|---|
| 2193 | | -int ib_destroy_wq(struct ib_wq *wq) |
|---|
| 2420 | +int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata) |
|---|
| 2194 | 2421 | { |
|---|
| 2195 | | - int err; |
|---|
| 2196 | 2422 | struct ib_cq *cq = wq->cq; |
|---|
| 2197 | 2423 | struct ib_pd *pd = wq->pd; |
|---|
| 2424 | + int ret; |
|---|
| 2198 | 2425 | |
|---|
| 2199 | 2426 | if (atomic_read(&wq->usecnt)) |
|---|
| 2200 | 2427 | return -EBUSY; |
|---|
| 2201 | 2428 | |
|---|
| 2202 | | - err = wq->device->destroy_wq(wq); |
|---|
| 2203 | | - if (!err) { |
|---|
| 2204 | | - atomic_dec(&pd->usecnt); |
|---|
| 2205 | | - atomic_dec(&cq->usecnt); |
|---|
| 2206 | | - } |
|---|
| 2207 | | - return err; |
|---|
| 2429 | + ret = wq->device->ops.destroy_wq(wq, udata); |
|---|
| 2430 | + if (ret) |
|---|
| 2431 | + return ret; |
|---|
| 2432 | + |
|---|
| 2433 | + atomic_dec(&pd->usecnt); |
|---|
| 2434 | + atomic_dec(&cq->usecnt); |
|---|
| 2435 | + return ret; |
|---|
| 2208 | 2436 | } |
|---|
| 2209 | | -EXPORT_SYMBOL(ib_destroy_wq); |
|---|
| 2437 | +EXPORT_SYMBOL(ib_destroy_wq_user); |
|---|
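A sketch of the WQ lifecycle around the renamed destroy helper, under the assumption that a NULL udata is acceptable for a kernel-owned WQ; `example_wq_lifecycle` and the queue sizes are made up.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: create an RQ-type WQ, move it to ready,
 * then destroy it with the _user-suffixed helper. */
static int example_wq_lifecycle(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr init_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};
	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };
	struct ib_wq *wq;
	int ret;

	wq = ib_create_wq(pd, &init_attr);
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	ret = ib_modify_wq(wq, &attr, IB_WQ_STATE);
	if (ret)
		goto destroy;

	/* ... post receives, use the WQ ... */
destroy:
	ib_destroy_wq_user(wq, NULL);	/* -EBUSY if still referenced */
	return ret;
}
```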
| 2210 | 2438 | |
|---|
| 2211 | 2439 | /** |
|---|
| 2212 | 2440 | * ib_modify_wq - Modifies the specified WQ. |
|---|
| .. | .. |
|---|
| 2221 | 2449 | { |
|---|
| 2222 | 2450 | int err; |
|---|
| 2223 | 2451 | |
|---|
| 2224 | | - if (!wq->device->modify_wq) |
|---|
| 2452 | + if (!wq->device->ops.modify_wq) |
|---|
| 2225 | 2453 | return -EOPNOTSUPP; |
|---|
| 2226 | 2454 | |
|---|
| 2227 | | - err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); |
|---|
| 2455 | + err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL); |
|---|
| 2228 | 2456 | return err; |
|---|
| 2229 | 2457 | } |
|---|
| 2230 | 2458 | EXPORT_SYMBOL(ib_modify_wq); |
|---|
| 2231 | 2459 | |
|---|
| 2232 | | -/* |
|---|
| 2233 | | - * ib_create_rwq_ind_table - Creates a RQ Indirection Table. |
|---|
| 2234 | | - * @device: The device on which to create the rwq indirection table. |
|---|
| 2235 | | - * @ib_rwq_ind_table_init_attr: A list of initial attributes required to |
|---|
| 2236 | | - * create the Indirection Table. |
|---|
| 2237 | | - * |
|---|
| 2238 | | - * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less |
|---|
| 2239 | | - * than the created ib_rwq_ind_table object and the caller is responsible |
|---|
| 2240 | | - * for its memory allocation/free. |
|---|
| 2241 | | - */ |
|---|
| 2242 | | -struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, |
|---|
| 2243 | | - struct ib_rwq_ind_table_init_attr *init_attr) |
|---|
| 2244 | | -{ |
|---|
| 2245 | | - struct ib_rwq_ind_table *rwq_ind_table; |
|---|
| 2246 | | - int i; |
|---|
| 2247 | | - u32 table_size; |
|---|
| 2248 | | - |
|---|
| 2249 | | - if (!device->create_rwq_ind_table) |
|---|
| 2250 | | - return ERR_PTR(-EOPNOTSUPP); |
|---|
| 2251 | | - |
|---|
| 2252 | | - table_size = (1 << init_attr->log_ind_tbl_size); |
|---|
| 2253 | | - rwq_ind_table = device->create_rwq_ind_table(device, |
|---|
| 2254 | | - init_attr, NULL); |
|---|
| 2255 | | - if (IS_ERR(rwq_ind_table)) |
|---|
| 2256 | | - return rwq_ind_table; |
|---|
| 2257 | | - |
|---|
| 2258 | | - rwq_ind_table->ind_tbl = init_attr->ind_tbl; |
|---|
| 2259 | | - rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; |
|---|
| 2260 | | - rwq_ind_table->device = device; |
|---|
| 2261 | | - rwq_ind_table->uobject = NULL; |
|---|
| 2262 | | - atomic_set(&rwq_ind_table->usecnt, 0); |
|---|
| 2263 | | - |
|---|
| 2264 | | - for (i = 0; i < table_size; i++) |
|---|
| 2265 | | - atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); |
|---|
| 2266 | | - |
|---|
| 2267 | | - return rwq_ind_table; |
|---|
| 2268 | | -} |
|---|
| 2269 | | -EXPORT_SYMBOL(ib_create_rwq_ind_table); |
|---|
| 2270 | | - |
|---|
| 2271 | | -/* |
|---|
| 2272 | | - * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. |
|---|
| 2273 | | - * @wq_ind_table: The Indirection Table to destroy. |
|---|
| 2274 | | -*/ |
|---|
| 2275 | | -int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table) |
|---|
| 2276 | | -{ |
|---|
| 2277 | | - int err, i; |
|---|
| 2278 | | - u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size); |
|---|
| 2279 | | - struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl; |
|---|
| 2280 | | - |
|---|
| 2281 | | - if (atomic_read(&rwq_ind_table->usecnt)) |
|---|
| 2282 | | - return -EBUSY; |
|---|
| 2283 | | - |
|---|
| 2284 | | - err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table); |
|---|
| 2285 | | - if (!err) { |
|---|
| 2286 | | - for (i = 0; i < table_size; i++) |
|---|
| 2287 | | - atomic_dec(&ind_tbl[i]->usecnt); |
|---|
| 2288 | | - } |
|---|
| 2289 | | - |
|---|
| 2290 | | - return err; |
|---|
| 2291 | | -} |
|---|
| 2292 | | -EXPORT_SYMBOL(ib_destroy_rwq_ind_table); |
|---|
| 2293 | | - |
|---|
| 2294 | 2460 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
|---|
| 2295 | 2461 | struct ib_mr_status *mr_status) |
|---|
| 2296 | 2462 | { |
|---|
| 2297 | | - return mr->device->check_mr_status ? |
|---|
| 2298 | | - mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP; |
|---|
| 2463 | + if (!mr->device->ops.check_mr_status) |
|---|
| 2464 | + return -EOPNOTSUPP; |
|---|
| 2465 | + |
|---|
| 2466 | + return mr->device->ops.check_mr_status(mr, check_mask, mr_status); |
|---|
| 2299 | 2467 | } |
|---|
| 2300 | 2468 | EXPORT_SYMBOL(ib_check_mr_status); |
|---|
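A sketch of how a consumer might call ib_check_mr_status() after a signature-enabled transfer; `example_check_sig` is hypothetical, and the field accesses follow struct ib_mr_status from ib_verbs.h.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: query protection-information status on an MR. */
static int example_check_sig(struct ib_mr *mr)
{
	struct ib_mr_status status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &status);
	if (ret)
		return ret;	/* -EOPNOTSUPP if the driver lacks the op */

	if (status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error %d at offset %llu\n",
		       status.sig_err.err_type,
		       status.sig_err.sig_err_offset);
	return 0;
}
```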
| 2301 | 2469 | |
|---|
| 2302 | 2470 | int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, |
|---|
| 2303 | 2471 | int state) |
|---|
| 2304 | 2472 | { |
|---|
| 2305 | | - if (!device->set_vf_link_state) |
|---|
| 2473 | + if (!device->ops.set_vf_link_state) |
|---|
| 2306 | 2474 | return -EOPNOTSUPP; |
|---|
| 2307 | 2475 | |
|---|
| 2308 | | - return device->set_vf_link_state(device, vf, port, state); |
|---|
| 2476 | + return device->ops.set_vf_link_state(device, vf, port, state); |
|---|
| 2309 | 2477 | } |
|---|
| 2310 | 2478 | EXPORT_SYMBOL(ib_set_vf_link_state); |
|---|
| 2311 | 2479 | |
|---|
| 2312 | 2480 | int ib_get_vf_config(struct ib_device *device, int vf, u8 port, |
|---|
| 2313 | 2481 | struct ifla_vf_info *info) |
|---|
| 2314 | 2482 | { |
|---|
| 2315 | | - if (!device->get_vf_config) |
|---|
| 2483 | + if (!device->ops.get_vf_config) |
|---|
| 2316 | 2484 | return -EOPNOTSUPP; |
|---|
| 2317 | 2485 | |
|---|
| 2318 | | - return device->get_vf_config(device, vf, port, info); |
|---|
| 2486 | + return device->ops.get_vf_config(device, vf, port, info); |
|---|
| 2319 | 2487 | } |
|---|
| 2320 | 2488 | EXPORT_SYMBOL(ib_get_vf_config); |
|---|
| 2321 | 2489 | |
|---|
| 2322 | 2490 | int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, |
|---|
| 2323 | 2491 | struct ifla_vf_stats *stats) |
|---|
| 2324 | 2492 | { |
|---|
| 2325 | | - if (!device->get_vf_stats) |
|---|
| 2493 | + if (!device->ops.get_vf_stats) |
|---|
| 2326 | 2494 | return -EOPNOTSUPP; |
|---|
| 2327 | 2495 | |
|---|
| 2328 | | - return device->get_vf_stats(device, vf, port, stats); |
|---|
| 2496 | + return device->ops.get_vf_stats(device, vf, port, stats); |
|---|
| 2329 | 2497 | } |
|---|
| 2330 | 2498 | EXPORT_SYMBOL(ib_get_vf_stats); |
|---|
| 2331 | 2499 | |
|---|
| 2332 | 2500 | int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, |
|---|
| 2333 | 2501 | int type) |
|---|
| 2334 | 2502 | { |
|---|
| 2335 | | - if (!device->set_vf_guid) |
|---|
| 2503 | + if (!device->ops.set_vf_guid) |
|---|
| 2336 | 2504 | return -EOPNOTSUPP; |
|---|
| 2337 | 2505 | |
|---|
| 2338 | | - return device->set_vf_guid(device, vf, port, guid, type); |
|---|
| 2506 | + return device->ops.set_vf_guid(device, vf, port, guid, type); |
|---|
| 2339 | 2507 | } |
|---|
| 2340 | 2508 | EXPORT_SYMBOL(ib_set_vf_guid); |
|---|
| 2509 | + |
|---|
| 2510 | +int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, |
|---|
| 2511 | + struct ifla_vf_guid *node_guid, |
|---|
| 2512 | + struct ifla_vf_guid *port_guid) |
|---|
| 2513 | +{ |
|---|
| 2514 | + if (!device->ops.get_vf_guid) |
|---|
| 2515 | + return -EOPNOTSUPP; |
|---|
| 2516 | + |
|---|
| 2517 | + return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); |
|---|
| 2518 | +} |
|---|
| 2519 | +EXPORT_SYMBOL(ib_get_vf_guid); |
|---|
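The new ib_get_vf_guid() wrapper is symmetric with ib_set_vf_guid(); a hypothetical caller might look like the following (struct ifla_vf_guid comes from <linux/if_link.h>, which ib_verbs.h already pulls in).

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: read back one VF's node and port GUIDs. */
static int example_query_vf_guids(struct ib_device *device, int vf, u8 port)
{
	struct ifla_vf_guid node_guid, port_guid;
	int ret;

	ret = ib_get_vf_guid(device, vf, port, &node_guid, &port_guid);
	if (ret)
		return ret;	/* -EOPNOTSUPP without driver support */

	pr_info("vf %d: node guid %llx, port guid %llx\n",
		vf, node_guid.guid, port_guid.guid);
	return 0;
}
```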
| 2520 | +/** |
|---|
| 2521 | + * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection |
|---|
| 2522 | + * information) and set an appropriate memory region for registration. |
|---|
| 2523 | + * @mr: memory region |
|---|
| 2524 | + * @data_sg: dma mapped scatterlist for data |
|---|
| 2525 | + * @data_sg_nents: number of entries in data_sg |
|---|
| 2526 | + * @data_sg_offset: offset in bytes into data_sg |
|---|
| 2527 | + * @meta_sg: dma mapped scatterlist for metadata |
|---|
| 2528 | + * @meta_sg_nents: number of entries in meta_sg |
|---|
| 2529 | + * @meta_sg_offset: offset in bytes into meta_sg |
|---|
| 2530 | + * @page_size: page vector desired page size |
|---|
| 2531 | + * |
|---|
| 2532 | + * Constraints: |
|---|
| 2533 | + * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY. |
|---|
| 2534 | + * |
|---|
| 2535 | + * Return: 0 on success. |
|---|
| 2536 | + * |
|---|
| 2537 | + * After this completes successfully, the memory region |
|---|
| 2538 | + * is ready for registration. |
|---|
| 2539 | + */ |
|---|
| 2540 | +int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, |
|---|
| 2541 | + int data_sg_nents, unsigned int *data_sg_offset, |
|---|
| 2542 | + struct scatterlist *meta_sg, int meta_sg_nents, |
|---|
| 2543 | + unsigned int *meta_sg_offset, unsigned int page_size) |
|---|
| 2544 | +{ |
|---|
| 2545 | + if (unlikely(!mr->device->ops.map_mr_sg_pi || |
|---|
| 2546 | + WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) |
|---|
| 2547 | + return -EOPNOTSUPP; |
|---|
| 2548 | + |
|---|
| 2549 | + mr->page_size = page_size; |
|---|
| 2550 | + |
|---|
| 2551 | + return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, |
|---|
| 2552 | + data_sg_offset, meta_sg, |
|---|
| 2553 | + meta_sg_nents, meta_sg_offset); |
|---|
| 2554 | +} |
|---|
| 2555 | +EXPORT_SYMBOL(ib_map_mr_sg_pi); |
|---|
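A sketch of registering split data/metadata scatterlists on an IB_MR_TYPE_INTEGRITY MR (allocated via ib_alloc_mr_integrity(), exported earlier in this patch); the helper name and zero start offsets are illustrative.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: map DMA-mapped data and metadata SG lists
 * onto an integrity MR before posting the registration WR. */
static int example_map_pi(struct ib_mr *mr,
			  struct scatterlist *data_sg, int data_nents,
			  struct scatterlist *meta_sg, int meta_nents)
{
	unsigned int data_off = 0, meta_off = 0;

	/* Returns 0 on success, per the kernel-doc above. */
	return ib_map_mr_sg_pi(mr, data_sg, data_nents, &data_off,
			       meta_sg, meta_nents, &meta_off, PAGE_SIZE);
}
```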
| 2341 | 2556 | |
|---|
| 2342 | 2557 | /** |
|---|
| 2343 | 2558 | * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list |
|---|
| .. | .. |
|---|
| 2349 | 2564 | * @page_size: page vector desired page size |
|---|
| 2350 | 2565 | * |
|---|
| 2351 | 2566 | * Constraints: |
|---|
| 2567 | + * |
|---|
| 2352 | 2568 | * - The first sg element is allowed to have an offset. |
|---|
| 2353 | 2569 | * - Each sg element must either be aligned to page_size or virtually |
|---|
| 2354 | 2570 | * contiguous to the previous element. In case an sg element has a |
|---|
| .. | .. |
|---|
| 2367 | 2583 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
|---|
| 2368 | 2584 | unsigned int *sg_offset, unsigned int page_size) |
|---|
| 2369 | 2585 | { |
|---|
| 2370 | | - if (unlikely(!mr->device->map_mr_sg)) |
|---|
| 2586 | + if (unlikely(!mr->device->ops.map_mr_sg)) |
|---|
| 2371 | 2587 | return -EOPNOTSUPP; |
|---|
| 2372 | 2588 | |
|---|
| 2373 | 2589 | mr->page_size = page_size; |
|---|
| 2374 | 2590 | |
|---|
| 2375 | | - return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset); |
|---|
| 2591 | + return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); |
|---|
| 2376 | 2592 | } |
|---|
| 2377 | 2593 | EXPORT_SYMBOL(ib_map_mr_sg); |
|---|
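For comparison, the non-integrity path: ib_map_mr_sg() returns the number of SG elements mapped, so a short count means the MR could not cover the whole list. `example_map_mr` is hypothetical.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: map a DMA-mapped SG list into a fast-reg MR. */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
			  int sg_nents)
{
	int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);

	if (n < 0)
		return n;
	if (n < sg_nents)
		return -EINVAL;	/* MR too small for the whole list */
	return 0;
}
```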
| 2378 | 2594 | |
|---|
| .. | .. |
|---|
| 2382 | 2598 | * @mr: memory region |
|---|
| 2383 | 2599 | * @sgl: dma mapped scatterlist |
|---|
| 2384 | 2600 | * @sg_nents: number of entries in sg |
|---|
| 2385 | | - * @sg_offset_p: IN: start offset in bytes into sg |
|---|
| 2386 | | - * OUT: offset in bytes for element n of the sg of the first |
|---|
| 2601 | + * @sg_offset_p: ==== ======================================================= |
|---|
| 2602 | + * IN start offset in bytes into sg |
|---|
| 2603 | + * OUT offset in bytes for element n of the sg of the first |
|---|
| 2387 | 2604 | * byte that has not been processed where n is the return |
|---|
| 2388 | 2605 | * value of this function. |
|---|
| 2606 | + * ==== ======================================================= |
|---|
| 2389 | 2607 | * @set_page: driver page assignment function pointer |
|---|
| 2390 | 2608 | * |
|---|
| 2391 | 2609 | * Core service helper for drivers to convert the largest |
|---|
| .. | .. |
|---|
| 2571 | 2789 | */ |
|---|
| 2572 | 2790 | void ib_drain_sq(struct ib_qp *qp) |
|---|
| 2573 | 2791 | { |
|---|
| 2574 | | - if (qp->device->drain_sq) |
|---|
| 2575 | | - qp->device->drain_sq(qp); |
|---|
| 2792 | + if (qp->device->ops.drain_sq) |
|---|
| 2793 | + qp->device->ops.drain_sq(qp); |
|---|
| 2576 | 2794 | else |
|---|
| 2577 | 2795 | __ib_drain_sq(qp); |
|---|
| 2796 | + trace_cq_drain_complete(qp->send_cq); |
|---|
| 2578 | 2797 | } |
|---|
| 2579 | 2798 | EXPORT_SYMBOL(ib_drain_sq); |
|---|
| 2580 | 2799 | |
|---|
| .. | .. |
|---|
| 2599 | 2818 | */ |
|---|
| 2600 | 2819 | void ib_drain_rq(struct ib_qp *qp) |
|---|
| 2601 | 2820 | { |
|---|
| 2602 | | - if (qp->device->drain_rq) |
|---|
| 2603 | | - qp->device->drain_rq(qp); |
|---|
| 2821 | + if (qp->device->ops.drain_rq) |
|---|
| 2822 | + qp->device->ops.drain_rq(qp); |
|---|
| 2604 | 2823 | else |
|---|
| 2605 | 2824 | __ib_drain_rq(qp); |
|---|
| 2825 | + trace_cq_drain_complete(qp->recv_cq); |
|---|
| 2606 | 2826 | } |
|---|
| 2607 | 2827 | EXPORT_SYMBOL(ib_drain_rq); |
|---|
| 2608 | 2828 | |
|---|
| .. | .. |
|---|
| 2628 | 2848 | ib_drain_rq(qp); |
|---|
| 2629 | 2849 | } |
|---|
| 2630 | 2850 | EXPORT_SYMBOL(ib_drain_qp); |
|---|
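A typical teardown ordering around the drain helpers, which now also emit the cq_drain_complete tracepoint; this assumes the QP's CQs use the IB_POLL_SOFTIRQ or IB_POLL_WORKQUEUE polling context, as the drain API requires.

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: quiesce all work on a QP before destroying it. */
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* drains both the SQ and the RQ */
	ib_destroy_qp(qp);
}
```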
| 2851 | + |
|---|
| 2852 | +struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, |
|---|
| 2853 | + enum rdma_netdev_t type, const char *name, |
|---|
| 2854 | + unsigned char name_assign_type, |
|---|
| 2855 | + void (*setup)(struct net_device *)) |
|---|
| 2856 | +{ |
|---|
| 2857 | + struct rdma_netdev_alloc_params params; |
|---|
| 2858 | + struct net_device *netdev; |
|---|
| 2859 | + int rc; |
|---|
| 2860 | + |
|---|
| 2861 | + if (!device->ops.rdma_netdev_get_params) |
|---|
| 2862 | + return ERR_PTR(-EOPNOTSUPP); |
|---|
| 2863 | + |
|---|
| 2864 | + rc = device->ops.rdma_netdev_get_params(device, port_num, type, |
|---|
| 2865 | + &params); |
|---|
| 2866 | + if (rc) |
|---|
| 2867 | + return ERR_PTR(rc); |
|---|
| 2868 | + |
|---|
| 2869 | + netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type, |
|---|
| 2870 | + setup, params.txqs, params.rxqs); |
|---|
| 2871 | + if (!netdev) |
|---|
| 2872 | + return ERR_PTR(-ENOMEM); |
|---|
| 2873 | + |
|---|
| 2874 | + return netdev; |
|---|
| 2875 | +} |
|---|
| 2876 | +EXPORT_SYMBOL(rdma_alloc_netdev); |
|---|
| 2877 | + |
|---|
| 2878 | +int rdma_init_netdev(struct ib_device *device, u8 port_num, |
|---|
| 2879 | + enum rdma_netdev_t type, const char *name, |
|---|
| 2880 | + unsigned char name_assign_type, |
|---|
| 2881 | + void (*setup)(struct net_device *), |
|---|
| 2882 | + struct net_device *netdev) |
|---|
| 2883 | +{ |
|---|
| 2884 | + struct rdma_netdev_alloc_params params; |
|---|
| 2885 | + int rc; |
|---|
| 2886 | + |
|---|
| 2887 | + if (!device->ops.rdma_netdev_get_params) |
|---|
| 2888 | + return -EOPNOTSUPP; |
|---|
| 2889 | + |
|---|
| 2890 | + rc = device->ops.rdma_netdev_get_params(device, port_num, type, |
|---|
| 2891 | + &params); |
|---|
| 2892 | + if (rc) |
|---|
| 2893 | + return rc; |
|---|
| 2894 | + |
|---|
| 2895 | + return params.initialize_rdma_netdev(device, port_num, |
|---|
| 2896 | + netdev, params.param); |
|---|
| 2897 | +} |
|---|
| 2898 | +EXPORT_SYMBOL(rdma_init_netdev); |
|---|
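How the two netdev helpers above are meant to compose, on the assumption that the caller frees the netdev when initialization fails; the "ib%d" name template and the RDMA_NETDEV_IPOIB type are illustrative.

```c
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: allocate, then initialize, an rdma_netdev. */
static struct net_device *
example_new_rdma_netdev(struct ib_device *device, u8 port_num,
			void (*setup)(struct net_device *))
{
	struct net_device *ndev;
	int rc;

	ndev = rdma_alloc_netdev(device, port_num, RDMA_NETDEV_IPOIB,
				 "ib%d", NET_NAME_UNKNOWN, setup);
	if (IS_ERR(ndev))
		return ndev;

	rc = rdma_init_netdev(device, port_num, RDMA_NETDEV_IPOIB,
			      "ib%d", NET_NAME_UNKNOWN, setup, ndev);
	if (rc) {
		free_netdev(ndev);
		return ERR_PTR(rc);
	}
	return ndev;
}
```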
| 2899 | + |
|---|
| 2900 | +void __rdma_block_iter_start(struct ib_block_iter *biter, |
|---|
| 2901 | + struct scatterlist *sglist, unsigned int nents, |
|---|
| 2902 | + unsigned long pgsz) |
|---|
| 2903 | +{ |
|---|
| 2904 | + memset(biter, 0, sizeof(struct ib_block_iter)); |
|---|
| 2905 | + biter->__sg = sglist; |
|---|
| 2906 | + biter->__sg_nents = nents; |
|---|
| 2907 | + |
|---|
| 2908 | + /* Driver provides best block size to use */ |
|---|
| 2909 | + biter->__pg_bit = __fls(pgsz); |
|---|
| 2910 | +} |
|---|
| 2911 | +EXPORT_SYMBOL(__rdma_block_iter_start); |
|---|
| 2912 | + |
|---|
| 2913 | +bool __rdma_block_iter_next(struct ib_block_iter *biter) |
|---|
| 2914 | +{ |
|---|
| 2915 | + unsigned int block_offset; |
|---|
| 2916 | + unsigned int sg_delta; |
|---|
| 2917 | + |
|---|
| 2918 | + if (!biter->__sg_nents || !biter->__sg) |
|---|
| 2919 | + return false; |
|---|
| 2920 | + |
|---|
| 2921 | + biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; |
|---|
| 2922 | + block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); |
|---|
| 2923 | + sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; |
|---|
| 2924 | + |
|---|
| 2925 | + if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { |
|---|
| 2926 | + biter->__sg_advance += sg_delta; |
|---|
| 2927 | + } else { |
|---|
| 2928 | + biter->__sg_advance = 0; |
|---|
| 2929 | + biter->__sg = sg_next(biter->__sg); |
|---|
| 2930 | + biter->__sg_nents--; |
|---|
| 2931 | + } |
|---|
| 2932 | + |
|---|
| 2933 | + return true; |
|---|
| 2934 | +} |
|---|
| 2935 | +EXPORT_SYMBOL(__rdma_block_iter_next); |
|---|
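These two exports back the rdma_for_each_block() iterator in ib_verbs.h; a driver-side sketch of walking an SG list in aligned blocks follows (names illustrative).

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: visit each pgsz-aligned block of a DMA-mapped SG
 * list; rdma_block_iter_dma_address() masks off the in-block offset. */
static void example_walk_blocks(struct scatterlist *sg, unsigned int nents,
				unsigned long pgsz)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sg, &biter, nents, pgsz) {
		dma_addr_t addr = rdma_block_iter_dma_address(&biter);

		pr_debug("block at %pad\n", &addr);
	}
}
```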