| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright (c) 2005 Voltaire Inc. All rights reserved. |
|---|
| 3 | 4 | * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. |
|---|
| 4 | | - * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. |
|---|
| 5 | + * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved. |
|---|
| 5 | 6 | * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. |
|---|
| 6 | | - * |
|---|
| 7 | | - * This software is available to you under a choice of one of two |
|---|
| 8 | | - * licenses. You may choose to be licensed under the terms of the GNU |
|---|
| 9 | | - * General Public License (GPL) Version 2, available from the file |
|---|
| 10 | | - * COPYING in the main directory of this source tree, or the |
|---|
| 11 | | - * OpenIB.org BSD license below: |
|---|
| 12 | | - * |
|---|
| 13 | | - * Redistribution and use in source and binary forms, with or |
|---|
| 14 | | - * without modification, are permitted provided that the following |
|---|
| 15 | | - * conditions are met: |
|---|
| 16 | | - * |
|---|
| 17 | | - * - Redistributions of source code must retain the above |
|---|
| 18 | | - * copyright notice, this list of conditions and the following |
|---|
| 19 | | - * disclaimer. |
|---|
| 20 | | - * |
|---|
| 21 | | - * - Redistributions in binary form must reproduce the above |
|---|
| 22 | | - * copyright notice, this list of conditions and the following |
|---|
| 23 | | - * disclaimer in the documentation and/or other materials |
|---|
| 24 | | - * provided with the distribution. |
|---|
| 25 | | - * |
|---|
| 26 | | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
|---|
| 27 | | - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
|---|
| 28 | | - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
|---|
| 29 | | - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
|---|
| 30 | | - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
|---|
| 31 | | - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
|---|
| 32 | | - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|---|
| 33 | | - * SOFTWARE. |
|---|
| 34 | 7 | */ |
|---|
| 35 | 8 | |
|---|
| 36 | 9 | #include <linux/completion.h> |
|---|
| .. | .. |
|---|
| 39 | 12 | #include <linux/mutex.h> |
|---|
| 40 | 13 | #include <linux/random.h> |
|---|
| 41 | 14 | #include <linux/igmp.h> |
|---|
| 42 | | -#include <linux/idr.h> |
|---|
| 15 | +#include <linux/xarray.h> |
|---|
| 43 | 16 | #include <linux/inetdevice.h> |
|---|
| 44 | 17 | #include <linux/slab.h> |
|---|
| 45 | 18 | #include <linux/module.h> |
|---|
| .. | .. |
|---|
| 63 | 36 | |
|---|
| 64 | 37 | #include "core_priv.h" |
|---|
| 65 | 38 | #include "cma_priv.h" |
|---|
| 39 | +#include "cma_trace.h" |
|---|
| 66 | 40 | |
|---|
| 67 | 41 | MODULE_AUTHOR("Sean Hefty"); |
|---|
| 68 | 42 | MODULE_DESCRIPTION("Generic RDMA CM Agent"); |
|---|
| .. | .. |
|---|
| 94 | 68 | [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", |
|---|
| 95 | 69 | }; |
|---|
| 96 | 70 | |
|---|
| 71 | +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, |
|---|
| 72 | + enum ib_gid_type gid_type); |
|---|
| 73 | + |
|---|
| 97 | 74 | const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) |
|---|
| 98 | 75 | { |
|---|
| 99 | 76 | size_t index = event; |
|---|
| .. | .. |
|---|
| 117 | 94 | } |
|---|
| 118 | 95 | EXPORT_SYMBOL(rdma_reject_msg); |
|---|
| 119 | 96 | |
|---|
| 120 | | -bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) |
|---|
| 97 | +/** |
|---|
| 98 | + * rdma_is_consumer_reject - return true if the consumer rejected the connect |
|---|
| 99 | + * request. |
|---|
| 100 | + * @id: Communication identifier that received the REJECT event. |
|---|
| 101 | + * @reason: Value returned in the REJECT event status field. |
|---|
| 102 | + */ |
|---|
| 103 | +static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) |
|---|
| 121 | 104 | { |
|---|
| 122 | 105 | if (rdma_ib_or_roce(id->device, id->port_num)) |
|---|
| 123 | 106 | return reason == IB_CM_REJ_CONSUMER_DEFINED; |
|---|
| .. | .. |
|---|
| 128 | 111 | WARN_ON_ONCE(1); |
|---|
| 129 | 112 | return false; |
|---|
| 130 | 113 | } |
|---|
| 131 | | -EXPORT_SYMBOL(rdma_is_consumer_reject); |
|---|
| 132 | 114 | |
|---|
| 133 | 115 | const void *rdma_consumer_reject_data(struct rdma_cm_id *id, |
|---|
| 134 | 116 | struct rdma_cm_event *ev, u8 *data_len) |
|---|
| .. | .. |
|---|
| 174 | 156 | } |
|---|
| 175 | 157 | EXPORT_SYMBOL(rdma_res_to_id); |
|---|
| 176 | 158 | |
|---|
| 177 | | -static void cma_add_one(struct ib_device *device); |
|---|
| 159 | +static int cma_add_one(struct ib_device *device); |
|---|
| 178 | 160 | static void cma_remove_one(struct ib_device *device, void *client_data); |
|---|
| 179 | 161 | |
|---|
| 180 | 162 | static struct ib_client cma_client = { |
|---|
| .. | .. |
|---|
| 191 | 173 | static unsigned int cma_pernet_id; |
|---|
| 192 | 174 | |
|---|
| 193 | 175 | struct cma_pernet { |
|---|
| 194 | | - struct idr tcp_ps; |
|---|
| 195 | | - struct idr udp_ps; |
|---|
| 196 | | - struct idr ipoib_ps; |
|---|
| 197 | | - struct idr ib_ps; |
|---|
| 176 | + struct xarray tcp_ps; |
|---|
| 177 | + struct xarray udp_ps; |
|---|
| 178 | + struct xarray ipoib_ps; |
|---|
| 179 | + struct xarray ib_ps; |
|---|
| 198 | 180 | }; |
|---|
| 199 | 181 | |
|---|
| 200 | 182 | static struct cma_pernet *cma_pernet(struct net *net) |
|---|
| .. | .. |
|---|
| 202 | 184 | return net_generic(net, cma_pernet_id); |
|---|
| 203 | 185 | } |
|---|
| 204 | 186 | |
|---|
| 205 | | -static struct idr *cma_pernet_idr(struct net *net, enum rdma_ucm_port_space ps) |
|---|
| 187 | +static |
|---|
| 188 | +struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) |
|---|
| 206 | 189 | { |
|---|
| 207 | 190 | struct cma_pernet *pernet = cma_pernet(net); |
|---|
| 208 | 191 | |
|---|
| .. | .. |
|---|
| 224 | 207 | struct list_head list; |
|---|
| 225 | 208 | struct ib_device *device; |
|---|
| 226 | 209 | struct completion comp; |
|---|
| 227 | | - atomic_t refcount; |
|---|
| 210 | + refcount_t refcount; |
|---|
| 228 | 211 | struct list_head id_list; |
|---|
| 229 | 212 | enum ib_gid_type *default_gid_type; |
|---|
| 230 | 213 | u8 *default_roce_tos; |
|---|
| .. | .. |
|---|
| 247 | 230 | static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, |
|---|
| 248 | 231 | struct rdma_bind_list *bind_list, int snum) |
|---|
| 249 | 232 | { |
|---|
| 250 | | - struct idr *idr = cma_pernet_idr(net, ps); |
|---|
| 233 | + struct xarray *xa = cma_pernet_xa(net, ps); |
|---|
| 251 | 234 | |
|---|
| 252 | | - return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); |
|---|
| 235 | + return xa_insert(xa, snum, bind_list, GFP_KERNEL); |
|---|
| 253 | 236 | } |
|---|
| 254 | 237 | |
|---|
| 255 | 238 | static struct rdma_bind_list *cma_ps_find(struct net *net, |
|---|
| 256 | 239 | enum rdma_ucm_port_space ps, int snum) |
|---|
| 257 | 240 | { |
|---|
| 258 | | - struct idr *idr = cma_pernet_idr(net, ps); |
|---|
| 241 | + struct xarray *xa = cma_pernet_xa(net, ps); |
|---|
| 259 | 242 | |
|---|
| 260 | | - return idr_find(idr, snum); |
|---|
| 243 | + return xa_load(xa, snum); |
|---|
| 261 | 244 | } |
|---|
| 262 | 245 | |
|---|
| 263 | 246 | static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, |
|---|
| 264 | 247 | int snum) |
|---|
| 265 | 248 | { |
|---|
| 266 | | - struct idr *idr = cma_pernet_idr(net, ps); |
|---|
| 249 | + struct xarray *xa = cma_pernet_xa(net, ps); |
|---|
| 267 | 250 | |
|---|
| 268 | | - idr_remove(idr, snum); |
|---|
| 251 | + xa_erase(xa, snum); |
|---|
| 269 | 252 | } |
|---|
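The hunk above converts the per-net port-space tables from IDR to XArray. The semantics shift slightly: `idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL)` returned the allocated id on success, while `xa_insert()` returns 0 and fails with `-EBUSY` when the index is already occupied, which maps naturally onto "port already bound". A minimal sketch of the resulting behavior (illustrative only; the `demo_*` names are invented for this example):

```c
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_ps);	/* stands in for pernet->tcp_ps etc. */

/* 0 on success, -EBUSY if snum is already bound, -ENOMEM on OOM */
static int demo_bind(void *bind_list, unsigned long snum)
{
	return xa_insert(&demo_ps, snum, bind_list, GFP_KERNEL);
}

static void *demo_lookup(unsigned long snum)
{
	return xa_load(&demo_ps, snum);	/* NULL when the port is free */
}

static void demo_unbind(unsigned long snum)
{
	xa_erase(&demo_ps, snum);
}
```

Unlike an IDR, whose modifications need external locking, the XArray carries its own internal lock (and `xa_load()` is RCU-safe), so these helpers are self-consistent on their own.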
| 270 | 253 | |
|---|
| 271 | 254 | enum { |
|---|
| 272 | 255 | CMA_OPTION_AFONLY, |
|---|
| 273 | 256 | }; |
|---|
| 274 | 257 | |
|---|
| 275 | | -void cma_ref_dev(struct cma_device *cma_dev) |
|---|
| 258 | +void cma_dev_get(struct cma_device *cma_dev) |
|---|
| 276 | 259 | { |
|---|
| 277 | | - atomic_inc(&cma_dev->refcount); |
|---|
| 260 | + refcount_inc(&cma_dev->refcount); |
|---|
| 261 | +} |
|---|
| 262 | + |
|---|
| 263 | +void cma_dev_put(struct cma_device *cma_dev) |
|---|
| 264 | +{ |
|---|
| 265 | + if (refcount_dec_and_test(&cma_dev->refcount)) |
|---|
| 266 | + complete(&cma_dev->comp); |
|---|
| 278 | 267 | } |
|---|
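`cma_dev->refcount` moves from `atomic_t` to `refcount_t`, and the open-coded `cma_ref_dev()`/`cma_deref_dev()` pair is renamed to the kernel's usual `_get`/`_put` convention. `refcount_t` saturates instead of wrapping and WARNs on increment-from-zero, so leaks and use-after-free become loud. The put side pairs with a completion so the remover can wait for the last user; a sketch of that pattern (illustrative, `demo_*` names invented, assumes `init_completion()` and `refcount_set(.., 1)` ran at setup):

```c
#include <linux/refcount.h>
#include <linux/completion.h>

struct demo_dev {
	refcount_t refcount;		/* set to 1 at creation */
	struct completion comp;		/* init_completion() at creation */
};

static void demo_put(struct demo_dev *d)
{
	if (refcount_dec_and_test(&d->refcount))
		complete(&d->comp);	/* last reference is gone */
}

static void demo_remove(struct demo_dev *d)
{
	demo_put(d);			/* drop the creation reference */
	wait_for_completion(&d->comp);	/* sleep until all users put */
}
```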
| 279 | 268 | |
|---|
| 280 | 269 | struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, |
|---|
| .. | .. |
|---|
| 292 | 281 | } |
|---|
| 293 | 282 | |
|---|
| 294 | 283 | if (found_cma_dev) |
|---|
| 295 | | - cma_ref_dev(found_cma_dev); |
|---|
| 284 | + cma_dev_get(found_cma_dev); |
|---|
| 296 | 285 | mutex_unlock(&lock); |
|---|
| 297 | 286 | return found_cma_dev; |
|---|
| 298 | 287 | } |
|---|
| .. | .. |
|---|
| 314 | 303 | |
|---|
| 315 | 304 | if (!rdma_is_port_valid(cma_dev->device, port)) |
|---|
| 316 | 305 | return -EINVAL; |
|---|
| 306 | + |
|---|
| 307 | + if (default_gid_type == IB_GID_TYPE_IB && |
|---|
| 308 | + rdma_protocol_roce_eth_encap(cma_dev->device, port)) |
|---|
| 309 | + default_gid_type = IB_GID_TYPE_ROCE; |
|---|
| 317 | 310 | |
|---|
| 318 | 311 | supported_gids = roce_gid_type_mask_support(cma_dev->device, port); |
|---|
| 319 | 312 | |
|---|
| .. | .. |
|---|
| 360 | 353 | struct cma_multicast { |
|---|
| 361 | 354 | struct rdma_id_private *id_priv; |
|---|
| 362 | 355 | union { |
|---|
| 363 | | - struct ib_sa_multicast *ib; |
|---|
| 364 | | - } multicast; |
|---|
| 356 | + struct ib_sa_multicast *sa_mc; |
|---|
| 357 | + struct { |
|---|
| 358 | + struct work_struct work; |
|---|
| 359 | + struct rdma_cm_event event; |
|---|
| 360 | + } iboe_join; |
|---|
| 361 | + }; |
|---|
| 365 | 362 | struct list_head list; |
|---|
| 366 | 363 | void *context; |
|---|
| 367 | 364 | struct sockaddr_storage addr; |
|---|
| 368 | | - struct kref mcref; |
|---|
| 369 | 365 | u8 join_state; |
|---|
| 370 | 366 | }; |
|---|
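`struct cma_multicast` drops its `kref` (the owning id now controls the lifetime; see the reworked `destroy_mc()` further down) and replaces the one-member `multicast` union with an anonymous union: an entry is either an SA-managed IB join (`sa_mc`) or a RoCE join that reports its event from deferred work (`iboe_join`), never both, so the two representations can share storage. Roughly (sketch; `demo_join_handler` is a hypothetical work function):

```c
if (rdma_cap_ib_mcast(id->device, id->port_num)) {
	mc->sa_mc = sa_mc;		/* IB arm: the SA tracks the join */
} else if (rdma_protocol_roce(id->device, id->port_num)) {
	/* RoCE arm: the join event is delivered from a work item */
	INIT_WORK(&mc->iboe_join.work, demo_join_handler);
}
```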
| 371 | 367 | |
|---|
| .. | .. |
|---|
| 375 | 371 | enum rdma_cm_state old_state; |
|---|
| 376 | 372 | enum rdma_cm_state new_state; |
|---|
| 377 | 373 | struct rdma_cm_event event; |
|---|
| 378 | | -}; |
|---|
| 379 | | - |
|---|
| 380 | | -struct cma_ndev_work { |
|---|
| 381 | | - struct work_struct work; |
|---|
| 382 | | - struct rdma_id_private *id; |
|---|
| 383 | | - struct rdma_cm_event event; |
|---|
| 384 | | -}; |
|---|
| 385 | | - |
|---|
| 386 | | -struct iboe_mcast_work { |
|---|
| 387 | | - struct work_struct work; |
|---|
| 388 | | - struct rdma_id_private *id; |
|---|
| 389 | | - struct cma_multicast *mc; |
|---|
| 390 | 374 | }; |
|---|
| 391 | 375 | |
|---|
| 392 | 376 | union cma_ip_addr { |
|---|
| .. | .. |
|---|
| 418 | 402 | u16 pkey; |
|---|
| 419 | 403 | }; |
|---|
| 420 | 404 | |
|---|
| 421 | | -static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) |
|---|
| 422 | | -{ |
|---|
| 423 | | - unsigned long flags; |
|---|
| 424 | | - int ret; |
|---|
| 425 | | - |
|---|
| 426 | | - spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 427 | | - ret = (id_priv->state == comp); |
|---|
| 428 | | - spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 429 | | - return ret; |
|---|
| 430 | | -} |
|---|
| 431 | | - |
|---|
| 432 | 405 | static int cma_comp_exch(struct rdma_id_private *id_priv, |
|---|
| 433 | 406 | enum rdma_cm_state comp, enum rdma_cm_state exch) |
|---|
| 434 | 407 | { |
|---|
| 435 | 408 | unsigned long flags; |
|---|
| 436 | 409 | int ret; |
|---|
| 437 | 410 | |
|---|
| 411 | + /* |
|---|
| 412 | + * The FSM uses a funny double locking where state is protected by both |
|---|
| 413 | + * the handler_mutex and the spinlock. State is not allowed to change |
|---|
| 414 | + * to/from a handler_mutex protected value without also holding |
|---|
| 415 | + * handler_mutex. |
|---|
| 416 | + */ |
|---|
| 417 | + if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) |
|---|
| 418 | + lockdep_assert_held(&id_priv->handler_mutex); |
|---|
| 419 | + |
|---|
| 438 | 420 | spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 439 | 421 | if ((ret = (id_priv->state == comp))) |
|---|
| 440 | 422 | id_priv->state = exch; |
|---|
| 441 | 423 | spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 442 | 424 | return ret; |
|---|
| 443 | | -} |
|---|
| 444 | | - |
|---|
| 445 | | -static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, |
|---|
| 446 | | - enum rdma_cm_state exch) |
|---|
| 447 | | -{ |
|---|
| 448 | | - unsigned long flags; |
|---|
| 449 | | - enum rdma_cm_state old; |
|---|
| 450 | | - |
|---|
| 451 | | - spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 452 | | - old = id_priv->state; |
|---|
| 453 | | - id_priv->state = exch; |
|---|
| 454 | | - spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 455 | | - return old; |
|---|
| 456 | 425 | } |
|---|
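The deleted `cma_comp()` and `cma_exch()` helpers were racy by construction: a bare state test, or an unconditional exchange, says nothing about what the state still is by the time the caller acts on it. What survives is `cma_comp_exch()`, now with a lockdep assertion enforcing the documented rule that transitions touching `RDMA_CM_CONNECT` happen under `handler_mutex`. A guarded transition then looks roughly like this (sketch, error handling elided):

```c
mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
	mutex_unlock(&id_priv->handler_mutex);
	return -EINVAL;		/* someone else moved the FSM first */
}
/* ... issue the connect; handlers see a stable CONNECT state ... */
mutex_unlock(&id_priv->handler_mutex);
```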
| 457 | 426 | |
|---|
| 458 | 427 | static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) |
|---|
| .. | .. |
|---|
| 488 | 457 | static void _cma_attach_to_dev(struct rdma_id_private *id_priv, |
|---|
| 489 | 458 | struct cma_device *cma_dev) |
|---|
| 490 | 459 | { |
|---|
| 491 | | - cma_ref_dev(cma_dev); |
|---|
| 460 | + cma_dev_get(cma_dev); |
|---|
| 492 | 461 | id_priv->cma_dev = cma_dev; |
|---|
| 493 | 462 | id_priv->id.device = cma_dev->device; |
|---|
| 494 | 463 | id_priv->id.route.addr.dev_addr.transport = |
|---|
| 495 | 464 | rdma_node_get_transport(cma_dev->device->node_type); |
|---|
| 496 | 465 | list_add_tail(&id_priv->list, &cma_dev->id_list); |
|---|
| 497 | | - rdma_restrack_add(&id_priv->res); |
|---|
| 466 | + |
|---|
| 467 | + trace_cm_id_attach(id_priv, cma_dev->device); |
|---|
| 498 | 468 | } |
|---|
| 499 | 469 | |
|---|
| 500 | 470 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
|---|
| .. | .. |
|---|
| 506 | 476 | rdma_start_port(cma_dev->device)]; |
|---|
| 507 | 477 | } |
|---|
| 508 | 478 | |
|---|
| 509 | | -void cma_deref_dev(struct cma_device *cma_dev) |
|---|
| 510 | | -{ |
|---|
| 511 | | - if (atomic_dec_and_test(&cma_dev->refcount)) |
|---|
| 512 | | - complete(&cma_dev->comp); |
|---|
| 513 | | -} |
|---|
| 514 | | - |
|---|
| 515 | | -static inline void release_mc(struct kref *kref) |
|---|
| 516 | | -{ |
|---|
| 517 | | - struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); |
|---|
| 518 | | - |
|---|
| 519 | | - kfree(mc->multicast.ib); |
|---|
| 520 | | - kfree(mc); |
|---|
| 521 | | -} |
|---|
| 522 | | - |
|---|
| 523 | 479 | static void cma_release_dev(struct rdma_id_private *id_priv) |
|---|
| 524 | 480 | { |
|---|
| 525 | 481 | mutex_lock(&lock); |
|---|
| 526 | 482 | list_del(&id_priv->list); |
|---|
| 527 | | - cma_deref_dev(id_priv->cma_dev); |
|---|
| 483 | + cma_dev_put(id_priv->cma_dev); |
|---|
| 528 | 484 | id_priv->cma_dev = NULL; |
|---|
| 485 | + id_priv->id.device = NULL; |
|---|
| 486 | + if (id_priv->id.route.addr.dev_addr.sgid_attr) { |
|---|
| 487 | + rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); |
|---|
| 488 | + id_priv->id.route.addr.dev_addr.sgid_attr = NULL; |
|---|
| 489 | + } |
|---|
| 529 | 490 | mutex_unlock(&lock); |
|---|
| 530 | 491 | } |
|---|
| 531 | 492 | |
|---|
| .. | .. |
|---|
| 544 | 505 | return id_priv->id.route.addr.src_addr.ss_family; |
|---|
| 545 | 506 | } |
|---|
| 546 | 507 | |
|---|
| 547 | | -static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) |
|---|
| 508 | +static int cma_set_default_qkey(struct rdma_id_private *id_priv) |
|---|
| 548 | 509 | { |
|---|
| 549 | 510 | struct ib_sa_mcmember_rec rec; |
|---|
| 550 | 511 | int ret = 0; |
|---|
| 551 | | - |
|---|
| 552 | | - if (id_priv->qkey) { |
|---|
| 553 | | - if (qkey && id_priv->qkey != qkey) |
|---|
| 554 | | - return -EINVAL; |
|---|
| 555 | | - return 0; |
|---|
| 556 | | - } |
|---|
| 557 | | - |
|---|
| 558 | | - if (qkey) { |
|---|
| 559 | | - id_priv->qkey = qkey; |
|---|
| 560 | | - return 0; |
|---|
| 561 | | - } |
|---|
| 562 | 512 | |
|---|
| 563 | 513 | switch (id_priv->id.ps) { |
|---|
| 564 | 514 | case RDMA_PS_UDP: |
|---|
| .. | .. |
|---|
| 577 | 527 | break; |
|---|
| 578 | 528 | } |
|---|
| 579 | 529 | return ret; |
|---|
| 530 | +} |
|---|
| 531 | + |
|---|
| 532 | +static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) |
|---|
| 533 | +{ |
|---|
| 534 | + if (!qkey || |
|---|
| 535 | + (id_priv->qkey && (id_priv->qkey != qkey))) |
|---|
| 536 | + return -EINVAL; |
|---|
| 537 | + |
|---|
| 538 | + id_priv->qkey = qkey; |
|---|
| 539 | + return 0; |
|---|
| 580 | 540 | } |
|---|
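`cma_set_qkey()` loses its implicit fallback: a zero qkey, or one that conflicts with a value already recorded, is now an explicit `-EINVAL`, and defaulting moves to the dedicated `cma_set_default_qkey()`, which derives the value from the port space (e.g. `RDMA_PS_UDP` uses the well-known UDP qkey). A userspace-style model of the new rules (illustrative; `set_qkey` and `DEMO_DEFAULT_QKEY` are invented names):

```c
#include <errno.h>
#include <stdint.h>

#define DEMO_DEFAULT_QKEY 0x01234567u	/* stand-in for RDMA_UDP_QKEY */

static int set_qkey(uint32_t *cur, uint32_t qkey)
{
	if (!qkey || (*cur && *cur != qkey))
		return -EINVAL;		/* zero or conflicting value */
	*cur = qkey;
	return 0;
}

static void set_default_qkey(uint32_t *cur)
{
	*cur = DEMO_DEFAULT_QKEY;
}
```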
| 581 | 541 | |
|---|
| 582 | 542 | static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) |
|---|
| .. | .. |
|---|
| 612 | 572 | int dev_type = dev_addr->dev_type; |
|---|
| 613 | 573 | struct net_device *ndev = NULL; |
|---|
| 614 | 574 | |
|---|
| 575 | + if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) |
|---|
| 576 | + return ERR_PTR(-ENODEV); |
|---|
| 577 | + |
|---|
| 615 | 578 | if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) |
|---|
| 616 | 579 | return ERR_PTR(-ENODEV); |
|---|
| 617 | 580 | |
|---|
| .. | .. |
|---|
| 639 | 602 | id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; |
|---|
| 640 | 603 | } |
|---|
| 641 | 604 | |
|---|
| 642 | | -static int cma_acquire_dev(struct rdma_id_private *id_priv, |
|---|
| 643 | | - const struct rdma_id_private *listen_id_priv) |
|---|
| 605 | +/** |
|---|
| 606 | + * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute |
|---|
| 607 | + * based on source ip address. |
|---|
| 608 | + * @id_priv: cm_id which should be bound to cma device |
|---|
| 609 | + * |
|---|
| 610 | + * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute |
|---|
| 611 | + * based on source IP address. It returns 0 on success or error code otherwise. |
|---|
| 612 | + * It is applicable to active and passive side cm_id. |
|---|
| 613 | + */ |
|---|
| 614 | +static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) |
|---|
| 644 | 615 | { |
|---|
| 645 | 616 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 646 | 617 | const struct ib_gid_attr *sgid_attr; |
|---|
| 647 | | - struct cma_device *cma_dev; |
|---|
| 648 | 618 | union ib_gid gid, iboe_gid, *gidp; |
|---|
| 619 | + struct cma_device *cma_dev; |
|---|
| 649 | 620 | enum ib_gid_type gid_type; |
|---|
| 650 | 621 | int ret = -ENODEV; |
|---|
| 651 | | - u8 port; |
|---|
| 622 | + unsigned int port; |
|---|
| 652 | 623 | |
|---|
| 653 | 624 | if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
|---|
| 654 | 625 | id_priv->id.ps == RDMA_PS_IPOIB) |
|---|
| 655 | 626 | return -EINVAL; |
|---|
| 656 | 627 | |
|---|
| 657 | | - mutex_lock(&lock); |
|---|
| 658 | 628 | rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, |
|---|
| 659 | 629 | &iboe_gid); |
|---|
| 660 | 630 | |
|---|
| 661 | 631 | memcpy(&gid, dev_addr->src_dev_addr + |
|---|
| 662 | | - rdma_addr_gid_offset(dev_addr), sizeof gid); |
|---|
| 632 | + rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
|---|
| 663 | 633 | |
|---|
| 664 | | - if (listen_id_priv) { |
|---|
| 665 | | - cma_dev = listen_id_priv->cma_dev; |
|---|
| 666 | | - port = listen_id_priv->id.port_num; |
|---|
| 667 | | - gidp = rdma_protocol_roce(cma_dev->device, port) ? |
|---|
| 668 | | - &iboe_gid : &gid; |
|---|
| 669 | | - gid_type = listen_id_priv->gid_type; |
|---|
| 670 | | - sgid_attr = cma_validate_port(cma_dev->device, port, |
|---|
| 671 | | - gid_type, gidp, id_priv); |
|---|
| 672 | | - if (!IS_ERR(sgid_attr)) { |
|---|
| 673 | | - id_priv->id.port_num = port; |
|---|
| 674 | | - cma_bind_sgid_attr(id_priv, sgid_attr); |
|---|
| 675 | | - ret = 0; |
|---|
| 676 | | - goto out; |
|---|
| 677 | | - } |
|---|
| 678 | | - } |
|---|
| 679 | | - |
|---|
| 634 | + mutex_lock(&lock); |
|---|
| 680 | 635 | list_for_each_entry(cma_dev, &dev_list, list) { |
|---|
| 681 | | - for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { |
|---|
| 682 | | - if (listen_id_priv && |
|---|
| 683 | | - listen_id_priv->cma_dev == cma_dev && |
|---|
| 684 | | - listen_id_priv->id.port_num == port) |
|---|
| 685 | | - continue; |
|---|
| 686 | | - |
|---|
| 636 | + rdma_for_each_port (cma_dev->device, port) { |
|---|
| 687 | 637 | gidp = rdma_protocol_roce(cma_dev->device, port) ? |
|---|
| 688 | 638 | &iboe_gid : &gid; |
|---|
| 689 | 639 | gid_type = cma_dev->default_gid_type[port - 1]; |
|---|
| 690 | 640 | sgid_attr = cma_validate_port(cma_dev->device, port, |
|---|
| 691 | 641 | gid_type, gidp, id_priv); |
|---|
| 642 | + if (!IS_ERR(sgid_attr)) { |
|---|
| 643 | + id_priv->id.port_num = port; |
|---|
| 644 | + cma_bind_sgid_attr(id_priv, sgid_attr); |
|---|
| 645 | + cma_attach_to_dev(id_priv, cma_dev); |
|---|
| 646 | + ret = 0; |
|---|
| 647 | + goto out; |
|---|
| 648 | + } |
|---|
| 649 | + } |
|---|
| 650 | + } |
|---|
| 651 | +out: |
|---|
| 652 | + mutex_unlock(&lock); |
|---|
| 653 | + return ret; |
|---|
| 654 | +} |
|---|
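The monolithic `cma_acquire_dev()` is split into purpose-built helpers: `cma_acquire_dev_by_src_ip()` for binding by source address, plus `cma_ib_acquire_dev()` and `cma_iw_acquire_dev()` below for the two passive-side flavors. Note also the netns check added to `cma_validate_port()` above, and the loop change from a hand-rolled `1..phys_port_cnt` to `rdma_for_each_port()`, which walks `rdma_start_port()` through `rdma_end_port()` and so also covers switch devices whose ports start at 0. The iterator in isolation (sketch):

```c
unsigned int port;

rdma_for_each_port(cma_dev->device, port) {
	if (!rdma_cap_af_ib(cma_dev->device, port))
		continue;
	/* ... per-port matching, as in the loop above ... */
}
```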

| 655 | + |
|---|
| 656 | +/** |
|---|
| 657 | + * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute |
|---|
| 658 | + * @id_priv: cm id to bind to cma device |
|---|
| 659 | + * @listen_id_priv: listener cm id to match against |
|---|
| 660 | + * @req: Pointer to req structure containing incoming |
|---|
| 661 | + *        request information |
|---|
| 662 | + * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when |
|---|
| 663 | + * rdma device matches for listen_id and incoming request. It also verifies |
|---|
| 664 | + * that a GID table entry is present for the source address. |
|---|
| 665 | + * Returns 0 on success, or returns error code otherwise. |
|---|
| 666 | + */ |
|---|
| 667 | +static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, |
|---|
| 668 | + const struct rdma_id_private *listen_id_priv, |
|---|
| 669 | + struct cma_req_info *req) |
|---|
| 670 | +{ |
|---|
| 671 | + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 672 | + const struct ib_gid_attr *sgid_attr; |
|---|
| 673 | + enum ib_gid_type gid_type; |
|---|
| 674 | + union ib_gid gid; |
|---|
| 675 | + |
|---|
| 676 | + if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
|---|
| 677 | + id_priv->id.ps == RDMA_PS_IPOIB) |
|---|
| 678 | + return -EINVAL; |
|---|
| 679 | + |
|---|
| 680 | + if (rdma_protocol_roce(req->device, req->port)) |
|---|
| 681 | + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, |
|---|
| 682 | + &gid); |
|---|
| 683 | + else |
|---|
| 684 | + memcpy(&gid, dev_addr->src_dev_addr + |
|---|
| 685 | + rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
|---|
| 686 | + |
|---|
| 687 | + gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; |
|---|
| 688 | + sgid_attr = cma_validate_port(req->device, req->port, |
|---|
| 689 | + gid_type, &gid, id_priv); |
|---|
| 690 | + if (IS_ERR(sgid_attr)) |
|---|
| 691 | + return PTR_ERR(sgid_attr); |
|---|
| 692 | + |
|---|
| 693 | + id_priv->id.port_num = req->port; |
|---|
| 694 | + cma_bind_sgid_attr(id_priv, sgid_attr); |
|---|
| 695 | + /* Need to acquire lock to protect against readers |
|---|
| 696 | + * of cma_dev->id_list such as cma_netdev_callback() and |
|---|
| 697 | + * cma_process_remove(). |
|---|
| 698 | + */ |
|---|
| 699 | + mutex_lock(&lock); |
|---|
| 700 | + cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); |
|---|
| 701 | + mutex_unlock(&lock); |
|---|
| 702 | + rdma_restrack_add(&id_priv->res); |
|---|
| 703 | + return 0; |
|---|
| 704 | +} |
|---|
| 705 | + |
|---|
| 706 | +static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, |
|---|
| 707 | + const struct rdma_id_private *listen_id_priv) |
|---|
| 708 | +{ |
|---|
| 709 | + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 710 | + const struct ib_gid_attr *sgid_attr; |
|---|
| 711 | + struct cma_device *cma_dev; |
|---|
| 712 | + enum ib_gid_type gid_type; |
|---|
| 713 | + int ret = -ENODEV; |
|---|
| 714 | + unsigned int port; |
|---|
| 715 | + union ib_gid gid; |
|---|
| 716 | + |
|---|
| 717 | + if (dev_addr->dev_type != ARPHRD_INFINIBAND && |
|---|
| 718 | + id_priv->id.ps == RDMA_PS_IPOIB) |
|---|
| 719 | + return -EINVAL; |
|---|
| 720 | + |
|---|
| 721 | + memcpy(&gid, dev_addr->src_dev_addr + |
|---|
| 722 | + rdma_addr_gid_offset(dev_addr), sizeof(gid)); |
|---|
| 723 | + |
|---|
| 724 | + mutex_lock(&lock); |
|---|
| 725 | + |
|---|
| 726 | + cma_dev = listen_id_priv->cma_dev; |
|---|
| 727 | + port = listen_id_priv->id.port_num; |
|---|
| 728 | + gid_type = listen_id_priv->gid_type; |
|---|
| 729 | + sgid_attr = cma_validate_port(cma_dev->device, port, |
|---|
| 730 | + gid_type, &gid, id_priv); |
|---|
| 731 | + if (!IS_ERR(sgid_attr)) { |
|---|
| 732 | + id_priv->id.port_num = port; |
|---|
| 733 | + cma_bind_sgid_attr(id_priv, sgid_attr); |
|---|
| 734 | + ret = 0; |
|---|
| 735 | + goto out; |
|---|
| 736 | + } |
|---|
| 737 | + |
|---|
| 738 | + list_for_each_entry(cma_dev, &dev_list, list) { |
|---|
| 739 | + rdma_for_each_port (cma_dev->device, port) { |
|---|
| 740 | + if (listen_id_priv->cma_dev == cma_dev && |
|---|
| 741 | + listen_id_priv->id.port_num == port) |
|---|
| 742 | + continue; |
|---|
| 743 | + |
|---|
| 744 | + gid_type = cma_dev->default_gid_type[port - 1]; |
|---|
| 745 | + sgid_attr = cma_validate_port(cma_dev->device, port, |
|---|
| 746 | + gid_type, &gid, id_priv); |
|---|
| 692 | 747 | if (!IS_ERR(sgid_attr)) { |
|---|
| 693 | 748 | id_priv->id.port_num = port; |
|---|
| 694 | 749 | cma_bind_sgid_attr(id_priv, sgid_attr); |
|---|
| .. | .. |
|---|
| 699 | 754 | } |
|---|
| 700 | 755 | |
|---|
| 701 | 756 | out: |
|---|
| 702 | | - if (!ret) |
|---|
| 757 | + if (!ret) { |
|---|
| 703 | 758 | cma_attach_to_dev(id_priv, cma_dev); |
|---|
| 759 | + rdma_restrack_add(&id_priv->res); |
|---|
| 760 | + } |
|---|
| 704 | 761 | |
|---|
| 705 | 762 | mutex_unlock(&lock); |
|---|
| 706 | 763 | return ret; |
|---|
| .. | .. |
|---|
| 714 | 771 | struct cma_device *cma_dev, *cur_dev; |
|---|
| 715 | 772 | struct sockaddr_ib *addr; |
|---|
| 716 | 773 | union ib_gid gid, sgid, *dgid; |
|---|
| 774 | + unsigned int p; |
|---|
| 717 | 775 | u16 pkey, index; |
|---|
| 718 | | - u8 p; |
|---|
| 719 | 776 | enum ib_port_state port_state; |
|---|
| 777 | + int ret; |
|---|
| 720 | 778 | int i; |
|---|
| 721 | 779 | |
|---|
| 722 | 780 | cma_dev = NULL; |
|---|
| .. | .. |
|---|
| 726 | 784 | |
|---|
| 727 | 785 | mutex_lock(&lock); |
|---|
| 728 | 786 | list_for_each_entry(cur_dev, &dev_list, list) { |
|---|
| 729 | | - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { |
|---|
| 787 | + rdma_for_each_port (cur_dev->device, p) { |
|---|
| 730 | 788 | if (!rdma_cap_af_ib(cur_dev->device, p)) |
|---|
| 731 | 789 | continue; |
|---|
| 732 | 790 | |
|---|
| .. | .. |
|---|
| 735 | 793 | |
|---|
| 736 | 794 | if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) |
|---|
| 737 | 795 | continue; |
|---|
| 738 | | - for (i = 0; !rdma_query_gid(cur_dev->device, |
|---|
| 739 | | - p, i, &gid); |
|---|
| 740 | | - i++) { |
|---|
| 796 | + |
|---|
| 797 | + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; |
|---|
| 798 | + ++i) { |
|---|
| 799 | + ret = rdma_query_gid(cur_dev->device, p, i, |
|---|
| 800 | + &gid); |
|---|
| 801 | + if (ret) |
|---|
| 802 | + continue; |
|---|
| 803 | + |
|---|
| 741 | 804 | if (!memcmp(&gid, dgid, sizeof(gid))) { |
|---|
| 742 | 805 | cma_dev = cur_dev; |
|---|
| 743 | 806 | sgid = gid; |
|---|
| .. | .. |
|---|
| 761 | 824 | |
|---|
| 762 | 825 | found: |
|---|
| 763 | 826 | cma_attach_to_dev(id_priv, cma_dev); |
|---|
| 827 | + rdma_restrack_add(&id_priv->res); |
|---|
| 764 | 828 | mutex_unlock(&lock); |
|---|
| 765 | 829 | addr = (struct sockaddr_ib *)cma_src_addr(id_priv); |
|---|
| 766 | 830 | memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); |
|---|
| .. | .. |
|---|
| 768 | 832 | return 0; |
|---|
| 769 | 833 | } |
|---|
| 770 | 834 | |
|---|
| 771 | | -static void cma_deref_id(struct rdma_id_private *id_priv) |
|---|
| 835 | +static void cma_id_get(struct rdma_id_private *id_priv) |
|---|
| 772 | 836 | { |
|---|
| 773 | | - if (atomic_dec_and_test(&id_priv->refcount)) |
|---|
| 837 | + refcount_inc(&id_priv->refcount); |
|---|
| 838 | +} |
|---|
| 839 | + |
|---|
| 840 | +static void cma_id_put(struct rdma_id_private *id_priv) |
|---|
| 841 | +{ |
|---|
| 842 | + if (refcount_dec_and_test(&id_priv->refcount)) |
|---|
| 774 | 843 | complete(&id_priv->comp); |
|---|
| 775 | 844 | } |
|---|
| 776 | 845 | |
|---|
| 777 | | -struct rdma_cm_id *__rdma_create_id(struct net *net, |
|---|
| 778 | | - rdma_cm_event_handler event_handler, |
|---|
| 779 | | - void *context, enum rdma_ucm_port_space ps, |
|---|
| 780 | | - enum ib_qp_type qp_type, const char *caller) |
|---|
| 846 | +static struct rdma_id_private * |
|---|
| 847 | +__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, |
|---|
| 848 | + void *context, enum rdma_ucm_port_space ps, |
|---|
| 849 | + enum ib_qp_type qp_type, const struct rdma_id_private *parent) |
|---|
| 781 | 850 | { |
|---|
| 782 | 851 | struct rdma_id_private *id_priv; |
|---|
| 783 | 852 | |
|---|
| .. | .. |
|---|
| 785 | 854 | if (!id_priv) |
|---|
| 786 | 855 | return ERR_PTR(-ENOMEM); |
|---|
| 787 | 856 | |
|---|
| 788 | | - if (caller) |
|---|
| 789 | | - id_priv->res.kern_name = caller; |
|---|
| 790 | | - else |
|---|
| 791 | | - rdma_restrack_set_task(&id_priv->res, current); |
|---|
| 792 | | - id_priv->res.type = RDMA_RESTRACK_CM_ID; |
|---|
| 793 | 857 | id_priv->state = RDMA_CM_IDLE; |
|---|
| 794 | 858 | id_priv->id.context = context; |
|---|
| 795 | 859 | id_priv->id.event_handler = event_handler; |
|---|
| 796 | 860 | id_priv->id.ps = ps; |
|---|
| 797 | 861 | id_priv->id.qp_type = qp_type; |
|---|
| 798 | 862 | id_priv->tos_set = false; |
|---|
| 863 | + id_priv->timeout_set = false; |
|---|
| 799 | 864 | id_priv->gid_type = IB_GID_TYPE_IB; |
|---|
| 800 | 865 | spin_lock_init(&id_priv->lock); |
|---|
| 801 | 866 | mutex_init(&id_priv->qp_mutex); |
|---|
| 802 | 867 | init_completion(&id_priv->comp); |
|---|
| 803 | | - atomic_set(&id_priv->refcount, 1); |
|---|
| 868 | + refcount_set(&id_priv->refcount, 1); |
|---|
| 804 | 869 | mutex_init(&id_priv->handler_mutex); |
|---|
| 805 | 870 | INIT_LIST_HEAD(&id_priv->listen_list); |
|---|
| 806 | 871 | INIT_LIST_HEAD(&id_priv->mc_list); |
|---|
| .. | .. |
|---|
| 808 | 873 | id_priv->id.route.addr.dev_addr.net = get_net(net); |
|---|
| 809 | 874 | id_priv->seq_num &= 0x00ffffff; |
|---|
| 810 | 875 | |
|---|
| 811 | | - return &id_priv->id; |
|---|
| 876 | + rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); |
|---|
| 877 | + if (parent) |
|---|
| 878 | + rdma_restrack_parent_name(&id_priv->res, &parent->res); |
|---|
| 879 | + |
|---|
| 880 | + return id_priv; |
|---|
| 812 | 881 | } |
|---|
| 813 | | -EXPORT_SYMBOL(__rdma_create_id); |
|---|
| 882 | + |
|---|
| 883 | +struct rdma_cm_id * |
|---|
| 884 | +__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, |
|---|
| 885 | + void *context, enum rdma_ucm_port_space ps, |
|---|
| 886 | + enum ib_qp_type qp_type, const char *caller) |
|---|
| 887 | +{ |
|---|
| 888 | + struct rdma_id_private *ret; |
|---|
| 889 | + |
|---|
| 890 | + ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); |
|---|
| 891 | + if (IS_ERR(ret)) |
|---|
| 892 | + return ERR_CAST(ret); |
|---|
| 893 | + |
|---|
| 894 | + rdma_restrack_set_name(&ret->res, caller); |
|---|
| 895 | + return &ret->id; |
|---|
| 896 | +} |
|---|
| 897 | +EXPORT_SYMBOL(__rdma_create_kernel_id); |
|---|
| 898 | + |
|---|
| 899 | +struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, |
|---|
| 900 | + void *context, |
|---|
| 901 | + enum rdma_ucm_port_space ps, |
|---|
| 902 | + enum ib_qp_type qp_type) |
|---|
| 903 | +{ |
|---|
| 904 | + struct rdma_id_private *ret; |
|---|
| 905 | + |
|---|
| 906 | + ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, |
|---|
| 907 | + ps, qp_type, NULL); |
|---|
| 908 | + if (IS_ERR(ret)) |
|---|
| 909 | + return ERR_CAST(ret); |
|---|
| 910 | + |
|---|
| 911 | + rdma_restrack_set_name(&ret->res, NULL); |
|---|
| 912 | + return &ret->id; |
|---|
| 913 | +} |
|---|
| 914 | +EXPORT_SYMBOL(rdma_create_user_id); |
|---|
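`__rdma_create_id()` becomes a static factory returning the private structure, and the old single exported entry point splits by caller type: kernel ULPs use `__rdma_create_kernel_id()` with an explicit restrack name, while the user-facing path uses `rdma_create_user_id()`, which also resolves the netns from `current->nsproxy`. A kernel-caller sketch (hedged: upstream normally reaches this through a `rdma_create_id()` wrapper macro that supplies `KBUILD_MODNAME`; treat that wrapper as an assumption here, and `demo_*` names are invented):

```c
static int demo_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	return 0;	/* a non-zero return destroys the id */
}

static int demo_open(void)
{
	struct rdma_cm_id *id;

	id = __rdma_create_kernel_id(&init_net, demo_handler, NULL,
				     RDMA_PS_TCP, IB_QPT_RC, KBUILD_MODNAME);
	if (IS_ERR(id))
		return PTR_ERR(id);
	rdma_destroy_id(id);
	return 0;
}
```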
| 814 | 915 | |
|---|
| 815 | 916 | static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) |
|---|
| 816 | 917 | { |
|---|
| .. | .. |
|---|
| 859 | 960 | int ret; |
|---|
| 860 | 961 | |
|---|
| 861 | 962 | id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 862 | | - if (id->device != pd->device) |
|---|
| 863 | | - return -EINVAL; |
|---|
| 963 | + if (id->device != pd->device) { |
|---|
| 964 | + ret = -EINVAL; |
|---|
| 965 | + goto out_err; |
|---|
| 966 | + } |
|---|
| 864 | 967 | |
|---|
| 865 | 968 | qp_init_attr->port_num = id->port_num; |
|---|
| 866 | 969 | qp = ib_create_qp(pd, qp_init_attr); |
|---|
| 867 | | - if (IS_ERR(qp)) |
|---|
| 868 | | - return PTR_ERR(qp); |
|---|
| 970 | + if (IS_ERR(qp)) { |
|---|
| 971 | + ret = PTR_ERR(qp); |
|---|
| 972 | + goto out_err; |
|---|
| 973 | + } |
|---|
| 869 | 974 | |
|---|
| 870 | 975 | if (id->qp_type == IB_QPT_UD) |
|---|
| 871 | 976 | ret = cma_init_ud_qp(id_priv, qp); |
|---|
| 872 | 977 | else |
|---|
| 873 | 978 | ret = cma_init_conn_qp(id_priv, qp); |
|---|
| 874 | 979 | if (ret) |
|---|
| 875 | | - goto err; |
|---|
| 980 | + goto out_destroy; |
|---|
| 876 | 981 | |
|---|
| 877 | 982 | id->qp = qp; |
|---|
| 878 | 983 | id_priv->qp_num = qp->qp_num; |
|---|
| 879 | 984 | id_priv->srq = (qp->srq != NULL); |
|---|
| 985 | + trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); |
|---|
| 880 | 986 | return 0; |
|---|
| 881 | | -err: |
|---|
| 987 | +out_destroy: |
|---|
| 882 | 988 | ib_destroy_qp(qp); |
|---|
| 989 | +out_err: |
|---|
| 990 | + trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); |
|---|
| 883 | 991 | return ret; |
|---|
| 884 | 992 | } |
|---|
| 885 | 993 | EXPORT_SYMBOL(rdma_create_qp); |
|---|
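The error handling in `rdma_create_qp()` is restructured so that every exit, success or failure, flows past the new `trace_cm_qp_create()` tracepoint with the final status, and the unwind labels are named for what they tear down. The shape in isolation (sketch; `init_qp()` is a hypothetical stand-in for the UD/connected-QP init above):

```c
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out_err;		/* nothing to unwind yet */
	}
	ret = init_qp(qp);		/* hypothetical stand-in */
	if (ret)
		goto out_destroy;	/* must release the QP we made */
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
	return 0;
out_destroy:
	ib_destroy_qp(qp);
out_err:
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
	return ret;
```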
| .. | .. |
|---|
| 889 | 997 | struct rdma_id_private *id_priv; |
|---|
| 890 | 998 | |
|---|
| 891 | 999 | id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 1000 | + trace_cm_qp_destroy(id_priv); |
|---|
| 892 | 1001 | mutex_lock(&id_priv->qp_mutex); |
|---|
| 893 | 1002 | ib_destroy_qp(id_priv->id.qp); |
|---|
| 894 | 1003 | id_priv->id.qp = NULL; |
|---|
| .. | .. |
|---|
| 997 | 1106 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; |
|---|
| 998 | 1107 | |
|---|
| 999 | 1108 | if (id_priv->id.qp_type == IB_QPT_UD) { |
|---|
| 1000 | | - ret = cma_set_qkey(id_priv, 0); |
|---|
| 1109 | + ret = cma_set_default_qkey(id_priv); |
|---|
| 1001 | 1110 | if (ret) |
|---|
| 1002 | 1111 | return ret; |
|---|
| 1003 | 1112 | |
|---|
| .. | .. |
|---|
| 1037 | 1146 | *qp_attr_mask |= IB_QP_PORT; |
|---|
| 1038 | 1147 | } else |
|---|
| 1039 | 1148 | ret = -ENOSYS; |
|---|
| 1149 | + |
|---|
| 1150 | + if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) |
|---|
| 1151 | + qp_attr->timeout = id_priv->timeout; |
|---|
| 1040 | 1152 | |
|---|
| 1041 | 1153 | return ret; |
|---|
| 1042 | 1154 | } |
|---|
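Paired with the `id_priv->timeout_set = false` initialisation added in `__rdma_create_id()`, this hunk lets a consumer override the CM's computed ACK timeout: whenever the mask already carries `IB_QP_TIMEOUT`, the stored value replaces the default. Consumer-side sketch (assumes the `rdma_set_ack_timeout()` setter that records `timeout`/`timeout_set`; the timeout field is a 5-bit exponent with unit 4.096 us * 2^value):

```c
/* ~67 ms per ACK retry: 4.096 us << 14 */
ret = rdma_set_ack_timeout(cm_id, 14);
if (ret)
	return ret;	/* e.g. unsupported QP type */
```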
| .. | .. |
|---|
| 1324 | 1436 | return false; |
|---|
| 1325 | 1437 | |
|---|
| 1326 | 1438 | memset(&fl4, 0, sizeof(fl4)); |
|---|
| 1327 | | - fl4.flowi4_iif = net_dev->ifindex; |
|---|
| 1439 | + fl4.flowi4_oif = net_dev->ifindex; |
|---|
| 1328 | 1440 | fl4.daddr = daddr; |
|---|
| 1329 | 1441 | fl4.saddr = saddr; |
|---|
| 1330 | 1442 | |
|---|
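The `flowi4_iif` to `flowi4_oif` switch is a correctness fix: a lookup keyed by `flowi4_iif` asks an input-routing question, while this validation wants to know whether the addresses route *via* `net_dev`, so the constraint belongs on the egress interface. Sketch of the corrected validation (assumption: the surrounding helper resolves via `fib_lookup()` and compares the resulting device, as upstream does):

```c
struct fib_result res;
struct flowi4 fl4 = {};
bool ok;
int err;

fl4.flowi4_oif = net_dev->ifindex;	/* egress constraint, was _iif */
fl4.daddr = daddr;
fl4.saddr = saddr;

rcu_read_lock();
err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
ok = err == 0 && FIB_RES_DEV(res) == net_dev;
rcu_read_unlock();
```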
| .. | .. |
|---|
| 1387 | 1499 | roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) |
|---|
| 1388 | 1500 | { |
|---|
| 1389 | 1501 | const struct ib_gid_attr *sgid_attr = NULL; |
|---|
| 1502 | + struct net_device *ndev; |
|---|
| 1390 | 1503 | |
|---|
| 1391 | 1504 | if (ib_event->event == IB_CM_REQ_RECEIVED) |
|---|
| 1392 | 1505 | sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; |
|---|
| .. | .. |
|---|
| 1395 | 1508 | |
|---|
| 1396 | 1509 | if (!sgid_attr) |
|---|
| 1397 | 1510 | return NULL; |
|---|
| 1398 | | - dev_hold(sgid_attr->ndev); |
|---|
| 1399 | | - return sgid_attr->ndev; |
|---|
| 1511 | + |
|---|
| 1512 | + rcu_read_lock(); |
|---|
| 1513 | + ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr); |
|---|
| 1514 | + if (IS_ERR(ndev)) |
|---|
| 1515 | + ndev = NULL; |
|---|
| 1516 | + else |
|---|
| 1517 | + dev_hold(ndev); |
|---|
| 1518 | + rcu_read_unlock(); |
|---|
| 1519 | + return ndev; |
|---|
| 1400 | 1520 | } |
|---|
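Dereferencing `sgid_attr->ndev` directly raced with GID-table updates, which can clear the netdev behind the attribute at any time; `rdma_read_gid_attr_ndev_rcu()` samples it inside an RCU read-side section so the `dev_hold()` pins a device that is still valid. For contrast, the removed shape annotated with the hazard:

```c
/* Racy pattern removed by the hunk above: */
dev_hold(sgid_attr->ndev);	/* ndev may be cleared (and freed)
				 * concurrently by a GID table update;
				 * without RCU this can hold or return
				 * a stale pointer */
return sgid_attr->ndev;
```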
| 1401 | 1521 | |
|---|
| 1402 | 1522 | static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, |
|---|
| .. | .. |
|---|
| 1475 | 1595 | return rdma_protocol_roce(device, port_num); |
|---|
| 1476 | 1596 | } |
|---|
| 1477 | 1597 | |
|---|
| 1598 | +static bool cma_is_req_ipv6_ll(const struct cma_req_info *req) |
|---|
| 1599 | +{ |
|---|
| 1600 | + const struct sockaddr *daddr = |
|---|
| 1601 | + (const struct sockaddr *)&req->listen_addr_storage; |
|---|
| 1602 | + const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; |
|---|
| 1603 | + |
|---|
| 1604 | + /* Returns true if the req is for IPv6 link local */ |
|---|
| 1605 | + return (daddr->sa_family == AF_INET6 && |
|---|
| 1606 | + (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); |
|---|
| 1607 | +} |
|---|
| 1608 | + |
|---|
| 1478 | 1609 | static bool cma_match_net_dev(const struct rdma_cm_id *id, |
|---|
| 1479 | 1610 | const struct net_device *net_dev, |
|---|
| 1480 | | - u8 port_num) |
|---|
| 1611 | + const struct cma_req_info *req) |
|---|
| 1481 | 1612 | { |
|---|
| 1482 | 1613 | const struct rdma_addr *addr = &id->route.addr; |
|---|
| 1483 | 1614 | |
|---|
| 1484 | 1615 | if (!net_dev) |
|---|
| 1485 | 1616 | /* This request is an AF_IB request */ |
|---|
| 1486 | | - return (!id->port_num || id->port_num == port_num) && |
|---|
| 1617 | + return (!id->port_num || id->port_num == req->port) && |
|---|
| 1487 | 1618 | (addr->src_addr.ss_family == AF_IB); |
|---|
| 1488 | 1619 | |
|---|
| 1620 | + /* |
|---|
| 1621 | + * If the request is not for IPv6 link local, allow matching |
|---|
| 1622 | + * request to any netdevice of the one or multiport rdma device. |
|---|
| 1623 | + */ |
|---|
| 1624 | + if (!cma_is_req_ipv6_ll(req)) |
|---|
| 1625 | + return true; |
|---|
| 1489 | 1626 | /* |
|---|
| 1490 | 1627 | * Net namespaces must match, and if the listener is listening |
|---|
| 1491 | 1628 | * on a specific netdevice then the netdevice must match as well. |
|---|
| .. | .. |
|---|
| 1515 | 1652 | hlist_for_each_entry(id_priv, &bind_list->owners, node) { |
|---|
| 1516 | 1653 | if (cma_match_private_data(id_priv, ib_event->private_data)) { |
|---|
| 1517 | 1654 | if (id_priv->id.device == cm_id->device && |
|---|
| 1518 | | - cma_match_net_dev(&id_priv->id, net_dev, req->port)) |
|---|
| 1655 | + cma_match_net_dev(&id_priv->id, net_dev, req)) |
|---|
| 1519 | 1656 | return id_priv; |
|---|
| 1520 | 1657 | list_for_each_entry(id_priv_dev, |
|---|
| 1521 | 1658 | &id_priv->listen_list, |
|---|
| 1522 | 1659 | listen_list) { |
|---|
| 1523 | 1660 | if (id_priv_dev->id.device == cm_id->device && |
|---|
| 1524 | | - cma_match_net_dev(&id_priv_dev->id, net_dev, req->port)) |
|---|
| 1661 | + cma_match_net_dev(&id_priv_dev->id, |
|---|
| 1662 | + net_dev, req)) |
|---|
| 1525 | 1663 | return id_priv_dev; |
|---|
| 1526 | 1664 | } |
|---|
| 1527 | 1665 | } |
|---|
| .. | .. |
|---|
| 1533 | 1671 | static struct rdma_id_private * |
|---|
| 1534 | 1672 | cma_ib_id_from_event(struct ib_cm_id *cm_id, |
|---|
| 1535 | 1673 | const struct ib_cm_event *ib_event, |
|---|
| 1674 | + struct cma_req_info *req, |
|---|
| 1536 | 1675 | struct net_device **net_dev) |
|---|
| 1537 | 1676 | { |
|---|
| 1538 | | - struct cma_req_info req; |
|---|
| 1539 | 1677 | struct rdma_bind_list *bind_list; |
|---|
| 1540 | 1678 | struct rdma_id_private *id_priv; |
|---|
| 1541 | 1679 | int err; |
|---|
| 1542 | 1680 | |
|---|
| 1543 | | - err = cma_save_req_info(ib_event, &req); |
|---|
| 1681 | + err = cma_save_req_info(ib_event, req); |
|---|
| 1544 | 1682 | if (err) |
|---|
| 1545 | 1683 | return ERR_PTR(err); |
|---|
| 1546 | 1684 | |
|---|
| 1547 | | - *net_dev = cma_get_net_dev(ib_event, &req); |
|---|
| 1685 | + *net_dev = cma_get_net_dev(ib_event, req); |
|---|
| 1548 | 1686 | if (IS_ERR(*net_dev)) { |
|---|
| 1549 | 1687 | if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { |
|---|
| 1550 | 1688 | /* Assuming the protocol is AF_IB */ |
|---|
| .. | .. |
|---|
| 1583 | 1721 | } |
|---|
| 1584 | 1722 | |
|---|
| 1585 | 1723 | if (!validate_net_dev(*net_dev, |
|---|
| 1586 | | - (struct sockaddr *)&req.listen_addr_storage, |
|---|
| 1587 | | - (struct sockaddr *)&req.src_addr_storage)) { |
|---|
| 1724 | + (struct sockaddr *)&req->src_addr_storage, |
|---|
| 1725 | + (struct sockaddr *)&req->listen_addr_storage)) { |
|---|
| 1588 | 1726 | id_priv = ERR_PTR(-EHOSTUNREACH); |
|---|
| 1589 | 1727 | goto err; |
|---|
| 1590 | 1728 | } |
|---|
| 1591 | 1729 | } |
|---|
| 1592 | 1730 | |
|---|
| 1593 | 1731 | bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, |
|---|
| 1594 | | - rdma_ps_from_service_id(req.service_id), |
|---|
| 1595 | | - cma_port_from_service_id(req.service_id)); |
|---|
| 1596 | | - id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); |
|---|
| 1732 | + rdma_ps_from_service_id(req->service_id), |
|---|
| 1733 | + cma_port_from_service_id(req->service_id)); |
|---|
| 1734 | + id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); |
|---|
| 1597 | 1735 | err: |
|---|
| 1598 | 1736 | rcu_read_unlock(); |
|---|
| 1599 | 1737 | mutex_unlock(&lock); |
|---|
| .. | .. |
|---|
| 1617 | 1755 | } |
|---|
| 1618 | 1756 | } |
|---|
| 1619 | 1757 | |
|---|
| 1620 | | -static void cma_cancel_listens(struct rdma_id_private *id_priv) |
|---|
| 1758 | +static void _cma_cancel_listens(struct rdma_id_private *id_priv) |
|---|
| 1621 | 1759 | { |
|---|
| 1622 | 1760 | struct rdma_id_private *dev_id_priv; |
|---|
| 1761 | + |
|---|
| 1762 | + lockdep_assert_held(&lock); |
|---|
| 1623 | 1763 | |
|---|
| 1624 | 1764 | /* |
|---|
| 1625 | 1765 | * Remove from listen_any_list to prevent added devices from spawning |
|---|
| 1626 | 1766 | * additional listen requests. |
|---|
| 1627 | 1767 | */ |
|---|
| 1628 | | - mutex_lock(&lock); |
|---|
| 1629 | 1768 | list_del(&id_priv->list); |
|---|
| 1630 | 1769 | |
|---|
| 1631 | 1770 | while (!list_empty(&id_priv->listen_list)) { |
|---|
| .. | .. |
|---|
| 1639 | 1778 | rdma_destroy_id(&dev_id_priv->id); |
|---|
| 1640 | 1779 | mutex_lock(&lock); |
|---|
| 1641 | 1780 | } |
|---|
| 1781 | +} |
|---|
| 1782 | + |
|---|
| 1783 | +static void cma_cancel_listens(struct rdma_id_private *id_priv) |
|---|
| 1784 | +{ |
|---|
| 1785 | + mutex_lock(&lock); |
|---|
| 1786 | + _cma_cancel_listens(id_priv); |
|---|
| 1642 | 1787 | mutex_unlock(&lock); |
|---|
| 1643 | 1788 | } |
|---|
| 1644 | 1789 | |
|---|
| .. | .. |
|---|
| 1647 | 1792 | { |
|---|
| 1648 | 1793 | switch (state) { |
|---|
| 1649 | 1794 | case RDMA_CM_ADDR_QUERY: |
|---|
| 1795 | + /* |
|---|
| 1796 | + * We can avoid doing the rdma_addr_cancel() based on state, |
|---|
| 1797 | + * only RDMA_CM_ADDR_QUERY has a work that could still execute. |
|---|
| 1798 | + * Notice that the addr_handler work could still be exiting |
|---|
| 1799 | + * outside this state, however due to the interaction with the |
|---|
| 1800 | + * handler_mutex the work is guaranteed not to touch id_priv |
|---|
| 1801 | + * during exit. |
|---|
| 1802 | + */ |
|---|
| 1650 | 1803 | rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); |
|---|
| 1651 | 1804 | break; |
|---|
| 1652 | 1805 | case RDMA_CM_ROUTE_QUERY: |
|---|
| .. | .. |
|---|
| 1681 | 1834 | static void destroy_mc(struct rdma_id_private *id_priv, |
|---|
| 1682 | 1835 | struct cma_multicast *mc) |
|---|
| 1683 | 1836 | { |
|---|
| 1684 | | - if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) { |
|---|
| 1685 | | - ib_sa_free_multicast(mc->multicast.ib); |
|---|
| 1686 | | - kfree(mc); |
|---|
| 1687 | | - return; |
|---|
| 1688 | | - } |
|---|
| 1837 | + bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); |
|---|
| 1689 | 1838 | |
|---|
| 1690 | | - if (rdma_protocol_roce(id_priv->id.device, |
|---|
| 1691 | | - id_priv->id.port_num)) { |
|---|
| 1839 | + if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) |
|---|
| 1840 | + ib_sa_free_multicast(mc->sa_mc); |
|---|
| 1841 | + |
|---|
| 1842 | + if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { |
|---|
| 1692 | 1843 | struct rdma_dev_addr *dev_addr = |
|---|
| 1693 | 1844 | &id_priv->id.route.addr.dev_addr; |
|---|
| 1694 | 1845 | struct net_device *ndev = NULL; |
|---|
| .. | .. |
|---|
| 1696 | 1847 | if (dev_addr->bound_dev_if) |
|---|
| 1697 | 1848 | ndev = dev_get_by_index(dev_addr->net, |
|---|
| 1698 | 1849 | dev_addr->bound_dev_if); |
|---|
| 1699 | | - if (ndev) { |
|---|
| 1700 | | - cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false); |
|---|
| 1701 | | - dev_put(ndev); |
|---|
| 1850 | + if (ndev && !send_only) { |
|---|
| 1851 | + enum ib_gid_type gid_type; |
|---|
| 1852 | + union ib_gid mgid; |
|---|
| 1853 | + |
|---|
| 1854 | + gid_type = id_priv->cma_dev->default_gid_type |
|---|
| 1855 | + [id_priv->id.port_num - |
|---|
| 1856 | + rdma_start_port( |
|---|
| 1857 | + id_priv->cma_dev->device)]; |
|---|
| 1858 | + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, |
|---|
| 1859 | + gid_type); |
|---|
| 1860 | + cma_igmp_send(ndev, &mgid, false); |
|---|
| 1702 | 1861 | } |
|---|
| 1703 | | - kref_put(&mc->mcref, release_mc); |
|---|
| 1862 | + dev_put(ndev); |
|---|
| 1863 | + |
|---|
| 1864 | + cancel_work_sync(&mc->iboe_join.work); |
|---|
| 1704 | 1865 | } |
|---|
| 1866 | + kfree(mc); |
|---|
| 1705 | 1867 | } |
|---|
| 1706 | 1868 | |
|---|
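`destroy_mc()` now owns the whole teardown: send-only RoCE joins never sent an IGMP join, so the new `!send_only` check skips the matching leave; the mgid is recomputed via `cma_iboe_set_mgid()` (forward-declared near the top of this diff) because the SA record no longer exists for the iboe case; and any pending `iboe_join` work is flushed before the entry is freed. The ordering rule in isolation (sketch):

```c
/* Any deferred event work must be flushed before its container dies. */
cancel_work_sync(&mc->iboe_join.work);	/* waits for a running handler */
kfree(mc);				/* only now is the union dead */
```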
| 1707 | 1869 | static void cma_leave_mc_groups(struct rdma_id_private *id_priv) |
|---|
| .. | .. |
|---|
| 1716 | 1878 | } |
|---|
| 1717 | 1879 | } |
|---|
| 1718 | 1880 | |
|---|
| 1719 | | -void rdma_destroy_id(struct rdma_cm_id *id) |
|---|
| 1881 | +static void _destroy_id(struct rdma_id_private *id_priv, |
|---|
| 1882 | + enum rdma_cm_state state) |
|---|
| 1720 | 1883 | { |
|---|
| 1721 | | - struct rdma_id_private *id_priv; |
|---|
| 1722 | | - enum rdma_cm_state state; |
|---|
| 1723 | | - |
|---|
| 1724 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 1725 | | - state = cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 1726 | 1884 | cma_cancel_operation(id_priv, state); |
|---|
| 1727 | | - |
|---|
| 1728 | | - /* |
|---|
| 1729 | | - * Wait for any active callback to finish. New callbacks will find |
|---|
| 1730 | | - * the id_priv state set to destroying and abort. |
|---|
| 1731 | | - */ |
|---|
| 1732 | | - mutex_lock(&id_priv->handler_mutex); |
|---|
| 1733 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 1734 | 1885 | |
|---|
| 1735 | 1886 | rdma_restrack_del(&id_priv->res); |
|---|
| 1736 | 1887 | if (id_priv->cma_dev) { |
|---|
| .. | .. |
|---|
| 1746 | 1897 | } |
|---|
| 1747 | 1898 | |
|---|
| 1748 | 1899 | cma_release_port(id_priv); |
|---|
| 1749 | | - cma_deref_id(id_priv); |
|---|
| 1900 | + cma_id_put(id_priv); |
|---|
| 1750 | 1901 | wait_for_completion(&id_priv->comp); |
|---|
| 1751 | 1902 | |
|---|
| 1752 | 1903 | if (id_priv->internal_id) |
|---|
| 1753 | | - cma_deref_id(id_priv->id.context); |
|---|
| 1904 | + cma_id_put(id_priv->id.context); |
|---|
| 1754 | 1905 | |
|---|
| 1755 | 1906 | kfree(id_priv->id.route.path_rec); |
|---|
| 1756 | 1907 | |
|---|
| 1757 | | - if (id_priv->id.route.addr.dev_addr.sgid_attr) |
|---|
| 1758 | | - rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); |
|---|
| 1759 | | - |
|---|
| 1760 | 1908 | put_net(id_priv->id.route.addr.dev_addr.net); |
|---|
| 1761 | 1909 | kfree(id_priv); |
|---|
| 1910 | +} |
|---|
| 1911 | + |
|---|
| 1912 | +/* |
|---|
| 1913 | + * destroy an ID from within the handler_mutex. This ensures that no other |
|---|
| 1914 | + * handlers can start running concurrently. |
|---|
| 1915 | + */ |
|---|
| 1916 | +static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) |
|---|
| 1917 | + __releases(&id_priv->handler_mutex) |
|---|
| 1918 | +{ |
|---|
| 1919 | + enum rdma_cm_state state; |
|---|
| 1920 | + unsigned long flags; |
|---|
| 1921 | + |
|---|
| 1922 | + trace_cm_id_destroy(id_priv); |
|---|
| 1923 | + |
|---|
| 1924 | + /* |
|---|
| 1925 | + * Setting the state to destroyed under the handler mutex provides a |
|---|
| 1926 | + * fence against calling handler callbacks. If this is invoked due to |
|---|
| 1927 | + * the failure of a handler callback then it guarantees that no future |
|---|
| 1928 | + * handlers will be called. |
|---|
| 1929 | + */ |
|---|
| 1930 | + lockdep_assert_held(&id_priv->handler_mutex); |
|---|
| 1931 | + spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 1932 | + state = id_priv->state; |
|---|
| 1933 | + id_priv->state = RDMA_CM_DESTROYING; |
|---|
| 1934 | + spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 1935 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 1936 | + _destroy_id(id_priv, state); |
|---|
| 1937 | +} |
|---|
| 1938 | + |
|---|
| 1939 | +void rdma_destroy_id(struct rdma_cm_id *id) |
|---|
| 1940 | +{ |
|---|
| 1941 | + struct rdma_id_private *id_priv = |
|---|
| 1942 | + container_of(id, struct rdma_id_private, id); |
|---|
| 1943 | + |
|---|
| 1944 | + mutex_lock(&id_priv->handler_mutex); |
|---|
| 1945 | + destroy_id_handler_unlock(id_priv); |
|---|
| 1762 | 1946 | } |
|---|
| 1763 | 1947 | EXPORT_SYMBOL(rdma_destroy_id); |
|---|
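Destruction is reorganised around `handler_mutex`. The old `rdma_destroy_id()` did a lock/unlock "barrier" dance that only waited out the handler currently running; the new `destroy_id_handler_unlock()` flips the state to `RDMA_CM_DESTROYING` while still holding the mutex, fencing out any future handler before teardown starts in `_destroy_id()`. Event handlers that decide to kill their own id call the same helper instead of the old `cma_exch()` + unlock + `rdma_destroy_id()` sequence, closing the window where another callback could run against a half-destroyed id. The handler-side shape (sketch mirroring `cma_ib_handler()` below):

```c
mutex_lock(&id_priv->handler_mutex);
/* ... build the event ... */
ret = cma_cm_event_handler(id_priv, &event);
if (ret) {
	/* consumer requested destruction */
	id_priv->cm_id.ib = NULL;
	destroy_id_handler_unlock(id_priv);	/* drops handler_mutex */
	return ret;
}
mutex_unlock(&id_priv->handler_mutex);
return 0;
```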
| 1764 | 1948 | |
|---|
| .. | .. |
|---|
| 1774 | 1958 | if (ret) |
|---|
| 1775 | 1959 | goto reject; |
|---|
| 1776 | 1960 | |
|---|
| 1961 | + trace_cm_send_rtu(id_priv); |
|---|
| 1777 | 1962 | ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); |
|---|
| 1778 | 1963 | if (ret) |
|---|
| 1779 | 1964 | goto reject; |
|---|
| .. | .. |
|---|
| 1782 | 1967 | reject: |
|---|
| 1783 | 1968 | pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret); |
|---|
| 1784 | 1969 | cma_modify_qp_err(id_priv); |
|---|
| 1970 | + trace_cm_send_rej(id_priv); |
|---|
| 1785 | 1971 | ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, |
|---|
| 1786 | 1972 | NULL, 0, NULL, 0); |
|---|
| 1787 | 1973 | return ret; |
|---|
| .. | .. |
|---|
| 1799 | 1985 | event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; |
|---|
| 1800 | 1986 | event->param.conn.srq = rep_data->srq; |
|---|
| 1801 | 1987 | event->param.conn.qp_num = rep_data->remote_qpn; |
|---|
| 1988 | + |
|---|
| 1989 | + event->ece.vendor_id = rep_data->ece.vendor_id; |
|---|
| 1990 | + event->ece.attr_mod = rep_data->ece.attr_mod; |
|---|
| 1991 | +} |
|---|
| 1992 | + |
|---|
| 1993 | +static int cma_cm_event_handler(struct rdma_id_private *id_priv, |
|---|
| 1994 | + struct rdma_cm_event *event) |
|---|
| 1995 | +{ |
|---|
| 1996 | + int ret; |
|---|
| 1997 | + |
|---|
| 1998 | + lockdep_assert_held(&id_priv->handler_mutex); |
|---|
| 1999 | + |
|---|
| 2000 | + trace_cm_event_handler(id_priv, event); |
|---|
| 2001 | + ret = id_priv->id.event_handler(&id_priv->id, event); |
|---|
| 2002 | + trace_cm_event_done(id_priv, event, ret); |
|---|
| 2003 | + return ret; |
|---|
| 1802 | 2004 | } |
|---|
| 1803 | 2005 | |
|---|
| 1804 | 2006 | static int cma_ib_handler(struct ib_cm_id *cm_id, |
|---|
| .. | .. |
|---|
| 1806 | 2008 | { |
|---|
| 1807 | 2009 | struct rdma_id_private *id_priv = cm_id->context; |
|---|
| 1808 | 2010 | struct rdma_cm_event event = {}; |
|---|
| 1809 | | - int ret = 0; |
|---|
| 2011 | + enum rdma_cm_state state; |
|---|
| 2012 | + int ret; |
|---|
| 1810 | 2013 | |
|---|
| 1811 | 2014 | mutex_lock(&id_priv->handler_mutex); |
|---|
| 2015 | + state = READ_ONCE(id_priv->state); |
|---|
| 1812 | 2016 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
|---|
| 1813 | | - id_priv->state != RDMA_CM_CONNECT) || |
|---|
| 2017 | + state != RDMA_CM_CONNECT) || |
|---|
| 1814 | 2018 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
|---|
| 1815 | | - id_priv->state != RDMA_CM_DISCONNECT)) |
|---|
| 2019 | + state != RDMA_CM_DISCONNECT)) |
|---|
| 1816 | 2020 | goto out; |
|---|
| 1817 | 2021 | |
|---|
| 1818 | 2022 | switch (ib_event->event) { |
|---|
| .. | .. |
|---|
| 1822 | 2026 | event.status = -ETIMEDOUT; |
|---|
| 1823 | 2027 | break; |
|---|
| 1824 | 2028 | case IB_CM_REP_RECEIVED: |
|---|
| 1825 | | - if (cma_comp(id_priv, RDMA_CM_CONNECT) && |
|---|
| 1826 | | - (id_priv->id.qp_type != IB_QPT_UD)) |
|---|
| 2029 | + if (state == RDMA_CM_CONNECT && |
|---|
| 2030 | + (id_priv->id.qp_type != IB_QPT_UD)) { |
|---|
| 2031 | + trace_cm_send_mra(id_priv); |
|---|
| 1827 | 2032 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
|---|
| 2033 | + } |
|---|
| 1828 | 2034 | if (id_priv->id.qp) { |
|---|
| 1829 | 2035 | event.status = cma_rep_recv(id_priv); |
|---|
| 1830 | 2036 | event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : |
|---|
| .. | .. |
|---|
| 1840 | 2046 | event.event = RDMA_CM_EVENT_ESTABLISHED; |
|---|
| 1841 | 2047 | break; |
|---|
| 1842 | 2048 | case IB_CM_DREQ_ERROR: |
|---|
| 1843 | | - event.status = -ETIMEDOUT; /* fall through */ |
|---|
| 2049 | + event.status = -ETIMEDOUT; |
|---|
| 2050 | + fallthrough; |
|---|
| 1844 | 2051 | case IB_CM_DREQ_RECEIVED: |
|---|
| 1845 | 2052 | case IB_CM_DREP_RECEIVED: |
|---|
| 1846 | 2053 | if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, |
|---|
| .. | .. |
|---|
| 1869 | 2076 | goto out; |
|---|
| 1870 | 2077 | } |
|---|
| 1871 | 2078 | |
|---|
| 1872 | | - ret = id_priv->id.event_handler(&id_priv->id, &event); |
|---|
| 2079 | + ret = cma_cm_event_handler(id_priv, &event); |
|---|
| 1873 | 2080 | if (ret) { |
|---|
| 1874 | 2081 | /* Destroy the CM ID by returning a non-zero value. */ |
|---|
| 1875 | 2082 | id_priv->cm_id.ib = NULL; |
|---|
| 1876 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 1877 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 1878 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 2083 | + destroy_id_handler_unlock(id_priv); |
|---|
| 1879 | 2084 | return ret; |
|---|
| 1880 | 2085 | } |
|---|
| 1881 | 2086 | out: |
|---|
| 1882 | 2087 | mutex_unlock(&id_priv->handler_mutex); |
|---|
| 1883 | | - return ret; |
|---|
| 2088 | + return 0; |
|---|
| 1884 | 2089 | } |
|---|
| 1885 | 2090 | |
|---|
| 1886 | 2091 | static struct rdma_id_private * |
|---|
| .. | .. |
|---|
| 1899 | 2104 | int ret; |
|---|
| 1900 | 2105 | |
|---|
| 1901 | 2106 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
|---|
| 1902 | | - id = __rdma_create_id(listen_id->route.addr.dev_addr.net, |
|---|
| 1903 | | - listen_id->event_handler, listen_id->context, |
|---|
| 1904 | | - listen_id->ps, ib_event->param.req_rcvd.qp_type, |
|---|
| 1905 | | - listen_id_priv->res.kern_name); |
|---|
| 1906 | | - if (IS_ERR(id)) |
|---|
| 2107 | + id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, |
|---|
| 2108 | + listen_id->event_handler, listen_id->context, |
|---|
| 2109 | + listen_id->ps, |
|---|
| 2110 | + ib_event->param.req_rcvd.qp_type, |
|---|
| 2111 | + listen_id_priv); |
|---|
| 2112 | + if (IS_ERR(id_priv)) |
|---|
| 1907 | 2113 | return NULL; |
|---|
| 1908 | 2114 | |
|---|
| 1909 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 2115 | + id = &id_priv->id; |
|---|
| 1910 | 2116 | if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, |
|---|
| 1911 | 2117 | (struct sockaddr *)&id->route.addr.dst_addr, |
|---|
| 1912 | 2118 | listen_id, ib_event, ss_family, service_id)) |
|---|
| .. | .. |
|---|
| 1924 | 2130 | rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; |
|---|
| 1925 | 2131 | |
|---|
| 1926 | 2132 | if (net_dev) { |
|---|
| 1927 | | - rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL); |
|---|
| 2133 | + rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); |
|---|
| 1928 | 2134 | } else { |
|---|
| 1929 | 2135 | if (!cma_protocol_roce(listen_id) && |
|---|
| 1930 | 2136 | cma_any_addr(cma_src_addr(id_priv))) { |
|---|
| .. | .. |
|---|
| 1960 | 2166 | int ret; |
|---|
| 1961 | 2167 | |
|---|
| 1962 | 2168 | listen_id_priv = container_of(listen_id, struct rdma_id_private, id); |
|---|
| 1963 | | - id = __rdma_create_id(net, listen_id->event_handler, listen_id->context, |
|---|
| 1964 | | - listen_id->ps, IB_QPT_UD, |
|---|
| 1965 | | - listen_id_priv->res.kern_name); |
|---|
| 1966 | | - if (IS_ERR(id)) |
|---|
| 2169 | + id_priv = __rdma_create_id(net, listen_id->event_handler, |
|---|
| 2170 | + listen_id->context, listen_id->ps, IB_QPT_UD, |
|---|
| 2171 | + listen_id_priv); |
|---|
| 2172 | + if (IS_ERR(id_priv)) |
|---|
| 1967 | 2173 | return NULL; |
|---|
| 1968 | 2174 | |
|---|
| 1969 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 2175 | + id = &id_priv->id; |
|---|
| 1970 | 2176 | if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, |
|---|
| 1971 | 2177 | (struct sockaddr *)&id->route.addr.dst_addr, |
|---|
| 1972 | 2178 | listen_id, ib_event, ss_family, |
|---|
| .. | .. |
|---|
| 1974 | 2180 | goto err; |
|---|
| 1975 | 2181 | |
|---|
| 1976 | 2182 | if (net_dev) { |
|---|
| 1977 | | - rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL); |
|---|
| 2183 | + rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); |
|---|
| 1978 | 2184 | } else { |
|---|
| 1979 | 2185 | if (!cma_any_addr(cma_src_addr(id_priv))) { |
|---|
| 1980 | 2186 | ret = cma_translate_addr(cma_src_addr(id_priv), |
|---|
| .. | .. |
|---|
| 2004 | 2210 | event->param.conn.rnr_retry_count = req_data->rnr_retry_count; |
|---|
| 2005 | 2211 | event->param.conn.srq = req_data->srq; |
|---|
| 2006 | 2212 | event->param.conn.qp_num = req_data->remote_qpn; |
|---|
| 2213 | + |
|---|
| 2214 | + event->ece.vendor_id = req_data->ece.vendor_id; |
|---|
| 2215 | + event->ece.attr_mod = req_data->ece.attr_mod; |
|---|
| 2007 | 2216 | } |
|---|
| 2008 | 2217 | |
|---|
| 2009 | 2218 | static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, |
|---|
| .. | .. |
|---|
| 2021 | 2230 | { |
|---|
| 2022 | 2231 | struct rdma_id_private *listen_id, *conn_id = NULL; |
|---|
| 2023 | 2232 | struct rdma_cm_event event = {}; |
|---|
| 2233 | + struct cma_req_info req = {}; |
|---|
| 2024 | 2234 | struct net_device *net_dev; |
|---|
| 2025 | 2235 | u8 offset; |
|---|
| 2026 | 2236 | int ret; |
|---|
| 2027 | 2237 | |
|---|
| 2028 | | - listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev); |
|---|
| 2238 | + listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); |
|---|
| 2029 | 2239 | if (IS_ERR(listen_id)) |
|---|
| 2030 | 2240 | return PTR_ERR(listen_id); |
|---|
| 2031 | 2241 | |
|---|
| 2242 | + trace_cm_req_handler(listen_id, ib_event->event); |
|---|
| 2032 | 2243 | if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { |
|---|
| 2033 | 2244 | ret = -EINVAL; |
|---|
| 2034 | 2245 | goto net_dev_put; |
|---|
| 2035 | 2246 | } |
|---|
| 2036 | 2247 | |
|---|
| 2037 | 2248 | mutex_lock(&listen_id->handler_mutex); |
|---|
| 2038 | | - if (listen_id->state != RDMA_CM_LISTEN) { |
|---|
| 2249 | + if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { |
|---|
| 2039 | 2250 | ret = -ECONNABORTED; |
|---|
| 2040 | | - goto err1; |
|---|
| 2251 | + goto err_unlock; |
|---|
| 2041 | 2252 | } |
|---|
| 2042 | 2253 | |
|---|
| 2043 | 2254 | offset = cma_user_data_offset(listen_id); |
|---|
| .. | .. |
|---|
| 2054 | 2265 | } |
|---|
| 2055 | 2266 | if (!conn_id) { |
|---|
| 2056 | 2267 | ret = -ENOMEM; |
|---|
| 2057 | | - goto err1; |
|---|
| 2268 | + goto err_unlock; |
|---|
| 2058 | 2269 | } |
|---|
| 2059 | 2270 | |
|---|
| 2060 | 2271 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
|---|
| 2061 | | - ret = cma_acquire_dev(conn_id, listen_id); |
|---|
| 2062 | | - if (ret) |
|---|
| 2063 | | - goto err2; |
|---|
| 2272 | + ret = cma_ib_acquire_dev(conn_id, listen_id, &req); |
|---|
| 2273 | + if (ret) { |
|---|
| 2274 | + destroy_id_handler_unlock(conn_id); |
|---|
| 2275 | + goto err_unlock; |
|---|
| 2276 | + } |
|---|
| 2064 | 2277 | |
|---|
| 2065 | 2278 | conn_id->cm_id.ib = cm_id; |
|---|
| 2066 | 2279 | cm_id->context = conn_id; |
|---|
| 2067 | 2280 | cm_id->cm_handler = cma_ib_handler; |
|---|
| 2068 | 2281 | |
|---|
| 2069 | | - /* |
|---|
| 2070 | | - * Protect against the user destroying conn_id from another thread |
|---|
| 2071 | | - * until we're done accessing it. |
|---|
| 2072 | | - */ |
|---|
| 2073 | | - atomic_inc(&conn_id->refcount); |
|---|
| 2074 | | - ret = conn_id->id.event_handler(&conn_id->id, &event); |
|---|
| 2075 | | - if (ret) |
|---|
| 2076 | | - goto err3; |
|---|
| 2077 | | - /* |
|---|
| 2078 | | - * Acquire mutex to prevent user executing rdma_destroy_id() |
|---|
| 2079 | | - * while we're accessing the cm_id. |
|---|
| 2080 | | - */ |
|---|
| 2081 | | - mutex_lock(&lock); |
|---|
| 2082 | | - if (cma_comp(conn_id, RDMA_CM_CONNECT) && |
|---|
| 2083 | | - (conn_id->id.qp_type != IB_QPT_UD)) |
|---|
| 2084 | | - ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
|---|
| 2085 | | - mutex_unlock(&lock); |
|---|
| 2086 | | - mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2087 | | - mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2088 | | - cma_deref_id(conn_id); |
|---|
| 2089 | | - if (net_dev) |
|---|
| 2090 | | - dev_put(net_dev); |
|---|
| 2091 | | - return 0; |
|---|
| 2282 | + ret = cma_cm_event_handler(conn_id, &event); |
|---|
| 2283 | + if (ret) { |
|---|
| 2284 | + /* Destroy the CM ID by returning a non-zero value. */ |
|---|
| 2285 | + conn_id->cm_id.ib = NULL; |
|---|
| 2286 | + mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2287 | + destroy_id_handler_unlock(conn_id); |
|---|
| 2288 | + goto net_dev_put; |
|---|
| 2289 | + } |
|---|
| 2092 | 2290 | |
|---|
| 2093 | | -err3: |
|---|
| 2094 | | - cma_deref_id(conn_id); |
|---|
| 2095 | | - /* Destroy the CM ID by returning a non-zero value. */ |
|---|
| 2096 | | - conn_id->cm_id.ib = NULL; |
|---|
| 2097 | | -err2: |
|---|
| 2098 | | - cma_exch(conn_id, RDMA_CM_DESTROYING); |
|---|
| 2291 | + if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && |
|---|
| 2292 | + conn_id->id.qp_type != IB_QPT_UD) { |
|---|
| 2293 | + trace_cm_send_mra(cm_id->context); |
|---|
| 2294 | + ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
|---|
| 2295 | + } |
|---|
| 2099 | 2296 | mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2100 | | -err1: |
|---|
| 2297 | + |
|---|
| 2298 | +err_unlock: |
|---|
| 2101 | 2299 | mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2102 | | - if (conn_id) |
|---|
| 2103 | | - rdma_destroy_id(&conn_id->id); |
|---|
| 2104 | 2300 | |
|---|
| 2105 | 2301 | net_dev_put: |
|---|
| 2106 | 2302 | if (net_dev) |
|---|
| .. | .. |
|---|
| 2154 | 2350 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
|---|
| 2155 | 2351 | |
|---|
| 2156 | 2352 | mutex_lock(&id_priv->handler_mutex); |
|---|
| 2157 | | - if (id_priv->state != RDMA_CM_CONNECT) |
|---|
| 2353 | + if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
|---|
| 2158 | 2354 | goto out; |
|---|
| 2159 | 2355 | |
|---|
| 2160 | 2356 | switch (iw_event->event) { |
|---|
| .. | .. |
|---|
| 2196 | 2392 | event.status = iw_event->status; |
|---|
| 2197 | 2393 | event.param.conn.private_data = iw_event->private_data; |
|---|
| 2198 | 2394 | event.param.conn.private_data_len = iw_event->private_data_len; |
|---|
| 2199 | | - ret = id_priv->id.event_handler(&id_priv->id, &event); |
|---|
| 2395 | + ret = cma_cm_event_handler(id_priv, &event); |
|---|
| 2200 | 2396 | if (ret) { |
|---|
| 2201 | 2397 | /* Destroy the CM ID by returning a non-zero value. */ |
|---|
| 2202 | 2398 | id_priv->cm_id.iw = NULL; |
|---|
| 2203 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 2204 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2205 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 2399 | + destroy_id_handler_unlock(id_priv); |
|---|
| 2206 | 2400 | return ret; |
|---|
| 2207 | 2401 | } |
|---|
| 2208 | 2402 | |
|---|
| .. | .. |
|---|
| 2214 | 2408 | static int iw_conn_req_handler(struct iw_cm_id *cm_id, |
|---|
| 2215 | 2409 | struct iw_cm_event *iw_event) |
|---|
| 2216 | 2410 | { |
|---|
| 2217 | | - struct rdma_cm_id *new_cm_id; |
|---|
| 2218 | 2411 | struct rdma_id_private *listen_id, *conn_id; |
|---|
| 2219 | 2412 | struct rdma_cm_event event = {}; |
|---|
| 2220 | 2413 | int ret = -ECONNABORTED; |
|---|
| .. | .. |
|---|
| 2230 | 2423 | listen_id = cm_id->context; |
|---|
| 2231 | 2424 | |
|---|
| 2232 | 2425 | mutex_lock(&listen_id->handler_mutex); |
|---|
| 2233 | | - if (listen_id->state != RDMA_CM_LISTEN) |
|---|
| 2426 | + if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) |
|---|
| 2234 | 2427 | goto out; |
|---|
| 2235 | 2428 | |
|---|
| 2236 | 2429 | /* Create a new RDMA id for the new IW CM ID */ |
|---|
| 2237 | | - new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, |
|---|
| 2238 | | - listen_id->id.event_handler, |
|---|
| 2239 | | - listen_id->id.context, |
|---|
| 2240 | | - RDMA_PS_TCP, IB_QPT_RC, |
|---|
| 2241 | | - listen_id->res.kern_name); |
|---|
| 2242 | | - if (IS_ERR(new_cm_id)) { |
|---|
| 2430 | + conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, |
|---|
| 2431 | + listen_id->id.event_handler, |
|---|
| 2432 | + listen_id->id.context, RDMA_PS_TCP, |
|---|
| 2433 | + IB_QPT_RC, listen_id); |
|---|
| 2434 | + if (IS_ERR(conn_id)) { |
|---|
| 2243 | 2435 | ret = -ENOMEM; |
|---|
| 2244 | 2436 | goto out; |
|---|
| 2245 | 2437 | } |
|---|
| 2246 | | - conn_id = container_of(new_cm_id, struct rdma_id_private, id); |
|---|
| 2247 | 2438 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
|---|
| 2248 | 2439 | conn_id->state = RDMA_CM_CONNECT; |
|---|
| 2249 | 2440 | |
|---|
| 2250 | 2441 | ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); |
|---|
| 2251 | 2442 | if (ret) { |
|---|
| 2252 | | - mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2253 | | - rdma_destroy_id(new_cm_id); |
|---|
| 2254 | | - goto out; |
|---|
| 2443 | + mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2444 | + destroy_id_handler_unlock(conn_id); |
|---|
| 2445 | + return ret; |
|---|
| 2255 | 2446 | } |
|---|
| 2256 | 2447 | |
|---|
| 2257 | | - ret = cma_acquire_dev(conn_id, listen_id); |
|---|
| 2448 | + ret = cma_iw_acquire_dev(conn_id, listen_id); |
|---|
| 2258 | 2449 | if (ret) { |
|---|
| 2259 | | - mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2260 | | - rdma_destroy_id(new_cm_id); |
|---|
| 2261 | | - goto out; |
|---|
| 2450 | + mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2451 | + destroy_id_handler_unlock(conn_id); |
|---|
| 2452 | + return ret; |
|---|
| 2262 | 2453 | } |
|---|
| 2263 | 2454 | |
|---|
| 2264 | 2455 | conn_id->cm_id.iw = cm_id; |
|---|
| .. | .. |
|---|
| 2268 | 2459 | memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); |
|---|
| 2269 | 2460 | memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); |
|---|
| 2270 | 2461 | |
|---|
| 2271 | | - /* |
|---|
| 2272 | | - * Protect against the user destroying conn_id from another thread |
|---|
| 2273 | | - * until we're done accessing it. |
|---|
| 2274 | | - */ |
|---|
| 2275 | | - atomic_inc(&conn_id->refcount); |
|---|
| 2276 | | - ret = conn_id->id.event_handler(&conn_id->id, &event); |
|---|
| 2462 | + ret = cma_cm_event_handler(conn_id, &event); |
|---|
| 2277 | 2463 | if (ret) { |
|---|
| 2278 | 2464 | /* User wants to destroy the CM ID */ |
|---|
| 2279 | 2465 | conn_id->cm_id.iw = NULL; |
|---|
| 2280 | | - cma_exch(conn_id, RDMA_CM_DESTROYING); |
|---|
| 2281 | | - mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2282 | 2466 | mutex_unlock(&listen_id->handler_mutex); |
|---|
| 2283 | | - cma_deref_id(conn_id); |
|---|
| 2284 | | - rdma_destroy_id(&conn_id->id); |
|---|
| 2467 | + destroy_id_handler_unlock(conn_id); |
|---|
| 2285 | 2468 | return ret; |
|---|
| 2286 | 2469 | } |
|---|
| 2287 | 2470 | |
|---|
| 2288 | 2471 | mutex_unlock(&conn_id->handler_mutex); |
|---|
| 2289 | | - cma_deref_id(conn_id); |
|---|
| 2290 | 2472 | |
|---|
| 2291 | 2473 | out: |
|---|
| 2292 | 2474 | mutex_unlock(&listen_id->handler_mutex); |
|---|
| .. | .. |
|---|
| 2321 | 2503 | if (IS_ERR(id)) |
|---|
| 2322 | 2504 | return PTR_ERR(id); |
|---|
| 2323 | 2505 | |
|---|
| 2506 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 2324 | 2507 | id->tos = id_priv->tos; |
|---|
| 2508 | + id->tos_set = id_priv->tos_set; |
|---|
| 2509 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 2325 | 2510 | id_priv->cm_id.iw = id; |
|---|
| 2326 | 2511 | |
|---|
| 2327 | 2512 | memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), |
|---|
| .. | .. |
|---|
| 2342 | 2527 | { |
|---|
| 2343 | 2528 | struct rdma_id_private *id_priv = id->context; |
|---|
| 2344 | 2529 | |
|---|
| 2530 | + /* Listening IDs are always destroyed on removal */ |
|---|
| 2531 | + if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) |
|---|
| 2532 | + return -1; |
|---|
| 2533 | + |
|---|
| 2345 | 2534 | id->context = id_priv->id.context; |
|---|
| 2346 | 2535 | id->event_handler = id_priv->id.event_handler; |
|---|
| 2536 | + trace_cm_event_handler(id_priv, event); |
|---|
| 2347 | 2537 | return id_priv->id.event_handler(id, event); |
|---|
| 2348 | 2538 | } |
|---|
| 2349 | 2539 | |
|---|
| 2350 | | -static void cma_listen_on_dev(struct rdma_id_private *id_priv, |
|---|
| 2351 | | - struct cma_device *cma_dev) |
|---|
| 2540 | +static int cma_listen_on_dev(struct rdma_id_private *id_priv, |
|---|
| 2541 | + struct cma_device *cma_dev, |
|---|
| 2542 | + struct rdma_id_private **to_destroy) |
|---|
| 2352 | 2543 | { |
|---|
| 2353 | 2544 | struct rdma_id_private *dev_id_priv; |
|---|
| 2354 | | - struct rdma_cm_id *id; |
|---|
| 2355 | 2545 | struct net *net = id_priv->id.route.addr.dev_addr.net; |
|---|
| 2356 | 2546 | int ret; |
|---|
| 2357 | 2547 | |
|---|
| 2358 | 2548 | lockdep_assert_held(&lock); |
|---|
| 2359 | 2549 | |
|---|
| 2550 | + *to_destroy = NULL; |
|---|
| 2360 | 2551 | if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) |
|---|
| 2361 | | - return; |
|---|
| 2552 | + return 0; |
|---|
| 2362 | 2553 | |
|---|
| 2363 | | - id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, |
|---|
| 2364 | | - id_priv->id.qp_type, id_priv->res.kern_name); |
|---|
| 2365 | | - if (IS_ERR(id)) |
|---|
| 2366 | | - return; |
|---|
| 2367 | | - |
|---|
| 2368 | | - dev_id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 2554 | + dev_id_priv = |
|---|
| 2555 | + __rdma_create_id(net, cma_listen_handler, id_priv, |
|---|
| 2556 | + id_priv->id.ps, id_priv->id.qp_type, id_priv); |
|---|
| 2557 | + if (IS_ERR(dev_id_priv)) |
|---|
| 2558 | + return PTR_ERR(dev_id_priv); |
|---|
| 2369 | 2559 | |
|---|
| 2370 | 2560 | dev_id_priv->state = RDMA_CM_ADDR_BOUND; |
|---|
| 2371 | 2561 | memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), |
|---|
| 2372 | 2562 | rdma_addr_size(cma_src_addr(id_priv))); |
|---|
| 2373 | 2563 | |
|---|
| 2374 | 2564 | _cma_attach_to_dev(dev_id_priv, cma_dev); |
|---|
| 2375 | | - list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); |
|---|
| 2376 | | - atomic_inc(&id_priv->refcount); |
|---|
| 2565 | + rdma_restrack_add(&dev_id_priv->res); |
|---|
| 2566 | + cma_id_get(id_priv); |
|---|
| 2377 | 2567 | dev_id_priv->internal_id = 1; |
|---|
| 2378 | 2568 | dev_id_priv->afonly = id_priv->afonly; |
|---|
| 2569 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 2570 | + dev_id_priv->tos_set = id_priv->tos_set; |
|---|
| 2571 | + dev_id_priv->tos = id_priv->tos; |
|---|
| 2572 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 2379 | 2573 | |
|---|
| 2380 | | - ret = rdma_listen(id, id_priv->backlog); |
|---|
| 2574 | + ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); |
|---|
| 2381 | 2575 | if (ret) |
|---|
| 2382 | | - pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", |
|---|
| 2383 | | - ret, cma_dev->device->name); |
|---|
| 2576 | + goto err_listen; |
|---|
| 2577 | + list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); |
|---|
| 2578 | + return 0; |
|---|
| 2579 | +err_listen: |
|---|
| 2580 | + /* Caller must destroy this after releasing lock */ |
|---|
| 2581 | + *to_destroy = dev_id_priv; |
|---|
| 2582 | + dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); |
|---|
| 2583 | + return ret; |
|---|
| 2384 | 2584 | } |
|---|
| 2385 | 2585 | |
|---|
| 2386 | | -static void cma_listen_on_all(struct rdma_id_private *id_priv) |
|---|
| 2586 | +static int cma_listen_on_all(struct rdma_id_private *id_priv) |
|---|
| 2387 | 2587 | { |
|---|
| 2588 | + struct rdma_id_private *to_destroy; |
|---|
| 2388 | 2589 | struct cma_device *cma_dev; |
|---|
| 2590 | + int ret; |
|---|
| 2389 | 2591 | |
|---|
| 2390 | 2592 | mutex_lock(&lock); |
|---|
| 2391 | 2593 | list_add_tail(&id_priv->list, &listen_any_list); |
|---|
| 2392 | | - list_for_each_entry(cma_dev, &dev_list, list) |
|---|
| 2393 | | - cma_listen_on_dev(id_priv, cma_dev); |
|---|
| 2594 | + list_for_each_entry(cma_dev, &dev_list, list) { |
|---|
| 2595 | + ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); |
|---|
| 2596 | + if (ret) { |
|---|
| 2597 | + /* Prevent racing with cma_process_remove() */ |
|---|
| 2598 | + if (to_destroy) |
|---|
| 2599 | + list_del_init(&to_destroy->list); |
|---|
| 2600 | + goto err_listen; |
|---|
| 2601 | + } |
|---|
| 2602 | + } |
|---|
| 2394 | 2603 | mutex_unlock(&lock); |
|---|
| 2604 | + return 0; |
|---|
| 2605 | + |
|---|
| 2606 | +err_listen: |
|---|
| 2607 | + _cma_cancel_listens(id_priv); |
|---|
| 2608 | + mutex_unlock(&lock); |
|---|
| 2609 | + if (to_destroy) |
|---|
| 2610 | + rdma_destroy_id(&to_destroy->id); |
|---|
| 2611 | + return ret; |
|---|
| 2395 | 2612 | } |
|---|
| 2396 | 2613 | |
|---|
| 2397 | 2614 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) |
|---|
| .. | .. |
|---|
| 2399 | 2616 | struct rdma_id_private *id_priv; |
|---|
| 2400 | 2617 | |
|---|
| 2401 | 2618 | id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 2619 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 2402 | 2620 | id_priv->tos = (u8) tos; |
|---|
| 2403 | 2621 | id_priv->tos_set = true; |
|---|
| 2622 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 2404 | 2623 | } |
|---|
| 2405 | 2624 | EXPORT_SYMBOL(rdma_set_service_type); |
|---|
| 2625 | + |
|---|
| 2626 | +/** |
|---|
| 2627 | + * rdma_set_ack_timeout() - Set the ack timeout of the QP associated |
|---|
| 2628 | + * with a connection identifier. |
|---|
| 2629 | + * @id: Communication identifier associated with the QP. |
|---|
| 2630 | + * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec. |
|---|
| 2631 | + * |
|---|
| 2632 | + * This function should be called before rdma_connect() on the active |
|---|
| 2633 | + * side, and before rdma_accept() on the passive side. It applies to the |
|---|
| 2634 | + * primary path only. The timeout affects only the local side of the QP; |
|---|
| 2635 | + * it is not negotiated with the remote side, and zero disables the timer. |
|---|
| 2636 | + * If set before rdma_resolve_route(), the value is also used to determine |
|---|
| 2637 | + * the PacketLifeTime for RoCE. |
|---|
| 2638 | + * |
|---|
| 2639 | + * Return: 0 for success |
|---|
| 2640 | + */ |
|---|
| 2641 | +int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) |
|---|
| 2642 | +{ |
|---|
| 2643 | + struct rdma_id_private *id_priv; |
|---|
| 2644 | + |
|---|
| 2645 | + if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) |
|---|
| 2646 | + return -EINVAL; |
|---|
| 2647 | + |
|---|
| 2648 | + id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 2649 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 2650 | + id_priv->timeout = timeout; |
|---|
| 2651 | + id_priv->timeout_set = true; |
|---|
| 2652 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 2653 | + |
|---|
| 2654 | + return 0; |
|---|
| 2655 | +} |
|---|
| 2656 | +EXPORT_SYMBOL(rdma_set_ack_timeout); |
|---|
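The encoding used above is the standard IBTA 5-bit exponent: a value t selects a timeout of 4.096 µs × 2^t, and zero disables the timer. A minimal caller-side sketch; `set_ack_timeout_usec()` is a hypothetical helper, only `rdma_set_ack_timeout()` itself comes from this patch:

```c
#include <rdma/rdma_cm.h>

/* Hypothetical helper: pick the smallest exponent t so that
 * 4.096 usec * 2^t covers the requested timeout in microseconds.
 */
static int set_ack_timeout_usec(struct rdma_cm_id *id, u64 usec)
{
	u8 t = 0;

	/* 4096 ns * 2^t, compared against the request in nanoseconds */
	while (t < 31 && (4096ULL << t) < usec * 1000)
		t++;

	/* e.g. usec = 67000 (~67 ms) yields t = 14, since
	 * 4.096 usec * 2^14 = 67.108864 ms.
	 */
	return rdma_set_ack_timeout(id, t);
}
```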
| 2406 | 2657 | |
|---|
| 2407 | 2658 | static void cma_query_handler(int status, struct sa_path_rec *path_rec, |
|---|
| 2408 | 2659 | void *context) |
|---|
| .. | .. |
|---|
| 2427 | 2678 | queue_work(cma_wq, &work->work); |
|---|
| 2428 | 2679 | } |
|---|
| 2429 | 2680 | |
|---|
| 2430 | | -static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, |
|---|
| 2431 | | - struct cma_work *work) |
|---|
| 2681 | +static int cma_query_ib_route(struct rdma_id_private *id_priv, |
|---|
| 2682 | + unsigned long timeout_ms, struct cma_work *work) |
|---|
| 2432 | 2683 | { |
|---|
| 2433 | 2684 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 2434 | 2685 | struct sa_path_rec path_rec; |
|---|
| .. | .. |
|---|
| 2480 | 2731 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; |
|---|
| 2481 | 2732 | } |
|---|
| 2482 | 2733 | |
|---|
| 2734 | +static void cma_iboe_join_work_handler(struct work_struct *work) |
|---|
| 2735 | +{ |
|---|
| 2736 | + struct cma_multicast *mc = |
|---|
| 2737 | + container_of(work, struct cma_multicast, iboe_join.work); |
|---|
| 2738 | + struct rdma_cm_event *event = &mc->iboe_join.event; |
|---|
| 2739 | + struct rdma_id_private *id_priv = mc->id_priv; |
|---|
| 2740 | + int ret; |
|---|
| 2741 | + |
|---|
| 2742 | + mutex_lock(&id_priv->handler_mutex); |
|---|
| 2743 | + if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || |
|---|
| 2744 | + READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) |
|---|
| 2745 | + goto out_unlock; |
|---|
| 2746 | + |
|---|
| 2747 | + ret = cma_cm_event_handler(id_priv, event); |
|---|
| 2748 | + WARN_ON(ret); |
|---|
| 2749 | + |
|---|
| 2750 | +out_unlock: |
|---|
| 2751 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2752 | + if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) |
|---|
| 2753 | + rdma_destroy_ah_attr(&event->param.ud.ah_attr); |
|---|
| 2754 | +} |
|---|
| 2755 | + |
|---|
| 2483 | 2756 | static void cma_work_handler(struct work_struct *_work) |
|---|
| 2484 | 2757 | { |
|---|
| 2485 | 2758 | struct cma_work *work = container_of(_work, struct cma_work, work); |
|---|
| 2486 | 2759 | struct rdma_id_private *id_priv = work->id; |
|---|
| 2487 | | - int destroy = 0; |
|---|
| 2488 | 2760 | |
|---|
| 2489 | 2761 | mutex_lock(&id_priv->handler_mutex); |
|---|
| 2490 | | - if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) |
|---|
| 2491 | | - goto out; |
|---|
| 2492 | | - |
|---|
| 2493 | | - if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
|---|
| 2494 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 2495 | | - destroy = 1; |
|---|
| 2496 | | - } |
|---|
| 2497 | | -out: |
|---|
| 2498 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2499 | | - cma_deref_id(id_priv); |
|---|
| 2500 | | - if (destroy) |
|---|
| 2501 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 2502 | | - kfree(work); |
|---|
| 2503 | | -} |
|---|
| 2504 | | - |
|---|
| 2505 | | -static void cma_ndev_work_handler(struct work_struct *_work) |
|---|
| 2506 | | -{ |
|---|
| 2507 | | - struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); |
|---|
| 2508 | | - struct rdma_id_private *id_priv = work->id; |
|---|
| 2509 | | - int destroy = 0; |
|---|
| 2510 | | - |
|---|
| 2511 | | - mutex_lock(&id_priv->handler_mutex); |
|---|
| 2512 | | - if (id_priv->state == RDMA_CM_DESTROYING || |
|---|
| 2513 | | - id_priv->state == RDMA_CM_DEVICE_REMOVAL) |
|---|
| 2514 | | - goto out; |
|---|
| 2515 | | - |
|---|
| 2516 | | - if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
|---|
| 2517 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 2518 | | - destroy = 1; |
|---|
| 2762 | + if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || |
|---|
| 2763 | + READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) |
|---|
| 2764 | + goto out_unlock; |
|---|
| 2765 | + if (work->old_state != 0 || work->new_state != 0) { |
|---|
| 2766 | + if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) |
|---|
| 2767 | + goto out_unlock; |
|---|
| 2519 | 2768 | } |
|---|
| 2520 | 2769 | |
|---|
| 2521 | | -out: |
|---|
| 2770 | + if (cma_cm_event_handler(id_priv, &work->event)) { |
|---|
| 2771 | + cma_id_put(id_priv); |
|---|
| 2772 | + destroy_id_handler_unlock(id_priv); |
|---|
| 2773 | + goto out_free; |
|---|
| 2774 | + } |
|---|
| 2775 | + |
|---|
| 2776 | +out_unlock: |
|---|
| 2522 | 2777 | mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2523 | | - cma_deref_id(id_priv); |
|---|
| 2524 | | - if (destroy) |
|---|
| 2525 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 2778 | + cma_id_put(id_priv); |
|---|
| 2779 | +out_free: |
|---|
| 2780 | + if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) |
|---|
| 2781 | + rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); |
|---|
| 2526 | 2782 | kfree(work); |
|---|
| 2527 | 2783 | } |
|---|
| 2528 | 2784 | |
|---|
| .. | .. |
|---|
| 2536 | 2792 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
|---|
| 2537 | 2793 | } |
|---|
| 2538 | 2794 | |
|---|
| 2539 | | -static void cma_init_resolve_addr_work(struct cma_work *work, |
|---|
| 2540 | | - struct rdma_id_private *id_priv) |
|---|
| 2795 | +static void enqueue_resolve_addr_work(struct cma_work *work, |
|---|
| 2796 | + struct rdma_id_private *id_priv) |
|---|
| 2541 | 2797 | { |
|---|
| 2798 | + /* Balances with cma_id_put() in cma_work_handler */ |
|---|
| 2799 | + cma_id_get(id_priv); |
|---|
| 2800 | + |
|---|
| 2542 | 2801 | work->id = id_priv; |
|---|
| 2543 | 2802 | INIT_WORK(&work->work, cma_work_handler); |
|---|
| 2544 | 2803 | work->old_state = RDMA_CM_ADDR_QUERY; |
|---|
| 2545 | 2804 | work->new_state = RDMA_CM_ADDR_RESOLVED; |
|---|
| 2546 | 2805 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
|---|
| 2806 | + |
|---|
| 2807 | + queue_work(cma_wq, &work->work); |
|---|
| 2547 | 2808 | } |
|---|
| 2548 | 2809 | |
|---|
| 2549 | | -static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) |
|---|
| 2810 | +static int cma_resolve_ib_route(struct rdma_id_private *id_priv, |
|---|
| 2811 | + unsigned long timeout_ms) |
|---|
| 2550 | 2812 | { |
|---|
| 2551 | 2813 | struct rdma_route *route = &id_priv->id.route; |
|---|
| 2552 | 2814 | struct cma_work *work; |
|---|
| .. | .. |
|---|
| 2669 | 2931 | } |
|---|
| 2670 | 2932 | EXPORT_SYMBOL(rdma_set_ib_path); |
|---|
| 2671 | 2933 | |
|---|
| 2672 | | -static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) |
|---|
| 2934 | +static int cma_resolve_iw_route(struct rdma_id_private *id_priv) |
|---|
| 2673 | 2935 | { |
|---|
| 2674 | 2936 | struct cma_work *work; |
|---|
| 2675 | 2937 | |
|---|
| .. | .. |
|---|
| 2682 | 2944 | return 0; |
|---|
| 2683 | 2945 | } |
|---|
| 2684 | 2946 | |
|---|
| 2685 | | -static int iboe_tos_to_sl(struct net_device *ndev, int tos) |
|---|
| 2947 | +static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) |
|---|
| 2686 | 2948 | { |
|---|
| 2687 | | - int prio; |
|---|
| 2688 | 2949 | struct net_device *dev; |
|---|
| 2689 | 2950 | |
|---|
| 2690 | | - prio = rt_tos2priority(tos); |
|---|
| 2691 | | - dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev; |
|---|
| 2951 | + dev = vlan_dev_real_dev(vlan_ndev); |
|---|
| 2692 | 2952 | if (dev->num_tc) |
|---|
| 2693 | 2953 | return netdev_get_prio_tc_map(dev, prio); |
|---|
| 2694 | 2954 | |
|---|
| 2695 | | -#if IS_ENABLED(CONFIG_VLAN_8021Q) |
|---|
| 2955 | + return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & |
|---|
| 2956 | + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; |
|---|
| 2957 | +} |
|---|
| 2958 | + |
|---|
| 2959 | +struct iboe_prio_tc_map { |
|---|
| 2960 | + int input_prio; |
|---|
| 2961 | + int output_tc; |
|---|
| 2962 | + bool found; |
|---|
| 2963 | +}; |
|---|
| 2964 | + |
|---|
| 2965 | +static int get_lower_vlan_dev_tc(struct net_device *dev, |
|---|
| 2966 | + struct netdev_nested_priv *priv) |
|---|
| 2967 | +{ |
|---|
| 2968 | + struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; |
|---|
| 2969 | + |
|---|
| 2970 | + if (is_vlan_dev(dev)) |
|---|
| 2971 | + map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); |
|---|
| 2972 | + else if (dev->num_tc) |
|---|
| 2973 | + map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); |
|---|
| 2974 | + else |
|---|
| 2975 | + map->output_tc = 0; |
|---|
| 2976 | + /* We are interested only in the first-level VLAN device, so always |
|---|
| 2977 | + * return 1 to stop iterating over next-level devices. |
|---|
| 2978 | + */ |
|---|
| 2979 | + map->found = true; |
|---|
| 2980 | + return 1; |
|---|
| 2981 | +} |
|---|
| 2982 | + |
|---|
| 2983 | +static int iboe_tos_to_sl(struct net_device *ndev, int tos) |
|---|
| 2984 | +{ |
|---|
| 2985 | + struct iboe_prio_tc_map prio_tc_map = {}; |
|---|
| 2986 | + int prio = rt_tos2priority(tos); |
|---|
| 2987 | + struct netdev_nested_priv priv; |
|---|
| 2988 | + |
|---|
| 2989 | + /* If this is a VLAN device, get the TC mapping directly from the VLAN netdev */ |
|---|
| 2696 | 2990 | if (is_vlan_dev(ndev)) |
|---|
| 2697 | | - return (vlan_dev_get_egress_qos_mask(ndev, prio) & |
|---|
| 2698 | | - VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; |
|---|
| 2699 | | -#endif |
|---|
| 2700 | | - return 0; |
|---|
| 2991 | + return get_vlan_ndev_tc(ndev, prio); |
|---|
| 2992 | + |
|---|
| 2993 | + prio_tc_map.input_prio = prio; |
|---|
| 2994 | + priv.data = (void *)&prio_tc_map; |
|---|
| 2995 | + rcu_read_lock(); |
|---|
| 2996 | + netdev_walk_all_lower_dev_rcu(ndev, |
|---|
| 2997 | + get_lower_vlan_dev_tc, |
|---|
| 2998 | + &priv); |
|---|
| 2999 | + rcu_read_unlock(); |
|---|
| 3000 | + /* If map is found from lower device, use it; Otherwise |
|---|
| 3001 | + * continue with the current netdevice to get priority to tc map. |
|---|
| 3002 | + */ |
|---|
| 3003 | + if (prio_tc_map.found) |
|---|
| 3004 | + return prio_tc_map.output_tc; |
|---|
| 3005 | + else if (ndev->num_tc) |
|---|
| 3006 | + return netdev_get_prio_tc_map(ndev, prio); |
|---|
| 3007 | + else |
|---|
| 3008 | + return 0; |
|---|
| 3009 | +} |
|---|
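For the VLAN branches above, the SL ultimately comes from the 3-bit PCP field of the 802.1Q tag that `vlan_dev_get_egress_qos_mask()` returns in TCI format. A standalone sketch of that extraction, using the kernel's `VLAN_PRIO_MASK`/`VLAN_PRIO_SHIFT` definitions; `tci` is an illustrative raw tag value:

```c
#include <linux/if_vlan.h>

/* Extract the 3-bit Priority Code Point from a 16-bit VLAN TCI.
 * TCI layout: PCP[15:13] | DEI[12] | VID[11:0], so the priority is
 * (tci & 0xe000) >> 13, exactly the masking done in get_vlan_ndev_tc().
 */
static u8 tci_to_pcp(u16 tci)
{
	return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

/* e.g. tci = 0xa005 -> PCP 5 (0xa005 & 0xe000 = 0xa000, 0xa000 >> 13 = 5) */
```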
| 3010 | + |
|---|
| 3011 | +static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) |
|---|
| 3012 | +{ |
|---|
| 3013 | + struct sockaddr_in6 *addr6; |
|---|
| 3014 | + u16 dport, sport; |
|---|
| 3015 | + u32 hash, fl; |
|---|
| 3016 | + |
|---|
| 3017 | + addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); |
|---|
| 3018 | + fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; |
|---|
| 3019 | + if ((cma_family(id_priv) != AF_INET6) || !fl) { |
|---|
| 3020 | + dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); |
|---|
| 3021 | + sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); |
|---|
| 3022 | + hash = (u32)sport * 31 + dport; |
|---|
| 3023 | + fl = hash & IB_GRH_FLOWLABEL_MASK; |
|---|
| 3024 | + } |
|---|
| 3025 | + |
|---|
| 3026 | + return cpu_to_be32(fl); |
|---|
| 2701 | 3027 | } |
|---|
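`cma_get_roce_udp_flow_label()` above falls back to a deterministic 20-bit label hashed from the port pair whenever the IPv6 source address does not already carry one. A self-contained sketch of the same arithmetic; the 0xfffff mask is the 20-bit GRH flow-label width (`IB_GRH_FLOWLABEL_MASK`):

```c
#include <linux/types.h>

/* Deterministic flow label from (sport, dport), as in the hunk above:
 * hash = sport * 31 + dport, truncated to the 20-bit GRH flow label.
 */
static u32 ports_to_flow_label(u16 sport, u16 dport)
{
	u32 hash = (u32)sport * 31 + dport;

	return hash & 0xfffff;	/* IB_GRH_FLOWLABEL_MASK */
}

/* e.g. sport = 4791, dport = 18515 -> hash = 167036 = 0x28c7c */
```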
| 2702 | 3028 | |
|---|
| 2703 | 3029 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) |
|---|
| .. | .. |
|---|
| 2710 | 3036 | |
|---|
| 2711 | 3037 | u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - |
|---|
| 2712 | 3038 | rdma_start_port(id_priv->cma_dev->device)]; |
|---|
| 2713 | | - u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; |
|---|
| 3039 | + u8 tos; |
|---|
| 2714 | 3040 | |
|---|
| 3041 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 3042 | + tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; |
|---|
| 3043 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 2715 | 3044 | |
|---|
| 2716 | 3045 | work = kzalloc(sizeof *work, GFP_KERNEL); |
|---|
| 2717 | 3046 | if (!work) |
|---|
| .. | .. |
|---|
| 2748 | 3077 | route->path_rec->traffic_class = tos; |
|---|
| 2749 | 3078 | route->path_rec->mtu = iboe_get_mtu(ndev->mtu); |
|---|
| 2750 | 3079 | route->path_rec->rate_selector = IB_SA_EQ; |
|---|
| 2751 | | - route->path_rec->rate = iboe_get_rate(ndev); |
|---|
| 3080 | + route->path_rec->rate = IB_RATE_PORT_CURRENT; |
|---|
| 2752 | 3081 | dev_put(ndev); |
|---|
| 2753 | 3082 | route->path_rec->packet_life_time_selector = IB_SA_EQ; |
|---|
| 2754 | | - route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; |
|---|
| 3083 | + /* If the ACK timeout is set, use it to calculate PacketLifeTime. |
|---|
| 3084 | + * As per IBTA 12.7.34, |
|---|
| 3085 | + * local ACK timeout = (2 * PacketLifeTime + Local CA's ACK delay). |
|---|
| 3086 | + * Assuming a negligible local ACK delay, we can use |
|---|
| 3087 | + * PacketLifeTime = local ACK timeout/2 |
|---|
| 3088 | + * as a reasonable approximation for RoCE networks. |
|---|
| 3089 | + */ |
|---|
| 3090 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 3091 | + if (id_priv->timeout_set && id_priv->timeout) |
|---|
| 3092 | + route->path_rec->packet_life_time = id_priv->timeout - 1; |
|---|
| 3093 | + else |
|---|
| 3094 | + route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; |
|---|
| 3095 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 3096 | + |
|---|
| 2755 | 3097 | if (!route->path_rec->mtu) { |
|---|
| 2756 | 3098 | ret = -EINVAL; |
|---|
| 2757 | 3099 | goto err2; |
|---|
| 2758 | 3100 | } |
|---|
| 3101 | + |
|---|
| 3102 | + if (rdma_protocol_roce_udp_encap(id_priv->id.device, |
|---|
| 3103 | + id_priv->id.port_num)) |
|---|
| 3104 | + route->path_rec->flow_label = |
|---|
| 3105 | + cma_get_roce_udp_flow_label(id_priv); |
|---|
| 2759 | 3106 | |
|---|
| 2760 | 3107 | cma_init_resolve_route_work(work, id_priv); |
|---|
| 2761 | 3108 | queue_work(cma_wq, &work->work); |
|---|
| .. | .. |
|---|
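The `timeout - 1` assignment in the hunk above is the comment's halving carried out in the exponent domain, since both fields encode a time as 4.096 µs times a power of two:

```latex
% IBTA exponent encoding for both fields:
\text{local ACK timeout} = 4.096\,\mu\text{s} \cdot 2^{t}, \qquad
\text{PacketLifeTime} = 4.096\,\mu\text{s} \cdot 2^{p}
% With the local CA's ACK delay taken as negligible:
2 \cdot \text{PacketLifeTime} \approx \text{local ACK timeout}
\;\Rightarrow\; 2^{p+1} = 2^{t} \;\Rightarrow\; p = t - 1
```

which is exactly `route->path_rec->packet_life_time = id_priv->timeout - 1`.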
| 2771 | 3118 | return ret; |
|---|
| 2772 | 3119 | } |
|---|
| 2773 | 3120 | |
|---|
| 2774 | | -int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) |
|---|
| 3121 | +int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) |
|---|
| 2775 | 3122 | { |
|---|
| 2776 | 3123 | struct rdma_id_private *id_priv; |
|---|
| 2777 | 3124 | int ret; |
|---|
| .. | .. |
|---|
| 2780 | 3127 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) |
|---|
| 2781 | 3128 | return -EINVAL; |
|---|
| 2782 | 3129 | |
|---|
| 2783 | | - atomic_inc(&id_priv->refcount); |
|---|
| 3130 | + cma_id_get(id_priv); |
|---|
| 2784 | 3131 | if (rdma_cap_ib_sa(id->device, id->port_num)) |
|---|
| 2785 | 3132 | ret = cma_resolve_ib_route(id_priv, timeout_ms); |
|---|
| 2786 | 3133 | else if (rdma_protocol_roce(id->device, id->port_num)) |
|---|
| 2787 | 3134 | ret = cma_resolve_iboe_route(id_priv); |
|---|
| 2788 | 3135 | else if (rdma_protocol_iwarp(id->device, id->port_num)) |
|---|
| 2789 | | - ret = cma_resolve_iw_route(id_priv, timeout_ms); |
|---|
| 3136 | + ret = cma_resolve_iw_route(id_priv); |
|---|
| 2790 | 3137 | else |
|---|
| 2791 | 3138 | ret = -ENOSYS; |
|---|
| 2792 | 3139 | |
|---|
| .. | .. |
|---|
| 2796 | 3143 | return 0; |
|---|
| 2797 | 3144 | err: |
|---|
| 2798 | 3145 | cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); |
|---|
| 2799 | | - cma_deref_id(id_priv); |
|---|
| 3146 | + cma_id_put(id_priv); |
|---|
| 2800 | 3147 | return ret; |
|---|
| 2801 | 3148 | } |
|---|
| 2802 | 3149 | EXPORT_SYMBOL(rdma_resolve_route); |
|---|
| .. | .. |
|---|
| 2823 | 3170 | struct cma_device *cma_dev, *cur_dev; |
|---|
| 2824 | 3171 | union ib_gid gid; |
|---|
| 2825 | 3172 | enum ib_port_state port_state; |
|---|
| 3173 | + unsigned int p; |
|---|
| 2826 | 3174 | u16 pkey; |
|---|
| 2827 | 3175 | int ret; |
|---|
| 2828 | | - u8 p; |
|---|
| 2829 | 3176 | |
|---|
| 2830 | 3177 | cma_dev = NULL; |
|---|
| 2831 | 3178 | mutex_lock(&lock); |
|---|
| .. | .. |
|---|
| 2837 | 3184 | if (!cma_dev) |
|---|
| 2838 | 3185 | cma_dev = cur_dev; |
|---|
| 2839 | 3186 | |
|---|
| 2840 | | - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { |
|---|
| 3187 | + rdma_for_each_port (cur_dev->device, p) { |
|---|
| 2841 | 3188 | if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && |
|---|
| 2842 | 3189 | port_state == IB_PORT_ACTIVE) { |
|---|
| 2843 | 3190 | cma_dev = cur_dev; |
|---|
| .. | .. |
|---|
| 2870 | 3217 | ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); |
|---|
| 2871 | 3218 | id_priv->id.port_num = p; |
|---|
| 2872 | 3219 | cma_attach_to_dev(id_priv, cma_dev); |
|---|
| 3220 | + rdma_restrack_add(&id_priv->res); |
|---|
| 2873 | 3221 | cma_set_loopback(cma_src_addr(id_priv)); |
|---|
| 2874 | 3222 | out: |
|---|
| 2875 | 3223 | mutex_unlock(&lock); |
|---|
| .. | .. |
|---|
| 2898 | 3246 | memcpy(&old_addr, addr, rdma_addr_size(addr)); |
|---|
| 2899 | 3247 | memcpy(addr, src_addr, rdma_addr_size(src_addr)); |
|---|
| 2900 | 3248 | if (!status && !id_priv->cma_dev) { |
|---|
| 2901 | | - status = cma_acquire_dev(id_priv, NULL); |
|---|
| 3249 | + status = cma_acquire_dev_by_src_ip(id_priv); |
|---|
| 2902 | 3250 | if (status) |
|---|
| 2903 | 3251 | pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", |
|---|
| 2904 | 3252 | status); |
|---|
| 3253 | + rdma_restrack_add(&id_priv->res); |
|---|
| 2905 | 3254 | } else if (status) { |
|---|
| 2906 | 3255 | pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status); |
|---|
| 2907 | 3256 | } |
|---|
| .. | .. |
|---|
| 2917 | 3266 | } else |
|---|
| 2918 | 3267 | event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
|---|
| 2919 | 3268 | |
|---|
| 2920 | | - if (id_priv->id.event_handler(&id_priv->id, &event)) { |
|---|
| 2921 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 2922 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2923 | | - cma_deref_id(id_priv); |
|---|
| 2924 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 3269 | + if (cma_cm_event_handler(id_priv, &event)) { |
|---|
| 3270 | + destroy_id_handler_unlock(id_priv); |
|---|
| 2925 | 3271 | return; |
|---|
| 2926 | 3272 | } |
|---|
| 2927 | 3273 | out: |
|---|
| 2928 | 3274 | mutex_unlock(&id_priv->handler_mutex); |
|---|
| 2929 | | - cma_deref_id(id_priv); |
|---|
| 2930 | 3275 | } |
|---|
| 2931 | 3276 | |
|---|
| 2932 | 3277 | static int cma_resolve_loopback(struct rdma_id_private *id_priv) |
|---|
| .. | .. |
|---|
| 2948 | 3293 | rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); |
|---|
| 2949 | 3294 | rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); |
|---|
| 2950 | 3295 | |
|---|
| 2951 | | - cma_init_resolve_addr_work(work, id_priv); |
|---|
| 2952 | | - queue_work(cma_wq, &work->work); |
|---|
| 3296 | + enqueue_resolve_addr_work(work, id_priv); |
|---|
| 2953 | 3297 | return 0; |
|---|
| 2954 | 3298 | err: |
|---|
| 2955 | 3299 | kfree(work); |
|---|
| .. | .. |
|---|
| 2974 | 3318 | rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) |
|---|
| 2975 | 3319 | &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); |
|---|
| 2976 | 3320 | |
|---|
| 2977 | | - cma_init_resolve_addr_work(work, id_priv); |
|---|
| 2978 | | - queue_work(cma_wq, &work->work); |
|---|
| 3321 | + enqueue_resolve_addr_work(work, id_priv); |
|---|
| 2979 | 3322 | return 0; |
|---|
| 2980 | 3323 | err: |
|---|
| 2981 | 3324 | kfree(work); |
|---|
| .. | .. |
|---|
| 2985 | 3328 | static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, |
|---|
| 2986 | 3329 | const struct sockaddr *dst_addr) |
|---|
| 2987 | 3330 | { |
|---|
| 2988 | | - if (!src_addr || !src_addr->sa_family) { |
|---|
| 2989 | | - src_addr = (struct sockaddr *) &id->route.addr.src_addr; |
|---|
| 2990 | | - src_addr->sa_family = dst_addr->sa_family; |
|---|
| 2991 | | - if (IS_ENABLED(CONFIG_IPV6) && |
|---|
| 2992 | | - dst_addr->sa_family == AF_INET6) { |
|---|
| 2993 | | - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; |
|---|
| 2994 | | - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; |
|---|
| 2995 | | - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; |
|---|
| 2996 | | - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
|---|
| 2997 | | - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; |
|---|
| 2998 | | - } else if (dst_addr->sa_family == AF_IB) { |
|---|
| 2999 | | - ((struct sockaddr_ib *) src_addr)->sib_pkey = |
|---|
| 3000 | | - ((struct sockaddr_ib *) dst_addr)->sib_pkey; |
|---|
| 3331 | + struct sockaddr_storage zero_sock = {}; |
|---|
| 3332 | + |
|---|
| 3333 | + if (src_addr && src_addr->sa_family) |
|---|
| 3334 | + return rdma_bind_addr(id, src_addr); |
|---|
| 3335 | + |
|---|
| 3336 | + /* |
|---|
| 3337 | + * When the src_addr is not specified, automatically supply a wildcard (any) address |
|---|
| 3338 | + */ |
|---|
| 3339 | + zero_sock.ss_family = dst_addr->sa_family; |
|---|
| 3340 | + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { |
|---|
| 3341 | + struct sockaddr_in6 *src_addr6 = |
|---|
| 3342 | + (struct sockaddr_in6 *)&zero_sock; |
|---|
| 3343 | + struct sockaddr_in6 *dst_addr6 = |
|---|
| 3344 | + (struct sockaddr_in6 *)dst_addr; |
|---|
| 3345 | + |
|---|
| 3346 | + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; |
|---|
| 3347 | + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
|---|
| 3348 | + id->route.addr.dev_addr.bound_dev_if = |
|---|
| 3349 | + dst_addr6->sin6_scope_id; |
|---|
| 3350 | + } else if (dst_addr->sa_family == AF_IB) { |
|---|
| 3351 | + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = |
|---|
| 3352 | + ((struct sockaddr_ib *)dst_addr)->sib_pkey; |
|---|
| 3353 | + } |
|---|
| 3354 | + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); |
|---|
| 3355 | +} |
|---|
| 3356 | + |
|---|
| 3357 | +/* |
|---|
| 3358 | + * If required, resolve the source address for bind and leave the id_priv in |
|---|
| 3359 | + * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior |
|---|
| 3360 | + * calls made by the ULP; a previously bound ID will not be re-bound, and |
|---|
| 3361 | + * src_addr is ignored. |
|---|
| 3362 | + */ |
|---|
| 3363 | +static int resolve_prepare_src(struct rdma_id_private *id_priv, |
|---|
| 3364 | + struct sockaddr *src_addr, |
|---|
| 3365 | + const struct sockaddr *dst_addr) |
|---|
| 3366 | +{ |
|---|
| 3367 | + int ret; |
|---|
| 3368 | + |
|---|
| 3369 | + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); |
|---|
| 3370 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { |
|---|
| 3371 | + /* For a well-behaved ULP, the state will be RDMA_CM_IDLE */ |
|---|
| 3372 | + ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); |
|---|
| 3373 | + if (ret) |
|---|
| 3374 | + goto err_dst; |
|---|
| 3375 | + if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, |
|---|
| 3376 | + RDMA_CM_ADDR_QUERY))) { |
|---|
| 3377 | + ret = -EINVAL; |
|---|
| 3378 | + goto err_dst; |
|---|
| 3001 | 3379 | } |
|---|
| 3002 | 3380 | } |
|---|
| 3003 | | - return rdma_bind_addr(id, src_addr); |
|---|
| 3381 | + |
|---|
| 3382 | + if (cma_family(id_priv) != dst_addr->sa_family) { |
|---|
| 3383 | + ret = -EINVAL; |
|---|
| 3384 | + goto err_state; |
|---|
| 3385 | + } |
|---|
| 3386 | + return 0; |
|---|
| 3387 | + |
|---|
| 3388 | +err_state: |
|---|
| 3389 | + cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); |
|---|
| 3390 | +err_dst: |
|---|
| 3391 | + memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); |
|---|
| 3392 | + return ret; |
|---|
| 3004 | 3393 | } |
|---|
| 3005 | 3394 | |
|---|
| 3006 | 3395 | int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, |
|---|
| 3007 | | - const struct sockaddr *dst_addr, int timeout_ms) |
|---|
| 3396 | + const struct sockaddr *dst_addr, unsigned long timeout_ms) |
|---|
| 3008 | 3397 | { |
|---|
| 3009 | | - struct rdma_id_private *id_priv; |
|---|
| 3398 | + struct rdma_id_private *id_priv = |
|---|
| 3399 | + container_of(id, struct rdma_id_private, id); |
|---|
| 3010 | 3400 | int ret; |
|---|
| 3011 | 3401 | |
|---|
| 3012 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 3013 | | - if (id_priv->state == RDMA_CM_IDLE) { |
|---|
| 3014 | | - ret = cma_bind_addr(id, src_addr, dst_addr); |
|---|
| 3015 | | - if (ret) |
|---|
| 3016 | | - return ret; |
|---|
| 3017 | | - } |
|---|
| 3402 | + ret = resolve_prepare_src(id_priv, src_addr, dst_addr); |
|---|
| 3403 | + if (ret) |
|---|
| 3404 | + return ret; |
|---|
| 3018 | 3405 | |
|---|
| 3019 | | - if (cma_family(id_priv) != dst_addr->sa_family) |
|---|
| 3020 | | - return -EINVAL; |
|---|
| 3021 | | - |
|---|
| 3022 | | - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) |
|---|
| 3023 | | - return -EINVAL; |
|---|
| 3024 | | - |
|---|
| 3025 | | - memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); |
|---|
| 3026 | | - atomic_inc(&id_priv->refcount); |
|---|
| 3027 | 3406 | if (cma_any_addr(dst_addr)) { |
|---|
| 3028 | 3407 | ret = cma_resolve_loopback(id_priv); |
|---|
| 3029 | 3408 | } else { |
|---|
| 3030 | 3409 | if (dst_addr->sa_family == AF_IB) { |
|---|
| 3031 | 3410 | ret = cma_resolve_ib_addr(id_priv); |
|---|
| 3032 | 3411 | } else { |
|---|
| 3033 | | - ret = rdma_resolve_ip(cma_src_addr(id_priv), |
|---|
| 3034 | | - dst_addr, &id->route.addr.dev_addr, |
|---|
| 3035 | | - timeout_ms, addr_handler, id_priv); |
|---|
| 3412 | + /* |
|---|
| 3413 | + * The FSM can return to RDMA_CM_ADDR_BOUND after |
|---|
| 3414 | + * rdma_resolve_ip() is called, e.g. through the error |
|---|
| 3415 | + * path in addr_handler(). If this happens, the existing |
|---|
| 3416 | + * request must be canceled before issuing a new one. |
|---|
| 3417 | + * Since canceling a request is a bit slow and this |
|---|
| 3418 | + * oddball path is rare, track whether a request has |
|---|
| 3419 | + * been issued. The flag turns out to be permanent, |
|---|
| 3420 | + * since this is the only cancel needed and it sits |
|---|
| 3421 | + * immediately before rdma_resolve_ip(). |
|---|
| 3422 | + */ |
|---|
| 3423 | + if (id_priv->used_resolve_ip) |
|---|
| 3424 | + rdma_addr_cancel(&id->route.addr.dev_addr); |
|---|
| 3425 | + else |
|---|
| 3426 | + id_priv->used_resolve_ip = 1; |
|---|
| 3427 | + ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, |
|---|
| 3428 | + &id->route.addr.dev_addr, |
|---|
| 3429 | + timeout_ms, addr_handler, |
|---|
| 3430 | + false, id_priv); |
|---|
| 3036 | 3431 | } |
|---|
| 3037 | 3432 | } |
|---|
| 3038 | 3433 | if (ret) |
|---|
| .. | .. |
|---|
| 3041 | 3436 | return 0; |
|---|
| 3042 | 3437 | err: |
|---|
| 3043 | 3438 | cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); |
|---|
| 3044 | | - cma_deref_id(id_priv); |
|---|
| 3045 | 3439 | return ret; |
|---|
| 3046 | 3440 | } |
|---|
| 3047 | 3441 | EXPORT_SYMBOL(rdma_resolve_addr); |
|---|
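To make the state transitions in these hunks concrete, here is a minimal active-side call sequence as a kernel ULP might drive it; `my_handler`, `start_connect()` and the timeout value are illustrative, not part of this patch:

```c
#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Walks the FSM IDLE -> ADDR_BOUND -> ADDR_QUERY; completion of each
 * later step is reported asynchronously through the event handler.
 */
static int start_connect(rdma_cm_event_handler my_handler, void *ctx,
			 struct sockaddr *dst)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, my_handler, ctx, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* NULL src_addr: cma_bind_addr() supplies a wildcard address */
	ret = rdma_resolve_addr(id, NULL, dst, 2000 /* ms */);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}
```

On RDMA_CM_EVENT_ADDR_RESOLVED the handler would then call rdma_resolve_route(), and on RDMA_CM_EVENT_ROUTE_RESOLVED it can connect (see the rdma_connect_locked() sketch further down).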
| .. | .. |
|---|
| 3054 | 3448 | |
|---|
| 3055 | 3449 | id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 3056 | 3450 | spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 3057 | | - if (reuse || id_priv->state == RDMA_CM_IDLE) { |
|---|
| 3451 | + if ((reuse && id_priv->state != RDMA_CM_LISTEN) || |
|---|
| 3452 | + id_priv->state == RDMA_CM_IDLE) { |
|---|
| 3058 | 3453 | id_priv->reuseaddr = reuse; |
|---|
| 3059 | 3454 | ret = 0; |
|---|
| 3060 | 3455 | } else { |
|---|
| .. | .. |
|---|
| 3135 | 3530 | goto err; |
|---|
| 3136 | 3531 | |
|---|
| 3137 | 3532 | bind_list->ps = ps; |
|---|
| 3138 | | - bind_list->port = (unsigned short)ret; |
|---|
| 3533 | + bind_list->port = snum; |
|---|
| 3139 | 3534 | cma_bind_port(bind_list, id_priv); |
|---|
| 3140 | 3535 | return 0; |
|---|
| 3141 | 3536 | err: |
|---|
| .. | .. |
|---|
| 3248 | 3643 | if (id_priv == cur_id) |
|---|
| 3249 | 3644 | continue; |
|---|
| 3250 | 3645 | |
|---|
| 3251 | | - if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && |
|---|
| 3252 | | - cur_id->reuseaddr) |
|---|
| 3646 | + if (reuseaddr && cur_id->reuseaddr) |
|---|
| 3253 | 3647 | continue; |
|---|
| 3254 | 3648 | |
|---|
| 3255 | 3649 | cur_addr = cma_src_addr(cur_id); |
|---|
| .. | .. |
|---|
| 3287 | 3681 | if (!ret) |
|---|
| 3288 | 3682 | cma_bind_port(bind_list, id_priv); |
|---|
| 3289 | 3683 | } |
|---|
| 3290 | | - return ret; |
|---|
| 3291 | | -} |
|---|
| 3292 | | - |
|---|
| 3293 | | -static int cma_bind_listen(struct rdma_id_private *id_priv) |
|---|
| 3294 | | -{ |
|---|
| 3295 | | - struct rdma_bind_list *bind_list = id_priv->bind_list; |
|---|
| 3296 | | - int ret = 0; |
|---|
| 3297 | | - |
|---|
| 3298 | | - mutex_lock(&lock); |
|---|
| 3299 | | - if (bind_list->owners.first->next) |
|---|
| 3300 | | - ret = cma_check_port(bind_list, id_priv, 0); |
|---|
| 3301 | | - mutex_unlock(&lock); |
|---|
| 3302 | 3684 | return ret; |
|---|
| 3303 | 3685 | } |
|---|
| 3304 | 3686 | |
|---|
| .. | .. |
|---|
| 3395 | 3777 | |
|---|
| 3396 | 3778 | int rdma_listen(struct rdma_cm_id *id, int backlog) |
|---|
| 3397 | 3779 | { |
|---|
| 3398 | | - struct rdma_id_private *id_priv; |
|---|
| 3780 | + struct rdma_id_private *id_priv = |
|---|
| 3781 | + container_of(id, struct rdma_id_private, id); |
|---|
| 3399 | 3782 | int ret; |
|---|
| 3400 | 3783 | |
|---|
| 3401 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 3402 | | - if (id_priv->state == RDMA_CM_IDLE) { |
|---|
| 3403 | | - id->route.addr.src_addr.ss_family = AF_INET; |
|---|
| 3404 | | - ret = rdma_bind_addr(id, cma_src_addr(id_priv)); |
|---|
| 3784 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { |
|---|
| 3785 | + struct sockaddr_in any_in = { |
|---|
| 3786 | + .sin_family = AF_INET, |
|---|
| 3787 | + .sin_addr.s_addr = htonl(INADDR_ANY), |
|---|
| 3788 | + }; |
|---|
| 3789 | + |
|---|
| 3790 | + /* For a well-behaved ULP, the state will be RDMA_CM_IDLE */ |
|---|
| 3791 | + ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); |
|---|
| 3405 | 3792 | if (ret) |
|---|
| 3406 | 3793 | return ret; |
|---|
| 3794 | + if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, |
|---|
| 3795 | + RDMA_CM_LISTEN))) |
|---|
| 3796 | + return -EINVAL; |
|---|
| 3407 | 3797 | } |
|---|
| 3408 | 3798 | |
|---|
| 3409 | | - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) |
|---|
| 3410 | | - return -EINVAL; |
|---|
| 3411 | | - |
|---|
| 3799 | + /* |
|---|
| 3800 | + * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be |
|---|
| 3801 | + * reusable, and must be unique in the bind list. |
|---|
| 3802 | + */ |
|---|
| 3412 | 3803 | if (id_priv->reuseaddr) { |
|---|
| 3413 | | - ret = cma_bind_listen(id_priv); |
|---|
| 3804 | + mutex_lock(&lock); |
|---|
| 3805 | + ret = cma_check_port(id_priv->bind_list, id_priv, 0); |
|---|
| 3806 | + if (!ret) |
|---|
| 3807 | + id_priv->reuseaddr = 0; |
|---|
| 3808 | + mutex_unlock(&lock); |
|---|
| 3414 | 3809 | if (ret) |
|---|
| 3415 | 3810 | goto err; |
|---|
| 3416 | 3811 | } |
|---|
| 3417 | 3812 | |
|---|
| 3418 | 3813 | id_priv->backlog = backlog; |
|---|
| 3419 | | - if (id->device) { |
|---|
| 3814 | + if (id_priv->cma_dev) { |
|---|
| 3420 | 3815 | if (rdma_cap_ib_cm(id->device, 1)) { |
|---|
| 3421 | 3816 | ret = cma_ib_listen(id_priv); |
|---|
| 3422 | 3817 | if (ret) |
|---|
| .. | .. |
|---|
| 3429 | 3824 | ret = -ENOSYS; |
|---|
| 3430 | 3825 | goto err; |
|---|
| 3431 | 3826 | } |
|---|
| 3432 | | - } else |
|---|
| 3433 | | - cma_listen_on_all(id_priv); |
|---|
| 3827 | + } else { |
|---|
| 3828 | + ret = cma_listen_on_all(id_priv); |
|---|
| 3829 | + if (ret) |
|---|
| 3830 | + goto err; |
|---|
| 3831 | + } |
|---|
| 3434 | 3832 | |
|---|
| 3435 | 3833 | return 0; |
|---|
| 3436 | 3834 | err: |
|---|
| 3437 | 3835 | id_priv->backlog = 0; |
|---|
| 3836 | + /* |
|---|
| 3837 | + * All the failure paths that lead here will not allow the req_handlers |
|---|
| 3838 | + * to have run. |
|---|
| 3839 | + */ |
|---|
| 3438 | 3840 | cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); |
|---|
| 3439 | 3841 | return ret; |
|---|
| 3440 | 3842 | } |
|---|
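The matching passive side, which exercises the new auto-bind branch above if the bind step is skipped on an IDLE ID; names and the backlog value are illustrative:

```c
#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Bind to src, then listen; skipping rdma_bind_addr() entirely would
 * trigger rdma_listen()'s IPv4 wildcard auto-bind branch. Incoming REQs
 * then arrive at my_handler as RDMA_CM_EVENT_CONNECT_REQUEST events.
 */
static struct rdma_cm_id *start_listen(rdma_cm_event_handler my_handler,
				       void *ctx, struct sockaddr *src)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, my_handler, ctx, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ret = rdma_bind_addr(id, src);
	if (!ret)
		ret = rdma_listen(id, 16 /* backlog */);
	if (ret) {
		rdma_destroy_id(id);
		return ERR_PTR(ret);
	}
	return id;
}
```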
| .. | .. |
|---|
| 3464 | 3866 | if (ret) |
|---|
| 3465 | 3867 | goto err1; |
|---|
| 3466 | 3868 | |
|---|
| 3467 | | - ret = cma_acquire_dev(id_priv, NULL); |
|---|
| 3869 | + ret = cma_acquire_dev_by_src_ip(id_priv); |
|---|
| 3468 | 3870 | if (ret) |
|---|
| 3469 | 3871 | goto err1; |
|---|
| 3470 | 3872 | } |
|---|
| .. | .. |
|---|
| 3487 | 3889 | if (ret) |
|---|
| 3488 | 3890 | goto err2; |
|---|
| 3489 | 3891 | |
|---|
| 3892 | + if (!cma_any_addr(addr)) |
|---|
| 3893 | + rdma_restrack_add(&id_priv->res); |
|---|
| 3490 | 3894 | return 0; |
|---|
| 3491 | 3895 | err2: |
|---|
| 3492 | | - rdma_restrack_del(&id_priv->res); |
|---|
| 3493 | 3896 | if (id_priv->cma_dev) |
|---|
| 3494 | 3897 | cma_release_dev(id_priv); |
|---|
| 3495 | 3898 | err1: |
|---|
| .. | .. |
|---|
| 3535 | 3938 | struct rdma_cm_event event = {}; |
|---|
| 3536 | 3939 | const struct ib_cm_sidr_rep_event_param *rep = |
|---|
| 3537 | 3940 | &ib_event->param.sidr_rep_rcvd; |
|---|
| 3538 | | - int ret = 0; |
|---|
| 3941 | + int ret; |
|---|
| 3539 | 3942 | |
|---|
| 3540 | 3943 | mutex_lock(&id_priv->handler_mutex); |
|---|
| 3541 | | - if (id_priv->state != RDMA_CM_CONNECT) |
|---|
| 3944 | + if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
|---|
| 3542 | 3945 | goto out; |
|---|
| 3543 | 3946 | |
|---|
| 3544 | 3947 | switch (ib_event->event) { |
|---|
| .. | .. |
|---|
| 3579 | 3982 | goto out; |
|---|
| 3580 | 3983 | } |
|---|
| 3581 | 3984 | |
|---|
| 3582 | | - ret = id_priv->id.event_handler(&id_priv->id, &event); |
|---|
| 3985 | + ret = cma_cm_event_handler(id_priv, &event); |
|---|
| 3583 | 3986 | |
|---|
| 3584 | 3987 | rdma_destroy_ah_attr(&event.param.ud.ah_attr); |
|---|
| 3585 | 3988 | if (ret) { |
|---|
| 3586 | 3989 | /* Destroy the CM ID by returning a non-zero value. */ |
|---|
| 3587 | 3990 | id_priv->cm_id.ib = NULL; |
|---|
| 3588 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 3589 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 3590 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 3991 | + destroy_id_handler_unlock(id_priv); |
|---|
| 3591 | 3992 | return ret; |
|---|
| 3592 | 3993 | } |
|---|
| 3593 | 3994 | out: |
|---|
| 3594 | 3995 | mutex_unlock(&id_priv->handler_mutex); |
|---|
| 3595 | | - return ret; |
|---|
| 3996 | + return 0; |
|---|
| 3596 | 3997 | } |
|---|
| 3597 | 3998 | |
|---|
| 3598 | 3999 | static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, |
|---|
| .. | .. |
|---|
| 3643 | 4044 | req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); |
|---|
| 3644 | 4045 | req.max_cm_retries = CMA_MAX_CM_RETRIES; |
|---|
| 3645 | 4046 | |
|---|
| 4047 | + trace_cm_send_sidr_req(id_priv); |
|---|
| 3646 | 4048 | ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); |
|---|
| 3647 | 4049 | if (ret) { |
|---|
| 3648 | 4050 | ib_destroy_cm_id(id_priv->cm_id.ib); |
|---|
| .. | .. |
|---|
| 3715 | 4117 | req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; |
|---|
| 3716 | 4118 | req.max_cm_retries = CMA_MAX_CM_RETRIES; |
|---|
| 3717 | 4119 | req.srq = id_priv->srq ? 1 : 0; |
|---|
| 4120 | + req.ece.vendor_id = id_priv->ece.vendor_id; |
|---|
| 4121 | + req.ece.attr_mod = id_priv->ece.attr_mod; |
|---|
| 3718 | 4122 | |
|---|
| 4123 | + trace_cm_send_req(id_priv); |
|---|
| 3719 | 4124 | ret = ib_send_cm_req(id_priv->cm_id.ib, &req); |
|---|
| 3720 | 4125 | out: |
|---|
| 3721 | 4126 | if (ret && !IS_ERR(id)) { |
|---|
| .. | .. |
|---|
| 3738 | 4143 | if (IS_ERR(cm_id)) |
|---|
| 3739 | 4144 | return PTR_ERR(cm_id); |
|---|
| 3740 | 4145 | |
|---|
| 4146 | + mutex_lock(&id_priv->qp_mutex); |
|---|
| 3741 | 4147 | cm_id->tos = id_priv->tos; |
|---|
| 4148 | + cm_id->tos_set = id_priv->tos_set; |
|---|
| 4149 | + mutex_unlock(&id_priv->qp_mutex); |
|---|
| 4150 | + |
|---|
| 3742 | 4151 | id_priv->cm_id.iw = cm_id; |
|---|
| 3743 | 4152 | |
|---|
| 3744 | 4153 | memcpy(&cm_id->local_addr, cma_src_addr(id_priv), |
|---|
| .. | .. |
|---|
| 3769 | 4178 | return ret; |
|---|
| 3770 | 4179 | } |
|---|
| 3771 | 4180 | |
|---|
| 3772 | | -int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) |
|---|
| 4181 | +/** |
|---|
| 4182 | + * rdma_connect_locked - Initiate an active connection request. |
|---|
| 4183 | + * @id: Connection identifier to connect. |
|---|
| 4184 | + * @conn_param: Connection information used for connected QPs. |
|---|
| 4185 | + * |
|---|
| 4186 | + * Same as rdma_connect() but can only be called from the |
|---|
| 4187 | + * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. |
|---|
| 4188 | + */ |
|---|
| 4189 | +int rdma_connect_locked(struct rdma_cm_id *id, |
|---|
| 4190 | + struct rdma_conn_param *conn_param) |
|---|
| 3773 | 4191 | { |
|---|
| 3774 | | - struct rdma_id_private *id_priv; |
|---|
| 4192 | + struct rdma_id_private *id_priv = |
|---|
| 4193 | + container_of(id, struct rdma_id_private, id); |
|---|
| 3775 | 4194 | int ret; |
|---|
| 3776 | 4195 | |
|---|
| 3777 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 3778 | 4196 | if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) |
|---|
| 3779 | 4197 | return -EINVAL; |
|---|
| 3780 | 4198 | |
|---|
| .. | .. |
|---|
| 3793 | 4211 | else |
|---|
| 3794 | 4212 | ret = -ENOSYS; |
|---|
| 3795 | 4213 | if (ret) |
|---|
| 3796 | | - goto err; |
|---|
| 3797 | | - |
|---|
| 4214 | + goto err_state; |
|---|
| 3798 | 4215 | return 0; |
|---|
| 3799 | | -err: |
|---|
| 4216 | +err_state: |
|---|
| 3800 | 4217 | cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); |
|---|
| 3801 | 4218 | return ret; |
|---|
| 3802 | 4219 | } |
|---|
| 4220 | +EXPORT_SYMBOL(rdma_connect_locked); |
|---|
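As a usage illustration of the new entry point (a sketch, not part of the patch): inside an rdma_cm event handler the handler_mutex is already held, so a kernel ULP connecting from its RDMA_CM_EVENT_ROUTE_RESOLVED callback must use rdma_connect_locked() rather than rdma_connect(). The handler name and retry values below are hypothetical:

```c
#include <rdma/rdma_cm.h>

/* Hypothetical ULP event handler; only the route-resolved arm is shown. */
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param param = {};
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                param.retry_count = 7;
                param.rnr_retry_count = 7;
                /* handler_mutex is held here, so use the _locked variant. */
                ret = rdma_connect_locked(id, &param);
                break;
        default:
                break;
        }
        /* Returning nonzero tells the CM core to destroy this id. */
        return ret;
}
```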
| 4221 | + |
|---|
| 4222 | +/** |
|---|
| 4223 | + * rdma_connect - Initiate an active connection request. |
|---|
| 4224 | + * @id: Connection identifier to connect. |
|---|
| 4225 | + * @conn_param: Connection information used for connected QPs. |
|---|
| 4226 | + * |
|---|
| 4227 | + * Users must have resolved a route for the rdma_cm_id by calling |
|---|
| 4228 | + * rdma_resolve_route() before invoking this routine. |
|---|
| 4229 | + * |
|---|
| 4230 | + * This call will either connect to a remote QP or obtain remote QP information |
|---|
| 4231 | + * for unconnected rdma_cm_ids. The actual operation is based on the |
|---|
| 4232 | + * rdma_cm_id's port space. |
|---|
| 4233 | + */ |
|---|
| 4234 | +int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) |
|---|
| 4235 | +{ |
|---|
| 4236 | + struct rdma_id_private *id_priv = |
|---|
| 4237 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4238 | + int ret; |
|---|
| 4239 | + |
|---|
| 4240 | + mutex_lock(&id_priv->handler_mutex); |
|---|
| 4241 | + ret = rdma_connect_locked(id, conn_param); |
|---|
| 4242 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4243 | + return ret; |
|---|
| 4244 | +} |
|---|
| 3803 | 4245 | EXPORT_SYMBOL(rdma_connect); |
|---|
| 4246 | + |
|---|
| 4247 | +/** |
|---|
| 4248 | + * rdma_connect_ece - Initiate an active connection request with ECE data. |
|---|
| 4249 | + * @id: Connection identifier to connect. |
|---|
| 4250 | + * @conn_param: Connection information used for connected QPs. |
|---|
| 4251 | + * @ece: Enhanced Connection Establishment (ECE) parameters |
|---|
| 4252 | + * |
|---|
| 4253 | + * See rdma_connect() explanation. |
|---|
| 4254 | + */ |
|---|
| 4255 | +int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, |
|---|
| 4256 | + struct rdma_ucm_ece *ece) |
|---|
| 4257 | +{ |
|---|
| 4258 | + struct rdma_id_private *id_priv = |
|---|
| 4259 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4260 | + |
|---|
| 4261 | + id_priv->ece.vendor_id = ece->vendor_id; |
|---|
| 4262 | + id_priv->ece.attr_mod = ece->attr_mod; |
|---|
| 4263 | + |
|---|
| 4264 | + return rdma_connect(id, conn_param); |
|---|
| 4265 | +} |
|---|
| 4266 | +EXPORT_SYMBOL(rdma_connect_ece); |
|---|
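A hedged sketch of the ECE path: the caller's vendor-specific values are stashed in the id and copied into the CM REQ (see req.ece above). MY_VENDOR_ID and MY_ATTR_MOD are hypothetical ULP constants, not real kernel symbols:

```c
/* Sketch only; MY_VENDOR_ID and MY_ATTR_MOD are placeholders. */
static int my_connect_with_ece(struct rdma_cm_id *id,
                               struct rdma_conn_param *conn_param)
{
        struct rdma_ucm_ece ece = {
                .vendor_id = MY_VENDOR_ID,
                .attr_mod = MY_ATTR_MOD,
        };

        return rdma_connect_ece(id, conn_param, &ece);
}
```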
| 3804 | 4267 | |
|---|
| 3805 | 4268 | static int cma_accept_ib(struct rdma_id_private *id_priv, |
|---|
| 3806 | 4269 | struct rdma_conn_param *conn_param) |
|---|
| .. | .. |
|---|
| 3827 | 4290 | rep.flow_control = conn_param->flow_control; |
|---|
| 3828 | 4291 | rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); |
|---|
| 3829 | 4292 | rep.srq = id_priv->srq ? 1 : 0; |
|---|
| 4293 | + rep.ece.vendor_id = id_priv->ece.vendor_id; |
|---|
| 4294 | + rep.ece.attr_mod = id_priv->ece.attr_mod; |
|---|
| 3830 | 4295 | |
|---|
| 4296 | + trace_cm_send_rep(id_priv); |
|---|
| 3831 | 4297 | ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); |
|---|
| 3832 | 4298 | out: |
|---|
| 3833 | 4299 | return ret; |
|---|
| .. | .. |
|---|
| 3868 | 4334 | memset(&rep, 0, sizeof rep); |
|---|
| 3869 | 4335 | rep.status = status; |
|---|
| 3870 | 4336 | if (status == IB_SIDR_SUCCESS) { |
|---|
| 3871 | | - ret = cma_set_qkey(id_priv, qkey); |
|---|
| 4337 | + if (qkey) |
|---|
| 4338 | + ret = cma_set_qkey(id_priv, qkey); |
|---|
| 4339 | + else |
|---|
| 4340 | + ret = cma_set_default_qkey(id_priv); |
|---|
| 3872 | 4341 | if (ret) |
|---|
| 3873 | 4342 | return ret; |
|---|
| 3874 | 4343 | rep.qp_num = id_priv->qp_num; |
|---|
| 3875 | 4344 | rep.qkey = id_priv->qkey; |
|---|
| 4345 | + |
|---|
| 4346 | + rep.ece.vendor_id = id_priv->ece.vendor_id; |
|---|
| 4347 | + rep.ece.attr_mod = id_priv->ece.attr_mod; |
|---|
| 3876 | 4348 | } |
|---|
| 4349 | + |
|---|
| 3877 | 4350 | rep.private_data = private_data; |
|---|
| 3878 | 4351 | rep.private_data_len = private_data_len; |
|---|
| 3879 | 4352 | |
|---|
| 4353 | + trace_cm_send_sidr_rep(id_priv); |
|---|
| 3880 | 4354 | return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); |
|---|
| 3881 | 4355 | } |
|---|
| 3882 | 4356 | |
|---|
| 3883 | | -int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, |
|---|
| 3884 | | - const char *caller) |
|---|
| 4357 | +/** |
|---|
| 4358 | + * rdma_accept - Called to accept a connection request or response. |
|---|
| 4359 | + * @id: Connection identifier associated with the request. |
|---|
| 4360 | + * @conn_param: Information needed to establish the connection. This must be |
|---|
| 4361 | + * provided if accepting a connection request. If accepting a connection |
|---|
| 4362 | + * response, this parameter must be NULL. |
|---|
| 4363 | + * |
|---|
| 4364 | + * Typically, this routine is only called by the listener to accept a connection |
|---|
| 4365 | + * request. It must also be called on the active side of a connection if the |
|---|
| 4366 | + * user is performing their own QP transitions. |
|---|
| 4367 | + * |
|---|
| 4368 | + * On error, a reject message is sent to the remote side and the state of |
|---|
| 4369 | + * the QP associated with the id is modified to error, such that any |
|---|
| 4370 | + * previously posted receive buffers are flushed. |
|---|
| 4371 | + * |
|---|
| 4372 | + * This function is for use by kernel ULPs and must be called from within |
|---|
| 4373 | + * the handler callback (see also rdma_lock_handler()). |
|---|
| 4374 | + */ |
|---|
| 4375 | +int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) |
|---|
| 3885 | 4376 | { |
|---|
| 3886 | | - struct rdma_id_private *id_priv; |
|---|
| 4377 | + struct rdma_id_private *id_priv = |
|---|
| 4378 | + container_of(id, struct rdma_id_private, id); |
|---|
| 3887 | 4379 | int ret; |
|---|
| 3888 | 4380 | |
|---|
| 3889 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 4381 | + lockdep_assert_held(&id_priv->handler_mutex); |
|---|
| 3890 | 4382 | |
|---|
| 3891 | | - if (caller) |
|---|
| 3892 | | - id_priv->res.kern_name = caller; |
|---|
| 3893 | | - else |
|---|
| 3894 | | - rdma_restrack_set_task(&id_priv->res, current); |
|---|
| 3895 | | - |
|---|
| 3896 | | - if (!cma_comp(id_priv, RDMA_CM_CONNECT)) |
|---|
| 4383 | + if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) |
|---|
| 3897 | 4384 | return -EINVAL; |
|---|
| 3898 | 4385 | |
|---|
| 3899 | 4386 | if (!id->qp && conn_param) { |
|---|
| .. | .. |
|---|
| 3928 | 4415 | return 0; |
|---|
| 3929 | 4416 | reject: |
|---|
| 3930 | 4417 | cma_modify_qp_err(id_priv); |
|---|
| 3931 | | - rdma_reject(id, NULL, 0); |
|---|
| 4418 | + rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); |
|---|
| 3932 | 4419 | return ret; |
|---|
| 3933 | 4420 | } |
|---|
| 3934 | | -EXPORT_SYMBOL(__rdma_accept); |
|---|
| 4421 | +EXPORT_SYMBOL(rdma_accept); |
|---|
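To make the new locking contract concrete, a minimal sketch of the common case, where the listener accepts directly from its connect-request callback so that handler_mutex is naturally held; the handler name and credit values are hypothetical:

```c
/* Hypothetical listener callback accepting every incoming request. */
static int my_listen_handler(struct rdma_cm_id *id,
                             struct rdma_cm_event *event)
{
        struct rdma_conn_param param = {};

        if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
                return 0;

        param.responder_resources = 1;
        param.initiator_depth = 1;
        /*
         * Called from the handler callback, so the lockdep assertion in
         * rdma_accept() is satisfied. On failure the core has already
         * sent a reject and moved the QP to the error state.
         */
        return rdma_accept(id, &param);
}
```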
| 4422 | + |
|---|
| 4423 | +int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, |
|---|
| 4424 | + struct rdma_ucm_ece *ece) |
|---|
| 4425 | +{ |
|---|
| 4426 | + struct rdma_id_private *id_priv = |
|---|
| 4427 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4428 | + |
|---|
| 4429 | + id_priv->ece.vendor_id = ece->vendor_id; |
|---|
| 4430 | + id_priv->ece.attr_mod = ece->attr_mod; |
|---|
| 4431 | + |
|---|
| 4432 | + return rdma_accept(id, conn_param); |
|---|
| 4433 | +} |
|---|
| 4434 | +EXPORT_SYMBOL(rdma_accept_ece); |
|---|
| 4435 | + |
|---|
| 4436 | +void rdma_lock_handler(struct rdma_cm_id *id) |
|---|
| 4437 | +{ |
|---|
| 4438 | + struct rdma_id_private *id_priv = |
|---|
| 4439 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4440 | + |
|---|
| 4441 | + mutex_lock(&id_priv->handler_mutex); |
|---|
| 4442 | +} |
|---|
| 4443 | +EXPORT_SYMBOL(rdma_lock_handler); |
|---|
| 4444 | + |
|---|
| 4445 | +void rdma_unlock_handler(struct rdma_cm_id *id) |
|---|
| 4446 | +{ |
|---|
| 4447 | + struct rdma_id_private *id_priv = |
|---|
| 4448 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4449 | + |
|---|
| 4450 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4451 | +} |
|---|
| 4452 | +EXPORT_SYMBOL(rdma_unlock_handler); |
|---|
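These two helpers exist for ULPs that defer the accept out of the handler callback, for example until asynchronous resource setup completes. A minimal sketch, assuming the id was saved from an earlier CONNECT_REQUEST event:

```c
/* Sketch: deferred accept from process context; "saved_id" is hypothetical. */
static int my_deferred_accept(struct rdma_cm_id *saved_id,
                              struct rdma_conn_param *param)
{
        int ret;

        rdma_lock_handler(saved_id);    /* takes the id's handler_mutex */
        ret = rdma_accept(saved_id, param);
        rdma_unlock_handler(saved_id);
        return ret;
}
```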
| 3935 | 4453 | |
|---|
| 3936 | 4454 | int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) |
|---|
| 3937 | 4455 | { |
|---|
| .. | .. |
|---|
| 3955 | 4473 | EXPORT_SYMBOL(rdma_notify); |
|---|
| 3956 | 4474 | |
|---|
| 3957 | 4475 | int rdma_reject(struct rdma_cm_id *id, const void *private_data, |
|---|
| 3958 | | - u8 private_data_len) |
|---|
| 4476 | + u8 private_data_len, u8 reason) |
|---|
| 3959 | 4477 | { |
|---|
| 3960 | 4478 | struct rdma_id_private *id_priv; |
|---|
| 3961 | 4479 | int ret; |
|---|
| .. | .. |
|---|
| 3965 | 4483 | return -EINVAL; |
|---|
| 3966 | 4484 | |
|---|
| 3967 | 4485 | if (rdma_cap_ib_cm(id->device, id->port_num)) { |
|---|
| 3968 | | - if (id->qp_type == IB_QPT_UD) |
|---|
| 4486 | + if (id->qp_type == IB_QPT_UD) { |
|---|
| 3969 | 4487 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, |
|---|
| 3970 | 4488 | private_data, private_data_len); |
|---|
| 3971 | | - else |
|---|
| 3972 | | - ret = ib_send_cm_rej(id_priv->cm_id.ib, |
|---|
| 3973 | | - IB_CM_REJ_CONSUMER_DEFINED, NULL, |
|---|
| 3974 | | - 0, private_data, private_data_len); |
|---|
| 4489 | + } else { |
|---|
| 4490 | + trace_cm_send_rej(id_priv); |
|---|
| 4491 | + ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, |
|---|
| 4492 | + private_data, private_data_len); |
|---|
| 4493 | + } |
|---|
| 3975 | 4494 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
|---|
| 3976 | 4495 | ret = iw_cm_reject(id_priv->cm_id.iw, |
|---|
| 3977 | 4496 | private_data, private_data_len); |
|---|
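Since rdma_reject() now takes the CM reject reason explicitly, callers that previously relied on the implicit consumer-defined reason must pass it themselves, exactly as the reject path in rdma_accept() above does:

```c
rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
```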
| .. | .. |
|---|
| 3996 | 4515 | if (ret) |
|---|
| 3997 | 4516 | goto out; |
|---|
| 3998 | 4517 | /* Initiate or respond to a disconnect. */ |
|---|
| 3999 | | - if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) |
|---|
| 4000 | | - ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); |
|---|
| 4518 | + trace_cm_disconnect(id_priv); |
|---|
| 4519 | + if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { |
|---|
| 4520 | + if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) |
|---|
| 4521 | + trace_cm_sent_drep(id_priv); |
|---|
| 4522 | + } else { |
|---|
| 4523 | + trace_cm_sent_dreq(id_priv); |
|---|
| 4524 | + } |
|---|
| 4001 | 4525 | } else if (rdma_cap_iw_cm(id->device, id->port_num)) { |
|---|
| 4002 | 4526 | ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); |
|---|
| 4003 | 4527 | } else |
|---|
| .. | .. |
|---|
| 4008 | 4532 | } |
|---|
| 4009 | 4533 | EXPORT_SYMBOL(rdma_disconnect); |
|---|
| 4010 | 4534 | |
|---|
| 4535 | +static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, |
|---|
| 4536 | + struct ib_sa_multicast *multicast, |
|---|
| 4537 | + struct rdma_cm_event *event, |
|---|
| 4538 | + struct cma_multicast *mc) |
|---|
| 4539 | +{ |
|---|
| 4540 | + struct rdma_dev_addr *dev_addr; |
|---|
| 4541 | + enum ib_gid_type gid_type; |
|---|
| 4542 | + struct net_device *ndev; |
|---|
| 4543 | + |
|---|
| 4544 | + if (status) |
|---|
| 4545 | + pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", |
|---|
| 4546 | + status); |
|---|
| 4547 | + |
|---|
| 4548 | + event->status = status; |
|---|
| 4549 | + event->param.ud.private_data = mc->context; |
|---|
| 4550 | + if (status) { |
|---|
| 4551 | + event->event = RDMA_CM_EVENT_MULTICAST_ERROR; |
|---|
| 4552 | + return; |
|---|
| 4553 | + } |
|---|
| 4554 | + |
|---|
| 4555 | + dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 4556 | + ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); |
|---|
| 4557 | + gid_type = |
|---|
| 4558 | + id_priv->cma_dev |
|---|
| 4559 | + ->default_gid_type[id_priv->id.port_num - |
|---|
| 4560 | + rdma_start_port( |
|---|
| 4561 | + id_priv->cma_dev->device)]; |
|---|
| 4562 | + |
|---|
| 4563 | + event->event = RDMA_CM_EVENT_MULTICAST_JOIN; |
|---|
| 4564 | + if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, |
|---|
| 4565 | + &multicast->rec, ndev, gid_type, |
|---|
| 4566 | + &event->param.ud.ah_attr)) { |
|---|
| 4567 | + event->event = RDMA_CM_EVENT_MULTICAST_ERROR; |
|---|
| 4568 | + goto out; |
|---|
| 4569 | + } |
|---|
| 4570 | + |
|---|
| 4571 | + event->param.ud.qp_num = 0xFFFFFF; |
|---|
| 4572 | + event->param.ud.qkey = id_priv->qkey; |
|---|
| 4573 | + |
|---|
| 4574 | +out: |
|---|
| 4575 | + if (ndev) |
|---|
| 4576 | + dev_put(ndev); |
|---|
| 4577 | +} |
|---|
| 4578 | + |
|---|
| 4011 | 4579 | static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) |
|---|
| 4012 | 4580 | { |
|---|
| 4013 | | - struct rdma_id_private *id_priv; |
|---|
| 4014 | 4581 | struct cma_multicast *mc = multicast->context; |
|---|
| 4582 | + struct rdma_id_private *id_priv = mc->id_priv; |
|---|
| 4015 | 4583 | struct rdma_cm_event event = {}; |
|---|
| 4016 | 4584 | int ret = 0; |
|---|
| 4017 | 4585 | |
|---|
| 4018 | | - id_priv = mc->id_priv; |
|---|
| 4019 | 4586 | mutex_lock(&id_priv->handler_mutex); |
|---|
| 4020 | | - if (id_priv->state != RDMA_CM_ADDR_BOUND && |
|---|
| 4021 | | - id_priv->state != RDMA_CM_ADDR_RESOLVED) |
|---|
| 4587 | + if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || |
|---|
| 4588 | + READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) |
|---|
| 4022 | 4589 | goto out; |
|---|
| 4023 | 4590 | |
|---|
| 4024 | | - if (!status) |
|---|
| 4025 | | - status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); |
|---|
| 4026 | | - else |
|---|
| 4027 | | - pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", |
|---|
| 4028 | | - status); |
|---|
| 4029 | | - event.status = status; |
|---|
| 4030 | | - event.param.ud.private_data = mc->context; |
|---|
| 4031 | | - if (!status) { |
|---|
| 4032 | | - struct rdma_dev_addr *dev_addr = |
|---|
| 4033 | | - &id_priv->id.route.addr.dev_addr; |
|---|
| 4034 | | - struct net_device *ndev = |
|---|
| 4035 | | - dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); |
|---|
| 4036 | | - enum ib_gid_type gid_type = |
|---|
| 4037 | | - id_priv->cma_dev->default_gid_type[id_priv->id.port_num - |
|---|
| 4038 | | - rdma_start_port(id_priv->cma_dev->device)]; |
|---|
| 4039 | | - |
|---|
| 4040 | | - event.event = RDMA_CM_EVENT_MULTICAST_JOIN; |
|---|
| 4041 | | - ret = ib_init_ah_from_mcmember(id_priv->id.device, |
|---|
| 4042 | | - id_priv->id.port_num, |
|---|
| 4043 | | - &multicast->rec, |
|---|
| 4044 | | - ndev, gid_type, |
|---|
| 4045 | | - &event.param.ud.ah_attr); |
|---|
| 4046 | | - if (ret) |
|---|
| 4047 | | - event.event = RDMA_CM_EVENT_MULTICAST_ERROR; |
|---|
| 4048 | | - |
|---|
| 4049 | | - event.param.ud.qp_num = 0xFFFFFF; |
|---|
| 4050 | | - event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); |
|---|
| 4051 | | - if (ndev) |
|---|
| 4052 | | - dev_put(ndev); |
|---|
| 4053 | | - } else |
|---|
| 4054 | | - event.event = RDMA_CM_EVENT_MULTICAST_ERROR; |
|---|
| 4055 | | - |
|---|
| 4056 | | - ret = id_priv->id.event_handler(&id_priv->id, &event); |
|---|
| 4057 | | - |
|---|
| 4058 | | - rdma_destroy_ah_attr(&event.param.ud.ah_attr); |
|---|
| 4059 | | - if (ret) { |
|---|
| 4060 | | - cma_exch(id_priv, RDMA_CM_DESTROYING); |
|---|
| 4061 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4062 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 4063 | | - return 0; |
|---|
| 4591 | + ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); |
|---|
| 4592 | + if (!ret) { |
|---|
| 4593 | + cma_make_mc_event(status, id_priv, multicast, &event, mc); |
|---|
| 4594 | + ret = cma_cm_event_handler(id_priv, &event); |
|---|
| 4064 | 4595 | } |
|---|
| 4596 | + rdma_destroy_ah_attr(&event.param.ud.ah_attr); |
|---|
| 4597 | + WARN_ON(ret); |
|---|
| 4065 | 4598 | |
|---|
| 4066 | 4599 | out: |
|---|
| 4067 | 4600 | mutex_unlock(&id_priv->handler_mutex); |
|---|
| .. | .. |
|---|
| 4112 | 4645 | if (ret) |
|---|
| 4113 | 4646 | return ret; |
|---|
| 4114 | 4647 | |
|---|
| 4115 | | - ret = cma_set_qkey(id_priv, 0); |
|---|
| 4116 | | - if (ret) |
|---|
| 4117 | | - return ret; |
|---|
| 4648 | + if (!id_priv->qkey) { |
|---|
| 4649 | + ret = cma_set_default_qkey(id_priv); |
|---|
| 4650 | + if (ret) |
|---|
| 4651 | + return ret; |
|---|
| 4652 | + } |
|---|
| 4118 | 4653 | |
|---|
| 4119 | 4654 | cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); |
|---|
| 4120 | 4655 | rec.qkey = cpu_to_be32(id_priv->qkey); |
|---|
| .. | .. |
|---|
| 4126 | 4661 | (!ib_sa_sendonly_fullmem_support(&sa_client, |
|---|
| 4127 | 4662 | id_priv->id.device, |
|---|
| 4128 | 4663 | id_priv->id.port_num))) { |
|---|
| 4129 | | - pr_warn("RDMA CM: %s port %u Unable to multicast join\n" |
|---|
| 4130 | | - "RDMA CM: SM doesn't support Send Only Full Member option\n", |
|---|
| 4131 | | - id_priv->id.device->name, id_priv->id.port_num); |
|---|
| 4664 | + dev_warn( |
|---|
| 4665 | + &id_priv->id.device->dev, |
|---|
| 4666 | + "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n", |
|---|
| 4667 | + id_priv->id.port_num); |
|---|
| 4132 | 4668 | return -EOPNOTSUPP; |
|---|
| 4133 | 4669 | } |
|---|
| 4134 | 4670 | |
|---|
| .. | .. |
|---|
| 4145 | 4681 | IB_SA_MCMEMBER_REC_MTU | |
|---|
| 4146 | 4682 | IB_SA_MCMEMBER_REC_HOP_LIMIT; |
|---|
| 4147 | 4683 | |
|---|
| 4148 | | - mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, |
|---|
| 4149 | | - id_priv->id.port_num, &rec, |
|---|
| 4150 | | - comp_mask, GFP_KERNEL, |
|---|
| 4151 | | - cma_ib_mc_handler, mc); |
|---|
| 4152 | | - return PTR_ERR_OR_ZERO(mc->multicast.ib); |
|---|
| 4153 | | -} |
|---|
| 4154 | | - |
|---|
| 4155 | | -static void iboe_mcast_work_handler(struct work_struct *work) |
|---|
| 4156 | | -{ |
|---|
| 4157 | | - struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); |
|---|
| 4158 | | - struct cma_multicast *mc = mw->mc; |
|---|
| 4159 | | - struct ib_sa_multicast *m = mc->multicast.ib; |
|---|
| 4160 | | - |
|---|
| 4161 | | - mc->multicast.ib->context = mc; |
|---|
| 4162 | | - cma_ib_mc_handler(0, m); |
|---|
| 4163 | | - kref_put(&mc->mcref, release_mc); |
|---|
| 4164 | | - kfree(mw); |
|---|
| 4684 | + mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, |
|---|
| 4685 | + id_priv->id.port_num, &rec, comp_mask, |
|---|
| 4686 | + GFP_KERNEL, cma_ib_mc_handler, mc); |
|---|
| 4687 | + return PTR_ERR_OR_ZERO(mc->sa_mc); |
|---|
| 4165 | 4688 | } |
|---|
| 4166 | 4689 | |
|---|
| 4167 | 4690 | static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, |
|---|
| .. | .. |
|---|
| 4196 | 4719 | static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, |
|---|
| 4197 | 4720 | struct cma_multicast *mc) |
|---|
| 4198 | 4721 | { |
|---|
| 4199 | | - struct iboe_mcast_work *work; |
|---|
| 4200 | 4722 | struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 4201 | 4723 | int err = 0; |
|---|
| 4202 | 4724 | struct sockaddr *addr = (struct sockaddr *)&mc->addr; |
|---|
| 4203 | 4725 | struct net_device *ndev = NULL; |
|---|
| 4726 | + struct ib_sa_multicast ib = {}; |
|---|
| 4204 | 4727 | enum ib_gid_type gid_type; |
|---|
| 4205 | 4728 | bool send_only; |
|---|
| 4206 | 4729 | |
|---|
| 4207 | 4730 | send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); |
|---|
| 4208 | 4731 | |
|---|
| 4209 | | - if (cma_zero_addr((struct sockaddr *)&mc->addr)) |
|---|
| 4732 | + if (cma_zero_addr(addr)) |
|---|
| 4210 | 4733 | return -EINVAL; |
|---|
| 4211 | | - |
|---|
| 4212 | | - work = kzalloc(sizeof *work, GFP_KERNEL); |
|---|
| 4213 | | - if (!work) |
|---|
| 4214 | | - return -ENOMEM; |
|---|
| 4215 | | - |
|---|
| 4216 | | - mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); |
|---|
| 4217 | | - if (!mc->multicast.ib) { |
|---|
| 4218 | | - err = -ENOMEM; |
|---|
| 4219 | | - goto out1; |
|---|
| 4220 | | - } |
|---|
| 4221 | 4734 | |
|---|
| 4222 | 4735 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - |
|---|
| 4223 | 4736 | rdma_start_port(id_priv->cma_dev->device)]; |
|---|
| 4224 | | - cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type); |
|---|
| 4737 | + cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); |
|---|
| 4225 | 4738 | |
|---|
| 4226 | | - mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); |
|---|
| 4227 | | - if (id_priv->id.ps == RDMA_PS_UDP) |
|---|
| 4228 | | - mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); |
|---|
| 4229 | | - |
|---|
| 4739 | + ib.rec.pkey = cpu_to_be16(0xffff); |
|---|
| 4230 | 4740 | if (dev_addr->bound_dev_if) |
|---|
| 4231 | 4741 | ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); |
|---|
| 4232 | | - if (!ndev) { |
|---|
| 4233 | | - err = -ENODEV; |
|---|
| 4234 | | - goto out2; |
|---|
| 4235 | | - } |
|---|
| 4236 | | - mc->multicast.ib->rec.rate = iboe_get_rate(ndev); |
|---|
| 4237 | | - mc->multicast.ib->rec.hop_limit = 1; |
|---|
| 4238 | | - mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); |
|---|
| 4742 | + if (!ndev) |
|---|
| 4743 | + return -ENODEV; |
|---|
| 4744 | + |
|---|
| 4745 | + ib.rec.rate = IB_RATE_PORT_CURRENT; |
|---|
| 4746 | + ib.rec.hop_limit = 1; |
|---|
| 4747 | + ib.rec.mtu = iboe_get_mtu(ndev->mtu); |
|---|
| 4239 | 4748 | |
|---|
| 4240 | 4749 | if (addr->sa_family == AF_INET) { |
|---|
| 4241 | 4750 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { |
|---|
| 4242 | | - mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; |
|---|
| 4751 | + ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; |
|---|
| 4243 | 4752 | if (!send_only) { |
|---|
| 4244 | | - err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, |
|---|
| 4753 | + err = cma_igmp_send(ndev, &ib.rec.mgid, |
|---|
| 4245 | 4754 | true); |
|---|
| 4246 | 4755 | } |
|---|
| 4247 | 4756 | } |
|---|
| .. | .. |
|---|
| 4250 | 4759 | err = -ENOTSUPP; |
|---|
| 4251 | 4760 | } |
|---|
| 4252 | 4761 | dev_put(ndev); |
|---|
| 4253 | | - if (err || !mc->multicast.ib->rec.mtu) { |
|---|
| 4254 | | - if (!err) |
|---|
| 4255 | | - err = -EINVAL; |
|---|
| 4256 | | - goto out2; |
|---|
| 4257 | | - } |
|---|
| 4762 | + if (err || !ib.rec.mtu) |
|---|
| 4763 | + return err ?: -EINVAL; |
|---|
| 4764 | + |
|---|
| 4765 | + if (!id_priv->qkey) |
|---|
| 4766 | + cma_set_default_qkey(id_priv); |
|---|
| 4767 | + |
|---|
| 4258 | 4768 | rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, |
|---|
| 4259 | | - &mc->multicast.ib->rec.port_gid); |
|---|
| 4260 | | - work->id = id_priv; |
|---|
| 4261 | | - work->mc = mc; |
|---|
| 4262 | | - INIT_WORK(&work->work, iboe_mcast_work_handler); |
|---|
| 4263 | | - kref_get(&mc->mcref); |
|---|
| 4264 | | - queue_work(cma_wq, &work->work); |
|---|
| 4265 | | - |
|---|
| 4769 | + &ib.rec.port_gid); |
|---|
| 4770 | + INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); |
|---|
| 4771 | + cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); |
|---|
| 4772 | + queue_work(cma_wq, &mc->iboe_join.work); |
|---|
| 4266 | 4773 | return 0; |
|---|
| 4267 | | - |
|---|
| 4268 | | -out2: |
|---|
| 4269 | | - kfree(mc->multicast.ib); |
|---|
| 4270 | | -out1: |
|---|
| 4271 | | - kfree(work); |
|---|
| 4272 | | - return err; |
|---|
| 4273 | 4774 | } |
|---|
| 4274 | 4775 | |
|---|
| 4275 | 4776 | int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, |
|---|
| 4276 | 4777 | u8 join_state, void *context) |
|---|
| 4277 | 4778 | { |
|---|
| 4278 | | - struct rdma_id_private *id_priv; |
|---|
| 4779 | + struct rdma_id_private *id_priv = |
|---|
| 4780 | + container_of(id, struct rdma_id_private, id); |
|---|
| 4279 | 4781 | struct cma_multicast *mc; |
|---|
| 4280 | 4782 | int ret; |
|---|
| 4281 | 4783 | |
|---|
| .. | .. |
|---|
| 4283 | 4785 | if (WARN_ON(id->qp)) |
|---|
| 4284 | 4786 | return -EINVAL; |
|---|
| 4285 | 4787 | |
|---|
| 4286 | | - if (!id->device) |
|---|
| 4788 | + /* ULP is calling this wrong. */ |
|---|
| 4789 | + if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && |
|---|
| 4790 | + READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) |
|---|
| 4287 | 4791 | return -EINVAL; |
|---|
| 4288 | 4792 | |
|---|
| 4289 | | - id_priv = container_of(id, struct rdma_id_private, id); |
|---|
| 4290 | | - if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && |
|---|
| 4291 | | - !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) |
|---|
| 4793 | + if (id_priv->id.qp_type != IB_QPT_UD) |
|---|
| 4292 | 4794 | return -EINVAL; |
|---|
| 4293 | 4795 | |
|---|
| 4294 | | - mc = kmalloc(sizeof *mc, GFP_KERNEL); |
|---|
| 4796 | + mc = kzalloc(sizeof(*mc), GFP_KERNEL); |
|---|
| 4295 | 4797 | if (!mc) |
|---|
| 4296 | 4798 | return -ENOMEM; |
|---|
| 4297 | 4799 | |
|---|
| .. | .. |
|---|
| 4301 | 4803 | mc->join_state = join_state; |
|---|
| 4302 | 4804 | |
|---|
| 4303 | 4805 | if (rdma_protocol_roce(id->device, id->port_num)) { |
|---|
| 4304 | | - kref_init(&mc->mcref); |
|---|
| 4305 | 4806 | ret = cma_iboe_join_multicast(id_priv, mc); |
|---|
| 4306 | 4807 | if (ret) |
|---|
| 4307 | 4808 | goto out_err; |
|---|
| .. | .. |
|---|
| 4349 | 4850 | static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) |
|---|
| 4350 | 4851 | { |
|---|
| 4351 | 4852 | struct rdma_dev_addr *dev_addr; |
|---|
| 4352 | | - struct cma_ndev_work *work; |
|---|
| 4853 | + struct cma_work *work; |
|---|
| 4353 | 4854 | |
|---|
| 4354 | 4855 | dev_addr = &id_priv->id.route.addr.dev_addr; |
|---|
| 4355 | 4856 | |
|---|
| .. | .. |
|---|
| 4362 | 4863 | if (!work) |
|---|
| 4363 | 4864 | return -ENOMEM; |
|---|
| 4364 | 4865 | |
|---|
| 4365 | | - INIT_WORK(&work->work, cma_ndev_work_handler); |
|---|
| 4866 | + INIT_WORK(&work->work, cma_work_handler); |
|---|
| 4366 | 4867 | work->id = id_priv; |
|---|
| 4367 | 4868 | work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; |
|---|
| 4368 | | - atomic_inc(&id_priv->refcount); |
|---|
| 4869 | + cma_id_get(id_priv); |
|---|
| 4369 | 4870 | queue_work(cma_wq, &work->work); |
|---|
| 4370 | 4871 | } |
|---|
| 4371 | 4872 | |
|---|
| .. | .. |
|---|
| 4403 | 4904 | .notifier_call = cma_netdev_callback |
|---|
| 4404 | 4905 | }; |
|---|
| 4405 | 4906 | |
|---|
| 4406 | | -static void cma_add_one(struct ib_device *device) |
|---|
| 4907 | +static void cma_send_device_removal_put(struct rdma_id_private *id_priv) |
|---|
| 4407 | 4908 | { |
|---|
| 4909 | + struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; |
|---|
| 4910 | + enum rdma_cm_state state; |
|---|
| 4911 | + unsigned long flags; |
|---|
| 4912 | + |
|---|
| 4913 | + mutex_lock(&id_priv->handler_mutex); |
|---|
| 4914 | + /* Record that we want to remove the device */ |
|---|
| 4915 | + spin_lock_irqsave(&id_priv->lock, flags); |
|---|
| 4916 | + state = id_priv->state; |
|---|
| 4917 | + if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { |
|---|
| 4918 | + spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 4919 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4920 | + cma_id_put(id_priv); |
|---|
| 4921 | + return; |
|---|
| 4922 | + } |
|---|
| 4923 | + id_priv->state = RDMA_CM_DEVICE_REMOVAL; |
|---|
| 4924 | + spin_unlock_irqrestore(&id_priv->lock, flags); |
|---|
| 4925 | + |
|---|
| 4926 | + if (cma_cm_event_handler(id_priv, &event)) { |
|---|
| 4927 | + /* |
|---|
| 4928 | + * At this point the ULP promises it won't call |
|---|
| 4929 | + * rdma_destroy_id() concurrently |
|---|
| 4930 | + */ |
|---|
| 4931 | + cma_id_put(id_priv); |
|---|
| 4932 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4933 | + trace_cm_id_destroy(id_priv); |
|---|
| 4934 | + _destroy_id(id_priv, state); |
|---|
| 4935 | + return; |
|---|
| 4936 | + } |
|---|
| 4937 | + mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4938 | + |
|---|
| 4939 | + /* |
|---|
| 4940 | + * If this races with destroy, then the thread that first moves the |
|---|
| 4941 | + * state to RDMA_CM_DESTROYING performs the cancel. |
|---|
| 4942 | + */ |
|---|
| 4943 | + cma_cancel_operation(id_priv, state); |
|---|
| 4944 | + cma_id_put(id_priv); |
|---|
| 4945 | +} |
|---|
| 4946 | + |
|---|
| 4947 | +static void cma_process_remove(struct cma_device *cma_dev) |
|---|
| 4948 | +{ |
|---|
| 4949 | + mutex_lock(&lock); |
|---|
| 4950 | + while (!list_empty(&cma_dev->id_list)) { |
|---|
| 4951 | + struct rdma_id_private *id_priv = list_first_entry( |
|---|
| 4952 | + &cma_dev->id_list, struct rdma_id_private, list); |
|---|
| 4953 | + |
|---|
| 4954 | + list_del(&id_priv->listen_list); |
|---|
| 4955 | + list_del_init(&id_priv->list); |
|---|
| 4956 | + cma_id_get(id_priv); |
|---|
| 4957 | + mutex_unlock(&lock); |
|---|
| 4958 | + |
|---|
| 4959 | + cma_send_device_removal_put(id_priv); |
|---|
| 4960 | + |
|---|
| 4961 | + mutex_lock(&lock); |
|---|
| 4962 | + } |
|---|
| 4963 | + mutex_unlock(&lock); |
|---|
| 4964 | + |
|---|
| 4965 | + cma_dev_put(cma_dev); |
|---|
| 4966 | + wait_for_completion(&cma_dev->comp); |
|---|
| 4967 | +} |
|---|
| 4968 | + |
|---|
| 4969 | +static int cma_add_one(struct ib_device *device) |
|---|
| 4970 | +{ |
|---|
| 4971 | + struct rdma_id_private *to_destroy; |
|---|
| 4408 | 4972 | struct cma_device *cma_dev; |
|---|
| 4409 | 4973 | struct rdma_id_private *id_priv; |
|---|
| 4410 | 4974 | unsigned int i; |
|---|
| 4411 | 4975 | unsigned long supported_gids = 0; |
|---|
| 4976 | + int ret; |
|---|
| 4412 | 4977 | |
|---|
| 4413 | | - cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); |
|---|
| 4978 | + cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); |
|---|
| 4414 | 4979 | if (!cma_dev) |
|---|
| 4415 | | - return; |
|---|
| 4980 | + return -ENOMEM; |
|---|
| 4416 | 4981 | |
|---|
| 4417 | 4982 | cma_dev->device = device; |
|---|
| 4418 | 4983 | cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, |
|---|
| 4419 | 4984 | sizeof(*cma_dev->default_gid_type), |
|---|
| 4420 | 4985 | GFP_KERNEL); |
|---|
| 4421 | | - if (!cma_dev->default_gid_type) |
|---|
| 4986 | + if (!cma_dev->default_gid_type) { |
|---|
| 4987 | + ret = -ENOMEM; |
|---|
| 4422 | 4988 | goto free_cma_dev; |
|---|
| 4989 | + } |
|---|
| 4423 | 4990 | |
|---|
| 4424 | 4991 | cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, |
|---|
| 4425 | 4992 | sizeof(*cma_dev->default_roce_tos), |
|---|
| 4426 | 4993 | GFP_KERNEL); |
|---|
| 4427 | | - if (!cma_dev->default_roce_tos) |
|---|
| 4994 | + if (!cma_dev->default_roce_tos) { |
|---|
| 4995 | + ret = -ENOMEM; |
|---|
| 4428 | 4996 | goto free_gid_type; |
|---|
| 4997 | + } |
|---|
| 4429 | 4998 | |
|---|
| 4430 | | - for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { |
|---|
| 4999 | + rdma_for_each_port(device, i) { |
|---|
| 4431 | 5000 | supported_gids = roce_gid_type_mask_support(device, i); |
|---|
| 4432 | 5001 | WARN_ON(!supported_gids); |
|---|
| 4433 | 5002 | if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) |
|---|
| .. | .. |
|---|
| 4440 | 5009 | } |
|---|
| 4441 | 5010 | |
|---|
| 4442 | 5011 | init_completion(&cma_dev->comp); |
|---|
| 4443 | | - atomic_set(&cma_dev->refcount, 1); |
|---|
| 5012 | + refcount_set(&cma_dev->refcount, 1); |
|---|
| 4444 | 5013 | INIT_LIST_HEAD(&cma_dev->id_list); |
|---|
| 4445 | 5014 | ib_set_client_data(device, &cma_client, cma_dev); |
|---|
| 4446 | 5015 | |
|---|
| 4447 | 5016 | mutex_lock(&lock); |
|---|
| 4448 | 5017 | list_add_tail(&cma_dev->list, &dev_list); |
|---|
| 4449 | | - list_for_each_entry(id_priv, &listen_any_list, list) |
|---|
| 4450 | | - cma_listen_on_dev(id_priv, cma_dev); |
|---|
| 5018 | + list_for_each_entry(id_priv, &listen_any_list, list) { |
|---|
| 5019 | + ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); |
|---|
| 5020 | + if (ret) |
|---|
| 5021 | + goto free_listen; |
|---|
| 5022 | + } |
|---|
| 4451 | 5023 | mutex_unlock(&lock); |
|---|
| 4452 | 5024 | |
|---|
| 4453 | | - return; |
|---|
| 5025 | + trace_cm_add_one(device); |
|---|
| 5026 | + return 0; |
|---|
| 4454 | 5027 | |
|---|
| 5028 | +free_listen: |
|---|
| 5029 | + list_del(&cma_dev->list); |
|---|
| 5030 | + mutex_unlock(&lock); |
|---|
| 5031 | + |
|---|
| 5032 | + /* cma_process_remove() will delete to_destroy */ |
|---|
| 5033 | + cma_process_remove(cma_dev); |
|---|
| 5034 | + kfree(cma_dev->default_roce_tos); |
|---|
| 4455 | 5035 | free_gid_type: |
|---|
| 4456 | 5036 | kfree(cma_dev->default_gid_type); |
|---|
| 4457 | 5037 | |
|---|
| 4458 | 5038 | free_cma_dev: |
|---|
| 4459 | 5039 | kfree(cma_dev); |
|---|
| 4460 | | - |
|---|
| 4461 | | - return; |
|---|
| 4462 | | -} |
|---|
| 4463 | | - |
|---|
| 4464 | | -static int cma_remove_id_dev(struct rdma_id_private *id_priv) |
|---|
| 4465 | | -{ |
|---|
| 4466 | | - struct rdma_cm_event event = {}; |
|---|
| 4467 | | - enum rdma_cm_state state; |
|---|
| 4468 | | - int ret = 0; |
|---|
| 4469 | | - |
|---|
| 4470 | | - /* Record that we want to remove the device */ |
|---|
| 4471 | | - state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); |
|---|
| 4472 | | - if (state == RDMA_CM_DESTROYING) |
|---|
| 4473 | | - return 0; |
|---|
| 4474 | | - |
|---|
| 4475 | | - cma_cancel_operation(id_priv, state); |
|---|
| 4476 | | - mutex_lock(&id_priv->handler_mutex); |
|---|
| 4477 | | - |
|---|
| 4478 | | - /* Check for destruction from another callback. */ |
|---|
| 4479 | | - if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) |
|---|
| 4480 | | - goto out; |
|---|
| 4481 | | - |
|---|
| 4482 | | - event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; |
|---|
| 4483 | | - ret = id_priv->id.event_handler(&id_priv->id, &event); |
|---|
| 4484 | | -out: |
|---|
| 4485 | | - mutex_unlock(&id_priv->handler_mutex); |
|---|
| 4486 | 5040 | return ret; |
|---|
| 4487 | | -} |
|---|
| 4488 | | - |
|---|
| 4489 | | -static void cma_process_remove(struct cma_device *cma_dev) |
|---|
| 4490 | | -{ |
|---|
| 4491 | | - struct rdma_id_private *id_priv; |
|---|
| 4492 | | - int ret; |
|---|
| 4493 | | - |
|---|
| 4494 | | - mutex_lock(&lock); |
|---|
| 4495 | | - while (!list_empty(&cma_dev->id_list)) { |
|---|
| 4496 | | - id_priv = list_entry(cma_dev->id_list.next, |
|---|
| 4497 | | - struct rdma_id_private, list); |
|---|
| 4498 | | - |
|---|
| 4499 | | - list_del(&id_priv->listen_list); |
|---|
| 4500 | | - list_del_init(&id_priv->list); |
|---|
| 4501 | | - atomic_inc(&id_priv->refcount); |
|---|
| 4502 | | - mutex_unlock(&lock); |
|---|
| 4503 | | - |
|---|
| 4504 | | - ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); |
|---|
| 4505 | | - cma_deref_id(id_priv); |
|---|
| 4506 | | - if (ret) |
|---|
| 4507 | | - rdma_destroy_id(&id_priv->id); |
|---|
| 4508 | | - |
|---|
| 4509 | | - mutex_lock(&lock); |
|---|
| 4510 | | - } |
|---|
| 4511 | | - mutex_unlock(&lock); |
|---|
| 4512 | | - |
|---|
| 4513 | | - cma_deref_dev(cma_dev); |
|---|
| 4514 | | - wait_for_completion(&cma_dev->comp); |
|---|
| 4515 | 5041 | } |
|---|
| 4516 | 5042 | |
|---|
| 4517 | 5043 | static void cma_remove_one(struct ib_device *device, void *client_data) |
|---|
| 4518 | 5044 | { |
|---|
| 4519 | 5045 | struct cma_device *cma_dev = client_data; |
|---|
| 4520 | 5046 | |
|---|
| 4521 | | - if (!cma_dev) |
|---|
| 4522 | | - return; |
|---|
| 5047 | + trace_cm_remove_one(device); |
|---|
| 4523 | 5048 | |
|---|
| 4524 | 5049 | mutex_lock(&lock); |
|---|
| 4525 | 5050 | list_del(&cma_dev->list); |
|---|
| .. | .. |
|---|
| 4531 | 5056 | kfree(cma_dev); |
|---|
| 4532 | 5057 | } |
|---|
| 4533 | 5058 | |
|---|
| 4534 | | -static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) |
|---|
| 4535 | | -{ |
|---|
| 4536 | | - struct nlmsghdr *nlh; |
|---|
| 4537 | | - struct rdma_cm_id_stats *id_stats; |
|---|
| 4538 | | - struct rdma_id_private *id_priv; |
|---|
| 4539 | | - struct rdma_cm_id *id = NULL; |
|---|
| 4540 | | - struct cma_device *cma_dev; |
|---|
| 4541 | | - int i_dev = 0, i_id = 0; |
|---|
| 4542 | | - |
|---|
| 4543 | | - /* |
|---|
| 4544 | | - * We export all of the IDs as a sequence of messages. Each |
|---|
| 4545 | | - * ID gets its own netlink message. |
|---|
| 4546 | | - */ |
|---|
| 4547 | | - mutex_lock(&lock); |
|---|
| 4548 | | - |
|---|
| 4549 | | - list_for_each_entry(cma_dev, &dev_list, list) { |
|---|
| 4550 | | - if (i_dev < cb->args[0]) { |
|---|
| 4551 | | - i_dev++; |
|---|
| 4552 | | - continue; |
|---|
| 4553 | | - } |
|---|
| 4554 | | - |
|---|
| 4555 | | - i_id = 0; |
|---|
| 4556 | | - list_for_each_entry(id_priv, &cma_dev->id_list, list) { |
|---|
| 4557 | | - if (i_id < cb->args[1]) { |
|---|
| 4558 | | - i_id++; |
|---|
| 4559 | | - continue; |
|---|
| 4560 | | - } |
|---|
| 4561 | | - |
|---|
| 4562 | | - id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, |
|---|
| 4563 | | - sizeof *id_stats, RDMA_NL_RDMA_CM, |
|---|
| 4564 | | - RDMA_NL_RDMA_CM_ID_STATS, |
|---|
| 4565 | | - NLM_F_MULTI); |
|---|
| 4566 | | - if (!id_stats) |
|---|
| 4567 | | - goto out; |
|---|
| 4568 | | - |
|---|
| 4569 | | - memset(id_stats, 0, sizeof *id_stats); |
|---|
| 4570 | | - id = &id_priv->id; |
|---|
| 4571 | | - id_stats->node_type = id->route.addr.dev_addr.dev_type; |
|---|
| 4572 | | - id_stats->port_num = id->port_num; |
|---|
| 4573 | | - id_stats->bound_dev_if = |
|---|
| 4574 | | - id->route.addr.dev_addr.bound_dev_if; |
|---|
| 4575 | | - |
|---|
| 4576 | | - if (ibnl_put_attr(skb, nlh, |
|---|
| 4577 | | - rdma_addr_size(cma_src_addr(id_priv)), |
|---|
| 4578 | | - cma_src_addr(id_priv), |
|---|
| 4579 | | - RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) |
|---|
| 4580 | | - goto out; |
|---|
| 4581 | | - if (ibnl_put_attr(skb, nlh, |
|---|
| 4582 | | - rdma_addr_size(cma_dst_addr(id_priv)), |
|---|
| 4583 | | - cma_dst_addr(id_priv), |
|---|
| 4584 | | - RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) |
|---|
| 4585 | | - goto out; |
|---|
| 4586 | | - |
|---|
| 4587 | | - id_stats->pid = task_pid_vnr(id_priv->res.task); |
|---|
| 4588 | | - id_stats->port_space = id->ps; |
|---|
| 4589 | | - id_stats->cm_state = id_priv->state; |
|---|
| 4590 | | - id_stats->qp_num = id_priv->qp_num; |
|---|
| 4591 | | - id_stats->qp_type = id->qp_type; |
|---|
| 4592 | | - |
|---|
| 4593 | | - i_id++; |
|---|
| 4594 | | - nlmsg_end(skb, nlh); |
|---|
| 4595 | | - } |
|---|
| 4596 | | - |
|---|
| 4597 | | - cb->args[1] = 0; |
|---|
| 4598 | | - i_dev++; |
|---|
| 4599 | | - } |
|---|
| 4600 | | - |
|---|
| 4601 | | -out: |
|---|
| 4602 | | - mutex_unlock(&lock); |
|---|
| 4603 | | - cb->args[0] = i_dev; |
|---|
| 4604 | | - cb->args[1] = i_id; |
|---|
| 4605 | | - |
|---|
| 4606 | | - return skb->len; |
|---|
| 4607 | | -} |
|---|
| 4608 | | - |
|---|
| 4609 | | -static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = { |
|---|
| 4610 | | - [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, |
|---|
| 4611 | | -}; |
|---|
| 4612 | | - |
|---|
| 4613 | 5059 | static int cma_init_net(struct net *net) |
|---|
| 4614 | 5060 | { |
|---|
| 4615 | 5061 | struct cma_pernet *pernet = cma_pernet(net); |
|---|
| 4616 | 5062 | |
|---|
| 4617 | | - idr_init(&pernet->tcp_ps); |
|---|
| 4618 | | - idr_init(&pernet->udp_ps); |
|---|
| 4619 | | - idr_init(&pernet->ipoib_ps); |
|---|
| 4620 | | - idr_init(&pernet->ib_ps); |
|---|
| 5063 | + xa_init(&pernet->tcp_ps); |
|---|
| 5064 | + xa_init(&pernet->udp_ps); |
|---|
| 5065 | + xa_init(&pernet->ipoib_ps); |
|---|
| 5066 | + xa_init(&pernet->ib_ps); |
|---|
| 4621 | 5067 | |
|---|
| 4622 | 5068 | return 0; |
|---|
| 4623 | 5069 | } |
|---|
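For context on the idr-to-xarray conversion, a sketch of the call pattern that replaces idr_alloc()/idr_find()/idr_remove() for these port-space tables; the helper names are illustrative, not from the patch:

```c
#include <linux/xarray.h>

/* "ps" stands in for one of the pernet port-space tables above. */
static int example_claim_port(struct xarray *ps, unsigned long snum,
                              void *bind_list)
{
        /* Fails with -EBUSY if the port number is already claimed. */
        return xa_insert(ps, snum, bind_list, GFP_KERNEL);
}

static void *example_find_port(struct xarray *ps, unsigned long snum)
{
        return xa_load(ps, snum);       /* lock-free RCU lookup */
}

static void example_release_port(struct xarray *ps, unsigned long snum)
{
        xa_erase(ps, snum);
}
```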
| .. | .. |
|---|
| 4626 | 5072 | { |
|---|
| 4627 | 5073 | struct cma_pernet *pernet = cma_pernet(net); |
|---|
| 4628 | 5074 | |
|---|
| 4629 | | - idr_destroy(&pernet->tcp_ps); |
|---|
| 4630 | | - idr_destroy(&pernet->udp_ps); |
|---|
| 4631 | | - idr_destroy(&pernet->ipoib_ps); |
|---|
| 4632 | | - idr_destroy(&pernet->ib_ps); |
|---|
| 5075 | + WARN_ON(!xa_empty(&pernet->tcp_ps)); |
|---|
| 5076 | + WARN_ON(!xa_empty(&pernet->udp_ps)); |
|---|
| 5077 | + WARN_ON(!xa_empty(&pernet->ipoib_ps)); |
|---|
| 5078 | + WARN_ON(!xa_empty(&pernet->ib_ps)); |
|---|
| 4633 | 5079 | } |
|---|
| 4634 | 5080 | |
|---|
| 4635 | 5081 | static struct pernet_operations cma_pernet_operations = { |
|---|
| .. | .. |
|---|
| 4671 | 5117 | if (ret) |
|---|
| 4672 | 5118 | goto err; |
|---|
| 4673 | 5119 | |
|---|
| 4674 | | - rdma_nl_register(RDMA_NL_RDMA_CM, cma_cb_table); |
|---|
| 4675 | | - cma_configfs_init(); |
|---|
| 5120 | + ret = cma_configfs_init(); |
|---|
| 5121 | + if (ret) |
|---|
| 5122 | + goto err_ib; |
|---|
| 4676 | 5123 | |
|---|
| 4677 | 5124 | return 0; |
|---|
| 4678 | 5125 | |
|---|
| 5126 | +err_ib: |
|---|
| 5127 | + ib_unregister_client(&cma_client); |
|---|
| 4679 | 5128 | err: |
|---|
| 4680 | 5129 | unregister_netdevice_notifier(&cma_nb); |
|---|
| 4681 | 5130 | ib_sa_unregister_client(&sa_client); |
|---|
| .. | .. |
|---|
| 4688 | 5137 | static void __exit cma_cleanup(void) |
|---|
| 4689 | 5138 | { |
|---|
| 4690 | 5139 | cma_configfs_exit(); |
|---|
| 4691 | | - rdma_nl_unregister(RDMA_NL_RDMA_CM); |
|---|
| 4692 | 5140 | ib_unregister_client(&cma_client); |
|---|
| 4693 | 5141 | unregister_netdevice_notifier(&cma_nb); |
|---|
| 4694 | 5142 | ib_sa_unregister_client(&sa_client); |
|---|
| 4695 | 5143 | unregister_pernet_subsys(&cma_pernet_operations); |
|---|
| 4696 | 5144 | destroy_workqueue(cma_wq); |
|---|
| 4697 | 5145 | } |
|---|
| 4698 | | - |
|---|
| 4699 | | -MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_RDMA_CM, 1); |
|---|
| 4700 | 5146 | |
|---|
| 4701 | 5147 | module_init(cma_init); |
|---|
| 4702 | 5148 | module_exit(cma_cleanup); |
|---|