| .. | .. |
| 54 | 54 | struct delayed_work timeout; |
| 55 | 55 | }; |
| 56 | 56 | |
| 57 | +struct rej_tmout_entry { |
| 58 | + int slave; |
| 59 | + u32 rem_pv_cm_id; |
| 60 | + struct delayed_work timeout; |
| 61 | + struct xarray *xa_rej_tmout; |
| 62 | +}; |
| 63 | + |
| 57 | 64 | struct cm_generic_msg { |
| 58 | 65 | struct ib_mad_hdr hdr; |
| 59 | 66 | |
| 60 | 67 | __be32 local_comm_id; |
| 61 | 68 | __be32 remote_comm_id; |
| 69 | + unsigned char unused[2]; |
| 70 | + __be16 rej_reason; |
| 62 | 71 | }; |
| 63 | 72 | |
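The two fields appended to cm_generic_msg give it a view of the REJ payload: after the two 4-byte comm IDs, the pair of `unused` bytes skips the message-rejected/reject-info-length bytes so that `rej_reason` overlays the big-endian Reason field of an IB CM REJ message. A hypothetical compile-time check (not part of the patch) would pin that layout:

```c
#include <linux/build_bug.h>	/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof, offsetofend */

/* Hypothetical sanity check, not in the patch: rej_reason must sit two
 * bytes past remote_comm_id, i.e. 10 bytes into the CM payload, which
 * is where the REJ Reason field lives on the wire. */
static inline void check_rej_reason_layout(void)
{
	BUILD_BUG_ON(offsetof(struct cm_generic_msg, rej_reason) !=
		     offsetofend(struct cm_generic_msg, remote_comm_id) + 2);
}
```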
| 64 | 73 | struct cm_sidr_generic_msg { |
| .. | .. |
| 168 | 177 | { |
| 169 | 178 | struct delayed_work *delay = to_delayed_work(work); |
| 170 | 179 | struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout); |
| 171 | | - struct id_map_entry *db_ent, *found_ent; |
| 180 | + struct id_map_entry *found_ent; |
| 172 | 181 | struct mlx4_ib_dev *dev = ent->dev; |
| 173 | 182 | struct mlx4_ib_sriov *sriov = &dev->sriov; |
| 174 | 183 | struct rb_root *sl_id_map = &sriov->sl_id_map; |
| 175 | | - int pv_id = (int) ent->pv_cm_id; |
| 176 | 184 | |
| 177 | 185 | spin_lock(&sriov->id_map_lock); |
| 178 | | - db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id); |
| 179 | | - if (!db_ent) |
| 186 | + if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id)) |
| 180 | 187 | goto out; |
| 181 | 188 | found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id); |
| 182 | 189 | if (found_ent && found_ent == ent) |
| 183 | 190 | rb_erase(&found_ent->node, sl_id_map); |
| 184 | | - idr_remove(&sriov->pv_id_table, pv_id); |
| 185 | 191 | |
| 186 | 192 | out: |
| 187 | 193 | list_del(&ent->list); |
| 188 | 194 | spin_unlock(&sriov->id_map_lock); |
| 189 | 195 | kfree(ent); |
| 190 | | -} |
| 191 | | - |
| 192 | | -static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) |
| 193 | | -{ |
| 194 | | - struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; |
| 195 | | - struct rb_root *sl_id_map = &sriov->sl_id_map; |
| 196 | | - struct id_map_entry *ent, *found_ent; |
| 197 | | - |
| 198 | | - spin_lock(&sriov->id_map_lock); |
| 199 | | - ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id); |
| 200 | | - if (!ent) |
| 201 | | - goto out; |
| 202 | | - found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id); |
| 203 | | - if (found_ent && found_ent == ent) |
| 204 | | - rb_erase(&found_ent->node, sl_id_map); |
| 205 | | - idr_remove(&sriov->pv_id_table, pv_cm_id); |
| 206 | | -out: |
| 207 | | - spin_unlock(&sriov->id_map_lock); |
| 208 | 196 | } |
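The rework above leans on xa_erase() returning whatever was stored at the index, which folds the old idr_find()-then-idr_remove() pair into a single call. A minimal sketch of the idiom, reusing the file's id_map_entry type with an illustrative function name:

```c
#include <linux/printk.h>
#include <linux/xarray.h>

/* Sketch: erase-and-test in one step. xa_erase() removes the entry at
 * the given index and hands back the old pointer, or NULL if nothing
 * was there, so no separate lookup is needed before the removal. */
static struct id_map_entry *erase_pv_entry(struct xarray *xa, u32 pv_cm_id)
{
	struct id_map_entry *old = xa_erase(xa, pv_cm_id);

	if (!old)
		pr_debug("pv_cm_id 0x%x was already gone\n", pv_cm_id);
	return old;
}
```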
| 209 | 197 | |
| 210 | 198 | static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) |
| .. | .. |
| 256 | 244 | ent->dev = to_mdev(ibdev); |
| 257 | 245 | INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout); |
| 258 | 246 | |
| 259 | | - idr_preload(GFP_KERNEL); |
| 260 | | - spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); |
| 261 | | - |
| 262 | | - ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT); |
| 247 | + ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent, |
| 248 | + xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL); |
| 263 | 249 | if (ret >= 0) { |
| 264 | | - ent->pv_cm_id = (u32)ret; |
| 250 | + spin_lock(&sriov->id_map_lock); |
| 265 | 251 | sl_id_map_add(ibdev, ent); |
| 266 | 252 | list_add_tail(&ent->list, &sriov->cm_list); |
| 267 | | - } |
| 268 | | - |
| 269 | | - spin_unlock(&sriov->id_map_lock); |
| 270 | | - idr_preload_end(); |
| 271 | | - |
| 272 | | - if (ret >= 0) |
| 253 | + spin_unlock(&sriov->id_map_lock); |
| 273 | 254 | return ent; |
| 255 | + } |
| 274 | 256 | |
| 275 | 257 | /*error flow*/ |
| 276 | 258 | kfree(ent); |
| 277 | | - mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret); |
| 259 | + mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret); |
| 278 | 260 | return ERR_PTR(-ENOMEM); |
| 279 | 261 | } |
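The conversion replaces the idr_preload()/idr_alloc_cyclic()/idr_preload_end() sequence with a single xa_alloc_cyclic() call, which can sleep with GFP_KERNEL on its own and writes the allocated ID straight into ent->pv_cm_id. A self-contained sketch of the API, with hypothetical names:

```c
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_table);	/* XA_FLAGS_ALLOC set for us */
static u32 my_next_id;			/* cursor for cyclic allocation */

/* Sketch: store @entry under a fresh 32-bit ID. Returns 0 on success,
 * 1 if the ID space wrapped around, or a negative errno, which is why
 * the code above tests ret >= 0 rather than !ret. */
static int store_with_id(void *entry, u32 *id)
{
	return xa_alloc_cyclic(&my_table, id, entry, xa_limit_32b,
			       &my_next_id, GFP_KERNEL);
}
```

Because the allocation no longer has to run in atomic context, id_map_lock shrinks to covering just the rb-tree and list updates.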
| 280 | 262 | |
| .. | .. |
| 290 | 272 | if (ent) |
| 291 | 273 | *pv_cm_id = (int) ent->pv_cm_id; |
| 292 | 274 | } else |
| 293 | | - ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id); |
| 275 | + ent = xa_load(&sriov->pv_id_table, *pv_cm_id); |
| 294 | 276 | spin_unlock(&sriov->id_map_lock); |
| 295 | 277 | |
| 296 | 278 | return ent; |
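xa_load() is the direct replacement for idr_find(): same lookup, no cast needed since the XArray stores void pointers keyed by unsigned long. A bare sketch with an illustrative name:

```c
#include <linux/xarray.h>

/* Sketch: xa_load() takes the RCU read lock internally, so it is safe
 * on its own; the spinlock held by the caller above protects the
 * surrounding rb-tree and list state rather than this lookup. */
static struct id_map_entry *find_pv_entry(struct xarray *xa, u32 pv_cm_id)
{
	return xa_load(xa, pv_cm_id);	/* NULL if not present */
}
```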
| .. | .. |
| 304 | 286 | spin_lock(&sriov->id_map_lock); |
| 305 | 287 | spin_lock_irqsave(&sriov->going_down_lock, flags); |
| 306 | 288 | /*make sure that there is no schedule inside the scheduled work.*/ |
| 307 | | - if (!sriov->is_going_down) { |
| 289 | + if (!sriov->is_going_down && !id->scheduled_delete) { |
| 308 | 290 | id->scheduled_delete = 1; |
| 309 | 291 | schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); |
| 310 | 292 | } else if (id->scheduled_delete) { |
| .. | .. |
| 315 | 297 | spin_unlock(&sriov->id_map_lock); |
| 316 | 298 | } |
| 317 | 299 | |
| 300 | +#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason) |
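REJ_REASON() pulls the reason code out of a CM MAD by viewing it through the extended cm_generic_msg. The new special case in both handlers is REJ-due-to-timeout; roughly (hypothetical helper, same test as the code below):

```c
#include <rdma/ib_cm.h>		/* IB_CM_REJ_TIMEOUT */

/* Hypothetical helper: a timed-out REJ can refer to a connection whose
 * id_map entry never existed or is already gone, so it gets its own
 * lookup path via the rej_tmout xarray instead of the id map. */
static bool is_timeout_rej(struct ib_mad *mad)
{
	return mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
	       REJ_REASON(mad) == IB_CM_REJ_TIMEOUT;
}
```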
| 318 | 301 | int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, |
| 319 | 302 | struct ib_mad *mad) |
| 320 | 303 | { |
| .. | .. |
| 323 | 306 | int pv_cm_id = -1; |
| 324 | 307 | |
| 325 | 308 | if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || |
| 326 | | - mad->mad_hdr.attr_id == CM_REP_ATTR_ID || |
| 327 | | - mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { |
| 309 | + mad->mad_hdr.attr_id == CM_REP_ATTR_ID || |
| 310 | + mad->mad_hdr.attr_id == CM_MRA_ATTR_ID || |
| 311 | + mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID || |
| 312 | + (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) { |
| 328 | 313 | sl_cm_id = get_local_comm_id(mad); |
| 329 | 314 | id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); |
| 330 | 315 | if (id) |
| .. | .. |
| 344 | 329 | } |
| 345 | 330 | |
| 346 | 331 | if (!id) { |
| 347 | | - pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n", |
| 348 | | - slave_id, sl_cm_id); |
| 332 | + pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n", |
| 333 | + slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id)); |
| 349 | 334 | return -EINVAL; |
| 350 | 335 | } |
| 351 | 336 | |
| .. | .. |
| 354 | 339 | |
| 355 | 340 | if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) |
| 356 | 341 | schedule_delayed(ibdev, id); |
| 357 | | - else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) |
| 358 | | - id_map_find_del(ibdev, pv_cm_id); |
| 342 | + return 0; |
| 343 | +} |
| 344 | + |
| 345 | +static void rej_tmout_timeout(struct work_struct *work) |
| 346 | +{ |
| 347 | + struct delayed_work *delay = to_delayed_work(work); |
| 348 | + struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout); |
| 349 | + struct rej_tmout_entry *deleted; |
| 350 | + |
| 351 | + deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0); |
| 352 | + |
| 353 | + if (deleted != item) |
| 354 | + pr_debug("deleted(%p) != item(%p)\n", deleted, item); |
| 355 | + |
| 356 | + kfree(item); |
| 357 | +} |
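rej_tmout_timeout() must not blindly xa_erase(): by the time the work runs, the index may already hold a newer entry for a retried REQ. xa_cmpxchg() with NULL as the new value removes the entry only if it is still ours. The idiom in isolation (illustrative names; the code above logs with pr_debug instead of warning):

```c
#include <linux/xarray.h>

/* Sketch: conditional erase. Storing NULL removes the entry, but only
 * when @item is still what the index holds, so a racing re-insert for
 * the same key is left alone. A gfp of 0 is fine here because removal
 * allocates nothing. */
static void erase_if_still_ours(struct xarray *xa, unsigned long index,
				void *item)
{
	void *old = xa_cmpxchg(xa, index, item, NULL, 0);

	WARN_ON_ONCE(old != item);	/* entry was replaced under us */
}
```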
| 358 | + |
| 359 | +static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave) |
| 360 | +{ |
| 361 | + struct rej_tmout_entry *item; |
| 362 | + struct rej_tmout_entry *old; |
| 363 | + int ret = 0; |
| 364 | + |
| 365 | + xa_lock(&sriov->xa_rej_tmout); |
| 366 | + item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id); |
| 367 | + |
| 368 | + if (item) { |
| 369 | + if (xa_err(item)) |
| 370 | + ret = xa_err(item); |
| 371 | + else |
| 372 | + /* If a retry, adjust delayed work */ |
| 373 | + mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); |
| 374 | + goto err_or_exists; |
| 375 | + } |
| 376 | + xa_unlock(&sriov->xa_rej_tmout); |
| 377 | + |
| 378 | + item = kmalloc(sizeof(*item), GFP_KERNEL); |
| 379 | + if (!item) |
| 380 | + return -ENOMEM; |
| 381 | + |
| 382 | + INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout); |
| 383 | + item->slave = slave; |
| 384 | + item->rem_pv_cm_id = rem_pv_cm_id; |
| 385 | + item->xa_rej_tmout = &sriov->xa_rej_tmout; |
| 386 | + |
| 387 | + old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL); |
| 388 | + if (old) { |
| 389 | + pr_debug( |
| 390 | + "Non-null old entry (%p) or error (%d) when inserting\n", |
| 391 | + old, xa_err(old)); |
| 392 | + kfree(item); |
| 393 | + return xa_err(old); |
| 394 | + } |
| 395 | + |
| 396 | + schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT); |
| 359 | 397 | |
| 360 | 398 | return 0; |
| 399 | + |
| 400 | +err_or_exists: |
| 401 | + xa_unlock(&sriov->xa_rej_tmout); |
| 402 | + return ret; |
| 403 | +} |
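alloc_rej_tmout() allocates outside xa_lock() because GFP_KERNEL may sleep, then uses xa_cmpxchg() with an expected old value of NULL so a concurrent inserter is detected instead of silently overwritten, which a plain xa_store() would do. One subtlety: xa_err() on a genuine pointer yields 0, so losing the race reports success, which is the right answer for a retried REQ. A condensed sketch (hypothetical function name):

```c
#include <linux/slab.h>
#include <linux/xarray.h>

/* Sketch: insert @item at @index unless someone beat us to it. */
static int insert_once(struct xarray *xa, unsigned long index, void *item)
{
	void *old = xa_cmpxchg(xa, index, NULL, item, GFP_KERNEL);

	if (!old)
		return 0;	/* we stored it */
	kfree(item);		/* duplicate or error: drop our copy */
	return xa_err(old);	/* 0 if a concurrent insert won the race */
}
```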
| 404 | + |
| 405 | +static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id) |
| 406 | +{ |
| 407 | + struct rej_tmout_entry *item; |
| 408 | + int slave; |
| 409 | + |
| 410 | + xa_lock(&sriov->xa_rej_tmout); |
| 411 | + item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id); |
| 412 | + |
| 413 | + if (!item || xa_err(item)) { |
| 414 | + pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n", |
| 415 | + rem_pv_cm_id, xa_err(item)); |
| 416 | + slave = !item ? -ENOENT : xa_err(item); |
| 417 | + } else { |
| 418 | + slave = item->slave; |
| 419 | + } |
| 420 | + xa_unlock(&sriov->xa_rej_tmout); |
| 421 | + |
| 422 | + return slave; |
| 361 | 423 | } |
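The lookup multiplexes its result into one int: non-negative is the slave number, negative is an errno (-ENOENT once the entry has expired). A hypothetical caller, mirroring what the demux handler does further down:

```c
/* Sketch only: propagate the errno, otherwise use the slave number. */
static int resolve_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	int slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

	if (slave < 0)
		return slave;	/* entry expired or xarray error */
	/* ... forward the timed-out REJ to @slave ... */
	return 0;
}
```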
| 362 | 424 | |
| 363 | 425 | int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, |
| 364 | 426 | struct ib_mad *mad) |
| 365 | 427 | { |
| 428 | + struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; |
| 429 | + u32 rem_pv_cm_id = get_local_comm_id(mad); |
| 366 | 430 | u32 pv_cm_id; |
| 367 | 431 | struct id_map_entry *id; |
| 432 | + int sts; |
| 368 | 433 | |
| 369 | 434 | if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || |
| 370 | 435 | mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { |
| .. | .. |
| 380 | 445 | be64_to_cpu(gid.global.interface_id)); |
| 381 | 446 | return -ENOENT; |
| 382 | 447 | } |
| 448 | + |
| 449 | + sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave); |
| 450 | + if (sts) |
| 451 | + /* Even if this fails, we pass on the REQ to the slave */ |
| 452 | + pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n", |
| 453 | + rem_pv_cm_id, *slave, sts); |
| 454 | + |
| 383 | 455 | return 0; |
| 384 | 456 | } |
| 385 | 457 | |
| .. | .. |
| 387 | 459 | id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1); |
| 388 | 460 | |
| 389 | 461 | if (!id) { |
| 390 | | - pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id); |
| 462 | + if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && |
| 463 | + REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) { |
| 464 | + *slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id); |
| 465 | + |
| 466 | + return (*slave < 0) ? *slave : 0; |
| 467 | + } |
| 468 | + pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n", |
| 469 | + pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id)); |
| 391 | 470 | return -ENOENT; |
| 392 | 471 | } |
| 393 | 472 | |
| .. | .. |
| 395 | 474 | *slave = id->slave_id; |
| 396 | 475 | set_remote_comm_id(mad, id->sl_cm_id); |
| 397 | 476 | |
| 398 | | - if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) |
| 477 | + if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID || |
| 478 | + mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) |
| 399 | 479 | schedule_delayed(ibdev, id); |
| 400 | | - else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || |
| 401 | | - mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) { |
| 402 | | - id_map_find_del(ibdev, (int) pv_cm_id); |
| 403 | | - } |
| 404 | 480 | |
| 405 | 481 | return 0; |
| 406 | 482 | } |
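With id_map_find_del() gone, REJ and DREP no longer tear the mapping down eagerly; DREQ and REJ instead arm the delayed cleanup, and the strengthened test in schedule_delayed() (the `!id->scheduled_delete` hunk above) makes repeated arming idempotent. Stripped of the lock and going-down handling, the rule is roughly:

```c
/* Condensed sketch of the idempotent arming that replaces eager
 * deletion: the first DREQ or REJ starts the timer, later ones are
 * no-ops, and the entry is finally freed by id_map_ent_timeout(). */
static void arm_cleanup_once(struct id_map_entry *id)
{
	if (!id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
}
```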
| .. | .. |
| 410 | 486 | spin_lock_init(&dev->sriov.id_map_lock); |
| 411 | 487 | INIT_LIST_HEAD(&dev->sriov.cm_list); |
| 412 | 488 | dev->sriov.sl_id_map = RB_ROOT; |
| 413 | | - idr_init(&dev->sriov.pv_id_table); |
| 489 | + xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC); |
| 490 | + xa_init(&dev->sriov.xa_rej_tmout); |
| 491 | +} |
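The two tables are initialized differently on purpose: pv_id_table hands out IDs, so it needs XA_FLAGS_ALLOC for xa_alloc_cyclic(); xa_rej_tmout is keyed by the remote's pv_cm_id, so a plain xa_init() suffices. A minimal sketch with hypothetical names:

```c
#include <linux/xarray.h>

/* Sketch: an allocating XArray vs. a plain lookup table. Calling
 * xa_alloc*() on an xarray initialized without XA_FLAGS_ALLOC is a
 * bug, while xa_store()/xa_load() work on either. */
static void init_cm_tables(struct xarray *id_alloc_xa, struct xarray *lookup_xa)
{
	xa_init_flags(id_alloc_xa, XA_FLAGS_ALLOC); /* for xa_alloc_cyclic() */
	xa_init(lookup_xa);	/* store/load under a caller-chosen key */
}
```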
| 492 | + |
| 493 | +static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave) |
| 494 | +{ |
| 495 | + struct rej_tmout_entry *item; |
| 496 | + bool flush_needed = false; |
| 497 | + unsigned long id; |
| 498 | + int cnt = 0; |
| 499 | + |
| 500 | + xa_lock(&sriov->xa_rej_tmout); |
| 501 | + xa_for_each(&sriov->xa_rej_tmout, id, item) { |
| 502 | + if (slave < 0 || slave == item->slave) { |
| 503 | + mod_delayed_work(system_wq, &item->timeout, 0); |
| 504 | + flush_needed = true; |
| 505 | + ++cnt; |
| 506 | + } |
| 507 | + } |
| 508 | + xa_unlock(&sriov->xa_rej_tmout); |
| 509 | + |
| 510 | + if (flush_needed) { |
| 511 | + flush_scheduled_work(); |
| 512 | + pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n", |
| 513 | + cnt, slave); |
| 514 | + } |
| 515 | + |
| 516 | + if (slave < 0) |
| 517 | + WARN_ON(!xa_empty(&sriov->xa_rej_tmout)); |
| 414 | 518 | } |
| 415 | 519 | |
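rej_tmout_xa_cleanup() empties the table by expediting rather than erasing: each matching entry's delayed work is pulled forward to run immediately, and since rej_tmout_timeout() both removes the entry and frees it, one flush afterwards leaves nothing behind. The teardown idiom in isolation (illustrative name; flush_scheduled_work() covers these works because they were queued on the system workqueue):

```c
#include <linux/workqueue.h>

/* Sketch: force a pending delayed work to run now, then wait for it. */
static void expedite_and_wait(struct delayed_work *dwork)
{
	mod_delayed_work(system_wq, dwork, 0);	/* delay 0: run ASAP */
	flush_scheduled_work();			/* wait until it finished */
}
```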
| 416 | 520 | /* slave = -1 ==> all slaves */ |
| .. | .. |
| 447 | 551 | struct id_map_entry, node); |
| 448 | 552 | |
| 449 | 553 | rb_erase(&ent->node, sl_id_map); |
| 450 | | - idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id); |
| 554 | + xa_erase(&sriov->pv_id_table, ent->pv_cm_id); |
| 451 | 555 | } |
| 452 | 556 | list_splice_init(&dev->sriov.cm_list, &lh); |
| 453 | 557 | } else { |
| .. | .. |
| 463 | 567 | /* remove those nodes from databases */ |
| 464 | 568 | list_for_each_entry_safe(map, tmp_map, &lh, list) { |
| 465 | 569 | rb_erase(&map->node, sl_id_map); |
| 466 | | - idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id); |
| 570 | + xa_erase(&sriov->pv_id_table, map->pv_cm_id); |
| 467 | 571 | } |
| 468 | 572 | |
| 469 | 573 | /* add remaining nodes from cm_list */ |
| .. | .. |
| 480 | 584 | list_del(&map->list); |
| 481 | 585 | kfree(map); |
| 482 | 586 | } |
| 587 | + |
| 588 | + rej_tmout_xa_cleanup(sriov, slave); |
| 483 | 589 | } |