| .. | .. |
|---|
| 42 | 42 | #include "core_priv.h" |
|---|
| 43 | 43 | #include "rdma_core.h" |
|---|
| 44 | 44 | |
|---|
| 45 | | -void uverbs_uobject_get(struct ib_uobject *uobject) |
|---|
| 46 | | -{ |
|---|
| 47 | | - kref_get(&uobject->ref); |
|---|
| 48 | | -} |
|---|
| 49 | | - |
|---|
| 50 | 45 | static void uverbs_uobject_free(struct kref *ref) |
|---|
| 51 | 46 | { |
|---|
| 52 | | - struct ib_uobject *uobj = |
|---|
| 53 | | - container_of(ref, struct ib_uobject, ref); |
|---|
| 54 | | - |
|---|
| 55 | | - if (uobj->uapi_object->type_class->needs_kfree_rcu) |
|---|
| 56 | | - kfree_rcu(uobj, rcu); |
|---|
| 57 | | - else |
|---|
| 58 | | - kfree(uobj); |
|---|
| 47 | + kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu); |
|---|
| 59 | 48 | } |
|---|
| 60 | 49 | |
|---|
| 50 | +/* |
|---|
| 51 | + * In order to indicate we no longer need this uobject, uverbs_uobject_put |
|---|
| 52 | + * is called. When the reference count drops to zero, the uobject is freed. |
|---|
| 53 | + * For example, this is used when attaching a completion channel to a CQ. |
|---|
| 54 | + */ |
|---|
| 61 | 55 | void uverbs_uobject_put(struct ib_uobject *uobject) |
|---|
| 62 | 56 | { |
|---|
| 63 | 57 | kref_put(&uobject->ref, uverbs_uobject_free); |
|---|
| 64 | 58 | } |
|---|
| 59 | +EXPORT_SYMBOL(uverbs_uobject_put); |
|---|
| 65 | 60 | |
|---|
| 66 | 61 | static int uverbs_try_lock_object(struct ib_uobject *uobj, |
|---|
| 67 | 62 | enum rdma_lookup_mode mode) |
|---|
| .. | .. |
|---|
| 125 | 120 | * and consumes the kref on the uobj. |
|---|
| 126 | 121 | */ |
|---|
| 127 | 122 | static int uverbs_destroy_uobject(struct ib_uobject *uobj, |
|---|
| 128 | | - enum rdma_remove_reason reason) |
|---|
| 123 | + enum rdma_remove_reason reason, |
|---|
| 124 | + struct uverbs_attr_bundle *attrs) |
|---|
| 129 | 125 | { |
|---|
| 130 | | - struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 126 | + struct ib_uverbs_file *ufile = attrs->ufile; |
|---|
| 131 | 127 | unsigned long flags; |
|---|
| 132 | 128 | int ret; |
|---|
| 133 | 129 | |
|---|
| 134 | 130 | lockdep_assert_held(&ufile->hw_destroy_rwsem); |
|---|
| 135 | 131 | assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE); |
|---|
| 136 | 132 | |
|---|
| 137 | | - if (uobj->object) { |
|---|
| 138 | | - ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason); |
|---|
| 133 | + if (reason == RDMA_REMOVE_ABORT) { |
|---|
| 134 | + WARN_ON(!list_empty(&uobj->list)); |
|---|
| 135 | + WARN_ON(!uobj->context); |
|---|
| 136 | + uobj->uapi_object->type_class->alloc_abort(uobj); |
|---|
| 137 | + } else if (uobj->object) { |
|---|
| 138 | + ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason, |
|---|
| 139 | + attrs); |
|---|
| 139 | 140 | if (ret) { |
|---|
| 140 | 141 | if (ib_is_destroy_retryable(ret, reason, uobj)) |
|---|
| 141 | 142 | return ret; |
|---|
| .. | .. |
|---|
| 147 | 148 | } |
|---|
| 148 | 149 | |
|---|
| 149 | 150 | uobj->object = NULL; |
|---|
| 150 | | - } |
|---|
| 151 | | - |
|---|
| 152 | | - if (reason == RDMA_REMOVE_ABORT) { |
|---|
| 153 | | - WARN_ON(!list_empty(&uobj->list)); |
|---|
| 154 | | - WARN_ON(!uobj->context); |
|---|
| 155 | | - uobj->uapi_object->type_class->alloc_abort(uobj); |
|---|
| 156 | 151 | } |
|---|
| 157 | 152 | |
|---|
| 158 | 153 | uobj->context = NULL; |
|---|
| .. | .. |
|---|
| 196 | 191 | * version requires the caller to have already obtained an |
|---|
| 197 | 192 | * LOOKUP_DESTROY uobject kref. |
|---|
| 198 | 193 | */ |
|---|
| 199 | | -int uobj_destroy(struct ib_uobject *uobj) |
|---|
| 194 | +int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) |
|---|
| 200 | 195 | { |
|---|
| 201 | | - struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 196 | + struct ib_uverbs_file *ufile = attrs->ufile; |
|---|
| 202 | 197 | int ret; |
|---|
| 203 | 198 | |
|---|
| 204 | 199 | down_read(&ufile->hw_destroy_rwsem); |
|---|
| .. | .. |
|---|
| 214 | 209 | if (ret) |
|---|
| 215 | 210 | goto out_unlock; |
|---|
| 216 | 211 | |
|---|
| 217 | | - ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY); |
|---|
| 212 | + ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs); |
|---|
| 218 | 213 | if (ret) { |
|---|
| 219 | 214 | atomic_set(&uobj->usecnt, 0); |
|---|
| 220 | 215 | goto out_unlock; |
|---|
| .. | .. |
|---|
| 231 | 226 | * uobj_put_destroy(). |
|---|
| 232 | 227 | */ |
|---|
| 233 | 228 | struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, |
|---|
| 234 | | - u32 id, struct ib_uverbs_file *ufile) |
|---|
| 229 | + u32 id, struct uverbs_attr_bundle *attrs) |
|---|
| 235 | 230 | { |
|---|
| 236 | 231 | struct ib_uobject *uobj; |
|---|
| 237 | 232 | int ret; |
|---|
| 238 | 233 | |
|---|
| 239 | | - uobj = rdma_lookup_get_uobject(obj, ufile, id, UVERBS_LOOKUP_DESTROY); |
|---|
| 234 | + uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id, |
|---|
| 235 | + UVERBS_LOOKUP_DESTROY, attrs); |
|---|
| 240 | 236 | if (IS_ERR(uobj)) |
|---|
| 241 | 237 | return uobj; |
|---|
| 242 | 238 | |
|---|
| 243 | | - ret = uobj_destroy(uobj); |
|---|
| 239 | + ret = uobj_destroy(uobj, attrs); |
|---|
| 244 | 240 | if (ret) { |
|---|
| 245 | 241 | rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); |
|---|
| 246 | 242 | return ERR_PTR(ret); |
|---|
| .. | .. |
|---|
| 250 | 246 | } |
|---|
| 251 | 247 | |
|---|
| 252 | 248 | /* |
|---|
| 253 | | - * Does both uobj_get_destroy() and uobj_put_destroy(). Returns success_res |
|---|
| 254 | | - * on success (negative errno on failure). For use by callers that do not need |
|---|
| 255 | | - * the uobj. |
|---|
| 249 | + * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success |
|---|
| 250 | + * (negative errno on failure). For use by callers that do not need the uobj. |
|---|
| 256 | 251 | */ |
|---|
| 257 | 252 | int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, |
|---|
| 258 | | - struct ib_uverbs_file *ufile, int success_res) |
|---|
| 253 | + struct uverbs_attr_bundle *attrs) |
|---|
| 259 | 254 | { |
|---|
| 260 | 255 | struct ib_uobject *uobj; |
|---|
| 261 | 256 | |
|---|
| 262 | | - uobj = __uobj_get_destroy(obj, id, ufile); |
|---|
| 257 | + uobj = __uobj_get_destroy(obj, id, attrs); |
|---|
| 263 | 258 | if (IS_ERR(uobj)) |
|---|
| 264 | 259 | return PTR_ERR(uobj); |
|---|
| 265 | | - |
|---|
| 266 | 260 | uobj_put_destroy(uobj); |
|---|
| 267 | | - return success_res; |
|---|
| 261 | + return 0; |
|---|
| 268 | 262 | } |
|---|
| 269 | 263 | |
|---|
| 270 | 264 | /* alloc_uobj must be undone by uverbs_destroy_uobject() */ |
|---|
| 271 | | -static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile, |
|---|
| 265 | +static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs, |
|---|
| 272 | 266 | const struct uverbs_api_object *obj) |
|---|
| 273 | 267 | { |
|---|
| 268 | + struct ib_uverbs_file *ufile = attrs->ufile; |
|---|
| 274 | 269 | struct ib_uobject *uobj; |
|---|
| 275 | | - struct ib_ucontext *ucontext; |
|---|
| 276 | 270 | |
|---|
| 277 | | - ucontext = ib_uverbs_get_ucontext(ufile); |
|---|
| 278 | | - if (IS_ERR(ucontext)) |
|---|
| 279 | | - return ERR_CAST(ucontext); |
|---|
| 271 | + if (!attrs->context) { |
|---|
| 272 | + struct ib_ucontext *ucontext = |
|---|
| 273 | + ib_uverbs_get_ucontext_file(ufile); |
|---|
| 274 | + |
|---|
| 275 | + if (IS_ERR(ucontext)) |
|---|
| 276 | + return ERR_CAST(ucontext); |
|---|
| 277 | + attrs->context = ucontext; |
|---|
| 278 | + } |
|---|
| 280 | 279 | |
|---|
| 281 | 280 | uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL); |
|---|
| 282 | 281 | if (!uobj) |
|---|
| .. | .. |
|---|
| 286 | 285 | * The object is added to the list in the commit stage. |
|---|
| 287 | 286 | */ |
|---|
| 288 | 287 | uobj->ufile = ufile; |
|---|
| 289 | | - uobj->context = ucontext; |
|---|
| 288 | + uobj->context = attrs->context; |
|---|
| 290 | 289 | INIT_LIST_HEAD(&uobj->list); |
|---|
| 291 | 290 | uobj->uapi_object = obj; |
|---|
| 292 | 291 | /* |
|---|
| .. | .. |
|---|
| 302 | 301 | |
|---|
| 303 | 302 | static int idr_add_uobj(struct ib_uobject *uobj) |
|---|
| 304 | 303 | { |
|---|
| 305 | | - int ret; |
|---|
| 306 | | - |
|---|
| 307 | | - idr_preload(GFP_KERNEL); |
|---|
| 308 | | - spin_lock(&uobj->ufile->idr_lock); |
|---|
| 309 | | - |
|---|
| 310 | | - /* |
|---|
| 311 | | - * We start with allocating an idr pointing to NULL. This represents an |
|---|
| 312 | | - * object which isn't initialized yet. We'll replace it later on with |
|---|
| 313 | | - * the real object once we commit. |
|---|
| 314 | | - */ |
|---|
| 315 | | - ret = idr_alloc(&uobj->ufile->idr, NULL, 0, |
|---|
| 316 | | - min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT); |
|---|
| 317 | | - if (ret >= 0) |
|---|
| 318 | | - uobj->id = ret; |
|---|
| 319 | | - |
|---|
| 320 | | - spin_unlock(&uobj->ufile->idr_lock); |
|---|
| 321 | | - idr_preload_end(); |
|---|
| 322 | | - |
|---|
| 323 | | - return ret < 0 ? ret : 0; |
|---|
| 304 | + /* |
|---|
| 305 | + * We start with allocating an idr pointing to NULL. This represents an |
|---|
| 306 | + * object which isn't initialized yet. We'll replace it later on with |
|---|
| 307 | + * the real object once we commit. |
|---|
| 308 | + */ |
|---|
| 309 | + return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b, |
|---|
| 310 | + GFP_KERNEL); |
|---|
| 324 | 311 | } |
|---|
| 325 | 312 | |
|---|
| 326 | 313 | /* Returns the ib_uobject or an error. The caller should check for IS_ERR. */ |
|---|
| .. | .. |
|---|
| 330 | 317 | enum rdma_lookup_mode mode) |
|---|
| 331 | 318 | { |
|---|
| 332 | 319 | struct ib_uobject *uobj; |
|---|
| 333 | | - unsigned long idrno = id; |
|---|
| 334 | 320 | |
|---|
| 335 | 321 | if (id < 0 || id > ULONG_MAX) |
|---|
| 336 | 322 | return ERR_PTR(-EINVAL); |
|---|
| 337 | 323 | |
|---|
| 338 | 324 | rcu_read_lock(); |
|---|
| 339 | | - /* object won't be released as we're protected in rcu */ |
|---|
| 340 | | - uobj = idr_find(&ufile->idr, idrno); |
|---|
| 341 | | - if (!uobj) { |
|---|
| 342 | | - uobj = ERR_PTR(-ENOENT); |
|---|
| 343 | | - goto free; |
|---|
| 344 | | - } |
|---|
| 345 | | - |
|---|
| 346 | 325 | /* |
|---|
| 347 | 326 | * The idr_find is guaranteed to return a pointer to something that |
|---|
| 348 | 327 | * isn't freed yet, or NULL, as the free after idr_remove goes through |
|---|
| 349 | 328 | * kfree_rcu(). However the object may still have been released and |
|---|
| 350 | 329 | * kfree() could be called at any time. |
|---|
| 351 | 330 | */ |
|---|
| 352 | | - if (!kref_get_unless_zero(&uobj->ref)) |
|---|
| 331 | + uobj = xa_load(&ufile->idr, id); |
|---|
| 332 | + if (!uobj || !kref_get_unless_zero(&uobj->ref)) |
|---|
| 353 | 333 | uobj = ERR_PTR(-ENOENT); |
|---|
| 354 | | - |
|---|
| 355 | | -free: |
|---|
| 356 | 334 | rcu_read_unlock(); |
|---|
| 357 | 335 | return uobj; |
|---|
| 358 | 336 | } |
|---|
| .. | .. |
|---|
| 384 | 362 | |
|---|
| 385 | 363 | uobject = f->private_data; |
|---|
| 386 | 364 | /* |
|---|
| 387 | | - * fget(id) ensures we are not currently running uverbs_close_fd, |
|---|
| 388 | | - * and the caller is expected to ensure that uverbs_close_fd is never |
|---|
| 389 | | - * done while a call top lookup is possible. |
|---|
| 365 | + * fget(id) ensures we are not currently running |
|---|
| 366 | + * uverbs_uobject_fd_release(), and the caller is expected to ensure |
|---|
| 367 | + * that release is never done while a call to lookup is possible. |
|---|
| 390 | 368 | */ |
|---|
| 391 | 369 | if (f->f_op != fd_type->fops || uobject->ufile != ufile) { |
|---|
| 392 | 370 | fput(f); |
|---|
| .. | .. |
|---|
| 399 | 377 | |
|---|
| 400 | 378 | struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, |
|---|
| 401 | 379 | struct ib_uverbs_file *ufile, s64 id, |
|---|
| 402 | | - enum rdma_lookup_mode mode) |
|---|
| 380 | + enum rdma_lookup_mode mode, |
|---|
| 381 | + struct uverbs_attr_bundle *attrs) |
|---|
| 403 | 382 | { |
|---|
| 404 | 383 | struct ib_uobject *uobj; |
|---|
| 405 | 384 | int ret; |
|---|
| 406 | 385 | |
|---|
| 407 | | - if (!obj) |
|---|
| 408 | | - return ERR_PTR(-EINVAL); |
|---|
| 386 | + if (obj == ERR_PTR(-ENOMSG)) { |
|---|
| 387 | + /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */ |
|---|
| 388 | + uobj = lookup_get_idr_uobject(NULL, ufile, id, mode); |
|---|
| 389 | + if (IS_ERR(uobj)) |
|---|
| 390 | + return uobj; |
|---|
| 391 | + } else { |
|---|
| 392 | + if (IS_ERR(obj)) |
|---|
| 393 | + return ERR_PTR(-EINVAL); |
|---|
| 409 | 394 | |
|---|
| 410 | | - uobj = obj->type_class->lookup_get(obj, ufile, id, mode); |
|---|
| 411 | | - if (IS_ERR(uobj)) |
|---|
| 412 | | - return uobj; |
|---|
| 395 | + uobj = obj->type_class->lookup_get(obj, ufile, id, mode); |
|---|
| 396 | + if (IS_ERR(uobj)) |
|---|
| 397 | + return uobj; |
|---|
| 413 | 398 | |
|---|
| 414 | | - if (uobj->uapi_object != obj) { |
|---|
| 415 | | - ret = -EINVAL; |
|---|
| 416 | | - goto free; |
|---|
| 399 | + if (uobj->uapi_object != obj) { |
|---|
| 400 | + ret = -EINVAL; |
|---|
| 401 | + goto free; |
|---|
| 402 | + } |
|---|
| 417 | 403 | } |
|---|
| 418 | 404 | |
|---|
| 419 | 405 | /* |
|---|
| .. | .. |
|---|
| 430 | 416 | ret = uverbs_try_lock_object(uobj, mode); |
|---|
| 431 | 417 | if (ret) |
|---|
| 432 | 418 | goto free; |
|---|
| 419 | + if (attrs) |
|---|
| 420 | + attrs->context = uobj->context; |
|---|
| 433 | 421 | |
|---|
| 434 | 422 | return uobj; |
|---|
| 435 | 423 | free: |
|---|
| 436 | | - obj->type_class->lookup_put(uobj, mode); |
|---|
| 424 | + uobj->uapi_object->type_class->lookup_put(uobj, mode); |
|---|
| 437 | 425 | uverbs_uobject_put(uobj); |
|---|
| 438 | 426 | return ERR_PTR(ret); |
|---|
| 439 | 427 | } |
|---|
| 440 | 428 | |
|---|
| 441 | 429 | static struct ib_uobject * |
|---|
| 442 | 430 | alloc_begin_idr_uobject(const struct uverbs_api_object *obj, |
|---|
| 443 | | - struct ib_uverbs_file *ufile) |
|---|
| 431 | + struct uverbs_attr_bundle *attrs) |
|---|
| 444 | 432 | { |
|---|
| 445 | 433 | int ret; |
|---|
| 446 | 434 | struct ib_uobject *uobj; |
|---|
| 447 | 435 | |
|---|
| 448 | | - uobj = alloc_uobj(ufile, obj); |
|---|
| 436 | + uobj = alloc_uobj(attrs, obj); |
|---|
| 449 | 437 | if (IS_ERR(uobj)) |
|---|
| 450 | 438 | return uobj; |
|---|
| 451 | 439 | |
|---|
| .. | .. |
|---|
| 456 | 444 | ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device, |
|---|
| 457 | 445 | RDMACG_RESOURCE_HCA_OBJECT); |
|---|
| 458 | 446 | if (ret) |
|---|
| 459 | | - goto idr_remove; |
|---|
| 447 | + goto remove; |
|---|
| 460 | 448 | |
|---|
| 461 | 449 | return uobj; |
|---|
| 462 | 450 | |
|---|
| 463 | | -idr_remove: |
|---|
| 464 | | - spin_lock(&ufile->idr_lock); |
|---|
| 465 | | - idr_remove(&ufile->idr, uobj->id); |
|---|
| 466 | | - spin_unlock(&ufile->idr_lock); |
|---|
| 451 | +remove: |
|---|
| 452 | + xa_erase(&attrs->ufile->idr, uobj->id); |
|---|
| 467 | 453 | uobj_put: |
|---|
| 468 | 454 | uverbs_uobject_put(uobj); |
|---|
| 469 | 455 | return ERR_PTR(ret); |
|---|
| .. | .. |
|---|
| 471 | 457 | |
|---|
| 472 | 458 | static struct ib_uobject * |
|---|
| 473 | 459 | alloc_begin_fd_uobject(const struct uverbs_api_object *obj, |
|---|
| 474 | | - struct ib_uverbs_file *ufile) |
|---|
| 460 | + struct uverbs_attr_bundle *attrs) |
|---|
| 475 | 461 | { |
|---|
| 462 | + const struct uverbs_obj_fd_type *fd_type; |
|---|
| 476 | 463 | int new_fd; |
|---|
| 477 | | - struct ib_uobject *uobj; |
|---|
| 464 | + struct ib_uobject *uobj, *ret; |
|---|
| 465 | + struct file *filp; |
|---|
| 478 | 466 | |
|---|
| 479 | | - new_fd = get_unused_fd_flags(O_CLOEXEC); |
|---|
| 480 | | - if (new_fd < 0) |
|---|
| 481 | | - return ERR_PTR(new_fd); |
|---|
| 482 | | - |
|---|
| 483 | | - uobj = alloc_uobj(ufile, obj); |
|---|
| 484 | | - if (IS_ERR(uobj)) { |
|---|
| 485 | | - put_unused_fd(new_fd); |
|---|
| 467 | + uobj = alloc_uobj(attrs, obj); |
|---|
| 468 | + if (IS_ERR(uobj)) |
|---|
| 486 | 469 | return uobj; |
|---|
| 470 | + |
|---|
| 471 | + fd_type = |
|---|
| 472 | + container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); |
|---|
| 473 | + if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && |
|---|
| 474 | + fd_type->fops->release != &uverbs_async_event_release)) { |
|---|
| 475 | + ret = ERR_PTR(-EINVAL); |
|---|
| 476 | + goto err_fd; |
|---|
| 487 | 477 | } |
|---|
| 488 | 478 | |
|---|
| 489 | | - uobj->id = new_fd; |
|---|
| 490 | | - uobj->ufile = ufile; |
|---|
| 479 | + new_fd = get_unused_fd_flags(O_CLOEXEC); |
|---|
| 480 | + if (new_fd < 0) { |
|---|
| 481 | + ret = ERR_PTR(new_fd); |
|---|
| 482 | + goto err_fd; |
|---|
| 483 | + } |
|---|
| 491 | 484 | |
|---|
| 485 | + /* Note that uverbs_uobject_fd_release() is called during abort */ |
|---|
| 486 | + filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, |
|---|
| 487 | + fd_type->flags); |
|---|
| 488 | + if (IS_ERR(filp)) { |
|---|
| 489 | + ret = ERR_CAST(filp); |
|---|
| 490 | + goto err_getfile; |
|---|
| 491 | + } |
|---|
| 492 | + uobj->object = filp; |
|---|
| 493 | + |
|---|
| 494 | + uobj->id = new_fd; |
|---|
| 492 | 495 | return uobj; |
|---|
| 496 | + |
|---|
| 497 | +err_getfile: |
|---|
| 498 | + put_unused_fd(new_fd); |
|---|
| 499 | +err_fd: |
|---|
| 500 | + uverbs_uobject_put(uobj); |
|---|
| 501 | + return ret; |
|---|
| 493 | 502 | } |
|---|
| 494 | 503 | |
|---|
| 495 | 504 | struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, |
|---|
| 496 | | - struct ib_uverbs_file *ufile) |
|---|
| 505 | + struct uverbs_attr_bundle *attrs) |
|---|
| 497 | 506 | { |
|---|
| 507 | + struct ib_uverbs_file *ufile = attrs->ufile; |
|---|
| 498 | 508 | struct ib_uobject *ret; |
|---|
| 499 | 509 | |
|---|
| 500 | | - if (!obj) |
|---|
| 510 | + if (IS_ERR(obj)) |
|---|
| 501 | 511 | return ERR_PTR(-EINVAL); |
|---|
| 502 | 512 | |
|---|
| 503 | 513 | /* |
|---|
| .. | .. |
|---|
| 508 | 518 | if (!down_read_trylock(&ufile->hw_destroy_rwsem)) |
|---|
| 509 | 519 | return ERR_PTR(-EIO); |
|---|
| 510 | 520 | |
|---|
| 511 | | - ret = obj->type_class->alloc_begin(obj, ufile); |
|---|
| 521 | + ret = obj->type_class->alloc_begin(obj, attrs); |
|---|
| 512 | 522 | if (IS_ERR(ret)) { |
|---|
| 513 | 523 | up_read(&ufile->hw_destroy_rwsem); |
|---|
| 514 | 524 | return ret; |
|---|
| .. | .. |
|---|
| 521 | 531 | ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device, |
|---|
| 522 | 532 | RDMACG_RESOURCE_HCA_OBJECT); |
|---|
| 523 | 533 | |
|---|
| 524 | | - spin_lock(&uobj->ufile->idr_lock); |
|---|
| 525 | | - idr_remove(&uobj->ufile->idr, uobj->id); |
|---|
| 526 | | - spin_unlock(&uobj->ufile->idr_lock); |
|---|
| 534 | + xa_erase(&uobj->ufile->idr, uobj->id); |
|---|
| 527 | 535 | } |
|---|
| 528 | 536 | |
|---|
| 529 | 537 | static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj, |
|---|
| 530 | | - enum rdma_remove_reason why) |
|---|
| 538 | + enum rdma_remove_reason why, |
|---|
| 539 | + struct uverbs_attr_bundle *attrs) |
|---|
| 531 | 540 | { |
|---|
| 532 | 541 | const struct uverbs_obj_idr_type *idr_type = |
|---|
| 533 | 542 | container_of(uobj->uapi_object->type_attrs, |
|---|
| 534 | 543 | struct uverbs_obj_idr_type, type); |
|---|
| 535 | | - int ret = idr_type->destroy_object(uobj, why); |
|---|
| 544 | + int ret = idr_type->destroy_object(uobj, why, attrs); |
|---|
| 536 | 545 | |
|---|
| 537 | 546 | /* |
|---|
| 538 | 547 | * We can only fail gracefully if the user requested to destroy the |
|---|
| .. | .. |
|---|
| 553 | 562 | |
|---|
| 554 | 563 | static void remove_handle_idr_uobject(struct ib_uobject *uobj) |
|---|
| 555 | 564 | { |
|---|
| 556 | | - spin_lock(&uobj->ufile->idr_lock); |
|---|
| 557 | | - idr_remove(&uobj->ufile->idr, uobj->id); |
|---|
| 558 | | - spin_unlock(&uobj->ufile->idr_lock); |
|---|
| 565 | + xa_erase(&uobj->ufile->idr, uobj->id); |
|---|
| 559 | 566 | /* Matches the kref in alloc_commit_idr_uobject */ |
|---|
| 560 | 567 | uverbs_uobject_put(uobj); |
|---|
| 561 | 568 | } |
|---|
| 562 | 569 | |
|---|
| 563 | 570 | static void alloc_abort_fd_uobject(struct ib_uobject *uobj) |
|---|
| 564 | 571 | { |
|---|
| 572 | + struct file *filp = uobj->object; |
|---|
| 573 | + |
|---|
| 574 | + fput(filp); |
|---|
| 565 | 575 | put_unused_fd(uobj->id); |
|---|
| 566 | 576 | } |
|---|
| 567 | 577 | |
|---|
| 568 | 578 | static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj, |
|---|
| 569 | | - enum rdma_remove_reason why) |
|---|
| 579 | + enum rdma_remove_reason why, |
|---|
| 580 | + struct uverbs_attr_bundle *attrs) |
|---|
| 570 | 581 | { |
|---|
| 571 | 582 | const struct uverbs_obj_fd_type *fd_type = container_of( |
|---|
| 572 | 583 | uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type); |
|---|
| 573 | | - int ret = fd_type->context_closed(uobj, why); |
|---|
| 584 | + int ret = fd_type->destroy_object(uobj, why); |
|---|
| 574 | 585 | |
|---|
| 575 | 586 | if (ib_is_destroy_retryable(ret, why, uobj)) |
|---|
| 576 | 587 | return ret; |
|---|
| .. | .. |
|---|
| 582 | 593 | { |
|---|
| 583 | 594 | } |
|---|
| 584 | 595 | |
|---|
| 585 | | -static int alloc_commit_idr_uobject(struct ib_uobject *uobj) |
|---|
| 596 | +static void alloc_commit_idr_uobject(struct ib_uobject *uobj) |
|---|
| 586 | 597 | { |
|---|
| 587 | 598 | struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 599 | + void *old; |
|---|
| 588 | 600 | |
|---|
| 589 | | - spin_lock(&ufile->idr_lock); |
|---|
| 590 | 601 | /* |
|---|
| 591 | 602 | * We already allocated this IDR with a NULL object, so |
|---|
| 592 | 603 | * this shouldn't fail. |
|---|
| 593 | 604 | * |
|---|
| 594 | | - * NOTE: Once we set the IDR we loose ownership of our kref on uobj. |
|---|
| 605 | + * NOTE: Storing the uobj transfers our kref on uobj to the XArray. |
|---|
| 595 | 606 | * It will be put by remove_commit_idr_uobject() |
|---|
| 596 | 607 | */ |
|---|
| 597 | | - WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id)); |
|---|
| 598 | | - spin_unlock(&ufile->idr_lock); |
|---|
| 599 | | - |
|---|
| 600 | | - return 0; |
|---|
| 608 | + old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL); |
|---|
| 609 | + WARN_ON(old != NULL); |
|---|
| 601 | 610 | } |
|---|
| 602 | 611 | |
|---|
| 603 | | -static int alloc_commit_fd_uobject(struct ib_uobject *uobj) |
|---|
| 612 | +static void alloc_commit_fd_uobject(struct ib_uobject *uobj) |
|---|
| 604 | 613 | { |
|---|
| 605 | | - const struct uverbs_obj_fd_type *fd_type = container_of( |
|---|
| 606 | | - uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type); |
|---|
| 607 | 614 | int fd = uobj->id; |
|---|
| 608 | | - struct file *filp; |
|---|
| 615 | + struct file *filp = uobj->object; |
|---|
| 609 | 616 | |
|---|
| 610 | | - /* |
|---|
| 611 | | - * The kref for uobj is moved into filp->private data and put in |
|---|
| 612 | | - * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd() |
|---|
| 613 | | - * must be guaranteed to be called from the provided fops release |
|---|
| 614 | | - * callback. |
|---|
| 615 | | - */ |
|---|
| 616 | | - filp = anon_inode_getfile(fd_type->name, |
|---|
| 617 | | - fd_type->fops, |
|---|
| 618 | | - uobj, |
|---|
| 619 | | - fd_type->flags); |
|---|
| 620 | | - if (IS_ERR(filp)) |
|---|
| 621 | | - return PTR_ERR(filp); |
|---|
| 622 | | - |
|---|
| 623 | | - uobj->object = filp; |
|---|
| 624 | | - |
|---|
| 625 | | - /* Matching put will be done in uverbs_close_fd() */ |
|---|
| 617 | + /* Matching put will be done in uverbs_uobject_fd_release() */ |
|---|
| 626 | 618 | kref_get(&uobj->ufile->ref); |
|---|
| 627 | 619 | |
|---|
| 628 | 620 | /* This shouldn't be used anymore. Use the file object instead */ |
|---|
| .. | .. |
|---|
| 630 | 622 | |
|---|
| 631 | 623 | /* |
|---|
| 632 | 624 | * NOTE: Once we install the file we lose ownership of our kref on |
|---|
| 633 | | - * uobj. It will be put by uverbs_close_fd() |
|---|
| 625 | + * uobj. It will be put by uverbs_uobject_fd_release() |
|---|
| 634 | 626 | */ |
|---|
| 627 | + filp->private_data = uobj; |
|---|
| 635 | 628 | fd_install(fd, filp); |
|---|
| 636 | | - |
|---|
| 637 | | - return 0; |
|---|
| 638 | 629 | } |
|---|
| 639 | 630 | |
|---|
| 640 | 631 | /* |
|---|
| .. | .. |
|---|
| 642 | 633 | * caller can no longer assume uobj is valid. If this function fails it |
|---|
| 643 | 634 | * destroys the uobject, including the attached HW object. |
|---|
| 644 | 635 | */ |
|---|
| 645 | | -int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj) |
|---|
| 636 | +void rdma_alloc_commit_uobject(struct ib_uobject *uobj, |
|---|
| 637 | + struct uverbs_attr_bundle *attrs) |
|---|
| 646 | 638 | { |
|---|
| 647 | | - struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 648 | | - int ret; |
|---|
| 649 | | - |
|---|
| 650 | | - /* alloc_commit consumes the uobj kref */ |
|---|
| 651 | | - ret = uobj->uapi_object->type_class->alloc_commit(uobj); |
|---|
| 652 | | - if (ret) { |
|---|
| 653 | | - uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT); |
|---|
| 654 | | - up_read(&ufile->hw_destroy_rwsem); |
|---|
| 655 | | - return ret; |
|---|
| 656 | | - } |
|---|
| 639 | + struct ib_uverbs_file *ufile = attrs->ufile; |
|---|
| 657 | 640 | |
|---|
| 658 | 641 | /* kref is held so long as the uobj is on the uobj list. */ |
|---|
| 659 | 642 | uverbs_uobject_get(uobj); |
|---|
| .. | .. |
|---|
| 664 | 647 | /* matches atomic_set(-1) in alloc_uobj */ |
|---|
| 665 | 648 | atomic_set(&uobj->usecnt, 0); |
|---|
| 666 | 649 | |
|---|
| 650 | + /* alloc_commit consumes the uobj kref */ |
|---|
| 651 | + uobj->uapi_object->type_class->alloc_commit(uobj); |
|---|
| 652 | + |
|---|
| 667 | 653 | /* Matches the down_read in rdma_alloc_begin_uobject */ |
|---|
| 668 | 654 | up_read(&ufile->hw_destroy_rwsem); |
|---|
| 669 | | - |
|---|
| 670 | | - return 0; |
|---|
| 671 | 655 | } |
|---|
| 672 | 656 | |
|---|
| 673 | 657 | /* |
|---|
| 674 | 658 | * This consumes the kref for uobj. It is up to the caller to unwind the HW |
|---|
| 675 | 659 | * object and anything else connected to uobj before calling this. |
|---|
| 676 | 660 | */ |
|---|
| 677 | | -void rdma_alloc_abort_uobject(struct ib_uobject *uobj) |
|---|
| 661 | +void rdma_alloc_abort_uobject(struct ib_uobject *uobj, |
|---|
| 662 | + struct uverbs_attr_bundle *attrs, |
|---|
| 663 | + bool hw_obj_valid) |
|---|
| 678 | 664 | { |
|---|
| 679 | 665 | struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 666 | + int ret; |
|---|
| 680 | 667 | |
|---|
| 681 | | - uobj->object = NULL; |
|---|
| 682 | | - uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT); |
|---|
| 668 | + if (hw_obj_valid) { |
|---|
| 669 | + ret = uobj->uapi_object->type_class->destroy_hw( |
|---|
| 670 | + uobj, RDMA_REMOVE_ABORT, attrs); |
|---|
| 671 | + /* |
|---|
| 672 | + * If the driver couldn't destroy the object then go ahead and |
|---|
| 673 | + * commit it. Leaking objects that can't be destroyed is only |
|---|
| 674 | + * done during FD close after the driver has a few more tries to |
|---|
| 675 | + * destroy it. |
|---|
| 676 | + */ |
|---|
| 677 | + if (WARN_ON(ret)) |
|---|
| 678 | + return rdma_alloc_commit_uobject(uobj, attrs); |
|---|
| 679 | + } |
|---|
| 680 | + |
|---|
| 681 | + uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs); |
|---|
| 683 | 682 | |
|---|
| 684 | 683 | /* Matches the down_read in rdma_alloc_begin_uobject */ |
|---|
| 685 | 684 | up_read(&ufile->hw_destroy_rwsem); |
|---|
| .. | .. |
|---|
| 696 | 695 | struct file *filp = uobj->object; |
|---|
| 697 | 696 | |
|---|
| 698 | 697 | WARN_ON(mode != UVERBS_LOOKUP_READ); |
|---|
| 699 | | - /* This indirectly calls uverbs_close_fd and free the object */ |
|---|
| 698 | + /* |
|---|
| 699 | + * This indirectly calls uverbs_uobject_fd_release() and free the |
|---|
| 700 | + * object |
|---|
| 701 | + */ |
|---|
| 700 | 702 | fput(filp); |
|---|
| 701 | 703 | } |
|---|
| 702 | 704 | |
|---|
| .. | .. |
|---|
| 727 | 729 | |
|---|
| 728 | 730 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile) |
|---|
| 729 | 731 | { |
|---|
| 730 | | - spin_lock_init(&ufile->idr_lock); |
|---|
| 731 | | - idr_init(&ufile->idr); |
|---|
| 732 | + xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC); |
|---|
| 732 | 733 | } |
|---|
| 733 | 734 | |
|---|
| 734 | 735 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile) |
|---|
| 735 | 736 | { |
|---|
| 736 | 737 | struct ib_uobject *entry; |
|---|
| 737 | | - int id; |
|---|
| 738 | + unsigned long id; |
|---|
| 738 | 739 | |
|---|
| 739 | 740 | /* |
|---|
| 740 | 741 | * At this point uverbs_cleanup_ufile() is guaranteed to have run, and |
|---|
| 741 | | - * there are no HW objects left, however the IDR is still populated |
|---|
| 742 | + * there are no HW objects left, however the xarray is still populated |
|---|
| 742 | 743 | * with anything that has not been cleaned up by userspace. Since the |
|---|
| 743 | 744 | * kref on ufile is 0, nothing is allowed to call lookup_get. |
|---|
| 744 | 745 | * |
|---|
| 745 | 746 | * This is an optimized equivalent to remove_handle_idr_uobject |
|---|
| 746 | 747 | */ |
|---|
| 747 | | - idr_for_each_entry(&ufile->idr, entry, id) { |
|---|
| 748 | + xa_for_each(&ufile->idr, id, entry) { |
|---|
| 748 | 749 | WARN_ON(entry->object); |
|---|
| 749 | 750 | uverbs_uobject_put(entry); |
|---|
| 750 | 751 | } |
|---|
| 751 | 752 | |
|---|
| 752 | | - idr_destroy(&ufile->idr); |
|---|
| 753 | + xa_destroy(&ufile->idr); |
|---|
| 753 | 754 | } |
|---|
| 754 | 755 | |
|---|
| 755 | 756 | const struct uverbs_obj_type_class uverbs_idr_class = { |
|---|
| .. | .. |
|---|
| 760 | 761 | .lookup_put = lookup_put_idr_uobject, |
|---|
| 761 | 762 | .destroy_hw = destroy_hw_idr_uobject, |
|---|
| 762 | 763 | .remove_handle = remove_handle_idr_uobject, |
|---|
| 763 | | - /* |
|---|
| 764 | | - * When we destroy an object, we first just lock it for WRITE and |
|---|
| 765 | | - * actually DESTROY it in the finalize stage. So, the problematic |
|---|
| 766 | | - * scenario is when we just started the finalize stage of the |
|---|
| 767 | | - * destruction (nothing was executed yet). Now, the other thread |
|---|
| 768 | | - * fetched the object for READ access, but it didn't lock it yet. |
|---|
| 769 | | - * The DESTROY thread continues and starts destroying the object. |
|---|
| 770 | | - * When the other thread continue - without the RCU, it would |
|---|
| 771 | | - * access freed memory. However, the rcu_read_lock delays the free |
|---|
| 772 | | - * until the rcu_read_lock of the READ operation quits. Since the |
|---|
| 773 | | - * exclusive lock of the object is still taken by the DESTROY flow, the |
|---|
| 774 | | - * READ operation will get -EBUSY and it'll just bail out. |
|---|
| 775 | | - */ |
|---|
| 776 | | - .needs_kfree_rcu = true, |
|---|
| 777 | 764 | }; |
|---|
| 778 | 765 | EXPORT_SYMBOL(uverbs_idr_class); |
|---|
| 779 | 766 | |
|---|
| 780 | | -void uverbs_close_fd(struct file *f) |
|---|
| 767 | +/* |
|---|
| 768 | + * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct |
|---|
| 769 | + * file_operations release method. |
|---|
| 770 | + */ |
|---|
| 771 | +int uverbs_uobject_fd_release(struct inode *inode, struct file *filp) |
|---|
| 781 | 772 | { |
|---|
| 782 | | - struct ib_uobject *uobj = f->private_data; |
|---|
| 783 | | - struct ib_uverbs_file *ufile = uobj->ufile; |
|---|
| 773 | + struct ib_uverbs_file *ufile; |
|---|
| 774 | + struct ib_uobject *uobj; |
|---|
| 775 | + |
|---|
| 776 | + /* |
|---|
| 777 | + * This can only happen if the fput came from alloc_abort_fd_uobject() |
|---|
| 778 | + */ |
|---|
| 779 | + if (!filp->private_data) |
|---|
| 780 | + return 0; |
|---|
| 781 | + uobj = filp->private_data; |
|---|
| 782 | + ufile = uobj->ufile; |
|---|
| 784 | 783 | |
|---|
| 785 | 784 | if (down_read_trylock(&ufile->hw_destroy_rwsem)) { |
|---|
| 785 | + struct uverbs_attr_bundle attrs = { |
|---|
| 786 | + .context = uobj->context, |
|---|
| 787 | + .ufile = ufile, |
|---|
| 788 | + }; |
|---|
| 789 | + |
|---|
| 786 | 790 | /* |
|---|
| 787 | 791 | * lookup_get_fd_uobject holds the kref on the struct file any |
|---|
| 788 | 792 | * time a FD uobj is locked, which prevents this release |
|---|
| .. | .. |
|---|
| 790 | 794 | * write lock here, or we have a kernel bug. |
|---|
| 791 | 795 | */ |
|---|
| 792 | 796 | WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)); |
|---|
| 793 | | - uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE); |
|---|
| 797 | + uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs); |
|---|
| 794 | 798 | up_read(&ufile->hw_destroy_rwsem); |
|---|
| 795 | 799 | } |
|---|
| 796 | 800 | |
|---|
| 797 | | - /* Matches the get in alloc_begin_fd_uobject */ |
|---|
| 801 | + /* Matches the get in alloc_commit_fd_uobject() */ |
|---|
| 798 | 802 | kref_put(&ufile->ref, ib_uverbs_release_file); |
|---|
| 799 | 803 | |
|---|
| 800 | 804 | /* Pairs with filp->private_data in alloc_begin_fd_uobject */ |
|---|
| 801 | 805 | uverbs_uobject_put(uobj); |
|---|
| 806 | + return 0; |
|---|
| 802 | 807 | } |
|---|
| 803 | | - |
|---|
| 804 | | -static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext) |
|---|
| 805 | | -{ |
|---|
| 806 | | - struct ib_device *ib_dev = ibcontext->device; |
|---|
| 807 | | - struct task_struct *owning_process = NULL; |
|---|
| 808 | | - struct mm_struct *owning_mm = NULL; |
|---|
| 809 | | - |
|---|
| 810 | | - owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); |
|---|
| 811 | | - if (!owning_process) |
|---|
| 812 | | - return; |
|---|
| 813 | | - |
|---|
| 814 | | - owning_mm = get_task_mm(owning_process); |
|---|
| 815 | | - if (!owning_mm) { |
|---|
| 816 | | - pr_info("no mm, disassociate ucontext is pending task termination\n"); |
|---|
| 817 | | - while (1) { |
|---|
| 818 | | - put_task_struct(owning_process); |
|---|
| 819 | | - usleep_range(1000, 2000); |
|---|
| 820 | | - owning_process = get_pid_task(ibcontext->tgid, |
|---|
| 821 | | - PIDTYPE_PID); |
|---|
| 822 | | - if (!owning_process || |
|---|
| 823 | | - owning_process->state == TASK_DEAD) { |
|---|
| 824 | | - pr_info("disassociate ucontext done, task was terminated\n"); |
|---|
| 825 | | - /* in case task was dead need to release the |
|---|
| 826 | | - * task struct. |
|---|
| 827 | | - */ |
|---|
| 828 | | - if (owning_process) |
|---|
| 829 | | - put_task_struct(owning_process); |
|---|
| 830 | | - return; |
|---|
| 831 | | - } |
|---|
| 832 | | - } |
|---|
| 833 | | - } |
|---|
| 834 | | - |
|---|
| 835 | | - down_write(&owning_mm->mmap_sem); |
|---|
| 836 | | - ib_dev->disassociate_ucontext(ibcontext); |
|---|
| 837 | | - up_write(&owning_mm->mmap_sem); |
|---|
| 838 | | - mmput(owning_mm); |
|---|
| 839 | | - put_task_struct(owning_process); |
|---|
| 840 | | -} |
|---|
| 808 | +EXPORT_SYMBOL(uverbs_uobject_fd_release); |
|---|
| 841 | 809 | |
|---|
| 842 | 810 | /* |
|---|
| 843 | 811 | * Drop the ucontext off the ufile and completely disconnect it from the |
|---|
| .. | .. |
|---|
| 847 | 815 | enum rdma_remove_reason reason) |
|---|
| 848 | 816 | { |
|---|
| 849 | 817 | struct ib_ucontext *ucontext = ufile->ucontext; |
|---|
| 850 | | - int ret; |
|---|
| 851 | | - |
|---|
| 852 | | - if (reason == RDMA_REMOVE_DRIVER_REMOVE) |
|---|
| 853 | | - ufile_disassociate_ucontext(ucontext); |
|---|
| 854 | | - |
|---|
| 855 | | - put_pid(ucontext->tgid); |
|---|
| 856 | | - ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device, |
|---|
| 857 | | - RDMACG_RESOURCE_HCA_HANDLE); |
|---|
| 818 | + struct ib_device *ib_dev = ucontext->device; |
|---|
| 858 | 819 | |
|---|
| 859 | 820 | /* |
|---|
| 860 | | - * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove |
|---|
| 861 | | - * the error return. |
|---|
| 821 | + * If we are closing the FD then the user mmap VMAs must have |
|---|
| 822 | + * already been destroyed as they hold on to the filep, otherwise |
|---|
| 823 | + * they need to be zap'd. |
|---|
| 862 | 824 | */ |
|---|
| 863 | | - ret = ucontext->device->dealloc_ucontext(ucontext); |
|---|
| 864 | | - WARN_ON(ret); |
|---|
| 825 | + if (reason == RDMA_REMOVE_DRIVER_REMOVE) { |
|---|
| 826 | + uverbs_user_mmap_disassociate(ufile); |
|---|
| 827 | + if (ib_dev->ops.disassociate_ucontext) |
|---|
| 828 | + ib_dev->ops.disassociate_ucontext(ucontext); |
|---|
| 829 | + } |
|---|
| 830 | + |
|---|
| 831 | + ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev, |
|---|
| 832 | + RDMACG_RESOURCE_HCA_HANDLE); |
|---|
| 833 | + |
|---|
| 834 | + rdma_restrack_del(&ucontext->res); |
|---|
| 835 | + |
|---|
| 836 | + ib_dev->ops.dealloc_ucontext(ucontext); |
|---|
| 837 | + WARN_ON(!xa_empty(&ucontext->mmap_xa)); |
|---|
| 838 | + kfree(ucontext); |
|---|
| 865 | 839 | |
|---|
| 866 | 840 | ufile->ucontext = NULL; |
|---|
| 867 | 841 | } |
|---|
| .. | .. |
|---|
| 871 | 845 | { |
|---|
| 872 | 846 | struct ib_uobject *obj, *next_obj; |
|---|
| 873 | 847 | int ret = -EINVAL; |
|---|
| 848 | + struct uverbs_attr_bundle attrs = { .ufile = ufile }; |
|---|
| 874 | 849 | |
|---|
| 875 | 850 | /* |
|---|
| 876 | 851 | * This shouldn't run while executing other commands on this |
|---|
| .. | .. |
|---|
| 882 | 857 | * other threads (which might still use the FDs) chance to run. |
|---|
| 883 | 858 | */ |
|---|
| 884 | 859 | list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) { |
|---|
| 860 | + attrs.context = obj->context; |
|---|
| 885 | 861 | /* |
|---|
| 886 | 862 | * if we hit this WARN_ON, that means we are |
|---|
| 887 | 863 | * racing with a lookup_get. |
|---|
| 888 | 864 | */ |
|---|
| 889 | 865 | WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); |
|---|
| 890 | | - if (!uverbs_destroy_uobject(obj, reason)) |
|---|
| 866 | + if (!uverbs_destroy_uobject(obj, reason, &attrs)) |
|---|
| 891 | 867 | ret = 0; |
|---|
| 892 | 868 | else |
|---|
| 893 | 869 | atomic_set(&obj->usecnt, 0); |
|---|
| .. | .. |
|---|
| 896 | 872 | } |
|---|
| 897 | 873 | |
|---|
| 898 | 874 | /* |
|---|
| 899 | | - * Destroy the uncontext and every uobject associated with it. If called with |
|---|
| 900 | | - * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has |
|---|
| 901 | | - * been completed and ufile->ucontext is NULL. |
|---|
| 875 | + * Destroy the ucontext and every uobject associated with it. |
|---|
| 902 | 876 | * |
|---|
| 903 | 877 | * This is internally locked and can be called in parallel from multiple |
|---|
| 904 | 878 | * contexts. |
|---|
| .. | .. |
|---|
| 906 | 880 | void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile, |
|---|
| 907 | 881 | enum rdma_remove_reason reason) |
|---|
| 908 | 882 | { |
|---|
| 909 | | - if (reason == RDMA_REMOVE_CLOSE) { |
|---|
| 910 | | - /* |
|---|
| 911 | | - * During destruction we might trigger something that |
|---|
| 912 | | - * synchronously calls release on any file descriptor. For |
|---|
| 913 | | - * this reason all paths that come from file_operations |
|---|
| 914 | | - * release must use try_lock. They can progress knowing that |
|---|
| 915 | | - * there is an ongoing uverbs_destroy_ufile_hw that will clean |
|---|
| 916 | | - * up the driver resources. |
|---|
| 917 | | - */ |
|---|
| 918 | | - if (!mutex_trylock(&ufile->ucontext_lock)) |
|---|
| 919 | | - return; |
|---|
| 920 | | - |
|---|
| 921 | | - } else { |
|---|
| 922 | | - mutex_lock(&ufile->ucontext_lock); |
|---|
| 923 | | - } |
|---|
| 924 | | - |
|---|
| 925 | 883 | down_write(&ufile->hw_destroy_rwsem); |
|---|
| 926 | 884 | |
|---|
| 927 | 885 | /* |
|---|
| .. | .. |
|---|
| 931 | 889 | if (!ufile->ucontext) |
|---|
| 932 | 890 | goto done; |
|---|
| 933 | 891 | |
|---|
| 934 | | - ufile->ucontext->closing = true; |
|---|
| 935 | 892 | ufile->ucontext->cleanup_retryable = true; |
|---|
| 936 | 893 | while (!list_empty(&ufile->uobjects)) |
|---|
| 937 | 894 | if (__uverbs_cleanup_ufile(ufile, reason)) { |
|---|
| 938 | 895 | /* |
|---|
| 939 | 896 | * No entry was cleaned-up successfully during this |
|---|
| 940 | | - * iteration |
|---|
| 897 | + * iteration. It is a driver bug to fail destruction. |
|---|
| 941 | 898 | */ |
|---|
| 899 | + WARN_ON(!list_empty(&ufile->uobjects)); |
|---|
| 942 | 900 | break; |
|---|
| 943 | 901 | } |
|---|
| 944 | 902 | |
|---|
| .. | .. |
|---|
| 950 | 908 | |
|---|
| 951 | 909 | done: |
|---|
| 952 | 910 | up_write(&ufile->hw_destroy_rwsem); |
|---|
| 953 | | - mutex_unlock(&ufile->ucontext_lock); |
|---|
| 954 | 911 | } |
|---|
| 955 | 912 | |
|---|
| 956 | 913 | const struct uverbs_obj_type_class uverbs_fd_class = { |
|---|
| .. | .. |
|---|
| 961 | 918 | .lookup_put = lookup_put_fd_uobject, |
|---|
| 962 | 919 | .destroy_hw = destroy_hw_fd_uobject, |
|---|
| 963 | 920 | .remove_handle = remove_handle_fd_uobject, |
|---|
| 964 | | - .needs_kfree_rcu = false, |
|---|
| 965 | 921 | }; |
|---|
| 966 | 922 | EXPORT_SYMBOL(uverbs_fd_class); |
|---|
| 967 | 923 | |
|---|
| 968 | 924 | struct ib_uobject * |
|---|
| 969 | | -uverbs_get_uobject_from_file(u16 object_id, |
|---|
| 970 | | - struct ib_uverbs_file *ufile, |
|---|
| 971 | | - enum uverbs_obj_access access, s64 id) |
|---|
| 925 | +uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access, |
|---|
| 926 | + s64 id, struct uverbs_attr_bundle *attrs) |
|---|
| 972 | 927 | { |
|---|
| 973 | 928 | const struct uverbs_api_object *obj = |
|---|
| 974 | | - uapi_get_object(ufile->device->uapi, object_id); |
|---|
| 929 | + uapi_get_object(attrs->ufile->device->uapi, object_id); |
|---|
| 975 | 930 | |
|---|
| 976 | 931 | switch (access) { |
|---|
| 977 | 932 | case UVERBS_ACCESS_READ: |
|---|
| 978 | | - return rdma_lookup_get_uobject(obj, ufile, id, |
|---|
| 979 | | - UVERBS_LOOKUP_READ); |
|---|
| 933 | + return rdma_lookup_get_uobject(obj, attrs->ufile, id, |
|---|
| 934 | + UVERBS_LOOKUP_READ, attrs); |
|---|
| 980 | 935 | case UVERBS_ACCESS_DESTROY: |
|---|
| 981 | 936 | /* Actual destruction is done inside uverbs_handle_method */ |
|---|
| 982 | | - return rdma_lookup_get_uobject(obj, ufile, id, |
|---|
| 983 | | - UVERBS_LOOKUP_DESTROY); |
|---|
| 937 | + return rdma_lookup_get_uobject(obj, attrs->ufile, id, |
|---|
| 938 | + UVERBS_LOOKUP_DESTROY, attrs); |
|---|
| 984 | 939 | case UVERBS_ACCESS_WRITE: |
|---|
| 985 | | - return rdma_lookup_get_uobject(obj, ufile, id, |
|---|
| 986 | | - UVERBS_LOOKUP_WRITE); |
|---|
| 940 | + return rdma_lookup_get_uobject(obj, attrs->ufile, id, |
|---|
| 941 | + UVERBS_LOOKUP_WRITE, attrs); |
|---|
| 987 | 942 | case UVERBS_ACCESS_NEW: |
|---|
| 988 | | - return rdma_alloc_begin_uobject(obj, ufile); |
|---|
| 943 | + return rdma_alloc_begin_uobject(obj, attrs); |
|---|
| 989 | 944 | default: |
|---|
| 990 | 945 | WARN_ON(true); |
|---|
| 991 | 946 | return ERR_PTR(-EOPNOTSUPP); |
|---|
| 992 | 947 | } |
|---|
| 993 | 948 | } |
|---|
| 994 | 949 | |
|---|
| 995 | | -int uverbs_finalize_object(struct ib_uobject *uobj, |
|---|
| 996 | | - enum uverbs_obj_access access, |
|---|
| 997 | | - bool commit) |
|---|
| 950 | +void uverbs_finalize_object(struct ib_uobject *uobj, |
|---|
| 951 | + enum uverbs_obj_access access, bool hw_obj_valid, |
|---|
| 952 | + bool commit, struct uverbs_attr_bundle *attrs) |
|---|
| 998 | 953 | { |
|---|
| 999 | | - int ret = 0; |
|---|
| 1000 | | - |
|---|
| 1001 | 954 | /* |
|---|
| 1002 | 955 | * refcounts should be handled at the object level and not at the |
|---|
| 1003 | 956 | * uobject level. Refcounts of the objects themselves are done in |
|---|
| .. | .. |
|---|
| 1017 | 970 | break; |
|---|
| 1018 | 971 | case UVERBS_ACCESS_NEW: |
|---|
| 1019 | 972 | if (commit) |
|---|
| 1020 | | - ret = rdma_alloc_commit_uobject(uobj); |
|---|
| 973 | + rdma_alloc_commit_uobject(uobj, attrs); |
|---|
| 1021 | 974 | else |
|---|
| 1022 | | - rdma_alloc_abort_uobject(uobj); |
|---|
| 975 | + rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid); |
|---|
| 1023 | 976 | break; |
|---|
| 1024 | 977 | default: |
|---|
| 1025 | 978 | WARN_ON(true); |
|---|
| 1026 | | - ret = -EOPNOTSUPP; |
|---|
| 1027 | 979 | } |
|---|
| 1028 | | - |
|---|
| 1029 | | - return ret; |
|---|
| 1030 | 980 | } |
|---|