@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *	- Redistributions of source code must retain the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer.
- *
- *	- Redistributions in binary form must reproduce the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer in the documentation and/or other materials
- *	  provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #include "rxe.h"
@@ -42,20 +15,22 @@
 	[RXE_TYPE_UC] = {
 		.name		= "rxe-uc",
 		.size		= sizeof(struct rxe_ucontext),
+		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_PD] = {
 		.name		= "rxe-pd",
 		.size		= sizeof(struct rxe_pd),
+		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_AH] = {
 		.name		= "rxe-ah",
 		.size		= sizeof(struct rxe_ah),
-		.flags		= RXE_POOL_ATOMIC,
+		.flags		= RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_SRQ] = {
 		.name		= "rxe-srq",
 		.size		= sizeof(struct rxe_srq),
-		.flags		= RXE_POOL_INDEX,
+		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_SRQ_INDEX,
 		.max_index	= RXE_MAX_SRQ_INDEX,
 	},
@@ -70,6 +45,7 @@
 	[RXE_TYPE_CQ] = {
 		.name		= "rxe-cq",
 		.size		= sizeof(struct rxe_cq),
+		.flags		= RXE_POOL_NO_ALLOC,
 		.cleanup	= rxe_cq_cleanup,
 	},
 	[RXE_TYPE_MR] = {
@@ -105,57 +81,6 @@
 static inline const char *pool_name(struct rxe_pool *pool)
 {
 	return rxe_type_info[pool->type].name;
-}
-
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
-	return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
-	int i;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < cnt; i++) {
-		type = &rxe_type_info[i];
-		kmem_cache_destroy(type->cache);
-		type->cache = NULL;
-	}
-}
-
-int rxe_cache_init(void)
-{
-	int err;
-	int i;
-	size_t size;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < RXE_NUM_TYPES; i++) {
-		type = &rxe_type_info[i];
-		size = ALIGN(type->size, RXE_POOL_ALIGN);
-		type->cache = kmem_cache_create(type->name, size,
-				RXE_POOL_ALIGN,
-				RXE_POOL_CACHE_FLAGS, NULL);
-		if (!type->cache) {
-			pr_err("Unable to init kmem cache for %s\n",
-			       type->name);
-			err = -ENOMEM;
-			goto err1;
-		}
-	}
-
-	return 0;
-
-err1:
-	rxe_cache_clean(i);
-
-	return err;
-}
-
-void rxe_cache_exit(void)
-{
-	rxe_cache_clean(RXE_NUM_TYPES);
 }
 
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
@@ -209,7 +134,7 @@
 
 	kref_init(&pool->ref_cnt);
 
-	spin_lock_init(&pool->pool_lock);
+	rwlock_init(&pool->pool_lock);
 
 	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
 		err = rxe_pool_init_index(pool,
@@ -224,7 +149,7 @@
 		pool->key_size = rxe_type_info[type].key_size;
 	}
 
-	pool->state = rxe_pool_valid;
+	pool->state = RXE_POOL_STATE_VALID;
 
 out:
 	return err;
@@ -234,7 +159,7 @@
 {
 	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
 
-	pool->state = rxe_pool_invalid;
+	pool->state = RXE_POOL_STATE_INVALID;
 	kfree(pool->table);
 }
 
@@ -243,20 +168,18 @@
 	kref_put(&pool->ref_cnt, rxe_pool_release);
 }
 
-int rxe_pool_cleanup(struct rxe_pool *pool)
+void rxe_pool_cleanup(struct rxe_pool *pool)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
-	pool->state = rxe_pool_invalid;
+	write_lock_irqsave(&pool->pool_lock, flags);
+	pool->state = RXE_POOL_STATE_INVALID;
 	if (atomic_read(&pool->num_elem) > 0)
 		pr_warn("%s pool destroyed with unfree'd elem\n",
 			pool_name(pool));
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 
 	rxe_pool_put(pool);
-
-	return 0;
 }
 
 static u32 alloc_index(struct rxe_pool *pool)
@@ -338,10 +261,10 @@
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_irqsave(&pool->pool_lock, flags);
 	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
 	insert_key(pool, elem);
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_drop_key(void *arg)
@@ -350,9 +273,9 @@
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_irqsave(&pool->pool_lock, flags);
 	rb_erase(&elem->node, &pool->tree);
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_add_index(void *arg)
@@ -361,10 +284,10 @@
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_irqsave(&pool->pool_lock, flags);
 	elem->index = alloc_index(pool);
 	insert_index(pool, elem);
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_drop_index(void *arg)
@@ -373,10 +296,10 @@
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	write_lock_irqsave(&pool->pool_lock, flags);
 	clear_bit(elem->index - pool->min_index, pool->table);
 	rb_erase(&elem->node, &pool->tree);
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void *rxe_alloc(struct rxe_pool *pool)
@@ -386,35 +309,70 @@
 
 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
-	if (pool->state != rxe_pool_valid) {
-		spin_unlock_irqrestore(&pool->pool_lock, flags);
+	read_lock_irqsave(&pool->pool_lock, flags);
+	if (pool->state != RXE_POOL_STATE_VALID) {
+		read_unlock_irqrestore(&pool->pool_lock, flags);
 		return NULL;
 	}
 	kref_get(&pool->ref_cnt);
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_irqrestore(&pool->pool_lock, flags);
 
-	kref_get(&pool->rxe->ref_cnt);
-
-	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+	if (!ib_device_try_get(&pool->rxe->ib_dev))
 		goto out_put_pool;
 
-	elem = kmem_cache_zalloc(pool_cache(pool),
+	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+		goto out_cnt;
+
+	elem = kzalloc(rxe_type_info[pool->type].size,
 			 (pool->flags & RXE_POOL_ATOMIC) ?
 			 GFP_ATOMIC : GFP_KERNEL);
 	if (!elem)
-		goto out_put_pool;
+		goto out_cnt;
 
 	elem->pool = pool;
 	kref_init(&elem->ref_cnt);
 
 	return elem;
 
-out_put_pool:
+out_cnt:
 	atomic_dec(&pool->num_elem);
-	rxe_dev_put(pool->rxe);
+	ib_device_put(&pool->rxe->ib_dev);
+out_put_pool:
 	rxe_pool_put(pool);
 	return NULL;
+}
+
+int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
+	unsigned long flags;
+
+	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+
+	read_lock_irqsave(&pool->pool_lock, flags);
+	if (pool->state != RXE_POOL_STATE_VALID) {
+		read_unlock_irqrestore(&pool->pool_lock, flags);
+		return -EINVAL;
+	}
+	kref_get(&pool->ref_cnt);
+	read_unlock_irqrestore(&pool->pool_lock, flags);
+
+	if (!ib_device_try_get(&pool->rxe->ib_dev))
+		goto out_put_pool;
+
+	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+		goto out_cnt;
+
+	elem->pool = pool;
+	kref_init(&elem->ref_cnt);
+
+	return 0;
+
+out_cnt:
+	atomic_dec(&pool->num_elem);
+	ib_device_put(&pool->rxe->ib_dev);
+out_put_pool:
+	rxe_pool_put(pool);
+	return -EINVAL;
 }
 
 void rxe_elem_release(struct kref *kref)
@@ -426,9 +384,10 @@
 	if (pool->cleanup)
 		pool->cleanup(elem);
 
-	kmem_cache_free(pool_cache(pool), elem);
+	if (!(pool->flags & RXE_POOL_NO_ALLOC))
+		kfree(elem);
 	atomic_dec(&pool->num_elem);
-	rxe_dev_put(pool->rxe);
+	ib_device_put(&pool->rxe->ib_dev);
 	rxe_pool_put(pool);
 }
 
@@ -438,9 +397,9 @@
 	struct rxe_pool_entry *elem = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_irqsave(&pool->pool_lock, flags);
 
-	if (pool->state != rxe_pool_valid)
+	if (pool->state != RXE_POOL_STATE_VALID)
 		goto out;
 
 	node = pool->tree.rb_node;
@@ -452,15 +411,14 @@
 			node = node->rb_left;
 		else if (elem->index < index)
 			node = node->rb_right;
-		else
+		else {
+			kref_get(&elem->ref_cnt);
 			break;
+		}
 	}
 
-	if (node)
-		kref_get(&elem->ref_cnt);
-
 out:
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_irqrestore(&pool->pool_lock, flags);
 	return node ? elem : NULL;
 }
 
@@ -471,9 +429,9 @@
 	int cmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->pool_lock, flags);
+	read_lock_irqsave(&pool->pool_lock, flags);
 
-	if (pool->state != rxe_pool_valid)
+	if (pool->state != RXE_POOL_STATE_VALID)
 		goto out;
 
 	node = pool->tree.rb_node;
@@ -496,6 +454,6 @@
 		kref_get(&elem->ref_cnt);
 
 out:
-	spin_unlock_irqrestore(&pool->pool_lock, flags);
+	read_unlock_irqrestore(&pool->pool_lock, flags);
 	return node ? elem : NULL;
 }
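
Taken together, RXE_POOL_NO_ALLOC and rxe_add_to_pool() split memory ownership from pool membership: for the flagged types the object is allocated elsewhere (typically by the RDMA core alongside its ib_* structure), it embeds a struct rxe_pool_entry, and rxe_elem_release() accordingly skips the kfree(). A minimal sketch of a caller on that path follows; it is illustrative only and not part of this diff — the example_* names, the pelem field, and the assumption that to_rdev() and rxe->pd_pool are available from the rxe driver headers are all stand-ins for the real verbs-layer code.

/* Sketch only: assumes it lives next to the rxe verbs code and can
 * see "rxe.h"/"rxe_verbs.h" for struct rxe_dev, struct rxe_pool_entry,
 * rxe_add_to_pool() and to_rdev().
 */
struct example_rxe_pd {
	struct ib_pd		ibpd;	/* allocated by the RDMA core, not by the pool */
	struct rxe_pool_entry	pelem;	/* embedded entry; pool will not kfree() it */
};

static int example_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct example_rxe_pd *pd =
		container_of(ibpd, struct example_rxe_pd, ibpd);

	/* No allocation here: rxe_add_to_pool() only takes the pool and
	 * device references and initializes the embedded entry.  Because
	 * the PD pool is marked RXE_POOL_NO_ALLOC, rxe_elem_release()
	 * later drops those references without freeing the memory, which
	 * stays owned by whoever allocated ibpd.
	 */
	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}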
---|