...
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/net/sunrpc/sched.c
  *
...
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/sched/mm.h>

 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/metrics.h>

 #include "sunrpc.h"
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-#define RPCDBG_FACILITY RPCDBG_SCHED
-#endif

 #define CREATE_TRACE_POINTS
 #include <trace/events/sunrpc.h>
...

 static void rpc_async_schedule(struct work_struct *);
 static void rpc_release_task(struct rpc_task *task);
-static void __rpc_queue_timer_fn(struct timer_list *t);
+static void __rpc_queue_timer_fn(struct work_struct *);

 /*
  * RPC tasks sit here while waiting for conditions to improve.
|---|
...
  */
 struct workqueue_struct *rpciod_workqueue __read_mostly;
 struct workqueue_struct *xprtiod_workqueue __read_mostly;
+EXPORT_SYMBOL_GPL(xprtiod_workqueue);
+
+unsigned long
+rpc_task_timeout(const struct rpc_task *task)
+{
+	unsigned long timeout = READ_ONCE(task->tk_timeout);
+
+	if (timeout != 0) {
+		unsigned long now = jiffies;
+		if (time_before(now, timeout))
+			return timeout - now;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rpc_task_timeout);

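The new rpc_task_timeout() helper converts the task's absolute tk_timeout
deadline into the interval remaining, returning 0 when no timeout is pending
or the deadline has already passed. A minimal sketch of a hypothetical caller
(report_timeout is illustrative, not part of this patch):

	/* Hypothetical: log how long @task has before it times out. */
	static void report_timeout(const struct rpc_task *task)
	{
		unsigned long remaining = rpc_task_timeout(task);

		if (remaining == 0)
			pr_debug("no pending timeout (or already expired)\n");
		else
			pr_debug("task times out in %u ms\n",
				 jiffies_to_msecs(remaining));
	}
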
|---|
 /*
  * Disable the timer for a given RPC task. Should be called with
...
 static void
 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	if (task->tk_timeout == 0)
+	if (list_empty(&task->u.tk_wait.timer_list))
 		return;
-	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
 	task->tk_timeout = 0;
 	list_del(&task->u.tk_wait.timer_list);
 	if (list_empty(&queue->timer_list.list))
-		del_timer(&queue->timer_list.timer);
+		cancel_delayed_work(&queue->timer_list.dwork);
 }

 static void
 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
 {
+	unsigned long now = jiffies;
 	queue->timer_list.expires = expires;
-	mod_timer(&queue->timer_list.timer, expires);
+	if (time_before_eq(expires, now))
+		expires = 0;
+	else
+		expires -= now;
+	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 }

 /*
  * Set up a timer for the current task.
  */
 static void
-__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
+__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
+		unsigned long timeout)
 {
-	if (!task->tk_timeout)
-		return;
-
-	dprintk("RPC: %5u setting alarm for %u ms\n",
-		task->tk_pid, jiffies_to_msecs(task->tk_timeout));
-
-	task->u.tk_wait.expires = jiffies + task->tk_timeout;
-	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
-		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
+	task->tk_timeout = timeout;
+	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
+		rpc_set_queue_timer(queue, timeout);
 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }

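Because mod_delayed_work() takes a relative delay while the queue keeps an
absolute expires value in jiffies, rpc_set_queue_timer() above clamps
already-expired deadlines to zero. The same pattern in isolation (the
arm_at_deadline name is illustrative only):

	/* Re-arm @dwork on @wq to fire at the absolute jiffies @deadline. */
	static void arm_at_deadline(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long deadline)
	{
		unsigned long now = jiffies;

		/* a relative delay is expected; a past deadline fires at once */
		mod_delayed_work(wq, dwork,
				 time_before_eq(deadline, now) ? 0 : deadline - now);
	}
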
|---|
...

 /*
  * Add new request to wait queue.
- *
- * Swapper tasks always get inserted at the head of the queue.
- * This should avoid many nasty memory deadlocks and hopefully
- * improve overall performance.
- * Everyone else gets appended to the queue to ensure proper FIFO behavior.
  */
 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
 {
-	WARN_ON_ONCE(RPC_IS_QUEUED(task));
-	if (RPC_IS_QUEUED(task))
-		return;
-
+	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 	if (RPC_IS_PRIORITY(queue))
 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
-	else if (RPC_IS_SWAPPER(task))
-		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
...
 	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 	smp_wmb();
 	rpc_set_queued(task);
-
-	dprintk("RPC: %5u added to queue %p \"%s\"\n",
-			task->tk_pid, queue, rpc_qname(queue));
 }

 /*
...
 	else
 		list_del(&task->u.tk_wait.list);
 	queue->qlen--;
-	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
-			task->tk_pid, queue, rpc_qname(queue));
 }

 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
...
 	queue->maxpriority = nr_queues - 1;
 	rpc_reset_waitqueue_priority(queue);
 	queue->qlen = 0;
-	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
+	queue->timer_list.expires = 0;
+	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 	INIT_LIST_HEAD(&queue->timer_list.list);
 	rpc_assign_waitqueue_name(queue, qname);
 }
...

 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 {
-	del_timer_sync(&queue->timer_list.timer);
+	cancel_delayed_work_sync(&queue->timer_list.dwork);
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

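With the per-queue timer replaced by a delayed work item, setup and teardown
now pair INIT_DELAYED_WORK() with cancel_delayed_work_sync() instead of
timer_setup() with del_timer_sync(). The expiry function also runs in process
context rather than softirq context, which is what allows later hunks to relax
spin_lock_bh() to plain spin_lock(). The lifecycle in miniature (demo_* names
are illustrative):

	struct demo_queue {
		struct delayed_work dwork;
	};

	static void demo_expiry_fn(struct work_struct *work)
	{
		/* process context: sleeping is allowed here, unlike a timer */
	}

	static void demo_init(struct demo_queue *q)
	{
		INIT_DELAYED_WORK(&q->dwork, demo_expiry_fn);
	}

	static void demo_destroy(struct demo_queue *q)
	{
		/* wait out a pending or running expiry before freeing */
		cancel_delayed_work_sync(&q->dwork);
	}
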
|---|
...
  * NB: An RPC task will only receive interrupt-driven events as long
  * as it's on a wait queue.
  */
-static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
 		struct rpc_task *task,
-		rpc_action action,
 		unsigned char queue_priority)
 {
-	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
-		task->tk_pid, rpc_qname(q), jiffies);
-
 	trace_rpc_task_sleep(task, q);

 	__rpc_add_wait_queue(q, task, queue_priority);
-
-	WARN_ON_ONCE(task->tk_callback != NULL);
-	task->tk_callback = action;
-	__rpc_add_timer(q, task);
 }
+
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+		struct rpc_task *task,
+		unsigned char queue_priority)
+{
+	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
+		return;
+	__rpc_do_sleep_on_priority(q, task, queue_priority);
+}
+
+static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
+		struct rpc_task *task, unsigned long timeout,
+		unsigned char queue_priority)
+{
+	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
+		return;
+	if (time_is_after_jiffies(timeout)) {
+		__rpc_do_sleep_on_priority(q, task, queue_priority);
+		__rpc_add_timer(q, task, timeout);
+	} else
+		task->tk_status = -ETIMEDOUT;
+}
+
+static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
+{
+	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
+		task->tk_callback = action;
+}
+
+static bool rpc_sleep_check_activated(struct rpc_task *task)
+{
+	/* We shouldn't ever put an inactive task to sleep */
+	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
+		task->tk_status = -EIO;
+		rpc_put_task_async(task);
+		return false;
+	}
+	return true;
+}
+
+void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
+		rpc_action action, unsigned long timeout)
+{
+	if (!rpc_sleep_check_activated(task))
+		return;
+
+	rpc_set_tk_callback(task, action);
+
+	/*
+	 * Protect the queue operations.
+	 */
+	spin_lock(&q->lock);
+	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
+	spin_unlock(&q->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}

+	rpc_set_tk_callback(task, action);
+
+	WARN_ON_ONCE(task->tk_timeout != 0);
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_lock(&q->lock);
+	__rpc_sleep_on_priority(q, task, task->tk_priority);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);

-void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
-		rpc_action action, int priority)
+void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
+		struct rpc_task *task, unsigned long timeout, int priority)
 {
-	/* We shouldn't ever put an inactive task to sleep */
-	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-	if (!RPC_IS_ACTIVATED(task)) {
-		task->tk_status = -EIO;
-		rpc_put_task_async(task);
+	if (!rpc_sleep_check_activated(task))
 		return;
-	}

+	priority -= RPC_PRIORITY_LOW;
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
-	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
-	spin_unlock_bh(&q->lock);
+	spin_lock(&q->lock);
+	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
+	spin_unlock(&q->lock);
+}
+EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
+
+void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
+		int priority)
+{
+	if (!rpc_sleep_check_activated(task))
+		return;
+
+	WARN_ON_ONCE(task->tk_timeout != 0);
+	priority -= RPC_PRIORITY_LOW;
+	/*
+	 * Protect the queue operations.
+	 */
+	spin_lock(&q->lock);
+	__rpc_sleep_on_priority(q, task, priority);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

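Note that the new *_timeout variants take an absolute deadline in jiffies
rather than a relative delay, and a deadline already in the past fails the
sleep immediately with -ETIMEDOUT. A hedged usage sketch (queue and task
assumed initialized; a NULL action is accepted by rpc_set_tk_callback()):

	static void demo_sleep_five_seconds(struct rpc_wait_queue *q,
					    struct rpc_task *task)
	{
		/* absolute deadline: five seconds from now */
		rpc_sleep_on_timeout(q, task, NULL, jiffies + 5 * HZ);
	}
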
|---|
...
 		struct rpc_wait_queue *queue,
 		struct rpc_task *task)
 {
-	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
-		task->tk_pid, jiffies);
-
 	/* Has the task been executed yet? If not, we cannot wake it up! */
 	if (!RPC_IS_ACTIVATED(task)) {
 		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
...
 	__rpc_remove_wait_queue(queue, task);

 	rpc_make_runnable(wq, task);
-
-	dprintk("RPC: __rpc_wake_up_task done\n");
 }

 /*
  * Wake up a queued task while the queue lock is being held
  */
-static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
-		struct rpc_wait_queue *queue, struct rpc_task *task)
+static struct rpc_task *
+rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
+		struct rpc_wait_queue *queue, struct rpc_task *task,
+		bool (*action)(struct rpc_task *, void *), void *data)
 {
 	if (RPC_IS_QUEUED(task)) {
 		smp_rmb();
-		if (task->tk_waitqueue == queue)
-			__rpc_do_wake_up_task_on_wq(wq, queue, task);
+		if (task->tk_waitqueue == queue) {
+			if (action == NULL || action(task, data)) {
+				__rpc_do_wake_up_task_on_wq(wq, queue, task);
+				return task;
+			}
+		}
 	}
+	return NULL;
 }

 /*
  * Wake up a queued task while the queue lock is being held
  */
-static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
+		struct rpc_task *task)
 {
-	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
-}
-
-/*
- * Wake up a task on a specific queue
- */
-void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
-		struct rpc_wait_queue *queue,
-		struct rpc_task *task)
-{
-	spin_lock_bh(&queue->lock);
-	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
-	spin_unlock_bh(&queue->lock);
+	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
+			task, NULL, NULL);
 }

 /*
...
  */
 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	spin_lock_bh(&queue->lock);
+	if (!RPC_IS_QUEUED(task))
+		return;
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_locked(queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
+
+static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
+{
+	task->tk_status = *(int *)status;
+	return true;
+}
+
+static void
+rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
+		struct rpc_task *task, int status)
+{
+	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
+			task, rpc_task_action_set_status, &status);
+}
+
+/**
+ * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
+ * @queue: pointer to rpc_wait_queue
+ * @task: pointer to rpc_task
+ * @status: integer error value
+ *
+ * If @task is queued on @queue, then it is woken up, and @task->tk_status is
+ * set to the value of @status.
+ */
+void
+rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
+		struct rpc_task *task, int status)
+{
+	if (!RPC_IS_QUEUED(task))
+		return;
+	spin_lock(&queue->lock);
+	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
+	spin_unlock(&queue->lock);
+}

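The refactored rpc_wake_up_task_on_wq_queue_action_locked() treats the action
callback as an optional filter-and-mutate hook: the wake-up only proceeds when
the callback returns true. rpc_task_action_set_status() above is the one
instance this patch adds; a hypothetical predicate that wakes only tasks
already carrying an error might look like:

	/* Illustrative only: wake @task solely if it has already failed. */
	static bool demo_wake_if_failed(struct rpc_task *task, void *data)
	{
		return task->tk_status < 0;
	}
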
|---|
 /*
  * Wake up the next task on a priority queue.
...
 {
 	struct rpc_task *task = NULL;

-	dprintk("RPC: wake_up_first(%p \"%s\")\n",
-			queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	task = __rpc_find_next_queued(queue);
-	if (task != NULL) {
-		if (func(task, data))
-			rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
-		else
-			task = NULL;
-	}
-	spin_unlock_bh(&queue->lock);
+	if (task != NULL)
+		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
+				task, func, data);
+	spin_unlock(&queue->lock);

 	return task;
 }
...
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);

 /**
+ * rpc_wake_up_locked - wake up all rpc_tasks
+ * @queue: rpc_wait_queue on which the tasks are sleeping
+ *
+ */
+static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
+{
+	struct rpc_task *task;
+
+	for (;;) {
+		task = __rpc_find_next_queued(queue);
+		if (task == NULL)
+			break;
+		rpc_wake_up_task_queue_locked(queue, task);
+	}
+}
+
+/**
  * rpc_wake_up - wake up all rpc_tasks
  * @queue: rpc_wait_queue on which the tasks are sleeping
  *
...
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	struct list_head *head;
-
-	spin_lock_bh(&queue->lock);
-	head = &queue->tasks[queue->maxpriority];
-	for (;;) {
-		while (!list_empty(head)) {
-			struct rpc_task *task;
-			task = list_first_entry(head,
-					struct rpc_task,
-					u.tk_wait.list);
-			rpc_wake_up_task_queue_locked(queue, task);
-		}
-		if (head == &queue->tasks[0])
-			break;
-		head--;
-	}
-	spin_unlock_bh(&queue->lock);
+	spin_lock(&queue->lock);
+	rpc_wake_up_locked(queue);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up);
+
+/**
+ * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
+ * @queue: rpc_wait_queue on which the tasks are sleeping
+ * @status: status value to set
+ */
+static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
+{
+	struct rpc_task *task;
+
+	for (;;) {
+		task = __rpc_find_next_queued(queue);
+		if (task == NULL)
+			break;
+		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
+	}
+}

 /**
  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
...
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-	struct list_head *head;
-
-	spin_lock_bh(&queue->lock);
-	head = &queue->tasks[queue->maxpriority];
-	for (;;) {
-		while (!list_empty(head)) {
-			struct rpc_task *task;
-			task = list_first_entry(head,
-					struct rpc_task,
-					u.tk_wait.list);
-			task->tk_status = status;
-			rpc_wake_up_task_queue_locked(queue, task);
-		}
-		if (head == &queue->tasks[0])
-			break;
-		head--;
-	}
-	spin_unlock_bh(&queue->lock);
+	spin_lock(&queue->lock);
+	rpc_wake_up_status_locked(queue, status);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);

-static void __rpc_queue_timer_fn(struct timer_list *t)
+static void __rpc_queue_timer_fn(struct work_struct *work)
 {
-	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
+	struct rpc_wait_queue *queue = container_of(work,
+			struct rpc_wait_queue,
+			timer_list.dwork.work);
 	struct rpc_task *task, *n;
 	unsigned long expires, now, timeo;

 	spin_lock(&queue->lock);
 	expires = now = jiffies;
 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
-		timeo = task->u.tk_wait.expires;
+		timeo = task->tk_timeout;
 		if (time_after_eq(now, timeo)) {
-			dprintk("RPC: %5u timeout\n", task->tk_pid);
+			trace_rpc_task_timeout(task, task->tk_action);
 			task->tk_status = -ETIMEDOUT;
 			rpc_wake_up_task_queue_locked(queue, task);
 			continue;
...
  */
 void rpc_delay(struct rpc_task *task, unsigned long delay)
 {
-	task->tk_timeout = delay;
-	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
+	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
 }
 EXPORT_SYMBOL_GPL(rpc_delay);

|---|
...
 	/* Initialize retry counters */
 	task->tk_garb_retry = 2;
 	task->tk_cred_retry = 2;
-	task->tk_rebind_retry = 2;

 	/* starting timestamp */
 	task->tk_start = ktime_get();
...
 rpc_reset_task_statistics(struct rpc_task *task)
 {
 	task->tk_timeouts = 0;
-	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
-
+	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 	rpc_init_task_statistics(task);
 }

...
  */
 void rpc_exit_task(struct rpc_task *task)
 {
+	trace_rpc_task_end(task, task->tk_action);
 	task->tk_action = NULL;
+	if (task->tk_ops->rpc_count_stats)
+		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
+	else if (task->tk_client)
+		rpc_count_iostats(task, task->tk_client->cl_metrics);
 	if (task->tk_ops->rpc_call_done != NULL) {
 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 		if (task->tk_action != NULL) {
-			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
 			rpc_reset_task_statistics(task);
...
 	}
 }

+void rpc_signal_task(struct rpc_task *task)
+{
+	struct rpc_wait_queue *queue;
+
+	if (!RPC_IS_ACTIVATED(task))
+		return;
+
+	trace_rpc_task_signalled(task, task->tk_action);
+	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+	smp_mb__after_atomic();
+	queue = READ_ONCE(task->tk_waitqueue);
+	if (queue)
+		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+}
+
 void rpc_exit(struct rpc_task *task, int status)
 {
 	task->tk_status = status;
 	task->tk_action = rpc_exit_task;
-	if (RPC_IS_QUEUED(task))
-		rpc_wake_up_queued_task(task->tk_waitqueue, task);
+	rpc_wake_up_queued_task(task->tk_waitqueue, task);
 }
 EXPORT_SYMBOL_GPL(rpc_exit);

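In rpc_signal_task(), the RPC_TASK_SIGNALLED bit is set before the waitqueue
pointer is read, with smp_mb__after_atomic() ordering the two so that the
flag is visible by the time the wake-up path runs. A sketch of a caller that
cancels every task owned by a client, in the spirit of rpc_killall_tasks()
(the loop is shown for illustration only):

	static void demo_signal_all(struct rpc_clnt *clnt)
	{
		struct rpc_task *task;

		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task)
			rpc_signal_task(task);
		spin_unlock(&clnt->cl_lock);
	}
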
|---|
...
 	struct rpc_wait_queue *queue;
 	int task_is_async = RPC_IS_ASYNC(task);
 	int status = 0;
-
-	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
-			task->tk_pid, task->tk_flags);

 	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 	if (RPC_IS_QUEUED(task))
...
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
+
+		/*
+		 * Signalled tasks should exit rather than sleep.
+		 */
+		if (RPC_SIGNALLED(task)) {
+			task->tk_rpc_status = -ERESTARTSYS;
+			rpc_exit(task, -ERESTARTSYS);
+		}
+
 		/*
 		 * The queue->lock protects against races with
 		 * rpc_make_runnable().
...
 		 * rpc_task pointer may still be dereferenced.
 		 */
 		queue = task->tk_waitqueue;
-		spin_lock_bh(&queue->lock);
+		spin_lock(&queue->lock);
 		if (!RPC_IS_QUEUED(task)) {
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 			continue;
 		}
 		rpc_clear_running(task);
-		spin_unlock_bh(&queue->lock);
+		spin_unlock(&queue->lock);
 		if (task_is_async)
 			return;

 		/* sync task: sleep here */
-		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
+		trace_rpc_task_sync_sleep(task, task->tk_action);
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 				TASK_KILLABLE);
-		if (status == -ERESTARTSYS) {
+		if (status < 0) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			dprintk("RPC: %5u got signal\n", task->tk_pid);
-			task->tk_flags |= RPC_TASK_KILLED;
+			trace_rpc_task_signalled(task, task->tk_action);
+			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+			task->tk_rpc_status = -ERESTARTSYS;
 			rpc_exit(task, -ERESTARTSYS);
 		}
-		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
+		trace_rpc_task_sync_wake(task, task->tk_action);
 	}

-	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
-			task->tk_status);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
 }
...

 	rpc_set_active(task);
 	rpc_make_runnable(rpciod_workqueue, task);
-	if (!is_async)
+	if (!is_async) {
+		unsigned int pflags = memalloc_nofs_save();
 		__rpc_execute(task);
+		memalloc_nofs_restore(pflags);
+	}
 }

 static void rpc_async_schedule(struct work_struct *work)
 {
+	unsigned int pflags = memalloc_nofs_save();
+
 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+	memalloc_nofs_restore(pflags);
 }

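Both execution paths are now bracketed by memalloc_nofs_save() and
memalloc_nofs_restore(), which scope the whole call as GFP_NOFS: any
allocation made underneath, even one that passes GFP_KERNEL, will avoid
recursing into filesystem reclaim. The pattern in isolation
(do_allocating_work is a hypothetical stand-in):

	#include <linux/sched/mm.h>

	static void demo_nofs_scope(void)
	{
		unsigned int pflags = memalloc_nofs_save();

		/* allocations here implicitly behave as GFP_NOFS */
		do_allocating_work();

		memalloc_nofs_restore(pflags);
	}
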
|---|
 /**
...
  * Most requests are 'small' (under 2KiB) and can be serviced from a
  * mempool, ensuring that NFS reads and writes can always proceed,
  * and that there is good locality of reference for these buffers.
- *
- * In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we avoid using GFP_KERNEL.
  */
 int rpc_malloc(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
-	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
+	gfp_t gfp = GFP_NOFS;

+	if (RPC_IS_ASYNC(task))
+		gfp = GFP_NOWAIT | __GFP_NOWARN;
 	if (RPC_IS_SWAPPER(task))
-		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
+		gfp |= __GFP_MEMALLOC;

 	size += sizeof(struct rpc_buffer);
 	if (size <= RPC_BUFFER_MAXSIZE)
...
 		return -ENOMEM;

 	buf->len = size;
-	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
-			task->tk_pid, size, buf);
 	rqst->rq_buffer = buf->data;
 	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
 	return 0;
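The GFP mask for the call/reply buffer is now derived from the task's
execution context: sleeping GFP_NOFS by default, non-blocking GFP_NOWAIT for
async tasks (which run on rpciod and must not block), and __GFP_MEMALLOC
added for swap-out traffic so memory reserves may be used. The selection
logic, condensed into a sketch under the same flag semantics (demo_* name is
illustrative):

	static gfp_t demo_task_gfp_mask(const struct rpc_task *task)
	{
		gfp_t gfp = GFP_NOFS;		/* default: may sleep */

		if (RPC_IS_ASYNC(task))		/* rpciod must not block */
			gfp = GFP_NOWAIT | __GFP_NOWARN;
		if (RPC_IS_SWAPPER(task))	/* may dip into reserves */
			gfp |= __GFP_MEMALLOC;
		return gfp;
	}
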
|---|
...

 	buf = container_of(buffer, struct rpc_buffer, data);
 	size = buf->len;
-
-	dprintk("RPC: freeing buffer of size %zu at %p\n",
-			size, buf);

 	if (size <= RPC_BUFFER_MAXSIZE)
 		mempool_free(buf, rpc_buffer_mempool);
...
 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = task_setup_data->workqueue;

-	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);
+	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
+			xprt_get(task_setup_data->rpc_xprt));
+
+	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

 	if (task->tk_ops->rpc_call_prepare != NULL)
 		task->tk_action = rpc_prepare_task;

 	rpc_init_task_statistics(task);
-
-	dprintk("RPC: new task initialized, procpid %u\n",
-			task_pid_nr(current));
 }

 static struct rpc_task *
 rpc_alloc_task(void)
 {
-	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
+	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }

 /*
...

 	rpc_init_task(task, setup_data);
 	task->tk_flags |= flags;
-	dprintk("RPC: allocated task %p\n", task);
 	return task;
 }

...
 {
 	unsigned short tk_flags = task->tk_flags;

+	put_rpccred(task->tk_op_cred);
 	rpc_release_calldata(task->tk_ops, task->tk_calldata);

-	if (tk_flags & RPC_TASK_DYNAMIC) {
-		dprintk("RPC: %5u freeing task\n", task->tk_pid);
+	if (tk_flags & RPC_TASK_DYNAMIC)
 		mempool_free(task, rpc_task_mempool);
-	}
 }

 static void rpc_async_release(struct work_struct *work)
 {
+	unsigned int pflags = memalloc_nofs_save();
+
 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
+	memalloc_nofs_restore(pflags);
 }

 static void rpc_release_resources_task(struct rpc_task *task)
 {
 	xprt_release(task);
 	if (task->tk_msg.rpc_cred) {
-		put_rpccred(task->tk_msg.rpc_cred);
+		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
+			put_cred(task->tk_msg.rpc_cred);
 		task->tk_msg.rpc_cred = NULL;
 	}
 	rpc_task_release_client(task);
...

 static void rpc_release_task(struct rpc_task *task)
 {
-	dprintk("RPC: %5u release task\n", task->tk_pid);
-
 	WARN_ON_ONCE(RPC_IS_QUEUED(task));

 	rpc_release_resources_task(task);
...
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
-	dprintk("RPC: creating workqueue rpciod\n");
 	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!wq)
 		goto out_failed;
...

 	if (rpciod_workqueue == NULL)
 		return;
-	dprintk("RPC: destroying workqueue rpciod\n");

 	wq = rpciod_workqueue;
 	rpciod_workqueue = NULL;
|---|