@@ -320,6 +320,9 @@
 	rq->extra_len = 0;
 	rq->__deadline = 0;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
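The INIT_WORK() added here presupposes that struct request gains a work item
under the same config option. In the RT series that companion change lives in
include/linux/blkdev.h; sketched (exact placement inside struct request may
differ):

	#ifdef CONFIG_PREEMPT_RT_FULL
		/* RT: run the completion in process context on a remote CPU */
		struct work_struct work;
	#endif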
---|
@@ -547,12 +550,24 @@
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
 	rq->q->softirq_done_fn(rq);
 }
+#endif
 
 static void __blk_mq_complete_request(struct request *rq)
 {
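Under CONFIG_PREEMPT_RT_FULL the new handler runs from a kworker, i.e. in
preemptible process context, so a softirq_done_fn that ends up taking sleeping
locks on RT stays legal. The container_of() idiom recovers the request from
its embedded work item; a minimal, self-contained sketch of that pattern
(hypothetical foo names, not from the patch):

	#include <linux/workqueue.h>
	#include <linux/printk.h>

	struct foo {
		int id;
		struct work_struct work;	/* embedded, like rq->work above */
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* recover the containing object from the work pointer */
		struct foo *f = container_of(work, struct foo, work);

		pr_info("foo %d completed in process context\n", f->id);
	}

Queueing then mirrors the request path: INIT_WORK(&f->work, foo_work_fn) once
at setup, schedule_work_on(cpu, &f->work) to run it on a specific CPU.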
---|
@@ -570,19 +585,27 @@
 		return;
 	}
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * We could force QUEUE_FLAG_SAME_FORCE, then we would not get
+		 * here. But we can also invoke it on the target CPU like this.
+		 */
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
-	put_cpu();
+	put_cpu_light();
 }
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
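The design choice in this hunk: smp_call_function_single_async() delivers an
IPI and runs the callback in hard-interrupt context on the target CPU, where
RT's sleeping spinlocks must not be taken. Deferring to schedule_work_on()
trades a little completion latency for running softirq_done_fn() in a
preemptible kworker on the same target CPU, keeping the cache-locality intent
of the original IPI path.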
---|
@@ -1387,14 +1410,14 @@
 		return;
 
 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		int cpu = get_cpu();
+		int cpu = get_cpu_light();
 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
 			__blk_mq_run_hw_queue(hctx);
-			put_cpu();
+			put_cpu_light();
 			return;
 		}
 
-		put_cpu();
+		put_cpu_light();
 	}
 
 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
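get_cpu()/put_cpu() disable preemption outright, which on RT would forbid the
sleeping locks taken further down these paths. The *_light() variants come
from the RT patch set; there they are defined roughly as (sketched from the
RT series, not mainline API):

	#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
	#define put_cpu_light()		migrate_enable()

so the task is pinned to its current CPU (smp_processor_id() stays stable)
while remaining preemptible.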
---|
@@ -3139,10 +3162,9 @@
 		kt = nsecs;
 
 	mode = HRTIMER_MODE_REL;
-	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
 	hrtimer_set_expires(&hs.timer, kt);
 
-	hrtimer_init_sleeper(&hs, current);
 	do {
 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
 			break;
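This last hunk folds the two-step hrtimer_init_on_stack() +
hrtimer_init_sleeper() sequence into a single hrtimer_init_sleeper_on_stack()
call, so the sleeper (expiry callback plus the task to wake) is fully wired up
before the timer can be armed. A sketch of what such a consolidated helper
amounts to, assuming the usual hrtimer_wakeup() expiry callback from
kernel/time/hrtimer.c (the real implementation also handles debug objects):

	void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
					   clockid_t clock_id,
					   enum hrtimer_mode mode,
					   struct task_struct *task)
	{
		hrtimer_init_on_stack(&sl->timer, clock_id, mode);
		sl->timer.function = hrtimer_wakeup;	/* wakes sl->task on expiry */
		sl->task = task;
	}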