+// SPDX-License-Identifier: GPL-2.0
 /*
  * Functions related to generic timeout handling of requests.
  */
...
 }
 __setup("fail_io_timeout=", setup_fail_io_timeout);

-int blk_should_fake_timeout(struct request_queue *q)
+bool __blk_should_fake_timeout(struct request_queue *q)
 {
-        if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
-                return 0;
-
         return should_fail(&fail_io_timeout, 1);
 }
+EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);

 static int __init fail_io_timeout_debugfs(void)
 {
...
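The fail_io_timeout hunk above narrows the out-of-line function to the should_fail() call itself and exports it. This split only pays off if callers do the cheap QUEUE_FLAG_FAIL_IO test inline before making the call; in mainline that wrapper lives in blk-mq.h, but treat the exact location as an assumption. A sketch of the caller side:

/*
 * Sketch of the inline wrapper that the hunk above implies; the exact home
 * of this helper (blk-mq.h in mainline) is an assumption.
 */
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return __blk_should_fake_timeout(q);
        return false;
}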

 #endif /* CONFIG_FAIL_IO_TIMEOUT */

-/*
- * blk_delete_timer - Delete/cancel timer for a given function.
- * @req: request that we are canceling timer for
- *
- */
-void blk_delete_timer(struct request *req)
-{
-        list_del_init(&req->timeout_list);
-}
-
-static void blk_rq_timed_out(struct request *req)
-{
-        struct request_queue *q = req->q;
-        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
-
-        if (q->rq_timed_out_fn)
-                ret = q->rq_timed_out_fn(req);
-        switch (ret) {
-        case BLK_EH_RESET_TIMER:
-                blk_add_timer(req);
-                blk_clear_rq_complete(req);
-                break;
-        case BLK_EH_DONE:
-                /*
-                 * LLD handles this for now but in the future
-                 * we can send a request msg to abort the command
-                 * and we can move more of the generic scsi eh code to
-                 * the blk layer.
-                 */
-                break;
-        default:
-                printk(KERN_ERR "block: bad eh return: %d\n", ret);
-                break;
-        }
-}
-
-static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
-                                 unsigned int *next_set)
-{
-        const unsigned long deadline = blk_rq_deadline(rq);
-
-        if (time_after_eq(jiffies, deadline)) {
-                list_del_init(&rq->timeout_list);
-
-                /*
-                 * Check if we raced with end io completion
-                 */
-                if (!blk_mark_rq_complete(rq))
-                        blk_rq_timed_out(rq);
-        } else if (!*next_set || time_after(*next_timeout, deadline)) {
-                *next_timeout = deadline;
-                *next_set = 1;
-        }
-}
-
-void blk_timeout_work(struct work_struct *work)
-{
-        struct request_queue *q =
-                container_of(work, struct request_queue, timeout_work);
-        unsigned long flags, next = 0;
-        struct request *rq, *tmp;
-        int next_set = 0;
-
-        spin_lock_irqsave(q->queue_lock, flags);
-
-        list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
-                blk_rq_check_expired(rq, &next, &next_set);
-
-        if (next_set)
-                mod_timer(&q->timeout, round_jiffies_up(next));
-
-        spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
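With blk_delete_timer(), blk_rq_timed_out(), blk_rq_check_expired() and the list-walking blk_timeout_work() removed, expiry scanning is left entirely to blk-mq, which iterates the tag map instead of a queue_lock-protected timeout_list. A rough sketch of that replacement shape, assuming the blk_mq_queue_tag_busy_iter() iterator and an expiry callback like blk-mq.c's blk_mq_check_expired() (details such as the q_usage_counter handling vary by kernel version and are omitted here):

/*
 * Hedged sketch, not the verbatim mainline body: blk-mq drives timeouts by
 * walking in-flight tags, so no per-request list or queue_lock is needed.
 */
static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        unsigned long next = 0;

        /*
         * Visit every in-flight request; the callback compares each
         * request's deadline against jiffies and records the earliest
         * still-future deadline in *next.
         */
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

        if (next)
                mod_timer(&q->timeout, next);
}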
 /**
- * blk_abort_request -- Request request recovery for the specified command
+ * blk_abort_request - Request recovery for the specified command
  * @req: pointer to the request of interest
  *
  * This function requests that the block layer start recovery for the
  * request by deleting the timer and calling the q's timeout function.
  * LLDDs who implement their own error recovery MAY ignore the timeout
- * event if they generated blk_abort_req. Must hold queue lock.
+ * event if they generated blk_abort_request.
  */
 void blk_abort_request(struct request *req)
 {
-        if (req->q->mq_ops) {
-                /*
-                 * All we need to ensure is that timeout scan takes place
-                 * immediately and that scan sees the new timeout value.
-                 * No need for fancy synchronizations.
-                 */
-                blk_rq_set_deadline(req, jiffies);
-                kblockd_schedule_work(&req->q->timeout_work);
-        } else {
-                if (blk_mark_rq_complete(req))
-                        return;
-                blk_delete_timer(req);
-                blk_rq_timed_out(req);
-        }
+        /*
+         * All we need to ensure is that timeout scan takes place
+         * immediately and that scan sees the new timeout value.
+         * No need for fancy synchronizations.
+         */
+        WRITE_ONCE(req->deadline, jiffies);
+        kblockd_schedule_work(&req->q->timeout_work);
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
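blk_abort_request() now simply backdates the deadline and kicks the timeout worker. The WRITE_ONCE() is the whole synchronization story: the scan runs without the old queue_lock, so the reader side must pair it with READ_ONCE(). A minimal sketch of that pairing, with blk_rq_expired() as a hypothetical helper name (the real check lives in blk-mq's expiry callback):

/* Hypothetical helper illustrating the READ_ONCE()/WRITE_ONCE() pairing. */
static bool blk_rq_expired(struct request *rq)
{
        unsigned long deadline = READ_ONCE(rq->deadline);

        return time_after_eq(jiffies, deadline);
}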
+
+static unsigned long blk_timeout_mask __read_mostly;
+
+static int __init blk_timeout_init(void)
+{
+        blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
+        return 0;
+}
+
+late_initcall(blk_timeout_init);
+
+/*
+ * Just a rough estimate, we don't care about specific values for timeouts.
+ */
+static inline unsigned long blk_round_jiffies(unsigned long j)
+{
+        return (j + blk_timeout_mask) + 1;
+}

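blk_round_jiffies() replaces round_jiffies_up() with a single addition: blk_timeout_mask rounds HZ up to a power of two, so the helper pushes a deadline roughly one second into the future instead of aligning it to a wall-clock second boundary. A worked example:

/*
 * Worked example, assuming HZ == 1000 (an assumed config, not from the diff):
 *
 *   blk_timeout_mask     = roundup_pow_of_two(1000) - 1 = 1024 - 1 = 1023
 *   blk_round_jiffies(j) = (j + 1023) + 1 = j + 1024     (~1.024 s later)
 *
 * round_jiffies_up() instead aligned to the next whole second with extra
 * per-CPU math; as the comment above the helper says, a rough estimate is
 * all that is needed here, so the cheaper constant offset wins.
 */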
 unsigned long blk_rq_timeout(unsigned long timeout)
 {
         unsigned long maxt;

-        maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
+        maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
         if (time_after(timeout, maxt))
                 timeout = maxt;

...
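blk_rq_timeout() keeps its clamping behavior; only the rounding helper changes. The clamp limits how far ahead the queue's timer may be armed, not the request's own deadline, so very long per-request timeouts are handled by periodic rescans rather than one far-future timer. An illustration, assuming BLK_MAX_TIMEOUT == 5 * HZ (the blk.h value at the time; treat it as an assumption):

/*
 * A driver deadline 300 s out is clamped to roughly 5 s; when that timer
 * fires, the scan finds the request still unexpired and re-arms, repeating
 * until the real deadline passes.
 */
unsigned long deadline = jiffies + 300 * HZ;  /* driver asked for 300 s   */
deadline = blk_rq_timeout(deadline);          /* <= ~(jiffies + 5 * HZ)   */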
         struct request_queue *q = req->q;
         unsigned long expiry;

-        if (!q->mq_ops)
-                lockdep_assert_held(q->queue_lock);
-
-        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
-        if (!q->mq_ops && !q->rq_timed_out_fn)
-                return;
-
-        BUG_ON(!list_empty(&req->timeout_list));
-
         /*
          * Some LLDs, like scsi, peek at the timeout to prevent a
          * command from being retried forever.
...
         req->timeout = q->rq_timeout;

         req->rq_flags &= ~RQF_TIMED_OUT;
-        blk_rq_set_deadline(req, jiffies + req->timeout);

-        /*
-         * Only the non-mq case needs to add the request to a protected list.
-         * For the mq case we simply scan the tag map.
-         */
-        if (!q->mq_ops)
-                list_add_tail(&req->timeout_list, &req->q->timeout_list);
+        expiry = jiffies + req->timeout;
+        WRITE_ONCE(req->deadline, expiry);

         /*
          * If the timer isn't already pending or this timeout is earlier
          * than an existing one, modify the timer. Round up to next nearest
          * second.
          */
-        expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+        expiry = blk_rq_timeout(blk_round_jiffies(expiry));

         if (!timer_pending(&q->timeout) ||
             time_before(expiry, q->timeout.expires)) {
|---|