@@ -82,8 +82,8 @@
 {
 	struct se_session *sess = se_cmd->se_sess;
 
-	assert_spin_locked(&sess->sess_cmd_lock);
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_held(&sess->sess_cmd_lock);
+
 	/*
 	 * If command already reached CMD_T_COMPLETE state within
 	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
@@ -121,57 +121,61 @@
 	unsigned long flags;
 	bool rc;
 	u64 ref_tag;
+	int i;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry_safe(se_cmd, next, &dev->state_list, state_list) {
+	for (i = 0; i < dev->queue_cnt; i++) {
+		spin_lock_irqsave(&dev->queues[i].lock, flags);
+		list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
+					 state_list) {
+			if (se_sess != se_cmd->se_sess)
+				continue;
 
-		if (se_sess != se_cmd->se_sess)
-			continue;
+			/*
+			 * skip task management functions, including
+			 * tmr->task_cmd
+			 */
+			if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+				continue;
 
-		/* skip task management functions, including tmr->task_cmd */
-		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-			continue;
+			ref_tag = se_cmd->tag;
+			if (tmr->ref_task_tag != ref_tag)
+				continue;
 
-		ref_tag = se_cmd->tag;
-		if (tmr->ref_task_tag != ref_tag)
-			continue;
+			pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+			       se_cmd->se_tfo->fabric_name, ref_tag);
 
-		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
-			se_cmd->se_tfo->fabric_name, ref_tag);
+			spin_lock(&se_sess->sess_cmd_lock);
+			rc = __target_check_io_state(se_cmd, se_sess, 0);
+			spin_unlock(&se_sess->sess_cmd_lock);
+			if (!rc)
+				continue;
 
-		spin_lock(&se_sess->sess_cmd_lock);
-		rc = __target_check_io_state(se_cmd, se_sess, 0);
-		spin_unlock(&se_sess->sess_cmd_lock);
-		if (!rc)
-			continue;
+			list_move_tail(&se_cmd->state_list, &aborted_list);
+			se_cmd->state_active = false;
+			spin_unlock_irqrestore(&dev->queues[i].lock, flags);
 
-		list_move_tail(&se_cmd->state_list, &aborted_list);
-		se_cmd->state_active = false;
+			/*
+			 * Ensure that this ABORT request is visible to the LU
+			 * RESET code.
+			 */
+			if (!tmr->tmr_dev)
+				WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
 
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+			if (dev->transport->tmr_notify)
+				dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
+							   &aborted_list);
 
-		/*
-		 * Ensure that this ABORT request is visible to the LU RESET
-		 * code.
-		 */
-		if (!tmr->tmr_dev)
-			WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) <
-				     0);
+			list_del_init(&se_cmd->state_list);
+			target_put_cmd_and_wait(se_cmd);
 
-		if (dev->transport->tmr_notify)
-			dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
-						   &aborted_list);
-
-		list_del_init(&se_cmd->state_list);
-		target_put_cmd_and_wait(se_cmd);
-
-		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
-		       " ref_tag: %llu\n", ref_tag);
-		tmr->response = TMR_FUNCTION_COMPLETE;
-		atomic_long_inc(&dev->aborts_complete);
-		return;
+			pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
+			       ref_tag);
+			tmr->response = TMR_FUNCTION_COMPLETE;
+			atomic_long_inc(&dev->aborts_complete);
+			return;
+		}
+		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 	if (dev->transport->tmr_notify)
 		dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
@@ -198,14 +202,23 @@
 	 * LUN_RESET tmr..
 	 */
 	spin_lock_irqsave(&dev->se_tmr_lock, flags);
-	if (tmr)
-		list_del_init(&tmr->tmr_list);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		if (tmr_p == tmr)
+			continue;
+
 		cmd = tmr_p->task_cmd;
 		if (!cmd) {
 			pr_err("Unable to locate struct se_cmd for TMR\n");
 			continue;
 		}
+
+		/*
+		 * We only execute one LUN_RESET at a time so we can't wait
+		 * on them below.
+		 */
+		if (tmr_p->function == TMR_LUN_RESET)
+			continue;
+
 		/*
 		 * If this function was called with a valid pr_res_key
 		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
@@ -273,7 +286,7 @@
 	struct se_session *sess;
 	struct se_cmd *cmd, *next;
 	unsigned long flags;
-	int rc;
+	int rc, i;
 
 	/*
 	 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -297,35 +310,39 @@
 	 * Note that this seems to be independent of TAS (Task Aborted Status)
 	 * in the Control Mode Page.
 	 */
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
-		/*
-		 * For PREEMPT_AND_ABORT usage, only process commands
-		 * with a matching reservation key.
-		 */
-		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
-			continue;
+	for (i = 0; i < dev->queue_cnt; i++) {
+		spin_lock_irqsave(&dev->queues[i].lock, flags);
+		list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
+					 state_list) {
+			/*
+			 * For PREEMPT_AND_ABORT usage, only process commands
+			 * with a matching reservation key.
+			 */
+			if (target_check_cdb_and_preempt(preempt_and_abort_list,
+							 cmd))
+				continue;
 
-		/*
-		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
-		 */
-		if (prout_cmd == cmd)
-			continue;
+			/*
+			 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+			 */
+			if (prout_cmd == cmd)
+				continue;
 
-		sess = cmd->se_sess;
-		if (WARN_ON_ONCE(!sess))
-			continue;
+			sess = cmd->se_sess;
+			if (WARN_ON_ONCE(!sess))
+				continue;
 
-		spin_lock(&sess->sess_cmd_lock);
-		rc = __target_check_io_state(cmd, tmr_sess, tas);
-		spin_unlock(&sess->sess_cmd_lock);
-		if (!rc)
-			continue;
+			spin_lock(&sess->sess_cmd_lock);
+			rc = __target_check_io_state(cmd, tmr_sess, tas);
+			spin_unlock(&sess->sess_cmd_lock);
+			if (!rc)
+				continue;
 
-		list_move_tail(&cmd->state_list, &drain_task_list);
-		cmd->state_active = false;
+			list_move_tail(&cmd->state_list, &drain_task_list);
+			cmd->state_active = false;
+		}
+		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 	if (dev->transport->tmr_notify)
 		dev->transport->tmr_notify(dev, preempt_and_abort_list ?
@@ -382,14 +399,25 @@
 				tmr_nacl->initiatorname);
 		}
 	}
+
+
+	/*
+	 * We only allow one reset or preempt and abort to execute at a time
+	 * to prevent one call from claiming all the cmds causing a second
+	 * call from returning while cmds it should have waited on are still
+	 * running.
+	 */
+	mutex_lock(&dev->lun_reset_mutex);
+
 	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
 		 (preempt_and_abort_list) ? "Preempt" : "TMR",
 		 dev->transport->name, tas);
-
 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
 	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
 				  preempt_and_abort_list);
 
+	mutex_unlock(&dev->lun_reset_mutex);
+
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
---|