forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-02-20 ea08eeccae9297f7aabd2ef7f0c2517ac4549acc
kernel/drivers/target/target_core_transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*******************************************************************************
  * Filename:  target_core_transport.c
  *
@@ -6,20 +7,6 @@
  * (c) Copyright 2002-2013 Datera, Inc.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  ******************************************************************************/
 
@@ -205,19 +192,19 @@
         if (sub_api_initialized)
                 return;
 
-        ret = request_module("target_core_iblock");
+        ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
         if (ret != 0)
                 pr_err("Unable to load target_core_iblock\n");
 
-        ret = request_module("target_core_file");
+        ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
         if (ret != 0)
                 pr_err("Unable to load target_core_file\n");
 
-        ret = request_module("target_core_pscsi");
+        ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
         if (ret != 0)
                 pr_err("Unable to load target_core_pscsi\n");
 
-        ret = request_module("target_core_user");
+        ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
         if (ret != 0)
                 pr_err("Unable to load target_core_user\n");
 
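The IS_ENABLED() conversion above leans on C's short-circuit evaluation of &&: when a backend is compiled out, request_module() is never invoked and ret stays 0, so no bogus "Unable to load" message is logged; when the option is enabled and request_module() fails, the && expression yields 1 and the pr_err() still fires. A minimal userspace sketch of the same pattern, with a stubbed request_module() standing in for the kernel call:

#include <stdio.h>

/* Stand-ins for the kernel config macros (illustration only). */
#define IS_ENABLED_TCM_IBLOCK 1         /* pretend CONFIG_TCM_IBLOCK=y */
#define IS_ENABLED_TCM_USER2  0         /* pretend CONFIG_TCM_USER2 is not set */

static int request_module(const char *name)
{
        printf("loading %s\n", name);
        return 0;                       /* 0 on success, non-zero on failure */
}

int main(void)
{
        int ret;

        /* Evaluated: the option is enabled, so the module is requested. */
        ret = IS_ENABLED_TCM_IBLOCK && request_module("target_core_iblock");
        if (ret != 0)
                printf("Unable to load target_core_iblock\n");

        /* Short-circuited: request_module() never runs and ret == 0. */
        ret = IS_ENABLED_TCM_USER2 && request_module("target_core_user");
        if (ret != 0)
                printf("Unable to load target_core_user\n");
        return 0;
}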
@@ -248,6 +235,11 @@
                 target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);
+
+void transport_uninit_session(struct se_session *se_sess)
+{
+        percpu_ref_exit(&se_sess->cmd_count);
+}
 
 /**
  * transport_alloc_session - allocate a session object and initialize it
@@ -287,14 +279,11 @@
 {
         int rc;
 
-        se_sess->sess_cmd_map = kcalloc(tag_size, tag_num,
-                        GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
+        se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
+                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
         if (!se_sess->sess_cmd_map) {
-                se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num));
-                if (!se_sess->sess_cmd_map) {
-                        pr_err("Unable to allocate se_sess->sess_cmd_map\n");
-                        return -ENOMEM;
-                }
+                pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+                return -ENOMEM;
         }
 
         rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
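kvcalloc() folds the former kcalloc()-plus-vzalloc() fallback into one call: it first tries a physically contiguous allocation (adding __GFP_NOWARN to that attempt internally, which is why the explicit flag is dropped above) and falls back to virtually contiguous memory on its own, leaving the caller a single error check. A rough userspace approximation of that fallback shape — both legs are modeled with calloc() here, so this only illustrates the structure, not the allocators:

#include <stdio.h>
#include <stdlib.h>

static void *kvcalloc_like(size_t n, size_t size)
{
        void *p = calloc(n, size);      /* stands in for the kcalloc() attempt */

        if (p)
                return p;
        return calloc(n, size);         /* stands in for the vzalloc() fallback */
}

int main(void)
{
        void *map = kvcalloc_like(128, 64);

        if (!map) {
                fprintf(stderr, "Unable to allocate sess_cmd_map\n");
                return 1;
        }
        free(map);
        return 0;
}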
@@ -411,7 +400,7 @@
         list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
         pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
-                se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+                se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
 }
 EXPORT_SYMBOL(__transport_register_session);
 
@@ -595,10 +584,19 @@
                 sbitmap_queue_free(&se_sess->sess_tag_pool);
                 kvfree(se_sess->sess_cmd_map);
         }
-        percpu_ref_exit(&se_sess->cmd_count);
+        transport_uninit_session(se_sess);
         kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
+
+static int target_release_res(struct se_device *dev, void *data)
+{
+        struct se_session *sess = data;
+
+        if (dev->reservation_holder == sess)
+                target_release_reservation(dev);
+        return 0;
+}
 
 void transport_deregister_session(struct se_session *se_sess)
 {
@@ -616,8 +614,14 @@
         se_sess->fabric_sess_ptr = NULL;
         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
+        /*
+         * Since the session is being removed, release SPC-2
+         * reservations held by the session that is disappearing.
+         */
+        target_for_each_device(target_release_res, se_sess);
+
         pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
-                se_tpg->se_tpg_tfo->get_fabric_name());
+                se_tpg->se_tpg_tfo->fabric_name);
         /*
          * If last kref is dropping now for an explicit NodeACL, awake sleeping
          * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
646650 if (!dev)
647651 return;
648652
649
- spin_lock_irqsave(&dev->execute_task_lock, flags);
653
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
650654 if (cmd->state_active) {
651655 list_del(&cmd->state_list);
652656 cmd->state_active = false;
653657 }
654
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
658
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
655659 }
656660
657661 /*
@@ -710,32 +714,6 @@
         percpu_ref_put(&lun->lun_ref);
 }
 
-int transport_cmd_finish_abort(struct se_cmd *cmd)
-{
-        bool send_tas = cmd->transport_state & CMD_T_TAS;
-        bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
-        int ret = 0;
-
-        if (send_tas)
-                transport_send_task_abort(cmd);
-
-        if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
-                transport_lun_remove_cmd(cmd);
-        /*
-         * Allow the fabric driver to unmap any resources before
-         * releasing the descriptor via TFO->release_cmd()
-         */
-        if (!send_tas)
-                cmd->se_tfo->aborted_task(cmd);
-
-        if (transport_cmd_check_stop_to_fabric(cmd))
-                return 1;
-        if (!send_tas && ack_kref)
-                ret = target_put_sess_cmd(cmd);
-
-        return ret;
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -785,11 +763,87 @@
 }
 EXPORT_SYMBOL(transport_copy_sense_to_cmd);
 
+static void target_handle_abort(struct se_cmd *cmd)
+{
+        bool tas = cmd->transport_state & CMD_T_TAS;
+        bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
+        int ret;
+
+        pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
+
+        if (tas) {
+                if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+                        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+                        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+                                 cmd->t_task_cdb[0], cmd->tag);
+                        trace_target_cmd_complete(cmd);
+                        ret = cmd->se_tfo->queue_status(cmd);
+                        if (ret) {
+                                transport_handle_queue_full(cmd, cmd->se_dev,
+                                                            ret, false);
+                                return;
+                        }
+                } else {
+                        cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
+                        cmd->se_tfo->queue_tm_rsp(cmd);
+                }
+        } else {
+                /*
+                 * Allow the fabric driver to unmap any resources before
+                 * releasing the descriptor via TFO->release_cmd().
+                 */
+                cmd->se_tfo->aborted_task(cmd);
+                if (ack_kref)
+                        WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
+                /*
+                 * To do: establish a unit attention condition on the I_T
+                 * nexus associated with cmd. See also the paragraph "Aborting
+                 * commands" in SAM.
+                 */
+        }
+
+        WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+
+        transport_lun_remove_cmd(cmd);
+
+        transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void target_abort_work(struct work_struct *work)
+{
+        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+        target_handle_abort(cmd);
+}
+
+static bool target_cmd_interrupted(struct se_cmd *cmd)
+{
+        int post_ret;
+
+        if (cmd->transport_state & CMD_T_ABORTED) {
+                if (cmd->transport_complete_callback)
+                        cmd->transport_complete_callback(cmd, false, &post_ret);
+                INIT_WORK(&cmd->work, target_abort_work);
+                queue_work(target_completion_wq, &cmd->work);
+                return true;
+        } else if (cmd->transport_state & CMD_T_STOP) {
+                if (cmd->transport_complete_callback)
+                        cmd->transport_complete_callback(cmd, false, &post_ret);
+                complete_all(&cmd->t_transport_stop_comp);
+                return true;
+        }
+
+        return false;
+}
+
+/* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-        struct se_device *dev = cmd->se_dev;
         int success;
         unsigned long flags;
+
+        if (target_cmd_interrupted(cmd))
+                return;
 
         cmd->scsi_status = scsi_status;
 
@@ -806,38 +860,13 @@
                 break;
         }
 
-        /*
-         * Check for case where an explicit ABORT_TASK has been received
-         * and transport_wait_for_tasks() will be waiting for completion..
-         */
-        if (cmd->transport_state & CMD_T_ABORTED ||
-            cmd->transport_state & CMD_T_STOP) {
-                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                /*
-                 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
-                 * release se_device->caw_sem obtained by sbc_compare_and_write()
-                 * since target_complete_ok_work() or target_complete_failure_work()
-                 * won't be called to invoke the normal CAW completion callbacks.
-                 */
-                if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
-                        up(&dev->caw_sem);
-                }
-                complete_all(&cmd->t_transport_stop_comp);
-                return;
-        } else if (!success) {
-                INIT_WORK(&cmd->work, target_complete_failure_work);
-        } else {
-                INIT_WORK(&cmd->work, target_complete_ok_work);
-        }
-
         cmd->t_state = TRANSPORT_COMPLETE;
         cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-        if (cmd->se_cmd_flags & SCF_USE_CPUID)
-                queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
-        else
-                queue_work(target_completion_wq, &cmd->work);
+        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
+                  target_complete_failure_work);
+        queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
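Together with target_cmd_interrupted() above, completion handling now diverts aborted or stopped commands before any status is recorded, and otherwise selects the ok/failure worker with a single ternary and queues it on the submitting CPU. A compact userspace sketch of that control flow (types and handlers are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

enum { CMD_T_ABORTED = 1 << 0, CMD_T_STOP = 1 << 1 };

struct cmd {
        unsigned int transport_state;
        void (*work)(struct cmd *);
};

static void complete_ok(struct cmd *c)      { (void)c; puts("ok completion"); }
static void complete_failure(struct cmd *c) { (void)c; puts("failure completion"); }
static void handle_abort(struct cmd *c)     { (void)c; puts("abort handling"); }

/* Mirrors target_cmd_interrupted(): divert aborted/stopped commands. */
static bool cmd_interrupted(struct cmd *c)
{
        if (c->transport_state & CMD_T_ABORTED) {
                c->work = handle_abort;  /* kernel: queued on target_completion_wq */
                c->work(c);
                return true;
        }
        if (c->transport_state & CMD_T_STOP)
                return true;             /* kernel: complete_all(&t_transport_stop_comp) */
        return false;
}

static void complete_cmd(struct cmd *c, bool success)
{
        if (cmd_interrupted(c))
                return;
        c->work = success ? complete_ok : complete_failure;
        c->work(c);                      /* kernel: queue_work_on(c->cpuid, ...) */
}

int main(void)
{
        struct cmd aborted = { .transport_state = CMD_T_ABORTED };
        struct cmd normal  = { 0 };

        complete_cmd(&aborted, true);    /* abort handling */
        complete_cmd(&normal, true);     /* ok completion */
        return 0;
}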
@@ -872,12 +901,13 @@
         struct se_device *dev = cmd->se_dev;
         unsigned long flags;
 
-        spin_lock_irqsave(&dev->execute_task_lock, flags);
+        spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
         if (!cmd->state_active) {
-                list_add_tail(&cmd->state_list, &dev->state_list);
+                list_add_tail(&cmd->state_list,
+                              &dev->queues[cmd->cpuid].state_list);
                 cmd->state_active = true;
         }
-        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+        spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 }
 
 /*
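Both state-list hunks swap the device-wide execute_task_lock for a lock embedded in a per-CPU queue, so commands submitted on different CPUs stop contending on one spinlock. A sketch of the data-structure shape this implies — the queues/state_list/lock field names follow the diff, the rest of the layout is an assumption (compile with -lpthread):

#include <pthread.h>

struct list_head { struct list_head *prev, *next; };

struct se_dev_queue {
        struct list_head state_list;    /* commands tracked on this CPU */
        pthread_mutex_t lock;           /* kernel: spinlock_t */
};

struct se_device {
        struct se_dev_queue *queues;    /* one entry per possible CPU */
        int queue_cnt;
};

/* Tail-insert a command's state node under its submitting CPU's lock. */
static void add_state(struct se_device *dev, int cpuid, struct list_head *node)
{
        struct se_dev_queue *q = &dev->queues[cpuid];

        pthread_mutex_lock(&q->lock);
        node->prev = q->state_list.prev;
        node->next = &q->state_list;
        q->state_list.prev->next = node;
        q->state_list.prev = node;
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct se_dev_queue q = { { &q.state_list, &q.state_list },
                                  PTHREAD_MUTEX_INITIALIZER };
        struct se_device dev = { &q, 1 };
        struct list_head node;

        add_state(&dev, 0, &node);
        return 0;
}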
@@ -902,7 +932,7 @@
         atomic_dec_mb(&dev->dev_qf_count);
 
         pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
-                " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+                " context: %s\n", cmd->se_tfo->fabric_name, cmd,
                 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
                 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
                 : "UNKNOWN");
12561286 return TCM_NO_SENSE;
12571287 }
12581288
1289
+/**
1290
+ * target_cmd_size_check - Check whether there will be a residual.
1291
+ * @cmd: SCSI command.
1292
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
1293
+ * the SCSI transport driver is available in @cmd->data_length.
1294
+ *
1295
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
1296
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
1297
+ *
1298
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
1299
+ *
1300
+ * Return: TCM_NO_SENSE
1301
+ */
12591302 sense_reason_t
12601303 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
12611304 {
@@ -1266,7 +1309,7 @@
         } else if (size != cmd->data_length) {
                 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
-                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+                        " 0x%02x\n", cmd->se_tfo->fabric_name,
                         cmd->data_length, size, cmd->t_task_cdb[0]);
 
                 if (cmd->data_direction == DMA_TO_DEVICE) {
@@ -1331,14 +1374,15 @@
         u32 data_length,
         int data_direction,
         int task_attr,
-        unsigned char *sense_buffer)
+        unsigned char *sense_buffer, u64 unpacked_lun)
 {
         INIT_LIST_HEAD(&cmd->se_delayed_node);
         INIT_LIST_HEAD(&cmd->se_qf_node);
         INIT_LIST_HEAD(&cmd->se_cmd_list);
         INIT_LIST_HEAD(&cmd->state_list);
         init_completion(&cmd->t_transport_stop_comp);
-        cmd->compl = NULL;
+        cmd->free_compl = NULL;
+        cmd->abrt_compl = NULL;
         spin_lock_init(&cmd->t_state_lock);
         INIT_WORK(&cmd->work, NULL);
         kref_init(&cmd->cmd_kref);
@@ -1349,6 +1393,10 @@
         cmd->data_direction = data_direction;
         cmd->sam_task_attr = task_attr;
         cmd->sense_buffer = sense_buffer;
+        cmd->orig_fe_lun = unpacked_lun;
+
+        if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+                cmd->cpuid = raw_smp_processor_id();
 
         cmd->state_active = false;
 }
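transport_init_se_cmd() now records the LUN up front in cmd->orig_fe_lun and, unless the fabric pinned a CPU via SCF_USE_CPUID, samples the submitting CPU so completion work can later be queued back on it. A userspace analogue of that capture, with sched_getcpu() standing in for raw_smp_processor_id():

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cmd {
        uint64_t orig_fe_lun;
        int cpuid;
        bool use_fixed_cpuid;           /* stands in for SCF_USE_CPUID */
};

static void init_cmd(struct cmd *c, uint64_t unpacked_lun)
{
        c->orig_fe_lun = unpacked_lun;  /* LUN captured at init time */
        if (!c->use_fixed_cpuid)
                c->cpuid = sched_getcpu();  /* kernel: raw_smp_processor_id() */
}

int main(void)
{
        struct cmd c = { .use_fixed_cpuid = false };

        init_cmd(&c, 3);
        printf("lun=%llu cpu=%d\n", (unsigned long long)c.orig_fe_lun, c.cpuid);
        return 0;
}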
@@ -1363,7 +1411,7 @@
          * Check if SAM Task Attribute emulation is enabled for this
          * struct se_device storage object
          */
-        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                 return 0;
 
         if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1376,11 +1424,11 @@
 }
 
 sense_reason_t
-target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
 {
-        struct se_device *dev = cmd->se_dev;
         sense_reason_t ret;
 
+        cmd->t_task_cdb = &cmd->__t_task_cdb[0];
         /*
          * Ensure that the received CDB is less than the max (252 + 8) bytes
          * for VARIABLE_LENGTH_CMD
@@ -1389,7 +1437,8 @@
                 pr_err("Received SCSI CDB with command_size: %d that"
                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-                return TCM_INVALID_CDB_FIELD;
+                ret = TCM_INVALID_CDB_FIELD;
+                goto err;
         }
         /*
          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1404,21 +1453,39 @@
                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                 scsi_command_size(cdb),
                                 (unsigned long)sizeof(cmd->__t_task_cdb));
-                        return TCM_OUT_OF_RESOURCES;
+                        ret = TCM_OUT_OF_RESOURCES;
+                        goto err;
                 }
-        } else
-                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+        }
         /*
          * Copy the original CDB into cmd->
          */
         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 
         trace_target_sequencer_start(cmd);
+        return 0;
+
+err:
+        /*
+         * Copy the CDB here to allow trace_target_cmd_complete() to
+         * print the cdb to the trace buffers.
+         */
+        memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
+                                         (unsigned int)TCM_MAX_COMMAND_SIZE));
+        return ret;
+}
+EXPORT_SYMBOL(target_cmd_init_cdb);
+
+sense_reason_t
+target_cmd_parse_cdb(struct se_cmd *cmd)
+{
+        struct se_device *dev = cmd->se_dev;
+        sense_reason_t ret;
 
         ret = dev->transport->parse_cdb(cmd);
         if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
                 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
-                        cmd->se_tfo->get_fabric_name(),
+                        cmd->se_tfo->fabric_name,
                         cmd->se_sess->se_node_acl->initiatorname,
                         cmd->t_task_cdb[0]);
         if (ret)
@@ -1432,7 +1499,7 @@
         atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
         return 0;
 }
-EXPORT_SYMBOL(target_setup_cmd_from_cdb);
+EXPORT_SYMBOL(target_cmd_parse_cdb);
 
 /*
  * Used by fabric module frontends to queue tasks directly.
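Splitting target_setup_cmd_from_cdb() lets fabric drivers stage CDB handling: target_cmd_init_cdb() captures the CDB before the LUN is known, the LUN lookup then attaches cmd->se_dev, and target_cmd_parse_cdb() finally dispatches to the backend parser. A stubbed sketch of that required ordering, with types and lookup reduced to the bare minimum (all names here are stand-ins):

#include <stdio.h>
#include <string.h>

typedef int sense_reason_t;              /* 0 == TCM_NO_SENSE */

struct se_cmd {
        unsigned char cdb[32];
        int lun_attached;                /* stands in for cmd->se_dev */
};

static sense_reason_t cmd_init_cdb(struct se_cmd *c, const unsigned char *cdb,
                                   size_t len)
{
        if (len > sizeof(c->cdb))
                return -1;               /* kernel: TCM_INVALID_CDB_FIELD etc. */
        memcpy(c->cdb, cdb, len);        /* CDB captured before LUN lookup */
        return 0;
}

static sense_reason_t lookup_cmd_lun(struct se_cmd *c)
{
        c->lun_attached = 1;             /* kernel: attaches cmd->se_dev */
        return 0;
}

static sense_reason_t cmd_parse_cdb(struct se_cmd *c)
{
        if (!c->lun_attached)
                return -1;               /* parsing needs the backend device */
        printf("parsing opcode 0x%02x\n", c->cdb[0]);
        return 0;
}

int main(void)
{
        const unsigned char inquiry[6] = { 0x12 };
        struct se_cmd c = { { 0 }, 0 };

        /* New call order: init CDB, then LUN lookup, then parse. */
        if (cmd_init_cdb(&c, inquiry, sizeof(inquiry)) ||
            lookup_cmd_lun(&c) || cmd_parse_cdb(&c))
                return 1;
        return 0;
}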
@@ -1548,18 +1615,17 @@
         BUG_ON(!se_tpg);
         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
         BUG_ON(in_interrupt());
+
+        if (flags & TARGET_SCF_USE_CPUID)
+                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
         /*
          * Initialize se_cmd for target operation. From this point
          * exceptions are handled by sending exception status via
          * target_core_fabric_ops->queue_status() callback
          */
         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                        data_length, data_dir, task_attr, sense);
-
-        if (flags & TARGET_SCF_USE_CPUID)
-                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
-        else
-                se_cmd->cpuid = WORK_CPU_UNBOUND;
+                        data_length, data_dir, task_attr, sense,
+                        unpacked_lun);
 
         if (flags & TARGET_SCF_UNKNOWN_SIZE)
                 se_cmd->unknown_data_length = 1;
@@ -1577,17 +1643,25 @@
          */
         if (flags & TARGET_SCF_BIDI_OP)
                 se_cmd->se_cmd_flags |= SCF_BIDI;
-        /*
-         * Locate se_lun pointer and attach it to struct se_cmd
-         */
-        rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+
+        rc = target_cmd_init_cdb(se_cmd, cdb);
         if (rc) {
                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
                 target_put_sess_cmd(se_cmd);
                 return 0;
         }
 
-        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+        /*
+         * Locate se_lun pointer and attach it to struct se_cmd
+         */
+        rc = transport_lookup_cmd_lun(se_cmd);
+        if (rc) {
+                transport_send_check_condition_and_sense(se_cmd, rc, 0);
+                target_put_sess_cmd(se_cmd);
+                return 0;
+        }
+
+        rc = target_cmd_parse_cdb(se_cmd);
         if (rc != 0) {
                 transport_generic_request_failure(se_cmd, rc);
                 return 0;
17481822 BUG_ON(!se_tpg);
17491823
17501824 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1751
- 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1825
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
17521826 /*
17531827 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
17541828 * allocation failure.
@@ -1772,11 +1846,12 @@
          * out unpacked_lun for the original se_cmd.
          */
         if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
-                if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
+                if (!target_lookup_lun_from_tag(se_sess, tag,
+                                                &se_cmd->orig_fe_lun))
                         goto failure;
         }
 
-        ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+        ret = transport_lookup_tmr_lun(se_cmd);
         if (ret)
                 goto failure;
 
18001875 void transport_generic_request_failure(struct se_cmd *cmd,
18011876 sense_reason_t sense_reason)
18021877 {
1803
- int ret = 0, post_ret = 0;
1878
+ int ret = 0, post_ret;
18041879
18051880 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
18061881 sense_reason);
@@ -1811,16 +1886,14 @@
          */
         transport_complete_task_attr(cmd);
 
-        /*
-         * Handle special case for COMPARE_AND_WRITE failure, where the
-         * callback is expected to drop the per device ->caw_sem.
-         */
-        if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-             cmd->transport_complete_callback)
+        if (cmd->transport_complete_callback)
                 cmd->transport_complete_callback(cmd, false, &post_ret);
 
-        if (transport_check_aborted_status(cmd, 1))
+        if (cmd->transport_state & CMD_T_ABORTED) {
+                INIT_WORK(&cmd->work, target_abort_work);
+                queue_work(target_completion_wq, &cmd->work);
                 return;
+        }
 
         switch (sense_reason) {
         case TCM_NON_EXISTENT_LUN:
@@ -1866,7 +1939,8 @@
          * See spc4r17, section 7.4.6 Control Mode Page, Table 349
          */
         if (cmd->se_sess &&
-            cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+            cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
+                        == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
                 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                                 cmd->orig_fe_lun, 0x2C,
                                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1979,7 +2053,7 @@
 {
         struct se_device *dev = cmd->se_dev;
 
-        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                 return false;
 
         cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
@@ -2035,32 +2109,19 @@
         return true;
 }
 
-static int __transport_check_aborted_status(struct se_cmd *, int);
-
 void target_execute_cmd(struct se_cmd *cmd)
 {
         /*
          * Determine if frontend context caller is requesting the stopping of
          * this command for frontend exceptions.
          *
-         * If the received CDB has aleady been aborted stop processing it here.
+         * If the received CDB has already been aborted stop processing it here.
          */
+        if (target_cmd_interrupted(cmd))
+                return;
+
         spin_lock_irq(&cmd->t_state_lock);
-        if (__transport_check_aborted_status(cmd, 1)) {
-                spin_unlock_irq(&cmd->t_state_lock);
-                return;
-        }
-        if (cmd->transport_state & CMD_T_STOP) {
-                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
-                        __func__, __LINE__, cmd->tag);
-
-                spin_unlock_irq(&cmd->t_state_lock);
-                complete_all(&cmd->t_transport_stop_comp);
-                return;
-        }
-
         cmd->t_state = TRANSPORT_PROCESSING;
-        cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
         cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
         spin_unlock_irq(&cmd->t_state_lock);
 
@@ -2134,7 +2195,7 @@
 {
         struct se_device *dev = cmd->se_dev;
 
-        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                 return;
 
         if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
@@ -2214,7 +2275,7 @@
                 ret = cmd->se_tfo->queue_data_in(cmd);
                 break;
         }
-                /* fall through */
+                fallthrough;
         case DMA_NONE:
 queue_status:
                 trace_target_cmd_complete(cmd);
@@ -2409,7 +2470,7 @@
                         goto queue_full;
                 break;
         }
-                /* fall through */
+                fallthrough;
         case DMA_NONE:
 queue_status:
                 trace_target_cmd_complete(cmd);
@@ -2570,7 +2631,7 @@
         }
 
         /*
-         * Determine is the TCM fabric module has already allocated physical
+         * Determine if the TCM fabric module has already allocated physical
          * memory, and is directly calling transport_generic_map_mem_to_cmd()
          * beforehand.
          */
@@ -2630,7 +2691,8 @@
          * Determine if frontend context caller is requesting the stopping of
          * this command for frontend exceptions.
          */
-        if (cmd->transport_state & CMD_T_STOP) {
+        if (cmd->transport_state & CMD_T_STOP &&
+            !cmd->se_tfo->write_pending_must_be_called) {
                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
                         __func__, __LINE__, cmd->tag);
 
@@ -2694,13 +2756,29 @@
 }
 
 /*
+ * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
+ * finished.
+ */
+void target_put_cmd_and_wait(struct se_cmd *cmd)
+{
+        DECLARE_COMPLETION_ONSTACK(compl);
+
+        WARN_ON_ONCE(cmd->abrt_compl);
+        cmd->abrt_compl = &compl;
+        target_put_sess_cmd(cmd);
+        wait_for_completion(&compl);
+}
+
+/*
  * This function is called by frontend drivers after processing of a command
  * has finished.
  *
- * The protocol for ensuring that either the regular flow or the TMF
- * code drops one reference is as follows:
+ * The protocol for ensuring that either the regular frontend command
+ * processing flow or target_handle_abort() code drops one reference is as
+ * follows:
  * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
- *   the frontend driver to drop one reference, synchronously or asynchronously.
+ *   the frontend driver to call this function synchronously or asynchronously.
+ *   That will cause one reference to be dropped.
  * - During regular command processing the target core sets CMD_T_COMPLETE
  *   before invoking one of the .queue_*() functions.
  * - The code that aborts commands skips commands and TMFs for which
@@ -2712,7 +2790,7 @@
  * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
  *   be called and will drop a reference.
  * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
- *   will be called. transport_cmd_finish_abort() will drop the final reference.
+ *   will be called. target_handle_abort() will drop the final reference.
  */
 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
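target_put_cmd_and_wait() parks a completion in cmd->abrt_compl, drops a reference, and sleeps until the final kref release fires that completion. A small pthread sketch of the same put-and-wait shape, with a condition variable standing in for the kernel completion (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

struct cmd {
        int refcount;                   /* kernel: kref */
        struct completion *abrt_compl;  /* signalled by the last put */
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void put_cmd(struct cmd *cmd)
{
        if (--cmd->refcount == 0 && cmd->abrt_compl)
                complete(cmd->abrt_compl);  /* kernel: target_release_cmd_kref() */
}

static void put_cmd_and_wait(struct cmd *cmd)
{
        struct completion compl = { PTHREAD_MUTEX_INITIALIZER,
                                    PTHREAD_COND_INITIALIZER, 0 };

        cmd->abrt_compl = &compl;
        put_cmd(cmd);
        pthread_mutex_lock(&compl.lock);
        while (!compl.done)
                pthread_cond_wait(&compl.cond, &compl.lock);
        pthread_mutex_unlock(&compl.lock);
}

int main(void)
{
        struct cmd c = { 1, NULL };

        put_cmd_and_wait(&c);   /* returns once the last reference is gone */
        puts("released");
        return 0;
}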
@@ -2736,9 +2814,8 @@
                 transport_lun_remove_cmd(cmd);
         }
         if (aborted)
-                cmd->compl = &compl;
-        if (!aborted || tas)
-                ret = target_put_sess_cmd(cmd);
+                cmd->free_compl = &compl;
+        ret = target_put_sess_cmd(cmd);
         if (aborted) {
                 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
                 wait_for_completion(&compl);
27762853 ret = -ESHUTDOWN;
27772854 goto out;
27782855 }
2779
- se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
27802856 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
27812857 percpu_ref_get(&se_sess->cmd_count);
27822858 out:
....@@ -2803,7 +2879,8 @@
28032879 {
28042880 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
28052881 struct se_session *se_sess = se_cmd->se_sess;
2806
- struct completion *compl = se_cmd->compl;
2882
+ struct completion *free_compl = se_cmd->free_compl;
2883
+ struct completion *abrt_compl = se_cmd->abrt_compl;
28072884 unsigned long flags;
28082885
28092886 if (se_sess) {
....@@ -2814,8 +2891,10 @@
28142891
28152892 target_free_cmd_mem(se_cmd);
28162893 se_cmd->se_tfo->release_cmd(se_cmd);
2817
- if (compl)
2818
- complete(compl);
2894
+ if (free_compl)
2895
+ complete(free_compl);
2896
+ if (abrt_compl)
2897
+ complete(abrt_compl);
28192898
28202899 percpu_ref_put(&se_sess->cmd_count);
28212900 }
....@@ -2906,6 +2985,7 @@
29062985 case TMR_LUN_RESET: return "LUN_RESET";
29072986 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
29082987 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
2988
+ case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO";
29092989 case TMR_UNKNOWN: break;
29102990 }
29112991 return "(?)";
....@@ -2973,38 +3053,13 @@
29733053 }
29743054 EXPORT_SYMBOL(target_wait_for_sess_cmds);
29753055
2976
-static void target_lun_confirm(struct percpu_ref *ref)
2977
-{
2978
- struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
2979
-
2980
- complete(&lun->lun_ref_comp);
2981
-}
2982
-
3056
+/*
3057
+ * Prevent that new percpu_ref_tryget_live() calls succeed and wait until
3058
+ * all references to the LUN have been released. Called during LUN shutdown.
3059
+ */
29833060 void transport_clear_lun_ref(struct se_lun *lun)
29843061 {
2985
- /*
2986
- * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2987
- * the initial reference and schedule confirm kill to be
2988
- * executed after one full RCU grace period has completed.
2989
- */
2990
- percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2991
- /*
2992
- * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2993
- * to call target_lun_confirm after lun->lun_ref has been marked
2994
- * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2995
- * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2996
- * fails for all new incoming I/O.
2997
- */
2998
- wait_for_completion(&lun->lun_ref_comp);
2999
- /*
3000
- * The second completion waits for percpu_ref_put_many() to
3001
- * invoke ->release() after lun->lun_ref has switched to
3002
- * atomic_t mode, and lun->lun_ref.count has reached zero.
3003
- *
3004
- * At this point all target-core lun->lun_ref references have
3005
- * been dropped via transport_lun_remove_cmd(), and it's safe
3006
- * to proceed with the remaining LUN shutdown.
3007
- */
3062
+ percpu_ref_kill(&lun->lun_ref);
30083063 wait_for_completion(&lun->lun_shutdown_comp);
30093064 }
30103065
....@@ -3075,14 +3130,14 @@
30753130 }
30763131 EXPORT_SYMBOL(transport_wait_for_tasks);
30773132
3078
-struct sense_info {
3133
+struct sense_detail {
30793134 u8 key;
30803135 u8 asc;
30813136 u8 ascq;
3082
- bool add_sector_info;
3137
+ bool add_sense_info;
30833138 };
30843139
3085
-static const struct sense_info sense_info_table[] = {
3140
+static const struct sense_detail sense_detail_table[] = {
30863141 [TCM_NO_SENSE] = {
30873142 .key = NOT_READY
30883143 },
....@@ -3182,19 +3237,19 @@
31823237 .key = ABORTED_COMMAND,
31833238 .asc = 0x10,
31843239 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3185
- .add_sector_info = true,
3240
+ .add_sense_info = true,
31863241 },
31873242 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
31883243 .key = ABORTED_COMMAND,
31893244 .asc = 0x10,
31903245 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3191
- .add_sector_info = true,
3246
+ .add_sense_info = true,
31923247 },
31933248 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
31943249 .key = ABORTED_COMMAND,
31953250 .asc = 0x10,
31963251 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3197
- .add_sector_info = true,
3252
+ .add_sense_info = true,
31983253 },
31993254 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
32003255 .key = COPY_ABORTED,
....@@ -3242,42 +3297,42 @@
32423297 */
32433298 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
32443299 {
3245
- const struct sense_info *si;
3300
+ const struct sense_detail *sd;
32463301 u8 *buffer = cmd->sense_buffer;
32473302 int r = (__force int)reason;
32483303 u8 key, asc, ascq;
32493304 bool desc_format = target_sense_desc_format(cmd->se_dev);
32503305
3251
- if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3252
- si = &sense_info_table[r];
3306
+ if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
3307
+ sd = &sense_detail_table[r];
32533308 else
3254
- si = &sense_info_table[(__force int)
3309
+ sd = &sense_detail_table[(__force int)
32553310 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
32563311
3257
- key = si->key;
3312
+ key = sd->key;
32583313 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
32593314 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
32603315 &ascq)) {
32613316 cmd->scsi_status = SAM_STAT_BUSY;
32623317 return;
32633318 }
3264
- } else if (si->asc == 0) {
3319
+ } else if (sd->asc == 0) {
32653320 WARN_ON_ONCE(cmd->scsi_asc == 0);
32663321 asc = cmd->scsi_asc;
32673322 ascq = cmd->scsi_ascq;
32683323 } else {
3269
- asc = si->asc;
3270
- ascq = si->ascq;
3324
+ asc = sd->asc;
3325
+ ascq = sd->ascq;
32713326 }
32723327
32733328 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
32743329 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
32753330 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
32763331 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3277
- if (si->add_sector_info)
3332
+ if (sd->add_sense_info)
32783333 WARN_ON_ONCE(scsi_set_sense_information(buffer,
32793334 cmd->scsi_sense_length,
3280
- cmd->bad_sector) < 0);
3335
+ cmd->sense_info) < 0);
32813336 }
32823337
32833338 int
@@ -3285,6 +3340,8 @@
                 sense_reason_t reason, int from_transport)
 {
         unsigned long flags;
+
+        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
 
         spin_lock_irqsave(&cmd->t_state_lock, flags);
         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
33043361
3305
-static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3306
- __releases(&cmd->t_state_lock)
3307
- __acquires(&cmd->t_state_lock)
3362
+/**
3363
+ * target_send_busy - Send SCSI BUSY status back to the initiator
3364
+ * @cmd: SCSI command for which to send a BUSY reply.
3365
+ *
3366
+ * Note: Only call this function if target_submit_cmd*() failed.
3367
+ */
3368
+int target_send_busy(struct se_cmd *cmd)
33083369 {
3309
- int ret;
3370
+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
33103371
3311
- assert_spin_locked(&cmd->t_state_lock);
3312
- WARN_ON_ONCE(!irqs_disabled());
3313
-
3314
- if (!(cmd->transport_state & CMD_T_ABORTED))
3315
- return 0;
3316
- /*
3317
- * If cmd has been aborted but either no status is to be sent or it has
3318
- * already been sent, just return
3319
- */
3320
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3321
- if (send_status)
3322
- cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3323
- return 1;
3324
- }
3325
-
3326
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3327
- " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3328
-
3329
- cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3330
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3372
+ cmd->scsi_status = SAM_STAT_BUSY;
33313373 trace_target_cmd_complete(cmd);
3332
-
3333
- spin_unlock_irq(&cmd->t_state_lock);
3334
- ret = cmd->se_tfo->queue_status(cmd);
3335
- if (ret)
3336
- transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3337
- spin_lock_irq(&cmd->t_state_lock);
3338
-
3339
- return 1;
3374
+ return cmd->se_tfo->queue_status(cmd);
33403375 }
3341
-
3342
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3343
-{
3344
- int ret;
3345
-
3346
- spin_lock_irq(&cmd->t_state_lock);
3347
- ret = __transport_check_aborted_status(cmd, send_status);
3348
- spin_unlock_irq(&cmd->t_state_lock);
3349
-
3350
- return ret;
3351
-}
3352
-EXPORT_SYMBOL(transport_check_aborted_status);
3353
-
3354
-void transport_send_task_abort(struct se_cmd *cmd)
3355
-{
3356
- unsigned long flags;
3357
- int ret;
3358
-
3359
- spin_lock_irqsave(&cmd->t_state_lock, flags);
3360
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3361
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3362
- return;
3363
- }
3364
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3365
-
3366
- /*
3367
- * If there are still expected incoming fabric WRITEs, we wait
3368
- * until until they have completed before sending a TASK_ABORTED
3369
- * response. This response with TASK_ABORTED status will be
3370
- * queued back to fabric module by transport_check_aborted_status().
3371
- */
3372
- if (cmd->data_direction == DMA_TO_DEVICE) {
3373
- if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3374
- spin_lock_irqsave(&cmd->t_state_lock, flags);
3375
- if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3376
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3377
- goto send_abort;
3378
- }
3379
- cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3380
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3381
- return;
3382
- }
3383
- }
3384
-send_abort:
3385
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3386
-
3387
- transport_lun_remove_cmd(cmd);
3388
-
3389
- pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3390
- cmd->t_task_cdb[0], cmd->tag);
3391
-
3392
- trace_target_cmd_complete(cmd);
3393
- ret = cmd->se_tfo->queue_status(cmd);
3394
- if (ret)
3395
- transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3396
-}
3376
+EXPORT_SYMBOL(target_send_busy);
33973377
33983378 static void target_tmr_work(struct work_struct *work)
33993379 {
34003380 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
34013381 struct se_device *dev = cmd->se_dev;
34023382 struct se_tmr_req *tmr = cmd->se_tmr_req;
3403
- unsigned long flags;
34043383 int ret;
34053384
3406
- spin_lock_irqsave(&cmd->t_state_lock, flags);
3407
- if (cmd->transport_state & CMD_T_ABORTED) {
3408
- tmr->response = TMR_FUNCTION_REJECTED;
3409
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3410
- goto check_stop;
3411
- }
3412
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3385
+ if (cmd->transport_state & CMD_T_ABORTED)
3386
+ goto aborted;
34133387
34143388 switch (tmr->function) {
34153389 case TMR_ABORT_TASK:
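target_send_busy() gives fabric drivers a one-call way to return SAM BUSY when command submission fails (for example, out of tags), replacing the removed delayed-TAS machinery. A stubbed sketch of the intended error path on the fabric side — the fabric-side function names here are hypothetical:

#include <stdio.h>

#define SAM_STAT_GOOD 0x00
#define SAM_STAT_BUSY 0x08

struct se_cmd { int scsi_status; };

/* Stand-in for cmd->se_tfo->queue_status(): ships status to the initiator. */
static int queue_status(struct se_cmd *cmd)
{
        printf("status 0x%02x sent to initiator\n", cmd->scsi_status);
        return 0;
}

static int send_busy(struct se_cmd *cmd)
{
        cmd->scsi_status = SAM_STAT_BUSY;
        return queue_status(cmd);       /* kernel: target_send_busy() */
}

/* Hypothetical fabric submission path. */
static int fabric_submit(struct se_cmd *cmd, int submit_failed)
{
        if (submit_failed)
                return send_busy(cmd);
        cmd->scsi_status = SAM_STAT_GOOD;
        return queue_status(cmd);
}

int main(void)
{
        struct se_cmd c = { 0 };

        fabric_submit(&c, 1);   /* submission failed -> BUSY */
        return 0;
}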
@@ -3443,18 +3417,17 @@
                 break;
         }
 
-        spin_lock_irqsave(&cmd->t_state_lock, flags);
-        if (cmd->transport_state & CMD_T_ABORTED) {
-                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                goto check_stop;
-        }
-        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+        if (cmd->transport_state & CMD_T_ABORTED)
+                goto aborted;
 
         cmd->se_tfo->queue_tm_rsp(cmd);
 
-check_stop:
         transport_lun_remove_cmd(cmd);
         transport_cmd_check_stop_to_fabric(cmd);
+        return;
+
+aborted:
+        target_handle_abort(cmd);
 }
 
 int transport_generic_handle_tmr(
@@ -3473,16 +3446,15 @@
         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
         if (aborted) {
-                pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
-                        "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
-                        cmd->se_tmr_req->ref_task_tag, cmd->tag);
-                transport_lun_remove_cmd(cmd);
-                transport_cmd_check_stop_to_fabric(cmd);
+                pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
+                                    cmd->se_tmr_req->function,
+                                    cmd->se_tmr_req->ref_task_tag, cmd->tag);
+                target_handle_abort(cmd);
                 return 0;
         }
 
         INIT_WORK(&cmd->work, target_tmr_work);
-        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+        schedule_work(&cmd->work);
         return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_tmr);