forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/target/target_core_transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*******************************************************************************
  * Filename:  target_core_transport.c
  *
@@ -6,20 +7,6 @@
  * (c) Copyright 2002-2013 Datera, Inc.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  *
  ******************************************************************************/
 
@@ -205,19 +192,19 @@
 	if (sub_api_initialized)
 		return;
 
-	ret = request_module("target_core_iblock");
+	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
 	if (ret != 0)
 		pr_err("Unable to load target_core_iblock\n");
 
-	ret = request_module("target_core_file");
+	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
 	if (ret != 0)
 		pr_err("Unable to load target_core_file\n");
 
-	ret = request_module("target_core_pscsi");
+	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
 	if (ret != 0)
 		pr_err("Unable to load target_core_pscsi\n");
 
-	ret = request_module("target_core_user");
+	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
 	if (ret != 0)
 		pr_err("Unable to load target_core_user\n");
 
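
Note on the hunk above: IS_ENABLED(CONFIG_FOO) expands to a compile-time constant 1 (for y or m) or 0, so the && short-circuits and the request_module() upcall is compiled out when a backend is not configured, instead of failing at runtime with a spurious "Unable to load" message. Roughly equivalent form, as an illustration only (not part of the patch):

    /* With CONFIG_TCM_IBLOCK unset the modprobe upcall is never attempted
     * and ret stays 0, so no error is logged for an absent backend. */
    #if IS_ENABLED(CONFIG_TCM_IBLOCK)
    	ret = request_module("target_core_iblock") != 0;
    #else
    	ret = 0;
    #endif
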
@@ -248,6 +235,11 @@
 			target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);
+
+void transport_uninit_session(struct se_session *se_sess)
+{
+	percpu_ref_exit(&se_sess->cmd_count);
+}
 
 /**
  * transport_alloc_session - allocate a session object and initialize it
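
Note: transport_uninit_session() gives fabric drivers a teardown helper symmetric to transport_init_session(), so the percpu_ref_exit() on se_sess->cmd_count is no longer open-coded (a later hunk converts transport_free_session() to use it). A sketch of the intended pairing in a fabric error path; my_fabric_setup() is a hypothetical placeholder:

    rc = transport_init_session(se_sess);
    if (rc)
    	return rc;
    rc = my_fabric_setup(se_sess);		/* hypothetical follow-up step */
    if (rc) {
    	transport_uninit_session(se_sess);	/* undoes percpu_ref_init() */
    	return rc;
    }
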
@@ -287,14 +279,11 @@
 {
 	int rc;
 
-	se_sess->sess_cmd_map = kcalloc(tag_size, tag_num,
-					GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
+	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
+					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 	if (!se_sess->sess_cmd_map) {
-		se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num));
-		if (!se_sess->sess_cmd_map) {
-			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
-			return -ENOMEM;
-		}
+		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+		return -ENOMEM;
 	}
 
 	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
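
Note: kvcalloc() collapses the old two-stage allocation (kcalloc() with __GFP_NOWARN, then a vzalloc() fallback) into a single call that tries the slab allocator first and transparently falls back to vmalloc; kvfree(), already used in transport_free_session(), releases memory from either allocator. Minimal sketch of the pairing, with made-up sizes:

    /* One call replaces kcalloc()+vzalloc(); kvfree() frees either kind. */
    void *map = kvcalloc(64, 128, GFP_KERNEL | __GFP_RETRY_MAYFAIL);

    if (!map)
    	return -ENOMEM;
    /* ... use map ... */
    kvfree(map);
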
@@ -411,7 +400,7 @@
 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
 	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
-		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
 }
 EXPORT_SYMBOL(__transport_register_session);
 
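
Note: every get_fabric_name() call site in this patch becomes a plain field read. The assumed companion change (it lives in the target headers, not in this file's diff) is that struct target_core_fabric_ops now carries the name as data rather than a callback:

    /* Assumed shape of the header change (illustrative, not shown here): */
    struct target_core_fabric_ops {
    	struct module *module;
    	const char *fabric_name;	/* was: char *(*get_fabric_name)(void); */
    	/* ... remaining ops ... */
    };
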
@@ -595,10 +584,19 @@
 		sbitmap_queue_free(&se_sess->sess_tag_pool);
 		kvfree(se_sess->sess_cmd_map);
 	}
-	percpu_ref_exit(&se_sess->cmd_count);
+	transport_uninit_session(se_sess);
 	kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
+
+static int target_release_res(struct se_device *dev, void *data)
+{
+	struct se_session *sess = data;
+
+	if (dev->reservation_holder == sess)
+		target_release_reservation(dev);
+	return 0;
+}
 
 void transport_deregister_session(struct se_session *se_sess)
 {
@@ -616,8 +614,14 @@
 	se_sess->fabric_sess_ptr = NULL;
 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
+	/*
+	 * Since the session is being removed, release SPC-2
+	 * reservations held by the session that is disappearing.
+	 */
+	target_for_each_device(target_release_res, se_sess);
+
 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
-		se_tpg->se_tpg_tfo->get_fabric_name());
+		se_tpg->se_tpg_tfo->fabric_name);
 	/*
 	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
 	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
@@ -710,32 +714,6 @@
 	percpu_ref_put(&lun->lun_ref);
 }
 
-int transport_cmd_finish_abort(struct se_cmd *cmd)
-{
-	bool send_tas = cmd->transport_state & CMD_T_TAS;
-	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
-	int ret = 0;
-
-	if (send_tas)
-		transport_send_task_abort(cmd);
-
-	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
-		transport_lun_remove_cmd(cmd);
-	/*
-	 * Allow the fabric driver to unmap any resources before
-	 * releasing the descriptor via TFO->release_cmd()
-	 */
-	if (!send_tas)
-		cmd->se_tfo->aborted_task(cmd);
-
-	if (transport_cmd_check_stop_to_fabric(cmd))
-		return 1;
-	if (!send_tas && ack_kref)
-		ret = target_put_sess_cmd(cmd);
-
-	return ret;
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -785,11 +763,87 @@
 }
 EXPORT_SYMBOL(transport_copy_sense_to_cmd);
 
+static void target_handle_abort(struct se_cmd *cmd)
+{
+	bool tas = cmd->transport_state & CMD_T_TAS;
+	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
+	int ret;
+
+	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
+
+	if (tas) {
+		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+				 cmd->t_task_cdb[0], cmd->tag);
+			trace_target_cmd_complete(cmd);
+			ret = cmd->se_tfo->queue_status(cmd);
+			if (ret) {
+				transport_handle_queue_full(cmd, cmd->se_dev,
+							    ret, false);
+				return;
+			}
+		} else {
+			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
+			cmd->se_tfo->queue_tm_rsp(cmd);
+		}
+	} else {
+		/*
+		 * Allow the fabric driver to unmap any resources before
+		 * releasing the descriptor via TFO->release_cmd().
+		 */
+		cmd->se_tfo->aborted_task(cmd);
+		if (ack_kref)
+			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
+		/*
+		 * To do: establish a unit attention condition on the I_T
+		 * nexus associated with cmd. See also the paragraph "Aborting
+		 * commands" in SAM.
+		 */
+	}
+
+	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+
+	transport_lun_remove_cmd(cmd);
+
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void target_abort_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+	target_handle_abort(cmd);
+}
+
+static bool target_cmd_interrupted(struct se_cmd *cmd)
+{
+	int post_ret;
+
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		if (cmd->transport_complete_callback)
+			cmd->transport_complete_callback(cmd, false, &post_ret);
+		INIT_WORK(&cmd->work, target_abort_work);
+		queue_work(target_completion_wq, &cmd->work);
+		return true;
+	} else if (cmd->transport_state & CMD_T_STOP) {
+		if (cmd->transport_complete_callback)
+			cmd->transport_complete_callback(cmd, false, &post_ret);
+		complete_all(&cmd->t_transport_stop_comp);
+		return true;
+	}
+
+	return false;
+}
+
+/* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-	struct se_device *dev = cmd->se_dev;
 	int success;
 	unsigned long flags;
+
+	if (target_cmd_interrupted(cmd))
+		return;
 
 	cmd->scsi_status = scsi_status;
 
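
Note: target_complete_cmd() now checks target_cmd_interrupted() before taking t_state_lock: an aborted command is bounced to target_completion_wq so target_handle_abort() runs in process context, and a stopped command just wakes the waiter. The COMPARE_AND_WRITE caw_sem release that the old inline code did by hand (see the next hunk) now happens through the transport_complete_callback(cmd, false, &post_ret) invocation. This is what makes the new "may be called from interrupt context" comment hold; sketch of a backend completion handler relying on it, with illustrative names:

    /* Hypothetical backend IRQ completion path: safe because abort/stop
     * handling is deferred to a workqueue rather than done inline. */
    static void my_backend_irq_done(struct my_io *io)
    {
    	target_complete_cmd(io->se_cmd, io->error ?
    			    SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
    }
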
@@ -806,34 +860,12 @@
 		break;
 	}
 
-	/*
-	 * Check for case where an explicit ABORT_TASK has been received
-	 * and transport_wait_for_tasks() will be waiting for completion..
-	 */
-	if (cmd->transport_state & CMD_T_ABORTED ||
-	    cmd->transport_state & CMD_T_STOP) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		/*
-		 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
-		 * release se_device->caw_sem obtained by sbc_compare_and_write()
-		 * since target_complete_ok_work() or target_complete_failure_work()
-		 * won't be called to invoke the normal CAW completion callbacks.
-		 */
-		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
-			up(&dev->caw_sem);
-		}
-		complete_all(&cmd->t_transport_stop_comp);
-		return;
-	} else if (!success) {
-		INIT_WORK(&cmd->work, target_complete_failure_work);
-	} else {
-		INIT_WORK(&cmd->work, target_complete_ok_work);
-	}
-
 	cmd->t_state = TRANSPORT_COMPLETE;
 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
+		  target_complete_failure_work);
 	if (cmd->se_cmd_flags & SCF_USE_CPUID)
 		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
 	else
@@ -902,7 +934,7 @@
 		atomic_dec_mb(&dev->dev_qf_count);
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
-			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
 			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
 			: "UNKNOWN");
@@ -1256,6 +1288,19 @@
 	return TCM_NO_SENSE;
 }
 
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ *   the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
 sense_reason_t
 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
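
Note: a worked example of the residual bookkeeping the new kernel-doc describes, assuming the usual SPC semantics (a sketch, not taken verbatim from the function body). If the CDB asks for 4096 bytes but the fabric driver set cmd->data_length to 8192 from the transport header, the command is an underflow with 4096 bytes of residual:

    /* size comes from the CDB; cmd->data_length from the fabric driver. */
    if (size < cmd->data_length) {
    	cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
    	cmd->residual_count = cmd->data_length - size;	/* 8192 - 4096 */
    } else if (size > cmd->data_length) {
    	cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
    	cmd->residual_count = size - cmd->data_length;
    }
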
@@ -1266,7 +1311,7 @@
 	} else if (size != cmd->data_length) {
 		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
-			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+			" 0x%02x\n", cmd->se_tfo->fabric_name,
 			cmd->data_length, size, cmd->t_task_cdb[0]);
 
 		if (cmd->data_direction == DMA_TO_DEVICE) {
@@ -1331,14 +1376,15 @@
 	u32 data_length,
 	int data_direction,
 	int task_attr,
-	unsigned char *sense_buffer)
+	unsigned char *sense_buffer, u64 unpacked_lun)
 {
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
 	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->t_transport_stop_comp);
-	cmd->compl = NULL;
+	cmd->free_compl = NULL;
+	cmd->abrt_compl = NULL;
 	spin_lock_init(&cmd->t_state_lock);
 	INIT_WORK(&cmd->work, NULL);
 	kref_init(&cmd->cmd_kref);
@@ -1349,6 +1395,7 @@
 	cmd->data_direction = data_direction;
 	cmd->sam_task_attr = task_attr;
 	cmd->sense_buffer = sense_buffer;
+	cmd->orig_fe_lun = unpacked_lun;
 
 	cmd->state_active = false;
 }
@@ -1363,7 +1410,7 @@
 	 * Check if SAM Task Attribute emulation is enabled for this
 	 * struct se_device storage object
 	 */
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
 		return 0;
 
 	if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1376,11 +1423,11 @@
 }
 
 sense_reason_t
-target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
 {
-	struct se_device *dev = cmd->se_dev;
 	sense_reason_t ret;
 
+	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
 	/*
 	 * Ensure that the received CDB is less than the max (252 + 8) bytes
 	 * for VARIABLE_LENGTH_CMD
@@ -1389,7 +1436,8 @@
 		pr_err("Received SCSI CDB with command_size: %d that"
 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-		return TCM_INVALID_CDB_FIELD;
+		ret = TCM_INVALID_CDB_FIELD;
+		goto err;
 	}
 	/*
 	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1404,21 +1452,39 @@
 			       " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 			       scsi_command_size(cdb),
 			       (unsigned long)sizeof(cmd->__t_task_cdb));
-			return TCM_OUT_OF_RESOURCES;
+			ret = TCM_OUT_OF_RESOURCES;
+			goto err;
 		}
-	} else
-		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+	}
 	/*
 	 * Copy the original CDB into cmd->
 	 */
 	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 
 	trace_target_sequencer_start(cmd);
+	return 0;
+
+err:
+	/*
+	 * Copy the CDB here to allow trace_target_cmd_complete() to
+	 * print the cdb to the trace buffers.
+	 */
+	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
+					 (unsigned int)TCM_MAX_COMMAND_SIZE));
+	return ret;
+}
+EXPORT_SYMBOL(target_cmd_init_cdb);
+
+sense_reason_t
+target_cmd_parse_cdb(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret;
 
 	ret = dev->transport->parse_cdb(cmd);
 	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
 		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
-				    cmd->se_tfo->get_fabric_name(),
+				    cmd->se_tfo->fabric_name,
 				    cmd->se_sess->se_node_acl->initiatorname,
 				    cmd->t_task_cdb[0]);
 	if (ret)
@@ -1432,7 +1498,7 @@
 	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
 	return 0;
 }
-EXPORT_SYMBOL(target_setup_cmd_from_cdb);
+EXPORT_SYMBOL(target_cmd_parse_cdb);
 
 /*
  * Used by fabric module frontends to queue tasks directly.
@@ -1554,7 +1620,8 @@
 	 * target_core_fabric_ops->queue_status() callback
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-				data_length, data_dir, task_attr, sense);
+				data_length, data_dir, task_attr, sense,
+				unpacked_lun);
 
 	if (flags & TARGET_SCF_USE_CPUID)
 		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
@@ -1577,17 +1644,25 @@
 	 */
 	if (flags & TARGET_SCF_BIDI_OP)
 		se_cmd->se_cmd_flags |= SCF_BIDI;
-	/*
-	 * Locate se_lun pointer and attach it to struct se_cmd
-	 */
-	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+
+	rc = target_cmd_init_cdb(se_cmd, cdb);
 	if (rc) {
 		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 		target_put_sess_cmd(se_cmd);
 		return 0;
 	}
 
-	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+	/*
+	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
+	rc = transport_lookup_cmd_lun(se_cmd);
+	if (rc) {
+		transport_send_check_condition_and_sense(se_cmd, rc, 0);
+		target_put_sess_cmd(se_cmd);
+		return 0;
+	}
+
+	rc = target_cmd_parse_cdb(se_cmd);
 	if (rc != 0) {
 		transport_generic_request_failure(se_cmd, rc);
 		return 0;
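
Note: the old single-step target_setup_cmd_from_cdb() is split so the CDB bytes are captured (target_cmd_init_cdb) before the LUN lookup, and the backend only parses the opcode (target_cmd_parse_cdb) once se_dev is known; failures before the lookup can therefore still trace the CDB in the CHECK CONDITION path. Condensed, the submission order the hunk above wires up is:

    rc = target_cmd_init_cdb(se_cmd, cdb);		/* copy CDB into t_task_cdb  */
    if (!rc)
    	rc = transport_lookup_cmd_lun(se_cmd);	/* resolve orig_fe_lun       */
    if (!rc)
    	rc = target_cmd_parse_cdb(se_cmd);	/* backend parses the opcode */
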
@@ -1748,7 +1823,7 @@
 	BUG_ON(!se_tpg);
 
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
+			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
 	/*
 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
 	 * allocation failure.
@@ -1772,11 +1847,12 @@
 	 * out unpacked_lun for the original se_cmd.
 	 */
 	if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
-		if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
+		if (!target_lookup_lun_from_tag(se_sess, tag,
+						&se_cmd->orig_fe_lun))
 			goto failure;
 	}
 
-	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+	ret = transport_lookup_tmr_lun(se_cmd);
 	if (ret)
 		goto failure;
 
@@ -1800,7 +1876,7 @@
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0, post_ret = 0;
+	int ret = 0, post_ret;
 
 	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
 		 sense_reason);
@@ -1811,16 +1887,14 @@
 	 */
 	transport_complete_task_attr(cmd);
 
-	/*
-	 * Handle special case for COMPARE_AND_WRITE failure, where the
-	 * callback is expected to drop the per device ->caw_sem.
-	 */
-	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-	     cmd->transport_complete_callback)
+	if (cmd->transport_complete_callback)
 		cmd->transport_complete_callback(cmd, false, &post_ret);
 
-	if (transport_check_aborted_status(cmd, 1))
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		INIT_WORK(&cmd->work, target_abort_work);
+		queue_work(target_completion_wq, &cmd->work);
 		return;
+	}
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -1866,7 +1940,8 @@
 		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
 		 */
 		if (cmd->se_sess &&
-		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
+					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
 			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
 					       cmd->orig_fe_lun, 0x2C,
 					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1979,7 +2054,7 @@
 {
 	struct se_device *dev = cmd->se_dev;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
 		return false;
 
 	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
@@ -2035,32 +2110,19 @@
 	return true;
 }
 
-static int __transport_check_aborted_status(struct se_cmd *, int);
-
 void target_execute_cmd(struct se_cmd *cmd)
 {
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 *
-	 * If the received CDB has aleady been aborted stop processing it here.
+	 * If the received CDB has already been aborted stop processing it here.
 	 */
+	if (target_cmd_interrupted(cmd))
+		return;
+
 	spin_lock_irq(&cmd->t_state_lock);
-	if (__transport_check_aborted_status(cmd, 1)) {
-		spin_unlock_irq(&cmd->t_state_lock);
-		return;
-	}
-	if (cmd->transport_state & CMD_T_STOP) {
-		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
-			__func__, __LINE__, cmd->tag);
-
-		spin_unlock_irq(&cmd->t_state_lock);
-		complete_all(&cmd->t_transport_stop_comp);
-		return;
-	}
-
 	cmd->t_state = TRANSPORT_PROCESSING;
-	cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
 	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
@@ -2134,7 +2196,7 @@
 {
 	struct se_device *dev = cmd->se_dev;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
 		return;
 
 	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
@@ -2214,7 +2276,7 @@
 		ret = cmd->se_tfo->queue_data_in(cmd);
 		break;
 	}
-	/* fall through */
+	fallthrough;
 	case DMA_NONE:
 queue_status:
 		trace_target_cmd_complete(cmd);
@@ -2409,7 +2471,7 @@
 		goto queue_full;
 		break;
 	}
-	/* fall through */
+	fallthrough;
 	case DMA_NONE:
 queue_status:
 		trace_target_cmd_complete(cmd);
@@ -2570,7 +2632,7 @@
 	}
 
 	/*
-	 * Determine is the TCM fabric module has already allocated physical
+	 * Determine if the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
 	 * beforehand.
 	 */
@@ -2630,7 +2692,8 @@
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 */
-	if (cmd->transport_state & CMD_T_STOP) {
+	if (cmd->transport_state & CMD_T_STOP &&
+	    !cmd->se_tfo->write_pending_must_be_called) {
 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
 			 __func__, __LINE__, cmd->tag);
 
@@ -2694,13 +2757,29 @@
 }
 
 /*
+ * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
+ * finished.
+ */
+void target_put_cmd_and_wait(struct se_cmd *cmd)
+{
+	DECLARE_COMPLETION_ONSTACK(compl);
+
+	WARN_ON_ONCE(cmd->abrt_compl);
+	cmd->abrt_compl = &compl;
+	target_put_sess_cmd(cmd);
+	wait_for_completion(&compl);
+}
+
+/*
  * This function is called by frontend drivers after processing of a command
  * has finished.
  *
- * The protocol for ensuring that either the regular flow or the TMF
- * code drops one reference is as follows:
+ * The protocol for ensuring that either the regular frontend command
+ * processing flow or target_handle_abort() code drops one reference is as
+ * follows:
  * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
-  *   the frontend driver to drop one reference, synchronously or asynchronously.
+ *   the frontend driver to call this function synchronously or asynchronously.
+ *   That will cause one reference to be dropped.
  * - During regular command processing the target core sets CMD_T_COMPLETE
 *   before invoking one of the .queue_*() functions.
 * - The code that aborts commands skips commands and TMFs for which
@@ -2712,7 +2791,7 @@
 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
 *   be called and will drop a reference.
 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
- *   will be called. transport_cmd_finish_abort() will drop the final reference.
+ *   will be called. target_handle_abort() will drop the final reference.
 */
 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
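
Note: target_put_cmd_and_wait() is the synchronous counterpart of the free_compl path below: the caller parks a completion in abrt_compl, drops its reference, and sleeps until target_release_cmd_kref() fires it. Sketch of the intended use in an abort/TMF path (surrounding context is assumed, not shown in this hunk):

    /* Drop our reference and block until release_cmd() has run. */
    target_put_cmd_and_wait(se_cmd);
    /* se_cmd is gone here; it must not be dereferenced again. */
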
@@ -2736,9 +2815,8 @@
 		transport_lun_remove_cmd(cmd);
 	}
 	if (aborted)
-		cmd->compl = &compl;
-	if (!aborted || tas)
-		ret = target_put_sess_cmd(cmd);
+		cmd->free_compl = &compl;
+	ret = target_put_sess_cmd(cmd);
 	if (aborted) {
 		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
 		wait_for_completion(&compl);
@@ -2776,7 +2854,6 @@
 		ret = -ESHUTDOWN;
 		goto out;
 	}
-	se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 	percpu_ref_get(&se_sess->cmd_count);
 out:
@@ -2803,7 +2880,8 @@
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
-	struct completion *compl = se_cmd->compl;
+	struct completion *free_compl = se_cmd->free_compl;
+	struct completion *abrt_compl = se_cmd->abrt_compl;
 	unsigned long flags;
 
 	if (se_sess) {
@@ -2814,8 +2892,10 @@
 
 	target_free_cmd_mem(se_cmd);
 	se_cmd->se_tfo->release_cmd(se_cmd);
-	if (compl)
-		complete(compl);
+	if (free_compl)
+		complete(free_compl);
+	if (abrt_compl)
+		complete(abrt_compl);
 
 	percpu_ref_put(&se_sess->cmd_count);
 }
@@ -2906,6 +2986,7 @@
 	case TMR_LUN_RESET:		return "LUN_RESET";
 	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
 	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
+	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
 	case TMR_UNKNOWN:		break;
 	}
 	return "(?)";
@@ -2973,38 +3054,13 @@
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
-static void target_lun_confirm(struct percpu_ref *ref)
-{
-	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
-
-	complete(&lun->lun_ref_comp);
-}
-
+/*
+ * Prevent that new percpu_ref_tryget_live() calls succeed and wait until
+ * all references to the LUN have been released. Called during LUN shutdown.
+ */
 void transport_clear_lun_ref(struct se_lun *lun)
 {
-	/*
-	 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
-	 * the initial reference and schedule confirm kill to be
-	 * executed after one full RCU grace period has completed.
-	 */
-	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
-	/*
-	 * The first completion waits for percpu_ref_switch_to_atomic_rcu()
-	 * to call target_lun_confirm after lun->lun_ref has been marked
-	 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
-	 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
-	 * fails for all new incoming I/O.
-	 */
-	wait_for_completion(&lun->lun_ref_comp);
-	/*
-	 * The second completion waits for percpu_ref_put_many() to
-	 * invoke ->release() after lun->lun_ref has switched to
-	 * atomic_t mode, and lun->lun_ref.count has reached zero.
-	 *
-	 * At this point all target-core lun->lun_ref references have
-	 * been dropped via transport_lun_remove_cmd(), and it's safe
-	 * to proceed with the remaining LUN shutdown.
-	 */
+	percpu_ref_kill(&lun->lun_ref);
 	wait_for_completion(&lun->lun_shutdown_comp);
 }
 
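
Note: the two-stage confirm-kill handshake and lun_ref_comp are gone; a plain percpu_ref_kill() makes percpu_ref_tryget_live() fail for new I/O, and the single wait relies on the ref's release callback firing once the count reaches zero. The assumed counterpart lives in target_core_tpg.c, outside this diff, and would look like:

    /* Assumed lun_ref release callback completing the shutdown wait: */
    static void core_tpg_lun_ref_release(struct percpu_ref *ref)
    {
    	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

    	complete(&lun->lun_shutdown_comp);
    }
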
@@ -3286,6 +3342,8 @@
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -3302,114 +3360,31 @@
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
-static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
-	__releases(&cmd->t_state_lock)
-	__acquires(&cmd->t_state_lock)
+/**
+ * target_send_busy - Send SCSI BUSY status back to the initiator
+ * @cmd: SCSI command for which to send a BUSY reply.
+ *
+ * Note: Only call this function if target_submit_cmd*() failed.
+ */
+int target_send_busy(struct se_cmd *cmd)
 {
-	int ret;
+	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
 
-	assert_spin_locked(&cmd->t_state_lock);
-	WARN_ON_ONCE(!irqs_disabled());
-
-	if (!(cmd->transport_state & CMD_T_ABORTED))
-		return 0;
-	/*
-	 * If cmd has been aborted but either no status is to be sent or it has
-	 * already been sent, just return
-	 */
-	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
-		if (send_status)
-			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-		return 1;
-	}
-
-	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
-		" 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
-
-	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
-	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+	cmd->scsi_status = SAM_STAT_BUSY;
 	trace_target_cmd_complete(cmd);
-
-	spin_unlock_irq(&cmd->t_state_lock);
-	ret = cmd->se_tfo->queue_status(cmd);
-	if (ret)
-		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
-	spin_lock_irq(&cmd->t_state_lock);
-
-	return 1;
+	return cmd->se_tfo->queue_status(cmd);
 }
-
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
-{
-	int ret;
-
-	spin_lock_irq(&cmd->t_state_lock);
-	ret = __transport_check_aborted_status(cmd, send_status);
-	spin_unlock_irq(&cmd->t_state_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(transport_check_aborted_status);
-
-void transport_send_task_abort(struct se_cmd *cmd)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	/*
-	 * If there are still expected incoming fabric WRITEs, we wait
-	 * until until they have completed before sending a TASK_ABORTED
-	 * response. This response with TASK_ABORTED status will be
-	 * queued back to fabric module by transport_check_aborted_status().
-	 */
-	if (cmd->data_direction == DMA_TO_DEVICE) {
-		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
-			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
-				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-				goto send_abort;
-			}
-			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return;
-		}
-	}
-send_abort:
-	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-
-	transport_lun_remove_cmd(cmd);
-
-	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
-		 cmd->t_task_cdb[0], cmd->tag);
-
-	trace_target_cmd_complete(cmd);
-	ret = cmd->se_tfo->queue_status(cmd);
-	if (ret)
-		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
-}
+EXPORT_SYMBOL(target_send_busy);
 
 static void target_tmr_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->transport_state & CMD_T_ABORTED) {
-		tmr->response = TMR_FUNCTION_REJECTED;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		goto check_stop;
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED)
+		goto aborted;
 
 	switch (tmr->function) {
 	case TMR_ABORT_TASK:
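
Note: target_send_busy() replaces the removed delayed-TAS machinery for one narrow case: a fabric driver whose submission attempt failed can still answer the initiator with SCSI BUSY so it retries later. Hypothetical call site (the surrounding fabric code is assumed, not part of this patch):

    rc = target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
    		       data_length, task_attr, data_dir, flags);
    if (rc) {
    	if (target_send_busy(se_cmd))	/* queue a SAM_STAT_BUSY reply */
    		pr_err("could not queue BUSY status\n");
    }
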
@@ -3443,18 +3418,17 @@
 		break;
 	}
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->transport_state & CMD_T_ABORTED) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		goto check_stop;
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_ABORTED)
+		goto aborted;
 
 	cmd->se_tfo->queue_tm_rsp(cmd);
 
-check_stop:
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
+	return;
+
+aborted:
+	target_handle_abort(cmd);
 }
 
 int transport_generic_handle_tmr(
@@ -3473,16 +3447,15 @@
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	if (aborted) {
-		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
-			"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
-			cmd->se_tmr_req->ref_task_tag, cmd->tag);
-		transport_lun_remove_cmd(cmd);
-		transport_cmd_check_stop_to_fabric(cmd);
+		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
+				    cmd->se_tmr_req->function,
+				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
+		target_handle_abort(cmd);
 		return 0;
 	}
 
 	INIT_WORK(&cmd->work, target_tmr_work);
-	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+	schedule_work(&cmd->work);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_tmr);
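
Note: TMR processing moves off the per-device tmr_wq onto the shared system workqueue; schedule_work(w) is by definition queue_work(system_wq, w), so the only behavioural difference is which workqueue executes target_tmr_work():

    queue_work(cmd->se_dev->tmr_wq, &cmd->work);	/* old: per-device queue */
    schedule_work(&cmd->work);				/* new: system_wq */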