| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /******************************************************************************* |
|---|
| 2 | 3 | * Filename: target_core_transport.c |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 6 | 7 | * (c) Copyright 2002-2013 Datera, Inc. |
|---|
| 7 | 8 | * |
|---|
| 8 | 9 | * Nicholas A. Bellinger <nab@kernel.org> |
|---|
| 9 | | - * |
|---|
| 10 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 11 | | - * it under the terms of the GNU General Public License as published by |
|---|
| 12 | | - * the Free Software Foundation; either version 2 of the License, or |
|---|
| 13 | | - * (at your option) any later version. |
|---|
| 14 | | - * |
|---|
| 15 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 16 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 17 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 18 | | - * GNU General Public License for more details. |
|---|
| 19 | | - * |
|---|
| 20 | | - * You should have received a copy of the GNU General Public License |
|---|
| 21 | | - * along with this program; if not, write to the Free Software |
|---|
| 22 | | - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
|---|
| 23 | 10 | * |
|---|
| 24 | 11 | ******************************************************************************/ |
|---|
| 25 | 12 | |
|---|
| .. | .. |
|---|
| 205 | 192 | if (sub_api_initialized) |
|---|
| 206 | 193 | return; |
|---|
| 207 | 194 | |
|---|
| 208 | | - ret = request_module("target_core_iblock"); |
|---|
| 195 | + ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); |
|---|
| 209 | 196 | if (ret != 0) |
|---|
| 210 | 197 | pr_err("Unable to load target_core_iblock\n"); |
|---|
| 211 | 198 | |
|---|
| 212 | | - ret = request_module("target_core_file"); |
|---|
| 199 | + ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); |
|---|
| 213 | 200 | if (ret != 0) |
|---|
| 214 | 201 | pr_err("Unable to load target_core_file\n"); |
|---|
| 215 | 202 | |
|---|
| 216 | | - ret = request_module("target_core_pscsi"); |
|---|
| 203 | + ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); |
|---|
| 217 | 204 | if (ret != 0) |
|---|
| 218 | 205 | pr_err("Unable to load target_core_pscsi\n"); |
|---|
| 219 | 206 | |
|---|
| 220 | | - ret = request_module("target_core_user"); |
|---|
| 207 | + ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); |
|---|
| 221 | 208 | if (ret != 0) |
|---|
| 222 | 209 | pr_err("Unable to load target_core_user\n"); |
|---|
| 223 | 210 | |
|---|
| .. | .. |
|---|
| 248 | 235 | target_release_sess_cmd_refcnt, 0, GFP_KERNEL); |
|---|
| 249 | 236 | } |
|---|
| 250 | 237 | EXPORT_SYMBOL(transport_init_session); |
|---|
| 238 | + |
|---|
/*
 * transport_uninit_session - release resources acquired by
 * transport_init_session()
 * @se_sess: session to tear down
 *
 * Drops the percpu reference counter backing se_sess->cmd_count. Must only
 * be called once no further commands can take references on the session.
 */
void transport_uninit_session(struct se_session *se_sess)
{
	percpu_ref_exit(&se_sess->cmd_count);
}
|---|
| 251 | 243 | |
|---|
| 252 | 244 | /** |
|---|
| 253 | 245 | * transport_alloc_session - allocate a session object and initialize it |
|---|
| .. | .. |
|---|
| 287 | 279 | { |
|---|
| 288 | 280 | int rc; |
|---|
| 289 | 281 | |
|---|
| 290 | | - se_sess->sess_cmd_map = kcalloc(tag_size, tag_num, |
|---|
| 291 | | - GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); |
|---|
| 282 | + se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, |
|---|
| 283 | + GFP_KERNEL | __GFP_RETRY_MAYFAIL); |
|---|
| 292 | 284 | if (!se_sess->sess_cmd_map) { |
|---|
| 293 | | - se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num)); |
|---|
| 294 | | - if (!se_sess->sess_cmd_map) { |
|---|
| 295 | | - pr_err("Unable to allocate se_sess->sess_cmd_map\n"); |
|---|
| 296 | | - return -ENOMEM; |
|---|
| 297 | | - } |
|---|
| 285 | + pr_err("Unable to allocate se_sess->sess_cmd_map\n"); |
|---|
| 286 | + return -ENOMEM; |
|---|
| 298 | 287 | } |
|---|
| 299 | 288 | |
|---|
| 300 | 289 | rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, |
|---|
| .. | .. |
|---|
| 411 | 400 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); |
|---|
| 412 | 401 | |
|---|
| 413 | 402 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
|---|
| 414 | | - se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
|---|
| 403 | + se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); |
|---|
| 415 | 404 | } |
|---|
| 416 | 405 | EXPORT_SYMBOL(__transport_register_session); |
|---|
| 417 | 406 | |
|---|
| .. | .. |
|---|
| 595 | 584 | sbitmap_queue_free(&se_sess->sess_tag_pool); |
|---|
| 596 | 585 | kvfree(se_sess->sess_cmd_map); |
|---|
| 597 | 586 | } |
|---|
| 598 | | - percpu_ref_exit(&se_sess->cmd_count); |
|---|
| 587 | + transport_uninit_session(se_sess); |
|---|
| 599 | 588 | kmem_cache_free(se_sess_cache, se_sess); |
|---|
| 600 | 589 | } |
|---|
| 601 | 590 | EXPORT_SYMBOL(transport_free_session); |
|---|
| 591 | + |
|---|
| 592 | +static int target_release_res(struct se_device *dev, void *data) |
|---|
| 593 | +{ |
|---|
| 594 | + struct se_session *sess = data; |
|---|
| 595 | + |
|---|
| 596 | + if (dev->reservation_holder == sess) |
|---|
| 597 | + target_release_reservation(dev); |
|---|
| 598 | + return 0; |
|---|
| 599 | +} |
|---|
| 602 | 600 | |
|---|
| 603 | 601 | void transport_deregister_session(struct se_session *se_sess) |
|---|
| 604 | 602 | { |
|---|
| .. | .. |
|---|
| 616 | 614 | se_sess->fabric_sess_ptr = NULL; |
|---|
| 617 | 615 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
|---|
| 618 | 616 | |
|---|
| 617 | + /* |
|---|
| 618 | + * Since the session is being removed, release SPC-2 |
|---|
| 619 | + * reservations held by the session that is disappearing. |
|---|
| 620 | + */ |
|---|
| 621 | + target_for_each_device(target_release_res, se_sess); |
|---|
| 622 | + |
|---|
| 619 | 623 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
|---|
| 620 | | - se_tpg->se_tpg_tfo->get_fabric_name()); |
|---|
| 624 | + se_tpg->se_tpg_tfo->fabric_name); |
|---|
| 621 | 625 | /* |
|---|
| 622 | 626 | * If last kref is dropping now for an explicit NodeACL, awake sleeping |
|---|
| 623 | 627 | * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group |
|---|
| .. | .. |
|---|
| 646 | 650 | if (!dev) |
|---|
| 647 | 651 | return; |
|---|
| 648 | 652 | |
|---|
| 649 | | - spin_lock_irqsave(&dev->execute_task_lock, flags); |
|---|
| 653 | + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); |
|---|
| 650 | 654 | if (cmd->state_active) { |
|---|
| 651 | 655 | list_del(&cmd->state_list); |
|---|
| 652 | 656 | cmd->state_active = false; |
|---|
| 653 | 657 | } |
|---|
| 654 | | - spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
|---|
| 658 | + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); |
|---|
| 655 | 659 | } |
|---|
| 656 | 660 | |
|---|
| 657 | 661 | /* |
|---|
| .. | .. |
|---|
| 710 | 714 | percpu_ref_put(&lun->lun_ref); |
|---|
| 711 | 715 | } |
|---|
| 712 | 716 | |
|---|
| 713 | | -int transport_cmd_finish_abort(struct se_cmd *cmd) |
|---|
| 714 | | -{ |
|---|
| 715 | | - bool send_tas = cmd->transport_state & CMD_T_TAS; |
|---|
| 716 | | - bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); |
|---|
| 717 | | - int ret = 0; |
|---|
| 718 | | - |
|---|
| 719 | | - if (send_tas) |
|---|
| 720 | | - transport_send_task_abort(cmd); |
|---|
| 721 | | - |
|---|
| 722 | | - if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) |
|---|
| 723 | | - transport_lun_remove_cmd(cmd); |
|---|
| 724 | | - /* |
|---|
| 725 | | - * Allow the fabric driver to unmap any resources before |
|---|
| 726 | | - * releasing the descriptor via TFO->release_cmd() |
|---|
| 727 | | - */ |
|---|
| 728 | | - if (!send_tas) |
|---|
| 729 | | - cmd->se_tfo->aborted_task(cmd); |
|---|
| 730 | | - |
|---|
| 731 | | - if (transport_cmd_check_stop_to_fabric(cmd)) |
|---|
| 732 | | - return 1; |
|---|
| 733 | | - if (!send_tas && ack_kref) |
|---|
| 734 | | - ret = target_put_sess_cmd(cmd); |
|---|
| 735 | | - |
|---|
| 736 | | - return ret; |
|---|
| 737 | | -} |
|---|
| 738 | | - |
|---|
| 739 | 717 | static void target_complete_failure_work(struct work_struct *work) |
|---|
| 740 | 718 | { |
|---|
| 741 | 719 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
|---|
| .. | .. |
|---|
| 785 | 763 | } |
|---|
| 786 | 764 | EXPORT_SYMBOL(transport_copy_sense_to_cmd); |
|---|
| 787 | 765 | |
|---|
/*
 * target_handle_abort - finish processing of an aborted command
 * @cmd: command that has CMD_T_ABORTED set
 *
 * If the TAS (task aborted status) bit is set in cmd->transport_state, a
 * response is sent back to the initiator: SAM_STAT_TASK_ABORTED for regular
 * commands, TMR_FUNCTION_REJECTED for task management requests. Without TAS
 * the command is torn down silently via the fabric ->aborted_task() callback.
 * In all cases the command is removed from its LUN and handed back to the
 * fabric via transport_cmd_check_stop_to_fabric().
 */
static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			/* Regular command: report TASK ABORTED status. */
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				/*
				 * Fabric could not queue the status now;
				 * defer via the queue-full retry path and
				 * keep the command alive.
				 */
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			/* Task management request: reject the function. */
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		/* Drop the reference the fabric expected to be acked. */
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	/* The caller must still hold at least one reference on cmd. */
	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}
|---|
| 811 | + |
|---|
| 812 | +static void target_abort_work(struct work_struct *work) |
|---|
| 813 | +{ |
|---|
| 814 | + struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
|---|
| 815 | + |
|---|
| 816 | + target_handle_abort(cmd); |
|---|
| 817 | +} |
|---|
| 818 | + |
|---|
/*
 * target_cmd_interrupted - check for a pending abort or stop request
 * @cmd: command being processed
 *
 * If CMD_T_ABORTED is set, schedule abort handling on the completion
 * workqueue; if CMD_T_STOP is set, wake up the context sleeping on
 * t_transport_stop_comp. In either case any transport_complete_callback
 * (e.g. COMPARE AND WRITE cleanup) is invoked first with success == false
 * so it can release resources it holds.
 *
 * Return: true if the command was intercepted and the caller must stop
 * processing it, false to continue normal processing.
 */
static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		/* Handle the abort from process context. */
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		/* Wake up transport_wait_for_tasks() / stop waiters. */
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}
|---|
| 838 | + |
|---|
| 839 | +/* May be called from interrupt context so must not sleep. */ |
|---|
| 788 | 840 | void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) |
|---|
| 789 | 841 | { |
|---|
| 790 | | - struct se_device *dev = cmd->se_dev; |
|---|
| 791 | 842 | int success; |
|---|
| 792 | 843 | unsigned long flags; |
|---|
| 844 | + |
|---|
| 845 | + if (target_cmd_interrupted(cmd)) |
|---|
| 846 | + return; |
|---|
| 793 | 847 | |
|---|
| 794 | 848 | cmd->scsi_status = scsi_status; |
|---|
| 795 | 849 | |
|---|
| .. | .. |
|---|
| 806 | 860 | break; |
|---|
| 807 | 861 | } |
|---|
| 808 | 862 | |
|---|
| 809 | | - /* |
|---|
| 810 | | - * Check for case where an explicit ABORT_TASK has been received |
|---|
| 811 | | - * and transport_wait_for_tasks() will be waiting for completion.. |
|---|
| 812 | | - */ |
|---|
| 813 | | - if (cmd->transport_state & CMD_T_ABORTED || |
|---|
| 814 | | - cmd->transport_state & CMD_T_STOP) { |
|---|
| 815 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 816 | | - /* |
|---|
| 817 | | - * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), |
|---|
| 818 | | - * release se_device->caw_sem obtained by sbc_compare_and_write() |
|---|
| 819 | | - * since target_complete_ok_work() or target_complete_failure_work() |
|---|
| 820 | | - * won't be called to invoke the normal CAW completion callbacks. |
|---|
| 821 | | - */ |
|---|
| 822 | | - if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { |
|---|
| 823 | | - up(&dev->caw_sem); |
|---|
| 824 | | - } |
|---|
| 825 | | - complete_all(&cmd->t_transport_stop_comp); |
|---|
| 826 | | - return; |
|---|
| 827 | | - } else if (!success) { |
|---|
| 828 | | - INIT_WORK(&cmd->work, target_complete_failure_work); |
|---|
| 829 | | - } else { |
|---|
| 830 | | - INIT_WORK(&cmd->work, target_complete_ok_work); |
|---|
| 831 | | - } |
|---|
| 832 | | - |
|---|
| 833 | 863 | cmd->t_state = TRANSPORT_COMPLETE; |
|---|
| 834 | 864 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); |
|---|
| 835 | 865 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 836 | 866 | |
|---|
| 837 | | - if (cmd->se_cmd_flags & SCF_USE_CPUID) |
|---|
| 838 | | - queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); |
|---|
| 839 | | - else |
|---|
| 840 | | - queue_work(target_completion_wq, &cmd->work); |
|---|
| 867 | + INIT_WORK(&cmd->work, success ? target_complete_ok_work : |
|---|
| 868 | + target_complete_failure_work); |
|---|
| 869 | + queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); |
|---|
| 841 | 870 | } |
|---|
| 842 | 871 | EXPORT_SYMBOL(target_complete_cmd); |
|---|
| 843 | 872 | |
|---|
| .. | .. |
|---|
| 872 | 901 | struct se_device *dev = cmd->se_dev; |
|---|
| 873 | 902 | unsigned long flags; |
|---|
| 874 | 903 | |
|---|
| 875 | | - spin_lock_irqsave(&dev->execute_task_lock, flags); |
|---|
| 904 | + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); |
|---|
| 876 | 905 | if (!cmd->state_active) { |
|---|
| 877 | | - list_add_tail(&cmd->state_list, &dev->state_list); |
|---|
| 906 | + list_add_tail(&cmd->state_list, |
|---|
| 907 | + &dev->queues[cmd->cpuid].state_list); |
|---|
| 878 | 908 | cmd->state_active = true; |
|---|
| 879 | 909 | } |
|---|
| 880 | | - spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
|---|
| 910 | + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); |
|---|
| 881 | 911 | } |
|---|
| 882 | 912 | |
|---|
| 883 | 913 | /* |
|---|
| .. | .. |
|---|
| 902 | 932 | atomic_dec_mb(&dev->dev_qf_count); |
|---|
| 903 | 933 | |
|---|
| 904 | 934 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
|---|
| 905 | | - " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
|---|
| 935 | + " context: %s\n", cmd->se_tfo->fabric_name, cmd, |
|---|
| 906 | 936 | (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : |
|---|
| 907 | 937 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" |
|---|
| 908 | 938 | : "UNKNOWN"); |
|---|
| .. | .. |
|---|
| 1256 | 1286 | return TCM_NO_SENSE; |
|---|
| 1257 | 1287 | } |
|---|
| 1258 | 1288 | |
|---|
| 1289 | +/** |
|---|
| 1290 | + * target_cmd_size_check - Check whether there will be a residual. |
|---|
| 1291 | + * @cmd: SCSI command. |
|---|
| 1292 | + * @size: Data buffer size derived from CDB. The data buffer size provided by |
|---|
| 1293 | + * the SCSI transport driver is available in @cmd->data_length. |
|---|
| 1294 | + * |
|---|
| 1295 | + * Compare the data buffer size from the CDB with the data buffer limit from the transport |
|---|
| 1296 | + * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. |
|---|
| 1297 | + * |
|---|
| 1298 | + * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd(). |
|---|
| 1299 | + * |
|---|
| 1300 | + * Return: TCM_NO_SENSE |
|---|
| 1301 | + */ |
|---|
| 1259 | 1302 | sense_reason_t |
|---|
| 1260 | 1303 | target_cmd_size_check(struct se_cmd *cmd, unsigned int size) |
|---|
| 1261 | 1304 | { |
|---|
| .. | .. |
|---|
| 1266 | 1309 | } else if (size != cmd->data_length) { |
|---|
| 1267 | 1310 | pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" |
|---|
| 1268 | 1311 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
|---|
| 1269 | | - " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
|---|
| 1312 | + " 0x%02x\n", cmd->se_tfo->fabric_name, |
|---|
| 1270 | 1313 | cmd->data_length, size, cmd->t_task_cdb[0]); |
|---|
| 1271 | 1314 | |
|---|
| 1272 | 1315 | if (cmd->data_direction == DMA_TO_DEVICE) { |
|---|
| .. | .. |
|---|
| 1331 | 1374 | u32 data_length, |
|---|
| 1332 | 1375 | int data_direction, |
|---|
| 1333 | 1376 | int task_attr, |
|---|
| 1334 | | - unsigned char *sense_buffer) |
|---|
| 1377 | + unsigned char *sense_buffer, u64 unpacked_lun) |
|---|
| 1335 | 1378 | { |
|---|
| 1336 | 1379 | INIT_LIST_HEAD(&cmd->se_delayed_node); |
|---|
| 1337 | 1380 | INIT_LIST_HEAD(&cmd->se_qf_node); |
|---|
| 1338 | 1381 | INIT_LIST_HEAD(&cmd->se_cmd_list); |
|---|
| 1339 | 1382 | INIT_LIST_HEAD(&cmd->state_list); |
|---|
| 1340 | 1383 | init_completion(&cmd->t_transport_stop_comp); |
|---|
| 1341 | | - cmd->compl = NULL; |
|---|
| 1384 | + cmd->free_compl = NULL; |
|---|
| 1385 | + cmd->abrt_compl = NULL; |
|---|
| 1342 | 1386 | spin_lock_init(&cmd->t_state_lock); |
|---|
| 1343 | 1387 | INIT_WORK(&cmd->work, NULL); |
|---|
| 1344 | 1388 | kref_init(&cmd->cmd_kref); |
|---|
| .. | .. |
|---|
| 1349 | 1393 | cmd->data_direction = data_direction; |
|---|
| 1350 | 1394 | cmd->sam_task_attr = task_attr; |
|---|
| 1351 | 1395 | cmd->sense_buffer = sense_buffer; |
|---|
| 1396 | + cmd->orig_fe_lun = unpacked_lun; |
|---|
| 1397 | + |
|---|
| 1398 | + if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) |
|---|
| 1399 | + cmd->cpuid = raw_smp_processor_id(); |
|---|
| 1352 | 1400 | |
|---|
| 1353 | 1401 | cmd->state_active = false; |
|---|
| 1354 | 1402 | } |
|---|
| .. | .. |
|---|
| 1363 | 1411 | * Check if SAM Task Attribute emulation is enabled for this |
|---|
| 1364 | 1412 | * struct se_device storage object |
|---|
| 1365 | 1413 | */ |
|---|
| 1366 | | - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 1414 | + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 1367 | 1415 | return 0; |
|---|
| 1368 | 1416 | |
|---|
| 1369 | 1417 | if (cmd->sam_task_attr == TCM_ACA_TAG) { |
|---|
| .. | .. |
|---|
| 1376 | 1424 | } |
|---|
| 1377 | 1425 | |
|---|
| 1378 | 1426 | sense_reason_t |
|---|
| 1379 | | -target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) |
|---|
| 1427 | +target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb) |
|---|
| 1380 | 1428 | { |
|---|
| 1381 | | - struct se_device *dev = cmd->se_dev; |
|---|
| 1382 | 1429 | sense_reason_t ret; |
|---|
| 1383 | 1430 | |
|---|
| 1431 | + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
|---|
| 1384 | 1432 | /* |
|---|
| 1385 | 1433 | * Ensure that the received CDB is less than the max (252 + 8) bytes |
|---|
| 1386 | 1434 | * for VARIABLE_LENGTH_CMD |
|---|
| .. | .. |
|---|
| 1389 | 1437 | pr_err("Received SCSI CDB with command_size: %d that" |
|---|
| 1390 | 1438 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
|---|
| 1391 | 1439 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); |
|---|
| 1392 | | - return TCM_INVALID_CDB_FIELD; |
|---|
| 1440 | + ret = TCM_INVALID_CDB_FIELD; |
|---|
| 1441 | + goto err; |
|---|
| 1393 | 1442 | } |
|---|
| 1394 | 1443 | /* |
|---|
| 1395 | 1444 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, |
|---|
| .. | .. |
|---|
| 1404 | 1453 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
|---|
| 1405 | 1454 | scsi_command_size(cdb), |
|---|
| 1406 | 1455 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
|---|
| 1407 | | - return TCM_OUT_OF_RESOURCES; |
|---|
| 1456 | + ret = TCM_OUT_OF_RESOURCES; |
|---|
| 1457 | + goto err; |
|---|
| 1408 | 1458 | } |
|---|
| 1409 | | - } else |
|---|
| 1410 | | - cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
|---|
| 1459 | + } |
|---|
| 1411 | 1460 | /* |
|---|
| 1412 | 1461 | * Copy the original CDB into cmd-> |
|---|
| 1413 | 1462 | */ |
|---|
| 1414 | 1463 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
|---|
| 1415 | 1464 | |
|---|
| 1416 | 1465 | trace_target_sequencer_start(cmd); |
|---|
| 1466 | + return 0; |
|---|
| 1467 | + |
|---|
| 1468 | +err: |
|---|
| 1469 | + /* |
|---|
| 1470 | + * Copy the CDB here to allow trace_target_cmd_complete() to |
|---|
| 1471 | + * print the cdb to the trace buffers. |
|---|
| 1472 | + */ |
|---|
| 1473 | + memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), |
|---|
| 1474 | + (unsigned int)TCM_MAX_COMMAND_SIZE)); |
|---|
| 1475 | + return ret; |
|---|
| 1476 | +} |
|---|
| 1477 | +EXPORT_SYMBOL(target_cmd_init_cdb); |
|---|
| 1478 | + |
|---|
| 1479 | +sense_reason_t |
|---|
| 1480 | +target_cmd_parse_cdb(struct se_cmd *cmd) |
|---|
| 1481 | +{ |
|---|
| 1482 | + struct se_device *dev = cmd->se_dev; |
|---|
| 1483 | + sense_reason_t ret; |
|---|
| 1417 | 1484 | |
|---|
| 1418 | 1485 | ret = dev->transport->parse_cdb(cmd); |
|---|
| 1419 | 1486 | if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) |
|---|
| 1420 | 1487 | pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", |
|---|
| 1421 | | - cmd->se_tfo->get_fabric_name(), |
|---|
| 1488 | + cmd->se_tfo->fabric_name, |
|---|
| 1422 | 1489 | cmd->se_sess->se_node_acl->initiatorname, |
|---|
| 1423 | 1490 | cmd->t_task_cdb[0]); |
|---|
| 1424 | 1491 | if (ret) |
|---|
| .. | .. |
|---|
| 1432 | 1499 | atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); |
|---|
| 1433 | 1500 | return 0; |
|---|
| 1434 | 1501 | } |
|---|
| 1435 | | -EXPORT_SYMBOL(target_setup_cmd_from_cdb); |
|---|
| 1502 | +EXPORT_SYMBOL(target_cmd_parse_cdb); |
|---|
| 1436 | 1503 | |
|---|
| 1437 | 1504 | /* |
|---|
| 1438 | 1505 | * Used by fabric module frontends to queue tasks directly. |
|---|
| .. | .. |
|---|
| 1548 | 1615 | BUG_ON(!se_tpg); |
|---|
| 1549 | 1616 | BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); |
|---|
| 1550 | 1617 | BUG_ON(in_interrupt()); |
|---|
| 1618 | + |
|---|
| 1619 | + if (flags & TARGET_SCF_USE_CPUID) |
|---|
| 1620 | + se_cmd->se_cmd_flags |= SCF_USE_CPUID; |
|---|
| 1551 | 1621 | /* |
|---|
| 1552 | 1622 | * Initialize se_cmd for target operation. From this point |
|---|
| 1553 | 1623 | * exceptions are handled by sending exception status via |
|---|
| 1554 | 1624 | * target_core_fabric_ops->queue_status() callback |
|---|
| 1555 | 1625 | */ |
|---|
| 1556 | 1626 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, |
|---|
| 1557 | | - data_length, data_dir, task_attr, sense); |
|---|
| 1558 | | - |
|---|
| 1559 | | - if (flags & TARGET_SCF_USE_CPUID) |
|---|
| 1560 | | - se_cmd->se_cmd_flags |= SCF_USE_CPUID; |
|---|
| 1561 | | - else |
|---|
| 1562 | | - se_cmd->cpuid = WORK_CPU_UNBOUND; |
|---|
| 1627 | + data_length, data_dir, task_attr, sense, |
|---|
| 1628 | + unpacked_lun); |
|---|
| 1563 | 1629 | |
|---|
| 1564 | 1630 | if (flags & TARGET_SCF_UNKNOWN_SIZE) |
|---|
| 1565 | 1631 | se_cmd->unknown_data_length = 1; |
|---|
| .. | .. |
|---|
| 1577 | 1643 | */ |
|---|
| 1578 | 1644 | if (flags & TARGET_SCF_BIDI_OP) |
|---|
| 1579 | 1645 | se_cmd->se_cmd_flags |= SCF_BIDI; |
|---|
| 1580 | | - /* |
|---|
| 1581 | | - * Locate se_lun pointer and attach it to struct se_cmd |
|---|
| 1582 | | - */ |
|---|
| 1583 | | - rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); |
|---|
| 1646 | + |
|---|
| 1647 | + rc = target_cmd_init_cdb(se_cmd, cdb); |
|---|
| 1584 | 1648 | if (rc) { |
|---|
| 1585 | 1649 | transport_send_check_condition_and_sense(se_cmd, rc, 0); |
|---|
| 1586 | 1650 | target_put_sess_cmd(se_cmd); |
|---|
| 1587 | 1651 | return 0; |
|---|
| 1588 | 1652 | } |
|---|
| 1589 | 1653 | |
|---|
| 1590 | | - rc = target_setup_cmd_from_cdb(se_cmd, cdb); |
|---|
| 1654 | + /* |
|---|
| 1655 | + * Locate se_lun pointer and attach it to struct se_cmd |
|---|
| 1656 | + */ |
|---|
| 1657 | + rc = transport_lookup_cmd_lun(se_cmd); |
|---|
| 1658 | + if (rc) { |
|---|
| 1659 | + transport_send_check_condition_and_sense(se_cmd, rc, 0); |
|---|
| 1660 | + target_put_sess_cmd(se_cmd); |
|---|
| 1661 | + return 0; |
|---|
| 1662 | + } |
|---|
| 1663 | + |
|---|
| 1664 | + rc = target_cmd_parse_cdb(se_cmd); |
|---|
| 1591 | 1665 | if (rc != 0) { |
|---|
| 1592 | 1666 | transport_generic_request_failure(se_cmd, rc); |
|---|
| 1593 | 1667 | return 0; |
|---|
| .. | .. |
|---|
| 1748 | 1822 | BUG_ON(!se_tpg); |
|---|
| 1749 | 1823 | |
|---|
| 1750 | 1824 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, |
|---|
| 1751 | | - 0, DMA_NONE, TCM_SIMPLE_TAG, sense); |
|---|
| 1825 | + 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun); |
|---|
| 1752 | 1826 | /* |
|---|
| 1753 | 1827 | * FIXME: Currently expect caller to handle se_cmd->se_tmr_req |
|---|
| 1754 | 1828 | * allocation failure. |
|---|
| .. | .. |
|---|
| 1772 | 1846 | * out unpacked_lun for the original se_cmd. |
|---|
| 1773 | 1847 | */ |
|---|
| 1774 | 1848 | if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { |
|---|
| 1775 | | - if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) |
|---|
| 1849 | + if (!target_lookup_lun_from_tag(se_sess, tag, |
|---|
| 1850 | + &se_cmd->orig_fe_lun)) |
|---|
| 1776 | 1851 | goto failure; |
|---|
| 1777 | 1852 | } |
|---|
| 1778 | 1853 | |
|---|
| 1779 | | - ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); |
|---|
| 1854 | + ret = transport_lookup_tmr_lun(se_cmd); |
|---|
| 1780 | 1855 | if (ret) |
|---|
| 1781 | 1856 | goto failure; |
|---|
| 1782 | 1857 | |
|---|
| .. | .. |
|---|
| 1800 | 1875 | void transport_generic_request_failure(struct se_cmd *cmd, |
|---|
| 1801 | 1876 | sense_reason_t sense_reason) |
|---|
| 1802 | 1877 | { |
|---|
| 1803 | | - int ret = 0, post_ret = 0; |
|---|
| 1878 | + int ret = 0, post_ret; |
|---|
| 1804 | 1879 | |
|---|
| 1805 | 1880 | pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", |
|---|
| 1806 | 1881 | sense_reason); |
|---|
| .. | .. |
|---|
| 1811 | 1886 | */ |
|---|
| 1812 | 1887 | transport_complete_task_attr(cmd); |
|---|
| 1813 | 1888 | |
|---|
| 1814 | | - /* |
|---|
| 1815 | | - * Handle special case for COMPARE_AND_WRITE failure, where the |
|---|
| 1816 | | - * callback is expected to drop the per device ->caw_sem. |
|---|
| 1817 | | - */ |
|---|
| 1818 | | - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && |
|---|
| 1819 | | - cmd->transport_complete_callback) |
|---|
| 1889 | + if (cmd->transport_complete_callback) |
|---|
| 1820 | 1890 | cmd->transport_complete_callback(cmd, false, &post_ret); |
|---|
| 1821 | 1891 | |
|---|
| 1822 | | - if (transport_check_aborted_status(cmd, 1)) |
|---|
| 1892 | + if (cmd->transport_state & CMD_T_ABORTED) { |
|---|
| 1893 | + INIT_WORK(&cmd->work, target_abort_work); |
|---|
| 1894 | + queue_work(target_completion_wq, &cmd->work); |
|---|
| 1823 | 1895 | return; |
|---|
| 1896 | + } |
|---|
| 1824 | 1897 | |
|---|
| 1825 | 1898 | switch (sense_reason) { |
|---|
| 1826 | 1899 | case TCM_NON_EXISTENT_LUN: |
|---|
| .. | .. |
|---|
| 1866 | 1939 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
|---|
| 1867 | 1940 | */ |
|---|
| 1868 | 1941 | if (cmd->se_sess && |
|---|
| 1869 | | - cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { |
|---|
| 1942 | + cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl |
|---|
| 1943 | + == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { |
|---|
| 1870 | 1944 | target_ua_allocate_lun(cmd->se_sess->se_node_acl, |
|---|
| 1871 | 1945 | cmd->orig_fe_lun, 0x2C, |
|---|
| 1872 | 1946 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
|---|
| .. | .. |
|---|
| 1979 | 2053 | { |
|---|
| 1980 | 2054 | struct se_device *dev = cmd->se_dev; |
|---|
| 1981 | 2055 | |
|---|
| 1982 | | - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 2056 | + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 1983 | 2057 | return false; |
|---|
| 1984 | 2058 | |
|---|
| 1985 | 2059 | cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; |
|---|
| .. | .. |
|---|
| 2035 | 2109 | return true; |
|---|
| 2036 | 2110 | } |
|---|
| 2037 | 2111 | |
|---|
| 2038 | | -static int __transport_check_aborted_status(struct se_cmd *, int); |
|---|
| 2039 | | - |
|---|
| 2040 | 2112 | void target_execute_cmd(struct se_cmd *cmd) |
|---|
| 2041 | 2113 | { |
|---|
| 2042 | 2114 | /* |
|---|
| 2043 | 2115 | * Determine if frontend context caller is requesting the stopping of |
|---|
| 2044 | 2116 | * this command for frontend exceptions. |
|---|
| 2045 | 2117 | * |
|---|
| 2046 | | - * If the received CDB has aleady been aborted stop processing it here. |
|---|
| 2118 | + * If the received CDB has already been aborted stop processing it here. |
|---|
| 2047 | 2119 | */ |
|---|
| 2120 | + if (target_cmd_interrupted(cmd)) |
|---|
| 2121 | + return; |
|---|
| 2122 | + |
|---|
| 2048 | 2123 | spin_lock_irq(&cmd->t_state_lock); |
|---|
| 2049 | | - if (__transport_check_aborted_status(cmd, 1)) { |
|---|
| 2050 | | - spin_unlock_irq(&cmd->t_state_lock); |
|---|
| 2051 | | - return; |
|---|
| 2052 | | - } |
|---|
| 2053 | | - if (cmd->transport_state & CMD_T_STOP) { |
|---|
| 2054 | | - pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", |
|---|
| 2055 | | - __func__, __LINE__, cmd->tag); |
|---|
| 2056 | | - |
|---|
| 2057 | | - spin_unlock_irq(&cmd->t_state_lock); |
|---|
| 2058 | | - complete_all(&cmd->t_transport_stop_comp); |
|---|
| 2059 | | - return; |
|---|
| 2060 | | - } |
|---|
| 2061 | | - |
|---|
| 2062 | 2124 | cmd->t_state = TRANSPORT_PROCESSING; |
|---|
| 2063 | | - cmd->transport_state &= ~CMD_T_PRE_EXECUTE; |
|---|
| 2064 | 2125 | cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; |
|---|
| 2065 | 2126 | spin_unlock_irq(&cmd->t_state_lock); |
|---|
| 2066 | 2127 | |
|---|
| .. | .. |
|---|
| 2134 | 2195 | { |
|---|
| 2135 | 2196 | struct se_device *dev = cmd->se_dev; |
|---|
| 2136 | 2197 | |
|---|
| 2137 | | - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 2198 | + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
|---|
| 2138 | 2199 | return; |
|---|
| 2139 | 2200 | |
|---|
| 2140 | 2201 | if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) |
|---|
| .. | .. |
|---|
| 2214 | 2275 | ret = cmd->se_tfo->queue_data_in(cmd); |
|---|
| 2215 | 2276 | break; |
|---|
| 2216 | 2277 | } |
|---|
| 2217 | | - /* fall through */ |
|---|
| 2278 | + fallthrough; |
|---|
| 2218 | 2279 | case DMA_NONE: |
|---|
| 2219 | 2280 | queue_status: |
|---|
| 2220 | 2281 | trace_target_cmd_complete(cmd); |
|---|
| .. | .. |
|---|
| 2409 | 2470 | goto queue_full; |
|---|
| 2410 | 2471 | break; |
|---|
| 2411 | 2472 | } |
|---|
| 2412 | | - /* fall through */ |
|---|
| 2473 | + fallthrough; |
|---|
| 2413 | 2474 | case DMA_NONE: |
|---|
| 2414 | 2475 | queue_status: |
|---|
| 2415 | 2476 | trace_target_cmd_complete(cmd); |
|---|
| .. | .. |
|---|
| 2570 | 2631 | } |
|---|
| 2571 | 2632 | |
|---|
| 2572 | 2633 | /* |
|---|
| 2573 | | - * Determine is the TCM fabric module has already allocated physical |
|---|
| 2634 | + * Determine if the TCM fabric module has already allocated physical |
|---|
| 2574 | 2635 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
|---|
| 2575 | 2636 | * beforehand. |
|---|
| 2576 | 2637 | */ |
|---|
| .. | .. |
|---|
| 2630 | 2691 | * Determine if frontend context caller is requesting the stopping of |
|---|
| 2631 | 2692 | * this command for frontend exceptions. |
|---|
| 2632 | 2693 | */ |
|---|
| 2633 | | - if (cmd->transport_state & CMD_T_STOP) { |
|---|
| 2694 | + if (cmd->transport_state & CMD_T_STOP && |
|---|
| 2695 | + !cmd->se_tfo->write_pending_must_be_called) { |
|---|
| 2634 | 2696 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", |
|---|
| 2635 | 2697 | __func__, __LINE__, cmd->tag); |
|---|
| 2636 | 2698 | |
|---|
| .. | .. |
|---|
| 2694 | 2756 | } |
|---|
| 2695 | 2757 | |
|---|
| 2696 | 2758 | /* |
|---|
| 2759 | + * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has |
|---|
| 2760 | + * finished. |
|---|
| 2761 | + */ |
|---|
| 2762 | +void target_put_cmd_and_wait(struct se_cmd *cmd) |
|---|
| 2763 | +{ |
|---|
| 2764 | + DECLARE_COMPLETION_ONSTACK(compl); |
|---|
| 2765 | + |
|---|
| 2766 | + WARN_ON_ONCE(cmd->abrt_compl); |
|---|
| 2767 | + cmd->abrt_compl = &compl; |
|---|
| 2768 | + target_put_sess_cmd(cmd); |
|---|
| 2769 | + wait_for_completion(&compl); |
|---|
| 2770 | +} |
|---|
| 2771 | + |
|---|
| 2772 | +/* |
|---|
| 2697 | 2773 | * This function is called by frontend drivers after processing of a command |
|---|
| 2698 | 2774 | * has finished. |
|---|
| 2699 | 2775 | * |
|---|
| 2700 | | - * The protocol for ensuring that either the regular flow or the TMF |
|---|
| 2701 | | - * code drops one reference is as follows: |
|---|
| 2776 | + * The protocol for ensuring that either the regular frontend command |
|---|
| 2777 | + * processing flow or target_handle_abort() code drops one reference is as |
|---|
| 2778 | + * follows: |
|---|
| 2702 | 2779 | * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause |
|---|
| 2703 | | - * the frontend driver to drop one reference, synchronously or asynchronously. |
|---|
| 2780 | + * the frontend driver to call this function synchronously or asynchronously. |
|---|
| 2781 | + * That will cause one reference to be dropped. |
|---|
| 2704 | 2782 | * - During regular command processing the target core sets CMD_T_COMPLETE |
|---|
| 2705 | 2783 | * before invoking one of the .queue_*() functions. |
|---|
| 2706 | 2784 | * - The code that aborts commands skips commands and TMFs for which |
|---|
| .. | .. |
|---|
| 2712 | 2790 | * - For aborted commands for which CMD_T_TAS has been set .queue_status() will |
|---|
| 2713 | 2791 | * be called and will drop a reference. |
|---|
| 2714 | 2792 | * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() |
|---|
| 2715 | | - * will be called. transport_cmd_finish_abort() will drop the final reference. |
|---|
| 2793 | + * will be called. target_handle_abort() will drop the final reference. |
|---|
| 2716 | 2794 | */ |
|---|
| 2717 | 2795 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
|---|
| 2718 | 2796 | { |
|---|
| .. | .. |
|---|
| 2736 | 2814 | transport_lun_remove_cmd(cmd); |
|---|
| 2737 | 2815 | } |
|---|
| 2738 | 2816 | if (aborted) |
|---|
| 2739 | | - cmd->compl = &compl; |
|---|
| 2740 | | - if (!aborted || tas) |
|---|
| 2741 | | - ret = target_put_sess_cmd(cmd); |
|---|
| 2817 | + cmd->free_compl = &compl; |
|---|
| 2818 | + ret = target_put_sess_cmd(cmd); |
|---|
| 2742 | 2819 | if (aborted) { |
|---|
| 2743 | 2820 | pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); |
|---|
| 2744 | 2821 | wait_for_completion(&compl); |
|---|
| .. | .. |
|---|
| 2776 | 2853 | ret = -ESHUTDOWN; |
|---|
| 2777 | 2854 | goto out; |
|---|
| 2778 | 2855 | } |
|---|
| 2779 | | - se_cmd->transport_state |= CMD_T_PRE_EXECUTE; |
|---|
| 2780 | 2856 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
|---|
| 2781 | 2857 | percpu_ref_get(&se_sess->cmd_count); |
|---|
| 2782 | 2858 | out: |
|---|
| .. | .. |
|---|
| 2803 | 2879 | { |
|---|
| 2804 | 2880 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
|---|
| 2805 | 2881 | struct se_session *se_sess = se_cmd->se_sess; |
|---|
| 2806 | | - struct completion *compl = se_cmd->compl; |
|---|
| 2882 | + struct completion *free_compl = se_cmd->free_compl; |
|---|
| 2883 | + struct completion *abrt_compl = se_cmd->abrt_compl; |
|---|
| 2807 | 2884 | unsigned long flags; |
|---|
| 2808 | 2885 | |
|---|
| 2809 | 2886 | if (se_sess) { |
|---|
| .. | .. |
|---|
| 2814 | 2891 | |
|---|
| 2815 | 2892 | target_free_cmd_mem(se_cmd); |
|---|
| 2816 | 2893 | se_cmd->se_tfo->release_cmd(se_cmd); |
|---|
| 2817 | | - if (compl) |
|---|
| 2818 | | - complete(compl); |
|---|
| 2894 | + if (free_compl) |
|---|
| 2895 | + complete(free_compl); |
|---|
| 2896 | + if (abrt_compl) |
|---|
| 2897 | + complete(abrt_compl); |
|---|
| 2819 | 2898 | |
|---|
| 2820 | 2899 | percpu_ref_put(&se_sess->cmd_count); |
|---|
| 2821 | 2900 | } |
|---|
| .. | .. |
|---|
| 2906 | 2985 | case TMR_LUN_RESET: return "LUN_RESET"; |
|---|
| 2907 | 2986 | case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; |
|---|
| 2908 | 2987 | case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; |
|---|
| 2988 | + case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; |
|---|
| 2909 | 2989 | case TMR_UNKNOWN: break; |
|---|
| 2910 | 2990 | } |
|---|
| 2911 | 2991 | return "(?)"; |
|---|
| .. | .. |
|---|
| 2973 | 3053 | } |
|---|
| 2974 | 3054 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
|---|
| 2975 | 3055 | |
|---|
| 2976 | | -static void target_lun_confirm(struct percpu_ref *ref) |
|---|
| 2977 | | -{ |
|---|
| 2978 | | - struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); |
|---|
| 2979 | | - |
|---|
| 2980 | | - complete(&lun->lun_ref_comp); |
|---|
| 2981 | | -} |
|---|
| 2982 | | - |
|---|
| 3056 | +/* |
|---|
| 3057 | + * Prevent that new percpu_ref_tryget_live() calls succeed and wait until |
|---|
| 3058 | + * all references to the LUN have been released. Called during LUN shutdown. |
|---|
| 3059 | + */ |
|---|
| 2983 | 3060 | void transport_clear_lun_ref(struct se_lun *lun) |
|---|
| 2984 | 3061 | { |
|---|
| 2985 | | - /* |
|---|
| 2986 | | - * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop |
|---|
| 2987 | | - * the initial reference and schedule confirm kill to be |
|---|
| 2988 | | - * executed after one full RCU grace period has completed. |
|---|
| 2989 | | - */ |
|---|
| 2990 | | - percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); |
|---|
| 2991 | | - /* |
|---|
| 2992 | | - * The first completion waits for percpu_ref_switch_to_atomic_rcu() |
|---|
| 2993 | | - * to call target_lun_confirm after lun->lun_ref has been marked |
|---|
| 2994 | | - * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t |
|---|
| 2995 | | - * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref |
|---|
| 2996 | | - * fails for all new incoming I/O. |
|---|
| 2997 | | - */ |
|---|
| 2998 | | - wait_for_completion(&lun->lun_ref_comp); |
|---|
| 2999 | | - /* |
|---|
| 3000 | | - * The second completion waits for percpu_ref_put_many() to |
|---|
| 3001 | | - * invoke ->release() after lun->lun_ref has switched to |
|---|
| 3002 | | - * atomic_t mode, and lun->lun_ref.count has reached zero. |
|---|
| 3003 | | - * |
|---|
| 3004 | | - * At this point all target-core lun->lun_ref references have |
|---|
| 3005 | | - * been dropped via transport_lun_remove_cmd(), and it's safe |
|---|
| 3006 | | - * to proceed with the remaining LUN shutdown. |
|---|
| 3007 | | - */ |
|---|
| 3062 | + percpu_ref_kill(&lun->lun_ref); |
|---|
| 3008 | 3063 | wait_for_completion(&lun->lun_shutdown_comp); |
|---|
| 3009 | 3064 | } |
|---|
| 3010 | 3065 | |
|---|
| .. | .. |
|---|
| 3075 | 3130 | } |
|---|
| 3076 | 3131 | EXPORT_SYMBOL(transport_wait_for_tasks); |
|---|
| 3077 | 3132 | |
|---|
| 3078 | | -struct sense_info { |
|---|
| 3133 | +struct sense_detail { |
|---|
| 3079 | 3134 | u8 key; |
|---|
| 3080 | 3135 | u8 asc; |
|---|
| 3081 | 3136 | u8 ascq; |
|---|
| 3082 | | - bool add_sector_info; |
|---|
| 3137 | + bool add_sense_info; |
|---|
| 3083 | 3138 | }; |
|---|
| 3084 | 3139 | |
|---|
| 3085 | | -static const struct sense_info sense_info_table[] = { |
|---|
| 3140 | +static const struct sense_detail sense_detail_table[] = { |
|---|
| 3086 | 3141 | [TCM_NO_SENSE] = { |
|---|
| 3087 | 3142 | .key = NOT_READY |
|---|
| 3088 | 3143 | }, |
|---|
| .. | .. |
|---|
| 3182 | 3237 | .key = ABORTED_COMMAND, |
|---|
| 3183 | 3238 | .asc = 0x10, |
|---|
| 3184 | 3239 | .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ |
|---|
| 3185 | | - .add_sector_info = true, |
|---|
| 3240 | + .add_sense_info = true, |
|---|
| 3186 | 3241 | }, |
|---|
| 3187 | 3242 | [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { |
|---|
| 3188 | 3243 | .key = ABORTED_COMMAND, |
|---|
| 3189 | 3244 | .asc = 0x10, |
|---|
| 3190 | 3245 | .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ |
|---|
| 3191 | | - .add_sector_info = true, |
|---|
| 3246 | + .add_sense_info = true, |
|---|
| 3192 | 3247 | }, |
|---|
| 3193 | 3248 | [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { |
|---|
| 3194 | 3249 | .key = ABORTED_COMMAND, |
|---|
| 3195 | 3250 | .asc = 0x10, |
|---|
| 3196 | 3251 | .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ |
|---|
| 3197 | | - .add_sector_info = true, |
|---|
| 3252 | + .add_sense_info = true, |
|---|
| 3198 | 3253 | }, |
|---|
| 3199 | 3254 | [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { |
|---|
| 3200 | 3255 | .key = COPY_ABORTED, |
|---|
| .. | .. |
|---|
| 3242 | 3297 | */ |
|---|
| 3243 | 3298 | static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) |
|---|
| 3244 | 3299 | { |
|---|
| 3245 | | - const struct sense_info *si; |
|---|
| 3300 | + const struct sense_detail *sd; |
|---|
| 3246 | 3301 | u8 *buffer = cmd->sense_buffer; |
|---|
| 3247 | 3302 | int r = (__force int)reason; |
|---|
| 3248 | 3303 | u8 key, asc, ascq; |
|---|
| 3249 | 3304 | bool desc_format = target_sense_desc_format(cmd->se_dev); |
|---|
| 3250 | 3305 | |
|---|
| 3251 | | - if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) |
|---|
| 3252 | | - si = &sense_info_table[r]; |
|---|
| 3306 | + if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) |
|---|
| 3307 | + sd = &sense_detail_table[r]; |
|---|
| 3253 | 3308 | else |
|---|
| 3254 | | - si = &sense_info_table[(__force int) |
|---|
| 3309 | + sd = &sense_detail_table[(__force int) |
|---|
| 3255 | 3310 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; |
|---|
| 3256 | 3311 | |
|---|
| 3257 | | - key = si->key; |
|---|
| 3312 | + key = sd->key; |
|---|
| 3258 | 3313 | if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { |
|---|
| 3259 | 3314 | if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, |
|---|
| 3260 | 3315 | &ascq)) { |
|---|
| 3261 | 3316 | cmd->scsi_status = SAM_STAT_BUSY; |
|---|
| 3262 | 3317 | return; |
|---|
| 3263 | 3318 | } |
|---|
| 3264 | | - } else if (si->asc == 0) { |
|---|
| 3319 | + } else if (sd->asc == 0) { |
|---|
| 3265 | 3320 | WARN_ON_ONCE(cmd->scsi_asc == 0); |
|---|
| 3266 | 3321 | asc = cmd->scsi_asc; |
|---|
| 3267 | 3322 | ascq = cmd->scsi_ascq; |
|---|
| 3268 | 3323 | } else { |
|---|
| 3269 | | - asc = si->asc; |
|---|
| 3270 | | - ascq = si->ascq; |
|---|
| 3324 | + asc = sd->asc; |
|---|
| 3325 | + ascq = sd->ascq; |
|---|
| 3271 | 3326 | } |
|---|
| 3272 | 3327 | |
|---|
| 3273 | 3328 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; |
|---|
| 3274 | 3329 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
|---|
| 3275 | 3330 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; |
|---|
| 3276 | 3331 | scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); |
|---|
| 3277 | | - if (si->add_sector_info) |
|---|
| 3332 | + if (sd->add_sense_info) |
|---|
| 3278 | 3333 | WARN_ON_ONCE(scsi_set_sense_information(buffer, |
|---|
| 3279 | 3334 | cmd->scsi_sense_length, |
|---|
| 3280 | | - cmd->bad_sector) < 0); |
|---|
| 3335 | + cmd->sense_info) < 0); |
|---|
| 3281 | 3336 | } |
|---|
| 3282 | 3337 | |
|---|
| 3283 | 3338 | int |
|---|
| .. | .. |
|---|
| 3285 | 3340 | sense_reason_t reason, int from_transport) |
|---|
| 3286 | 3341 | { |
|---|
| 3287 | 3342 | unsigned long flags; |
|---|
| 3343 | + |
|---|
| 3344 | + WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); |
|---|
| 3288 | 3345 | |
|---|
| 3289 | 3346 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
|---|
| 3290 | 3347 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
|---|
| .. | .. |
|---|
| 3302 | 3359 | } |
|---|
| 3303 | 3360 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
|---|
| 3304 | 3361 | |
|---|
| 3305 | | -static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) |
|---|
| 3306 | | - __releases(&cmd->t_state_lock) |
|---|
| 3307 | | - __acquires(&cmd->t_state_lock) |
|---|
| 3362 | +/** |
|---|
| 3363 | + * target_send_busy - Send SCSI BUSY status back to the initiator |
|---|
| 3364 | + * @cmd: SCSI command for which to send a BUSY reply. |
|---|
| 3365 | + * |
|---|
| 3366 | + * Note: Only call this function if target_submit_cmd*() failed. |
|---|
| 3367 | + */ |
|---|
| 3368 | +int target_send_busy(struct se_cmd *cmd) |
|---|
| 3308 | 3369 | { |
|---|
| 3309 | | - int ret; |
|---|
| 3370 | + WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); |
|---|
| 3310 | 3371 | |
|---|
| 3311 | | - assert_spin_locked(&cmd->t_state_lock); |
|---|
| 3312 | | - WARN_ON_ONCE(!irqs_disabled()); |
|---|
| 3313 | | - |
|---|
| 3314 | | - if (!(cmd->transport_state & CMD_T_ABORTED)) |
|---|
| 3315 | | - return 0; |
|---|
| 3316 | | - /* |
|---|
| 3317 | | - * If cmd has been aborted but either no status is to be sent or it has |
|---|
| 3318 | | - * already been sent, just return |
|---|
| 3319 | | - */ |
|---|
| 3320 | | - if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { |
|---|
| 3321 | | - if (send_status) |
|---|
| 3322 | | - cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; |
|---|
| 3323 | | - return 1; |
|---|
| 3324 | | - } |
|---|
| 3325 | | - |
|---|
| 3326 | | - pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" |
|---|
| 3327 | | - " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); |
|---|
| 3328 | | - |
|---|
| 3329 | | - cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; |
|---|
| 3330 | | - cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
|---|
| 3372 | + cmd->scsi_status = SAM_STAT_BUSY; |
|---|
| 3331 | 3373 | trace_target_cmd_complete(cmd); |
|---|
| 3332 | | - |
|---|
| 3333 | | - spin_unlock_irq(&cmd->t_state_lock); |
|---|
| 3334 | | - ret = cmd->se_tfo->queue_status(cmd); |
|---|
| 3335 | | - if (ret) |
|---|
| 3336 | | - transport_handle_queue_full(cmd, cmd->se_dev, ret, false); |
|---|
| 3337 | | - spin_lock_irq(&cmd->t_state_lock); |
|---|
| 3338 | | - |
|---|
| 3339 | | - return 1; |
|---|
| 3374 | + return cmd->se_tfo->queue_status(cmd); |
|---|
| 3340 | 3375 | } |
|---|
| 3341 | | - |
|---|
| 3342 | | -int transport_check_aborted_status(struct se_cmd *cmd, int send_status) |
|---|
| 3343 | | -{ |
|---|
| 3344 | | - int ret; |
|---|
| 3345 | | - |
|---|
| 3346 | | - spin_lock_irq(&cmd->t_state_lock); |
|---|
| 3347 | | - ret = __transport_check_aborted_status(cmd, send_status); |
|---|
| 3348 | | - spin_unlock_irq(&cmd->t_state_lock); |
|---|
| 3349 | | - |
|---|
| 3350 | | - return ret; |
|---|
| 3351 | | -} |
|---|
| 3352 | | -EXPORT_SYMBOL(transport_check_aborted_status); |
|---|
| 3353 | | - |
|---|
| 3354 | | -void transport_send_task_abort(struct se_cmd *cmd) |
|---|
| 3355 | | -{ |
|---|
| 3356 | | - unsigned long flags; |
|---|
| 3357 | | - int ret; |
|---|
| 3358 | | - |
|---|
| 3359 | | - spin_lock_irqsave(&cmd->t_state_lock, flags); |
|---|
| 3360 | | - if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { |
|---|
| 3361 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3362 | | - return; |
|---|
| 3363 | | - } |
|---|
| 3364 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3365 | | - |
|---|
| 3366 | | - /* |
|---|
| 3367 | | - * If there are still expected incoming fabric WRITEs, we wait |
|---|
| 3368 | | - * until until they have completed before sending a TASK_ABORTED |
|---|
| 3369 | | - * response. This response with TASK_ABORTED status will be |
|---|
| 3370 | | - * queued back to fabric module by transport_check_aborted_status(). |
|---|
| 3371 | | - */ |
|---|
| 3372 | | - if (cmd->data_direction == DMA_TO_DEVICE) { |
|---|
| 3373 | | - if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
|---|
| 3374 | | - spin_lock_irqsave(&cmd->t_state_lock, flags); |
|---|
| 3375 | | - if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { |
|---|
| 3376 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3377 | | - goto send_abort; |
|---|
| 3378 | | - } |
|---|
| 3379 | | - cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; |
|---|
| 3380 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3381 | | - return; |
|---|
| 3382 | | - } |
|---|
| 3383 | | - } |
|---|
| 3384 | | -send_abort: |
|---|
| 3385 | | - cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
|---|
| 3386 | | - |
|---|
| 3387 | | - transport_lun_remove_cmd(cmd); |
|---|
| 3388 | | - |
|---|
| 3389 | | - pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", |
|---|
| 3390 | | - cmd->t_task_cdb[0], cmd->tag); |
|---|
| 3391 | | - |
|---|
| 3392 | | - trace_target_cmd_complete(cmd); |
|---|
| 3393 | | - ret = cmd->se_tfo->queue_status(cmd); |
|---|
| 3394 | | - if (ret) |
|---|
| 3395 | | - transport_handle_queue_full(cmd, cmd->se_dev, ret, false); |
|---|
| 3396 | | -} |
|---|
| 3376 | +EXPORT_SYMBOL(target_send_busy); |
|---|
| 3397 | 3377 | |
|---|
| 3398 | 3378 | static void target_tmr_work(struct work_struct *work) |
|---|
| 3399 | 3379 | { |
|---|
| 3400 | 3380 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
|---|
| 3401 | 3381 | struct se_device *dev = cmd->se_dev; |
|---|
| 3402 | 3382 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
|---|
| 3403 | | - unsigned long flags; |
|---|
| 3404 | 3383 | int ret; |
|---|
| 3405 | 3384 | |
|---|
| 3406 | | - spin_lock_irqsave(&cmd->t_state_lock, flags); |
|---|
| 3407 | | - if (cmd->transport_state & CMD_T_ABORTED) { |
|---|
| 3408 | | - tmr->response = TMR_FUNCTION_REJECTED; |
|---|
| 3409 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3410 | | - goto check_stop; |
|---|
| 3411 | | - } |
|---|
| 3412 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3385 | + if (cmd->transport_state & CMD_T_ABORTED) |
|---|
| 3386 | + goto aborted; |
|---|
| 3413 | 3387 | |
|---|
| 3414 | 3388 | switch (tmr->function) { |
|---|
| 3415 | 3389 | case TMR_ABORT_TASK: |
|---|
| .. | .. |
|---|
| 3443 | 3417 | break; |
|---|
| 3444 | 3418 | } |
|---|
| 3445 | 3419 | |
|---|
| 3446 | | - spin_lock_irqsave(&cmd->t_state_lock, flags); |
|---|
| 3447 | | - if (cmd->transport_state & CMD_T_ABORTED) { |
|---|
| 3448 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3449 | | - goto check_stop; |
|---|
| 3450 | | - } |
|---|
| 3451 | | - spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3420 | + if (cmd->transport_state & CMD_T_ABORTED) |
|---|
| 3421 | + goto aborted; |
|---|
| 3452 | 3422 | |
|---|
| 3453 | 3423 | cmd->se_tfo->queue_tm_rsp(cmd); |
|---|
| 3454 | 3424 | |
|---|
| 3455 | | -check_stop: |
|---|
| 3456 | 3425 | transport_lun_remove_cmd(cmd); |
|---|
| 3457 | 3426 | transport_cmd_check_stop_to_fabric(cmd); |
|---|
| 3427 | + return; |
|---|
| 3428 | + |
|---|
| 3429 | +aborted: |
|---|
| 3430 | + target_handle_abort(cmd); |
|---|
| 3458 | 3431 | } |
|---|
| 3459 | 3432 | |
|---|
| 3460 | 3433 | int transport_generic_handle_tmr( |
|---|
| .. | .. |
|---|
| 3473 | 3446 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
|---|
| 3474 | 3447 | |
|---|
| 3475 | 3448 | if (aborted) { |
|---|
| 3476 | | - pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" |
|---|
| 3477 | | - "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, |
|---|
| 3478 | | - cmd->se_tmr_req->ref_task_tag, cmd->tag); |
|---|
| 3479 | | - transport_lun_remove_cmd(cmd); |
|---|
| 3480 | | - transport_cmd_check_stop_to_fabric(cmd); |
|---|
| 3449 | + pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", |
|---|
| 3450 | + cmd->se_tmr_req->function, |
|---|
| 3451 | + cmd->se_tmr_req->ref_task_tag, cmd->tag); |
|---|
| 3452 | + target_handle_abort(cmd); |
|---|
| 3481 | 3453 | return 0; |
|---|
| 3482 | 3454 | } |
|---|
| 3483 | 3455 | |
|---|
| 3484 | 3456 | INIT_WORK(&cmd->work, target_tmr_work); |
|---|
| 3485 | | - queue_work(cmd->se_dev->tmr_wq, &cmd->work); |
|---|
| 3457 | + schedule_work(&cmd->work); |
|---|
| 3486 | 3458 | return 0; |
|---|
| 3487 | 3459 | } |
|---|
| 3488 | 3460 | EXPORT_SYMBOL(transport_generic_handle_tmr); |
|---|