@@ -724,10 +724,23 @@
 {
 	struct se_device *dev;
 	struct se_lun *xcopy_lun;
+	int i;
 
 	dev = hba->backend->ops->alloc_device(hba, name);
 	if (!dev)
 		return NULL;
+
+	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+	if (!dev->queues) {
+		hba->backend->ops->free_device(dev);
+		return NULL;
+	}
+
+	dev->queue_cnt = nr_cpu_ids;
+	for (i = 0; i < dev->queue_cnt; i++) {
+		INIT_LIST_HEAD(&dev->queues[i].state_list);
+		spin_lock_init(&dev->queues[i].lock);
+	}
 
 	dev->se_hba = hba;
 	dev->transport = hba->backend->ops;
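A note on the hunk above: the error path must release the half-built device through hba->backend->ops rather than dev->transport, because dev->transport is only assigned at the bottom of this hunk and would still be NULL here. Sizing the array by nr_cpu_ids lets a submission on CPU n index dev->queues[n] directly. Below is a minimal sketch of the queue structure the two initializers assume; the struct name and anything beyond the two fields visible in this patch are assumptions about the real definition in target_core_base.h:

    /* assumed layout; only state_list and lock appear in this patch */
    struct se_device_queue {
    	struct list_head state_list;	/* commands tracked on this CPU */
    	spinlock_t lock;		/* protects state_list */
    };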
@@ -738,9 +751,7 @@
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
-	INIT_LIST_HEAD(&dev->state_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
-	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
 	spin_lock_init(&dev->dev_reservation_lock);
 	spin_lock_init(&dev->se_port_lock);
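With the device-wide state_list and execute_task_lock gone, any walker of in-flight commands has to visit each per-CPU queue in turn. A minimal sketch of that pattern, assuming se_cmd still links into the list through a state_list member (the member name is an assumption; only the dev-side fields appear in this patch):

    int i;

    for (i = 0; i < dev->queue_cnt; i++) {
    	struct se_cmd *cmd, *next;
    	unsigned long flags;

    	spin_lock_irqsave(&dev->queues[i].lock, flags);
    	list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
    				 state_list) {
    		/* inspect or collect the command */
    	}
    	spin_unlock_irqrestore(&dev->queues[i].lock, flags);
    }

The trade-off is the usual one: tracking a command's state now touches only the local CPU's lock, while the rare whole-device scan (TMR, shutdown) pays by iterating queue_cnt lists.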
@@ -759,6 +770,7 @@
 	spin_lock_init(&dev->t10_alua.lba_map_lock);
 
 	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+	mutex_init(&dev->lun_reset_mutex);
 
 	dev->t10_wwn.t10_dev = dev;
 	dev->t10_alua.t10_dev = dev;
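The new mutex presumably serializes concurrent LUN resets that drain the per-CPU state lists initialized above; the lock and unlock sites would live in the TMR code (target_core_tmr.c), not in this file. A hypothetical caller shape, using only names visible in this patch:

    /* hypothetical pairing in the LUN reset path */
    mutex_lock(&dev->lun_reset_mutex);
    /* walk dev->queues[i].state_list and abort matching commands */
    mutex_unlock(&dev->lun_reset_mutex);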
@@ -855,7 +867,6 @@
 EXPORT_SYMBOL(target_to_linux_sector);
 
 struct devices_idr_iter {
-	struct config_item *prev_item;
 	int (*fn)(struct se_device *dev, void *data);
 	void *data;
 };
@@ -865,10 +876,8 @@
 {
 	struct devices_idr_iter *iter = data;
 	struct se_device *dev = p;
+	struct config_item *item;
 	int ret;
-
-	config_item_put(iter->prev_item);
-	iter->prev_item = NULL;
 
 	/*
 	 * We add the device early to the idr, so it can be used
@@ -879,12 +888,13 @@
 	if (!target_dev_configured(dev))
 		return 0;
 
-	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
-	if (!iter->prev_item)
+	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+	if (!item)
 		return 0;
 	mutex_unlock(&device_mutex);
 
 	ret = iter->fn(dev, iter->data);
+	config_item_put(item);
 
 	mutex_lock(&device_mutex);
 	return ret;
@@ -907,7 +917,6 @@
 	mutex_lock(&device_mutex);
 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 	mutex_unlock(&device_mutex);
-	config_item_put(iter.prev_item);
 	return ret;
 }
 
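The three hunks above are one logical change. Previously the iterator cached a reference to the just-visited device in iter->prev_item and dropped it at the top of the next callback, which runs with device_mutex held; since the device free path in this same file takes device_mutex to remove the idr entry, a config_item_put() that turned out to be the final reference could recurse into the mutex and deadlock, and the loop also needed a trailing put for the last device. The new shape keeps the reference strictly inside one iteration and only drops it while the mutex is released; a simplified view of the resulting locking order:

    /*
     * mutex_lock(&device_mutex);
     * idr_for_each() -> for each configured device:
     *         item = config_item_get_unless_zero(...);  // pin the device
     *         mutex_unlock(&device_mutex);
     *         ret = iter->fn(dev, iter->data);          // runs unlocked
     *         config_item_put(item);                    // mutex not held
     *         mutex_lock(&device_mutex);
     * mutex_unlock(&device_mutex);
     */

The deadlock scenario is an inference from the locking visible in this excerpt; the idr_remove-under-device_mutex lines sit just above the final hunk and are not shown here.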
@@ -1014,6 +1023,7 @@
 	if (dev->transport->free_prot)
 		dev->transport->free_prot(dev);
 
+	kfree(dev->queues);
 	dev->transport->free_device(dev);
 }
 
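The kfree here pairs with the kcalloc in the first hunk. Ordering matters: ->free_device() releases the backend allocation that embeds the se_device, so dev->queues must be consumed before the structure holding the pointer is freed.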