```diff
@@ -2128,8 +2128,8 @@
 	if (device->stopped &
 	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
 		return;
-	rc = device->discipline->verify_path(device,
-					     dasd_path_get_tbvpm(device));
+	rc = device->discipline->pe_handler(device,
+					    dasd_path_get_tbvpm(device));
 	if (rc)
 		dasd_device_set_timer(device, 50);
 	else
```
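This hunk (the functions throughout are from the kernel's s390 DASD driver, `drivers/s390/block/dasd.c`) swaps the discipline callback invoked for pending path events from `verify_path` to `pe_handler`. Below is a minimal sketch of the function-pointer dispatch involved; the signature is simplified from the driver's `struct dasd_discipline`, and `my_pe_handler` is a hypothetical discipline implementation, not the driver's:

```c
/* Sketch only: simplified from struct dasd_discipline in dasd_int.h;
 * my_pe_handler is hypothetical. */
struct dasd_device;

struct dasd_discipline {
	/* called with the "to be verified" path mask (tbvpm) */
	int (*pe_handler)(struct dasd_device *device, unsigned char tbvpm);
};

struct dasd_device {
	struct dasd_discipline *discipline;
};

static int my_pe_handler(struct dasd_device *device, unsigned char tbvpm)
{
	/* kick off path verification for each path set in tbvpm */
	return 0;	/* non-zero makes the caller arm a retry timer */
}

static int check_path_events(struct dasd_device *device, unsigned char tbvpm)
{
	/* mirrors the call site in the hunk above; on failure the
	 * caller retries via dasd_device_set_timer(device, 50) */
	return device->discipline->pe_handler(device, tbvpm);
}
```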
```diff
@@ -2985,41 +2985,32 @@
  * Requeue a request back to the block request queue
  * only works for block requests
  */
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
 {
-	struct dasd_block *block = cqr->block;
 	struct request *req;
 
-	if (!block)
-		return -EINVAL;
 	/*
 	 * If the request is an ERP request there is nothing to requeue.
 	 * This will be done with the remaining original request.
	 */
 	if (cqr->refers)
-		return 0;
+		return;
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
-	blk_mq_requeue_request(req, false);
+	blk_mq_requeue_request(req, true);
 	spin_unlock_irq(&cqr->dq->lock);
 
-	return 0;
+	return;
 }
 
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+					struct list_head *flush_queue)
 {
 	struct dasd_ccw_req *cqr, *n;
-	int rc, i;
-	struct list_head flush_queue;
 	unsigned long flags;
+	int rc, i;
 
-	INIT_LIST_HEAD(&flush_queue);
-	spin_lock_bh(&block->queue_lock);
+	spin_lock_irqsave(&block->queue_lock, flags);
 	rc = 0;
 restart:
 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
```
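Two changes in `_dasd_requeue_request()` are worth noting. First, the function loses its return value: the new caller checks `device->block` up front (see the last hunk) instead of relying on an `-EINVAL` return to skip non-block requests. Second, the second argument to `blk_mq_requeue_request()` flips to `true`. That argument is blk-mq's kick flag; a short sketch of the two call shapes, assuming only the stock blk-mq API:

```c
#include <linux/blk-mq.h>

/* Sketch: the bool argument decides whether the requeue list is kicked
 * immediately, i.e. whether requeued requests get re-dispatched right
 * away rather than waiting for a later queue run. */
static void requeue_now(struct request *rq)
{
	/* put rq on the requeue list and kick the list in one call */
	blk_mq_requeue_request(rq, true);
}

static void requeue_batched(struct request *rq)
{
	/* queue only ... */
	blk_mq_requeue_request(rq, false);
	/* ... and kick once later, e.g. after a batch of requeues */
	blk_mq_kick_requeue_list(rq->q);
}
```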
```diff
@@ -3034,13 +3025,32 @@
 		 * is returned from the dasd_device layer.
 		 */
 		cqr->callback = _dasd_wake_block_flush_cb;
-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
-			list_move_tail(&cqr->blocklist, &flush_queue);
+		for (i = 0; cqr; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, flush_queue);
 		if (i > 1)
 			/* moved more than one request - need to restart */
 			goto restart;
 	}
-	spin_unlock_bh(&block->queue_lock);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head flush_queue;
+	unsigned long flags;
+	int rc;
+
+	INIT_LIST_HEAD(&flush_queue);
+	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
 	/* Now call the callback function of flushed requests */
 restart_cb:
 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
```
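This refactor extracts the "collect requests onto a private list under the queue lock" step into `_dasd_requests_to_flushqueue()` so it can be shared (the final hunk reuses it from the requeue path), and switches the lock from `spin_lock_bh` to `spin_lock_irqsave` so the helper stays safe regardless of the context it is called from. A minimal sketch of that extraction pattern, with illustrative names rather than the driver's:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch: drain a lock-protected shared list onto a caller-provided
 * private list, so each caller can post-process entries lock-free. */
struct work_item {
	struct list_head node;
};

static void drain_to_private(spinlock_t *lock, struct list_head *shared,
			     struct list_head *private)
{
	unsigned long flags;

	/* the irqsave form is safe from process, softirq,
	 * and hardirq context alike */
	spin_lock_irqsave(lock, flags);
	list_splice_init(shared, private);
	spin_unlock_irqrestore(lock, flags);
}
```

The driver's helper is more involved than a plain splice because ERP requests chain through `cqr->refers` and each chain must move as a unit (hence its `restart:` loop), but the locking shape is the same.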
```diff
@@ -3977,74 +3987,35 @@
  */
 static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
+	struct dasd_block *block = device->block;
 	struct list_head requeue_queue;
 	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
 	int rc;
 
+	if (!block)
+		return 0;
+
 	INIT_LIST_HEAD(&requeue_queue);
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
-				dasd_put_device(device);
-				return rc;
-			}
+	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements
+			 */
+			goto restart_cb;
 		}
-		list_move_tail(&cqr->devlist, &requeue_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-
-		/*
-		 * requeue requests to blocklayer will only work
-		 * for block device requests
-		 */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
-		}
-
-		/*
-		 * _dasd_requeue_request already checked for a valid
-		 * blockdevice, no need to check again
-		 * all erp requests (cqr->refers) have a cqr->block
-		 * pointer copy from the original cqr
-		 */
+		_dasd_requeue_request(cqr);
 		list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
 			cqr, (struct request *) cqr->callback_data);
-	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&requeue_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(device->cdev));
-		list_splice_tail(&requeue_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	}
 	dasd_schedule_device_bh(device);
 	return rc;
```
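The rewrite drops the hand-rolled walk of the device `ccw_queue` (including the `term_IO` error path) and reuses `_dasd_requests_to_flushqueue()`, then processes the collected requests the same way `dasd_flush_block_queue()` does: wait for each request to reach a final state, fold finished ERP requests back into their originals via `__dasd_process_erp()`, then requeue to the block layer and free the channel-program copy. The condition `cqr->status < DASD_CQR_QUEUED` works because the driver's status encoding places final states numerically below the queued/in-flight ones, so it waits out more than the old `CLEAR_PENDING`-only check. The `goto restart_cb` is the usual restart-after-mutation list idiom; a sketch under illustrative types, where `process_chain()` stands in for the `__dasd_process_erp()` plus requeue/free steps:

```c
#include <linux/list.h>
#include <linux/types.h>

/* Sketch: once the loop body may unlink entries other than the current
 * one, list_for_each_entry_safe()'s single-element lookahead is no
 * longer trustworthy, so iteration restarts from the head. */
struct item {
	struct list_head node;
	bool is_chain_head;
};

static void process_chain(struct item *head)
{
	/* may delete head and further entries linked behind it */
	list_del_init(&head->node);
}

static void drain(struct list_head *queue)
{
	struct item *it, *tmp;

restart:
	list_for_each_entry_safe(it, tmp, queue, node) {
		if (it->is_chain_head) {
			process_chain(it);
			goto restart;	/* list changed beyond it/tmp */
		}
		list_del_init(&it->node);
	}
}
```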