
hc
2024-10-09  commit 244b2c5ca8b14627e4a17755e5922221e121c771

--- a/kernel/drivers/s390/block/dasd.c
+++ b/kernel/drivers/s390/block/dasd.c
@@ -70,7 +70,6 @@
  * SECTION: prototypes for static functions of dasd.c
  */
 static int dasd_alloc_queue(struct dasd_block *);
-static void dasd_setup_queue(struct dasd_block *);
 static void dasd_free_queue(struct dasd_block *);
 static int dasd_flush_block_queue(struct dasd_block *);
 static void dasd_device_tasklet(unsigned long);
@@ -120,9 +119,18 @@
                 kfree(device);
                 return ERR_PTR(-ENOMEM);
         }
+        /* Get two pages for ese format. */
+        device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+        if (!device->ese_mem) {
+                free_page((unsigned long) device->erp_mem);
+                free_pages((unsigned long) device->ccw_mem, 1);
+                kfree(device);
+                return ERR_PTR(-ENOMEM);
+        }
 
         dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
         dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+        dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
         spin_lock_init(&device->mem_lock);
         atomic_set(&device->tasklet_scheduled, 0);
         tasklet_init(&device->tasklet, dasd_device_tasklet,
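
The new ese_mem buffer follows the same pattern as ccw_mem and erp_mem above: one contiguous order-1 allocation (two pages, GFP_DMA so it is 31-bit addressable for channel programs) that dasd_init_chunklist() turns into a small private heap, carved up under mem_lock. A minimal sketch of that allocator pattern, assuming the dasd_alloc_chunk() helper from dasd_int.h (illustrative only, not part of the patch):

    /* Carve a request-sized chunk out of the two-page ESE pool. */
    static void *ese_pool_get(struct dasd_device *device, int size)
    {
            unsigned long flags;
            void *chunk;

            spin_lock_irqsave(&device->mem_lock, flags);
            chunk = dasd_alloc_chunk(&device->ese_chunks, size);
            spin_unlock_irqrestore(&device->mem_lock, flags);

            return chunk; /* NULL once the pool is exhausted */
    }

dasd_fmalloc_request() further down does exactly this, plus the CQR setup.
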
@@ -146,6 +154,7 @@
 void dasd_free_device(struct dasd_device *device)
 {
         kfree(device->private);
+        free_pages((unsigned long) device->ese_mem, 1);
         free_page((unsigned long) device->erp_mem);
         free_pages((unsigned long) device->ccw_mem, 1);
         kfree(device);
@@ -169,6 +178,8 @@
                      (unsigned long) block);
         INIT_LIST_HEAD(&block->ccw_queue);
         spin_lock_init(&block->queue_lock);
+        INIT_LIST_HEAD(&block->format_list);
+        spin_lock_init(&block->format_lock);
         timer_setup(&block->timer, dasd_block_timeout, 0);
         spin_lock_init(&block->profile.lock);
 
@@ -348,7 +359,8 @@
                         }
                         return rc;
                 }
-                dasd_setup_queue(block);
+                if (device->discipline->setup_blk_queue)
+                        device->discipline->setup_blk_queue(block);
                 set_capacity(block->gdp,
                              block->blocks << block->s2b_shift);
                 device->state = DASD_STATE_READY;
@@ -1192,20 +1204,7 @@
         return rc;
 }
 
-static int dasd_hosts_open(struct inode *inode, struct file *file)
-{
-        struct dasd_device *device = inode->i_private;
-
-        return single_open(file, dasd_hosts_show, device);
-}
-
-static const struct file_operations dasd_hosts_fops = {
-        .owner          = THIS_MODULE,
-        .open           = dasd_hosts_open,
-        .read           = seq_read,
-        .llseek         = seq_lseek,
-        .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
 
 static void dasd_hosts_exit(struct dasd_device *device)
 {
@@ -1271,6 +1270,49 @@
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
+struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
+                                          int datasize,
+                                          struct dasd_device *device)
+{
+        struct dasd_ccw_req *cqr;
+        unsigned long flags;
+        int size, cqr_size;
+        char *data;
+
+        cqr_size = (sizeof(*cqr) + 7L) & -8L;
+        size = cqr_size;
+        if (cplength > 0)
+                size += cplength * sizeof(struct ccw1);
+        if (datasize > 0)
+                size += datasize;
+
+        spin_lock_irqsave(&device->mem_lock, flags);
+        cqr = dasd_alloc_chunk(&device->ese_chunks, size);
+        spin_unlock_irqrestore(&device->mem_lock, flags);
+        if (!cqr)
+                return ERR_PTR(-ENOMEM);
+        memset(cqr, 0, sizeof(*cqr));
+        data = (char *)cqr + cqr_size;
+        cqr->cpaddr = NULL;
+        if (cplength > 0) {
+                cqr->cpaddr = data;
+                data += cplength * sizeof(struct ccw1);
+                memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+        }
+        cqr->data = NULL;
+        if (datasize > 0) {
+                cqr->data = data;
+                memset(cqr->data, 0, datasize);
+        }
+
+        cqr->magic = magic;
+        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+        dasd_get_device(device);
+
+        return cqr;
+}
+EXPORT_SYMBOL(dasd_fmalloc_request);
+
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
         unsigned long flags;
@@ -1281,6 +1323,17 @@
         dasd_put_device(device);
 }
 EXPORT_SYMBOL(dasd_sfree_request);
+
+void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&device->mem_lock, flags);
+        dasd_free_chunk(&device->ese_chunks, cqr);
+        spin_unlock_irqrestore(&device->mem_lock, flags);
+        dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_ffree_request);
 
 /*
  * Check discipline magic in cqr.
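
dasd_fmalloc_request() and dasd_ffree_request() pair up like dasd_smalloc_request()/dasd_sfree_request(), but draw from the dedicated ese_chunks pool, so a format request can be built even while the regular CCW pool is under pressure. The expression (sizeof(*cqr) + 7L) & -8L rounds the CQR header up to the next multiple of 8, keeping the CCW program behind it doubleword aligned. A hedged sketch of the intended use (the magic constant comes from the ECKD discipline; the opcode and data size are placeholders, the real caller being the discipline's ese_format path):

    /* Sketch: build a one-CCW format request from the ESE pool. */
    static struct dasd_ccw_req *build_format_cqr(struct dasd_device *device)
    {
            struct dasd_ccw_req *cqr;
            struct ccw1 *ccw;

            cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 1, 64, device);
            if (IS_ERR(cqr))        /* -ENOMEM: pool exhausted, retry later */
                    return cqr;

            ccw = cqr->cpaddr;
            ccw->cmd_code = 0x63;   /* placeholder; real chains start with Define Extent */
            ccw->count = 64;
            ccw->cda = (__u32)(addr_t) cqr->data;

            return cqr;             /* release with dasd_ffree_request() when done */
    }
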
@@ -1409,6 +1462,13 @@
                 if (!cqr->lpm)
                         cqr->lpm = dasd_path_get_opm(device);
         }
+        /*
+         * remember the amount of formatted tracks to prevent double format on
+         * ESE devices
+         */
+        if (cqr->block)
+                cqr->trkcount = atomic_read(&cqr->block->trkcount);
+
         if (cqr->cpmode == 1) {
                 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                          (long) cqr, cqr->lpm);
@@ -1586,17 +1646,48 @@
                  irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
 }
 
+static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+{
+        struct dasd_device *device = NULL;
+        u8 *sense = NULL;
+
+        if (!block)
+                return 0;
+        device = block->base;
+        if (!device || !device->discipline->is_ese)
+                return 0;
+        if (!device->discipline->is_ese(device))
+                return 0;
+
+        sense = dasd_get_sense(irb);
+        if (!sense)
+                return 0;
+
+        return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+                !!(sense[1] & SNS1_FILE_PROTECTED) ||
+                scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
+}
+
+static int dasd_ese_oos_cond(u8 *sense)
+{
+        return sense[0] & SNS0_EQUIPMENT_CHECK &&
+                sense[1] & SNS1_PERM_ERR &&
+                sense[1] & SNS1_WRITE_INHIBITED &&
+                sense[25] == 0x01;
+}
+
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
  */
 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                       struct irb *irb)
 {
-        struct dasd_ccw_req *cqr, *next;
+        struct dasd_ccw_req *cqr, *next, *fcqr;
         struct dasd_device *device;
         unsigned long now;
         int nrf_suppressed = 0;
         int fp_suppressed = 0;
+        struct request *req;
         u8 *sense = NULL;
         int expires;
 
@@ -1654,6 +1745,17 @@
                         test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
                 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
                         test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+                /*
+                 * Extent pool probably out-of-space.
+                 * Stop device and check exhaust level.
+                 */
+                if (dasd_ese_oos_cond(sense)) {
+                        dasd_generic_space_exhaust(device, cqr);
+                        device->discipline->ext_pool_exhaust(device, cqr);
+                        dasd_put_device(device);
+                        return;
+                }
         }
         if (!(fp_suppressed || nrf_suppressed))
                 device->discipline->dump_sense_dbf(device, irb, "int");
@@ -1683,6 +1785,42 @@
                 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                                 "invalid device in request");
                 return;
+        }
+
+        if (dasd_ese_needs_format(cqr->block, irb)) {
+                req = dasd_get_callback_data(cqr);
+                if (!req) {
+                        cqr->status = DASD_CQR_ERROR;
+                        return;
+                }
+                if (rq_data_dir(req) == READ) {
+                        device->discipline->ese_read(cqr, irb);
+                        cqr->status = DASD_CQR_SUCCESS;
+                        cqr->stopclk = now;
+                        dasd_device_clear_timer(device);
+                        dasd_schedule_device_bh(device);
+                        return;
+                }
+                fcqr = device->discipline->ese_format(device, cqr, irb);
+                if (IS_ERR(fcqr)) {
+                        if (PTR_ERR(fcqr) == -EINVAL) {
+                                cqr->status = DASD_CQR_ERROR;
+                                return;
+                        }
+                        /*
+                         * If we can't format now, let the request go
+                         * one extra round. Maybe we can format later.
+                         */
+                        cqr->status = DASD_CQR_QUEUED;
+                        dasd_schedule_device_bh(device);
+                        return;
+                } else {
+                        fcqr->status = DASD_CQR_QUEUED;
+                        cqr->status = DASD_CQR_QUEUED;
+                        list_add(&fcqr->devlist, &device->ccw_queue);
+                        dasd_schedule_device_bh(device);
+                        return;
+                }
         }
 
         /* Check for clear pending */
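
The handler dispatches through four discipline hooks that arrive with this feature: is_ese() (is the volume thin provisioned), ese_read() (satisfy a read of an unformatted track by zero-filling and record the handled bytes in cqr->proc_bytes), ese_format() (build an on-the-fly format request for the failing track), and ext_pool_exhaust() (react to the out-of-space sense). Roughly how the ECKD discipline is expected to wire them up (member names from this patch; the dasd_eckd_* implementations live in dasd_eckd.c):

    /* Sketch of the hook wiring in the ECKD discipline structure. */
    static struct dasd_discipline dasd_eckd_discipline = {
            /* ... existing members unchanged ... */
            .is_ese           = dasd_eckd_is_ese,
            .ese_read         = dasd_eckd_ese_read,
            .ese_format       = dasd_eckd_ese_format,
            .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
    };

Note the ordering trick in the write path: the freshly built format request (fcqr) is queued with list_add(), i.e. at the head of ccw_queue, so it runs before the requeued original write.
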
@@ -1923,7 +2061,7 @@
 static int __dasd_device_is_unusable(struct dasd_device *device,
                                      struct dasd_ccw_req *cqr)
 {
-        int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+        int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);
 
         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
             !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
@@ -1990,8 +2128,8 @@
         if (device->stopped &
             ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
                 return;
-        rc = device->discipline->verify_path(device,
-                                             dasd_path_get_tbvpm(device));
+        rc = device->discipline->pe_handler(device,
+                                            dasd_path_get_tbvpm(device));
         if (rc)
                 dasd_device_set_timer(device, 50);
         else
@@ -2425,6 +2563,15 @@
 EXPORT_SYMBOL(dasd_sleep_on_queue);
 
 /*
+ * Start requests from a ccw_queue and wait interruptible for their completion.
+ */
+int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
+{
+        return _dasd_sleep_on_queue(ccw_queue, 1);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
+
+/*
  * Queue a request to the tail of the device ccw_queue and wait
  * interruptible for it's completion.
  */
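
dasd_sleep_on_queue_interruptible() is the killable twin of dasd_sleep_on_queue(): the same batch submission through _dasd_sleep_on_queue(), but with the interruptible flag set so a pending signal can end the wait. A minimal usage sketch (error handling trimmed; the format paths added with this feature are the intended callers):

    /* Sketch: submit prepared CQRs as a batch and wait, killable. */
    LIST_HEAD(format_queue);
    int rc;

    list_add_tail(&cqr->blocklist, &format_queue); /* repeat per CQR */
    rc = dasd_sleep_on_queue_interruptible(&format_queue);
    if (rc)
            pr_debug("format wait interrupted or failed: %d\n", rc);
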
@@ -2622,11 +2769,13 @@
 {
         struct request *req;
         blk_status_t error = BLK_STS_OK;
+        unsigned int proc_bytes;
         int status;
 
         req = (struct request *) cqr->callback_data;
         dasd_profile_end(cqr->block, cqr, req);
 
+        proc_bytes = cqr->proc_bytes;
         status = cqr->block->base->discipline->free_cp(cqr, req);
         if (status < 0)
                 error = errno_to_blk_status(status);
@@ -2657,7 +2806,17 @@
                 blk_mq_end_request(req, error);
                 blk_mq_run_hw_queues(req->q, true);
         } else {
-                blk_mq_complete_request(req);
+                /*
+                 * Partial completed requests can happen with ESE devices.
+                 * During read we might have gotten a NRF error and have to
+                 * complete a request partially.
+                 */
+                if (proc_bytes) {
+                        blk_update_request(req, BLK_STS_OK, proc_bytes);
+                        blk_mq_requeue_request(req, true);
+                } else if (likely(!blk_should_fake_timeout(req->q))) {
+                        blk_mq_complete_request(req);
+                }
         }
 }
 
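
This is the consumer of cqr->proc_bytes: an ESE read can stop at the first unformatted track mid-request, and instead of failing, the bytes already handled are acknowledged and only the remainder is re-dispatched. Note that proc_bytes is snapshotted above before free_cp() releases the CQR. In effect, for a request that stopped after 8 KiB of 24 KiB (values illustrative):

    /* Worked example of the partial-completion idiom. */
    blk_update_request(req, BLK_STS_OK, 8192); /* first 16 blocks of 512 B done */
    blk_mq_requeue_request(req, true);         /* re-dispatch the remaining 32 */
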
26632822
....@@ -2826,41 +2985,32 @@
28262985 * Requeue a request back to the block request queue
28272986 * only works for block requests
28282987 */
2829
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
2988
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
28302989 {
2831
- struct dasd_block *block = cqr->block;
28322990 struct request *req;
28332991
2834
- if (!block)
2835
- return -EINVAL;
28362992 /*
28372993 * If the request is an ERP request there is nothing to requeue.
28382994 * This will be done with the remaining original request.
28392995 */
28402996 if (cqr->refers)
2841
- return 0;
2997
+ return;
28422998 spin_lock_irq(&cqr->dq->lock);
28432999 req = (struct request *) cqr->callback_data;
2844
- blk_mq_requeue_request(req, false);
3000
+ blk_mq_requeue_request(req, true);
28453001 spin_unlock_irq(&cqr->dq->lock);
28463002
2847
- return 0;
3003
+ return;
28483004 }
28493005
2850
-/*
2851
- * Go through all request on the dasd_block request queue, cancel them
2852
- * on the respective dasd_device, and return them to the generic
2853
- * block layer.
2854
- */
2855
-static int dasd_flush_block_queue(struct dasd_block *block)
3006
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
3007
+ struct list_head *flush_queue)
28563008 {
28573009 struct dasd_ccw_req *cqr, *n;
2858
- int rc, i;
2859
- struct list_head flush_queue;
28603010 unsigned long flags;
3011
+ int rc, i;
28613012
2862
- INIT_LIST_HEAD(&flush_queue);
2863
- spin_lock_bh(&block->queue_lock);
3013
+ spin_lock_irqsave(&block->queue_lock, flags);
28643014 rc = 0;
28653015 restart:
28663016 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
@@ -2875,13 +3025,32 @@
                  * is returned from the dasd_device layer.
                  */
                 cqr->callback = _dasd_wake_block_flush_cb;
-                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
-                        list_move_tail(&cqr->blocklist, &flush_queue);
+                for (i = 0; cqr; cqr = cqr->refers, i++)
+                        list_move_tail(&cqr->blocklist, flush_queue);
                 if (i > 1)
                         /* moved more than one request - need to restart */
                         goto restart;
         }
-        spin_unlock_bh(&block->queue_lock);
+        spin_unlock_irqrestore(&block->queue_lock, flags);
+
+        return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+        struct dasd_ccw_req *cqr, *n;
+        struct list_head flush_queue;
+        unsigned long flags;
+        int rc;
+
+        INIT_LIST_HEAD(&flush_queue);
+        rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
         /* Now call the callback function of flushed requests */
 restart_cb:
         list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
@@ -3150,55 +3319,6 @@
 }
 
 /*
- * Allocate and initialize request queue.
- */
-static void dasd_setup_queue(struct dasd_block *block)
-{
-        unsigned int logical_block_size = block->bp_block;
-        struct request_queue *q = block->request_queue;
-        unsigned int max_bytes, max_discard_sectors;
-        int max;
-
-        if (block->base->features & DASD_FEATURE_USERAW) {
-                /*
-                 * the max_blocks value for raw_track access is 256
-                 * it is higher than the native ECKD value because we
-                 * only need one ccw per track
-                 * so the max_hw_sectors are
-                 * 2048 x 512B = 1024kB = 16 tracks
-                 */
-                max = 2048;
-        } else {
-                max = block->base->discipline->max_blocks << block->s2b_shift;
-        }
-        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-        q->limits.max_dev_sectors = max;
-        blk_queue_logical_block_size(q, logical_block_size);
-        blk_queue_max_hw_sectors(q, max);
-        blk_queue_max_segments(q, USHRT_MAX);
-        /* with page sized segments we can translate each segement into
-         * one idaw/tidaw
-         */
-        blk_queue_max_segment_size(q, PAGE_SIZE);
-        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
-        /* Only activate blocklayer discard support for devices that support it */
-        if (block->base->features & DASD_FEATURE_DISCARD) {
-                q->limits.discard_granularity = logical_block_size;
-                q->limits.discard_alignment = PAGE_SIZE;
-
-                /* Calculate max_discard_sectors and make it PAGE aligned */
-                max_bytes = USHRT_MAX * logical_block_size;
-                max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
-                max_discard_sectors = max_bytes / logical_block_size;
-
-                blk_queue_max_discard_sectors(q, max_discard_sectors);
-                blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-        }
-}
-
-/*
  * Deactivate and free request queue.
  */
 static void dasd_free_queue(struct dasd_block *block)
@@ -3316,10 +3436,8 @@
         dasd_proc_exit();
 #endif
         dasd_eer_exit();
-        if (dasd_page_cache != NULL) {
-                kmem_cache_destroy(dasd_page_cache);
-                dasd_page_cache = NULL;
-        }
+        kmem_cache_destroy(dasd_page_cache);
+        dasd_page_cache = NULL;
         dasd_gendisk_exit();
         dasd_devmap_exit();
         if (dasd_debug_area != NULL) {
@@ -3827,79 +3945,77 @@
 }
 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
+void dasd_generic_space_exhaust(struct dasd_device *device,
+                                struct dasd_ccw_req *cqr)
+{
+        dasd_eer_write(device, NULL, DASD_EER_NOSPC);
+
+        if (device->state < DASD_STATE_BASIC)
+                return;
+
+        if (cqr->status == DASD_CQR_IN_IO ||
+            cqr->status == DASD_CQR_CLEAR_PENDING) {
+                cqr->status = DASD_CQR_QUEUED;
+                cqr->retries++;
+        }
+        dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
+        dasd_device_clear_timer(device);
+        dasd_schedule_device_bh(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
+
+void dasd_generic_space_avail(struct dasd_device *device)
+{
+        dev_info(&device->cdev->dev, "Extent pool space is available\n");
+        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
+
+        dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
+        dasd_schedule_device_bh(device);
+
+        if (device->block) {
+                dasd_schedule_block_bh(device->block);
+                if (device->block->request_queue)
+                        blk_mq_run_hw_queues(device->block->request_queue, true);
+        }
+        if (!device->stopped)
+                wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
+
 /*
  * clear active requests and requeue them to block layer if possible
  */
 static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
+        struct dasd_block *block = device->block;
         struct list_head requeue_queue;
         struct dasd_ccw_req *cqr, *n;
-        struct dasd_ccw_req *refers;
         int rc;
 
+        if (!block)
+                return 0;
+
         INIT_LIST_HEAD(&requeue_queue);
-        spin_lock_irq(get_ccwdev_lock(device->cdev));
-        rc = 0;
-        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-                /* Check status and move request to flush_queue */
-                if (cqr->status == DASD_CQR_IN_IO) {
-                        rc = device->discipline->term_IO(cqr);
-                        if (rc) {
-                                /* unable to terminate requeust */
-                                dev_err(&device->cdev->dev,
-                                        "Unable to terminate request %p "
-                                        "on suspend\n", cqr);
-                                spin_unlock_irq(get_ccwdev_lock(device->cdev));
-                                dasd_put_device(device);
-                                return rc;
-                        }
+        rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+
+        /* Now call the callback function of flushed requests */
+restart_cb:
+        list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+                /* Process finished ERP request. */
+                if (cqr->refers) {
+                        spin_lock_bh(&block->queue_lock);
+                        __dasd_process_erp(block->base, cqr);
+                        spin_unlock_bh(&block->queue_lock);
+                        /* restart list_for_xx loop since dasd_process_erp
+                         * might remove multiple elements
+                         */
+                        goto restart_cb;
                 }
-                list_move_tail(&cqr->devlist, &requeue_queue);
-        }
-        spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-        list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
-                wait_event(dasd_flush_wq,
-                           (cqr->status != DASD_CQR_CLEAR_PENDING));
-
-                /*
-                 * requeue requests to blocklayer will only work
-                 * for block device requests
-                 */
-                if (_dasd_requeue_request(cqr))
-                        continue;
-
-                /* remove requests from device and block queue */
-                list_del_init(&cqr->devlist);
-                while (cqr->refers != NULL) {
-                        refers = cqr->refers;
-                        /* remove the request from the block queue */
-                        list_del(&cqr->blocklist);
-                        /* free the finished erp request */
-                        dasd_free_erp_request(cqr, cqr->memdev);
-                        cqr = refers;
-                }
-
-                /*
-                 * _dasd_requeue_request already checked for a valid
-                 * blockdevice, no need to check again
-                 * all erp requests (cqr->refers) have a cqr->block
-                 * pointer copy from the original cqr
-                 */
+                _dasd_requeue_request(cqr);
                 list_del_init(&cqr->blocklist);
                 cqr->block->base->discipline->free_cp(
                         cqr, (struct request *) cqr->callback_data);
-        }
-
-        /*
-         * if requests remain then they are internal request
-         * and go back to the device queue
-         */
-        if (!list_empty(&requeue_queue)) {
-                /* move freeze_queue to start of the ccw_queue */
-                spin_lock_irq(get_ccwdev_lock(device->cdev));
-                list_splice_tail(&requeue_queue, &device->ccw_queue);
-                spin_unlock_irq(get_ccwdev_lock(device->cdev));
         }
         dasd_schedule_device_bh(device);
         return rc;
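
dasd_generic_space_exhaust() parks the device under the new DASD_STOPPED_NOSPC stop bit (tolerated by __dasd_device_is_unusable() above, so flushing still works) and rewinds the interrupted CQR to DASD_CQR_QUEUED with an extra retry. dasd_generic_space_avail() is the unpark side, to be called once the storage server reports freed extents. How the notification reaches it is discipline business; a hedged sketch (the trigger shown is an assumption, upstream derives it from ECKD message data):

    /*
     * Sketch: discipline message handler noticing that extent-pool
     * space is available again and restarting stopped I/O.
     */
    static void handle_ext_pool_message(struct dasd_device *device, bool space_freed)
    {
            if (space_freed && (device->stopped & DASD_STOPPED_NOSPC))
                    dasd_generic_space_avail(device);
    }
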
@@ -3986,13 +4102,11 @@
 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
 
 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
-                                                   void *rdc_buffer,
                                                    int rdc_buffer_size,
                                                    int magic)
 {
         struct dasd_ccw_req *cqr;
         struct ccw1 *ccw;
-        unsigned long *idaw;
 
         cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
                                    NULL);
@@ -4007,16 +4121,8 @@
 
         ccw = cqr->cpaddr;
         ccw->cmd_code = CCW_CMD_RDC;
-        if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
-                idaw = (unsigned long *) (cqr->data);
-                ccw->cda = (__u32)(addr_t) idaw;
-                ccw->flags = CCW_FLAG_IDA;
-                idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
-        } else {
-                ccw->cda = (__u32)(addr_t) rdc_buffer;
-                ccw->flags = 0;
-        }
-
+        ccw->cda = (__u32)(addr_t) cqr->data;
+        ccw->flags = 0;
         ccw->count = rdc_buffer_size;
         cqr->startdev = device;
         cqr->memdev = device;
@@ -4034,12 +4140,13 @@
         int ret;
         struct dasd_ccw_req *cqr;
 
-        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
-                                     magic);
+        cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
         if (IS_ERR(cqr))
                 return PTR_ERR(cqr);
 
         ret = dasd_sleep_on(cqr);
+        if (ret == 0)
+                memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
         dasd_sfree_request(cqr, cqr->memdev);
         return ret;
 }
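
Dropping the caller-supplied buffer from dasd_generic_build_rdc() removes the only reason for the old IDAL branch: cqr->data always lives in the device's GFP_DMA chunk pool, so it is 31-bit addressable and a plain ccw->cda works without idal_create_words(). The caller's buffer, which may sit anywhere, is filled by memcpy() only on success. Callers keep the old signature, e.g. (a sketch; the ECKD discipline reads its device characteristics this way):

    /* Sketch: unchanged caller of dasd_generic_read_dev_chars(). */
    struct dasd_eckd_characteristics rdc_data;
    int rc;

    rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
                                     &rdc_data, sizeof(rdc_data));
    if (rc)
            dev_warn(&device->cdev->dev,
                     "Reading device characteristics failed (rc=%d)\n", rc);
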