| .. | .. |
|---|
| 70 | 70 | * SECTION: prototypes for static functions of dasd.c |
|---|
| 71 | 71 | */ |
|---|
| 72 | 72 | static int dasd_alloc_queue(struct dasd_block *); |
|---|
| 73 | | -static void dasd_setup_queue(struct dasd_block *); |
|---|
| 74 | 73 | static void dasd_free_queue(struct dasd_block *); |
|---|
| 75 | 74 | static int dasd_flush_block_queue(struct dasd_block *); |
|---|
| 76 | 75 | static void dasd_device_tasklet(unsigned long); |
|---|
| .. | .. |
|---|
| 120 | 119 | kfree(device); |
|---|
| 121 | 120 | return ERR_PTR(-ENOMEM); |
|---|
| 122 | 121 | } |
|---|
| 122 | + /* Get two pages for ese format. */ |
|---|
| 123 | + device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); |
|---|
| 124 | + if (!device->ese_mem) { |
|---|
| 125 | + free_page((unsigned long) device->erp_mem); |
|---|
| 126 | + free_pages((unsigned long) device->ccw_mem, 1); |
|---|
| 127 | + kfree(device); |
|---|
| 128 | + return ERR_PTR(-ENOMEM); |
|---|
| 129 | + } |
|---|
| 123 | 130 | |
|---|
| 124 | 131 | dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); |
|---|
| 125 | 132 | dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); |
|---|
| 133 | + dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); |
|---|
| 126 | 134 | spin_lock_init(&device->mem_lock); |
|---|
| 127 | 135 | atomic_set(&device->tasklet_scheduled, 0); |
|---|
| 128 | 136 | tasklet_init(&device->tasklet, dasd_device_tasklet, |
|---|
| .. | .. |
|---|
| 146 | 154 | void dasd_free_device(struct dasd_device *device) |
|---|
| 147 | 155 | { |
|---|
| 148 | 156 | kfree(device->private); |
|---|
| 157 | + free_pages((unsigned long) device->ese_mem, 1); |
|---|
| 149 | 158 | free_page((unsigned long) device->erp_mem); |
|---|
| 150 | 159 | free_pages((unsigned long) device->ccw_mem, 1); |
|---|
| 151 | 160 | kfree(device); |
|---|
| .. | .. |
|---|
| 169 | 178 | (unsigned long) block); |
|---|
| 170 | 179 | INIT_LIST_HEAD(&block->ccw_queue); |
|---|
| 171 | 180 | spin_lock_init(&block->queue_lock); |
|---|
| 181 | + INIT_LIST_HEAD(&block->format_list); |
|---|
| 182 | + spin_lock_init(&block->format_lock); |
|---|
| 172 | 183 | timer_setup(&block->timer, dasd_block_timeout, 0); |
|---|
| 173 | 184 | spin_lock_init(&block->profile.lock); |
|---|
| 174 | 185 | |
|---|
| .. | .. |
|---|
| 348 | 359 | } |
|---|
| 349 | 360 | return rc; |
|---|
| 350 | 361 | } |
|---|
| 351 | | - dasd_setup_queue(block); |
|---|
| 362 | + if (device->discipline->setup_blk_queue) |
|---|
| 363 | + device->discipline->setup_blk_queue(block); |
|---|
| 352 | 364 | set_capacity(block->gdp, |
|---|
| 353 | 365 | block->blocks << block->s2b_shift); |
|---|
| 354 | 366 | device->state = DASD_STATE_READY; |
|---|
| .. | .. |
|---|
| 1192 | 1204 | return rc; |
|---|
| 1193 | 1205 | } |
|---|
| 1194 | 1206 | |
|---|
| 1195 | | -static int dasd_hosts_open(struct inode *inode, struct file *file) |
|---|
| 1196 | | -{ |
|---|
| 1197 | | - struct dasd_device *device = inode->i_private; |
|---|
| 1198 | | - |
|---|
| 1199 | | - return single_open(file, dasd_hosts_show, device); |
|---|
| 1200 | | -} |
|---|
| 1201 | | - |
|---|
| 1202 | | -static const struct file_operations dasd_hosts_fops = { |
|---|
| 1203 | | - .owner = THIS_MODULE, |
|---|
| 1204 | | - .open = dasd_hosts_open, |
|---|
| 1205 | | - .read = seq_read, |
|---|
| 1206 | | - .llseek = seq_lseek, |
|---|
| 1207 | | - .release = single_release, |
|---|
| 1208 | | -}; |
|---|
| 1207 | +DEFINE_SHOW_ATTRIBUTE(dasd_hosts); |
|---|
| 1209 | 1208 | |
|---|
| 1210 | 1209 | static void dasd_hosts_exit(struct dasd_device *device) |
|---|
| 1211 | 1210 | { |
|---|
| .. | .. |
|---|
| 1271 | 1270 | } |
|---|
| 1272 | 1271 | EXPORT_SYMBOL(dasd_smalloc_request); |
|---|
| 1273 | 1272 | |
|---|
| 1273 | +struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength, |
|---|
| 1274 | + int datasize, |
|---|
| 1275 | + struct dasd_device *device) |
|---|
| 1276 | +{ |
|---|
| 1277 | + struct dasd_ccw_req *cqr; |
|---|
| 1278 | + unsigned long flags; |
|---|
| 1279 | + int size, cqr_size; |
|---|
| 1280 | + char *data; |
|---|
| 1281 | + |
|---|
| 1282 | + cqr_size = (sizeof(*cqr) + 7L) & -8L; |
|---|
| 1283 | + size = cqr_size; |
|---|
| 1284 | + if (cplength > 0) |
|---|
| 1285 | + size += cplength * sizeof(struct ccw1); |
|---|
| 1286 | + if (datasize > 0) |
|---|
| 1287 | + size += datasize; |
|---|
| 1288 | + |
|---|
| 1289 | + spin_lock_irqsave(&device->mem_lock, flags); |
|---|
| 1290 | + cqr = dasd_alloc_chunk(&device->ese_chunks, size); |
|---|
| 1291 | + spin_unlock_irqrestore(&device->mem_lock, flags); |
|---|
| 1292 | + if (!cqr) |
|---|
| 1293 | + return ERR_PTR(-ENOMEM); |
|---|
| 1294 | + memset(cqr, 0, sizeof(*cqr)); |
|---|
| 1295 | + data = (char *)cqr + cqr_size; |
|---|
| 1296 | + cqr->cpaddr = NULL; |
|---|
| 1297 | + if (cplength > 0) { |
|---|
| 1298 | + cqr->cpaddr = data; |
|---|
| 1299 | + data += cplength * sizeof(struct ccw1); |
|---|
| 1300 | + memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); |
|---|
| 1301 | + } |
|---|
| 1302 | + cqr->data = NULL; |
|---|
| 1303 | + if (datasize > 0) { |
|---|
| 1304 | + cqr->data = data; |
|---|
| 1305 | + memset(cqr->data, 0, datasize); |
|---|
| 1306 | + } |
|---|
| 1307 | + |
|---|
| 1308 | + cqr->magic = magic; |
|---|
| 1309 | + set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
|---|
| 1310 | + dasd_get_device(device); |
|---|
| 1311 | + |
|---|
| 1312 | + return cqr; |
|---|
| 1313 | +} |
|---|
| 1314 | +EXPORT_SYMBOL(dasd_fmalloc_request); |
|---|
| 1315 | + |
|---|
| 1274 | 1316 | void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
|---|
| 1275 | 1317 | { |
|---|
| 1276 | 1318 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 1281 | 1323 | dasd_put_device(device); |
|---|
| 1282 | 1324 | } |
|---|
| 1283 | 1325 | EXPORT_SYMBOL(dasd_sfree_request); |
|---|
| 1326 | + |
|---|
| 1327 | +void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
|---|
| 1328 | +{ |
|---|
| 1329 | + unsigned long flags; |
|---|
| 1330 | + |
|---|
| 1331 | + spin_lock_irqsave(&device->mem_lock, flags); |
|---|
| 1332 | + dasd_free_chunk(&device->ese_chunks, cqr); |
|---|
| 1333 | + spin_unlock_irqrestore(&device->mem_lock, flags); |
|---|
| 1334 | + dasd_put_device(device); |
|---|
| 1335 | +} |
|---|
| 1336 | +EXPORT_SYMBOL(dasd_ffree_request); |
|---|
| 1284 | 1337 | |
|---|
| 1285 | 1338 | /* |
|---|
| 1286 | 1339 | * Check discipline magic in cqr. |
|---|
| .. | .. |
|---|
| 1409 | 1462 | if (!cqr->lpm) |
|---|
| 1410 | 1463 | cqr->lpm = dasd_path_get_opm(device); |
|---|
| 1411 | 1464 | } |
|---|
| 1465 | + /* |
|---|
| 1466 | + * remember the number of formatted tracks to prevent double format on |
|---|
| 1467 | + * ESE devices |
|---|
| 1468 | + */ |
|---|
| 1469 | + if (cqr->block) |
|---|
| 1470 | + cqr->trkcount = atomic_read(&cqr->block->trkcount); |
|---|
| 1471 | + |
|---|
| 1412 | 1472 | if (cqr->cpmode == 1) { |
|---|
| 1413 | 1473 | rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, |
|---|
| 1414 | 1474 | (long) cqr, cqr->lpm); |
|---|
| .. | .. |
|---|
| 1586 | 1646 | irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); |
|---|
| 1587 | 1647 | } |
|---|
| 1588 | 1648 | |
|---|
| 1649 | +static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) |
|---|
| 1650 | +{ |
|---|
| 1651 | + struct dasd_device *device = NULL; |
|---|
| 1652 | + u8 *sense = NULL; |
|---|
| 1653 | + |
|---|
| 1654 | + if (!block) |
|---|
| 1655 | + return 0; |
|---|
| 1656 | + device = block->base; |
|---|
| 1657 | + if (!device || !device->discipline->is_ese) |
|---|
| 1658 | + return 0; |
|---|
| 1659 | + if (!device->discipline->is_ese(device)) |
|---|
| 1660 | + return 0; |
|---|
| 1661 | + |
|---|
| 1662 | + sense = dasd_get_sense(irb); |
|---|
| 1663 | + if (!sense) |
|---|
| 1664 | + return 0; |
|---|
| 1665 | + |
|---|
| 1666 | + return !!(sense[1] & SNS1_NO_REC_FOUND) || |
|---|
| 1667 | + !!(sense[1] & SNS1_FILE_PROTECTED) || |
|---|
| 1668 | + scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; |
|---|
| 1669 | +} |
|---|
| 1670 | + |
|---|
| 1671 | +static int dasd_ese_oos_cond(u8 *sense) |
|---|
| 1672 | +{ |
|---|
| 1673 | + return sense[0] & SNS0_EQUIPMENT_CHECK && |
|---|
| 1674 | + sense[1] & SNS1_PERM_ERR && |
|---|
| 1675 | + sense[1] & SNS1_WRITE_INHIBITED && |
|---|
| 1676 | + sense[25] == 0x01; |
|---|
| 1677 | +} |
|---|
| 1678 | + |
|---|
| 1589 | 1679 | /* |
|---|
| 1590 | 1680 | * Interrupt handler for "normal" ssch-io based dasd devices. |
|---|
| 1591 | 1681 | */ |
|---|
| 1592 | 1682 | void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, |
|---|
| 1593 | 1683 | struct irb *irb) |
|---|
| 1594 | 1684 | { |
|---|
| 1595 | | - struct dasd_ccw_req *cqr, *next; |
|---|
| 1685 | + struct dasd_ccw_req *cqr, *next, *fcqr; |
|---|
| 1596 | 1686 | struct dasd_device *device; |
|---|
| 1597 | 1687 | unsigned long now; |
|---|
| 1598 | 1688 | int nrf_suppressed = 0; |
|---|
| 1599 | 1689 | int fp_suppressed = 0; |
|---|
| 1690 | + struct request *req; |
|---|
| 1600 | 1691 | u8 *sense = NULL; |
|---|
| 1601 | 1692 | int expires; |
|---|
| 1602 | 1693 | |
|---|
| .. | .. |
|---|
| 1654 | 1745 | test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); |
|---|
| 1655 | 1746 | nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && |
|---|
| 1656 | 1747 | test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); |
|---|
| 1748 | + |
|---|
| 1749 | + /* |
|---|
| 1750 | + * Extent pool probably out-of-space. |
|---|
| 1751 | + * Stop device and check exhaust level. |
|---|
| 1752 | + */ |
|---|
| 1753 | + if (dasd_ese_oos_cond(sense)) { |
|---|
| 1754 | + dasd_generic_space_exhaust(device, cqr); |
|---|
| 1755 | + device->discipline->ext_pool_exhaust(device, cqr); |
|---|
| 1756 | + dasd_put_device(device); |
|---|
| 1757 | + return; |
|---|
| 1758 | + } |
|---|
| 1657 | 1759 | } |
|---|
| 1658 | 1760 | if (!(fp_suppressed || nrf_suppressed)) |
|---|
| 1659 | 1761 | device->discipline->dump_sense_dbf(device, irb, "int"); |
|---|
| .. | .. |
|---|
| 1683 | 1785 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
|---|
| 1684 | 1786 | "invalid device in request"); |
|---|
| 1685 | 1787 | return; |
|---|
| 1788 | + } |
|---|
| 1789 | + |
|---|
| 1790 | + if (dasd_ese_needs_format(cqr->block, irb)) { |
|---|
| 1791 | + req = dasd_get_callback_data(cqr); |
|---|
| 1792 | + if (!req) { |
|---|
| 1793 | + cqr->status = DASD_CQR_ERROR; |
|---|
| 1794 | + return; |
|---|
| 1795 | + } |
|---|
| 1796 | + if (rq_data_dir(req) == READ) { |
|---|
| 1797 | + device->discipline->ese_read(cqr, irb); |
|---|
| 1798 | + cqr->status = DASD_CQR_SUCCESS; |
|---|
| 1799 | + cqr->stopclk = now; |
|---|
| 1800 | + dasd_device_clear_timer(device); |
|---|
| 1801 | + dasd_schedule_device_bh(device); |
|---|
| 1802 | + return; |
|---|
| 1803 | + } |
|---|
| 1804 | + fcqr = device->discipline->ese_format(device, cqr, irb); |
|---|
| 1805 | + if (IS_ERR(fcqr)) { |
|---|
| 1806 | + if (PTR_ERR(fcqr) == -EINVAL) { |
|---|
| 1807 | + cqr->status = DASD_CQR_ERROR; |
|---|
| 1808 | + return; |
|---|
| 1809 | + } |
|---|
| 1810 | + /* |
|---|
| 1811 | + * If we can't format now, let the request go |
|---|
| 1812 | + * one extra round. Maybe we can format later. |
|---|
| 1813 | + */ |
|---|
| 1814 | + cqr->status = DASD_CQR_QUEUED; |
|---|
| 1815 | + dasd_schedule_device_bh(device); |
|---|
| 1816 | + return; |
|---|
| 1817 | + } else { |
|---|
| 1818 | + fcqr->status = DASD_CQR_QUEUED; |
|---|
| 1819 | + cqr->status = DASD_CQR_QUEUED; |
|---|
| 1820 | + list_add(&fcqr->devlist, &device->ccw_queue); |
|---|
| 1821 | + dasd_schedule_device_bh(device); |
|---|
| 1822 | + return; |
|---|
| 1823 | + } |
|---|
| 1686 | 1824 | } |
|---|
| 1687 | 1825 | |
|---|
| 1688 | 1826 | /* Check for clear pending */ |
|---|
| .. | .. |
|---|
| 1923 | 2061 | static int __dasd_device_is_unusable(struct dasd_device *device, |
|---|
| 1924 | 2062 | struct dasd_ccw_req *cqr) |
|---|
| 1925 | 2063 | { |
|---|
| 1926 | | - int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); |
|---|
| 2064 | + int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); |
|---|
| 1927 | 2065 | |
|---|
| 1928 | 2066 | if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && |
|---|
| 1929 | 2067 | !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { |
|---|
| .. | .. |
|---|
| 1990 | 2128 | if (device->stopped & |
|---|
| 1991 | 2129 | ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) |
|---|
| 1992 | 2130 | return; |
|---|
| 1993 | | - rc = device->discipline->verify_path(device, |
|---|
| 1994 | | - dasd_path_get_tbvpm(device)); |
|---|
| 2131 | + rc = device->discipline->pe_handler(device, |
|---|
| 2132 | + dasd_path_get_tbvpm(device)); |
|---|
| 1995 | 2133 | if (rc) |
|---|
| 1996 | 2134 | dasd_device_set_timer(device, 50); |
|---|
| 1997 | 2135 | else |
|---|
| .. | .. |
|---|
| 2425 | 2563 | EXPORT_SYMBOL(dasd_sleep_on_queue); |
|---|
| 2426 | 2564 | |
|---|
| 2427 | 2565 | /* |
|---|
| 2566 | + * Start requests from a ccw_queue and wait interruptible for their completion. |
|---|
| 2567 | + */ |
|---|
| 2568 | +int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) |
|---|
| 2569 | +{ |
|---|
| 2570 | + return _dasd_sleep_on_queue(ccw_queue, 1); |
|---|
| 2571 | +} |
|---|
| 2572 | +EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); |
|---|
| 2573 | + |
|---|
| 2574 | +/* |
|---|
| 2428 | 2575 | * Queue a request to the tail of the device ccw_queue and wait |
|---|
| 2429 | 2576 | * interruptible for its completion. |
|---|
| 2430 | 2577 | */ |
|---|
| .. | .. |
|---|
| 2622 | 2769 | { |
|---|
| 2623 | 2770 | struct request *req; |
|---|
| 2624 | 2771 | blk_status_t error = BLK_STS_OK; |
|---|
| 2772 | + unsigned int proc_bytes; |
|---|
| 2625 | 2773 | int status; |
|---|
| 2626 | 2774 | |
|---|
| 2627 | 2775 | req = (struct request *) cqr->callback_data; |
|---|
| 2628 | 2776 | dasd_profile_end(cqr->block, cqr, req); |
|---|
| 2629 | 2777 | |
|---|
| 2778 | + proc_bytes = cqr->proc_bytes; |
|---|
| 2630 | 2779 | status = cqr->block->base->discipline->free_cp(cqr, req); |
|---|
| 2631 | 2780 | if (status < 0) |
|---|
| 2632 | 2781 | error = errno_to_blk_status(status); |
|---|
| .. | .. |
|---|
| 2657 | 2806 | blk_mq_end_request(req, error); |
|---|
| 2658 | 2807 | blk_mq_run_hw_queues(req->q, true); |
|---|
| 2659 | 2808 | } else { |
|---|
| 2660 | | - blk_mq_complete_request(req); |
|---|
| 2809 | + /* |
|---|
| 2810 | + * Partial completed requests can happen with ESE devices. |
|---|
| 2811 | + * During read we might have gotten a NRF error and have to |
|---|
| 2812 | + * complete a request partially. |
|---|
| 2813 | + */ |
|---|
| 2814 | + if (proc_bytes) { |
|---|
| 2815 | + blk_update_request(req, BLK_STS_OK, proc_bytes); |
|---|
| 2816 | + blk_mq_requeue_request(req, true); |
|---|
| 2817 | + } else if (likely(!blk_should_fake_timeout(req->q))) { |
|---|
| 2818 | + blk_mq_complete_request(req); |
|---|
| 2819 | + } |
|---|
| 2661 | 2820 | } |
|---|
| 2662 | 2821 | } |
|---|
| 2663 | 2822 | |
|---|
| .. | .. |
|---|
| 2826 | 2985 | * Requeue a request back to the block request queue |
|---|
| 2827 | 2986 | * only works for block requests |
|---|
| 2828 | 2987 | */ |
|---|
| 2829 | | -static int _dasd_requeue_request(struct dasd_ccw_req *cqr) |
|---|
| 2988 | +static void _dasd_requeue_request(struct dasd_ccw_req *cqr) |
|---|
| 2830 | 2989 | { |
|---|
| 2831 | | - struct dasd_block *block = cqr->block; |
|---|
| 2832 | 2990 | struct request *req; |
|---|
| 2833 | 2991 | |
|---|
| 2834 | | - if (!block) |
|---|
| 2835 | | - return -EINVAL; |
|---|
| 2836 | 2992 | /* |
|---|
| 2837 | 2993 | * If the request is an ERP request there is nothing to requeue. |
|---|
| 2838 | 2994 | * This will be done with the remaining original request. |
|---|
| 2839 | 2995 | */ |
|---|
| 2840 | 2996 | if (cqr->refers) |
|---|
| 2841 | | - return 0; |
|---|
| 2997 | + return; |
|---|
| 2842 | 2998 | spin_lock_irq(&cqr->dq->lock); |
|---|
| 2843 | 2999 | req = (struct request *) cqr->callback_data; |
|---|
| 2844 | | - blk_mq_requeue_request(req, false); |
|---|
| 3000 | + blk_mq_requeue_request(req, true); |
|---|
| 2845 | 3001 | spin_unlock_irq(&cqr->dq->lock); |
|---|
| 2846 | 3002 | |
|---|
| 2847 | | - return 0; |
|---|
| 3003 | + return; |
|---|
| 2848 | 3004 | } |
|---|
| 2849 | 3005 | |
|---|
| 2850 | | -/* |
|---|
| 2851 | | - * Go through all request on the dasd_block request queue, cancel them |
|---|
| 2852 | | - * on the respective dasd_device, and return them to the generic |
|---|
| 2853 | | - * block layer. |
|---|
| 2854 | | - */ |
|---|
| 2855 | | -static int dasd_flush_block_queue(struct dasd_block *block) |
|---|
| 3006 | +static int _dasd_requests_to_flushqueue(struct dasd_block *block, |
|---|
| 3007 | + struct list_head *flush_queue) |
|---|
| 2856 | 3008 | { |
|---|
| 2857 | 3009 | struct dasd_ccw_req *cqr, *n; |
|---|
| 2858 | | - int rc, i; |
|---|
| 2859 | | - struct list_head flush_queue; |
|---|
| 2860 | 3010 | unsigned long flags; |
|---|
| 3011 | + int rc, i; |
|---|
| 2861 | 3012 | |
|---|
| 2862 | | - INIT_LIST_HEAD(&flush_queue); |
|---|
| 2863 | | - spin_lock_bh(&block->queue_lock); |
|---|
| 3013 | + spin_lock_irqsave(&block->queue_lock, flags); |
|---|
| 2864 | 3014 | rc = 0; |
|---|
| 2865 | 3015 | restart: |
|---|
| 2866 | 3016 | list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { |
|---|
| .. | .. |
|---|
| 2875 | 3025 | * is returned from the dasd_device layer. |
|---|
| 2876 | 3026 | */ |
|---|
| 2877 | 3027 | cqr->callback = _dasd_wake_block_flush_cb; |
|---|
| 2878 | | - for (i = 0; cqr != NULL; cqr = cqr->refers, i++) |
|---|
| 2879 | | - list_move_tail(&cqr->blocklist, &flush_queue); |
|---|
| 3028 | + for (i = 0; cqr; cqr = cqr->refers, i++) |
|---|
| 3029 | + list_move_tail(&cqr->blocklist, flush_queue); |
|---|
| 2880 | 3030 | if (i > 1) |
|---|
| 2881 | 3031 | /* moved more than one request - need to restart */ |
|---|
| 2882 | 3032 | goto restart; |
|---|
| 2883 | 3033 | } |
|---|
| 2884 | | - spin_unlock_bh(&block->queue_lock); |
|---|
| 3034 | + spin_unlock_irqrestore(&block->queue_lock, flags); |
|---|
| 3035 | + |
|---|
| 3036 | + return rc; |
|---|
| 3037 | +} |
|---|
| 3038 | + |
|---|
| 3039 | +/* |
|---|
| 3040 | + * Go through all request on the dasd_block request queue, cancel them |
|---|
| 3041 | + * on the respective dasd_device, and return them to the generic |
|---|
| 3042 | + * block layer. |
|---|
| 3043 | + */ |
|---|
| 3044 | +static int dasd_flush_block_queue(struct dasd_block *block) |
|---|
| 3045 | +{ |
|---|
| 3046 | + struct dasd_ccw_req *cqr, *n; |
|---|
| 3047 | + struct list_head flush_queue; |
|---|
| 3048 | + unsigned long flags; |
|---|
| 3049 | + int rc; |
|---|
| 3050 | + |
|---|
| 3051 | + INIT_LIST_HEAD(&flush_queue); |
|---|
| 3052 | + rc = _dasd_requests_to_flushqueue(block, &flush_queue); |
|---|
| 3053 | + |
|---|
| 2885 | 3054 | /* Now call the callback function of flushed requests */ |
|---|
| 2886 | 3055 | restart_cb: |
|---|
| 2887 | 3056 | list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { |
|---|
| .. | .. |
|---|
| 3150 | 3319 | } |
|---|
| 3151 | 3320 | |
|---|
| 3152 | 3321 | /* |
|---|
| 3153 | | - * Allocate and initialize request queue. |
|---|
| 3154 | | - */ |
|---|
| 3155 | | -static void dasd_setup_queue(struct dasd_block *block) |
|---|
| 3156 | | -{ |
|---|
| 3157 | | - unsigned int logical_block_size = block->bp_block; |
|---|
| 3158 | | - struct request_queue *q = block->request_queue; |
|---|
| 3159 | | - unsigned int max_bytes, max_discard_sectors; |
|---|
| 3160 | | - int max; |
|---|
| 3161 | | - |
|---|
| 3162 | | - if (block->base->features & DASD_FEATURE_USERAW) { |
|---|
| 3163 | | - /* |
|---|
| 3164 | | - * the max_blocks value for raw_track access is 256 |
|---|
| 3165 | | - * it is higher than the native ECKD value because we |
|---|
| 3166 | | - * only need one ccw per track |
|---|
| 3167 | | - * so the max_hw_sectors are |
|---|
| 3168 | | - * 2048 x 512B = 1024kB = 16 tracks |
|---|
| 3169 | | - */ |
|---|
| 3170 | | - max = 2048; |
|---|
| 3171 | | - } else { |
|---|
| 3172 | | - max = block->base->discipline->max_blocks << block->s2b_shift; |
|---|
| 3173 | | - } |
|---|
| 3174 | | - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
|---|
| 3175 | | - q->limits.max_dev_sectors = max; |
|---|
| 3176 | | - blk_queue_logical_block_size(q, logical_block_size); |
|---|
| 3177 | | - blk_queue_max_hw_sectors(q, max); |
|---|
| 3178 | | - blk_queue_max_segments(q, USHRT_MAX); |
|---|
| 3179 | | - /* with page sized segments we can translate each segement into |
|---|
| 3180 | | - * one idaw/tidaw |
|---|
| 3181 | | - */ |
|---|
| 3182 | | - blk_queue_max_segment_size(q, PAGE_SIZE); |
|---|
| 3183 | | - blk_queue_segment_boundary(q, PAGE_SIZE - 1); |
|---|
| 3184 | | - |
|---|
| 3185 | | - /* Only activate blocklayer discard support for devices that support it */ |
|---|
| 3186 | | - if (block->base->features & DASD_FEATURE_DISCARD) { |
|---|
| 3187 | | - q->limits.discard_granularity = logical_block_size; |
|---|
| 3188 | | - q->limits.discard_alignment = PAGE_SIZE; |
|---|
| 3189 | | - |
|---|
| 3190 | | - /* Calculate max_discard_sectors and make it PAGE aligned */ |
|---|
| 3191 | | - max_bytes = USHRT_MAX * logical_block_size; |
|---|
| 3192 | | - max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE; |
|---|
| 3193 | | - max_discard_sectors = max_bytes / logical_block_size; |
|---|
| 3194 | | - |
|---|
| 3195 | | - blk_queue_max_discard_sectors(q, max_discard_sectors); |
|---|
| 3196 | | - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); |
|---|
| 3197 | | - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
|---|
| 3198 | | - } |
|---|
| 3199 | | -} |
|---|
| 3200 | | - |
|---|
| 3201 | | -/* |
|---|
| 3202 | 3322 | * Deactivate and free request queue. |
|---|
| 3203 | 3323 | */ |
|---|
| 3204 | 3324 | static void dasd_free_queue(struct dasd_block *block) |
|---|
| .. | .. |
|---|
| 3316 | 3436 | dasd_proc_exit(); |
|---|
| 3317 | 3437 | #endif |
|---|
| 3318 | 3438 | dasd_eer_exit(); |
|---|
| 3319 | | - if (dasd_page_cache != NULL) { |
|---|
| 3320 | | - kmem_cache_destroy(dasd_page_cache); |
|---|
| 3321 | | - dasd_page_cache = NULL; |
|---|
| 3322 | | - } |
|---|
| 3439 | + kmem_cache_destroy(dasd_page_cache); |
|---|
| 3440 | + dasd_page_cache = NULL; |
|---|
| 3323 | 3441 | dasd_gendisk_exit(); |
|---|
| 3324 | 3442 | dasd_devmap_exit(); |
|---|
| 3325 | 3443 | if (dasd_debug_area != NULL) { |
|---|
| .. | .. |
|---|
| 3827 | 3945 | } |
|---|
| 3828 | 3946 | EXPORT_SYMBOL_GPL(dasd_generic_verify_path); |
|---|
| 3829 | 3947 | |
|---|
| 3948 | +void dasd_generic_space_exhaust(struct dasd_device *device, |
|---|
| 3949 | + struct dasd_ccw_req *cqr) |
|---|
| 3950 | +{ |
|---|
| 3951 | + dasd_eer_write(device, NULL, DASD_EER_NOSPC); |
|---|
| 3952 | + |
|---|
| 3953 | + if (device->state < DASD_STATE_BASIC) |
|---|
| 3954 | + return; |
|---|
| 3955 | + |
|---|
| 3956 | + if (cqr->status == DASD_CQR_IN_IO || |
|---|
| 3957 | + cqr->status == DASD_CQR_CLEAR_PENDING) { |
|---|
| 3958 | + cqr->status = DASD_CQR_QUEUED; |
|---|
| 3959 | + cqr->retries++; |
|---|
| 3960 | + } |
|---|
| 3961 | + dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); |
|---|
| 3962 | + dasd_device_clear_timer(device); |
|---|
| 3963 | + dasd_schedule_device_bh(device); |
|---|
| 3964 | +} |
|---|
| 3965 | +EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); |
|---|
| 3966 | + |
|---|
| 3967 | +void dasd_generic_space_avail(struct dasd_device *device) |
|---|
| 3968 | +{ |
|---|
| 3969 | + dev_info(&device->cdev->dev, "Extent pool space is available\n"); |
|---|
| 3970 | + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); |
|---|
| 3971 | + |
|---|
| 3972 | + dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); |
|---|
| 3973 | + dasd_schedule_device_bh(device); |
|---|
| 3974 | + |
|---|
| 3975 | + if (device->block) { |
|---|
| 3976 | + dasd_schedule_block_bh(device->block); |
|---|
| 3977 | + if (device->block->request_queue) |
|---|
| 3978 | + blk_mq_run_hw_queues(device->block->request_queue, true); |
|---|
| 3979 | + } |
|---|
| 3980 | + if (!device->stopped) |
|---|
| 3981 | + wake_up(&generic_waitq); |
|---|
| 3982 | +} |
|---|
| 3983 | +EXPORT_SYMBOL_GPL(dasd_generic_space_avail); |
|---|
| 3984 | + |
|---|
| 3830 | 3985 | /* |
|---|
| 3831 | 3986 | * clear active requests and requeue them to block layer if possible |
|---|
| 3832 | 3987 | */ |
|---|
| 3833 | 3988 | static int dasd_generic_requeue_all_requests(struct dasd_device *device) |
|---|
| 3834 | 3989 | { |
|---|
| 3990 | + struct dasd_block *block = device->block; |
|---|
| 3835 | 3991 | struct list_head requeue_queue; |
|---|
| 3836 | 3992 | struct dasd_ccw_req *cqr, *n; |
|---|
| 3837 | | - struct dasd_ccw_req *refers; |
|---|
| 3838 | 3993 | int rc; |
|---|
| 3839 | 3994 | |
|---|
| 3995 | + if (!block) |
|---|
| 3996 | + return 0; |
|---|
| 3997 | + |
|---|
| 3840 | 3998 | INIT_LIST_HEAD(&requeue_queue); |
|---|
| 3841 | | - spin_lock_irq(get_ccwdev_lock(device->cdev)); |
|---|
| 3842 | | - rc = 0; |
|---|
| 3843 | | - list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { |
|---|
| 3844 | | - /* Check status and move request to flush_queue */ |
|---|
| 3845 | | - if (cqr->status == DASD_CQR_IN_IO) { |
|---|
| 3846 | | - rc = device->discipline->term_IO(cqr); |
|---|
| 3847 | | - if (rc) { |
|---|
| 3848 | | - /* unable to terminate requeust */ |
|---|
| 3849 | | - dev_err(&device->cdev->dev, |
|---|
| 3850 | | - "Unable to terminate request %p " |
|---|
| 3851 | | - "on suspend\n", cqr); |
|---|
| 3852 | | - spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
|---|
| 3853 | | - dasd_put_device(device); |
|---|
| 3854 | | - return rc; |
|---|
| 3855 | | - } |
|---|
| 3999 | + rc = _dasd_requests_to_flushqueue(block, &requeue_queue); |
|---|
| 4000 | + |
|---|
| 4001 | + /* Now call the callback function of flushed requests */ |
|---|
| 4002 | +restart_cb: |
|---|
| 4003 | + list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { |
|---|
| 4004 | + wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); |
|---|
| 4005 | + /* Process finished ERP request. */ |
|---|
| 4006 | + if (cqr->refers) { |
|---|
| 4007 | + spin_lock_bh(&block->queue_lock); |
|---|
| 4008 | + __dasd_process_erp(block->base, cqr); |
|---|
| 4009 | + spin_unlock_bh(&block->queue_lock); |
|---|
| 4010 | + /* restart list_for_xx loop since dasd_process_erp |
|---|
| 4011 | + * might remove multiple elements |
|---|
| 4012 | + */ |
|---|
| 4013 | + goto restart_cb; |
|---|
| 3856 | 4014 | } |
|---|
| 3857 | | - list_move_tail(&cqr->devlist, &requeue_queue); |
|---|
| 3858 | | - } |
|---|
| 3859 | | - spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
|---|
| 3860 | | - |
|---|
| 3861 | | - list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { |
|---|
| 3862 | | - wait_event(dasd_flush_wq, |
|---|
| 3863 | | - (cqr->status != DASD_CQR_CLEAR_PENDING)); |
|---|
| 3864 | | - |
|---|
| 3865 | | - /* |
|---|
| 3866 | | - * requeue requests to blocklayer will only work |
|---|
| 3867 | | - * for block device requests |
|---|
| 3868 | | - */ |
|---|
| 3869 | | - if (_dasd_requeue_request(cqr)) |
|---|
| 3870 | | - continue; |
|---|
| 3871 | | - |
|---|
| 3872 | | - /* remove requests from device and block queue */ |
|---|
| 3873 | | - list_del_init(&cqr->devlist); |
|---|
| 3874 | | - while (cqr->refers != NULL) { |
|---|
| 3875 | | - refers = cqr->refers; |
|---|
| 3876 | | - /* remove the request from the block queue */ |
|---|
| 3877 | | - list_del(&cqr->blocklist); |
|---|
| 3878 | | - /* free the finished erp request */ |
|---|
| 3879 | | - dasd_free_erp_request(cqr, cqr->memdev); |
|---|
| 3880 | | - cqr = refers; |
|---|
| 3881 | | - } |
|---|
| 3882 | | - |
|---|
| 3883 | | - /* |
|---|
| 3884 | | - * _dasd_requeue_request already checked for a valid |
|---|
| 3885 | | - * blockdevice, no need to check again |
|---|
| 3886 | | - * all erp requests (cqr->refers) have a cqr->block |
|---|
| 3887 | | - * pointer copy from the original cqr |
|---|
| 3888 | | - */ |
|---|
| 4015 | + _dasd_requeue_request(cqr); |
|---|
| 3889 | 4016 | list_del_init(&cqr->blocklist); |
|---|
| 3890 | 4017 | cqr->block->base->discipline->free_cp( |
|---|
| 3891 | 4018 | cqr, (struct request *) cqr->callback_data); |
|---|
| 3892 | | - } |
|---|
| 3893 | | - |
|---|
| 3894 | | - /* |
|---|
| 3895 | | - * if requests remain then they are internal request |
|---|
| 3896 | | - * and go back to the device queue |
|---|
| 3897 | | - */ |
|---|
| 3898 | | - if (!list_empty(&requeue_queue)) { |
|---|
| 3899 | | - /* move freeze_queue to start of the ccw_queue */ |
|---|
| 3900 | | - spin_lock_irq(get_ccwdev_lock(device->cdev)); |
|---|
| 3901 | | - list_splice_tail(&requeue_queue, &device->ccw_queue); |
|---|
| 3902 | | - spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
|---|
| 3903 | 4019 | } |
|---|
| 3904 | 4020 | dasd_schedule_device_bh(device); |
|---|
| 3905 | 4021 | return rc; |
|---|
| .. | .. |
|---|
| 3986 | 4102 | EXPORT_SYMBOL_GPL(dasd_generic_restore_device); |
|---|
| 3987 | 4103 | |
|---|
| 3988 | 4104 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, |
|---|
| 3989 | | - void *rdc_buffer, |
|---|
| 3990 | 4105 | int rdc_buffer_size, |
|---|
| 3991 | 4106 | int magic) |
|---|
| 3992 | 4107 | { |
|---|
| 3993 | 4108 | struct dasd_ccw_req *cqr; |
|---|
| 3994 | 4109 | struct ccw1 *ccw; |
|---|
| 3995 | | - unsigned long *idaw; |
|---|
| 3996 | 4110 | |
|---|
| 3997 | 4111 | cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, |
|---|
| 3998 | 4112 | NULL); |
|---|
| .. | .. |
|---|
| 4007 | 4121 | |
|---|
| 4008 | 4122 | ccw = cqr->cpaddr; |
|---|
| 4009 | 4123 | ccw->cmd_code = CCW_CMD_RDC; |
|---|
| 4010 | | - if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { |
|---|
| 4011 | | - idaw = (unsigned long *) (cqr->data); |
|---|
| 4012 | | - ccw->cda = (__u32)(addr_t) idaw; |
|---|
| 4013 | | - ccw->flags = CCW_FLAG_IDA; |
|---|
| 4014 | | - idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); |
|---|
| 4015 | | - } else { |
|---|
| 4016 | | - ccw->cda = (__u32)(addr_t) rdc_buffer; |
|---|
| 4017 | | - ccw->flags = 0; |
|---|
| 4018 | | - } |
|---|
| 4019 | | - |
|---|
| 4124 | + ccw->cda = (__u32)(addr_t) cqr->data; |
|---|
| 4125 | + ccw->flags = 0; |
|---|
| 4020 | 4126 | ccw->count = rdc_buffer_size; |
|---|
| 4021 | 4127 | cqr->startdev = device; |
|---|
| 4022 | 4128 | cqr->memdev = device; |
|---|
| .. | .. |
|---|
| 4034 | 4140 | int ret; |
|---|
| 4035 | 4141 | struct dasd_ccw_req *cqr; |
|---|
| 4036 | 4142 | |
|---|
| 4037 | | - cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, |
|---|
| 4038 | | - magic); |
|---|
| 4143 | + cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic); |
|---|
| 4039 | 4144 | if (IS_ERR(cqr)) |
|---|
| 4040 | 4145 | return PTR_ERR(cqr); |
|---|
| 4041 | 4146 | |
|---|
| 4042 | 4147 | ret = dasd_sleep_on(cqr); |
|---|
| 4148 | + if (ret == 0) |
|---|
| 4149 | + memcpy(rdc_buffer, cqr->data, rdc_buffer_size); |
|---|
| 4043 | 4150 | dasd_sfree_request(cqr, cqr->memdev); |
|---|
| 4044 | 4151 | return ret; |
|---|
| 4045 | 4152 | } |
|---|