| old | new | |
| --- | --- | --- |
| .. | .. | |
| 36 | 36 | * block device, assembling the pieces to full packets and queuing them to the |
| 37 | 37 | * packet I/O scheduler. |
| 38 | 38 | * |
| 39 | | - * At the top layer there is a custom make_request_fn function that forwards |
| | 39 | + * At the top layer there is a custom ->submit_bio function that forwards |
| 40 | 40 | * read requests directly to the iosched queue and puts write requests in the |
| 41 | 41 | * unaligned write queue. A kernel thread performs the necessary read |
| 42 | 42 | * gathering to convert the unaligned writes to aligned writes and then feeds |
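The comment block above describes the driver's top layer: after this change the block layer calls the driver's ->submit_bio method directly instead of a make_request_fn registered on the queue. As a rough, simplified sketch only (not the pktcdvd code, which routes reads through its own iosched queue), a bio-based driver of the shape described might look like this against the 5.9-era API used in this patch; `my_dev` and its fields are invented names:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_dev {
	struct block_device	*lower_bdev;	/* CD/DVD device we stack on */
	spinlock_t		lock;
	struct bio_list		write_queue;	/* unaligned writes */
	struct task_struct	*worker;	/* does the read-gathering */
};

static blk_qc_t my_submit_bio(struct bio *bio)
{
	struct my_dev *dev = bio->bi_disk->queue->queuedata;

	blk_queue_split(&bio);			/* honour the queue limits */

	if (bio_data_dir(bio) == READ) {
		/* simplified: send reads straight to the underlying device */
		bio_set_dev(bio, dev->lower_bdev);
		return submit_bio_noacct(bio);	/* was generic_make_request() */
	}

	/* writes wait for the worker thread to gather and align them */
	spin_lock_irq(&dev->lock);
	bio_list_add(&dev->write_queue, bio);
	spin_unlock_irq(&dev->lock);
	wake_up_process(dev->worker);

	return BLK_QC_T_NONE;
}
```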
| .. | .. | |
| 913 | 913 | } |
| 914 | 914 | |
| 915 | 915 | atomic_inc(&pd->cdrw.pending_bios); |
| 916 | | - generic_make_request(bio); |
| | 916 | + submit_bio_noacct(bio); |
| 917 | 917 | } |
| 918 | 918 | } |
| 919 | 919 | |
| .. | .. | |
| 1080 | 1080 | } else { |
| 1081 | 1081 | list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); |
| 1082 | 1082 | } |
| 1083 | | -} |
| 1084 | | - |
| 1085 | | -/* |
| 1086 | | - * recover a failed write, query for relocation if possible |
| 1087 | | - * |
| 1088 | | - * returns 1 if recovery is possible, or 0 if not |
| 1089 | | - * |
| 1090 | | - */ |
| 1091 | | -static int pkt_start_recovery(struct packet_data *pkt) |
| 1092 | | -{ |
| 1093 | | - /* |
| 1094 | | - * FIXME. We need help from the file system to implement |
| 1095 | | - * recovery handling. |
| 1096 | | - */ |
| 1097 | | - return 0; |
| 1098 | | -#if 0 |
| 1099 | | - struct request *rq = pkt->rq; |
| 1100 | | - struct pktcdvd_device *pd = rq->rq_disk->private_data; |
| 1101 | | - struct block_device *pkt_bdev; |
| 1102 | | - struct super_block *sb = NULL; |
| 1103 | | - unsigned long old_block, new_block; |
| 1104 | | - sector_t new_sector; |
| 1105 | | - |
| 1106 | | - pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev)); |
| 1107 | | - if (pkt_bdev) { |
| 1108 | | - sb = get_super(pkt_bdev); |
| 1109 | | - bdput(pkt_bdev); |
| 1110 | | - } |
| 1111 | | - |
| 1112 | | - if (!sb) |
| 1113 | | - return 0; |
| 1114 | | - |
| 1115 | | - if (!sb->s_op->relocate_blocks) |
| 1116 | | - goto out; |
| 1117 | | - |
| 1118 | | - old_block = pkt->sector / (CD_FRAMESIZE >> 9); |
| 1119 | | - if (sb->s_op->relocate_blocks(sb, old_block, &new_block)) |
| 1120 | | - goto out; |
| 1121 | | - |
| 1122 | | - new_sector = new_block * (CD_FRAMESIZE >> 9); |
| 1123 | | - pkt->sector = new_sector; |
| 1124 | | - |
| 1125 | | - bio_reset(pkt->bio); |
| 1126 | | - bio_set_dev(pkt->bio, pd->bdev); |
| 1127 | | - bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); |
| 1128 | | - pkt->bio->bi_iter.bi_sector = new_sector; |
| 1129 | | - pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; |
| 1130 | | - pkt->bio->bi_vcnt = pkt->frames; |
| 1131 | | - |
| 1132 | | - pkt->bio->bi_end_io = pkt_end_io_packet_write; |
| 1133 | | - pkt->bio->bi_private = pkt; |
| 1134 | | - |
| 1135 | | - drop_super(sb); |
| 1136 | | - return 1; |
| 1137 | | - |
| 1138 | | -out: |
| 1139 | | - drop_super(sb); |
| 1140 | | - return 0; |
| 1141 | | -#endif |
| 1142 | 1083 | } |
| 1143 | 1084 | |
| 1144 | 1085 | static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state) |
| .. | .. | |
| 1357 | 1298 | break; |
| 1358 | 1299 | |
| 1359 | 1300 | case PACKET_RECOVERY_STATE: |
| 1360 | | - if (pkt_start_recovery(pkt)) { |
| 1361 | | - pkt_start_write(pd, pkt); |
| 1362 | | - } else { |
| 1363 | | - pkt_dbg(2, pd, "No recovery possible\n"); |
| 1364 | | - pkt_set_state(pkt, PACKET_FINISHED_STATE); |
| 1365 | | - } |
| | 1301 | + pkt_dbg(2, pd, "No recovery possible\n"); |
| | 1302 | + pkt_set_state(pkt, PACKET_FINISHED_STATE); |
| 1366 | 1303 | break; |
| 1367 | 1304 | |
| 1368 | 1305 | case PACKET_FINISHED_STATE: |
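Since the removed pkt_start_recovery() could only ever return 0, PACKET_RECOVERY_STATE now goes straight to the "No recovery possible" path and marks the packet finished; the observable behaviour is unchanged.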
| .. | .. | |
| 1613 | 1550 | disc_information di; |
| 1614 | 1551 | track_information ti; |
| 1615 | 1552 | __u32 last_track; |
| 1616 | | - int ret = -1; |
| | 1553 | + int ret; |
| 1617 | 1554 | |
| 1618 | 1555 | ret = pkt_get_disc_info(pd, &di); |
| 1619 | 1556 | if (ret) |
| .. | .. | |
| 2173 | 2110 | int ret; |
| 2174 | 2111 | long lba; |
| 2175 | 2112 | struct request_queue *q; |
| | 2113 | + struct block_device *bdev; |
| 2176 | 2114 | |
| 2177 | 2115 | /* |
| 2178 | 2116 | * We need to re-open the cdrom device without O_NONBLOCK to be able |
| 2179 | 2117 | * to read/write from/to it. It is already opened in O_NONBLOCK mode |
| 2180 | | - * so bdget() can't fail. |
| | 2118 | + * so open should not fail. |
| 2181 | 2119 | */ |
| 2182 | | - bdget(pd->bdev->bd_dev); |
| 2183 | | - ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd); |
| 2184 | | - if (ret) |
| | 2120 | + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd); |
| | 2121 | + if (IS_ERR(bdev)) { |
| | 2122 | + ret = PTR_ERR(bdev); |
| 2185 | 2123 | goto out; |
| | 2124 | + } |
| 2186 | 2125 | |
| 2187 | 2126 | ret = pkt_get_last_written(pd, &lba); |
| 2188 | 2127 | if (ret) { |
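The hunk above, like the later one in pkt_new_dev(), replaces the old bdget() + blkdev_get() pairing with blkdev_get_by_dev(), which looks the device up by dev_t and returns either the block_device or an ERR_PTR. A minimal sketch of that pattern, with my_open_lower_dev() as an invented helper, not part of this driver:

```c
#include <linux/blkdev.h>
#include <linux/err.h>

/* Open a block device by dev_t; errors come back as an ERR_PTR. */
static int my_open_lower_dev(dev_t devt, void *holder,
			     struct block_device **bdevp)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);	/* no separate bdget()/blkdev_get() steps */

	*bdevp = bdev;
	return 0;
}
```

The matching release stays blkdev_put() with the same mode flags, as the out_putdev hunk further down shows.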
| .. | .. | |
| 2192 | 2131 | |
| 2193 | 2132 | set_capacity(pd->disk, lba << 2); |
| 2194 | 2133 | set_capacity(pd->bdev->bd_disk, lba << 2); |
| 2195 | | - bd_set_size(pd->bdev, (loff_t)lba << 11); |
| | 2134 | + bd_set_nr_sectors(pd->bdev, lba << 2); |
| 2196 | 2135 | |
| 2197 | 2136 | q = bdev_get_queue(pd->bdev); |
| 2198 | 2137 | if (write) { |
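Both shifts describe the same capacity, only in different units: lba counts 2048-byte CD frames, and one frame is four 512-byte sectors, so `lba << 2` is the size in 512-byte sectors (the unit set_capacity() and the new bd_set_nr_sectors() expect), while the old bd_set_size() took bytes, i.e. `lba << 11`.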
| .. | .. | |
| 2203 | 2142 | * Some CDRW drives can not handle writes larger than one packet, |
| 2204 | 2143 | * even if the size is a multiple of the packet size. |
| 2205 | 2144 | */ |
| 2206 | | - spin_lock_irq(q->queue_lock); |
| 2207 | 2145 | blk_queue_max_hw_sectors(q, pd->settings.size); |
| 2208 | | - spin_unlock_irq(q->queue_lock); |
| 2209 | 2146 | set_bit(PACKET_WRITABLE, &pd->flags); |
| 2210 | 2147 | } else { |
| 2211 | 2148 | pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); |
| .. | .. | |
| 2228 | 2165 | return 0; |
| 2229 | 2166 | |
| 2230 | 2167 | out_putdev: |
| 2231 | | - blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); |
| | 2168 | + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); |
| 2232 | 2169 | out: |
| 2233 | 2170 | return ret; |
| 2234 | 2171 | } |
| .. | .. | |
| 2430 | 2367 | } |
| 2431 | 2368 | } |
| 2432 | 2369 | |
| 2433 | | -static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) |
| | 2370 | +static blk_qc_t pkt_submit_bio(struct bio *bio) |
| 2434 | 2371 | { |
| 2435 | 2372 | struct pktcdvd_device *pd; |
| 2436 | 2373 | char b[BDEVNAME_SIZE]; |
| 2437 | 2374 | struct bio *split; |
| 2438 | 2375 | |
| 2439 | | - blk_queue_split(q, &bio); |
| | 2376 | + blk_queue_split(&bio); |
| 2440 | 2377 | |
| 2441 | | - pd = q->queuedata; |
| | 2378 | + pd = bio->bi_disk->queue->queuedata; |
| 2442 | 2379 | if (!pd) { |
| 2443 | 2380 | pr_err("%s incorrect request queue\n", bio_devname(bio, b)); |
| 2444 | 2381 | goto end_io; |
| .. | .. | |
| 2482 | 2419 | split = bio; |
| 2483 | 2420 | } |
| 2484 | 2421 | |
| 2485 | | - pkt_make_request_write(q, split); |
| | 2422 | + pkt_make_request_write(bio->bi_disk->queue, split); |
| 2486 | 2423 | } while (split != bio); |
| 2487 | 2424 | |
| 2488 | 2425 | return BLK_QC_T_NONE; |
| .. | .. | |
| 2495 | 2432 | { |
| 2496 | 2433 | struct request_queue *q = pd->disk->queue; |
| 2497 | 2434 | |
| 2498 | | - blk_queue_make_request(q, pkt_make_request); |
| 2499 | 2435 | blk_queue_logical_block_size(q, CD_FRAMESIZE); |
| 2500 | 2436 | blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); |
| 2501 | 2437 | q->queuedata = pd; |
| .. | .. | |
| 2566 | 2502 | static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) |
| 2567 | 2503 | { |
| 2568 | 2504 | int i; |
| 2569 | | - int ret = 0; |
| 2570 | 2505 | char b[BDEVNAME_SIZE]; |
| 2571 | 2506 | struct block_device *bdev; |
| 2572 | 2507 | |
| .. | .. | |
| 2589 | 2524 | } |
| 2590 | 2525 | } |
| 2591 | 2526 | |
| 2592 | | - bdev = bdget(dev); |
| 2593 | | - if (!bdev) |
| 2594 | | - return -ENOMEM; |
| 2595 | | - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); |
| 2596 | | - if (ret) |
| 2597 | | - return ret; |
| | 2527 | + bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL); |
| | 2528 | + if (IS_ERR(bdev)) |
| | 2529 | + return PTR_ERR(bdev); |
| 2598 | 2530 | if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { |
| 2599 | 2531 | blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); |
| 2600 | 2532 | return -EINVAL; |
| .. | .. | |
| 2612 | 2544 | pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); |
| 2613 | 2545 | if (IS_ERR(pd->cdrw.thread)) { |
| 2614 | 2546 | pkt_err(pd, "can't start kernel thread\n"); |
| 2615 | | - ret = -ENOMEM; |
| 2616 | 2547 | goto out_mem; |
| 2617 | 2548 | } |
| 2618 | 2549 | |
| .. | .. | |
| 2624 | 2555 | blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); |
| 2625 | 2556 | /* This is safe: open() is still holding a reference. */ |
| 2626 | 2557 | module_put(THIS_MODULE); |
| 2627 | | - return ret; |
| | 2558 | + return -ENOMEM; |
| 2628 | 2559 | } |
| 2629 | 2560 | |
| 2630 | 2561 | static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) |
| .. | .. | |
| 2644 | 2575 | */ |
| 2645 | 2576 | if (pd->refcnt == 1) |
| 2646 | 2577 | pkt_lock_door(pd, 0); |
| 2647 | | - /* fallthru */ |
| | 2578 | + fallthrough; |
| 2648 | 2579 | /* |
| 2649 | 2580 | * forward selected CDROM ioctls to CD-ROM, for UDF |
| 2650 | 2581 | */ |
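The fallthrough pseudo-keyword replaces comment-style markers such as `/* fallthru */`, so the compiler can verify that falling through a switch case is intentional. A toy example of the usage; the command values and counter are made up:

```c
#include <linux/compiler_attributes.h>	/* fallthrough */
#include <linux/errno.h>

static int my_handle_cmd(int cmd, int *count)
{
	switch (cmd) {
	case 1:
		(*count)++;	/* extra step for cmd 1 ... */
		fallthrough;	/* ... then deliberately run the case 2 path too */
	case 2:
		return *count;
	default:
		return -EINVAL;
	}
}
```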
| .. | .. | |
| 2681 | 2612 | return attached_disk->fops->check_events(attached_disk, clearing); |
| 2682 | 2613 | } |
| 2683 | 2614 | |
| | 2615 | +static char *pkt_devnode(struct gendisk *disk, umode_t *mode) |
| | 2616 | +{ |
| | 2617 | + return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name); |
| | 2618 | +} |
| | 2619 | + |
| 2684 | 2620 | static const struct block_device_operations pktcdvd_ops = { |
| 2685 | 2621 | .owner = THIS_MODULE, |
| | 2622 | + .submit_bio = pkt_submit_bio, |
| 2686 | 2623 | .open = pkt_open, |
| 2687 | 2624 | .release = pkt_close, |
| 2688 | 2625 | .ioctl = pkt_ioctl, |
| | 2626 | + .compat_ioctl = blkdev_compat_ptr_ioctl, |
| 2689 | 2627 | .check_events = pkt_check_events, |
| | 2628 | + .devnode = pkt_devnode, |
| 2690 | 2629 | }; |
| 2691 | | - |
| 2692 | | -static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode) |
| 2693 | | -{ |
| 2694 | | - return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name); |
| 2695 | | -} |
| 2696 | 2630 | |
| 2697 | 2631 | /* |
| 2698 | 2632 | * Set up mapping from pktcdvd device to CD-ROM device. |
| .. | .. | |
| 2749 | 2683 | disk->fops = &pktcdvd_ops; |
| 2750 | 2684 | disk->flags = GENHD_FL_REMOVABLE; |
| 2751 | 2685 | strcpy(disk->disk_name, pd->name); |
| 2752 | | - disk->devnode = pktcdvd_devnode; |
| 2753 | 2686 | disk->private_data = pd; |
| 2754 | | - disk->queue = blk_alloc_queue(GFP_KERNEL); |
| | 2687 | + disk->queue = blk_alloc_queue(NUMA_NO_NODE); |
| 2755 | 2688 | if (!disk->queue) |
| 2756 | 2689 | goto out_mem2; |
| 2757 | 2690 | |
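Taken together with the pktcdvd_ops hunk above, this is the general setup for a bio-based driver after this API change: blk_alloc_queue() takes a NUMA node rather than GFP flags and no longer registers a make_request callback, and the submission entry point lives in block_device_operations instead. A rough sketch under those assumptions, with the my_* names invented for illustration:

```c
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/module.h>

static blk_qc_t my_submit_bio(struct bio *bio);	/* body as in the sketch near the top */

static const struct block_device_operations my_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= my_submit_bio,	/* replaces blk_queue_make_request() */
};

/* Allocate and wire up a bio-based queue for an already-allocated gendisk. */
static int my_setup_disk_queue(struct gendisk *disk, void *driver_data)
{
	struct request_queue *q = blk_alloc_queue(NUMA_NO_NODE);

	if (!q)
		return -ENOMEM;

	blk_queue_logical_block_size(q, 2048);	/* CD-frame-sized blocks, as here */
	q->queuedata = driver_data;		/* what ->submit_bio looks up */
	disk->queue = q;
	disk->fops = &my_fops;
	return 0;
}
```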
| .. | .. | |
| 2762 | 2695 | |
| 2763 | 2696 | /* inherit events of the host device */ |
| 2764 | 2697 | disk->events = pd->bdev->bd_disk->events; |
| 2765 | | - disk->async_events = pd->bdev->bd_disk->async_events; |
| 2766 | 2698 | |
| 2767 | 2699 | add_disk(disk); |
| 2768 | 2700 | |
|---|