hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/rkflash/rkflash_blk.c
@@ -4,6 +4,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/blk-mq.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
@@ -68,43 +69,14 @@
 #define DISABLE_READ _IO('V', 2)
 #define ENABLE_READ _IO('V', 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(rkflash_thread_wait);
-static unsigned long rkflash_req_jiffies;
-static unsigned int rknand_req_do;
+/* Thread for gc operation */
+static DECLARE_WAIT_QUEUE_HEAD(nand_gc_thread_wait);
+static unsigned long nand_gc_do;
+static struct task_struct *nand_gc_thread __read_mostly;
 
 /* For rkflash dev private data, including mtd dev and block dev */
-static int rkflash_dev_initialised = 0;
+static int rkflash_dev_initialised;
 static DEFINE_MUTEX(g_flash_ops_mutex);
-
-static int rkflash_flash_gc(void)
-{
-	int ret;
-
-	if (g_boot_ops->gc) {
-		mutex_lock(&g_flash_ops_mutex);
-		ret = g_boot_ops->gc();
-		mutex_unlock(&g_flash_ops_mutex);
-	} else {
-		ret = -EPERM;
-	}
-
-	return ret;
-}
-
-static int rkflash_blk_discard(u32 sec, u32 n_sec)
-{
-	int ret;
-
-	if (g_boot_ops->discard) {
-		mutex_lock(&g_flash_ops_mutex);
-		ret = g_boot_ops->discard(sec, n_sec);
-		mutex_unlock(&g_flash_ops_mutex);
-	} else {
-		ret = -EPERM;
-	}
-
-	return ret;
-};
 
 static unsigned int rk_partition_init(struct flash_part *part)
 {
@@ -172,12 +144,11 @@
 	return single_open(file, rkflash_blk_proc_show, PDE_DATA(inode));
 }
 
-static const struct file_operations rkflash_blk_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = rkflash_blk_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct proc_ops rkflash_blk_proc_fops = {
+	.proc_open = rkflash_blk_proc_open,
+	.proc_read = seq_read,
+	.proc_lseek = seq_lseek,
+	.proc_release = single_release,
 };
 
 static int rkflash_blk_create_procfs(void)
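
For reference, a minimal sketch of the proc_ops pattern this hunk adopts (struct proc_ops replaced struct file_operations for procfs entries in v5.6). The sketch_* names and the proc_create_data() call in the trailing comment are illustrative only, not taken from this driver:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int sketch_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "sketch\n");
	return 0;
}

static int sketch_proc_open(struct inode *inode, struct file *file)
{
	/* single_open() wires the show callback; private data comes from PDE_DATA() */
	return single_open(file, sketch_proc_show, PDE_DATA(inode));
}

static const struct proc_ops sketch_proc_ops = {
	.proc_open	= sketch_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* e.g.: proc_create_data("sketch", 0444, NULL, &sketch_proc_ops, NULL); */
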
@@ -192,12 +163,23 @@
 	return 0;
 }
 
+static int rkflash_blk_discard(u32 sec, u32 n_sec)
+{
+	int ret;
+
+	if (g_boot_ops->discard)
+		ret = g_boot_ops->discard(sec, n_sec);
+	else
+		ret = -EPERM;
+
+	return ret;
+};
+
 static int rkflash_blk_xfer(struct flash_blk_dev *dev,
 			    unsigned long start,
 			    unsigned long nsector,
 			    char *buf,
-			    int cmd,
-			    int totle_nsec)
+			    int cmd)
 {
 	int ret;
 
@@ -213,11 +195,9 @@
 	case READ:
 		totle_read_data += nsector;
 		totle_read_count++;
-		mutex_lock(&g_flash_ops_mutex);
 		rkflash_print_bio("rkflash r sec= %lx, n_sec= %lx\n",
 				  start, nsector);
 		ret = g_boot_ops->read(start, nsector, buf);
-		mutex_unlock(&g_flash_ops_mutex);
 		if (ret)
 			ret = -EIO;
 		break;
@@ -225,11 +205,9 @@
 	case WRITE:
 		totle_write_data += nsector;
 		totle_write_count++;
-		mutex_lock(&g_flash_ops_mutex);
 		rkflash_print_bio("rkflash w sec= %lx, n_sec= %lx\n",
 				  start, nsector);
 		ret = g_boot_ops->write(start, nsector, buf);
-		mutex_unlock(&g_flash_ops_mutex);
 		if (ret)
 			ret = -EIO;
 		break;
@@ -270,139 +248,216 @@
 	return 1;
 }
 
-static int rkflash_blktrans_thread(void *arg)
+static blk_status_t do_blktrans_all_request(struct flash_blk_ops *tr,
+					    struct flash_blk_dev *dev,
+					    struct request *req)
 {
-	struct flash_blk_ops *blk_ops = arg;
-	struct request_queue *rq = blk_ops->rq;
-	struct request *req = NULL;
-	char *buf, *page_buf;
+	unsigned long block, nsect;
+	char *buf = NULL, *page_buf;
 	struct req_iterator rq_iter;
 	struct bio_vec bvec;
-	unsigned long long sector_index = ULLONG_MAX;
+	int ret;
 	unsigned long totle_nsect;
-	int rw_flag = 0;
 
-	spin_lock_irq(rq->queue_lock);
-	while (!blk_ops->quit) {
-		int res;
-		struct flash_blk_dev *dev;
-		DECLARE_WAITQUEUE(wait, current);
+	block = blk_rq_pos(req);
+	nsect = blk_rq_cur_bytes(req) >> 9;
+	totle_nsect = (req->__data_len) >> 9;
 
-		if (!req)
-			req = blk_fetch_request(rq);
-		if (!req) {
-			add_wait_queue(&blk_ops->thread_wq, &wait);
-			set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(rq->queue_lock);
-			rkflash_req_jiffies = HZ / 10;
-			rkflash_flash_gc();
-			wait_event_timeout(blk_ops->thread_wq,
-					   blk_ops->quit || rknand_req_do,
-					   rkflash_req_jiffies);
-			rknand_req_do = 0;
-			spin_lock_irq(rq->queue_lock);
-			remove_wait_queue(&blk_ops->thread_wq, &wait);
-			continue;
-		} else {
-			rkflash_req_jiffies = 1 * HZ;
+	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+	    get_capacity(req->rq_disk))
+		return BLK_STS_IOERR;
+
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
+		rkflash_print_bio("%s discard\n", __func__);
+		if (rkflash_blk_discard(block, nsect))
+			return BLK_STS_IOERR;
+		return BLK_STS_OK;
+	case REQ_OP_READ:
+		rkflash_print_bio("%s read block=%lx nsec=%lx\n", __func__, block, totle_nsect);
+		buf = mtd_read_temp_buffer;
+		rkflash_blk_check_buffer_align(req, &buf);
+		ret = rkflash_blk_xfer(dev,
+				       block,
+				       totle_nsect,
+				       buf,
+				       REQ_OP_READ);
+		if (buf == mtd_read_temp_buffer) {
+			char *p = buf;
+
+			rq_for_each_segment(bvec, req, rq_iter) {
+				page_buf = kmap_atomic(bvec.bv_page);
+				memcpy(page_buf +
+				       bvec.bv_offset,
+				       p,
+				       bvec.bv_len);
+				p += bvec.bv_len;
+				kunmap_atomic(page_buf);
+			}
 		}
 
-		dev = req->rq_disk->private_data;
-		totle_nsect = (req->__data_len) >> 9;
-		sector_index = blk_rq_pos(req);
-		buf = 0;
-		res = 0;
-		rw_flag = req_op(req);
-		if (rw_flag == REQ_OP_DISCARD) {
-			spin_unlock_irq(rq->queue_lock);
-			if (rkflash_blk_discard(blk_rq_pos(req) +
-						dev->off_size, totle_nsect))
-				res = -EIO;
-			spin_lock_irq(rq->queue_lock);
-			if (!__blk_end_request_cur(req, res))
-				req = NULL;
-			continue;
-		} else if (rw_flag == REQ_OP_FLUSH) {
-			if (!__blk_end_request_cur(req, res))
-				req = NULL;
-			continue;
-		} else if (rw_flag == REQ_OP_READ) {
-			buf = mtd_read_temp_buffer;
-			rkflash_blk_check_buffer_align(req, &buf);
-			spin_unlock_irq(rq->queue_lock);
-			res = rkflash_blk_xfer(dev,
-					       sector_index,
-					       totle_nsect,
-					       buf,
-					       rw_flag,
-					       totle_nsect);
-			spin_lock_irq(rq->queue_lock);
-			if (buf == mtd_read_temp_buffer) {
-				char *p = buf;
+		if (ret)
+			return BLK_STS_IOERR;
+		else
+			return BLK_STS_OK;
+	case REQ_OP_WRITE:
+		rkflash_print_bio("%s write block=%lx nsec=%lx\n", __func__, block, totle_nsect);
 
-				rq_for_each_segment(bvec, req, rq_iter) {
-					page_buf = kmap_atomic(bvec.bv_page);
-					memcpy(page_buf +
-					       bvec.bv_offset,
-					       p,
-					       bvec.bv_len);
-					p += bvec.bv_len;
-					kunmap_atomic(page_buf);
-				}
-			}
-		} else if (rw_flag == REQ_OP_WRITE){
-			buf = mtd_read_temp_buffer;
-			rkflash_blk_check_buffer_align(req, &buf);
-			if (buf == mtd_read_temp_buffer) {
-				char *p = buf;
+		buf = mtd_read_temp_buffer;
+		rkflash_blk_check_buffer_align(req, &buf);
+		if (buf == mtd_read_temp_buffer) {
+			char *p = buf;
 
-				rq_for_each_segment(bvec, req, rq_iter) {
-					page_buf = kmap_atomic(bvec.bv_page);
-					memcpy(p,
-					       page_buf +
-					       bvec.bv_offset,
-					       bvec.bv_len);
-					p += bvec.bv_len;
-					kunmap_atomic(page_buf);
-				}
+			rq_for_each_segment(bvec, req, rq_iter) {
+				page_buf = kmap_atomic(bvec.bv_page);
+				memcpy(p,
+				       page_buf +
+				       bvec.bv_offset,
+				       bvec.bv_len);
+				p += bvec.bv_len;
+				kunmap_atomic(page_buf);
 			}
-			spin_unlock_irq(rq->queue_lock);
-			res = rkflash_blk_xfer(dev,
-					       sector_index,
-					       totle_nsect,
-					       buf,
-					       rw_flag,
-					       totle_nsect);
-			spin_lock_irq(rq->queue_lock);
-		} else {
-			pr_err("%s error req flag\n", __func__);
 		}
-		__blk_end_request_all(req, res);
-		req = NULL;
+		ret = rkflash_blk_xfer(dev,
+				       block,
+				       totle_nsect,
+				       buf,
+				       REQ_OP_WRITE);
+
+		if (ret)
+			return BLK_STS_IOERR;
+		else
+			return BLK_STS_OK;
+	default:
+		return BLK_STS_IOERR;
 	}
-	pr_info("flash th quited\n");
-	blk_ops->flash_th_quited = 1;
-	if (req)
-		__blk_end_request_all(req, -EIO);
-	while ((req = blk_fetch_request(rq)) != NULL)
-		__blk_end_request_all(req, -ENODEV);
-	spin_unlock_irq(rq->queue_lock);
-	complete_and_exit(&blk_ops->thread_exit, 0);
-	return 0;
 }
 
-static void rkflash_blk_request(struct request_queue *rq)
+static struct request *rkflash_next_request(struct flash_blk_dev *dev)
 {
-	struct flash_blk_ops *blk_ops = rq->queuedata;
+	struct request *rq;
+	struct flash_blk_ops *tr = dev->blk_ops;
+
+	rq = list_first_entry_or_null(&tr->rq_list, struct request, queuelist);
+	if (rq) {
+		list_del_init(&rq->queuelist);
+		blk_mq_start_request(rq);
+		return rq;
+	}
+
+	return NULL;
+}
+
+static void rkflash_blktrans_work(struct flash_blk_dev *dev)
+	__releases(&dev->blk_ops->queue_lock)
+	__acquires(&dev->blk_ops->queue_lock)
+{
+	struct flash_blk_ops *tr = dev->blk_ops;
 	struct request *req = NULL;
 
-	if (blk_ops->flash_th_quited) {
-		while ((req = blk_fetch_request(rq)) != NULL)
-			__blk_end_request_all(req, -ENODEV);
-		return;
+	while (1) {
+		blk_status_t res;
+
+		req = rkflash_next_request(dev);
+		if (!req)
+			break;
+
+		spin_unlock_irq(&dev->blk_ops->queue_lock);
+
+		mutex_lock(&g_flash_ops_mutex);
+		res = do_blktrans_all_request(tr, dev, req);
+		mutex_unlock(&g_flash_ops_mutex);
+
+		if (!blk_update_request(req, res, req->__data_len)) {
+			__blk_mq_end_request(req, res);
+			req = NULL;
+		}
+
+		spin_lock_irq(&dev->blk_ops->queue_lock);
 	}
-	rknand_req_do = 1;
-	wake_up(&blk_ops->thread_wq);
+}
+
+static blk_status_t rkflash_queue_rq(struct blk_mq_hw_ctx *hctx,
+				     const struct blk_mq_queue_data *bd)
+{
+	struct flash_blk_dev *dev;
+
+	dev = hctx->queue->queuedata;
+	if (!dev) {
+		blk_mq_start_request(bd->rq);
+		return BLK_STS_IOERR;
+	}
+
+	nand_gc_do = 0;
+	spin_lock_irq(&dev->blk_ops->queue_lock);
+	list_add_tail(&bd->rq->queuelist, &dev->blk_ops->rq_list);
+	rkflash_blktrans_work(dev);
+	spin_unlock_irq(&dev->blk_ops->queue_lock);
+
+	/* wake up gc thread */
+	nand_gc_do = 1;
+	wake_up(&nand_gc_thread_wait);
+
+	return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops rkflash_mq_ops = {
+	.queue_rq = rkflash_queue_rq,
+};
+
+static int nand_gc_has_work(void)
+{
+	return nand_gc_do;
+}
+
+static int nand_gc_do_work(void)
+{
+	int ret = nand_gc_has_work();
+
+	/* do garbage collect at idle state */
+	if (ret) {
+		mutex_lock(&g_flash_ops_mutex);
+		ret = g_boot_ops->gc();
+		rkflash_print_bio("%s gc result= %d\n", __func__, ret);
+		mutex_unlock(&g_flash_ops_mutex);
+	}
+
+	return ret;
+}
+
+static void nand_gc_wait_work(void)
+{
+	unsigned long nand_gc_jiffies = HZ / 20;
+
+	if (nand_gc_has_work())
+		wait_event_freezable_timeout(nand_gc_thread_wait,
+					     kthread_should_stop(),
+					     nand_gc_jiffies);
+	else
+		wait_event_freezable(nand_gc_thread_wait,
+				     kthread_should_stop() || nand_gc_has_work());
+}
+
+static int nand_gc_mythread(void *arg)
+{
+	int gc_done_times = 0;
+
+	set_freezable();
+
+	while (!kthread_should_stop()) {
+		if (nand_gc_do_work() == 0) {
+			gc_done_times++;
+			if (gc_done_times > 10)
+				nand_gc_do = 0;
+		} else {
+			gc_done_times = 0;
+		}
+
+		nand_gc_wait_work();
+	}
+	pr_info("nand gc quited\n");
+
+	return 0;
 }
 
 static int rkflash_blk_open(struct block_device *bdev, fmode_t mode)
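
For reference, a minimal sketch of the freezable GC-kthread idle pattern introduced in the hunk above; the sketch_* names are illustrative, not from this driver. Both wait helpers re-check kthread_should_stop(), and kthread_stop() wakes the sleeping task, so teardown cannot hang here:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(sketch_gc_wait);
static unsigned long sketch_gc_pending;	/* set by the I/O path, cleared when idle */

static int sketch_gc_thread(void *arg)
{
	set_freezable();

	while (!kthread_should_stop()) {
		if (sketch_gc_pending) {
			/* run one bounded GC pass here, then poll again shortly */
			wait_event_freezable_timeout(sketch_gc_wait,
						     kthread_should_stop(),
						     HZ / 20);
		} else {
			/* fully idle: sleep until new I/O kicks the queue */
			wait_event_freezable(sketch_gc_wait,
					     kthread_should_stop() ||
					     sketch_gc_pending);
		}
	}

	return 0;
}

/*
 * Usage: start with kthread_run(sketch_gc_thread, NULL, "sketch_gc");
 * the I/O path does "sketch_gc_pending = 1; wake_up(&sketch_gc_wait);"
 * and shutdown calls kthread_stop() on the returned task_struct.
 */
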
@@ -459,18 +514,14 @@
 	.owner = THIS_MODULE,
 };
 
-static int rkflash_blk_add_dev(struct flash_blk_ops *blk_ops,
-			       struct flash_part *part)
+static int rkflash_blk_add_dev(struct flash_blk_dev *dev,
+			       struct flash_blk_ops *blk_ops,
+			       struct flash_part *part)
 {
-	struct flash_blk_dev *dev;
 	struct gendisk *gd;
 
 	if (part->size == 0)
 		return -1;
-
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
 
 	gd = alloc_disk(1 << blk_ops->minorbits);
 	if (!gd) {
@@ -509,7 +560,6 @@
 	gd->private_data = dev;
 	dev->blkcore_priv = gd;
 	gd->queue = blk_ops->rq;
-	gd->queue->bypass_depth = 1;
 
 	if (part->type == PART_NO_ACCESS)
 		dev->disable_access = 1;
@@ -545,37 +595,51 @@
 {
 	int i, ret;
 	u64 offset;
+	struct flash_blk_dev *dev;
 
-	rknand_req_do = 0;
-	blk_ops->quit = 0;
-	blk_ops->flash_th_quited = 0;
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 
 	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
 				       GFP_KERNEL | GFP_DMA);
 
 	ret = register_blkdev(blk_ops->major, blk_ops->name);
-	if (ret)
+	if (ret) {
+		kfree(dev);
+
 		return -1;
-
-	spin_lock_init(&blk_ops->queue_lock);
-	init_completion(&blk_ops->thread_exit);
-	init_waitqueue_head(&blk_ops->thread_wq);
-
-	blk_ops->rq = blk_init_queue(rkflash_blk_request, &blk_ops->queue_lock);
-	if (!blk_ops->rq) {
-		unregister_blkdev(blk_ops->major, blk_ops->name);
-		return -1;
 	}
+
+	/* Create the request queue */
+	spin_lock_init(&blk_ops->queue_lock);
+	INIT_LIST_HEAD(&blk_ops->rq_list);
+
+	blk_ops->tag_set = kzalloc(sizeof(*blk_ops->tag_set), GFP_KERNEL);
+	if (!blk_ops->tag_set)
+		goto error1;
+
+	blk_ops->rq = blk_mq_init_sq_queue(blk_ops->tag_set, &rkflash_mq_ops, 1,
+					   BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+	if (IS_ERR(blk_ops->rq)) {
+		ret = PTR_ERR(blk_ops->rq);
+		blk_ops->rq = NULL;
+		goto error2;
+	}
+
+	blk_ops->rq->queuedata = dev;
 
 	blk_queue_max_hw_sectors(blk_ops->rq, MTD_RW_SECTORS);
 	blk_queue_max_segments(blk_ops->rq, MTD_RW_SECTORS);
 
 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, blk_ops->rq);
 	blk_queue_max_discard_sectors(blk_ops->rq, UINT_MAX >> 9);
+	blk_ops->rq->limits.discard_granularity = 64 << 9;
 
-	blk_ops->rq->queuedata = blk_ops;
+	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
+		nand_gc_thread = kthread_run(nand_gc_mythread, (void *)blk_ops, "rkflash_gc");
+
 	INIT_LIST_HEAD(&blk_ops->devs);
-	kthread_run(rkflash_blktrans_thread, (void *)blk_ops, "rkflash");
 	g_max_part_num = rk_partition_init(disk_array);
 	if (g_max_part_num) {
 		/* partition 0 is save vendor data, need hidden */
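
For reference, a minimal sketch of the blk_mq_init_sq_queue() setup pattern adopted above (the helper exists in the v4.19-era kernels this driver targets); the sketch_* names are illustrative, and the request is completed synchronously inside ->queue_rq(), which BLK_MQ_F_BLOCKING permits:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct sketch_dev {
	struct blk_mq_tag_set *tag_set;
	struct request_queue *rq;
};

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	/* ->queue_rq() may sleep because the queue is created with BLK_MQ_F_BLOCKING */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
	.queue_rq = sketch_queue_rq,
};

static int sketch_init_queue(struct sketch_dev *dev)
{
	dev->tag_set = kzalloc(sizeof(*dev->tag_set), GFP_KERNEL);
	if (!dev->tag_set)
		return -ENOMEM;

	/* one hardware queue, depth 1: requests stay serialized, as with the old kthread */
	dev->rq = blk_mq_init_sq_queue(dev->tag_set, &sketch_mq_ops, 1,
				       BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(dev->rq)) {
		int err = PTR_ERR(dev->rq);

		dev->rq = NULL;
		kfree(dev->tag_set);
		return err;
	}
	dev->rq->queuedata = dev;

	return 0;
}

/* teardown: blk_cleanup_queue(dev->rq); blk_mq_free_tag_set(dev->tag_set); kfree(dev->tag_set); */
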
@@ -587,9 +651,9 @@
 				 offset * 512,
 				 (u64)(offset + disk_array[i].size) * 512,
 				 (u64)disk_array[i].size / 2048);
-			rkflash_blk_add_dev(blk_ops, &disk_array[i]);
+			rkflash_blk_add_dev(dev, blk_ops, &disk_array[i]);
 		}
-		rkflash_blk_add_dev(blk_ops, &fw_header_p);
+		rkflash_blk_add_dev(dev, blk_ops, &fw_header_p);
 	} else {
 		struct flash_part part;
 
@@ -597,20 +661,25 @@
 		part.size = g_boot_ops->get_capacity();
 		part.type = 0;
 		part.name[0] = 0;
-		rkflash_blk_add_dev(&mytr, &part);
+		rkflash_blk_add_dev(dev, blk_ops, &part);
 	}
 	rkflash_blk_create_procfs();
 
 	return 0;
+
+error2:
+	kfree(blk_ops->tag_set);
+error1:
+	unregister_blkdev(blk_ops->major, blk_ops->name);
+	kfree(dev);
+
+	return ret;
 }
 
 static void rkflash_blk_unregister(struct flash_blk_ops *blk_ops)
 {
 	struct list_head *this, *next;
 
-	blk_ops->quit = 1;
-	wake_up(&blk_ops->thread_wq);
-	wait_for_completion(&blk_ops->thread_exit);
 	list_for_each_safe(this, next, &blk_ops->devs) {
 		struct flash_blk_dev *dev =
 			list_entry(this, struct flash_blk_dev, list);
@@ -725,7 +794,6 @@
 	case FLASH_TYPE_NANDC_NAND:
 	default:
 		g_flash_type = type;
-		mytr.quit = 1;
 		ret = rkflash_blk_register(&mytr);
 		pr_err("%s device register as blk dev, ret= %d\n", __func__, ret);
 		if (ret)
@@ -768,11 +836,9 @@
 void rkflash_dev_shutdown(void)
 {
 	pr_info("rkflash_shutdown...\n");
-	if (g_flash_type != -1 && mytr.quit == 0) {
-		mytr.quit = 1;
-		wake_up(&mytr.thread_wq);
-		wait_for_completion(&mytr.thread_exit);
-	}
+	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
+		kthread_stop(nand_gc_thread);
+
 	mutex_lock(&g_flash_ops_mutex);
 	g_boot_ops->deinit();
 	mutex_unlock(&g_flash_ops_mutex);