2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/rk_nand/rk_nand_blk.c
@@ -7,12 +7,15 @@
  * (at your option) any later version.
  */

+#define pr_fmt(fmt) "rk_nand: " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/spinlock.h>
 #include <linux/hdreg.h>
@@ -36,9 +39,6 @@
 #include "rk_nand_blk.h"
 #include "rk_ftl_api.h"

-static struct nand_part disk_array[MAX_PART_COUNT];
-static int g_max_part_num = 4;
-
 #define PART_READONLY 0x85
 #define PART_WRITEONLY 0x86
 #define PART_NO_ACCESS 0x87
@@ -48,10 +48,17 @@
 static unsigned long totle_read_count;
 static unsigned long totle_write_count;
 static int rk_nand_dev_initialised;
+static unsigned long rk_ftl_gc_do;
+static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
+static unsigned long rk_ftl_gc_jiffies;

 static char *mtd_read_temp_buffer;
 #define MTD_RW_SECTORS (512)

+#define DISABLE_WRITE _IO('V', 0)
+#define ENABLE_WRITE _IO('V', 1)
+#define DISABLE_READ _IO('V', 2)
+#define ENABLE_READ _IO('V', 3)
 static int rknand_proc_show(struct seq_file *m, void *v)
 {
 	m->count = rknand_proc_ftlread(m->buf);
@@ -62,43 +69,16 @@
 	return 0;
 }

-static int rknand_mtd_proc_show(struct seq_file *m, void *v)
-{
-	int i;
-
-	seq_printf(m, "%s", "dev: size erasesize name\n");
-	for (i = 0; i < g_max_part_num; i++) {
-		seq_printf(m, "rknand%d: %8.8llx %8.8x \"%s\"\n", i,
-			   (unsigned long long)disk_array[i].size * 512,
-			   32 * 0x200, disk_array[i].name);
-	}
-	return 0;
-}
-
 static int rknand_proc_open(struct inode *inode, struct file *file)
 {
 	return single_open(file, rknand_proc_show, PDE_DATA(inode));
 }

-static int rknand_mtd_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, rknand_mtd_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations rknand_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = rknand_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static const struct file_operations rknand_mtd_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = rknand_mtd_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
+static const struct proc_ops rknand_proc_fops = {
+	.proc_open = rknand_proc_open,
+	.proc_read = seq_read,
+	.proc_lseek = seq_lseek,
+	.proc_release = single_release,
 };

 static int rknand_create_procfs(void)
@@ -110,10 +90,6 @@
 	if (!ent)
 		return -1;

-	ent = proc_create_data("mtd", 0444, NULL, &rknand_mtd_proc_fops,
-			       (void *)0);
-	if (!ent)
-		return -1;
 	return 0;
 }

@@ -143,8 +119,7 @@
 			    unsigned long start,
 			    unsigned long nsector,
 			    char *buf,
-			    int cmd,
-			    int totle_nsec)
+			    int cmd)
 {
 	int ret;

@@ -155,7 +130,6 @@
 	}

 	start += dev->off_size;
-	rknand_device_lock();

 	switch (cmd) {
 	case READ:
@@ -179,22 +153,7 @@
 		break;
 	}

-	rknand_device_unlock();
 	return ret;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
-static void rk_ftl_gc_timeout_hack(struct timer_list *unused);
-static DEFINE_TIMER(rk_ftl_gc_timeout, rk_ftl_gc_timeout_hack);
-static unsigned long rk_ftl_gc_jiffies;
-static unsigned long rk_ftl_gc_do;
-
-static void rk_ftl_gc_timeout_hack(struct timer_list *unused)
-{
-	del_timer(&rk_ftl_gc_timeout);
-	rk_ftl_gc_do++;
-	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies * rk_ftl_gc_do;
-	add_timer(&rk_ftl_gc_timeout);
 }

 static int req_check_buffer_align(struct request *req, char **pbuf)
@@ -225,284 +184,201 @@
 	return 1;
 }

-static int nand_blktrans_thread(void *arg)
+static blk_status_t do_blktrans_all_request(struct nand_blk_dev *dev,
+					    struct request *req)
 {
-	struct nand_blk_ops *nandr = arg;
-	struct request_queue *rq = nandr->rq;
-	struct request *req = NULL;
-	int ftl_gc_status = 0;
-	char *buf, *page_buf;
+	unsigned long block, nsect;
+	char *buf = NULL, *page_buf;
 	struct req_iterator rq_iter;
 	struct bio_vec bvec;
-	unsigned long long sector_index = ULLONG_MAX;
+	int ret = BLK_STS_IOERR;
 	unsigned long totle_nsect;
-	int rw_flag = 0;
-	int req_empty_times = 0;
-	int op;

-	spin_lock_irq(rq->queue_lock);
-	rk_ftl_gc_jiffies = HZ / 10; /* do garbage collect after 100ms */
+	block = blk_rq_pos(req);
+	nsect = blk_rq_cur_bytes(req) >> 9;
+	totle_nsect = (req->__data_len) >> 9;
+
+	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
+		return BLK_STS_IOERR;
+
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
+		if (FtlDiscard(block, nsect))
+			return BLK_STS_IOERR;
+		return BLK_STS_OK;
+	case REQ_OP_READ:
+		buf = mtd_read_temp_buffer;
+		req_check_buffer_align(req, &buf);
+		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_READ);
+		if (buf == mtd_read_temp_buffer) {
+			char *p = buf;
+
+			rq_for_each_segment(bvec, req, rq_iter) {
+				page_buf = kmap_atomic(bvec.bv_page);
+
+				memcpy(page_buf + bvec.bv_offset, p, bvec.bv_len);
+				p += bvec.bv_len;
+				kunmap_atomic(page_buf);
+			}
+		}
+
+		if (ret)
+			return BLK_STS_IOERR;
+		else
+			return BLK_STS_OK;
+	case REQ_OP_WRITE:
+		buf = mtd_read_temp_buffer;
+		req_check_buffer_align(req, &buf);
+
+		if (buf == mtd_read_temp_buffer) {
+			char *p = buf;
+
+			rq_for_each_segment(bvec, req, rq_iter) {
+				page_buf = kmap_atomic(bvec.bv_page);
+				memcpy(p, page_buf + bvec.bv_offset, bvec.bv_len);
+				p += bvec.bv_len;
+				kunmap_atomic(page_buf);
+			}
+		}
+
+		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_WRITE);
+
+		if (ret)
+			return BLK_STS_IOERR;
+		else
+			return BLK_STS_OK;
+
+	default:
+		return BLK_STS_IOERR;
+	}
+}
+
+static struct request *rk_nand_next_request(struct nand_blk_dev *dev)
+{
+	struct nand_blk_ops *nand_ops = dev->nand_ops;
+	struct request *rq;
+
+	rq = list_first_entry_or_null(&nand_ops->rq_list, struct request, queuelist);
+	if (rq) {
+		list_del_init(&rq->queuelist);
+		blk_mq_start_request(rq);
+		return rq;
+	}
+
+	return NULL;
+}
+
+static void rk_nand_blktrans_work(struct nand_blk_dev *dev)
+	__releases(&dev->nand_ops->queue_lock)
+	__acquires(&dev->nand_ops->queue_lock)
+{
+	struct request *req = NULL;
+
+	while (1) {
+		blk_status_t res;
+
+		req = rk_nand_next_request(dev);
+		if (!req)
+			break;
+
+		spin_unlock_irq(&dev->nand_ops->queue_lock);
+
+		rknand_device_lock();
+		res = do_blktrans_all_request(dev, req);
+		rknand_device_unlock();
+
+		if (!blk_update_request(req, res, req->__data_len)) {
+			__blk_mq_end_request(req, res);
+			req = NULL;
+		}
+
+		spin_lock_irq(&dev->nand_ops->queue_lock);
+	}
+}
+
+static blk_status_t rk_nand_queue_rq(struct blk_mq_hw_ctx *hctx,
+				     const struct blk_mq_queue_data *bd)
+{
+	struct nand_blk_dev *dev;
+
+	dev = hctx->queue->queuedata;
+	if (!dev) {
+		blk_mq_start_request(bd->rq);
+		return BLK_STS_IOERR;
+	}
+
 	rk_ftl_gc_do = 0;
-	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies;
-	add_timer(&rk_ftl_gc_timeout);
+	spin_lock_irq(&dev->nand_ops->queue_lock);
+	list_add_tail(&bd->rq->queuelist, &dev->nand_ops->rq_list);
+	rk_nand_blktrans_work(dev);
+	spin_unlock_irq(&dev->nand_ops->queue_lock);

-	while (!nandr->quit) {
-		int res;
-		struct nand_blk_dev *dev;
+	/* wake up gc thread */
+	rk_ftl_gc_do = 1;
+	wake_up(&dev->nand_ops->thread_wq);
+
+	return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops rk_nand_mq_ops = {
+	.queue_rq = rk_nand_queue_rq,
+};
+
+static int nand_gc_thread(void *arg)
+{
+	struct nand_blk_ops *nand_ops = arg;
+	int ftl_gc_status = 0;
+	int req_empty_times = 0;
+	int gc_done_times = 0;
+
+	rk_ftl_gc_jiffies = HZ / 10;
+	rk_ftl_gc_do = 1;
+
+	while (!nand_ops->quit) {
 		DECLARE_WAITQUEUE(wait, current);

-		if (!req)
-			req = blk_fetch_request(rq);
-		if (!req) {
-			add_wait_queue(&nandr->thread_wq, &wait);
-			set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(rq->queue_lock);
+		add_wait_queue(&nand_ops->thread_wq, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (rk_ftl_gc_do) {
+			/* do garbage collect at idle state */
 			if (rknand_device_trylock()) {
 				ftl_gc_status = rk_ftl_garbage_collect(1, 0);
 				rknand_device_unlock();
 				rk_ftl_gc_jiffies = HZ / 50;
-				if (ftl_gc_status == 0)
-					rk_ftl_gc_jiffies = 1 * HZ;
-
+				if (ftl_gc_status == 0) {
+					gc_done_times++;
+					if (gc_done_times > 10)
+						rk_ftl_gc_jiffies = 10 * HZ;
+					else
+						rk_ftl_gc_jiffies = 1 * HZ;
+				} else {
+					gc_done_times = 0;
+				}
 			} else {
-				rk_ftl_gc_jiffies = HZ / 50;
+				rk_ftl_gc_jiffies = 1 * HZ;
 			}
 			req_empty_times++;
 			if (req_empty_times < 10)
 				rk_ftl_gc_jiffies = HZ / 50;
-			/* 100ms cache write back */
+			/* cache write back after 100ms */
 			if (req_empty_times >= 5 && req_empty_times < 7) {
 				rknand_device_lock();
 				rk_ftl_cache_write_back();
 				rknand_device_unlock();
 			}
-			wait_event_timeout(nandr->thread_wq,
-					   rk_ftl_gc_do || nandr->quit,
-					   rk_ftl_gc_jiffies);
-			rk_ftl_gc_do = 0;
-			spin_lock_irq(rq->queue_lock);
-			remove_wait_queue(&nandr->thread_wq, &wait);
-			continue;
 		} else {
-			rk_ftl_gc_jiffies = 1 * HZ;
 			req_empty_times = 0;
+			rk_ftl_gc_jiffies = 1 * HZ;
 		}
-
-		dev = req->rq_disk->private_data;
-		totle_nsect = (req->__data_len) >> 9;
-		sector_index = blk_rq_pos(req);
-		buf = 0;
-		res = 0;
-
-		op = req_op(req);
-		if (op == REQ_OP_DISCARD) {
-			spin_unlock_irq(rq->queue_lock);
-			rknand_device_lock();
-			if (FtlDiscard(blk_rq_pos(req) +
-				       dev->off_size, totle_nsect))
-				res = BLK_STS_IOERR;
-			rknand_device_unlock();
-			spin_lock_irq(rq->queue_lock);
-			if (!__blk_end_request_cur(req, res))
-				req = NULL;
-			continue;
-		} else if (op == REQ_OP_FLUSH) {
-			spin_unlock_irq(rq->queue_lock);
-			rknand_device_lock();
-			rk_ftl_cache_write_back();
-			rknand_device_unlock();
-			spin_lock_irq(rq->queue_lock);
-			if (!__blk_end_request_cur(req, res))
-				req = NULL;
-			continue;
-		} else if (op == REQ_OP_READ) {
-			rw_flag = READ;
-		} else if (op == REQ_OP_WRITE) {
-			rw_flag = WRITE;
-		} else {
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			req = NULL;
-			continue;
-		}
-
-		if (mtd_read_temp_buffer) {
-			buf = mtd_read_temp_buffer;
-			req_check_buffer_align(req, &buf);
-
-			if (rw_flag == WRITE && buf == mtd_read_temp_buffer) {
-				char *p = buf;
-
-				rq_for_each_segment(bvec, req, rq_iter) {
-					page_buf = kmap_atomic(bvec.bv_page);
-					memcpy(p,
-					       page_buf + bvec.bv_offset,
-					       bvec.bv_len);
-					p += bvec.bv_len;
-					kunmap_atomic(page_buf);
-				}
-			}
-
-			spin_unlock_irq(rq->queue_lock);
-			res = nand_dev_transfer(dev,
-						sector_index,
-						totle_nsect,
-						buf,
-						rw_flag,
-						totle_nsect);
-			spin_lock_irq(rq->queue_lock);
-
-			if (rw_flag == READ && buf == mtd_read_temp_buffer) {
-				char *p = buf;
-
-				rq_for_each_segment(bvec, req, rq_iter) {
-					page_buf = kmap_atomic(bvec.bv_page);
-
-					memcpy(page_buf + bvec.bv_offset,
-					       p,
-					       bvec.bv_len);
-					p += bvec.bv_len;
-					kunmap_atomic(page_buf);
-				}
-			}
-			__blk_end_request_all(req, res);
-			req = NULL;
-		} else {
-			while (req) {
-				sector_index = blk_rq_pos(req);
-				totle_nsect = blk_rq_cur_bytes(req);
-				buf = kmap_atomic(bio_page(req->bio)) +
-				      bio_offset(req->bio);
-				spin_unlock_irq(rq->queue_lock);
-				res = nand_dev_transfer(dev,
-							sector_index,
-							totle_nsect,
-							buf,
-							rw_flag,
-							totle_nsect);
-				spin_lock_irq(rq->queue_lock);
-				kunmap_atomic(buf);
-				if (!__blk_end_request_cur(req, res))
-					req = NULL;
-			}
-		}
+		wait_event_timeout(nand_ops->thread_wq, nand_ops->quit,
+				   rk_ftl_gc_jiffies);
+		remove_wait_queue(&nand_ops->thread_wq, &wait);
+		continue;
 	}
-	pr_info("nand th quited\n");
-	nandr->nand_th_quited = 1;
-	if (req)
-		__blk_end_request_all(req, BLK_STS_IOERR);
-	rk_nand_schedule_enable_config(0);
-	while ((req = blk_fetch_request(rq)) != NULL)
-		__blk_end_request_all(req, BLK_STS_IOERR);
-	spin_unlock_irq(rq->queue_lock);
-	complete_and_exit(&nandr->thread_exit, 0);
-	return 0;
-}
-
-static void nand_blk_request(struct request_queue *rq)
-{
-	struct nand_blk_ops *nandr = rq->queuedata;
-	struct request *req = NULL;
-
-	if (nandr->nand_th_quited) {
-		while ((req = blk_fetch_request(rq)) != NULL)
-			__blk_end_request_all(req, BLK_STS_IOERR);
-		return;
-	}
-	rk_ftl_gc_do = 1;
-	wake_up(&nandr->thread_wq);
-}
-
-static int rknand_get_part(char *parts,
-			   struct nand_part *this_part,
-			   int *part_index)
-{
-	char delim;
-	unsigned int mask_flags;
-	unsigned long long size, offset = ULLONG_MAX;
-	char name[40] = "\0";
-
-	if (*parts == '-') {
-		size = ULLONG_MAX;
-		parts++;
-	} else {
-		size = memparse(parts, &parts);
-	}
-
-	if (*parts == '@') {
-		parts++;
-		offset = memparse(parts, &parts);
-	}
-
-	mask_flags = 0;
-	delim = 0;
-
-	if (*parts == '(')
-		delim = ')';
-
-	if (delim) {
-		char *p;
-
-		p = strchr(parts + 1, delim);
-		if (!p)
-			return 0;
-		strncpy(name, parts + 1, p - (parts + 1));
-		parts = p + 1;
-	}
-
-	if (strncmp(parts, "ro", 2) == 0) {
-		mask_flags = PART_READONLY;
-		parts += 2;
-	}
-
-	if (strncmp(parts, "wo", 2) == 0) {
-		mask_flags = PART_WRITEONLY;
-		parts += 2;
-	}
-
-	this_part->size = (unsigned long)size;
-	this_part->offset = (unsigned long)offset;
-	this_part->type = mask_flags;
-	sprintf(this_part->name, "%s", name);
-
-	if ((++(*part_index) < MAX_PART_COUNT) && (*parts == ','))
-		rknand_get_part(++parts, this_part + 1, part_index);
-
-	return 1;
-}
-
-static int nand_prase_cmdline_part(struct nand_part *pdisk_part)
-{
-	char *pbuf;
-	int part_num = 0, i;
-	unsigned int cap_size = rk_ftl_get_capacity();
-	char *cmdline;
-
-	cmdline = strstr(saved_command_line, "mtdparts=");
-	if (!cmdline)
-		return 0;
-	cmdline += 9;
-	if (!memcmp(cmdline, "rk29xxnand:", strlen("rk29xxnand:"))) {
-		pbuf = cmdline + strlen("rk29xxnand:");
-		rknand_get_part(pbuf, pdisk_part, &part_num);
-		if (part_num)
-			pdisk_part[part_num - 1].size = cap_size -
-				pdisk_part[part_num - 1].offset;
-
-		for (i = 0; i < part_num; i++) {
-			if (pdisk_part[i].size + pdisk_part[i].offset
-			    > cap_size) {
-				pdisk_part[i].size = cap_size -
-					pdisk_part[i].offset;
-				pr_err("partition error....max cap:%x\n",
-				       cap_size);
-				if (!pdisk_part[i].size)
-					return i;
-				else
-					return (i + 1);
-			}
-		}
-		return part_num;
-	}
+	pr_info("nand gc quited\n");
+	nand_ops->nand_th_quited = 1;
+	complete_and_exit(&nand_ops->thread_exit, 0);
 	return 0;
 }

@@ -515,10 +391,6 @@
 {
 };

-#define DISABLE_WRITE _IO('V', 0)
-#define ENABLE_WRITE _IO('V', 1)
-#define DISABLE_READ _IO('V', 2)
-#define ENABLE_READ _IO('V', 3)
 static int rknand_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd,
 			unsigned long arg)
@@ -564,7 +436,7 @@
 	.owner = THIS_MODULE,
 };

-static int nand_add_dev(struct nand_blk_ops *nandr, struct nand_part *part)
+static int nand_add_dev(struct nand_blk_ops *nand_ops, struct nand_part *part)
 {
 	struct nand_blk_dev *dev;
 	struct gendisk *gd;
@@ -576,45 +448,37 @@
 	if (!dev)
 		return -ENOMEM;

-	gd = alloc_disk(1 << nandr->minorbits);
+	gd = alloc_disk(1 << nand_ops->minorbits);
 	if (!gd) {
 		kfree(dev);
 		return -ENOMEM;
 	}
-
-	dev->nandr = nandr;
+	nand_ops->rq->queuedata = dev;
+	dev->nand_ops = nand_ops;
 	dev->size = part->size;
 	dev->off_size = part->offset;
-	dev->devnum = nandr->last_dev_index;
-	list_add_tail(&dev->list, &nandr->devs);
-	nandr->last_dev_index++;
+	dev->devnum = nand_ops->last_dev_index;
+	list_add_tail(&dev->list, &nand_ops->devs);
+	nand_ops->last_dev_index++;

-	gd->major = nandr->major;
-	gd->first_minor = (dev->devnum) << nandr->minorbits;
+	gd->major = nand_ops->major;
+	gd->first_minor = (dev->devnum) << nand_ops->minorbits;

 	gd->fops = &nand_blktrans_ops;

-	if (part->name[0]) {
-		snprintf(gd->disk_name,
-			 sizeof(gd->disk_name),
-			 "%s_%s",
-			 nandr->name,
-			 part->name);
-	} else {
-		gd->flags = GENHD_FL_EXT_DEVT;
-		gd->minors = 255;
-		snprintf(gd->disk_name,
-			 sizeof(gd->disk_name),
-			 "%s%d",
-			 nandr->name,
-			 dev->devnum);
-	}
+	gd->flags = GENHD_FL_EXT_DEVT;
+	gd->minors = 255;
+	snprintf(gd->disk_name,
+		 sizeof(gd->disk_name),
+		 "%s%d",
+		 nand_ops->name,
+		 dev->devnum);
+
 	set_capacity(gd, dev->size);

 	gd->private_data = dev;
 	dev->blkcore_priv = gd;
-	gd->queue = nandr->rq;
-	gd->queue->bypass_depth = 1;
+	gd->queue = nand_ops->rq;

 	if (part->type == PART_NO_ACCESS)
 		dev->disable_access = 1;
@@ -628,10 +492,7 @@
 	if (dev->readonly)
 		set_disk_ro(gd, 1);

-	if (gd->flags != GENHD_FL_EXT_DEVT)
-		add_disk(gd);
-	else
-		device_add_disk(g_nand_device, gd);
+	device_add_disk(g_nand_device, gd, NULL);

 	return 0;
 }
@@ -646,81 +507,68 @@
 	del_gendisk(gd);
 	put_disk(gd);
 	kfree(dev);
+
 	return 0;
 }

-int nand_blk_add_whole_disk(void)
+static int nand_blk_register(struct nand_blk_ops *nand_ops)
 {
 	struct nand_part part;
-
-	part.offset = 0;
-	part.size = rk_ftl_get_capacity();
-	part.type = 0;
-	strncpy(part.name, "rknand", sizeof(part.name));
-	nand_add_dev(&mytr, &part);
-	return 0;
-}
-
-static int nand_blk_register(struct nand_blk_ops *nandr)
-{
-	int i, ret;
-	u32 part_size;
-	struct nand_part part;
+	int ret;

 	rk_nand_schedule_enable_config(1);
-	nandr->quit = 0;
-	nandr->nand_th_quited = 0;
+	nand_ops->quit = 0;
+	nand_ops->nand_th_quited = 0;

-	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
-				       GFP_KERNEL | GFP_DMA);
-
-	ret = register_blkdev(nandr->major, nandr->name);
+	ret = register_blkdev(nand_ops->major, nand_ops->name);
 	if (ret)
-		return -1;
+		return ret;

-	spin_lock_init(&nandr->queue_lock);
-	init_completion(&nandr->thread_exit);
-	init_waitqueue_head(&nandr->thread_wq);
-	rknand_device_lock_init();
-
-	nandr->rq = blk_init_queue(nand_blk_request, &nandr->queue_lock);
-	if (!nandr->rq) {
-		unregister_blkdev(nandr->major, nandr->name);
-		return -1;
+	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512, GFP_KERNEL | GFP_DMA);
+	if (!mtd_read_temp_buffer) {
+		ret = -ENOMEM;
+		goto mtd_buffer_error;
 	}

-	blk_queue_max_hw_sectors(nandr->rq, MTD_RW_SECTORS);
-	blk_queue_max_segments(nandr->rq, MTD_RW_SECTORS);
+	init_completion(&nand_ops->thread_exit);
+	init_waitqueue_head(&nand_ops->thread_wq);
+	rknand_device_lock_init();

-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nandr->rq);
-	blk_queue_max_discard_sectors(nandr->rq, UINT_MAX >> 9);
+	/* Create the request queue */
+	spin_lock_init(&nand_ops->queue_lock);
+	INIT_LIST_HEAD(&nand_ops->rq_list);
+
+	nand_ops->tag_set = kzalloc(sizeof(*nand_ops->tag_set), GFP_KERNEL);
+	if (!nand_ops->tag_set) {
+		ret = -ENOMEM;
+		goto tag_set_error;
+	}
+
+	nand_ops->rq = blk_mq_init_sq_queue(nand_ops->tag_set, &rk_nand_mq_ops, 1,
+					    BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+	if (IS_ERR(nand_ops->rq)) {
+		ret = PTR_ERR(nand_ops->rq);
+		nand_ops->rq = NULL;
+		goto rq_init_error;
+	}
+
+	blk_queue_max_hw_sectors(nand_ops->rq, MTD_RW_SECTORS);
+	blk_queue_max_segments(nand_ops->rq, MTD_RW_SECTORS);
+
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nand_ops->rq);
+	blk_queue_max_discard_sectors(nand_ops->rq, UINT_MAX >> 9);
 	/* discard_granularity config to one nand page size 32KB*/
-	nandr->rq->limits.discard_granularity = 64 << 9;
+	nand_ops->rq->limits.discard_granularity = 64 << 9;

-	nandr->rq->queuedata = nandr;
-	INIT_LIST_HEAD(&nandr->devs);
-	kthread_run(nand_blktrans_thread, (void *)nandr, "rknand");
+	INIT_LIST_HEAD(&nand_ops->devs);
+	kthread_run(nand_gc_thread, (void *)nand_ops, "rknand_gc");

-	g_max_part_num = nand_prase_cmdline_part(disk_array);
-
-	nandr->last_dev_index = 0;
+	nand_ops->last_dev_index = 0;
 	part.offset = 0;
 	part.size = rk_ftl_get_capacity();
 	part.type = 0;
 	part.name[0] = 0;
-	nand_add_dev(&mytr, &part);
-
-	if (g_max_part_num) {
-		for (i = 0; i < g_max_part_num; i++) {
-			part_size = (disk_array[i].offset + disk_array[i].size);
-			pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
-				disk_array[i].name,
-				(u64)disk_array[i].offset * 512,
-				(u64)part_size * 512,
-				(u64)disk_array[i].size / 2048);
-			nand_add_dev(nandr, &disk_array[i]);
-		}
-	}
+	nand_add_dev(nand_ops, &part);

 	rknand_create_procfs();
 	rk_ftl_storage_sys_init();
@@ -735,25 +583,35 @@
 	}

 	return 0;
+
+rq_init_error:
+	kfree(nand_ops->tag_set);
+tag_set_error:
+	kfree(mtd_read_temp_buffer);
+	mtd_read_temp_buffer = NULL;
+mtd_buffer_error:
+	unregister_blkdev(nand_ops->major, nand_ops->name);
+
+	return ret;
 }

-static void nand_blk_unregister(struct nand_blk_ops *nandr)
+static void nand_blk_unregister(struct nand_blk_ops *nand_ops)
 {
 	struct list_head *this, *next;

 	if (!rk_nand_dev_initialised)
 		return;
-	nandr->quit = 1;
-	wake_up(&nandr->thread_wq);
-	wait_for_completion(&nandr->thread_exit);
-	list_for_each_safe(this, next, &nandr->devs) {
+	nand_ops->quit = 1;
+	wake_up(&nand_ops->thread_wq);
+	wait_for_completion(&nand_ops->thread_exit);
+	list_for_each_safe(this, next, &nand_ops->devs) {
 		struct nand_blk_dev *dev
 			= list_entry(this, struct nand_blk_dev, list);

 		nand_remove_dev(dev);
 	}
-	blk_cleanup_queue(nandr->rq);
-	unregister_blkdev(nandr->major, nandr->name);
+	blk_cleanup_queue(nand_ops->rq);
+	unregister_blkdev(nand_ops->major, nand_ops->name);
 }

 void rknand_dev_flush(void)