forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Intel Corporation
+ * Copyright (C) 2017,2020 Intel Corporation
  *
  * Based partially on Intel IPU4 driver written by
  *  Sakari Ailus <sakari.ailus@linux.intel.com>
@@ -9,13 +9,14 @@
  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
  *  Antti Laakso <antti.laakso@intel.com>
  * et al.
- *
  */

 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/vmalloc.h>
@@ -96,12 +97,12 @@
 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
 {
        if (cio2->dummy_lop) {
-               dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+               dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
                                  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
                cio2->dummy_lop = NULL;
        }
        if (cio2->dummy_page) {
-               dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+               dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
                                  cio2->dummy_page, cio2->dummy_page_bus_addr);
                cio2->dummy_page = NULL;
        }
@@ -111,12 +112,10 @@
 {
        unsigned int i;

-       cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
-                                             CIO2_PAGE_SIZE,
+       cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
                                              &cio2->dummy_page_bus_addr,
                                              GFP_KERNEL);
-       cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
-                                            CIO2_PAGE_SIZE,
+       cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
                                             &cio2->dummy_lop_bus_addr,
                                             GFP_KERNEL);
        if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -127,8 +126,8 @@
         * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
         * Initialize each entry to dummy_page bus base address.
         */
-       for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
-               cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+       for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+               cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);

        return 0;
 }
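
Note: PFN_DOWN() comes from <linux/pfn.h>, newly included above, and is simply a
right shift by PAGE_SHIFT, so these conversions are behavior-preserving. For
reference, the two helpers as include/linux/pfn.h defines them:

    #define PFN_UP(x)      (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)

    /* hence the following two statements are equivalent: */
    cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
    cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);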
@@ -160,12 +159,11 @@
        unsigned int i;

        entry[0].first_entry.first_page_offset = 0;
-       entry[1].second_entry.num_of_pages =
-               CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
-       entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
+       entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+       entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;

        for (i = 0; i < CIO2_MAX_LOPS; i++)
-               entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+               entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

        cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -182,26 +180,24 @@

        entry[0].first_entry.first_page_offset = b->offset;
        remaining = length + entry[0].first_entry.first_page_offset;
-       entry[1].second_entry.num_of_pages =
-               DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
+       entry[1].second_entry.num_of_pages = PFN_UP(remaining);
        /*
         * last_page_available_bytes has the offset of the last byte in the
         * last page which is still accessible by DMA. DMA cannot access
         * beyond this point. Valid range for this is from 0 to 4095.
         * 0 indicates 1st byte in the page is DMA accessible.
-        * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
+        * 4095 (PAGE_SIZE - 1) means every single byte in the last page
         * is available for DMA transfer.
         */
        entry[1].second_entry.last_page_available_bytes =
                        (remaining & ~PAGE_MASK) ?
-                               (remaining & ~PAGE_MASK) - 1 :
-                               CIO2_PAGE_SIZE - 1;
+                               (remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
        /* Fill FBPT */
        remaining = length;
        i = 0;
        while (remaining > 0) {
-               entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
-               remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
+               entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+               remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
                entry++;
                i++;
        }
@@ -209,7 +205,7 @@
        /*
         * The first not meaningful FBPT entry should point to a valid LOP
         */
-       entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+       entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

        cio2_fbpt_entry_enable(cio2, entry);
 }
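
For concreteness, a worked example of the last_page_available_bytes computation
above, assuming 4 KiB pages and a zero first_page_offset:

    /*
     * length = 10000           =>  remaining = 10000
     * num_of_pages             =   PFN_UP(10000) = 3
     * remaining & ~PAGE_MASK   =   10000 % 4096  = 1808   (non-zero)
     * last_page_available_bytes = 1808 - 1 = 1807
     *
     * i.e. bytes 0..1807 of the third page are DMA-accessible; a
     * page-aligned length takes the PAGE_SIZE - 1 == 4095 branch instead.
     */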
@@ -222,8 +218,6 @@
                             GFP_KERNEL);
        if (!q->fbpt)
                return -ENOMEM;
-
-       memset(q->fbpt, 0, CIO2_FBPT_SIZE);

        return 0;
 }
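
The dropped memset() relies on dma_alloc_coherent() handing back already-zeroed
memory, which recent kernels guarantee for all dma_alloc_* allocations; assuming
that guarantee, a sketch of the allocation a few lines above this hunk needs no
explicit clear:

    q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
                                 GFP_KERNEL);   /* returned memory is zeroed */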
@@ -266,7 +260,7 @@
  */

 /*
- * shift for keeping value range suitable for 32-bit integer arithmetics
+ * shift for keeping value range suitable for 32-bit integer arithmetic
  */
 #define LIMIT_SHIFT    8

@@ -297,7 +291,7 @@
                               struct cio2_csi2_timing *timing)
 {
        struct device *dev = &cio2->pci_dev->dev;
-       struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
+       struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
        struct v4l2_ctrl *link_freq;
        s64 freq;
        int r;
@@ -361,7 +355,7 @@
        void __iomem *const base = cio2->base;
        u8 lanes, csi2bus = q->csi2.port;
        u8 sensor_vc = SENSOR_VIR_CH_DFLT;
-       struct cio2_csi2_timing timing;
+       struct cio2_csi2_timing timing = { 0 };
        int i, r;

        fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
@@ -477,8 +471,7 @@
        }

        /* Enable DMA */
-       writel(q->fbpt_bus_addr >> PAGE_SHIFT,
-              base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+       writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

        writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
               FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
@@ -514,8 +507,10 @@

 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-       void __iomem *base = cio2->base;
-       unsigned int i, maxloops = 1000;
+       void __iomem *const base = cio2->base;
+       unsigned int i;
+       u32 value;
+       int ret;

        /* Disable CSI receiver and MIPI backend devices */
        writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -525,13 +520,10 @@

        /* Halt DMA */
        writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
-       do {
-               if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
-                   CIO2_CDMAC0_DMA_HALTED)
-                       break;
-               usleep_range(1000, 2000);
-       } while (--maxloops);
-       if (!maxloops)
+       ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+                                value, value & CIO2_CDMAC0_DMA_HALTED,
+                                4000, 2000000);
+       if (ret)
                dev_err(&cio2->pci_dev->dev,
                        "DMA %i can not be halted\n", CIO2_DMA_CHAN);

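
readl_poll_timeout() from <linux/iopoll.h> (newly included at the top of the
file) encapsulates exactly this kind of open-coded poll loop. The general
pattern, with hypothetical REG and BIT_DONE names standing in for the real
register and flag:

    u32 val;
    int ret;

    /* read REG every ~4 ms until BIT_DONE is set, giving up after 2 s */
    ret = readl_poll_timeout(base + REG,        /* MMIO address to read    */
                             val,               /* receives each readout   */
                             val & BIT_DONE,    /* loop-exit condition     */
                             4000,              /* sleep between reads, us */
                             2000000);          /* total timeout, us       */
    if (ret)    /* -ETIMEDOUT if the condition never held */
            dev_err(dev, "timed out waiting for BIT_DONE\n");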
@@ -547,7 +539,7 @@
 {
        struct device *dev = &cio2->pci_dev->dev;
        struct cio2_queue *q = cio2->cur_queue;
-       int buffers_found = 0;
+       struct cio2_fbpt_entry *entry;
        u64 ns = ktime_get_ns();

        if (dma_chan >= CIO2_QUEUES) {
@@ -555,14 +547,17 @@
                return;
        }

+       entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+       if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+               dev_warn(&cio2->pci_dev->dev,
+                        "no ready buffers found on DMA channel %u\n",
+                        dma_chan);
+               return;
+       }
+
        /* Find out which buffer(s) are ready */
        do {
-               struct cio2_fbpt_entry *const entry =
-                       &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
                struct cio2_buffer *b;
-
-               if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
-                       break;

                b = q->bufs[q->bufs_first];
                if (b) {
@@ -585,13 +580,8 @@
                atomic_inc(&q->frame_sequence);
                cio2_fbpt_entry_init_dummy(cio2, entry);
                q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
-               buffers_found++;
-       } while (1);
-
-       if (buffers_found == 0)
-               dev_warn(&cio2->pci_dev->dev,
-                        "no ready buffers found on DMA channel %u\n",
-                        dma_chan);
+               entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+       } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
 }

 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
@@ -844,13 +834,11 @@
        struct device *dev = &cio2->pci_dev->dev;
        struct cio2_buffer *b =
                container_of(vb, struct cio2_buffer, vbb.vb2_buf);
-       static const unsigned int entries_per_page =
-               CIO2_PAGE_SIZE / sizeof(u32);
-       unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
-       unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
+       unsigned int pages = PFN_UP(vb->planes[0].length);
+       unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
        struct sg_table *sg;
-       struct sg_page_iter sg_iter;
-       int i, j;
+       struct sg_dma_page_iter sg_iter;
+       unsigned int i, j;

        if (lops <= 0 || lops > CIO2_MAX_LOPS) {
                dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
@@ -861,7 +849,7 @@
        memset(b->lop, 0, sizeof(b->lop));
        /* Allocate LOP table */
        for (i = 0; i < lops; i++) {
-               b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
+               b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
                                               &b->lop_bus_addr[i], GFP_KERNEL);
                if (!b->lop[i])
                        goto fail;
@@ -876,23 +864,22 @@
        b->offset = sg->sgl->offset;

        i = j = 0;
-       for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
+       for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
                if (!pages--)
                        break;
-               b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
+               b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
                j++;
-               if (j == entries_per_page) {
+               if (j == CIO2_LOP_ENTRIES) {
                        i++;
                        j = 0;
                }
        }

-       b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+       b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
        return 0;
 fail:
-       for (i--; i >= 0; i--)
-               dma_free_coherent(dev, CIO2_PAGE_SIZE,
-                                 b->lop[i], b->lop_bus_addr[i]);
+       while (i--)
+               dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
        return -ENOMEM;
 }

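
for_each_sg_dma_page() iterates over the DMA-mapped side of a scatterlist
(dma_addr_t pages) instead of the CPU pages that for_each_sg_page() visits,
which is what a device-visible page table needs. A minimal sketch of the
pattern, with store_pfn() as a hypothetical stand-in for the LOP write:

    struct sg_dma_page_iter iter;

    for_each_sg_dma_page(sgt->sgl, &iter, sgt->nents, 0) {
            dma_addr_t daddr = sg_page_iter_dma_address(&iter);

            store_pfn(PFN_DOWN(daddr));     /* hypothetical consumer */
    }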
@@ -982,7 +969,7 @@
        /* Free LOP table */
        for (i = 0; i < CIO2_MAX_LOPS; i++) {
                if (b->lop[i])
-                       dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+                       dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
                                          b->lop[i], b->lop_bus_addr[i]);
        }
 }
@@ -1067,8 +1054,8 @@
 {
        struct cio2_device *cio2 = video_drvdata(file);

-       strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
-       strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
+       strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
+       strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info),
                 "PCI:%s", pci_name(cio2->pci_dev));

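
strscpy() supersedes the deprecated strlcpy(): it never reads the source past
what fits in the destination, always NUL-terminates, and signals truncation
through its return value. A sketch of the contract:

    char card[8];
    ssize_t n;

    /* returns the number of characters copied (excluding the NUL),
     * or -E2BIG if the source had to be truncated */
    n = strscpy(card, "a-rather-long-name", sizeof(card));
    if (n == -E2BIG)
            pr_debug("name truncated to %zu bytes\n", sizeof(card) - 1);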
@@ -1146,7 +1133,7 @@
        if (input->index > 0)
                return -EINVAL;

-       strlcpy(input->name, "camera", sizeof(input->name));
+       strscpy(input->name, "camera", sizeof(input->name));
        input->type = V4L2_INPUT_TYPE_CAMERA;

        return 0;
@@ -1177,7 +1164,7 @@

 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
        .vidioc_querycap = cio2_v4l2_querycap,
-       .vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
+       .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
        .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
        .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
        .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
@@ -1438,13 +1425,13 @@
        struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
                                                notifier);
        struct sensor_async_subdev *s_asd;
+       struct v4l2_async_subdev *asd;
        struct cio2_queue *q;
-       unsigned int i, pad;
+       unsigned int pad;
        int ret;

-       for (i = 0; i < notifier->num_subdevs; i++) {
-               s_asd = container_of(cio2->notifier.subdevs[i],
-                                    struct sensor_async_subdev, asd);
+       list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
+               s_asd = container_of(asd, struct sensor_async_subdev, asd);
                q = &cio2->queue[s_asd->csi2.port];

                for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
@@ -1466,7 +1453,7 @@
                if (ret) {
                        dev_err(&cio2->pci_dev->dev,
                                "failed to create link for %s\n",
-                               cio2->queue[i].sensor->name);
+                               q->sensor->name);
                        return ret;
                }
        }
@@ -1480,53 +1467,61 @@
        .complete = cio2_notifier_complete,
 };

-static int cio2_fwnode_parse(struct device *dev,
-                            struct v4l2_fwnode_endpoint *vep,
-                            struct v4l2_async_subdev *asd)
+static int cio2_parse_firmware(struct cio2_device *cio2)
 {
-       struct sensor_async_subdev *s_asd =
-               container_of(asd, struct sensor_async_subdev, asd);
-
-       if (vep->bus_type != V4L2_MBUS_CSI2) {
-               dev_err(dev, "Only CSI2 bus type is currently supported\n");
-               return -EINVAL;
-       }
-
-       s_asd->csi2.port = vep->base.port;
-       s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;
-
-       return 0;
-}
-
-static int cio2_notifier_init(struct cio2_device *cio2)
-{
+       unsigned int i;
        int ret;

-       ret = v4l2_async_notifier_parse_fwnode_endpoints(
-               &cio2->pci_dev->dev, &cio2->notifier,
-               sizeof(struct sensor_async_subdev),
-               cio2_fwnode_parse);
-       if (ret < 0)
+       for (i = 0; i < CIO2_NUM_PORTS; i++) {
+               struct v4l2_fwnode_endpoint vep = {
+                       .bus_type = V4L2_MBUS_CSI2_DPHY
+               };
+               struct sensor_async_subdev *s_asd;
+               struct v4l2_async_subdev *asd;
+               struct fwnode_handle *ep;
+
+               ep = fwnode_graph_get_endpoint_by_id(
+                       dev_fwnode(&cio2->pci_dev->dev), i, 0,
+                       FWNODE_GRAPH_ENDPOINT_NEXT);
+
+               if (!ep)
+                       continue;
+
+               ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+               if (ret)
+                       goto err_parse;
+
+               asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+                       &cio2->notifier, ep, sizeof(*s_asd));
+               if (IS_ERR(asd)) {
+                       ret = PTR_ERR(asd);
+                       goto err_parse;
+               }
+
+               s_asd = container_of(asd, struct sensor_async_subdev, asd);
+               s_asd->csi2.port = vep.base.port;
+               s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+               fwnode_handle_put(ep);
+
+               continue;
+
+err_parse:
+               fwnode_handle_put(ep);
                return ret;
-
-       if (!cio2->notifier.num_subdevs)
-               return -ENODEV; /* no endpoint */
-
-       cio2->notifier.ops = &cio2_async_ops;
-       ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
-       if (ret) {
-               dev_err(&cio2->pci_dev->dev,
-                       "failed to register async notifier : %d\n", ret);
-               v4l2_async_notifier_cleanup(&cio2->notifier);
        }

-       return ret;
-}
+       /*
+        * Proceed even without sensors connected to allow the device to
+        * suspend.
+        */
+       cio2->notifier.ops = &cio2_async_ops;
+       ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+       if (ret)
+               dev_err(&cio2->pci_dev->dev,
+                       "failed to register async notifier : %d\n", ret);

-static void cio2_notifier_exit(struct cio2_device *cio2)
-{
-       v4l2_async_notifier_unregister(&cio2->notifier);
-       v4l2_async_notifier_cleanup(&cio2->notifier);
+       return ret;
 }

 /**************** Queue initialization ****************/
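
fwnode_graph_get_endpoint_by_id() returns a counted reference to endpoint 0 of
the given port, or NULL if nothing is connected there, so every successful
lookup must be balanced by fwnode_handle_put(). A condensed sketch of the
per-port probe done by cio2_parse_firmware() above, with dev and port as
assumed inputs and error handling elided:

    struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
    struct fwnode_handle *ep;

    /* endpoint 0 of `port`; NULL means no sensor wired to this port */
    ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), port, 0,
                                         FWNODE_GRAPH_ENDPOINT_NEXT);
    if (ep) {
            if (!v4l2_fwnode_endpoint_parse(ep, &vep))
                    dev_dbg(dev, "port %u: %u data lanes\n", vep.base.port,
                            vep.bus.mipi_csi2.num_data_lanes);
            fwnode_handle_put(ep);      /* drop the reference taken above */
    }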
@@ -1605,6 +1600,7 @@
        subdev->owner = THIS_MODULE;
        snprintf(subdev->name, sizeof(subdev->name),
                 CIO2_ENTITY_NAME " %td", q - cio2->queue);
+       subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
        v4l2_set_subdevdata(subdev, cio2);
        r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
        if (r) {
@@ -1627,7 +1623,7 @@
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to initialize videobuf2 queue (%d)\n", r);
-               goto fail_vbq;
+               goto fail_subdev;
        }

        /* Initialize vdev */
@@ -1641,7 +1637,7 @@
        vdev->queue = &q->vbq;
        vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
        video_set_drvdata(vdev, cio2);
-       r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+       r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to register video device (%d)\n", r);
@@ -1658,10 +1654,8 @@
        return 0;

 fail_link:
-       video_unregister_device(&q->vdev);
+       vb2_video_unregister_device(&q->vdev);
 fail_vdev:
-       vb2_queue_release(vbq);
-fail_vbq:
        v4l2_device_unregister_subdev(subdev);
 fail_subdev:
        media_entity_cleanup(&vdev->entity);
@@ -1678,9 +1672,8 @@

 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-       video_unregister_device(&q->vdev);
+       vb2_video_unregister_device(&q->vdev);
        media_entity_cleanup(&q->vdev.entity);
-       vb2_queue_release(&q->vbq);
        v4l2_device_unregister_subdev(&q->subdev);
        media_entity_cleanup(&q->subdev.entity);
        cio2_fbpt_exit(q, &cio2->pci_dev->dev);
17171710
17181711 /**************** PCI interface ****************/
17191712
1720
-static int cio2_pci_config_setup(struct pci_dev *dev)
1721
-{
1722
- u16 pci_command;
1723
- int r = pci_enable_msi(dev);
1724
-
1725
- if (r) {
1726
- dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
1727
- return r;
1728
- }
1729
-
1730
- pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1731
- pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1732
- PCI_COMMAND_INTX_DISABLE;
1733
- pci_write_config_word(dev, PCI_COMMAND, pci_command);
1734
-
1735
- return 0;
1736
-}
1737
-
17381713 static int cio2_pci_probe(struct pci_dev *pci_dev,
17391714 const struct pci_device_id *id)
17401715 {
17411716 struct cio2_device *cio2;
1742
- void __iomem *const *iomap;
17431717 int r;
17441718
17451719 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
....@@ -1762,13 +1736,7 @@
17621736 return -ENODEV;
17631737 }
17641738
1765
- iomap = pcim_iomap_table(pci_dev);
1766
- if (!iomap) {
1767
- dev_err(&pci_dev->dev, "failed to iomap table\n");
1768
- return -ENODEV;
1769
- }
1770
-
1771
- cio2->base = iomap[CIO2_PCI_BAR];
1739
+ cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
17721740
17731741 pci_set_drvdata(pci_dev, cio2);
17741742
....@@ -1780,9 +1748,11 @@
17801748 return -ENODEV;
17811749 }
17821750
1783
- r = cio2_pci_config_setup(pci_dev);
1784
- if (r)
1785
- return -ENODEV;
1751
+ r = pci_enable_msi(pci_dev);
1752
+ if (r) {
1753
+ dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1754
+ return r;
1755
+ }
17861756
17871757 r = cio2_fbpt_init_dummy(cio2);
17881758 if (r)
....@@ -1791,7 +1761,7 @@
17911761 mutex_init(&cio2->lock);
17921762
17931763 cio2->media_dev.dev = &cio2->pci_dev->dev;
1794
- strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1764
+ strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
17951765 sizeof(cio2->media_dev.model));
17961766 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
17971767 "PCI:%s", pci_name(cio2->pci_dev));
....@@ -1814,16 +1784,18 @@
18141784 if (r)
18151785 goto fail_v4l2_device_unregister;
18161786
1787
+ v4l2_async_notifier_init(&cio2->notifier);
1788
+
18171789 /* Register notifier for subdevices we care */
1818
- r = cio2_notifier_init(cio2);
1790
+ r = cio2_parse_firmware(cio2);
18191791 if (r)
1820
- goto fail_cio2_queue_exit;
1792
+ goto fail_clean_notifier;
18211793
18221794 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
18231795 IRQF_SHARED, CIO2_NAME, cio2);
18241796 if (r) {
18251797 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1826
- goto fail;
1798
+ goto fail_clean_notifier;
18271799 }
18281800
18291801 pm_runtime_put_noidle(&pci_dev->dev);
@@ -1831,9 +1803,9 @@

        return 0;

-fail:
-       cio2_notifier_exit(cio2);
-fail_cio2_queue_exit:
+fail_clean_notifier:
+       v4l2_async_notifier_unregister(&cio2->notifier);
+       v4l2_async_notifier_cleanup(&cio2->notifier);
        cio2_queues_exit(cio2);
 fail_v4l2_device_unregister:
        v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1850,16 +1822,18 @@
 static void cio2_pci_remove(struct pci_dev *pci_dev)
 {
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-       unsigned int i;

        media_device_unregister(&cio2->media_dev);
-       cio2_notifier_exit(cio2);
-       for (i = 0; i < CIO2_QUEUES; i++)
-               cio2_queue_exit(cio2, &cio2->queue[i]);
+       v4l2_async_notifier_unregister(&cio2->notifier);
+       v4l2_async_notifier_cleanup(&cio2->notifier);
+       cio2_queues_exit(cio2);
        cio2_fbpt_exit_dummy(cio2);
        v4l2_device_unregister(&cio2->v4l2_dev);
        media_device_cleanup(&cio2->media_dev);
        mutex_destroy(&cio2->lock);
+
+       pm_runtime_forbid(&pci_dev->dev);
+       pm_runtime_get_noresume(&pci_dev->dev);
 }

 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
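
The two pm_runtime calls appended to cio2_pci_remove() rebalance the
pm_runtime_put_noidle() done at the end of probe (and the pm_runtime_allow()
that presumably follows it, outside this hunk), so the device's usage count and
user-space "allow" state are left consistent across a driver unbind. The
pairing, in sketch form:

    /* end of probe(): permit runtime suspend */
    pm_runtime_put_noidle(&pci_dev->dev);
    pm_runtime_allow(&pci_dev->dev);

    /* remove(): undo the above, in reverse order */
    pm_runtime_forbid(&pci_dev->dev);
    pm_runtime_get_noresume(&pci_dev->dev);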
@@ -2006,10 +1980,9 @@

 static int __maybe_unused cio2_resume(struct device *dev)
 {
-       struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-       int r = 0;
+       struct cio2_device *cio2 = dev_get_drvdata(dev);
        struct cio2_queue *q = cio2->cur_queue;
+       int r;

        dev_dbg(dev, "cio2 resume\n");
        if (!cio2->streaming)
@@ -2036,7 +2009,7 @@

 static const struct pci_device_id cio2_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
-       { 0 }
+       { }
 };

 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
@@ -2055,7 +2028,7 @@

 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
 MODULE_LICENSE("GPL v2");