From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] media: ipu3-cio2: sync driver with upstream updates
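
Sync the ipu3-cio2 driver with a batch of upstream cleanups and fixes:

- Use PAGE_SIZE and the PFN_DOWN()/PFN_UP() helpers (plus a
  CIO2_LOP_ENTRIES constant) instead of open-coded CIO2_PAGE_SIZE
  arithmetic.
- Drop the memset() after dma_alloc_coherent(), which already returns
  zeroed memory.
- Replace the open-coded DMA-halt polling loop with
  readl_poll_timeout().
- Rework cio2_buffer_done() to check for ready buffers up front
  instead of counting them inside the loop.
- Walk the scatterlist with for_each_sg_dma_page().
- Replace strlcpy() with strscpy().
- Parse firmware per port via fwnode_graph_get_endpoint_by_id() and
  v4l2_async_notifier_add_fwnode_remote_subdev(), and proceed even
  without connected sensors so the device can suspend.
- Set MEDIA_ENT_F_VID_IF_BRIDGE on the CIO2 subdev entity.
- Switch to VFL_TYPE_VIDEO and vb2_video_unregister_device(), and
  drop the manual vb2_queue_release() calls.
- Enable MSI directly in probe instead of the
  cio2_pci_config_setup() helper.
- Forbid runtime PM on device removal.
- Assorted cleanups: copyright year, style fixes, simplified error
  unwinding in probe and queue init.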

---
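Notes:

The DMA-halt change depends on readl_poll_timeout() from
<linux/iopoll.h>, which is why that include is added. As a rough
sketch of the pattern (REG and BIT are placeholder names, not the
driver's):

	u32 value;
	int ret;

	/* Re-read REG, sleeping ~4 ms between reads, until BIT is set
	 * or 2 s have elapsed; returns -ETIMEDOUT on timeout.
	 */
	ret = readl_poll_timeout(base + REG, value, value & BIT,
				 4000, 2000000);
	if (ret)
		dev_err(dev, "poll timed out\n");

Likewise <linux/pfn.h> is added for PFN_DOWN()/PFN_UP():
PFN_DOWN(addr) is addr >> PAGE_SHIFT and PFN_UP(len) is
DIV_ROUND_UP(len, PAGE_SIZE), matching the open-coded arithmetic
they replace.
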
 kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c |  307 +++++++++++++++++++++++---------------------------
 1 file changed, 140 insertions(+), 167 deletions(-)

diff --git a/kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index cdf4d07..f8dca47 100644
--- a/kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/kernel/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Intel Corporation
+ * Copyright (C) 2017,2020 Intel Corporation
  *
  * Based partially on Intel IPU4 driver written by
  *  Sakari Ailus <sakari.ailus@linux.intel.com>
@@ -9,13 +9,14 @@
  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
  *  Antti Laakso <antti.laakso@intel.com>
  * et al.
- *
  */
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/vmalloc.h>
@@ -96,12 +97,12 @@
 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
 {
 	if (cio2->dummy_lop) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 				  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
 		cio2->dummy_lop = NULL;
 	}
 	if (cio2->dummy_page) {
-		dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+		dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 				  cio2->dummy_page, cio2->dummy_page_bus_addr);
 		cio2->dummy_page = NULL;
 	}
@@ -111,12 +112,10 @@
 {
 	unsigned int i;
 
-	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
-					      CIO2_PAGE_SIZE,
+	cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					      &cio2->dummy_page_bus_addr,
 					      GFP_KERNEL);
-	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
-					     CIO2_PAGE_SIZE,
+	cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					     &cio2->dummy_lop_bus_addr,
 					     GFP_KERNEL);
 	if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -127,8 +126,8 @@
 	 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
 	 * Initialize each entry to dummy_page bus base address.
 	 */
-	for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
-		cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
 
 	return 0;
 }
@@ -160,12 +159,11 @@
 	unsigned int i;
 
 	entry[0].first_entry.first_page_offset = 0;
-	entry[1].second_entry.num_of_pages =
-		CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
-	entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
+	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
 
 	for (i = 0; i < CIO2_MAX_LOPS; i++)
-		entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -182,26 +180,24 @@
 
 	entry[0].first_entry.first_page_offset = b->offset;
 	remaining = length + entry[0].first_entry.first_page_offset;
-	entry[1].second_entry.num_of_pages =
-		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
+	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
 	/*
 	 * last_page_available_bytes has the offset of the last byte in the
 	 * last page which is still accessible by DMA. DMA cannot access
 	 * beyond this point. Valid range for this is from 0 to 4095.
 	 * 0 indicates 1st byte in the page is DMA accessible.
-	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
+	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
 	 * is available for DMA transfer.
 	 */
 	entry[1].second_entry.last_page_available_bytes =
 			(remaining & ~PAGE_MASK) ?
-				(remaining & ~PAGE_MASK) - 1 :
-				CIO2_PAGE_SIZE - 1;
+				(remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
 	/* Fill FBPT */
 	remaining = length;
 	i = 0;
 	while (remaining > 0) {
-		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
-		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
+		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
 		entry++;
 		i++;
 	}
@@ -209,7 +205,7 @@
 	/*
 	 * The first not meaningful FBPT entry should point to a valid LOP
 	 */
-	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
 
 	cio2_fbpt_entry_enable(cio2, entry);
 }
@@ -222,8 +218,6 @@
 				     GFP_KERNEL);
 	if (!q->fbpt)
 		return -ENOMEM;
-
-	memset(q->fbpt, 0, CIO2_FBPT_SIZE);
 
 	return 0;
 }
@@ -266,7 +260,7 @@
  */
 
 /*
- * shift for keeping value range suitable for 32-bit integer arithmetics
+ * shift for keeping value range suitable for 32-bit integer arithmetic
  */
 #define LIMIT_SHIFT	8
 
@@ -297,7 +291,7 @@
 				 struct cio2_csi2_timing *timing)
 {
 	struct device *dev = &cio2->pci_dev->dev;
-	struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
+	struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
 	struct v4l2_ctrl *link_freq;
 	s64 freq;
 	int r;
@@ -361,7 +355,7 @@
 	void __iomem *const base = cio2->base;
 	u8 lanes, csi2bus = q->csi2.port;
 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
-	struct cio2_csi2_timing timing;
+	struct cio2_csi2_timing timing = { 0 };
 	int i, r;
 
 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
@@ -477,8 +471,7 @@
 	}
 
 	/* Enable DMA */
-	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
-	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
 
 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
@@ -514,8 +507,10 @@
 
 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	void __iomem *base = cio2->base;
-	unsigned int i, maxloops = 1000;
+	void __iomem *const base = cio2->base;
+	unsigned int i;
+	u32 value;
+	int ret;
 
 	/* Disable CSI receiver and MIPI backend devices */
 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -525,13 +520,10 @@
 
 	/* Halt DMA */
 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
-	do {
-		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
-		    CIO2_CDMAC0_DMA_HALTED)
-			break;
-		usleep_range(1000, 2000);
-	} while (--maxloops);
-	if (!maxloops)
+	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+				 value, value & CIO2_CDMAC0_DMA_HALTED,
+				 4000, 2000000);
+	if (ret)
 		dev_err(&cio2->pci_dev->dev,
 			"DMA %i can not be halted\n", CIO2_DMA_CHAN);
 
@@ -547,7 +539,7 @@
 {
 	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_queue *q = cio2->cur_queue;
-	int buffers_found = 0;
+	struct cio2_fbpt_entry *entry;
 	u64 ns = ktime_get_ns();
 
 	if (dma_chan >= CIO2_QUEUES) {
@@ -555,14 +547,17 @@
 		return;
 	}
 
+	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+		dev_warn(&cio2->pci_dev->dev,
+			 "no ready buffers found on DMA channel %u\n",
+			 dma_chan);
+		return;
+	}
+
 	/* Find out which buffer(s) are ready */
 	do {
-		struct cio2_fbpt_entry *const entry =
-			&q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
 		struct cio2_buffer *b;
-
-		if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
-			break;
 
 		b = q->bufs[q->bufs_first];
 		if (b) {
@@ -585,13 +580,8 @@
 		atomic_inc(&q->frame_sequence);
 		cio2_fbpt_entry_init_dummy(cio2, entry);
 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
-		buffers_found++;
-	} while (1);
-
-	if (buffers_found == 0)
-		dev_warn(&cio2->pci_dev->dev,
-			 "no ready buffers found on DMA channel %u\n",
-			 dma_chan);
+		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
 }
 
 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
@@ -844,13 +834,11 @@
 	struct device *dev = &cio2->pci_dev->dev;
 	struct cio2_buffer *b =
 		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
-	static const unsigned int entries_per_page =
-		CIO2_PAGE_SIZE / sizeof(u32);
-	unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
-	unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
+	unsigned int pages = PFN_UP(vb->planes[0].length);
+	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
 	struct sg_table *sg;
-	struct sg_page_iter sg_iter;
-	int i, j;
+	struct sg_dma_page_iter sg_iter;
+	unsigned int i, j;
 
 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
@@ -861,7 +849,7 @@
 	memset(b->lop, 0, sizeof(b->lop));
 	/* Allocate LOP table */
 	for (i = 0; i < lops; i++) {
-		b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
+		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
 					       &b->lop_bus_addr[i], GFP_KERNEL);
 		if (!b->lop[i])
 			goto fail;
@@ -876,23 +864,22 @@
 		b->offset = sg->sgl->offset;
 
 	i = j = 0;
-	for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
+	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
 		if (!pages--)
 			break;
-		b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
+		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
 		j++;
-		if (j == entries_per_page) {
+		if (j == CIO2_LOP_ENTRIES) {
 			i++;
 			j = 0;
 		}
 	}
 
-	b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
 	return 0;
 fail:
-	for (i--; i >= 0; i--)
-		dma_free_coherent(dev, CIO2_PAGE_SIZE,
-				  b->lop[i], b->lop_bus_addr[i]);
+	while (i--)
+		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
 	return -ENOMEM;
 }
 
@@ -982,7 +969,7 @@
 	/* Free LOP table */
 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
 		if (b->lop[i])
-			dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+			dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
 					  b->lop[i], b->lop_bus_addr[i]);
 	}
 }
@@ -1067,8 +1054,8 @@
 {
 	struct cio2_device *cio2 = video_drvdata(file);
 
-	strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
-	strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
+	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
+	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
 	snprintf(cap->bus_info, sizeof(cap->bus_info),
 		 "PCI:%s", pci_name(cio2->pci_dev));
 
@@ -1146,7 +1133,7 @@
 	if (input->index > 0)
 		return -EINVAL;
 
-	strlcpy(input->name, "camera", sizeof(input->name));
+	strscpy(input->name, "camera", sizeof(input->name));
 	input->type = V4L2_INPUT_TYPE_CAMERA;
 
 	return 0;
@@ -1177,7 +1164,7 @@
 
 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
 	.vidioc_querycap = cio2_v4l2_querycap,
-	.vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
+	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
@@ -1438,13 +1425,13 @@
 	struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
 						notifier);
 	struct sensor_async_subdev *s_asd;
+	struct v4l2_async_subdev *asd;
 	struct cio2_queue *q;
-	unsigned int i, pad;
+	unsigned int pad;
 	int ret;
 
-	for (i = 0; i < notifier->num_subdevs; i++) {
-		s_asd = container_of(cio2->notifier.subdevs[i],
-				     struct sensor_async_subdev, asd);
+	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
+		s_asd = container_of(asd, struct sensor_async_subdev, asd);
 		q = &cio2->queue[s_asd->csi2.port];
 
 		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
@@ -1466,7 +1453,7 @@
 		if (ret) {
 			dev_err(&cio2->pci_dev->dev,
 				"failed to create link for %s\n",
-				cio2->queue[i].sensor->name);
+				q->sensor->name);
 			return ret;
 		}
 	}
@@ -1480,53 +1467,61 @@
 	.complete = cio2_notifier_complete,
 };
 
-static int cio2_fwnode_parse(struct device *dev,
-			     struct v4l2_fwnode_endpoint *vep,
-			     struct v4l2_async_subdev *asd)
+static int cio2_parse_firmware(struct cio2_device *cio2)
 {
-	struct sensor_async_subdev *s_asd =
-			container_of(asd, struct sensor_async_subdev, asd);
-
-	if (vep->bus_type != V4L2_MBUS_CSI2) {
-		dev_err(dev, "Only CSI2 bus type is currently supported\n");
-		return -EINVAL;
-	}
-
-	s_asd->csi2.port = vep->base.port;
-	s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;
-
-	return 0;
-}
-
-static int cio2_notifier_init(struct cio2_device *cio2)
-{
+	unsigned int i;
 	int ret;
 
-	ret = v4l2_async_notifier_parse_fwnode_endpoints(
-		&cio2->pci_dev->dev, &cio2->notifier,
-		sizeof(struct sensor_async_subdev),
-		cio2_fwnode_parse);
-	if (ret < 0)
+	for (i = 0; i < CIO2_NUM_PORTS; i++) {
+		struct v4l2_fwnode_endpoint vep = {
+			.bus_type = V4L2_MBUS_CSI2_DPHY
+		};
+		struct sensor_async_subdev *s_asd;
+		struct v4l2_async_subdev *asd;
+		struct fwnode_handle *ep;
+
+		ep = fwnode_graph_get_endpoint_by_id(
+			dev_fwnode(&cio2->pci_dev->dev), i, 0,
+			FWNODE_GRAPH_ENDPOINT_NEXT);
+
+		if (!ep)
+			continue;
+
+		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+		if (ret)
+			goto err_parse;
+
+		asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+				&cio2->notifier, ep, sizeof(*s_asd));
+		if (IS_ERR(asd)) {
+			ret = PTR_ERR(asd);
+			goto err_parse;
+		}
+
+		s_asd = container_of(asd, struct sensor_async_subdev, asd);
+		s_asd->csi2.port = vep.base.port;
+		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+		fwnode_handle_put(ep);
+
+		continue;
+
+err_parse:
+		fwnode_handle_put(ep);
 		return ret;
-
-	if (!cio2->notifier.num_subdevs)
-		return -ENODEV;	/* no endpoint */
-
-	cio2->notifier.ops = &cio2_async_ops;
-	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
-	if (ret) {
-		dev_err(&cio2->pci_dev->dev,
-			"failed to register async notifier : %d\n", ret);
-		v4l2_async_notifier_cleanup(&cio2->notifier);
 	}
 
-	return ret;
-}
+	/*
+	 * Proceed even without sensors connected to allow the device to
+	 * suspend.
+	 */
+	cio2->notifier.ops = &cio2_async_ops;
+	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+	if (ret)
+		dev_err(&cio2->pci_dev->dev,
+			"failed to register async notifier : %d\n", ret);
 
-static void cio2_notifier_exit(struct cio2_device *cio2)
-{
-	v4l2_async_notifier_unregister(&cio2->notifier);
-	v4l2_async_notifier_cleanup(&cio2->notifier);
+	return ret;
 }
 
 /**************** Queue initialization ****************/
@@ -1605,6 +1600,7 @@
 	subdev->owner = THIS_MODULE;
 	snprintf(subdev->name, sizeof(subdev->name),
 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
+	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
 	v4l2_set_subdevdata(subdev, cio2);
 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
 	if (r) {
@@ -1627,7 +1623,7 @@
 	if (r) {
 		dev_err(&cio2->pci_dev->dev,
 			"failed to initialize videobuf2 queue (%d)\n", r);
-		goto fail_vbq;
+		goto fail_subdev;
 	}
 
 	/* Initialize vdev */
@@ -1641,7 +1637,7 @@
 	vdev->queue = &q->vbq;
 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
 	video_set_drvdata(vdev, cio2);
-	r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
 	if (r) {
 		dev_err(&cio2->pci_dev->dev,
 			"failed to register video device (%d)\n", r);
@@ -1658,10 +1654,8 @@
 	return 0;
 
 fail_link:
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 fail_vdev:
-	vb2_queue_release(vbq);
-fail_vbq:
 	v4l2_device_unregister_subdev(subdev);
 fail_subdev:
 	media_entity_cleanup(&vdev->entity);
@@ -1678,9 +1672,8 @@
 
 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
 {
-	video_unregister_device(&q->vdev);
+	vb2_video_unregister_device(&q->vdev);
 	media_entity_cleanup(&q->vdev.entity);
-	vb2_queue_release(&q->vbq);
 	v4l2_device_unregister_subdev(&q->subdev);
 	media_entity_cleanup(&q->subdev.entity);
 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
@@ -1717,29 +1710,10 @@
 
 /**************** PCI interface ****************/
 
-static int cio2_pci_config_setup(struct pci_dev *dev)
-{
-	u16 pci_command;
-	int r = pci_enable_msi(dev);
-
-	if (r) {
-		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
-		return r;
-	}
-
-	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
-	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-		PCI_COMMAND_INTX_DISABLE;
-	pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
-	return 0;
-}
-
 static int cio2_pci_probe(struct pci_dev *pci_dev,
 			  const struct pci_device_id *id)
 {
 	struct cio2_device *cio2;
-	void __iomem *const *iomap;
 	int r;
 
 	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
@@ -1762,13 +1736,7 @@
 		return -ENODEV;
 	}
 
-	iomap = pcim_iomap_table(pci_dev);
-	if (!iomap) {
-		dev_err(&pci_dev->dev, "failed to iomap table\n");
-		return -ENODEV;
-	}
-
-	cio2->base = iomap[CIO2_PCI_BAR];
+	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
 
 	pci_set_drvdata(pci_dev, cio2);
 
@@ -1780,9 +1748,11 @@
 		return -ENODEV;
 	}
 
-	r = cio2_pci_config_setup(pci_dev);
-	if (r)
-		return -ENODEV;
+	r = pci_enable_msi(pci_dev);
+	if (r) {
+		dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+		return r;
+	}
 
 	r = cio2_fbpt_init_dummy(cio2);
 	if (r)
@@ -1791,7 +1761,7 @@
 	mutex_init(&cio2->lock);
 
 	cio2->media_dev.dev = &cio2->pci_dev->dev;
-	strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
+	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
 		sizeof(cio2->media_dev.model));
 	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
 		 "PCI:%s", pci_name(cio2->pci_dev));
@@ -1814,16 +1784,18 @@
 	if (r)
 		goto fail_v4l2_device_unregister;
 
+	v4l2_async_notifier_init(&cio2->notifier);
+
 	/* Register notifier for subdevices we care */
-	r = cio2_notifier_init(cio2);
+	r = cio2_parse_firmware(cio2);
 	if (r)
-		goto fail_cio2_queue_exit;
+		goto fail_clean_notifier;
 
 	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
 			     IRQF_SHARED, CIO2_NAME, cio2);
 	if (r) {
 		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
-		goto fail;
+		goto fail_clean_notifier;
 	}
 
 	pm_runtime_put_noidle(&pci_dev->dev);
@@ -1831,9 +1803,9 @@
 
 	return 0;
 
-fail:
-	cio2_notifier_exit(cio2);
-fail_cio2_queue_exit:
+fail_clean_notifier:
+	v4l2_async_notifier_unregister(&cio2->notifier);
+	v4l2_async_notifier_cleanup(&cio2->notifier);
 	cio2_queues_exit(cio2);
 fail_v4l2_device_unregister:
 	v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1850,16 +1822,18 @@
 static void cio2_pci_remove(struct pci_dev *pci_dev)
 {
 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-	unsigned int i;
 
 	media_device_unregister(&cio2->media_dev);
-	cio2_notifier_exit(cio2);
-	for (i = 0; i < CIO2_QUEUES; i++)
-		cio2_queue_exit(cio2, &cio2->queue[i]);
+	v4l2_async_notifier_unregister(&cio2->notifier);
+	v4l2_async_notifier_cleanup(&cio2->notifier);
+	cio2_queues_exit(cio2);
 	cio2_fbpt_exit_dummy(cio2);
 	v4l2_device_unregister(&cio2->v4l2_dev);
 	media_device_cleanup(&cio2->media_dev);
 	mutex_destroy(&cio2->lock);
+
+	pm_runtime_forbid(&pci_dev->dev);
+	pm_runtime_get_noresume(&pci_dev->dev);
 }
 
 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
@@ -2006,10 +1980,9 @@
 
 static int __maybe_unused cio2_resume(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-	int r = 0;
+	struct cio2_device *cio2 = dev_get_drvdata(dev);
 	struct cio2_queue *q = cio2->cur_queue;
+	int r;
 
 	dev_dbg(dev, "cio2 resume\n");
 	if (!cio2->streaming)
@@ -2036,7 +2009,7 @@
 
 static const struct pci_device_id cio2_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
-	{ 0 }
+	{ }
 };
 
 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
@@ -2055,7 +2028,7 @@
 
 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
 MODULE_LICENSE("GPL v2");

--
Gitblit v1.6.2