From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] video: rockchip: mpp: rework iommu handling and add partial dma-buf sync

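Rework the Rockchip MPP iommu helpers in mpp_iommu.c:

- drop the static qualifier from mpp_dma_find_buffer_fd() and clear the
  buffer state after the dma-buf is released
- return ERR_PTR() from the import path and include the error code in
  the dma_buf_get/attach/map failure messages
- add mpp_dma_buf_sync() to sync a sub-range of an imported dma-buf for
  CPU or device access by walking its scatterlist
- make the iommu helpers tolerate a NULL info pointer and skip
  re-attaching when the domain is already bound to the device
- add a default pagefault handler that dumps the current task's memory
  regions and hardware state, then masks the iommu irq via
  rockchip_iommu_mask_irq()
- track the device that currently owns the iommu with
  mpp_iommu_dev_activate()/mpp_iommu_dev_deactivate() under dev_lock,
  switching the fault handler to the active device
- use the av1 decoder iommu ops in mpp_iommu_refresh() when
  CONFIG_ROCKCHIP_MPP_AV1DEC is enabled
- allocate the mpp_iommu_info only after the iommu group and domain are
  resolved so the error paths release them correctly

A minimal usage sketch of the new partial sync helper (illustrative
only: "buf" stands for an already imported struct mpp_dma_buffer and
the offset/length values are examples):

	/* hand the first 4 KiB of the buffer to the CPU before reading it */
	mpp_dma_buf_sync(buf, 0, 4096, DMA_FROM_DEVICE, true);
	/* ... CPU reads the data written by the device ... */
	/* return ownership of that range to the device afterwards */
	mpp_dma_buf_sync(buf, 0, 4096, DMA_FROM_DEVICE, false);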
---
 kernel/drivers/video/rockchip/mpp/mpp_iommu.c |  235 +++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 199 insertions(+), 36 deletions(-)

diff --git a/kernel/drivers/video/rockchip/mpp/mpp_iommu.c b/kernel/drivers/video/rockchip/mpp/mpp_iommu.c
index 504c7934..1abbfb7 100644
--- a/kernel/drivers/video/rockchip/mpp/mpp_iommu.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_iommu.c
@@ -8,24 +8,27 @@
  *	Ding Wei, leo.ding@rock-chips.com
  *
  */
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-#include <asm/dma-iommu.h>
-#endif
 #include <linux/delay.h>
 #include <linux/dma-buf-cache.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
 #include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_iommu.h"
+#include "mpp_common.h"
 
-static struct mpp_dma_buffer *
+struct mpp_dma_buffer *
 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -66,6 +69,15 @@
 	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
 	dma_buf_detach(buffer->dmabuf, buffer->attach);
 	dma_buf_put(buffer->dmabuf);
+	buffer->dma = NULL;
+	buffer->dmabuf = NULL;
+	buffer->attach = NULL;
+	buffer->sgt = NULL;
+	buffer->copy_sgt = NULL;
+	buffer->iova = 0;
+	buffer->size = 0;
+	buffer->vaddr = NULL;
+	buffer->last_used = 0;
 }
 
 /* Remove the oldest buffer when count more than the setting */
@@ -194,8 +206,9 @@
 
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR(dmabuf)) {
-		mpp_err("dma_buf_get fd %d failed\n", fd);
-		return NULL;
+		ret = PTR_ERR(dmabuf);
+		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
+		return ERR_PTR(ret);
 	}
 	/* A new DMA buffer */
 	mutex_lock(&dma->list_mutex);
@@ -216,15 +229,15 @@
 
 	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
 	if (IS_ERR(attach)) {
-		mpp_err("dma_buf_attach fd %d failed\n", fd);
 		ret = PTR_ERR(attach);
+		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
 		goto fail_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, buffer->dir);
 	if (IS_ERR(sgt)) {
-		mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
 		ret = PTR_ERR(sgt);
+		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
 		goto fail_map;
 	}
 	buffer->iova = sg_dma_address(sgt->sgl);
@@ -234,7 +247,9 @@
 	buffer->dma = dma;
 
 	kref_init(&buffer->ref);
+
 	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+		/* Take an extra reference for use outside the buffer pool */
 		kref_get(&buffer->ref);
 
 	mutex_lock(&dma->list_mutex);
@@ -361,25 +376,101 @@
 	return dma;
 }
 
+/*
+ * Sync part of an imported dma-buf between device and CPU:
+ * for_cpu = true => begin cpu access, for_cpu = false => end cpu access
+ */
+void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
+		      enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = buffer->dma->dev;
+	struct sg_table *sgt = buffer->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
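+		/* skip segments that end at or before the requested offset */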
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
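+		/* this segment overlaps the range: compute how much to sync */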
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
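+		/* account for the synced bytes and move to the next segment */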
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
+}
+
 int mpp_iommu_detach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
+	if (!info)
+		return 0;
 
-	iommu_detach_group(domain, group);
-
+	iommu_detach_group(info->domain, info->group);
 	return 0;
 }
 
 int mpp_iommu_attach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
-	int ret;
+	if (!info)
+		return 0;
 
-	ret = iommu_attach_group(domain, group);
-	if (ret)
-		return ret;
+	if (info->domain == iommu_get_domain_for_dev(info->dev))
+		return 0;
+
+	return iommu_attach_group(info->domain, info->group);
+}
+
+static int mpp_iommu_handle(struct iommu_domain *iommu,
+			    struct device *iommu_dev,
+			    unsigned long iova,
+			    int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+
+	if (mpp->cur_task)
+		mpp_task_dump_mem_region(mpp, mpp->cur_task);
+
+	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
+		mpp->dev_ops->dump_dev(mpp);
+	else
+		mpp_task_dump_hw_reg(mpp);
+
+	/*
+	 * Mask the iommu irq so that the iommu does not keep triggering
+	 * pagefaults until the faulting task is finished by the hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
 
 	return 0;
 }
@@ -391,13 +479,11 @@
 	struct device_node *np = NULL;
 	struct platform_device *pdev = NULL;
 	struct mpp_iommu_info *info = NULL;
+	struct iommu_domain *domain = NULL;
+	struct iommu_group *group = NULL;
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
 #endif
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
 	np = of_parse_phandle(dev->of_node, "iommus", 0);
 	if (!np || !of_device_is_available(np)) {
 		mpp_err("failed to get device node\n");
@@ -411,8 +497,8 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	info->group = iommu_group_get(dev);
-	if (!info->group) {
+	group = iommu_group_get(dev);
+	if (!group) {
 		ret = -EINVAL;
 		goto err_put_pdev;
 	}
@@ -423,38 +509,53 @@
 	 * we re-attach domain here
 	 */
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
-	if (!iommu_group_default_domain(info->group)) {
+	if (!iommu_group_default_domain(group)) {
 		mapping = to_dma_iommu_mapping(dev);
 		WARN_ON(!mapping);
-		info->domain = mapping->domain;
+		domain = mapping->domain;
 	}
 #endif
-	if (!info->domain) {
-		info->domain = iommu_get_domain_for_dev(dev);
-		if (!info->domain) {
+	if (!domain) {
+		domain = iommu_get_domain_for_dev(dev);
+		if (!domain) {
 			ret = -EINVAL;
 			goto err_put_group;
 		}
 	}
 
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_put_group;
+	}
+
+	init_rwsem(&info->rw_sem);
+	spin_lock_init(&info->dev_lock);
 	info->dev = dev;
 	info->pdev = pdev;
-	init_rwsem(&info->rw_sem);
+	info->group = group;
+	info->domain = domain;
+	info->dev_active = NULL;
 	info->irq = platform_get_irq(pdev, 0);
 	info->got_irq = (info->irq < 0) ? false : true;
 
 	return info;
 
 err_put_group:
-	iommu_group_put(info->group);
+	if (group)
+		iommu_group_put(group);
 err_put_pdev:
-	platform_device_put(pdev);
+	if (pdev)
+		platform_device_put(pdev);
 
 	return ERR_PTR(ret);
 }
 
 int mpp_iommu_remove(struct mpp_iommu_info *info)
 {
+	if (!info)
+		return 0;
+
 	iommu_group_put(info->group);
 	platform_device_put(info->pdev);
 
@@ -465,21 +566,80 @@
 {
 	int ret;
 
+	if (!info)
+		return 0;
+	/* call av1 iommu ops */
+	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
+		ret = mpp_av1_iommu_disable(dev);
+		if (ret)
+			return ret;
+		return mpp_av1_iommu_enable(dev);
+	}
 	/* disable iommu */
 	ret = rockchip_iommu_disable(dev);
 	if (ret)
 		return ret;
-
 	/* re-enable iommu */
 	return rockchip_iommu_enable(dev);
 }
 
 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
+	if (!info)
+		return 0;
 
-	if (domain && domain->ops)
-		iommu_flush_tlb_all(domain);
+	if (info->domain && info->domain->ops)
+		iommu_flush_iotlb_all(info->domain);
+
+	return 0;
+}
+
+int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active || !dev) {
+		dev_err(info->dev, "cannot activate %s -> %s\n",
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
+			dev ? dev_name(dev->dev) : NULL);
+		ret = -EINVAL;
+	} else {
+		info->dev_active = dev;
+		/* switch domain pagefault handler and arg depending on device */
+		iommu_set_fault_handler(info->domain, dev->fault_handler ?
+					dev->fault_handler : mpp_iommu_handle, dev);
+
+		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
+	}
+
+	spin_unlock_irqrestore(&info->dev_lock, flags);
+
+	return ret;
+}
+
+int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active != dev)
+		dev_err(info->dev, "cannot deactivate %s while %s is active\n",
+			dev ? dev_name(dev->dev) : NULL,
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL);
+
+	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
+	info->dev_active = NULL;
+	spin_unlock_irqrestore(&info->dev_lock, flags);
 
 	return 0;
 }

--
Gitblit v1.6.2