From ee930fffee469d076998274a2ca55e13dc1efb67 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:54 +0000
Subject: [PATCH] PCI: dw-dmatest: rework DMA test driver (wired DMA mode, bounded wait, static tables)
---
kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c | 162 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 92 insertions(+), 70 deletions(-)
diff --git a/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c b/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
index 4b0d4d3..3d5b44f 100644
--- a/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
+++ b/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
@@ -3,8 +3,6 @@
* Copyright (c) 2022 Rockchip Electronics Co., Ltd.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
@@ -14,8 +12,9 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
-#include "pcie-designware.h"
#include "pcie-dw-dmatest.h"
#include "../rockchip-pcie-dma.h"
@@ -35,25 +34,26 @@
module_param(rw_test, uint, 0644);
MODULE_PARM_DESC(rw_test, "Read/Write test, 1-read 2-write 3-both(default 3)");
-static unsigned int bus_addr = 0x3c000000;
-module_param(bus_addr, uint, 0644);
+static unsigned long bus_addr = 0x3c000000;
+module_param(bus_addr, ulong, 0644);
MODULE_PARM_DESC(bus_addr, "Dmatest chn0 bus_addr(remote), chn1 add offset 0x100000, (default 0x3c000000)");
-static unsigned int local_addr = 0x3c000000;
-module_param(local_addr, uint, 0644);
+static unsigned long local_addr = 0x3c000000;
+module_param(local_addr, ulong, 0644);
MODULE_PARM_DESC(local_addr, "Dmatest chn0 local_addr(local), chn1 add offset 0x100000, (default 0x3c000000)");
static unsigned int test_dev;
module_param(test_dev, uint, 0644);
MODULE_PARM_DESC(test_dev, "Choose dma_obj device,(default 0)");
-static bool is_rc = true;
-module_param_named(is_rc, is_rc, bool, 0644);
-MODULE_PARM_DESC(is_rc, "Test port is rc(default true)");
+static bool is_wired;
+module_param_named(is_wired, is_wired, bool, 0644);
+MODULE_PARM_DESC(is_wired, "Transfer is triggered by wired DMA(default false)");
-#define PCIE_DW_MISC_DMATEST_DEV_MAX 5
+#define PCIE_DW_MISC_DMATEST_DEV_MAX 1
#define PCIE_DMA_CHANEL_MAX_NUM 2
+#define PCIE_DMA_LL_MAX_NUM 1024 /* Unrestricted, tentative value */
struct pcie_dw_dmatest_dev {
struct dma_trx_obj *obj;
@@ -64,51 +64,70 @@
struct mutex rd_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each read DMA channel */
struct mutex wr_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each write DMA channel */
+
+ struct dma_table rd_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
+ struct dma_table wr_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
};
-static struct pcie_dw_dmatest_dev s_dmatest_dev[PCIE_DW_MISC_DMATEST_DEV_MAX];
-static int cur_dmatest_dev;
+static struct pcie_dw_dmatest_dev *s_dmatest_dev;
static void pcie_dw_dmatest_show(void)
{
int i;
for (i = 0; i < PCIE_DW_MISC_DMATEST_DEV_MAX; i++) {
- if (s_dmatest_dev[i].obj)
- dev_info(s_dmatest_dev[i].obj->dev, " test_dev index %d\n", i);
+ if (s_dmatest_dev)
+ dev_info(s_dmatest_dev->obj->dev, " test_dev index %d\n", i);
else
break;
}
- dev_info(s_dmatest_dev[test_dev].obj->dev, " is current test_dev\n");
+ dev_info(s_dmatest_dev->obj->dev, " is current test_dev\n");
}
-static int rk_pcie_dma_wait_for_finised(struct dma_trx_obj *obj, struct dma_table *table)
+static int rk_pcie_dma_wait_for_finished(struct dma_trx_obj *obj, struct dma_table *table)
{
- int ret;
+ int ret = 0, timeout_us, i;
- do {
+ timeout_us = table->buf_size / 100 + 1000; /* 100MB/s for redundant calculate */
+
+ for (i = 0; i < timeout_us; i++) {
ret = obj->get_dma_status(obj, table->chn, table->dir);
- } while (!ret);
+ if (ret == 1) {
+ ret = 0;
+ break;
+ } else if (ret < 0) {
+ ret = -EFAULT;
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= timeout_us || ret) {
+ dev_err(obj->dev, "%s timeout\n", __func__);
+ if (obj->dma_debug)
+ obj->dma_debug(obj, table);
+ return -EFAULT;
+ }
return ret;
}
-static int rk_pcie_ep_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
- u32 local_paddr, u32 bus_paddr, u32 size)
+static int rk_pcie_local_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+ u64 local_paddr, u64 bus_paddr, u32 size)
{
+ struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
struct dma_table *table;
- struct dma_trx_obj *obj = dmatest_dev->obj;
int ret;
if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
return -1;
- table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
mutex_lock(&dmatest_dev->rd_lock[chn]);
+
+ table = &dmatest_dev->rd_tbl_buf[chn];
+ memset(table, 0, sizeof(struct dma_table));
+
if (dmatest_dev->irq_en)
reinit_completion(&dmatest_dev->rd_done[chn]);
@@ -128,30 +147,28 @@
else if (ret == 0)
dev_err(obj->dev, "%s timed out\n", __func__);
} else {
- ret = rk_pcie_dma_wait_for_finised(obj, table);
+ ret = rk_pcie_dma_wait_for_finished(obj, table);
}
mutex_unlock(&dmatest_dev->rd_lock[chn]);
-
- kfree(table);
return ret;
}
-static int rk_pcie_ep_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
- u32 bus_paddr, u32 local_paddr, u32 size)
+int rk_pcie_local_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
+ u64 bus_paddr, u64 local_paddr, u32 size)
{
+ struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
struct dma_table *table;
- struct dma_trx_obj *obj = dmatest_dev->obj;
int ret;
if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
return -1;
- table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
mutex_lock(&dmatest_dev->wr_lock[chn]);
+
+ table = &dmatest_dev->wr_tbl_buf[chn];
+ memset(table, 0, sizeof(struct dma_table));
+
if (dmatest_dev->irq_en)
reinit_completion(&dmatest_dev->wr_done[chn]);
@@ -171,25 +188,11 @@
else if (ret == 0)
dev_err(obj->dev, "%s timed out\n", __func__);
} else {
- ret = rk_pcie_dma_wait_for_finised(obj, table);
+ ret = rk_pcie_dma_wait_for_finished(obj, table);
}
mutex_unlock(&dmatest_dev->wr_lock[chn]);
- kfree(table);
-
return ret;
-}
-
-static int rk_pcie_rc_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
- u32 local_paddr, u32 bus_paddr, u32 size)
-{
- return rk_pcie_ep_dma_tobus(dmatest_dev, chn, local_paddr, bus_paddr, size);
-}
-
-static int rk_pcie_rc_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
- u32 bus_paddr, u32 local_paddr, u32 size)
-{
- return rk_pcie_ep_dma_frombus(dmatest_dev, chn, bus_paddr, local_paddr, size);
}
static int rk_pcie_dma_interrupt_handler_call_back(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
@@ -210,8 +213,12 @@
struct dma_trx_obj *pcie_dw_dmatest_register(struct device *dev, bool irq_en)
{
struct dma_trx_obj *obj;
- struct pcie_dw_dmatest_dev *dmatest_dev = &s_dmatest_dev[cur_dmatest_dev];
+ struct pcie_dw_dmatest_dev *dmatest_dev;
int i;
+
+ dmatest_dev = devm_kzalloc(dev, sizeof(struct pcie_dw_dmatest_dev), GFP_KERNEL);
+ if (!dmatest_dev)
+ return ERR_PTR(-ENOMEM);
obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
if (!obj)
@@ -232,13 +239,30 @@
/* Enable IRQ transfer as default */
dmatest_dev->irq_en = irq_en;
- cur_dmatest_dev++;
+ s_dmatest_dev = dmatest_dev;
return obj;
}
+void pcie_dw_dmatest_unregister(struct dma_trx_obj *obj)
+{
+ s_dmatest_dev = NULL;
+}
+
+int pcie_dw_wired_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+ u64 local_paddr, u64 bus_paddr, u32 size)
+{
+ return rk_pcie_local_dma_tobus_block(obj, chn, local_paddr, bus_paddr, size);
+}
+
+int pcie_dw_wired_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
+ u64 bus_paddr, u64 local_paddr, u32 size)
+{
+ return rk_pcie_local_dma_frombus_block(obj, chn, bus_paddr, local_paddr, size);
+}
+
static int dma_test(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
- u32 bus_paddr, u32 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
+ u64 bus_paddr, u64 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
{
ktime_t start_time;
ktime_t end_time;
@@ -259,19 +283,19 @@
start_time = ktime_get();
for (i = 0; i < loop; i++) {
if (rd_en) {
- if (is_rc)
- rk_pcie_rc_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
+ if (is_wired)
+ pcie_dw_wired_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
else
- rk_pcie_ep_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
+ rk_pcie_local_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
dma_sync_single_for_cpu(obj->dev, local_paddr, size, DMA_FROM_DEVICE);
}
if (wr_en) {
dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE);
- if (is_rc)
- rk_pcie_rc_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
+ if (is_wired)
+ pcie_dw_wired_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
else
- rk_pcie_ep_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
+ rk_pcie_local_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
}
}
end_time = ktime_get();
@@ -288,7 +312,7 @@
static int dma_test_ch0(void *p)
{
- dma_test(&s_dmatest_dev[test_dev], 0, bus_addr, local_addr, test_size,
+ dma_test(s_dmatest_dev, 0, bus_addr, local_addr, test_size,
cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
return 0;
@@ -298,10 +322,10 @@
{
/* Test in different area with ch0 */
if (chn_en == 3)
- dma_test(&s_dmatest_dev[test_dev], 1, bus_addr + test_size, local_addr + test_size, test_size,
+ dma_test(s_dmatest_dev, 1, bus_addr + test_size, local_addr + test_size, test_size,
cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
else
- dma_test(&s_dmatest_dev[test_dev], 1, bus_addr, local_addr, test_size,
+ dma_test(s_dmatest_dev, 1, bus_addr, local_addr, test_size,
cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
return 0;
@@ -325,21 +349,19 @@
{
char tmp[8];
- if (!s_dmatest_dev[0].obj) {
+ if (!s_dmatest_dev) {
pr_err("dmatest dev not exits\n");
- kfree(tmp);
return -1;
}
strncpy(tmp, val, 8);
- if (!strncmp(tmp, "run", 3)) {
+ if (!strncmp(tmp, "run", 3))
dma_run();
- } else if (!strncmp(tmp, "show", 4)) {
+ else if (!strncmp(tmp, "show", 4))
pcie_dw_dmatest_show();
- } else {
+ else
pr_info("input error\n");
- }
return 0;
}
--
Gitblit v1.6.2