2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/init.h>
 #include <linux/ktime.h>
 #include <linux/module.h>
@@ -14,8 +12,9 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
 
-#include "pcie-designware.h"
 #include "pcie-dw-dmatest.h"
 #include "../rockchip-pcie-dma.h"
 
@@ -35,25 +34,26 @@
 module_param(rw_test, uint, 0644);
 MODULE_PARM_DESC(rw_test, "Read/Write test, 1-read 2-write 3-both(default 3)");
 
-static unsigned int bus_addr = 0x3c000000;
-module_param(bus_addr, uint, 0644);
+static unsigned long bus_addr = 0x3c000000;
+module_param(bus_addr, ulong, 0644);
 MODULE_PARM_DESC(bus_addr, "Dmatest chn0 bus_addr(remote), chn1 add offset 0x100000, (default 0x3c000000)");
 
-static unsigned int local_addr = 0x3c000000;
-module_param(local_addr, uint, 0644);
+static unsigned long local_addr = 0x3c000000;
+module_param(local_addr, ulong, 0644);
 MODULE_PARM_DESC(local_addr, "Dmatest chn0 local_addr(local), chn1 add offset 0x100000, (default 0x3c000000)");
 
 static unsigned int test_dev;
 module_param(test_dev, uint, 0644);
 MODULE_PARM_DESC(test_dev, "Choose dma_obj device,(default 0)");
 
-static bool is_rc = true;
-module_param_named(is_rc, is_rc, bool, 0644);
-MODULE_PARM_DESC(is_rc, "Test port is rc(default true)");
+static bool is_wired;
+module_param_named(is_wired, is_wired, bool, 0644);
+MODULE_PARM_DESC(is_wired, "Transfer is triggered by wired DMA(default false)");
 
-#define PCIE_DW_MISC_DMATEST_DEV_MAX 5
+#define PCIE_DW_MISC_DMATEST_DEV_MAX 1
 
 #define PCIE_DMA_CHANEL_MAX_NUM 2
+#define PCIE_DMA_LL_MAX_NUM 1024 /* Unrestricted, tentative value */
 
 struct pcie_dw_dmatest_dev {
 	struct dma_trx_obj *obj;
@@ -64,51 +64,70 @@
 
 	struct mutex rd_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each read DMA channel */
 	struct mutex wr_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each write DMA channel */
+
+	struct dma_table rd_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
+	struct dma_table wr_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
 };
 
-static struct pcie_dw_dmatest_dev s_dmatest_dev[PCIE_DW_MISC_DMATEST_DEV_MAX];
-static int cur_dmatest_dev;
+static struct pcie_dw_dmatest_dev *s_dmatest_dev;
 
 static void pcie_dw_dmatest_show(void)
 {
 	int i;
 
 	for (i = 0; i < PCIE_DW_MISC_DMATEST_DEV_MAX; i++) {
-		if (s_dmatest_dev[i].obj)
-			dev_info(s_dmatest_dev[i].obj->dev, " test_dev index %d\n", i);
+		if (s_dmatest_dev)
+			dev_info(s_dmatest_dev->obj->dev, " test_dev index %d\n", i);
 		else
 			break;
 	}
 
-	dev_info(s_dmatest_dev[test_dev].obj->dev, " is current test_dev\n");
+	dev_info(s_dmatest_dev->obj->dev, " is current test_dev\n");
 }
 
-static int rk_pcie_dma_wait_for_finised(struct dma_trx_obj *obj, struct dma_table *table)
+static int rk_pcie_dma_wait_for_finished(struct dma_trx_obj *obj, struct dma_table *table)
 {
-	int ret;
+	int ret = 0, timeout_us, i;
 
-	do {
+	timeout_us = table->buf_size / 100 + 1000; /* 100MB/s for redundant calculate */
+
+	for (i = 0; i < timeout_us; i++) {
 		ret = obj->get_dma_status(obj, table->chn, table->dir);
-	} while (!ret);
+		if (ret == 1) {
+			ret = 0;
+			break;
+		} else if (ret < 0) {
+			ret = -EFAULT;
+			break;
+		}
+		udelay(1);
+	}
+
+	if (i >= timeout_us || ret) {
+		dev_err(obj->dev, "%s timeout\n", __func__);
+		if (obj->dma_debug)
+			obj->dma_debug(obj, table);
+		return -EFAULT;
+	}
 
 	return ret;
 }
 
-static int rk_pcie_ep_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-				  u32 local_paddr, u32 bus_paddr, u32 size)
+static int rk_pcie_local_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+					   u64 local_paddr, u64 bus_paddr, u32 size)
 {
+	struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
 	struct dma_table *table;
-	struct dma_trx_obj *obj = dmatest_dev->obj;
 	int ret;
 
 	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
		return -1;
 
-	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	mutex_lock(&dmatest_dev->rd_lock[chn]);
+
+	table = &dmatest_dev->rd_tbl_buf[chn];
+	memset(table, 0, sizeof(struct dma_table));
+
 	if (dmatest_dev->irq_en)
 		reinit_completion(&dmatest_dev->rd_done[chn]);
 
@@ -128,30 +147,28 @@
 		else if (ret == 0)
 			dev_err(obj->dev, "%s timed out\n", __func__);
 	} else {
-		ret = rk_pcie_dma_wait_for_finised(obj, table);
+		ret = rk_pcie_dma_wait_for_finished(obj, table);
 	}
 	mutex_unlock(&dmatest_dev->rd_lock[chn]);
-
-	kfree(table);
 
 	return ret;
 }
 
-static int rk_pcie_ep_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-				u32 bus_paddr, u32 local_paddr, u32 size)
+int rk_pcie_local_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
				  u64 bus_paddr, u64 local_paddr, u32 size)
 {
+	struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
 	struct dma_table *table;
-	struct dma_trx_obj *obj = dmatest_dev->obj;
 	int ret;
 
 	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
		return -1;
 
-	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	mutex_lock(&dmatest_dev->wr_lock[chn]);
+
+	table = &dmatest_dev->wr_tbl_buf[chn];
+	memset(table, 0, sizeof(struct dma_table));
+
 	if (dmatest_dev->irq_en)
 		reinit_completion(&dmatest_dev->wr_done[chn]);
 
@@ -171,25 +188,11 @@
 		else if (ret == 0)
 			dev_err(obj->dev, "%s timed out\n", __func__);
 	} else {
-		ret = rk_pcie_dma_wait_for_finised(obj, table);
+		ret = rk_pcie_dma_wait_for_finished(obj, table);
 	}
 	mutex_unlock(&dmatest_dev->wr_lock[chn]);
 
-	kfree(table);
-
 	return ret;
-}
-
-static int rk_pcie_rc_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-				  u32 local_paddr, u32 bus_paddr, u32 size)
-{
-	return rk_pcie_ep_dma_tobus(dmatest_dev, chn, local_paddr, bus_paddr, size);
-}
-
-static int rk_pcie_rc_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-				u32 bus_paddr, u32 local_paddr, u32 size)
-{
-	return rk_pcie_ep_dma_frombus(dmatest_dev, chn, bus_paddr, local_paddr, size);
 }
 
 static int rk_pcie_dma_interrupt_handler_call_back(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
@@ -210,8 +213,12 @@
 struct dma_trx_obj *pcie_dw_dmatest_register(struct device *dev, bool irq_en)
 {
 	struct dma_trx_obj *obj;
-	struct pcie_dw_dmatest_dev *dmatest_dev = &s_dmatest_dev[cur_dmatest_dev];
+	struct pcie_dw_dmatest_dev *dmatest_dev;
 	int i;
+
+	dmatest_dev = devm_kzalloc(dev, sizeof(struct pcie_dw_dmatest_dev), GFP_KERNEL);
+	if (!dmatest_dev)
+		return ERR_PTR(-ENOMEM);
 
 	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
 	if (!obj)
@@ -232,13 +239,30 @@
 
 	/* Enable IRQ transfer as default */
 	dmatest_dev->irq_en = irq_en;
-	cur_dmatest_dev++;
+	s_dmatest_dev = dmatest_dev;
 
 	return obj;
 }
 
+void pcie_dw_dmatest_unregister(struct dma_trx_obj *obj)
+{
+	s_dmatest_dev = NULL;
+}
+
+int pcie_dw_wired_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+				    u64 local_paddr, u64 bus_paddr, u32 size)
+{
+	return rk_pcie_local_dma_tobus_block(obj, chn, local_paddr, bus_paddr, size);
+}
+
+int pcie_dw_wired_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
+				  u64 bus_paddr, u64 local_paddr, u32 size)
+{
+	return rk_pcie_local_dma_frombus_block(obj, chn, bus_paddr, local_paddr, size);
+}
+
 static int dma_test(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-		    u32 bus_paddr, u32 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
+		    u64 bus_paddr, u64 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
 {
 	ktime_t start_time;
 	ktime_t end_time;
@@ -259,19 +283,19 @@
 	start_time = ktime_get();
 	for (i = 0; i < loop; i++) {
 		if (rd_en) {
-			if (is_rc)
-				rk_pcie_rc_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
+			if (is_wired)
+				pcie_dw_wired_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
 			else
-				rk_pcie_ep_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
+				rk_pcie_local_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
 			dma_sync_single_for_cpu(obj->dev, local_paddr, size, DMA_FROM_DEVICE);
 		}
 
 		if (wr_en) {
 			dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE);
-			if (is_rc)
-				rk_pcie_rc_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
+			if (is_wired)
+				pcie_dw_wired_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
 			else
-				rk_pcie_ep_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
+				rk_pcie_local_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
 		}
 	}
 	end_time = ktime_get();
@@ -288,7 +312,7 @@
 
 static int dma_test_ch0(void *p)
 {
-	dma_test(&s_dmatest_dev[test_dev], 0, bus_addr, local_addr, test_size,
+	dma_test(s_dmatest_dev, 0, bus_addr, local_addr, test_size,
 		 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 
 	return 0;
@@ -298,10 +322,10 @@
 {
 	/* Test in different area with ch0 */
 	if (chn_en == 3)
-		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr + test_size, local_addr + test_size, test_size,
+		dma_test(s_dmatest_dev, 1, bus_addr + test_size, local_addr + test_size, test_size,
			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 	else
-		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr, local_addr, test_size,
+		dma_test(s_dmatest_dev, 1, bus_addr, local_addr, test_size,
			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 
 	return 0;
@@ -325,21 +349,19 @@
 {
 	char tmp[8];
 
-	if (!s_dmatest_dev[0].obj) {
+	if (!s_dmatest_dev) {
 		pr_err("dmatest dev not exits\n");
-		kfree(tmp);
 
 		return -1;
 	}
 
 	strncpy(tmp, val, 8);
-	if (!strncmp(tmp, "run", 3)) {
+	if (!strncmp(tmp, "run", 3))
 		dma_run();
-	} else if (!strncmp(tmp, "show", 4)) {
+	else if (!strncmp(tmp, "show", 4))
 		pcie_dw_dmatest_show();
-	} else {
+	else
 		pr_info("input error\n");
-	}
 
 	return 0;
 }
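---

The key behavioral change in this patch is in rk_pcie_dma_wait_for_finished():
the previous unbounded do/while poll is replaced by a bounded poll whose budget
is derived from the transfer size, assuming a worst-case throughput of 100 MB/s
(roughly 100 bytes per microsecond) plus a fixed 1000 us margin. The following
user-space C sketch illustrates that pattern under those assumptions; the names
get_dma_status() and wait_for_finished() mirror the driver but are illustrative
stand-ins, not the kernel API.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical status probe: 1 = done, 0 = still running, <0 = error. */
static int get_dma_status(void)
{
	static int countdown = 5;	/* pretend the transfer completes after 5 polls */
	return (--countdown <= 0) ? 1 : 0;
}

/* Bounded poll: the budget scales with transfer size at an assumed 100 MB/s. */
static int wait_for_finished(unsigned int buf_size)
{
	int timeout_us = buf_size / 100 + 1000;	/* ~1 us per 100 bytes, plus slack */
	int i, ret;

	for (i = 0; i < timeout_us; i++) {
		ret = get_dma_status();
		if (ret == 1)
			return 0;	/* transfer complete */
		if (ret < 0)
			return -1;	/* hardware reported an error */
		usleep(1);		/* stands in for udelay(1) in the driver */
	}
	return -1;			/* timed out */
}

int main(void)
{
	printf("wait returned %d\n", wait_for_finished(4096));
	return 0;
}

This bounds the worst-case stall at roughly (size / 100 + 1000) microseconds
instead of spinning forever when a channel hangs, which is also why the kernel
version dumps debug state through obj->dma_debug() when the timeout expires.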