2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
+++ b/kernel/drivers/pci/controller/dwc/pcie-dw-dmatest.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/init.h>
 #include <linux/ktime.h>
 #include <linux/module.h>
@@ -14,8 +12,9 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
 
-#include "pcie-designware.h"
 #include "pcie-dw-dmatest.h"
 #include "../rockchip-pcie-dma.h"
 
@@ -35,58 +34,29 @@
 module_param(rw_test, uint, 0644);
 MODULE_PARM_DESC(rw_test, "Read/Write test, 1-read 2-write 3-both(default 3)");
 
-static unsigned int bus_addr = 0x3c000000;
-module_param(bus_addr, uint, 0644);
+static unsigned long bus_addr = 0x3c000000;
+module_param(bus_addr, ulong, 0644);
 MODULE_PARM_DESC(bus_addr, "Dmatest chn0 bus_addr(remote), chn1 add offset 0x100000, (default 0x3c000000)");
 
-static unsigned int local_addr = 0x3c000000;
-module_param(local_addr, uint, 0644);
+static unsigned long local_addr = 0x3c000000;
+module_param(local_addr, ulong, 0644);
 MODULE_PARM_DESC(local_addr, "Dmatest chn0 local_addr(local), chn1 add offset 0x100000, (default 0x3c000000)");
 
 static unsigned int test_dev;
 module_param(test_dev, uint, 0644);
 MODULE_PARM_DESC(test_dev, "Choose dma_obj device,(default 0)");
 
-#define PCIE_DW_MISC_DMATEST_DEV_MAX 5
+static bool is_wired;
+module_param_named(is_wired, is_wired, bool, 0644);
+MODULE_PARM_DESC(is_wired, "Transfer is triggered by wired DMA(default false)");
 
-#define PCIE_DMA_OFFSET 0x380000
-
-#define PCIE_DMA_CTRL_OFF 0x8
-#define PCIE_DMA_WR_ENB 0xc
-#define PCIE_DMA_WR_CTRL_LO 0x200
-#define PCIE_DMA_WR_CTRL_HI 0x204
-#define PCIE_DMA_WR_XFERSIZE 0x208
-#define PCIE_DMA_WR_SAR_PTR_LO 0x20c
-#define PCIE_DMA_WR_SAR_PTR_HI 0x210
-#define PCIE_DMA_WR_DAR_PTR_LO 0x214
-#define PCIE_DMA_WR_DAR_PTR_HI 0x218
-#define PCIE_DMA_WR_WEILO 0x18
-#define PCIE_DMA_WR_WEIHI 0x1c
-#define PCIE_DMA_WR_DOORBELL 0x10
-#define PCIE_DMA_WR_INT_STATUS 0x4c
-#define PCIE_DMA_WR_INT_MASK 0x54
-#define PCIE_DMA_WR_INT_CLEAR 0x58
-
-#define PCIE_DMA_RD_ENB 0x2c
-#define PCIE_DMA_RD_CTRL_LO 0x300
-#define PCIE_DMA_RD_CTRL_HI 0x304
-#define PCIE_DMA_RD_XFERSIZE 0x308
-#define PCIE_DMA_RD_SAR_PTR_LO 0x30c
-#define PCIE_DMA_RD_SAR_PTR_HI 0x310
-#define PCIE_DMA_RD_DAR_PTR_LO 0x314
-#define PCIE_DMA_RD_DAR_PTR_HI 0x318
-#define PCIE_DMA_RD_WEILO 0x38
-#define PCIE_DMA_RD_WEIHI 0x3c
-#define PCIE_DMA_RD_DOORBELL 0x30
-#define PCIE_DMA_RD_INT_STATUS 0xa0
-#define PCIE_DMA_RD_INT_MASK 0xa8
-#define PCIE_DMA_RD_INT_CLEAR 0xac
+#define PCIE_DW_MISC_DMATEST_DEV_MAX 1
 
 #define PCIE_DMA_CHANEL_MAX_NUM 2
+#define PCIE_DMA_LL_MAX_NUM 1024 /* Unrestricted, tentative value */
 
 struct pcie_dw_dmatest_dev {
 	struct dma_trx_obj *obj;
-	struct dw_pcie *pci;
 
 	bool irq_en;
 	struct completion rd_done[PCIE_DMA_CHANEL_MAX_NUM];
@@ -94,95 +64,70 @@
 
 	struct mutex rd_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each read DMA channel */
 	struct mutex wr_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each write DMA channel */
+
+	struct dma_table rd_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
+	struct dma_table wr_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM];
 };
 
-static struct pcie_dw_dmatest_dev s_dmatest_dev[PCIE_DW_MISC_DMATEST_DEV_MAX];
-static int cur_dmatest_dev;
+static struct pcie_dw_dmatest_dev *s_dmatest_dev;
 
 static void pcie_dw_dmatest_show(void)
 {
 	int i;
 
 	for (i = 0; i < PCIE_DW_MISC_DMATEST_DEV_MAX; i++) {
-		if (s_dmatest_dev[i].obj)
-			dev_info(s_dmatest_dev[i].obj->dev, " test_dev index %d\n", i);
+		if (s_dmatest_dev)
+			dev_info(s_dmatest_dev->obj->dev, " test_dev index %d\n", i);
 		else
 			break;
 	}
 
-	dev_info(s_dmatest_dev[test_dev].obj->dev, " is current test_dev\n");
+	dev_info(s_dmatest_dev->obj->dev, " is current test_dev\n");
 }
 
-static int rk_pcie_get_dma_status(struct dw_pcie *pci, u8 chn, enum dma_dir dir)
+static int rk_pcie_dma_wait_for_finished(struct dma_trx_obj *obj, struct dma_table *table)
 {
-	union int_status status;
-	union int_clear clears;
-	int ret = 0;
+	int ret = 0, timeout_us, i;
 
-	dev_dbg(pci->dev, "%s %x %x\n", __func__, dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS),
-		dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS));
+	timeout_us = table->buf_size / 100 + 1000; /* assume 100 MB/s, plus 1 ms of margin */
 
-	if (dir == DMA_TO_BUS) {
-		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS);
-		if (status.donesta & BIT(chn)) {
-			clears.doneclr = 0x1 << chn;
-			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_CLEAR, clears.asdword);
-			ret = 1;
+	for (i = 0; i < timeout_us; i++) {
+		ret = obj->get_dma_status(obj, table->chn, table->dir);
+		if (ret == 1) {
+			ret = 0;
+			break;
+		} else if (ret < 0) {
+			ret = -EFAULT;
+			break;
 		}
+		udelay(1);
+	}
 
-		if (status.abortsta & BIT(chn)) {
-			dev_err(pci->dev, "%s, write abort\n", __func__);
-			clears.abortclr = 0x1 << chn;
-			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_CLEAR, clears.asdword);
-			ret = -1;
-		}
-	} else {
-		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS);
-
-		if (status.donesta & BIT(chn)) {
-			clears.doneclr = 0x1 << chn;
-			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_CLEAR, clears.asdword);
-			ret = 1;
-		}
-
-		if (status.abortsta & BIT(chn)) {
-			dev_err(pci->dev, "%s, read abort %x\n", __func__, status.asdword);
-			clears.abortclr = 0x1 << chn;
-			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_CLEAR, clears.asdword);
-			ret = -1;
-		}
+	if (i >= timeout_us || ret) {
+		dev_err(obj->dev, "%s timeout\n", __func__);
+		if (obj->dma_debug)
+			obj->dma_debug(obj, table);
+		return -EFAULT;
 	}
 
 	return ret;
 }
 
-static int rk_pcie_dma_wait_for_finised(struct dma_trx_obj *obj, struct dw_pcie *pci, struct dma_table *table)
+static int rk_pcie_local_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+					   u64 local_paddr, u64 bus_paddr, u32 size)
 {
-	int ret;
-
-	do {
-		ret = rk_pcie_get_dma_status(pci, table->chn, table->dir);
-	} while (!ret);
-
-	return ret;
-}
-
-static int rk_pcie_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-			       u32 local_paddr, u32 bus_paddr, u32 size)
-{
+	struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
 	struct dma_table *table;
-	struct dma_trx_obj *obj = dmatest_dev->obj;
-	struct dw_pcie *pci = dmatest_dev->pci;
 	int ret;
 
 	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
 		return -1;
 
-	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	mutex_lock(&dmatest_dev->rd_lock[chn]);
+
+	table = &dmatest_dev->rd_tbl_buf[chn];
+	memset(table, 0, sizeof(struct dma_table));
+
 	if (dmatest_dev->irq_en)
 		reinit_completion(&dmatest_dev->rd_done[chn]);
 
@@ -202,31 +147,28 @@
 		else if (ret == 0)
 			dev_err(obj->dev, "%s timed out\n", __func__);
 	} else {
-		ret = rk_pcie_dma_wait_for_finised(obj, pci, table);
+		ret = rk_pcie_dma_wait_for_finished(obj, table);
 	}
 	mutex_unlock(&dmatest_dev->rd_lock[chn]);
-
-	kfree(table);
 
 	return ret;
 }
 
-static int rk_pcie_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-			     u32 bus_paddr, u32 local_paddr, u32 size)
+int rk_pcie_local_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
+				  u64 bus_paddr, u64 local_paddr, u32 size)
 {
+	struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;
 	struct dma_table *table;
-	struct dma_trx_obj *obj = dmatest_dev->obj;
-	struct dw_pcie *pci = dmatest_dev->pci;
 	int ret;
 
 	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
 		return -1;
 
-	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
 	mutex_lock(&dmatest_dev->wr_lock[chn]);
+
+	table = &dmatest_dev->wr_tbl_buf[chn];
+	memset(table, 0, sizeof(struct dma_table));
+
 	if (dmatest_dev->irq_en)
 		reinit_completion(&dmatest_dev->wr_done[chn]);
 
@@ -246,11 +188,9 @@
 		else if (ret == 0)
 			dev_err(obj->dev, "%s timed out\n", __func__);
 	} else {
-		ret = rk_pcie_dma_wait_for_finised(obj, pci, table);
+		ret = rk_pcie_dma_wait_for_finished(obj, table);
 	}
 	mutex_unlock(&dmatest_dev->wr_lock[chn]);
-
-	kfree(table);
 
 	return ret;
 }
@@ -270,23 +210,26 @@
 	return 0;
 }
 
-struct dma_trx_obj *pcie_dw_dmatest_register(struct dw_pcie *pci, bool irq_en)
+struct dma_trx_obj *pcie_dw_dmatest_register(struct device *dev, bool irq_en)
 {
 	struct dma_trx_obj *obj;
-	struct pcie_dw_dmatest_dev *dmatest_dev = &s_dmatest_dev[cur_dmatest_dev];
+	struct pcie_dw_dmatest_dev *dmatest_dev;
 	int i;
 
-	obj = devm_kzalloc(pci->dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
+	dmatest_dev = devm_kzalloc(dev, sizeof(struct pcie_dw_dmatest_dev), GFP_KERNEL);
+	if (!dmatest_dev)
+		return ERR_PTR(-ENOMEM);
+
+	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	obj->dev = pci->dev;
+	obj->dev = dev;
 	obj->priv = dmatest_dev;
 	obj->cb = rk_pcie_dma_interrupt_handler_call_back;
 
 	/* Save for dmatest */
 	dmatest_dev->obj = obj;
-	dmatest_dev->pci = pci;
 	for (i = 0; i < PCIE_DMA_CHANEL_MAX_NUM; i++) {
 		init_completion(&dmatest_dev->rd_done[i]);
 		init_completion(&dmatest_dev->wr_done[i]);
@@ -296,13 +239,30 @@
 
 	/* Enable IRQ transfer as default */
 	dmatest_dev->irq_en = irq_en;
-	cur_dmatest_dev++;
+	s_dmatest_dev = dmatest_dev;
 
 	return obj;
 }
 
+void pcie_dw_dmatest_unregister(struct dma_trx_obj *obj)
+{
+	s_dmatest_dev = NULL;
+}
+
+int pcie_dw_wired_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
+				    u64 local_paddr, u64 bus_paddr, u32 size)
+{
+	return rk_pcie_local_dma_tobus_block(obj, chn, local_paddr, bus_paddr, size);
+}
+
+int pcie_dw_wired_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
+				  u64 bus_paddr, u64 local_paddr, u32 size)
+{
+	return rk_pcie_local_dma_frombus_block(obj, chn, bus_paddr, local_paddr, size);
+}
+
 static int dma_test(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
-		    u32 bus_paddr, u32 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
+		    u64 bus_paddr, u64 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
 {
 	ktime_t start_time;
 	ktime_t end_time;
@@ -323,13 +283,19 @@
 	start_time = ktime_get();
 	for (i = 0; i < loop; i++) {
 		if (rd_en) {
-			rk_pcie_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
+			if (is_wired)
+				pcie_dw_wired_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
+			else
+				rk_pcie_local_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size);
 			dma_sync_single_for_cpu(obj->dev, local_paddr, size, DMA_FROM_DEVICE);
 		}
 
 		if (wr_en) {
 			dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE);
-			rk_pcie_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
+			if (is_wired)
+				pcie_dw_wired_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
+			else
+				rk_pcie_local_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size);
 		}
 	}
 	end_time = ktime_get();
@@ -346,7 +312,7 @@
 
 static int dma_test_ch0(void *p)
 {
-	dma_test(&s_dmatest_dev[test_dev], 0, bus_addr, local_addr, test_size,
+	dma_test(s_dmatest_dev, 0, bus_addr, local_addr, test_size,
 		 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 
 	return 0;
@@ -356,10 +322,10 @@
 {
 	/* Test in different area with ch0 */
 	if (chn_en == 3)
-		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr + test_size, local_addr + test_size, test_size,
+		dma_test(s_dmatest_dev, 1, bus_addr + test_size, local_addr + test_size, test_size,
 			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 	else
-		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr, local_addr, test_size,
+		dma_test(s_dmatest_dev, 1, bus_addr, local_addr, test_size,
 			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
 
 	return 0;
@@ -383,21 +349,19 @@
 {
 	char tmp[8];
 
-	if (!s_dmatest_dev[0].obj) {
+	if (!s_dmatest_dev) {
 		pr_err("dmatest dev not exits\n");
-		kfree(tmp);
 
 		return -1;
 	}
 
 	strncpy(tmp, val, 8);
-	if (!strncmp(tmp, "run", 3)) {
+	if (!strncmp(tmp, "run", 3))
 		dma_run();
-	} else if (!strncmp(tmp, "show", 4)) {
+	else if (!strncmp(tmp, "show", 4))
 		pcie_dw_dmatest_show();
-	} else {
+	else
 		pr_info("input error\n");
-	}
 
 	return 0;
 }
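
For reference, a minimal usage sketch of the reworked API, not part of the patch. After this change pcie_dw_dmatest_register() takes a plain struct device, and rk_pcie_dma_wait_for_finished() polls obj->get_dma_status() (and calls obj->dma_debug() on timeout) instead of reading the dbi registers directly, so the controller driver has to supply those hooks itself. The probe/remove wrappers, the hook implementations, and the get_dma_status signature below are assumptions inferred from the calls visible in this patch:

#include <linux/err.h>
#include <linux/platform_device.h>

#include "pcie-dw-dmatest.h"
#include "../rockchip-pcie-dma.h"

/*
 * Assumed controller-side status hook; the signature is inferred from the
 * obj->get_dma_status(obj, table->chn, table->dir) call in the patch.
 * Returns 1 on done, 0 while still running, <0 on abort.
 */
static int rk_pcie_get_dma_status(struct dma_trx_obj *obj, u8 chn, enum dma_dir dir)
{
	/* Would poll the DWC dbi DMA interrupt status registers here. */
	return 1;
}

static int rk_pcie_dmatest_probe(struct platform_device *pdev)
{
	struct dma_trx_obj *obj;

	/* Register against a plain struct device, IRQ completion enabled. */
	obj = pcie_dw_dmatest_register(&pdev->dev, true);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* The blocking wait needs at least the status hook populated. */
	obj->get_dma_status = rk_pcie_get_dma_status;

	platform_set_drvdata(pdev, obj);

	return 0;
}

static int rk_pcie_dmatest_remove(struct platform_device *pdev)
{
	pcie_dw_dmatest_unregister(platform_get_drvdata(pdev));

	return 0;
}

Once registered, writing "run" to the module's control parameter kicks off dma_test_ch0()/dma_test_ch1() against bus_addr/local_addr, routing each transfer through the wired helpers when is_wired is set and the local block helpers otherwise.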