.. | .. |
---|
3 | 3 | * Copyright (c) 2022 Rockchip Electronics Co., Ltd. |
---|
4 | 4 | */ |
---|
5 | 5 | |
---|
6 | | -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
---|
7 | | - |
---|
8 | 6 | #include <linux/init.h> |
---|
9 | 7 | #include <linux/ktime.h> |
---|
10 | 8 | #include <linux/module.h> |
---|
.. | .. |
---|
14 | 12 | #include <linux/slab.h> |
---|
15 | 13 | #include <linux/sched.h> |
---|
16 | 14 | #include <linux/kthread.h> |
---|
| 15 | +#include <linux/delay.h> |
---|
| 16 | +#include <linux/dma-mapping.h> |
---|
17 | 17 | |
---|
18 | | -#include "pcie-designware.h" |
---|
19 | 18 | #include "pcie-dw-dmatest.h" |
---|
20 | 19 | #include "../rockchip-pcie-dma.h" |
---|
21 | 20 | |
---|
.. | .. |
---|
35 | 34 | module_param(rw_test, uint, 0644); |
---|
36 | 35 | MODULE_PARM_DESC(rw_test, "Read/Write test, 1-read 2-write 3-both(default 3)"); |
---|
37 | 36 | |
---|
38 | | -static unsigned int bus_addr = 0x3c000000; |
---|
39 | | -module_param(bus_addr, uint, 0644); |
---|
| 37 | +static unsigned long bus_addr = 0x3c000000; |
---|
| 38 | +module_param(bus_addr, ulong, 0644); |
---|
40 | 39 | MODULE_PARM_DESC(bus_addr, "Dmatest chn0 bus_addr(remote), chn1 add offset 0x100000, (default 0x3c000000)"); |
---|
41 | 40 | |
---|
42 | | -static unsigned int local_addr = 0x3c000000; |
---|
43 | | -module_param(local_addr, uint, 0644); |
---|
| 41 | +static unsigned long local_addr = 0x3c000000; |
---|
| 42 | +module_param(local_addr, ulong, 0644); |
---|
44 | 43 | MODULE_PARM_DESC(local_addr, "Dmatest chn0 local_addr(local), chn1 add offset 0x100000, (default 0x3c000000)"); |
---|
45 | 44 | |
---|
46 | 45 | static unsigned int test_dev; |
---|
47 | 46 | module_param(test_dev, uint, 0644); |
---|
48 | 47 | MODULE_PARM_DESC(test_dev, "Choose dma_obj device,(default 0)"); |
---|
49 | 48 | |
---|
50 | | -#define PCIE_DW_MISC_DMATEST_DEV_MAX 5 |
---|
| 49 | +static bool is_wired; |
---|
| 50 | +module_param_named(is_wired, is_wired, bool, 0644); |
---|
| 51 | +MODULE_PARM_DESC(is_wired, "Transfer is triggered by wired DMA(default false)"); |
---|
51 | 52 | |
---|
52 | | -#define PCIE_DMA_OFFSET 0x380000 |
---|
53 | | - |
---|
54 | | -#define PCIE_DMA_CTRL_OFF 0x8 |
---|
55 | | -#define PCIE_DMA_WR_ENB 0xc |
---|
56 | | -#define PCIE_DMA_WR_CTRL_LO 0x200 |
---|
57 | | -#define PCIE_DMA_WR_CTRL_HI 0x204 |
---|
58 | | -#define PCIE_DMA_WR_XFERSIZE 0x208 |
---|
59 | | -#define PCIE_DMA_WR_SAR_PTR_LO 0x20c |
---|
60 | | -#define PCIE_DMA_WR_SAR_PTR_HI 0x210 |
---|
61 | | -#define PCIE_DMA_WR_DAR_PTR_LO 0x214 |
---|
62 | | -#define PCIE_DMA_WR_DAR_PTR_HI 0x218 |
---|
63 | | -#define PCIE_DMA_WR_WEILO 0x18 |
---|
64 | | -#define PCIE_DMA_WR_WEIHI 0x1c |
---|
65 | | -#define PCIE_DMA_WR_DOORBELL 0x10 |
---|
66 | | -#define PCIE_DMA_WR_INT_STATUS 0x4c |
---|
67 | | -#define PCIE_DMA_WR_INT_MASK 0x54 |
---|
68 | | -#define PCIE_DMA_WR_INT_CLEAR 0x58 |
---|
69 | | - |
---|
70 | | -#define PCIE_DMA_RD_ENB 0x2c |
---|
71 | | -#define PCIE_DMA_RD_CTRL_LO 0x300 |
---|
72 | | -#define PCIE_DMA_RD_CTRL_HI 0x304 |
---|
73 | | -#define PCIE_DMA_RD_XFERSIZE 0x308 |
---|
74 | | -#define PCIE_DMA_RD_SAR_PTR_LO 0x30c |
---|
75 | | -#define PCIE_DMA_RD_SAR_PTR_HI 0x310 |
---|
76 | | -#define PCIE_DMA_RD_DAR_PTR_LO 0x314 |
---|
77 | | -#define PCIE_DMA_RD_DAR_PTR_HI 0x318 |
---|
78 | | -#define PCIE_DMA_RD_WEILO 0x38 |
---|
79 | | -#define PCIE_DMA_RD_WEIHI 0x3c |
---|
80 | | -#define PCIE_DMA_RD_DOORBELL 0x30 |
---|
81 | | -#define PCIE_DMA_RD_INT_STATUS 0xa0 |
---|
82 | | -#define PCIE_DMA_RD_INT_MASK 0xa8 |
---|
83 | | -#define PCIE_DMA_RD_INT_CLEAR 0xac |
---|
| 53 | +#define PCIE_DW_MISC_DMATEST_DEV_MAX 1 |
---|
84 | 54 | |
---|
85 | 55 | #define PCIE_DMA_CHANEL_MAX_NUM 2 |
---|
| 56 | +#define PCIE_DMA_LL_MAX_NUM 1024 /* Unrestricted, tentative value */ |
---|
86 | 57 | |
---|
87 | 58 | struct pcie_dw_dmatest_dev { |
---|
88 | 59 | struct dma_trx_obj *obj; |
---|
89 | | - struct dw_pcie *pci; |
---|
90 | 60 | |
---|
91 | 61 | bool irq_en; |
---|
92 | 62 | struct completion rd_done[PCIE_DMA_CHANEL_MAX_NUM]; |
---|
.. | .. |
---|
94 | 64 | |
---|
95 | 65 | struct mutex rd_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each read DMA channel */ |
---|
96 | 66 | struct mutex wr_lock[PCIE_DMA_CHANEL_MAX_NUM]; /* Corresponding to each write DMA channel */ |
---|
| 67 | + |
---|
| 68 | + struct dma_table rd_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM]; |
---|
| 69 | + struct dma_table wr_tbl_buf[PCIE_DMA_CHANEL_MAX_NUM]; |
---|
97 | 70 | }; |
---|
98 | 71 | |
---|
99 | | -static struct pcie_dw_dmatest_dev s_dmatest_dev[PCIE_DW_MISC_DMATEST_DEV_MAX]; |
---|
100 | | -static int cur_dmatest_dev; |
---|
| 72 | +static struct pcie_dw_dmatest_dev *s_dmatest_dev; |
---|
101 | 73 | |
---|
102 | 74 | static void pcie_dw_dmatest_show(void) |
---|
103 | 75 | { |
---|
104 | 76 | int i; |
---|
105 | 77 | |
---|
106 | 78 | for (i = 0; i < PCIE_DW_MISC_DMATEST_DEV_MAX; i++) { |
---|
107 | | - if (s_dmatest_dev[i].obj) |
---|
108 | | - dev_info(s_dmatest_dev[i].obj->dev, " test_dev index %d\n", i); |
---|
| 79 | + if (s_dmatest_dev) |
---|
| 80 | + dev_info(s_dmatest_dev->obj->dev, " test_dev index %d\n", i); |
---|
109 | 81 | else |
---|
110 | 82 | break; |
---|
111 | 83 | } |
---|
112 | 84 | |
---|
113 | | - dev_info(s_dmatest_dev[test_dev].obj->dev, " is current test_dev\n"); |
---|
| 85 | + dev_info(s_dmatest_dev->obj->dev, " is current test_dev\n"); |
---|
114 | 86 | } |
---|
115 | 87 | |
---|
116 | | -static int rk_pcie_get_dma_status(struct dw_pcie *pci, u8 chn, enum dma_dir dir) |
---|
/*
 * Busy-poll the DMA controller until the transfer described by @table
 * completes, reports an error, or the time budget runs out.
 *
 * The budget is derived from the transfer size assuming a conservative
 * 100 MB/s throughput (buf_size / 100 -> microseconds) plus a fixed
 * 1000 us margin; the status callback is polled once per microsecond.
 *
 * Return: 0 on successful completion, -EFAULT on DMA error or timeout.
 */
static int rk_pcie_dma_wait_for_finished(struct dma_trx_obj *obj, struct dma_table *table)
{
	int ret = 0, timeout_us, i;

	timeout_us = table->buf_size / 100 + 1000; /* 100MB/s for redundant calculate */

	for (i = 0; i < timeout_us; i++) {
		/* get_dma_status(): 1 - done, 0 - still running, <0 - error */
		ret = obj->get_dma_status(obj, table->chn, table->dir);
		if (ret == 1) {
			ret = 0;
			break;
		} else if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		udelay(1);
	}

	/*
	 * Either the loop exhausted its budget (i >= timeout_us) or the
	 * hardware flagged an abort (ret != 0); dump debug state if the
	 * object provides a hook.
	 */
	if (i >= timeout_us || ret) {
		dev_err(obj->dev, "%s timeout\n", __func__);
		if (obj->dma_debug)
			obj->dma_debug(obj, table);
		return -EFAULT;
	}

	return ret;
}
---|
158 | 115 | |
---|
159 | | -static int rk_pcie_dma_wait_for_finised(struct dma_trx_obj *obj, struct dw_pcie *pci, struct dma_table *table) |
---|
| 116 | +static int rk_pcie_local_dma_frombus_block(struct dma_trx_obj *obj, u32 chn, |
---|
| 117 | + u64 local_paddr, u64 bus_paddr, u32 size) |
---|
160 | 118 | { |
---|
161 | | - int ret; |
---|
162 | | - |
---|
163 | | - do { |
---|
164 | | - ret = rk_pcie_get_dma_status(pci, table->chn, table->dir); |
---|
165 | | - } while (!ret); |
---|
166 | | - |
---|
167 | | - return ret; |
---|
168 | | -} |
---|
169 | | - |
---|
170 | | -static int rk_pcie_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn, |
---|
171 | | - u32 local_paddr, u32 bus_paddr, u32 size) |
---|
172 | | -{ |
---|
| 119 | + struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv; |
---|
173 | 120 | struct dma_table *table; |
---|
174 | | - struct dma_trx_obj *obj = dmatest_dev->obj; |
---|
175 | | - struct dw_pcie *pci = dmatest_dev->pci; |
---|
176 | 121 | int ret; |
---|
177 | 122 | |
---|
178 | 123 | if (chn >= PCIE_DMA_CHANEL_MAX_NUM) |
---|
179 | 124 | return -1; |
---|
180 | 125 | |
---|
181 | | - table = kzalloc(sizeof(struct dma_table), GFP_KERNEL); |
---|
182 | | - if (!table) |
---|
183 | | - return -ENOMEM; |
---|
184 | | - |
---|
185 | 126 | mutex_lock(&dmatest_dev->rd_lock[chn]); |
---|
| 127 | + |
---|
| 128 | + table = &dmatest_dev->rd_tbl_buf[chn]; |
---|
| 129 | + memset(table, 0, sizeof(struct dma_table)); |
---|
| 130 | + |
---|
186 | 131 | if (dmatest_dev->irq_en) |
---|
187 | 132 | reinit_completion(&dmatest_dev->rd_done[chn]); |
---|
188 | 133 | |
---|
.. | .. |
---|
202 | 147 | else if (ret == 0) |
---|
203 | 148 | dev_err(obj->dev, "%s timed out\n", __func__); |
---|
204 | 149 | } else { |
---|
205 | | - ret = rk_pcie_dma_wait_for_finised(obj, pci, table); |
---|
| 150 | + ret = rk_pcie_dma_wait_for_finished(obj, table); |
---|
206 | 151 | } |
---|
207 | 152 | mutex_unlock(&dmatest_dev->rd_lock[chn]); |
---|
208 | | - |
---|
209 | | - kfree(table); |
---|
210 | 153 | |
---|
211 | 154 | return ret; |
---|
212 | 155 | } |
---|
213 | 156 | |
---|
214 | | -static int rk_pcie_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn, |
---|
215 | | - u32 bus_paddr, u32 local_paddr, u32 size) |
---|
| 157 | +int rk_pcie_local_dma_tobus_block(struct dma_trx_obj *obj, u32 chn, |
---|
| 158 | + u64 bus_paddr, u64 local_paddr, u32 size) |
---|
216 | 159 | { |
---|
| 160 | + struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv; |
---|
217 | 161 | struct dma_table *table; |
---|
218 | | - struct dma_trx_obj *obj = dmatest_dev->obj; |
---|
219 | | - struct dw_pcie *pci = dmatest_dev->pci; |
---|
220 | 162 | int ret; |
---|
221 | 163 | |
---|
222 | 164 | if (chn >= PCIE_DMA_CHANEL_MAX_NUM) |
---|
223 | 165 | return -1; |
---|
224 | 166 | |
---|
225 | | - table = kzalloc(sizeof(struct dma_table), GFP_KERNEL); |
---|
226 | | - if (!table) |
---|
227 | | - return -ENOMEM; |
---|
228 | | - |
---|
229 | 167 | mutex_lock(&dmatest_dev->wr_lock[chn]); |
---|
| 168 | + |
---|
| 169 | + table = &dmatest_dev->wr_tbl_buf[chn]; |
---|
| 170 | + memset(table, 0, sizeof(struct dma_table)); |
---|
| 171 | + |
---|
230 | 172 | if (dmatest_dev->irq_en) |
---|
231 | 173 | reinit_completion(&dmatest_dev->wr_done[chn]); |
---|
232 | 174 | |
---|
.. | .. |
---|
246 | 188 | else if (ret == 0) |
---|
247 | 189 | dev_err(obj->dev, "%s timed out\n", __func__); |
---|
248 | 190 | } else { |
---|
249 | | - ret = rk_pcie_dma_wait_for_finised(obj, pci, table); |
---|
| 191 | + ret = rk_pcie_dma_wait_for_finished(obj, table); |
---|
250 | 192 | } |
---|
251 | 193 | mutex_unlock(&dmatest_dev->wr_lock[chn]); |
---|
252 | | - |
---|
253 | | - kfree(table); |
---|
254 | 194 | |
---|
255 | 195 | return ret; |
---|
256 | 196 | } |
---|
.. | .. |
---|
270 | 210 | return 0; |
---|
271 | 211 | } |
---|
272 | 212 | |
---|
273 | | -struct dma_trx_obj *pcie_dw_dmatest_register(struct dw_pcie *pci, bool irq_en) |
---|
| 213 | +struct dma_trx_obj *pcie_dw_dmatest_register(struct device *dev, bool irq_en) |
---|
274 | 214 | { |
---|
275 | 215 | struct dma_trx_obj *obj; |
---|
276 | | - struct pcie_dw_dmatest_dev *dmatest_dev = &s_dmatest_dev[cur_dmatest_dev]; |
---|
| 216 | + struct pcie_dw_dmatest_dev *dmatest_dev; |
---|
277 | 217 | int i; |
---|
278 | 218 | |
---|
279 | | - obj = devm_kzalloc(pci->dev, sizeof(struct dma_trx_obj), GFP_KERNEL); |
---|
| 219 | + dmatest_dev = devm_kzalloc(dev, sizeof(struct pcie_dw_dmatest_dev), GFP_KERNEL); |
---|
| 220 | + if (!dmatest_dev) |
---|
| 221 | + return ERR_PTR(-ENOMEM); |
---|
| 222 | + |
---|
| 223 | + obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL); |
---|
280 | 224 | if (!obj) |
---|
281 | 225 | return ERR_PTR(-ENOMEM); |
---|
282 | 226 | |
---|
283 | | - obj->dev = pci->dev; |
---|
| 227 | + obj->dev = dev; |
---|
284 | 228 | obj->priv = dmatest_dev; |
---|
285 | 229 | obj->cb = rk_pcie_dma_interrupt_handler_call_back; |
---|
286 | 230 | |
---|
287 | 231 | /* Save for dmatest */ |
---|
288 | 232 | dmatest_dev->obj = obj; |
---|
289 | | - dmatest_dev->pci = pci; |
---|
290 | 233 | for (i = 0; i < PCIE_DMA_CHANEL_MAX_NUM; i++) { |
---|
291 | 234 | init_completion(&dmatest_dev->rd_done[i]); |
---|
292 | 235 | init_completion(&dmatest_dev->wr_done[i]); |
---|
.. | .. |
---|
296 | 239 | |
---|
297 | 240 | /* Enable IRQ transfer as default */ |
---|
298 | 241 | dmatest_dev->irq_en = irq_en; |
---|
299 | | - cur_dmatest_dev++; |
---|
| 242 | + s_dmatest_dev = dmatest_dev; |
---|
300 | 243 | |
---|
301 | 244 | return obj; |
---|
302 | 245 | } |
---|
303 | 246 | |
---|
/*
 * Drop the module-level dmatest reference.  @obj itself is devm-managed
 * by the device that registered it, so nothing is freed here; @obj is
 * currently unused and kept only for API symmetry with
 * pcie_dw_dmatest_register().
 */
void pcie_dw_dmatest_unregister(struct dma_trx_obj *obj)
{
	s_dmatest_dev = NULL;
}
---|
| 251 | + |
---|
/*
 * "frombus" transfer driven through the wired (remote-triggered) DMA
 * path.  When the remote side drives the engine, the local/bus roles are
 * reversed, so this maps onto the local "tobus" primitive with the two
 * address arguments crossed.
 * NOTE(review): the argument crossing looks intentional (it mirrors
 * pcie_dw_wired_dma_tobus_block below) — confirm against the wired
 * trigger protocol before changing.
 */
int pcie_dw_wired_dma_frombus_block(struct dma_trx_obj *obj, u32 chn,
				    u64 local_paddr, u64 bus_paddr, u32 size)
{
	return rk_pcie_local_dma_tobus_block(obj, chn, local_paddr, bus_paddr, size);
}
---|
| 257 | + |
---|
/*
 * "tobus" transfer driven through the wired (remote-triggered) DMA path.
 * Counterpart of pcie_dw_wired_dma_frombus_block(): the local/bus roles
 * are reversed for a remote-driven transfer, so this maps onto the local
 * "frombus" primitive with the two address arguments crossed.
 * NOTE(review): crossing assumed intentional — confirm against the wired
 * trigger protocol.
 */
int pcie_dw_wired_dma_tobus_block(struct dma_trx_obj *obj, u32 chn,
				  u64 bus_paddr, u64 local_paddr, u32 size)
{
	return rk_pcie_local_dma_frombus_block(obj, chn, bus_paddr, local_paddr, size);
}
---|
| 263 | + |
---|
304 | 264 | static int dma_test(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn, |
---|
305 | | - u32 bus_paddr, u32 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en) |
---|
| 265 | + u64 bus_paddr, u64 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en) |
---|
306 | 266 | { |
---|
307 | 267 | ktime_t start_time; |
---|
308 | 268 | ktime_t end_time; |
---|
.. | .. |
---|
323 | 283 | start_time = ktime_get(); |
---|
324 | 284 | for (i = 0; i < loop; i++) { |
---|
325 | 285 | if (rd_en) { |
---|
326 | | - rk_pcie_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size); |
---|
| 286 | + if (is_wired) |
---|
| 287 | + pcie_dw_wired_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size); |
---|
| 288 | + else |
---|
| 289 | + rk_pcie_local_dma_frombus_block(dmatest_dev->obj, chn, local_paddr, bus_paddr, size); |
---|
327 | 290 | dma_sync_single_for_cpu(obj->dev, local_paddr, size, DMA_FROM_DEVICE); |
---|
328 | 291 | } |
---|
329 | 292 | |
---|
330 | 293 | if (wr_en) { |
---|
331 | 294 | dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE); |
---|
332 | | - rk_pcie_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size); |
---|
| 295 | + if (is_wired) |
---|
| 296 | + pcie_dw_wired_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size); |
---|
| 297 | + else |
---|
| 298 | + rk_pcie_local_dma_tobus_block(dmatest_dev->obj, chn, bus_paddr, local_paddr, size); |
---|
333 | 299 | } |
---|
334 | 300 | } |
---|
335 | 301 | end_time = ktime_get(); |
---|
.. | .. |
---|
346 | 312 | |
---|
347 | 313 | static int dma_test_ch0(void *p) |
---|
348 | 314 | { |
---|
349 | | - dma_test(&s_dmatest_dev[test_dev], 0, bus_addr, local_addr, test_size, |
---|
| 315 | + dma_test(s_dmatest_dev, 0, bus_addr, local_addr, test_size, |
---|
350 | 316 | cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1); |
---|
351 | 317 | |
---|
352 | 318 | return 0; |
---|
.. | .. |
---|
356 | 322 | { |
---|
357 | 323 | /* Test in different area with ch0 */ |
---|
358 | 324 | if (chn_en == 3) |
---|
359 | | - dma_test(&s_dmatest_dev[test_dev], 1, bus_addr + test_size, local_addr + test_size, test_size, |
---|
| 325 | + dma_test(s_dmatest_dev, 1, bus_addr + test_size, local_addr + test_size, test_size, |
---|
360 | 326 | cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1); |
---|
361 | 327 | else |
---|
362 | | - dma_test(&s_dmatest_dev[test_dev], 1, bus_addr, local_addr, test_size, |
---|
| 328 | + dma_test(s_dmatest_dev, 1, bus_addr, local_addr, test_size, |
---|
363 | 329 | cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1); |
---|
364 | 330 | |
---|
365 | 331 | return 0; |
---|
.. | .. |
---|
383 | 349 | { |
---|
384 | 350 | char tmp[8]; |
---|
385 | 351 | |
---|
386 | | - if (!s_dmatest_dev[0].obj) { |
---|
| 352 | + if (!s_dmatest_dev) { |
---|
387 | 353 | pr_err("dmatest dev not exits\n"); |
---|
388 | | - kfree(tmp); |
---|
389 | 354 | |
---|
390 | 355 | return -1; |
---|
391 | 356 | } |
---|
392 | 357 | |
---|
393 | 358 | strncpy(tmp, val, 8); |
---|
394 | | - if (!strncmp(tmp, "run", 3)) { |
---|
| 359 | + if (!strncmp(tmp, "run", 3)) |
---|
395 | 360 | dma_run(); |
---|
396 | | - } else if (!strncmp(tmp, "show", 4)) { |
---|
| 361 | + else if (!strncmp(tmp, "show", 4)) |
---|
397 | 362 | pcie_dw_dmatest_show(); |
---|
398 | | - } else { |
---|
| 363 | + else |
---|
399 | 364 | pr_info("input error\n"); |
---|
400 | | - } |
---|
401 | 365 | |
---|
402 | 366 | return 0; |
---|
403 | 367 | } |
---|