...
 #include <linux/mutex.h>
 #include <linux/ctype.h>
 #include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+
 #include <uapi/linux/rk-pcie-ep.h>

 #include "../../pci/controller/rockchip-pcie-dma.h"
 #include "../../pci/controller/dwc/pcie-dw-dmatest.h"
+#if IS_MODULE(CONFIG_PCIE_FUNC_RKEP) && IS_ENABLED(CONFIG_PCIE_DW_DMATEST)
+#include "../../pci/controller/dwc/pcie-dw-dmatest.c"
+#endif

 #define DRV_NAME "pcie-rkep"

...

 static DEFINE_MUTEX(rkep_mutex);
 #define BAR_0_SZ SZ_4M
-#define RKEP_NUM_MSI_VECTORS 4
-#define RKEP_NUM_MSIX_VECTORS 8
+#define RKEP_NUM_IRQ_VECTORS 4

 #define PCIe_CLIENT_MSI_IRQ_OBJ 0 /* rockchip ep object special irq */

...
 #define PCIE_DMA_WR_SAR_PTR_HI 0x210
 #define PCIE_DMA_WR_DAR_PTR_LO 0x214
 #define PCIE_DMA_WR_DAR_PTR_HI 0x218
+#define PCIE_DMA_WR_LL_PTR_LO 0x21c
+#define PCIE_DMA_WR_LL_PTR_HI 0x220
 #define PCIE_DMA_WR_WEILO 0x18
 #define PCIE_DMA_WR_WEIHI 0x1c
 #define PCIE_DMA_WR_DOORBELL 0x10
 #define PCIE_DMA_WR_INT_STATUS 0x4c
 #define PCIE_DMA_WR_INT_MASK 0x54
 #define PCIE_DMA_WR_INT_CLEAR 0x58
+#define PCIE_DMA_WR_ERR_STATUS 0x5c
+#define PCIE_DMA_WR_LL_ERR_EN 0x90

 #define PCIE_DMA_RD_ENB 0x2c
 #define PCIE_DMA_RD_CTRL_LO 0x300
...
 #define PCIE_DMA_RD_SAR_PTR_HI 0x310
 #define PCIE_DMA_RD_DAR_PTR_LO 0x314
 #define PCIE_DMA_RD_DAR_PTR_HI 0x318
+#define PCIE_DMA_RD_LL_PTR_LO 0x31c
+#define PCIE_DMA_RD_LL_PTR_HI 0x320
 #define PCIE_DMA_RD_WEILO 0x38
 #define PCIE_DMA_RD_WEIHI 0x3c
 #define PCIE_DMA_RD_DOORBELL 0x30
 #define PCIE_DMA_RD_INT_STATUS 0xa0
 #define PCIE_DMA_RD_INT_MASK 0xa8
 #define PCIE_DMA_RD_INT_CLEAR 0xac
+#define PCIE_DMA_RD_ERR_STATUS_LOW 0xb8
+#define PCIE_DMA_RD_ERR_STATUS_HIGH 0xbc
+#define PCIE_DMA_RD_LL_ERR_EN 0xc4

 #define PCIE_DMA_CHANEL_MAX_NUM 2

 #define RKEP_USER_MEM_SIZE SZ_64M

 #define PCIE_CFG_ELBI_APP_OFFSET 0xe00
+#define PCIE_CFG_ELBI_USER_DATA_OFF 0x10
+
 #define PCIE_ELBI_REG_NUM 0x2

-struct pcie_rkep_msix_context {
+#define RKEP_EP_ELBI_TIMEOUT_US 100000
+
+#define PCIE_RK3568_RC_DBI_BASE 0xf6000000
+#define PCIE_RK3588_RC_DBI_BASE 0xf5000000
+#define PCIE_DBI_SIZE 0x400000
+
+struct pcie_rkep_irq_context {
 	struct pci_dev *dev;
 	u16 msg_id;
-	u8 *name;
 };

 struct pcie_rkep {
...
 	void __iomem *bar0;
 	void __iomem *bar2;
 	void __iomem *bar4;
-	bool in_used;
+	int cur_mmap_res;
+	struct pcie_rkep_irq_context irq_ctx[RKEP_NUM_IRQ_VECTORS];
+	int irq_valid;
+
 	struct miscdevice dev;
-	struct msix_entry msix_entries[RKEP_NUM_MSIX_VECTORS];
-	struct pcie_rkep_msix_context msix_ctx[RKEP_NUM_MSIX_VECTORS];
-	struct pcie_rkep_msix_context msi_ctx[RKEP_NUM_MSI_VECTORS];
-	bool msi_enable;
-	bool msix_enable;
 	struct dma_trx_obj *dma_obj;
 	struct pcie_ep_obj_info *obj_info;
 	struct page *user_pages; /* Allocated physical memory for user space */
-	struct fasync_struct *async;
+	struct mutex dev_lock_mutex; /* Serialize resources shared across processes, such as virtual IDs and ELBI0 */
+	DECLARE_BITMAP(virtual_id_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	DECLARE_BITMAP(virtual_id_irq_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	wait_queue_head_t wq_head;
 };

-static int pcie_rkep_fasync(int fd, struct file *file, int mode)
-{
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
+struct pcie_file {
+	struct mutex file_lock_mutex;
+	struct pcie_rkep *pcie_rkep;
+	DECLARE_BITMAP(child_vid_bitmap, RKEP_EP_VIRTUAL_ID_MAX); /* Virtual IDs requested through this file handle */
+};

-	return fasync_helper(fd, file, mode, &pcie_rkep->async);
+static int rkep_ep_dma_xfer(struct pcie_rkep *pcie_rkep, struct pcie_ep_dma_block_req *dma)
+{
+	int ret;
+
+	if (dma->wr)
+		ret = pcie_dw_wired_dma_tobus_block(pcie_rkep->dma_obj, dma->chn, dma->block.bus_paddr, dma->block.local_paddr, dma->block.size);
+	else
+		ret = pcie_dw_wired_dma_frombus_block(pcie_rkep->dma_obj, dma->chn, dma->block.local_paddr, dma->block.bus_paddr, dma->block.size);
+
+	return ret;
+}
+
+static int rkep_ep_request_virtual_id(struct pcie_file *pcie_file)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	int index;
+
+	mutex_lock(&pcie_rkep->dev_lock_mutex);
+	index = find_first_zero_bit(pcie_rkep->virtual_id_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "no free virtual id\n");
+		mutex_unlock(&pcie_rkep->dev_lock_mutex);
+		return -EINVAL;
+	}
+	set_bit(index, pcie_rkep->virtual_id_bitmap);
+	mutex_unlock(&pcie_rkep->dev_lock_mutex);
+
+	mutex_lock(&pcie_file->file_lock_mutex);
+	set_bit(index, pcie_file->child_vid_bitmap);
+	mutex_unlock(&pcie_file->file_lock_mutex);
+
+	dev_dbg(&pcie_rkep->pdev->dev, "request virtual id %d\n", index);
+
+	return index;
+}
+
+static int rkep_ep_release_virtual_id(struct pcie_file *pcie_file, int index)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "release virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	if (!test_bit(index, pcie_rkep->virtual_id_bitmap))
+		dev_err(&pcie_rkep->pdev->dev, "release virtual id %d is already free\n", index);
+
+	mutex_lock(&pcie_file->file_lock_mutex);
+	__clear_bit(index, pcie_file->child_vid_bitmap);
+	mutex_unlock(&pcie_file->file_lock_mutex);
+
+	mutex_lock(&pcie_rkep->dev_lock_mutex);
+	__clear_bit(index, pcie_rkep->virtual_id_bitmap);
+	mutex_unlock(&pcie_rkep->dev_lock_mutex);
+
+	dev_dbg(&pcie_rkep->pdev->dev, "release virtual id %d\n", index);
+
+	return 0;
+}
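The two helpers above back the PCIE_EP_REQUEST_VIRTUAL_ID / PCIE_EP_RELEASE_VIRTUAL_ID ioctls wired up later in this patch. A minimal user-space sketch, assuming the misc device shows up as /dev/pcie-rkep and the uapi header installs as <linux/rk-pcie-ep.h> (neither is shown in this hunk):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rk-pcie-ep.h>	/* ioctl macros; install path assumed */

int main(void)
{
	int fd = open("/dev/pcie-rkep", O_RDWR);	/* node name assumed */
	int vid;

	if (fd < 0)
		return 1;

	/* Grab a free slot in the device-wide virtual_id_bitmap. */
	if (ioctl(fd, PCIE_EP_REQUEST_VIRTUAL_ID, &vid) < 0) {
		close(fd);
		return 1;
	}
	printf("got virtual id %d\n", vid);

	/* ... use the ID for user IRQs ... */

	ioctl(fd, PCIE_EP_RELEASE_VIRTUAL_ID, &vid);
	close(fd);
	return 0;
}
```

IDs that are never released explicitly are reclaimed from child_vid_bitmap when the file handle is closed (see pcie_rkep_release() below).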
+
+static int rkep_ep_raise_elbi_irq(struct pcie_file *pcie_file, u32 interrupt_num)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	u32 index, off;
+	int i, gap_us = 100;
+	u32 val;
+	int ret;
+
+	if (interrupt_num >= (PCIE_ELBI_REG_NUM * 16)) {
+		dev_err(&pcie_rkep->pdev->dev, "elbi int num out of max count\n");
+		return -EINVAL;
+	}
+
+	index = interrupt_num / 16;
+	off = interrupt_num % 16;
+
+	for (i = 0; i < RKEP_EP_ELBI_TIMEOUT_US; i += gap_us) {
+		pci_read_config_dword(pcie_rkep->pdev, PCIE_CFG_ELBI_APP_OFFSET + 4 * index, &val);
+		if (val & BIT(off))
+			usleep_range(gap_us, gap_us + 10);
+		else
+			break;
+	}
+
+	if (i >= RKEP_EP_ELBI_TIMEOUT_US)
+		dev_err(&pcie_rkep->pdev->dev, "elbi int is not cleared, status=%x\n", val);
+
+	ret = pci_write_config_dword(pcie_rkep->pdev, PCIE_CFG_ELBI_APP_OFFSET + 4 * index,
+				     (1 << (off + 16)) | (1 << off));
+
+	return ret;
+}
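Since the patch now pulls in <linux/iopoll.h>, the open-coded wait above could equally be written with read_poll_timeout(). A sketch under that assumption — pci_read_config_dword() returns a status code rather than the value, so a small (hypothetical) wrapper is needed:

```c
/* Hypothetical wrapper so read_poll_timeout() can sample the ELBI register. */
static u32 rkep_elbi_status(struct pcie_rkep *pcie_rkep, u32 index)
{
	u32 val = 0;

	pci_read_config_dword(pcie_rkep->pdev,
			      PCIE_CFG_ELBI_APP_OFFSET + 4 * index, &val);
	return val;
}

	/* Inside rkep_ep_raise_elbi_irq(), replacing the for loop: */
	ret = read_poll_timeout(rkep_elbi_status, val, !(val & BIT(off)),
				100, RKEP_EP_ELBI_TIMEOUT_US, false,
				pcie_rkep, index);
	if (ret)
		dev_err(&pcie_rkep->pdev->dev, "elbi int is not cleared, status=%x\n", val);
```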
+
+static int rkep_ep_raise_irq_user_obj(struct pcie_file *pcie_file, u32 index)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	int ret;
+
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "raise irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	pcie_rkep->obj_info->irq_type_ep = OBJ_IRQ_USER;
+	pcie_rkep->obj_info->irq_user_data_ep = index;
+	mutex_lock(&pcie_rkep->dev_lock_mutex);
+	ret = rkep_ep_raise_elbi_irq(pcie_file, 0);
+	mutex_unlock(&pcie_rkep->dev_lock_mutex);
+
+	return ret;
+}
+
+static int rkep_ep_poll_irq_user(struct pcie_file *pcie_file, struct pcie_ep_obj_poll_virtual_id_cfg *cfg)
+{
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	u32 index = cfg->virtual_id;
+
+	if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+		dev_err(&pcie_rkep->pdev->dev, "poll irq_user, virtual id %d out of range\n", index);
+
+		return -EINVAL;
+	}
+
+	cfg->poll_status = NSIGPOLL;
+	if (cfg->sync) {
+		wait_event_interruptible(pcie_rkep->wq_head,
+					 test_bit(index, pcie_rkep->virtual_id_irq_bitmap));
+	} else {
+		wait_event_interruptible_timeout(pcie_rkep->wq_head,
+						 test_bit(index, pcie_rkep->virtual_id_irq_bitmap),
+						 msecs_to_jiffies(cfg->timeout_ms));
+	}
+	if (test_and_clear_bit(index, pcie_rkep->virtual_id_irq_bitmap))
+		cfg->poll_status = POLL_IN;
+
+	dev_dbg(&pcie_rkep->pdev->dev, "poll virtual id %d, ret=%d\n", index, cfg->poll_status);
+
+	return 0;
 }
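On the user-space side, one process can block on PCIE_EP_POLL_IRQ_USER while the peer raises the interrupt with PCIE_EP_RAISE_IRQ_USER. A hypothetical fragment — the struct field names come from the driver code above, while fd/peer_fd and POLL_IN (from <signal.h>) are assumptions:

```c
struct pcie_ep_obj_poll_virtual_id_cfg cfg = {
	.virtual_id = vid,	/* from PCIE_EP_REQUEST_VIRTUAL_ID */
	.sync = 0,		/* 0: bounded wait; 1: wait indefinitely */
	.timeout_ms = 1000,
};

/* Waiter: returns with poll_status == POLL_IN once the irq bit is set. */
if (ioctl(fd, PCIE_EP_POLL_IRQ_USER, &cfg) == 0 && cfg.poll_status == POLL_IN)
	printf("user irq on virtual id %d\n", vid);

/* Raiser (other side of the link): */
ioctl(peer_fd, PCIE_EP_RAISE_IRQ_USER, &vid);
```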

 static int pcie_rkep_open(struct inode *inode, struct file *file)
 {
 	struct miscdevice *miscdev = file->private_data;
 	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
-	int ret = 0;
+	struct pcie_file *pcie_file = NULL;

-	mutex_lock(&rkep_mutex);
+	pcie_file = devm_kzalloc(&pcie_rkep->pdev->dev, sizeof(struct pcie_file), GFP_KERNEL);
+	if (!pcie_file)
+		return -ENOMEM;

-	if (pcie_rkep->in_used)
-		ret = -EINVAL;
-	else
-		pcie_rkep->in_used = true;
+	pcie_file->pcie_rkep = pcie_rkep;

-	mutex_unlock(&rkep_mutex);
+	mutex_init(&pcie_file->file_lock_mutex);

-	return ret;
+	file->private_data = pcie_file;
+
+	return 0;
 }

 static int pcie_rkep_release(struct inode *inode, struct file *file)
 {
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
+	struct pcie_file *pcie_file = file->private_data;
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	int index;

-	mutex_lock(&rkep_mutex);
-	pcie_rkep->in_used = false;
-	pcie_rkep_fasync(-1, file, 0);
-	mutex_unlock(&rkep_mutex);
+	while (1) {
+		mutex_lock(&pcie_file->file_lock_mutex);
+		index = find_first_bit(pcie_file->child_vid_bitmap, RKEP_EP_VIRTUAL_ID_MAX);
+
+		if (index >= RKEP_EP_VIRTUAL_ID_MAX) {
+			mutex_unlock(&pcie_file->file_lock_mutex);
+			break;
+		}
+
+		__clear_bit(index, pcie_file->child_vid_bitmap);
+		mutex_unlock(&pcie_file->file_lock_mutex);
+
+		mutex_lock(&pcie_rkep->dev_lock_mutex);
+		__clear_bit(index, pcie_rkep->virtual_id_bitmap);
+		mutex_unlock(&pcie_rkep->dev_lock_mutex);
+
+		dev_dbg(&pcie_rkep->pdev->dev, "release virtual id %d\n", index);
+	}
+
+	devm_kfree(&pcie_rkep->pdev->dev, pcie_file);

 	return 0;
 }
...
 static ssize_t pcie_rkep_write(struct file *file, const char __user *buf,
 			       size_t count, loff_t *ppos)
 {
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
-	u32 *bar0_buf;
-	int loop, i = 0;
-	size_t raw_count = count;
+	struct pcie_file *pcie_file = file->private_data;
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	struct pci_dev *dev = pcie_rkep->pdev;
+	unsigned int size = count;
+	loff_t init_off = *ppos, off = *ppos;
+	u8 *data;

-	count = (count % 4) ? (count - count % 4) : count;
-
-	if (count > BAR_0_SZ)
-		return -EINVAL;
-
-	bar0_buf = kzalloc(count, GFP_KERNEL);
-	if (!bar0_buf)
+	data = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;

-	if (copy_from_user(bar0_buf, buf, count)) {
-		raw_count = -EFAULT;
-		goto exit;
+	if (off > dev->cfg_size) {
+		kfree(data);
+		return 0;
+	}
+	if (off + count > dev->cfg_size) {
+		size = dev->cfg_size - off;
+		count = size;
 	}

-	for (loop = 0; loop < count / 4; loop++) {
-		iowrite32(bar0_buf[i], pcie_rkep->bar0 + loop * 4);
-		i++;
+	if (copy_from_user(data, buf, count)) {
+		kfree(data);
+		return -EFAULT;
 	}

-exit:
-	kfree(bar0_buf);
+	if ((off & 1) && size) {
+		pci_write_config_byte(dev, off, data[off - init_off]);
+		off++;
+		size--;
+	}

-	return raw_count;
+	if ((off & 3) && size > 2) {
+		u16 val = data[off - init_off];
+
+		val |= (u16) data[off - init_off + 1] << 8;
+		pci_write_config_word(dev, off, val);
+		off += 2;
+		size -= 2;
+	}
+
+	while (size > 3) {
+		u32 val = data[off - init_off];
+
+		val |= (u32) data[off - init_off + 1] << 8;
+		val |= (u32) data[off - init_off + 2] << 16;
+		val |= (u32) data[off - init_off + 3] << 24;
+		pci_write_config_dword(dev, off, val);
+		off += 4;
+		size -= 4;
+	}
+
+	if (size >= 2) {
+		u16 val = data[off - init_off];
+
+		val |= (u16) data[off - init_off + 1] << 8;
+		pci_write_config_word(dev, off, val);
+		off += 2;
+		size -= 2;
+	}
+
+	if (size) {
+		pci_write_config_byte(dev, off, data[off - init_off]);
+		off++;
+		--size;
+	}
+
+	kfree(data);
+
+	return count;
 }

 static ssize_t pcie_rkep_read(struct file *file, char __user *buf,
 			      size_t count, loff_t *ppos)
 {
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
-	u32 *bar0_buf;
-	int loop, i = 0;
-	size_t raw_count = count;
+	struct pcie_file *pcie_file = file->private_data;
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	struct pci_dev *dev = pcie_rkep->pdev;
+	unsigned int size = count;
+	loff_t init_off = *ppos, off = *ppos;
+	u8 *data;

-	count = (count % 4) ? (count - count % 4) : count;
-
-	if (count > BAR_0_SZ)
-		return -EINVAL;
-
-	bar0_buf = kzalloc(count, GFP_ATOMIC);
-	if (!bar0_buf)
+	data = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;

-	for (loop = 0; loop < count / 4; loop++) {
-		bar0_buf[i] = ioread32(pcie_rkep->bar0 + loop * 4);
-		i++;
+	if (off > dev->cfg_size) {
+		kfree(data);
+		return 0;
+	}
+	if (off + count > dev->cfg_size) {
+		size = dev->cfg_size - off;
+		count = size;
 	}

-	if (copy_to_user(buf, bar0_buf, count)) {
-		raw_count = -EFAULT;
-		goto exit;
+	if ((off & 1) && size) {
+		u8 val;
+
+		pci_read_config_byte(dev, off, &val);
+		data[off - init_off] = val;
+		off++;
+		size--;
 	}

-exit:
-	kfree(bar0_buf);
+	if ((off & 3) && size > 2) {
+		u16 val;

-	return raw_count;
+		pci_read_config_word(dev, off, &val);
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		off += 2;
+		size -= 2;
+	}
+
+	while (size > 3) {
+		u32 val;
+
+		pci_read_config_dword(dev, off, &val);
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		data[off - init_off + 2] = (val >> 16) & 0xff;
+		data[off - init_off + 3] = (val >> 24) & 0xff;
+		off += 4;
+		size -= 4;
+	}
+
+	if (size >= 2) {
+		u16 val;
+
+		pci_read_config_word(dev, off, &val);
+		data[off - init_off] = val & 0xff;
+		data[off - init_off + 1] = (val >> 8) & 0xff;
+		off += 2;
+		size -= 2;
+	}
+
+	if (size > 0) {
+		u8 val;
+
+		pci_read_config_byte(dev, off, &val);
+		data[off - init_off] = val;
+		off++;
+		--size;
+	}
+
+	if (copy_to_user(buf, data, count)) {
+		kfree(data);
+		return -EFAULT;
+	}
+
+	kfree(data);
+
+	return count;
 }
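After this rework, read() and write() no longer move BAR0 contents; they forward to the function's own configuration space, splitting the unaligned head and tail into byte/word accesses as above. That is also why .llseek below changes from no_llseek to default_llseek: the file offset now selects the config-space offset. A hypothetical user-space read of the vendor/device IDs (device path assumed):

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t id = 0;
	int fd = open("/dev/pcie-rkep", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	/* Offset 0 of config space: vendor ID in the low 16 bits, device ID above. */
	if (pread(fd, &id, sizeof(id), 0) == sizeof(id))
		printf("vendor=0x%04x device=0x%04x\n", id & 0xffff, id >> 16);

	close(fd);
	return 0;
}
```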

 static int pcie_rkep_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	u64 addr;
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
+	struct pcie_file *pcie_file = file->private_data;
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
+	struct pci_dev *dev = pcie_rkep->pdev;
 	size_t size = vma->vm_end - vma->vm_start;
+	resource_size_t bar_size;
+	int err;

-	if (size > RKEP_USER_MEM_SIZE) {
-		dev_warn(&pcie_rkep->pdev->dev, "mmap size is out of limitation\n");
+	switch (pcie_rkep->cur_mmap_res) {
+	case PCIE_EP_MMAP_RESOURCE_RK3568_RC_DBI:
+		if (size > PCIE_DBI_SIZE) {
+			dev_warn(&pcie_rkep->pdev->dev, "dbi mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+		addr = PCIE_RK3568_RC_DBI_BASE;
+		break;
+	case PCIE_EP_MMAP_RESOURCE_RK3588_RC_DBI:
+		if (size > PCIE_DBI_SIZE) {
+			dev_warn(&pcie_rkep->pdev->dev, "dbi mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+		addr = PCIE_RK3588_RC_DBI_BASE;
+		break;
+	case PCIE_EP_MMAP_RESOURCE_BAR0:
+		bar_size = pci_resource_len(dev, 0);
+		if (size > bar_size) {
+			dev_warn(&pcie_rkep->pdev->dev, "bar0 mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+		addr = pci_resource_start(dev, 0);
+		break;
+	case PCIE_EP_MMAP_RESOURCE_BAR2:
+		bar_size = pci_resource_len(dev, 2);
+		if (size > bar_size) {
+			dev_warn(&pcie_rkep->pdev->dev, "bar2 mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+		addr = pci_resource_start(dev, 2);
+		break;
+	case PCIE_EP_MMAP_RESOURCE_BAR4:
+		bar_size = pci_resource_len(dev, 4);
+		if (size > bar_size) {
+			dev_warn(&pcie_rkep->pdev->dev, "bar4 mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+		addr = pci_resource_start(dev, 4);
+		break;
+	case PCIE_EP_MMAP_RESOURCE_USER_MEM:
+		if (size > RKEP_USER_MEM_SIZE) {
+			dev_warn(&pcie_rkep->pdev->dev, "mmap size exceeds limit\n");
+			return -EINVAL;
+		}
+
+		if (!pcie_rkep->user_pages) {
+			dev_warn(&pcie_rkep->pdev->dev, "user_pages has not been allocated yet\n");
+			return -EINVAL;
+		}
+		addr = page_to_phys(pcie_rkep->user_pages);
+		break;
+	default:
+		dev_err(&pcie_rkep->pdev->dev, "cur mmap_res %d is unsupported\n", pcie_rkep->cur_mmap_res);
 		return -EINVAL;
 	}

-	if (!pcie_rkep->user_pages) {
-		dev_warn(&pcie_rkep->pdev->dev, "user_pages has not been allocated yet\n");
-		return -EINVAL;
-	}
-
-	addr = page_to_phys(pcie_rkep->user_pages);
 	vma->vm_flags |= VM_IO;
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);

-	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, size, vma->vm_page_prot)) {
-		dev_err(&pcie_rkep->pdev->dev, "io_remap_pfn_range failed\n");
+	if (pcie_rkep->cur_mmap_res == PCIE_EP_MMAP_RESOURCE_BAR2 ||
+	    pcie_rkep->cur_mmap_res == PCIE_EP_MMAP_RESOURCE_USER_MEM)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	err = remap_pfn_range(vma, vma->vm_start,
+			      __phys_to_pfn(addr),
+			      size, vma->vm_page_prot);
+	if (err)
 		return -EAGAIN;
-	}

 	return 0;
 }
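Mapping is now a two-step contract: pick a resource with PCIE_EP_SET_MMAP_RESOURCE, then mmap() it. A sketch, assuming BAR0 and the same file descriptor as the earlier examples:

```c
int res = PCIE_EP_MMAP_RESOURCE_BAR0;
size_t len = 4096;	/* must not exceed pci_resource_len(dev, 0) */
void *bar0;

if (ioctl(fd, PCIE_EP_SET_MMAP_RESOURCE, &res) < 0)
	return -1;

bar0 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (bar0 == MAP_FAILED)
	return -1;
```

Note that cur_mmap_res lives in the per-device struct rather than in struct pcie_file, so two processes interleaving the ioctl and the mmap() can race; callers have to serialize the pair themselves.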
...
 static long pcie_rkep_ioctl(struct file *file, unsigned int cmd, unsigned long args)
 {
 	void __user *argp;
-	struct miscdevice *miscdev = file->private_data;
-	struct pcie_rkep *pcie_rkep = container_of(miscdev, struct pcie_rkep, dev);
+	struct pcie_file *pcie_file = file->private_data;
+	struct pcie_rkep *pcie_rkep = pcie_file->pcie_rkep;
 	struct pcie_ep_dma_cache_cfg cfg;
+	struct pcie_ep_dma_block_req dma;
 	void __user *uarg = (void __user *)args;
+	struct pcie_ep_obj_poll_virtual_id_cfg poll_cfg;
+	int mmap_res;
 	int ret;
+	int index;
 	u64 addr;

 	argp = (void __user *)args;
...
 	case PCIE_DMA_CACHE_INVALIDE:
 		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
 		if (ret) {
-			dev_err(&pcie_rkep->pdev->dev, "failed to get copy from\n");
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy invalidate cfg from userspace\n");
 			return -EFAULT;
 		}
 		dma_sync_single_for_cpu(&pcie_rkep->pdev->dev, cfg.addr, cfg.size, DMA_FROM_DEVICE);
...
 	case PCIE_DMA_CACHE_FLUSH:
 		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
 		if (ret) {
-			dev_err(&pcie_rkep->pdev->dev, "failed to get copy from\n");
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy flush cfg from userspace\n");
 			return -EFAULT;
 		}
 		dma_sync_single_for_device(&pcie_rkep->pdev->dev, cfg.addr, cfg.size,
 					   DMA_TO_DEVICE);
+		break;
+	case PCIE_EP_DMA_XFER_BLOCK:
+		ret = copy_from_user(&dma, uarg, sizeof(dma));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy dma_data from userspace\n");
+			return -EFAULT;
+		}
+		ret = rkep_ep_dma_xfer(pcie_rkep, &dma);
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev, "failed to transfer dma, ret=%d\n", ret);
+			return -EFAULT;
+		}
+		break;
+	case PCIE_EP_REQUEST_VIRTUAL_ID:
+		index = rkep_ep_request_virtual_id(pcie_file);
+		if (index < 0) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"request virtual id failed, ret=%d\n", index);
+
+			return -EFAULT;
+		}
+		if (copy_to_user(argp, &index, sizeof(index)))
+			return -EFAULT;
+		break;
+	case PCIE_EP_RELEASE_VIRTUAL_ID:
+		ret = copy_from_user(&index, uarg, sizeof(index));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy release data from userspace\n");
+			return -EFAULT;
+		}
+		ret = rkep_ep_release_virtual_id(pcie_file, index);
+		if (ret < 0) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"release virtual id %d failed, ret=%d\n", index, ret);
+
+			return -EFAULT;
+		}
+		break;
+	case PCIE_EP_RAISE_IRQ_USER:
+		ret = copy_from_user(&index, uarg, sizeof(index));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy raise irq data from userspace\n");
+			return -EFAULT;
+		}
+
+		ret = rkep_ep_raise_irq_user_obj(pcie_file, index);
+		if (ret < 0)
+			return -EFAULT;
+		break;
+	case PCIE_EP_POLL_IRQ_USER:
+		ret = copy_from_user(&poll_cfg, uarg, sizeof(poll_cfg));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy poll irq data from userspace\n");
+
+			return -EFAULT;
+		}
+
+		ret = rkep_ep_poll_irq_user(pcie_file, &poll_cfg);
+		if (ret < 0)
+			return -EFAULT;
+
+		if (copy_to_user(argp, &poll_cfg, sizeof(poll_cfg)))
+			return -EFAULT;
+		break;
+	case PCIE_EP_RAISE_ELBI:
+		ret = copy_from_user(&index, uarg, sizeof(index));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"failed to copy raise elbi data from userspace\n");
+			return -EFAULT;
+		}
+		ret = rkep_ep_raise_elbi_irq(pcie_file, index);
+		if (ret < 0) {
+			dev_err(&pcie_rkep->pdev->dev,
+				"raise elbi %d failed, ret=%d\n", index, ret);
+
+			return -EFAULT;
+		}
+		break;
+	case PCIE_EP_SET_MMAP_RESOURCE:
+		ret = copy_from_user(&mmap_res, uarg, sizeof(mmap_res));
+		if (ret) {
+			dev_err(&pcie_rkep->pdev->dev, "failed to copy mmap_res from userspace\n");
+			return -EFAULT;
+		}
+
+		if (mmap_res >= PCIE_EP_MMAP_RESOURCE_MAX || mmap_res < 0) {
+			dev_err(&pcie_rkep->pdev->dev, "mmap index %d is out of range\n", mmap_res);
+			return -EINVAL;
+		}
+
+		pcie_rkep->cur_mmap_res = mmap_res;
 		break;
 	default:
 		break;
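The PCIE_EP_DMA_XFER_BLOCK case hands a single block descriptor to rkep_ep_dma_xfer() above. A hypothetical request — local_phys and bus_phys stand in for real physical addresses that must be obtained out of band:

```c
struct pcie_ep_dma_block_req dma = {
	.wr = 1,			/* 1: local memory -> bus address */
	.chn = 0,			/* < PCIE_DMA_CHANEL_MAX_NUM */
	.block.local_paddr = local_phys,
	.block.bus_paddr = bus_phys,
	.block.size = 0x100000,
};

if (ioctl(fd, PCIE_EP_DMA_XFER_BLOCK, &dma) < 0)
	perror("PCIE_EP_DMA_XFER_BLOCK");
```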
...
 	.read = pcie_rkep_read,
 	.unlocked_ioctl = pcie_rkep_ioctl,
 	.mmap = pcie_rkep_mmap,
-	.fasync = pcie_rkep_fasync,
 	.release = pcie_rkep_release,
-	.llseek = no_llseek,
+	.llseek = default_llseek,
 };

 static inline void pcie_rkep_writel_dbi(struct pcie_rkep *pcie_rkep, u32 reg, u32 val)
...
 	return readl(pcie_rkep->bar4 + reg);
 }

+static void pcie_rkep_dma_debug(struct dma_trx_obj *obj, struct dma_table *table)
+{
+	struct pci_dev *pdev = container_of(obj->dev, struct pci_dev, dev);
+	struct pcie_rkep *pcie_rkep = pci_get_drvdata(pdev);
+	unsigned int ctr_off = PCIE_DMA_OFFSET + table->chn * 0x200;
+
+	dev_err(&pdev->dev, "chnl=%x\n", table->start.chnl);
+	dev_err(&pdev->dev, "%s\n", table->dir == DMA_FROM_BUS ? "udma read" : "udma write");
+	if (table->dma_mode == RK_PCIE_DMA_BLOCK) {
+		dev_err(&pdev->dev, "src=0x%x %x\n", table->ctx_reg.sarptrhi, table->ctx_reg.sarptrlo);
+		dev_err(&pdev->dev, "dst=0x%x %x\n", table->ctx_reg.darptrhi, table->ctx_reg.darptrlo);
+	} else {
+		dev_err(&pdev->dev, "phys_descs=0x%llx\n", table->phys_descs);
+	}
+	dev_err(&pdev->dev, "xfersize=%x\n", table->ctx_reg.xfersize);
+
+	if (table->dir == DMA_FROM_BUS) {
+		if (table->dma_mode == RK_PCIE_DMA_BLOCK) {
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_INT_MASK = %x\n", PCIE_DMA_RD_INT_MASK, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ENB = %x\n", PCIE_DMA_RD_ENB, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_CTRL_LO = %x\n", ctr_off + PCIE_DMA_RD_CTRL_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_CTRL_HI = %x\n", ctr_off + PCIE_DMA_RD_CTRL_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_XFERSIZE = %x\n", ctr_off + PCIE_DMA_RD_XFERSIZE, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_XFERSIZE));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_SAR_PTR_LO = %x\n", ctr_off + PCIE_DMA_RD_SAR_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_SAR_PTR_HI = %x\n", ctr_off + PCIE_DMA_RD_SAR_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_DAR_PTR_LO = %x\n", ctr_off + PCIE_DMA_RD_DAR_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_DAR_PTR_HI = %x\n", ctr_off + PCIE_DMA_RD_DAR_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_DOORBELL = %x\n", PCIE_DMA_RD_DOORBELL, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_INT_STATUS = %x\n", PCIE_DMA_RD_INT_STATUS, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ERR_STATUS_LOW = %x\n", PCIE_DMA_RD_ERR_STATUS_LOW, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ERR_STATUS_LOW));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ERR_STATUS_HIGH = %x\n", PCIE_DMA_RD_ERR_STATUS_HIGH, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ERR_STATUS_HIGH));
+		} else {
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_INT_MASK = %x\n", PCIE_DMA_RD_INT_MASK, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ENB = %x\n", PCIE_DMA_RD_ENB, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_CTRL_LO = %x\n", ctr_off + PCIE_DMA_RD_CTRL_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_CTRL_HI = %x\n", ctr_off + PCIE_DMA_RD_CTRL_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_LL_PTR_LO = %x\n", ctr_off + PCIE_DMA_RD_LL_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_LL_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_LL_PTR_HI = %x\n", ctr_off + PCIE_DMA_RD_LL_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_LL_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_DOORBELL = %x\n", PCIE_DMA_RD_DOORBELL, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ERR_STATUS_LOW = %x\n", PCIE_DMA_RD_ERR_STATUS_LOW, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ERR_STATUS_LOW));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_RD_ERR_STATUS_HIGH = %x\n", PCIE_DMA_RD_ERR_STATUS_HIGH, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ERR_STATUS_HIGH));
+		}
+	} else {
+		if (table->dma_mode == RK_PCIE_DMA_BLOCK) {
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_INT_MASK = %x\n", PCIE_DMA_WR_INT_MASK, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_ENB = %x\n", PCIE_DMA_WR_ENB, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_CTRL_LO = %x\n", ctr_off + PCIE_DMA_WR_CTRL_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_CTRL_HI = %x\n", ctr_off + PCIE_DMA_WR_CTRL_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_XFERSIZE = %x\n", ctr_off + PCIE_DMA_WR_XFERSIZE, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_XFERSIZE));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_SAR_PTR_LO = %x\n", ctr_off + PCIE_DMA_WR_SAR_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_SAR_PTR_HI = %x\n", ctr_off + PCIE_DMA_WR_SAR_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_DAR_PTR_LO = %x\n", ctr_off + PCIE_DMA_WR_DAR_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_DAR_PTR_HI = %x\n", ctr_off + PCIE_DMA_WR_DAR_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_DOORBELL = %x\n", PCIE_DMA_WR_DOORBELL, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_INT_STATUS = %x\n", PCIE_DMA_WR_INT_STATUS, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_ERR_STATUS = %x\n", PCIE_DMA_WR_ERR_STATUS, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ERR_STATUS));
+		} else {
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_INT_MASK = %x\n", PCIE_DMA_WR_INT_MASK, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_ENB = %x\n", PCIE_DMA_WR_ENB, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_CTRL_LO = %x\n", ctr_off + PCIE_DMA_WR_CTRL_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_CTRL_HI = %x\n", ctr_off + PCIE_DMA_WR_CTRL_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_LL_PTR_LO = %x\n", ctr_off + PCIE_DMA_WR_LL_PTR_LO, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_LL_PTR_LO));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_LL_PTR_HI = %x\n", ctr_off + PCIE_DMA_WR_LL_PTR_HI, pcie_rkep_readl_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_LL_PTR_HI));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_DOORBELL = %x\n", PCIE_DMA_WR_DOORBELL, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_INT_STATUS = %x\n", PCIE_DMA_WR_INT_STATUS, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS));
+			dev_err(&pdev->dev, "reg[0x%x] PCIE_DMA_WR_ERR_STATUS = %x\n", PCIE_DMA_WR_ERR_STATUS, pcie_rkep_readl_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ERR_STATUS));
+		}
+	}
+}
+
 static void pcie_rkep_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
 {
 	struct pci_dev *pdev = container_of(obj->dev, struct pci_dev, dev);
 	struct pcie_rkep *pcie_rkep = pci_get_drvdata(pdev);

-	pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
-			     cur->enb.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_LO,
-			     cur->ctx_reg.ctrllo.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_HI,
-			     cur->ctx_reg.ctrlhi.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_XFERSIZE,
-			     cur->ctx_reg.xfersize);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
-			     cur->ctx_reg.sarptrlo);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
-			     cur->ctx_reg.sarptrhi);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
-			     cur->ctx_reg.darptrlo);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
-			     cur->ctx_reg.darptrhi);
-	pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
-			     cur->start.asdword);
+	if (cur->dma_mode == RK_PCIE_DMA_BLOCK) {
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
+				     cur->enb.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_LO,
+				     cur->ctx_reg.ctrllo.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_HI,
+				     cur->ctx_reg.ctrlhi.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_XFERSIZE,
+				     cur->ctx_reg.xfersize);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
+				     cur->ctx_reg.sarptrlo);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
+				     cur->ctx_reg.sarptrhi);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
+				     cur->ctx_reg.darptrlo);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
+				     cur->ctx_reg.darptrhi);
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
+				     cur->start.asdword);
+	} else {
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
+				     cur->enb.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_LO,
+				     cur->ctx_reg.ctrllo.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_CTRL_HI,
+				     cur->ctx_reg.ctrlhi.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_LL_PTR_LO,
+				     lower_32_bits(cur->phys_descs));
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_RD_LL_PTR_HI,
+				     upper_32_bits(cur->phys_descs));
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
+				     cur->start.asdword);
+	}
+	/* pcie_rkep_dma_debug(obj, cur); */
 }

 static void pcie_rkep_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
...
 	struct pci_dev *pdev = container_of(obj->dev, struct pci_dev, dev);
 	struct pcie_rkep *pcie_rkep = pci_get_drvdata(pdev);

-	pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
-			     cur->enb.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_LO,
-			     cur->ctx_reg.ctrllo.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_HI,
-			     cur->ctx_reg.ctrlhi.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_XFERSIZE,
-			     cur->ctx_reg.xfersize);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
-			     cur->ctx_reg.sarptrlo);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
-			     cur->ctx_reg.sarptrhi);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
-			     cur->ctx_reg.darptrlo);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
-			     cur->ctx_reg.darptrhi);
-	pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_WEILO,
-			     cur->weilo.asdword);
-	pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
-			     cur->start.asdword);
+	if (cur->dma_mode == RK_PCIE_DMA_BLOCK) {
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
+				     cur->enb.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_LO,
+				     cur->ctx_reg.ctrllo.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_HI,
+				     cur->ctx_reg.ctrlhi.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_XFERSIZE,
+				     cur->ctx_reg.xfersize);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
+				     cur->ctx_reg.sarptrlo);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
+				     cur->ctx_reg.sarptrhi);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
+				     cur->ctx_reg.darptrlo);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
+				     cur->ctx_reg.darptrhi);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_WEILO,
+				     cur->weilo.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
+				     cur->start.asdword);
+	} else {
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
+				     cur->enb.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_LO,
+				     cur->ctx_reg.ctrllo.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_CTRL_HI,
+				     cur->ctx_reg.ctrlhi.asdword);
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_LL_PTR_LO,
+				     lower_32_bits(cur->phys_descs));
+		pcie_rkep_writel_dbi(pcie_rkep, ctr_off + PCIE_DMA_WR_LL_PTR_HI,
+				     upper_32_bits(cur->phys_descs));
+		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
+				     cur->start.asdword);
+	}
+	/* pcie_rkep_dma_debug(obj, cur); */
 }

 static void pcie_rkep_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
...

 static void pcie_rkep_config_dma_dwc(struct dma_table *table)
 {
-	table->enb.enb = 0x1;
-	table->ctx_reg.ctrllo.lie = 0x1;
-	table->ctx_reg.ctrllo.rie = 0x0;
-	table->ctx_reg.ctrllo.td = 0x1;
-	table->ctx_reg.ctrlhi.asdword = 0x0;
-	table->ctx_reg.xfersize = table->buf_size;
-	if (table->dir == DMA_FROM_BUS) {
-		table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
-		table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
-		table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
-		table->ctx_reg.darptrhi = (u32)(table->local >> 32);
-	} else if (table->dir == DMA_TO_BUS) {
-		table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
-		table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
-		table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
-		table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
+	if (table->dma_mode == RK_PCIE_DMA_BLOCK) {
+		table->enb.enb = 0x1;
+		table->ctx_reg.ctrllo.lie = 0x1;
+		table->ctx_reg.ctrllo.rie = 0x0;
+		table->ctx_reg.ctrllo.td = 0x1;
+		table->ctx_reg.ctrlhi.asdword = 0x0;
+		table->ctx_reg.xfersize = table->buf_size;
+		if (table->dir == DMA_FROM_BUS) {
+			table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
+			table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
+			table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
+			table->ctx_reg.darptrhi = (u32)(table->local >> 32);
+		} else if (table->dir == DMA_TO_BUS) {
+			table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
+			table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
+			table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
+			table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
+		}
+		table->weilo.weight0 = 0x0;
+		table->start.stop = 0x0;
+		table->start.chnl = table->chn;
+	} else {
+		table->enb.enb = 0x1;
+		table->ctx_reg.ctrllo.lie = 0x1;
+		table->ctx_reg.ctrllo.rie = 0x0;
+		table->ctx_reg.ctrllo.ccs = 1;
+		table->ctx_reg.ctrllo.llen = 1;
+		table->ctx_reg.ctrlhi.asdword = 0x0;
+		table->start.chnl = table->chn;
 	}
-	table->weilo.weight0 = 0x0;
-	table->start.stop = 0x0;
-	table->start.chnl = table->chn;
 }
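In RK_PCIE_DMA_BLOCK mode the transfer parameters are written straight into the channel registers; in linked-list mode only CCS/LLEN and the LL pointer are programmed, and the parameters live in descriptors at phys_descs. For orientation, a sketch of one DWC eDMA list element as documented for that IP — this layout is an assumption about the hardware, not code from this driver:

```c
/* One DWC eDMA linked-list data element (six little-endian dwords).
 * control carries the cycle bit (CB), LIE/RIE, and in the trailing
 * link element an LLP bit plus a pointer to the next list. */
struct dw_edma_ll_element {
	u32 control;
	u32 size;	/* transfer size in bytes */
	u32 sar_low;	/* source address */
	u32 sar_high;
	u32 dar_low;	/* destination address */
	u32 dar_high;
};
```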

 static int pcie_rkep_get_dma_status(struct dma_trx_obj *obj, u8 chn, enum dma_dir dir)
...
 	}

 	if (status.abortsta & BIT(chn)) {
-		dev_err(&pdev->dev, "%s, write abort\n", __func__);
+		dev_err(&pdev->dev, "%s, write abort %x\n", __func__, status.asdword);
 		clears.abortclr = BIT(chn);
 		pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_CLEAR,
 				     clears.asdword);
...
 	u32 irq_type;
 	u32 chn;
 	union int_clear clears;
+	u32 reg;

-	kill_fasync(&pcie_rkep->async, SIGIO, POLL_IN);
 	irq_type = pcie_rkep->obj_info->irq_type_rc;
 	if (irq_type == OBJ_IRQ_DMA) {
 		/* DMA helper */
...
 				}
 			}
 		}
+	} else if (irq_type == OBJ_IRQ_USER) {
+		reg = pcie_rkep->obj_info->irq_user_data_rc;
+		if (reg < RKEP_EP_VIRTUAL_ID_MAX) {
+			set_bit(reg, pcie_rkep->virtual_id_irq_bitmap);
+			wake_up_interruptible(&pcie_rkep->wq_head);
+		}
 	}

 	return 0;
 }

507 | | -static int __maybe_unused rockchip_pcie_raise_elbi_irq(struct pcie_rkep *pcie_rkep, |
---|
508 | | - u8 interrupt_num) |
---|
509 | | -{ |
---|
510 | | - u32 index, off; |
---|
511 | | - |
---|
512 | | - if (interrupt_num >= (PCIE_ELBI_REG_NUM * 16)) { |
---|
513 | | - dev_err(&pcie_rkep->pdev->dev, "elbi int num out of max count\n"); |
---|
514 | | - return -EINVAL; |
---|
515 | | - } |
---|
516 | | - |
---|
517 | | - index = interrupt_num / 16; |
---|
518 | | - off = interrupt_num % 16; |
---|
519 | | - return pci_write_config_dword(pcie_rkep->pdev, PCIE_CFG_ELBI_APP_OFFSET + 4 * index, |
---|
520 | | - (1 << (off + 16)) | (1 << off)); |
---|
521 | | -} |
---|
522 | | - |
---|
523 | 1049 | static irqreturn_t pcie_rkep_pcie_interrupt(int irq, void *context) |
---|
524 | 1050 | { |
---|
525 | | - struct pcie_rkep_msix_context *ctx = context; |
---|
| 1051 | + struct pcie_rkep_irq_context *ctx = context; |
---|
526 | 1052 | struct pci_dev *pdev = ctx->dev; |
---|
527 | 1053 | struct pcie_rkep *pcie_rkep = pci_get_drvdata(pdev); |
---|
528 | 1054 | |
---|
529 | 1055 | if (!pcie_rkep) |
---|
530 | 1056 | return IRQ_HANDLED; |
---|
531 | 1057 | |
---|
532 | | - if (pcie_rkep->msix_enable) |
---|
533 | | - dev_info(&pdev->dev, "MSI-X is triggered for 0x%x\n", ctx->msg_id); |
---|
534 | | - |
---|
535 | | - else /* pcie_rkep->msi_enable */ { |
---|
536 | | - /* |
---|
537 | | - * The msi 0 is the dedicated interrupt for obj to issue remote rc device. |
---|
538 | | - */ |
---|
539 | | - if (irq == pci_irq_vector(pcie_rkep->pdev, PCIe_CLIENT_MSI_IRQ_OBJ)) |
---|
540 | | - pcie_rkep_obj_handler(pcie_rkep, pdev); |
---|
541 | | - } |
---|
| 1058 | + /* |
---|
| 1059 | + * The irq 0 is the dedicated interrupt for obj to issue remote rc device. |
---|
| 1060 | + */ |
---|
| 1061 | + if (irq == pci_irq_vector(pcie_rkep->pdev, PCIe_CLIENT_MSI_IRQ_OBJ)) |
---|
| 1062 | + pcie_rkep_obj_handler(pcie_rkep, pdev); |
---|
542 | 1063 | |
---|
543 | 1064 | return IRQ_HANDLED; |
---|
544 | 1065 | } |
---|
545 | 1066 | |
---|
546 | | -static int __maybe_unused pcie_rkep_request_msi_irq(struct pcie_rkep *pcie_rkep) |
---|
| 1067 | +static void pcie_rkep_release_irq(struct pcie_rkep *pcie_rkep) |
---|
547 | 1068 | { |
---|
548 | | - int nvec, ret = -EINVAL, i, j; |
---|
| 1069 | + int i; |
---|
| 1070 | + |
---|
| 1071 | + if (pcie_rkep->irq_valid) { |
---|
| 1072 | + for (i = 0; i < pcie_rkep->irq_valid; i++) |
---|
| 1073 | + pci_free_irq(pcie_rkep->pdev, i, &pcie_rkep->irq_ctx[i]); |
---|
| 1074 | + |
---|
| 1075 | + pci_free_irq_vectors(pcie_rkep->pdev); |
---|
| 1076 | + } |
---|
| 1077 | + pcie_rkep->irq_valid = 0; |
---|
| 1078 | +} |
---|
| 1079 | + |
---|
| 1080 | +static int pcie_rkep_request_irq(struct pcie_rkep *pcie_rkep, u32 irq_type) |
---|
| 1081 | +{ |
---|
| 1082 | + int nvec, ret = -EINVAL, i; |
---|
549 | 1083 | |
---|
550 | 1084 | /* Using msi as default */ |
---|
551 | | - nvec = pci_alloc_irq_vectors(pcie_rkep->pdev, 1, RKEP_NUM_MSI_VECTORS, PCI_IRQ_MSI); |
---|
| 1085 | + nvec = pci_alloc_irq_vectors(pcie_rkep->pdev, 1, RKEP_NUM_IRQ_VECTORS, irq_type); |
---|
552 | 1086 | if (nvec < 0) |
---|
553 | 1087 | return nvec; |
---|
554 | 1088 | |
---|
555 | | - if (nvec != RKEP_NUM_MSI_VECTORS) |
---|
556 | | - dev_err(&pcie_rkep->pdev->dev, "only allocate %d msi interrupt\n", nvec); |
---|
| 1089 | + if (nvec != RKEP_NUM_IRQ_VECTORS) |
---|
| 1090 | + dev_err(&pcie_rkep->pdev->dev, "only allocate %d irq interrupt, irq_type=%d\n", nvec, irq_type); |
---|
557 | 1091 | |
---|
| 1092 | + pcie_rkep->irq_valid = 0; |
---|
558 | 1093 | for (i = 0; i < nvec; i++) { |
---|
559 | | - pcie_rkep->msi_ctx[i].dev = pcie_rkep->pdev; |
---|
560 | | - pcie_rkep->msi_ctx[i].msg_id = i; |
---|
561 | | - pcie_rkep->msi_ctx[i].name = |
---|
562 | | - devm_kzalloc(&pcie_rkep->pdev->dev, RKEP_NUM_MSIX_VECTORS, GFP_KERNEL); |
---|
563 | | - sprintf(pcie_rkep->msi_ctx[i].name, "%s-%d\n", pcie_rkep->dev.name, i); |
---|
564 | | - ret = request_irq(pci_irq_vector(pcie_rkep->pdev, i), |
---|
565 | | - pcie_rkep_pcie_interrupt, IRQF_SHARED, |
---|
566 | | - pcie_rkep->msi_ctx[i].name, &pcie_rkep->msi_ctx[i]); |
---|
| 1094 | + pcie_rkep->irq_ctx[i].dev = pcie_rkep->pdev; |
---|
| 1095 | + pcie_rkep->irq_ctx[i].msg_id = i; |
---|
| 1096 | + ret = pci_request_irq(pcie_rkep->pdev, i, |
---|
| 1097 | + pcie_rkep_pcie_interrupt, NULL, |
---|
| 1098 | + &pcie_rkep->irq_ctx[i], "%s-%d", pcie_rkep->dev.name, i); |
---|
567 | 1099 | if (ret) |
---|
568 | 1100 | break; |
---|
| 1101 | + pcie_rkep->irq_valid++; |
---|
569 | 1102 | } |
---|
570 | 1103 | |
---|
571 | 1104 | if (ret) { |
---|
572 | | - for (j = 0; j < i; j++) |
---|
573 | | - free_irq(pci_irq_vector(pcie_rkep->pdev, j), &pcie_rkep->msi_ctx[j]); |
---|
574 | | - pci_disable_msi(pcie_rkep->pdev); |
---|
| 1105 | + pcie_rkep_release_irq(pcie_rkep); |
---|
575 | 1106 | dev_err(&pcie_rkep->pdev->dev, "fail to allocate msi interrupt\n"); |
---|
576 | 1107 | } else { |
---|
577 | | - pcie_rkep->msi_enable = true; |
---|
578 | 1108 | dev_err(&pcie_rkep->pdev->dev, "success to request msi irq\n"); |
---|
579 | | - } |
---|
580 | | - |
---|
581 | | - return ret; |
---|
582 | | -} |
---|
583 | | - |
---|
584 | | -static int __maybe_unused pcie_rkep_request_msix_irq(struct pcie_rkep *pcie_rkep) |
---|
585 | | -{ |
---|
586 | | - int ret, i, j; |
---|
587 | | - |
---|
588 | | - for (i = 0; i < RKEP_NUM_MSIX_VECTORS; i++) |
---|
589 | | - pcie_rkep->msix_entries[i].entry = i; |
---|
590 | | - |
---|
591 | | - ret = pci_enable_msix_exact(pcie_rkep->pdev, pcie_rkep->msix_entries, |
---|
592 | | - RKEP_NUM_MSIX_VECTORS); |
---|
593 | | - if (ret) |
---|
594 | | - return ret; |
---|
595 | | - |
---|
596 | | - for (i = 0; i < RKEP_NUM_MSIX_VECTORS; i++) { |
---|
597 | | - pcie_rkep->msix_ctx[i].dev = pcie_rkep->pdev; |
---|
598 | | - pcie_rkep->msix_ctx[i].msg_id = i; |
---|
599 | | - pcie_rkep->msix_ctx[i].name = |
---|
600 | | - devm_kzalloc(&pcie_rkep->pdev->dev, RKEP_NUM_MSIX_VECTORS, GFP_KERNEL); |
---|
601 | | - sprintf(pcie_rkep->msix_ctx[i].name, "%s-%d\n", pcie_rkep->dev.name, i); |
---|
602 | | - ret = request_irq(pcie_rkep->msix_entries[i].vector, |
---|
603 | | - pcie_rkep_pcie_interrupt, 0, pcie_rkep->msix_ctx[i].name, |
---|
604 | | - &pcie_rkep->msix_ctx[i]); |
---|
605 | | - |
---|
606 | | - if (ret) |
---|
607 | | - break; |
---|
608 | | - } |
---|
609 | | - |
---|
610 | | - if (ret) { |
---|
611 | | - for (j = 0; j < i; j++) |
---|
612 | | - free_irq(pcie_rkep->msix_entries[j].vector, &pcie_rkep->msix_ctx[j]); |
---|
613 | | - pci_disable_msix(pcie_rkep->pdev); |
---|
614 | | - dev_err(&pcie_rkep->pdev->dev, "fail to allocate msi-x interrupt\n"); |
---|
615 | | - } else { |
---|
616 | | - pcie_rkep->msix_enable = true; |
---|
617 | | - dev_err(&pcie_rkep->pdev->dev, "success to request msi-x irq\n"); |
---|
618 | 1109 | } |
---|
619 | 1110 | |
---|
620 | 1111 | return ret; |
---|
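The hunk above collapses the separate MSI and MSI-X request paths into the generic pci_alloc_irq_vectors()/pci_request_irq() API. A free-standing sketch of that pattern with MSI-X-to-MSI fallback; every name here is illustrative, not part of the patch:

	/* Illustrative handler; a real one would dispatch on the vector. */
	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_setup_irqs(struct pci_dev *pdev, void *ctx)
	{
		int nvec, i, ret;

		/* Ask for up to 4 vectors; the core picks the best supported type. */
		nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_request_irq() builds the name and tracks dev_id per vector. */
			ret = pci_request_irq(pdev, i, example_handler, NULL, ctx,
					      "example-%d", i);
			if (ret)
				goto err_free;
		}

		return 0;

	err_free:
		while (--i >= 0)
			pci_free_irq(pdev, i, ctx);
		pci_free_irq_vectors(pdev);
		return ret;
	}

Note the teardown order, which pcie_rkep_release_irq() above also follows: every pci_free_irq() must happen before pci_free_irq_vectors().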
.. | .. |
---|
637 | 1128 | dev_info(dev, "%s file %s size %lld to %p\n", __func__, path, size, bar + pos); |
---|
638 | 1129 | |
---|
639 | 1130 | offset = 0; |
---|
640 | | - kernel_read(p_file, bar + pos, size, &offset); |
---|
| 1131 | + kernel_read(p_file, (void *)bar + pos, (size_t)size, (loff_t *)&offset); |
---|
641 | 1132 | |
---|
642 | 1133 | dev_info(dev, "kernel_read size %lld from %s to %p\n", size, path, bar + pos); |
---|
643 | 1134 | |
---|
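On the kernel_read() line above, the (void *) cast discards the BAR pointer's __iomem annotation, and the (loff_t *) cast is redundant since offset is already a loff_t. A sparse-clean alternative, sketched under the assumption that chunked copying is acceptable, reads into a bounce buffer and copies with memcpy_toio():

	/* Sketch: load a file into an ioremapped BAR without casting away __iomem. */
	static int example_load_file_to_bar(struct file *p_file, void __iomem *bar,
					    loff_t size)
	{
		loff_t offset = 0;
		ssize_t n;
		void *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		while (offset < size) {
			n = kernel_read(p_file, buf,
					min_t(loff_t, PAGE_SIZE, size - offset), &offset);
			if (n <= 0)
				break;
			/* kernel_read() already advanced 'offset' by 'n' bytes. */
			memcpy_toio(bar + offset - n, buf, n);
		}

		kfree(buf);
		return offset == size ? 0 : -EIO;
	}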
.. | .. |
---|
670 | 1161 | |
---|
671 | 1162 | static int pcie_rkep_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
---|
672 | 1163 | { |
---|
673 | | - int ret, i; |
---|
| 1164 | + int ret; |
---|
674 | 1165 | struct pcie_rkep *pcie_rkep; |
---|
675 | 1166 | u8 *name; |
---|
676 | 1167 | u16 val; |
---|
| 1168 | + bool dmatest_irq = false; |
---|
677 | 1169 | |
---|
678 | 1170 | pcie_rkep = devm_kzalloc(&pdev->dev, sizeof(*pcie_rkep), GFP_KERNEL); |
---|
679 | 1171 | if (!pcie_rkep) |
---|
680 | 1172 | return -ENOMEM; |
---|
| 1173 | + |
---|
| 1174 | + name = devm_kzalloc(&pdev->dev, MISC_DEV_NAME_MAX_LENGTH, GFP_KERNEL); |
---|
| 1175 | + if (!name) |
---|
| 1176 | + return -ENOMEM; |
---|
| 1177 | + |
---|
| 1178 | + set_bit(0, pcie_rkep->virtual_id_bitmap); |
---|
681 | 1179 | |
---|
682 | 1180 | ret = pci_enable_device(pdev); |
---|
683 | 1181 | if (ret) { |
---|
.. | .. |
---|
717 | 1215 | |
---|
718 | 1216 | dev_dbg(&pdev->dev, "get bar4 address is %p\n", pcie_rkep->bar4); |
---|
719 | 1217 | |
---|
720 | | - name = devm_kzalloc(&pdev->dev, MISC_DEV_NAME_MAX_LENGTH, GFP_KERNEL); |
---|
721 | | - if (!name) { |
---|
722 | | - ret = -ENOMEM; |
---|
723 | | - goto err_pci_iomap; |
---|
724 | | - } |
---|
725 | 1218 | sprintf(name, "%s-%s", DRV_NAME, dev_name(&pdev->dev)); |
---|
726 | 1219 | pcie_rkep->dev.minor = MISC_DYNAMIC_MINOR; |
---|
727 | 1220 | pcie_rkep->dev.name = name; |
---|
728 | 1221 | pcie_rkep->dev.fops = &pcie_rkep_fops; |
---|
729 | 1222 | pcie_rkep->dev.parent = NULL; |
---|
| 1223 | + |
---|
| 1224 | + mutex_init(&pcie_rkep->dev_lock_mutex); |
---|
730 | 1225 | |
---|
731 | 1226 | ret = misc_register(&pcie_rkep->dev); |
---|
732 | 1227 | if (ret) { |
---|
.. | .. |
---|
738 | 1233 | |
---|
739 | 1234 | pci_set_drvdata(pdev, pcie_rkep); |
---|
740 | 1235 | |
---|
741 | | - ret = pcie_rkep_request_msi_irq(pcie_rkep); |
---|
| 1236 | + init_waitqueue_head(&pcie_rkep->wq_head); |
---|
| 1237 | + ret = pcie_rkep_request_irq(pcie_rkep, PCI_IRQ_MSI); |
---|
742 | 1238 | if (ret) |
---|
743 | 1239 | goto err_register_irq; |
---|
744 | 1240 | |
---|
745 | | - pcie_rkep->dma_obj = pcie_dw_dmatest_register(&pdev->dev, true); |
---|
| 1241 | + pcie_rkep->dma_obj = pcie_dw_dmatest_register(&pdev->dev, dmatest_irq); |
---|
746 | 1242 | if (IS_ERR(pcie_rkep->dma_obj)) { |
---|
747 | 1243 | dev_err(&pcie_rkep->pdev->dev, "failed to prepare dmatest\n"); |
---|
748 | 1244 | ret = -EINVAL; |
---|
.. | .. |
---|
750 | 1246 | } |
---|
751 | 1247 | |
---|
752 | 1248 | if (pcie_rkep->dma_obj) { |
---|
| 1249 | + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
---|
753 | 1250 | pcie_rkep->dma_obj->start_dma_func = pcie_rkep_start_dma_dwc; |
---|
754 | 1251 | pcie_rkep->dma_obj->config_dma_func = pcie_rkep_config_dma_dwc; |
---|
755 | 1252 | pcie_rkep->dma_obj->get_dma_status = pcie_rkep_get_dma_status; |
---|
| 1253 | + pcie_rkep->dma_obj->dma_debug = pcie_rkep_dma_debug; |
---|
| 1254 | + if (!dmatest_irq) { |
---|
| 1255 | + pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK, 0xffffffff); |
---|
| 1256 | + pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK, 0xffffffff); |
---|
| 1257 | + |
---|
| 1258 | + /* Enable linked-list error reporting */ |
---|
| 1259 | + pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_WR_LL_ERR_EN, 0xffffffff); |
---|
| 1260 | + pcie_rkep_writel_dbi(pcie_rkep, PCIE_DMA_OFFSET + PCIE_DMA_RD_LL_ERR_EN, 0xffffffff); |
---|
| 1261 | + } |
---|
756 | 1262 | } |
---|
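With both DMA interrupt masks written to 0xffffffff, completions must be discovered by polling rather than from the IRQ. A minimal sketch of a write-channel poll built on readl_poll_timeout() and the DMA register offsets this file defines; the DONE bit position is a placeholder, since the real done/abort layout comes from the DesignWare eDMA documentation:

	#define EXAMPLE_WR_DONE_MASK	BIT(0)	/* placeholder: channel 0 done */

	/* Sketch: spin on the write-channel status until done or timeout. */
	static int example_poll_wr_done(void __iomem *dbi)
	{
		u32 sts;
		int ret;

		ret = readl_poll_timeout(dbi + PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS,
					 sts, sts & EXAMPLE_WR_DONE_MASK, 10, 100000);
		if (ret)
			return ret; /* -ETIMEDOUT */

		/* The status bits are write-1-to-clear. */
		writel(sts & EXAMPLE_WR_DONE_MASK,
		       dbi + PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_CLEAR);

		return 0;
	}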
757 | 1263 | |
---|
758 | 1264 | #if IS_ENABLED(CONFIG_PCIE_FUNC_RKEP_USERPAGES) |
---|
.. | .. |
---|
761 | 1267 | if (!pcie_rkep->user_pages) { |
---|
762 | 1268 | dev_err(&pcie_rkep->pdev->dev, "failed to allocate contiguous pages\n"); |
---|
763 | 1269 | ret = -EINVAL; |
---|
| 1270 | + if (pcie_rkep->dma_obj) |
---|
| 1271 | + pcie_dw_dmatest_unregister(pcie_rkep->dma_obj); |
---|
764 | 1272 | goto err_register_obj; |
---|
765 | 1273 | } |
---|
| 1274 | + pcie_rkep->cur_mmap_res = PCIE_EP_MMAP_RESOURCE_USER_MEM; |
---|
766 | 1275 | dev_err(&pdev->dev, "successfully allocate continuouse buffer for userspace\n"); |
---|
767 | 1276 | #endif |
---|
768 | 1277 | |
---|
.. | .. |
---|
777 | 1286 | |
---|
778 | 1287 | return 0; |
---|
779 | 1288 | err_register_obj: |
---|
780 | | - if (pcie_rkep->msix_enable) { |
---|
781 | | - for (i = 0; i < RKEP_NUM_MSIX_VECTORS; i++) |
---|
782 | | - free_irq(pcie_rkep->msix_entries[i].vector, &pcie_rkep->msix_ctx[i]); |
---|
783 | | - pci_disable_msix(pdev); |
---|
784 | | - } else if (pcie_rkep->msi_enable) { |
---|
785 | | - for (i = 0; i < RKEP_NUM_MSI_VECTORS; i++) { |
---|
786 | | - if (pcie_rkep->msi_ctx[i].dev) |
---|
787 | | - free_irq(pci_irq_vector(pdev, i), &pcie_rkep->msi_ctx[i]); |
---|
788 | | - } |
---|
789 | | - |
---|
790 | | - pci_disable_msi(pcie_rkep->pdev); |
---|
791 | | - } |
---|
| 1289 | + pcie_rkep_release_irq(pcie_rkep); |
---|
792 | 1290 | err_register_irq: |
---|
793 | 1291 | misc_deregister(&pcie_rkep->dev); |
---|
794 | 1292 | err_pci_iomap: |
---|
.. | .. |
---|
809 | 1307 | static void pcie_rkep_remove(struct pci_dev *pdev) |
---|
810 | 1308 | { |
---|
811 | 1309 | struct pcie_rkep *pcie_rkep = pci_get_drvdata(pdev); |
---|
812 | | - int i; |
---|
| 1310 | + |
---|
| 1311 | + if (pcie_rkep->dma_obj) |
---|
| 1312 | + pcie_dw_dmatest_unregister(pcie_rkep->dma_obj); |
---|
813 | 1313 | |
---|
814 | 1314 | device_remove_file(&pdev->dev, &dev_attr_rkep); |
---|
815 | 1315 | #if IS_ENABLED(CONFIG_PCIE_FUNC_RKEP_USERPAGES) |
---|
816 | 1316 | free_contig_range(page_to_pfn(pcie_rkep->user_pages), RKEP_USER_MEM_SIZE >> PAGE_SHIFT); |
---|
817 | 1317 | #endif |
---|
818 | | - pci_iounmap(pdev, pcie_rkep->bar0); |
---|
| 1318 | + pcie_rkep_release_irq(pcie_rkep); |
---|
| 1319 | + |
---|
| 1320 | + if (pcie_rkep->bar0) |
---|
| 1321 | + pci_iounmap(pdev, pcie_rkep->bar0); |
---|
| 1322 | + if (pcie_rkep->bar2) |
---|
| 1323 | + pci_iounmap(pdev, pcie_rkep->bar2); |
---|
| 1324 | + if (pcie_rkep->bar4) |
---|
| 1325 | + pci_iounmap(pdev, pcie_rkep->bar4); |
---|
819 | 1326 | pci_release_regions(pdev); |
---|
820 | 1327 | pci_disable_device(pdev); |
---|
821 | 1328 | misc_deregister(&pcie_rkep->dev); |
---|
822 | | - |
---|
823 | | - if (pcie_rkep->msix_enable) { |
---|
824 | | - for (i = 0; i < RKEP_NUM_MSIX_VECTORS; i++) |
---|
825 | | - free_irq(pcie_rkep->msix_entries[i].vector, &pcie_rkep->msix_ctx[i]); |
---|
826 | | - pci_disable_msix(pdev); |
---|
827 | | - } else if (pcie_rkep->msi_enable) { |
---|
828 | | - for (i = 0; i < RKEP_NUM_MSI_VECTORS; i++) |
---|
829 | | - if (pcie_rkep->msi_ctx[i].dev) |
---|
830 | | - free_irq(pci_irq_vector(pdev, i), &pcie_rkep->msi_ctx[i]); |
---|
831 | | - pci_disable_msi(pcie_rkep->pdev); |
---|
832 | | - } |
---|
833 | 1329 | } |
---|
834 | 1330 | |
---|
835 | 1331 | static const struct pci_device_id pcie_rkep_pcidev_id[] = { |
---|