@@ -15,7 +15,10 @@
 #include "rknpu_ioctl.h"
 #include "rknpu_mem.h"
 
-int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+#ifdef CONFIG_ROCKCHIP_RKNPU_DMA_HEAP
+
+int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			   struct file *file)
 {
 	struct rknpu_mem_create args;
 	int ret = -EINVAL;
@@ -27,6 +30,7 @@
 	struct page **pages;
 	struct page *page;
 	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_session *session = NULL;
 	int i, fd;
 	unsigned int length, page_count;
 
@@ -65,6 +69,8 @@
 				  O_CLOEXEC | O_RDWR, 0x0,
 				  dev_name(rknpu_dev->dev));
 	if (IS_ERR(dmabuf)) {
+		LOG_ERROR("dmabuf alloc failed, args.size = %llu\n",
+			  args.size);
 		ret = PTR_ERR(dmabuf);
 		goto err_free_obj;
 	}
@@ -74,6 +80,7 @@
 
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC | O_RDWR);
 	if (fd < 0) {
+		LOG_ERROR("dmabuf fd get failed\n");
 		ret = -EFAULT;
 		goto err_free_dma_buf;
 	}
@@ -81,12 +88,14 @@
 
 	attachment = dma_buf_attach(dmabuf, rknpu_dev->dev);
 	if (IS_ERR(attachment)) {
+		LOG_ERROR("dma_buf_attach failed\n");
 		ret = PTR_ERR(attachment);
 		goto err_free_dma_buf;
 	}
 
 	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 	if (IS_ERR(table)) {
+		LOG_ERROR("dma_buf_map_attachment failed\n");
 		dma_buf_detach(dmabuf, attachment);
 		ret = PTR_ERR(table);
 		goto err_free_dma_buf;
@@ -101,8 +110,9 @@
 	}
 
 	page_count = length >> PAGE_SHIFT;
-	pages = kmalloc_array(page_count, sizeof(struct page), GFP_KERNEL);
+	pages = vmalloc(page_count * sizeof(struct page));
 	if (!pages) {
+		LOG_ERROR("alloc pages failed\n");
 		ret = -ENOMEM;
 		goto err_detach_dma_buf;
 	}
@@ -112,6 +122,7 @@
 
 	rknpu_obj->kv_addr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
 	if (!rknpu_obj->kv_addr) {
+		LOG_ERROR("vmap pages addr failed\n");
 		ret = -ENOMEM;
 		goto err_free_pages;
 	}
@@ -137,9 +148,22 @@
 		goto err_unmap_kv_addr;
 	}
 
-	kfree(pages);
+	vfree(pages);
+	pages = NULL;
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
 	dma_buf_detach(dmabuf, attachment);
+
+	spin_lock(&rknpu_dev->lock);
+
+	session = file->private_data;
+	if (!session) {
+		spin_unlock(&rknpu_dev->lock);
+		ret = -EFAULT;
+		goto err_unmap_kv_addr;
+	}
+	list_add_tail(&rknpu_obj->head, &session->list);
+
+	spin_unlock(&rknpu_dev->lock);
 
 	return 0;
 
@@ -148,7 +172,8 @@
 	rknpu_obj->kv_addr = NULL;
 
 err_free_pages:
-	kfree(pages);
+	vfree(pages);
+	pages = NULL;
 
 err_detach_dma_buf:
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
@@ -166,11 +191,12 @@
 	return ret;
 }
 
-int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			    struct file *file)
 {
-	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_mem_object *rknpu_obj, *entry, *q;
+	struct rknpu_session *session = NULL;
 	struct rknpu_mem_destroy args;
-	struct dma_buf *dmabuf;
 	int ret = -EFAULT;
 
 	if (unlikely(copy_from_user(&args, (struct rknpu_mem_destroy *)data,
@@ -188,28 +214,91 @@
 	}
 
 	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
-	dmabuf = rknpu_obj->dmabuf;
 	LOG_DEBUG(
 		"free args.handle: %d, rknpu_obj: %#llx, rknpu_obj->dma_addr: %#llx\n",
 		args.handle, (__u64)(uintptr_t)rknpu_obj,
 		(__u64)rknpu_obj->dma_addr);
 
-	vunmap(rknpu_obj->kv_addr);
-	rknpu_obj->kv_addr = NULL;
+	spin_lock(&rknpu_dev->lock);
+	session = file->private_data;
+	if (!session) {
+		spin_unlock(&rknpu_dev->lock);
+		ret = -EFAULT;
+		return ret;
+	}
+	list_for_each_entry_safe(entry, q, &session->list, head) {
+		if (entry == rknpu_obj) {
+			list_del(&entry->head);
+			break;
+		}
+	}
+	spin_unlock(&rknpu_dev->lock);
 
-	if (!rknpu_obj->owner)
-		dma_buf_put(dmabuf);
+	if (rknpu_obj == entry) {
+		vunmap(rknpu_obj->kv_addr);
+		rknpu_obj->kv_addr = NULL;
 
-	kfree(rknpu_obj);
+		if (!rknpu_obj->owner)
+			dma_buf_put(rknpu_obj->dmabuf);
+
+		kfree(rknpu_obj);
+	}
 
 	return 0;
+}
+
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+static void __maybe_unused rknpu_dma_buf_sync(
+	struct rknpu_device *rknpu_dev, struct rknpu_mem_object *rknpu_obj,
+	u32 offset, u32 length, enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = rknpu_dev->dev;
+	struct sg_table *sgt = rknpu_obj->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
 }
 
 int rknpu_mem_sync_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
 {
 	struct rknpu_mem_object *rknpu_obj = NULL;
 	struct rknpu_mem_sync args;
+#ifdef CONFIG_DMABUF_PARTIAL
 	struct dma_buf *dmabuf;
+#endif
 	int ret = -EFAULT;
 
 	if (unlikely(copy_from_user(&args, (struct rknpu_mem_sync *)data,
@@ -227,8 +316,18 @@
 	}
 
 	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
-	dmabuf = rknpu_obj->dmabuf;
 
+#ifndef CONFIG_DMABUF_PARTIAL
+	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_TO_DEVICE, false);
+	}
+	if (args.flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_FROM_DEVICE, true);
+	}
+#else
+	dmabuf = rknpu_obj->dmabuf;
 	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
 		dmabuf->ops->end_cpu_access_partial(dmabuf, DMA_TO_DEVICE,
 						    args.offset, args.size);
@@ -237,6 +336,9 @@
 		dmabuf->ops->begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE,
 						      args.offset, args.size);
 	}
+#endif
 
 	return 0;
 }
+
+#endif
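
The new `rknpu_dma_buf_sync()` helper only touches the scatter-gather segments that overlap the requested `[offset, offset + length)` window: it walks the table, skips segments that end before the window, and splits the sync into per-segment `(sg_offset, size)` ranges. The stand-alone sketch below is a minimal model of just that range-splitting arithmetic, assuming made-up segment lengths and a fake bus address; it uses no kernel APIs and performs no real cache maintenance, whereas the driver calls `dma_sync_single_range_for_cpu()`/`dma_sync_single_range_for_device()` at the marked spot.

```c
/*
 * Stand-alone model of the range-splitting loop in rknpu_dma_buf_sync().
 * Segment lengths and the start address are hypothetical; the kernel code
 * reads them from the sg_table and issues partial cache syncs instead of
 * printing.
 */
#include <stdio.h>

int main(void)
{
	unsigned int seg_len[] = { 4096, 8192, 4096 };	/* pretend sg->length values */
	unsigned long long sg_dma_addr = 0x10000000;	/* pretend sg_dma_address() */
	unsigned int offset = 6144, length = 8192;	/* requested sync window */
	unsigned int len = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int sg_offset, sg_left, size;

		len += seg_len[i];
		if (len <= offset) {			/* segment ends before the window */
			sg_dma_addr += seg_len[i];
			continue;
		}

		sg_left = len - offset;			/* window bytes left in this segment */
		sg_offset = seg_len[i] - sg_left;	/* where the window starts in it */
		size = (length < sg_left) ? length : sg_left;

		/* the driver calls dma_sync_single_range_for_{cpu,device}() here */
		printf("sync %#llx + %u, %u bytes\n", sg_dma_addr, sg_offset, size);

		offset += size;
		length -= size;
		sg_dma_addr += seg_len[i];
		if (length == 0)
			break;
	}
	return 0;
}
```

With these example numbers the loop skips the first 4 KiB segment entirely, syncs the last 6 KiB of the second segment, and then the first 2 KiB of the third, which is why the fallback path in `rknpu_mem_sync_ioctl()` can honor partial `args.offset`/`args.size` requests even without `CONFIG_DMABUF_PARTIAL`.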