...
  * Ding Wei, leo.ding@rock-chips.com
  *
  */
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-#include <asm/dma-iommu.h>
-#endif
 #include <linux/delay.h>
 #include <linux/dma-buf-cache.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
 #include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_iommu.h"
+#include "mpp_common.h"
 
-static struct mpp_dma_buffer *
+struct mpp_dma_buffer *
 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
 {
         struct dma_buf *dmabuf;
...
         dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
         dma_buf_detach(buffer->dmabuf, buffer->attach);
         dma_buf_put(buffer->dmabuf);
+        buffer->dma = NULL;
+        buffer->dmabuf = NULL;
+        buffer->attach = NULL;
+        buffer->sgt = NULL;
+        buffer->copy_sgt = NULL;
+        buffer->iova = 0;
+        buffer->size = 0;
+        buffer->vaddr = NULL;
+        buffer->last_used = 0;
 }
 
 /* Remove the oldest buffer when count more than the setting */
.. | .. |
---|
87 | 99 | oldest = buffer; |
---|
88 | 100 | } |
---|
89 | 101 | } |
---|
90 | | - if (oldest && kref_read(&oldest->ref) <= 1) |
---|
| 102 | + if (oldest && kref_read(&oldest->ref) == 1) |
---|
91 | 103 | kref_put(&oldest->ref, mpp_dma_release_buffer); |
---|
92 | 104 | mutex_unlock(&dma->list_mutex); |
---|
93 | 105 | } |
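The comparison tightens from `<= 1` to `== 1` to match the reference scheme this patch sets up: with the extra `kref_get()` taken at import time later in this diff, a buffer handed out to a caller always holds at least two references, so the pool may only evict when its own reference is the last one. A rough illustration of the intended counting (my reading of the patch, not text from it):

```c
/*
 * Reference life cycle as arranged by this patch (illustrative):
 *
 *   import (!CONFIG_DMABUF_CACHE): kref_init() -> 1 (pool)
 *                                  kref_get()  -> 2 (pool + caller)
 *   caller drops its handle:       kref_put()  -> 1 (pool only)
 *   eviction check:                kref_read() == 1, safe to release
 */
```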
...
         }
 
         /* remove the oldest before add buffer */
-        mpp_dma_remove_extra_buffer(dma);
+        if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+                mpp_dma_remove_extra_buffer(dma);
 
         /* Check whether in dma session */
         buffer = mpp_dma_find_buffer_fd(dma, fd);
...
 
         dmabuf = dma_buf_get(fd);
         if (IS_ERR(dmabuf)) {
-                mpp_err("dma_buf_get fd %d failed\n", fd);
-                return NULL;
+                ret = PTR_ERR(dmabuf);
+                mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
+                return ERR_PTR(ret);
         }
         /* A new DMA buffer */
         mutex_lock(&dma->list_mutex);
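Returning `ERR_PTR(ret)` instead of `NULL` changes the function's contract, so every caller must switch from a NULL check to `IS_ERR()`. A hedged sketch of an updated call site (the caller and the import function's name are my guesses from context, not shown in this hunk):

```c
/* Hypothetical call site after this change (names illustrative) */
buffer = mpp_dma_import_fd(dma, fd);
if (IS_ERR(buffer))
        return PTR_ERR(buffer); /* propagate the precise error code */
```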
...
 
         attach = dma_buf_attach(buffer->dmabuf, dma->dev);
         if (IS_ERR(attach)) {
-                mpp_err("dma_buf_attach fd %d failed\n", fd);
                 ret = PTR_ERR(attach);
+                mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
                 goto fail_attach;
         }
 
         sgt = dma_buf_map_attachment(attach, buffer->dir);
         if (IS_ERR(sgt)) {
-                mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
                 ret = PTR_ERR(sgt);
+                mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
                 goto fail_map;
         }
         buffer->iova = sg_dma_address(sgt->sgl);
...
         buffer->dma = dma;
 
         kref_init(&buffer->ref);
+
+        if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+                /* Increase the reference count for use outside the buffer pool */
+                kref_get(&buffer->ref);
 
         mutex_lock(&dma->list_mutex);
         dma->buffer_count++;
...
         return dma;
 }
 
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
+                      enum dma_data_direction dir, bool for_cpu)
+{
+        struct device *dev = buffer->dma->dev;
+        struct sg_table *sgt = buffer->sgt;
+        struct scatterlist *sg = sgt->sgl;
+        dma_addr_t sg_dma_addr = sg_dma_address(sg);
+        unsigned int len = 0;
+        int i;
+
+        for_each_sgtable_sg(sgt, sg, i) {
+                unsigned int sg_offset, sg_left, size = 0;
+
+                len += sg->length;
+                if (len <= offset) {
+                        sg_dma_addr += sg->length;
+                        continue;
+                }
+
+                sg_left = len - offset;
+                sg_offset = sg->length - sg_left;
+
+                size = (length < sg_left) ? length : sg_left;
+
+                if (for_cpu)
+                        dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+                                                      sg_offset, size, dir);
+                else
+                        dma_sync_single_range_for_device(dev, sg_dma_addr,
+                                                         sg_offset, size, dir);
+
+                offset += size;
+                length -= size;
+                sg_dma_addr += sg->length;
+
+                if (length == 0)
+                        break;
+        }
+}
+
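The helper walks the scatterlist and syncs only the segments overlapping `[offset, offset + length)`, which is cheaper than syncing a whole frame-sized buffer. A minimal sketch of the begin/end pairing the header comment describes, assuming a caller that lets the CPU read back a region the device has written (the wrapper is illustrative, not part of the patch):

```c
/* Illustrative caller: CPU reads back a sub-range of a device-written buffer */
static void example_cpu_read_region(struct mpp_dma_buffer *buf,
                                    u32 offset, u32 length)
{
        /* begin cpu access: make the device's writes visible to the CPU */
        mpp_dma_buf_sync(buf, offset, length, DMA_FROM_DEVICE, true);

        /* ... CPU reads buf contents in [offset, offset + length) ... */

        /* end cpu access: hand the range back to the device */
        mpp_dma_buf_sync(buf, offset, length, DMA_FROM_DEVICE, false);
}
```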
 int mpp_iommu_detach(struct mpp_iommu_info *info)
 {
-        struct iommu_domain *domain = info->domain;
-        struct iommu_group *group = info->group;
+        if (!info)
+                return 0;
 
-        iommu_detach_group(domain, group);
-
+        iommu_detach_group(info->domain, info->group);
         return 0;
 }
 
 int mpp_iommu_attach(struct mpp_iommu_info *info)
 {
-        struct iommu_domain *domain = info->domain;
-        struct iommu_group *group = info->group;
-        int ret;
+        if (!info)
+                return 0;
 
-        ret = iommu_attach_group(domain, group);
-        if (ret)
-                return ret;
+        if (info->domain == iommu_get_domain_for_dev(info->dev))
+                return 0;
+
+        return iommu_attach_group(info->domain, info->group);
+}
+
+static int mpp_iommu_handle(struct iommu_domain *iommu,
+                            struct device *iommu_dev,
+                            unsigned long iova,
+                            int status, void *arg)
+{
+        struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+        dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+                iova, status, arg);
+
+        if (!mpp) {
+                dev_err(iommu_dev, "pagefault without device to handle\n");
+                return 0;
+        }
+
+        if (mpp->cur_task)
+                mpp_task_dump_mem_region(mpp, mpp->cur_task);
+
+        if (mpp->dev_ops && mpp->dev_ops->dump_dev)
+                mpp->dev_ops->dump_dev(mpp);
+        else
+                mpp_task_dump_hw_reg(mpp);
+
+        /*
+         * Mask the iommu irq so the iommu does not trigger the pagefault
+         * repeatedly until the faulting task is finished by the hw timeout.
+         */
+        rockchip_iommu_mask_irq(mpp->dev);
 
         return 0;
 }
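For reference, this callback is registered per active device in `mpp_iommu_dev_activate()` below, and its signature matches the prototype the core IOMMU fault-report path expects:

```c
/* From <linux/iommu.h>: report_iommu_fault() calls handlers of this type */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                        struct device *, unsigned long, int, void *);
```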
...
         struct device_node *np = NULL;
         struct platform_device *pdev = NULL;
         struct mpp_iommu_info *info = NULL;
+        struct iommu_domain *domain = NULL;
+        struct iommu_group *group = NULL;
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
         struct dma_iommu_mapping *mapping;
 #endif
-        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-        if (!info)
-                return ERR_PTR(-ENOMEM);
-
         np = of_parse_phandle(dev->of_node, "iommus", 0);
         if (!np || !of_device_is_available(np)) {
                 mpp_err("failed to get device node\n");
...
                 return ERR_PTR(-ENODEV);
         }
 
-        info->group = iommu_group_get(dev);
-        if (!info->group) {
+        group = iommu_group_get(dev);
+        if (!group) {
                 ret = -EINVAL;
                 goto err_put_pdev;
         }
...
          * we re-attach domain here
          */
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
-        if (!iommu_group_default_domain(info->group)) {
+        if (!iommu_group_default_domain(group)) {
                 mapping = to_dma_iommu_mapping(dev);
                 WARN_ON(!mapping);
-                info->domain = mapping->domain;
+                domain = mapping->domain;
         }
 #endif
-        if (!info->domain) {
-                info->domain = iommu_get_domain_for_dev(dev);
-                if (!info->domain) {
+        if (!domain) {
+                domain = iommu_get_domain_for_dev(dev);
+                if (!domain) {
                         ret = -EINVAL;
                         goto err_put_group;
                 }
         }
 
+        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+        if (!info) {
+                ret = -ENOMEM;
+                goto err_put_group;
+        }
+
+        init_rwsem(&info->rw_sem);
+        spin_lock_init(&info->dev_lock);
         info->dev = dev;
         info->pdev = pdev;
-        init_rwsem(&info->rw_sem);
+        info->group = group;
+        info->domain = domain;
+        info->dev_active = NULL;
+        info->irq = platform_get_irq(pdev, 0);
+        info->got_irq = (info->irq < 0) ? false : true;
 
         return info;
 
 err_put_group:
-        iommu_group_put(info->group);
+        if (group)
+                iommu_group_put(group);
 err_put_pdev:
-        platform_device_put(pdev);
+        if (pdev)
+                platform_device_put(pdev);
 
         return ERR_PTR(ret);
 }
 
 int mpp_iommu_remove(struct mpp_iommu_info *info)
 {
+        if (!info)
+                return 0;
+
         iommu_group_put(info->group);
         platform_device_put(info->pdev);
 
...
 {
         int ret;
 
+        if (!info)
+                return 0;
+        /* call av1 iommu ops */
+        if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
+                ret = mpp_av1_iommu_disable(dev);
+                if (ret)
+                        return ret;
+                return mpp_av1_iommu_enable(dev);
+        }
         /* disable iommu */
         ret = rockchip_iommu_disable(dev);
         if (ret)
                 return ret;
-
         /* re-enable iommu */
         return rockchip_iommu_enable(dev);
 }
 
 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
 {
-        struct iommu_domain *domain = info->domain;
+        if (!info)
+                return 0;
 
-        if (domain && domain->ops)
-                iommu_flush_tlb_all(domain);
+        if (info->domain && info->domain->ops)
+                iommu_flush_iotlb_all(info->domain);
+
+        return 0;
+}
+
+int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+        unsigned long flags;
+        int ret = 0;
+
+        if (!info)
+                return 0;
+
+        spin_lock_irqsave(&info->dev_lock, flags);
+
+        if (info->dev_active || !dev) {
+                dev_err(info->dev, "can not activate %s -> %s\n",
+                        info->dev_active ? dev_name(info->dev_active->dev) : NULL,
+                        dev ? dev_name(dev->dev) : NULL);
+                ret = -EINVAL;
+        } else {
+                info->dev_active = dev;
+                /* switch domain pagefault handler and arg depending on device */
+                iommu_set_fault_handler(info->domain, dev->fault_handler ?
+                                        dev->fault_handler : mpp_iommu_handle, dev);
+
+                dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
+        }
+
+        spin_unlock_irqrestore(&info->dev_lock, flags);
+
+        return ret;
+}
+
+int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+        unsigned long flags;
+
+        if (!info)
+                return 0;
+
+        spin_lock_irqsave(&info->dev_lock, flags);
+
+        if (info->dev_active != dev)
+                dev_err(info->dev, "can not deactivate %s when %s activated\n",
+                        dev_name(dev->dev),
+                        info->dev_active ? dev_name(info->dev_active->dev) : NULL);
+
+        dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
+        info->dev_active = NULL;
+        spin_unlock_irqrestore(&info->dev_lock, flags);
 
         return 0;
 }
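Together, the activate/deactivate pair gives the shared IOMMU a single owner at a time and routes pagefaults to that owner's dump logic. A minimal sketch of how a task-run path might bracket hardware access with it (illustrative only; the `iommu_info` field and the surrounding steps are assumptions, not shown in this patch):

```c
/* Illustrative task-run path using the activate/deactivate pair */
static int example_run_task(struct mpp_dev *mpp)
{
        int ret;

        /* claim the shared iommu so faults are attributed to this device */
        ret = mpp_iommu_dev_activate(mpp->iommu_info, mpp);
        if (ret)
                return ret;

        /* ... program registers, kick the hardware, wait for completion ... */

        /* release ownership once the hardware is idle again */
        mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
        return 0;
}
```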