```diff
@@ -8,24 +8,27 @@
  * Ding Wei, leo.ding@rock-chips.com
  *
  */
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-#include <asm/dma-iommu.h>
-#endif
 #include <linux/delay.h>
 #include <linux/dma-buf-cache.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
 #include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_iommu.h"
+#include "mpp_common.h"
 
-static struct mpp_dma_buffer *
+struct mpp_dma_buffer *
 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
 {
 	struct dma_buf *dmabuf;
```
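The lookup helper `mpp_dma_find_buffer_fd()` loses its `static` so other compilation units can call it, which is presumably also why `mpp_common.h` joins the includes. A sketch of the declaration a shared header would then carry (the header itself is not part of this hunk, so its location is an assumption):

```c
/* Assumed prototype for the now-exported helper; where it actually
 * lands (e.g. mpp_iommu.h) is not visible in this patch. */
struct mpp_dma_buffer *
mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd);
```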
```diff
@@ -66,6 +69,15 @@
 	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
 	dma_buf_detach(buffer->dmabuf, buffer->attach);
 	dma_buf_put(buffer->dmabuf);
+	buffer->dma = NULL;
+	buffer->dmabuf = NULL;
+	buffer->attach = NULL;
+	buffer->sgt = NULL;
+	buffer->copy_sgt = NULL;
+	buffer->iova = 0;
+	buffer->size = 0;
+	buffer->vaddr = NULL;
+	buffer->last_used = 0;
 }
 
 /* Remove the oldest buffer when count more than the setting */
```
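Releasing a buffer now scrubs every per-import field one by one rather than with a single `memset()`, which suggests the object stays resident in the session's pool and carries state, such as its list linkage and refcount, that must outlive the release. A minimal sketch of that pattern with a hypothetical struct (field names mirror the diff, the layout is assumed):

```c
#include <linux/dma-buf.h>
#include <linux/kref.h>
#include <linux/list.h>

/* Hypothetical pool entry illustrating the per-field reset. */
struct demo_buffer {
	struct list_head link;	/* must survive release: pool membership */
	struct kref ref;	/* must survive release: lifetime */
	struct dma_buf *dmabuf;	/* per-import state, cleared on release */
	dma_addr_t iova;
	size_t size;
};

static void demo_buffer_reset(struct demo_buffer *buf)
{
	/* memset(buf, 0, sizeof(*buf)) would also wipe link and ref and
	 * corrupt the pool, so only the per-import fields are cleared. */
	buf->dmabuf = NULL;
	buf->iova = 0;
	buf->size = 0;
}
```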
```diff
@@ -194,8 +206,9 @@
 
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR(dmabuf)) {
-		mpp_err("dma_buf_get fd %d failed\n", fd);
-		return NULL;
+		ret = PTR_ERR(dmabuf);
+		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
+		return ERR_PTR(ret);
 	}
 	/* A new DMA buffer */
 	mutex_lock(&dma->list_mutex);
```
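Since the failure path now returns `ERR_PTR(ret)` instead of `NULL`, callers have to switch from a NULL test to the `IS_ERR()`/`PTR_ERR()` idiom; a bare NULL check would treat the error pointer as a valid buffer. A hedged call-site sketch (the importing function's name is not visible in this hunk, so `demo_import` stands in for it):

```c
/* demo_import() stands in for the function modified above. */
struct mpp_dma_buffer *buffer = demo_import(dma, fd);

if (IS_ERR(buffer))
	return PTR_ERR(buffer);	/* a NULL check would now miss this */
```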
```diff
@@ -216,15 +229,15 @@
 
 	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
 	if (IS_ERR(attach)) {
-		mpp_err("dma_buf_attach fd %d failed\n", fd);
 		ret = PTR_ERR(attach);
+		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
 		goto fail_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, buffer->dir);
 	if (IS_ERR(sgt)) {
-		mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
 		ret = PTR_ERR(sgt);
+		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
 		goto fail_map;
 	}
 	buffer->iova = sg_dma_address(sgt->sgl);
```
```diff
@@ -234,7 +247,9 @@
 	buffer->dma = dma;
 
 	kref_init(&buffer->ref);
+
 	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+		/* Take an extra reference for use outside the buffer pool */
 		kref_get(&buffer->ref);
 
 	mutex_lock(&dma->list_mutex);
```
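When `CONFIG_DMABUF_CACHE` is disabled, the pool's `kref_init()` reference is topped up with an extra `kref_get()` for the external user, and that extra reference must be balanced by a `kref_put()` naming a release callback. A sketch of what that callback presumably looks like, mirroring the teardown shown earlier (the function name is hypothetical):

```c
/* Hypothetical release callback run by the final kref_put(). */
static void demo_dma_buffer_release(struct kref *ref)
{
	struct mpp_dma_buffer *buffer =
		container_of(ref, struct mpp_dma_buffer, ref);

	/* Same teardown order as the release path shown earlier. */
	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
	dma_buf_detach(buffer->dmabuf, buffer->attach);
	dma_buf_put(buffer->dmabuf);
}
```

Each holder would then drop its reference with `kref_put(&buffer->ref, demo_dma_buffer_release)`, and the last put runs the teardown.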
```diff
@@ -361,3 +376,47 @@
 	return dma;
 }
 
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
+		      enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = buffer->dma->dev;
+	struct sg_table *sgt = buffer->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
+}
+
```
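`mpp_dma_buf_sync()` walks the scatterlist, skips whole segments until it reaches `offset`, then syncs at most `length` bytes segment by segment, so a partial sync only touches the chunks it actually covers. A usage sketch for a CPU read-back, following the semantics stated in the function's comment (`for_cpu = true` to begin CPU access, `false` to end it); the surrounding context is hypothetical:

```c
/* Sync the whole buffer for CPU access before inspecting it... */
mpp_dma_buf_sync(buffer, 0, buffer->size, DMA_FROM_DEVICE, true);

/* ...CPU reads the decoded data here... */

/* ...then return ownership of the range to the device. */
mpp_dma_buf_sync(buffer, 0, buffer->size, DMA_FROM_DEVICE, false);
```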
```diff
@@ -364,22 +423,51 @@
 int mpp_iommu_detach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
+	if (!info)
+		return 0;
 
-	iommu_detach_group(domain, group);
-
+	iommu_detach_group(info->domain, info->group);
 	return 0;
 }
 
 int mpp_iommu_attach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
-	int ret;
+	if (!info)
+		return 0;
 
-	ret = iommu_attach_group(domain, group);
-	if (ret)
-		return ret;
+	if (info->domain == iommu_get_domain_for_dev(info->dev))
+		return 0;
+
+	return iommu_attach_group(info->domain, info->group);
+}
+
+static int mpp_iommu_handle(struct iommu_domain *iommu,
+			    struct device *iommu_dev,
+			    unsigned long iova,
+			    int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+
+	if (mpp->cur_task)
+		mpp_task_dump_mem_region(mpp, mpp->cur_task);
+
+	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
+		mpp->dev_ops->dump_dev(mpp);
+	else
+		mpp_task_dump_hw_reg(mpp);
+
+	/*
+	 * Mask the IOMMU IRQ so it does not repeatedly trigger pagefaults
+	 * until the faulting task is finished by the hardware timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
 
 	return 0;
 }
```
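`mpp_iommu_handle()` has the exact signature `iommu_set_fault_handler()` expects, and the `mpp_iommu_dev_activate()` hunk further down installs it (or a device-specific override) whenever a device claims the IOMMU. A one-line registration sketch, assuming `mpp` is the device context that should receive the fault:

```c
/* In the patch itself this happens inside mpp_iommu_dev_activate(). */
iommu_set_fault_handler(info->domain, mpp_iommu_handle, mpp);
```

Note also that `mpp_iommu_attach()` now returns early when the device's current domain already matches, so repeated attach calls become harmless no-ops.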
```diff
@@ -391,13 +479,11 @@
 	struct device_node *np = NULL;
 	struct platform_device *pdev = NULL;
 	struct mpp_iommu_info *info = NULL;
+	struct iommu_domain *domain = NULL;
+	struct iommu_group *group = NULL;
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
 #endif
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
 	np = of_parse_phandle(dev->of_node, "iommus", 0);
 	if (!np || !of_device_is_available(np)) {
 		mpp_err("failed to get device node\n");
```
```diff
@@ -411,8 +497,8 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	info->group = iommu_group_get(dev);
-	if (!info->group) {
+	group = iommu_group_get(dev);
+	if (!group) {
 		ret = -EINVAL;
 		goto err_put_pdev;
 	}
```
```diff
@@ -423,32 +509,44 @@
 	 * we re-attach domain here
 	 */
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
-	if (!iommu_group_default_domain(info->group)) {
+	if (!iommu_group_default_domain(group)) {
 		mapping = to_dma_iommu_mapping(dev);
 		WARN_ON(!mapping);
-		info->domain = mapping->domain;
+		domain = mapping->domain;
 	}
 #endif
-	if (!info->domain) {
-		info->domain = iommu_get_domain_for_dev(dev);
-		if (!info->domain) {
+	if (!domain) {
+		domain = iommu_get_domain_for_dev(dev);
+		if (!domain) {
 			ret = -EINVAL;
 			goto err_put_group;
 		}
 	}
 
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_put_group;
+	}
+
+	init_rwsem(&info->rw_sem);
+	spin_lock_init(&info->dev_lock);
 	info->dev = dev;
 	info->pdev = pdev;
-	init_rwsem(&info->rw_sem);
+	info->group = group;
+	info->domain = domain;
+	info->dev_active = NULL;
 	info->irq = platform_get_irq(pdev, 0);
 	info->got_irq = (info->irq < 0) ? false : true;
 
 	return info;
 
 err_put_group:
-	iommu_group_put(info->group);
+	if (group)
+		iommu_group_put(group);
 err_put_pdev:
-	platform_device_put(pdev);
+	if (pdev)
+		platform_device_put(pdev);
 
 	return ERR_PTR(ret);
 }
```
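`devm_kzalloc()` now runs only after every fallible lookup has succeeded, so the error labels unwind nothing but the raw `group`/`pdev` references and `info` is populated in one place. A minimal sketch of this acquire-then-allocate shape (names are illustrative, not the driver's):

```c
/* Illustrative reduction of the reordered probe. */
static struct mpp_iommu_info *demo_probe(struct device *dev)
{
	struct iommu_group *group;
	struct mpp_iommu_info *info;

	group = iommu_group_get(dev);	/* fallible lookups first */
	if (!group)
		return ERR_PTR(-EINVAL);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		iommu_group_put(group);	/* unwind in reverse order */
		return ERR_PTR(-ENOMEM);
	}

	info->group = group;	/* publish only once nothing can fail */
	return info;
}
```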
```diff
@@ -455,6 +553,9 @@
 
 int mpp_iommu_remove(struct mpp_iommu_info *info)
 {
+	if (!info)
+		return 0;
+
 	iommu_group_put(info->group);
 	platform_device_put(info->pdev);
 
```
```diff
@@ -465,11 +566,19 @@
 {
 	int ret;
 
+	if (!info)
+		return 0;
+	/* call av1 iommu ops */
+	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
+		ret = mpp_av1_iommu_disable(dev);
+		if (ret)
+			return ret;
+		return mpp_av1_iommu_enable(dev);
+	}
 	/* disable iommu */
 	ret = rockchip_iommu_disable(dev);
 	if (ret)
 		return ret;
-
 	/* re-enable iommu */
 	return rockchip_iommu_enable(dev);
 }
```
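This refresh is a disable/enable cycle, detouring through the AV1 decoder's dedicated IOMMU ops when `av1d_iommu` is set; the likely use is restoring translation state after the power domain cycles. A hedged caller sketch (the `mpp->iommu_info` field name and the resume context are assumptions):

```c
/* Hypothetical resume path: power-up loses IOMMU state, so
 * refresh it before the next task is submitted. */
pm_runtime_get_sync(mpp->dev);
mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
```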
```diff
@@ -476,7 +585,11 @@
 
 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
+	if (!info)
+		return 0;
 
-	if (domain && domain->ops)
-		iommu_flush_tlb_all(domain);
+	if (info->domain && info->domain->ops)
+		iommu_flush_iotlb_all(info->domain);
+
+	return 0;
+}
```
```diff
@@ -482,0 +596,29 @@
+
+int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active || !dev) {
+		dev_err(info->dev, "can not activate %s -> %s\n",
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
+			dev ? dev_name(dev->dev) : NULL);
+		ret = -EINVAL;
+	} else {
+		info->dev_active = dev;
+		/* switch domain pagefault handler and arg depending on device */
+		iommu_set_fault_handler(info->domain, dev->fault_handler ?
+					dev->fault_handler : mpp_iommu_handle, dev);
+
+		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
+	}
+
+	spin_unlock_irqrestore(&info->dev_lock, flags);
+
+	return ret;
+}
```
```diff
@@ -483,3 +625,21 @@
+
+int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active != dev)
+		dev_err(info->dev, "can not deactivate %s when %s activated\n",
+			dev_name(dev->dev),
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL);
+
+	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
+	info->dev_active = NULL;
+	spin_unlock_irqrestore(&info->dev_lock, flags);
 
 	return 0;
 }
```
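Taken together, activate/deactivate enforce a single owner for the shared IOMMU at any moment, with the pagefault handler retargeted to whichever device is running. A per-task usage sketch (the surrounding scheduling code and the `mpp->iommu_info` field name are hypothetical):

```c
/* Claim the IOMMU for this device before starting the hardware. */
ret = mpp_iommu_dev_activate(mpp->iommu_info, mpp);
if (ret)
	return ret;	/* another device still owns the IOMMU */

/* ...program registers and run the task... */

/* Release ownership when the task completes or times out. */
mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
```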
|---|