forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/video/rockchip/mpp/mpp_iommu.c
@@ -8,24 +8,27 @@
  * Ding Wei, leo.ding@rock-chips.com
  *
  */
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-#include <asm/dma-iommu.h>
-#endif
 #include <linux/delay.h>
 #include <linux/dma-buf-cache.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
 #include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_iommu.h"
+#include "mpp_common.h"
 
-static struct mpp_dma_buffer *
+struct mpp_dma_buffer *
 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -66,6 +69,15 @@
 	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
 	dma_buf_detach(buffer->dmabuf, buffer->attach);
 	dma_buf_put(buffer->dmabuf);
+	buffer->dma = NULL;
+	buffer->dmabuf = NULL;
+	buffer->attach = NULL;
+	buffer->sgt = NULL;
+	buffer->copy_sgt = NULL;
+	buffer->iova = 0;
+	buffer->size = 0;
+	buffer->vaddr = NULL;
+	buffer->last_used = 0;
 }
 
 /* Remove the oldest buffer when count more than the setting */
@@ -194,8 +206,9 @@
 
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR(dmabuf)) {
-		mpp_err("dma_buf_get fd %d failed\n", fd);
-		return NULL;
+		ret = PTR_ERR(dmabuf);
+		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
+		return ERR_PTR(ret);
 	}
 	/* A new DMA buffer */
 	mutex_lock(&dma->list_mutex);
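With this hunk the import path reports a dma_buf_get() failure as ERR_PTR(ret)
instead of NULL, so callers that tested the result against NULL need an IS_ERR()
check. A minimal caller sketch; the surrounding function and its argument list
(e.g. mpp_dma_import_fd) are assumed from context and not shown in this hunk:

	/* hypothetical caller: failure is now an ERR_PTR(), not NULL */
	buffer = mpp_dma_import_fd(iommu_info, dma, fd);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);	/* propagate the original error code */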
@@ -216,15 +229,15 @@
 
 	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
 	if (IS_ERR(attach)) {
-		mpp_err("dma_buf_attach fd %d failed\n", fd);
 		ret = PTR_ERR(attach);
+		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
 		goto fail_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, buffer->dir);
 	if (IS_ERR(sgt)) {
-		mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
 		ret = PTR_ERR(sgt);
+		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
 		goto fail_map;
 	}
 	buffer->iova = sg_dma_address(sgt->sgl);
@@ -234,7 +247,9 @@
 	buffer->dma = dma;
 
 	kref_init(&buffer->ref);
+
 	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+		/* Increase the reference for used outside the buffer pool */
 		kref_get(&buffer->ref);
 
 	mutex_lock(&dma->list_mutex);
@@ -361,25 +376,98 @@
 	return dma;
 }
 
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
+		      enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = buffer->dma->dev;
+	struct sg_table *sgt = buffer->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
+}
+
 int mpp_iommu_detach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
+	if (!info)
+		return 0;
 
-	iommu_detach_group(domain, group);
-
+	iommu_detach_group(info->domain, info->group);
 	return 0;
 }
 
 int mpp_iommu_attach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
-	int ret;
+	if (!info)
+		return 0;
 
-	ret = iommu_attach_group(domain, group);
-	if (ret)
-		return ret;
+	if (info->domain == iommu_get_domain_for_dev(info->dev))
+		return 0;
+
+	return iommu_attach_group(info->domain, info->group);
+}
+
+static int mpp_iommu_handle(struct iommu_domain *iommu,
+			    struct device *iommu_dev,
+			    unsigned long iova,
+			    int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+
+	if (mpp->cur_task)
+		mpp_task_dump_mem_region(mpp, mpp->cur_task);
+
+	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
+		mpp->dev_ops->dump_dev(mpp);
+	else
+		mpp_task_dump_hw_reg(mpp);
+
+	/*
+	 * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
+	 * Until the pagefault task finish by hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
 
 	return 0;
 }
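The new mpp_dma_buf_sync() helper walks the buffer's scatterlist and syncs only
the requested byte range, so partial CPU access does not have to flush the whole
buffer. A minimal usage sketch, assuming the buffer comes from the existing
import path; the 4 KiB range and direction are illustrative only:

	/* make the first 4 KiB written by the hardware visible to the CPU */
	mpp_dma_buf_sync(buffer, 0, SZ_4K, DMA_FROM_DEVICE, true);
	/* ... CPU reads the data ... */
	mpp_dma_buf_sync(buffer, 0, SZ_4K, DMA_FROM_DEVICE, false);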
@@ -391,13 +479,11 @@
 	struct device_node *np = NULL;
 	struct platform_device *pdev = NULL;
 	struct mpp_iommu_info *info = NULL;
+	struct iommu_domain *domain = NULL;
+	struct iommu_group *group = NULL;
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
 #endif
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
 	np = of_parse_phandle(dev->of_node, "iommus", 0);
 	if (!np || !of_device_is_available(np)) {
 		mpp_err("failed to get device node\n");
@@ -411,8 +497,8 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	info->group = iommu_group_get(dev);
-	if (!info->group) {
+	group = iommu_group_get(dev);
+	if (!group) {
 		ret = -EINVAL;
 		goto err_put_pdev;
 	}
@@ -423,38 +509,53 @@
 	 * we re-attach domain here
 	 */
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
-	if (!iommu_group_default_domain(info->group)) {
+	if (!iommu_group_default_domain(group)) {
 		mapping = to_dma_iommu_mapping(dev);
 		WARN_ON(!mapping);
-		info->domain = mapping->domain;
+		domain = mapping->domain;
 	}
 #endif
-	if (!info->domain) {
-		info->domain = iommu_get_domain_for_dev(dev);
-		if (!info->domain) {
+	if (!domain) {
+		domain = iommu_get_domain_for_dev(dev);
+		if (!domain) {
 			ret = -EINVAL;
 			goto err_put_group;
 		}
 	}
 
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_put_group;
+	}
+
+	init_rwsem(&info->rw_sem);
+	spin_lock_init(&info->dev_lock);
 	info->dev = dev;
 	info->pdev = pdev;
-	init_rwsem(&info->rw_sem);
+	info->group = group;
+	info->domain = domain;
+	info->dev_active = NULL;
 	info->irq = platform_get_irq(pdev, 0);
 	info->got_irq = (info->irq < 0) ? false : true;
 
 	return info;
 
 err_put_group:
-	iommu_group_put(info->group);
+	if (group)
+		iommu_group_put(group);
 err_put_pdev:
-	platform_device_put(pdev);
+	if (pdev)
+		platform_device_put(pdev);
 
 	return ERR_PTR(ret);
 }
 
 int mpp_iommu_remove(struct mpp_iommu_info *info)
 {
+	if (!info)
+		return 0;
+
 	iommu_group_put(info->group);
 	platform_device_put(info->pdev);
 
@@ -465,21 +566,80 @@
 {
 	int ret;
 
+	if (!info)
+		return 0;
+	/* call av1 iommu ops */
+	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
+		ret = mpp_av1_iommu_disable(dev);
+		if (ret)
+			return ret;
+		return mpp_av1_iommu_enable(dev);
+	}
 	/* disable iommu */
 	ret = rockchip_iommu_disable(dev);
 	if (ret)
 		return ret;
-
 	/* re-enable iommu */
 	return rockchip_iommu_enable(dev);
 }
 
 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
+	if (!info)
+		return 0;
 
-	if (domain && domain->ops)
-		iommu_flush_tlb_all(domain);
+	if (info->domain && info->domain->ops)
+		iommu_flush_iotlb_all(info->domain);
+
+	return 0;
+}
+
+int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active || !dev) {
+		dev_err(info->dev, "can not activate %s -> %s\n",
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
+			dev ? dev_name(dev->dev) : NULL);
+		ret = -EINVAL;
+	} else {
+		info->dev_active = dev;
+		/* switch domain pagefault handler and arg depending on device */
+		iommu_set_fault_handler(info->domain, dev->fault_handler ?
+					dev->fault_handler : mpp_iommu_handle, dev);
+
+		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
+	}
+
+	spin_unlock_irqrestore(&info->dev_lock, flags);
+
+	return ret;
+}
+
+int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active != dev)
+		dev_err(info->dev, "can not deactivate %s when %s activated\n",
+			dev_name(dev->dev),
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL);
+
+	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
+	info->dev_active = NULL;
+	spin_unlock_irqrestore(&info->dev_lock, flags);
 
 	return 0;
 }
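Note on the new activate/deactivate pair: dev_lock serializes which mpp_dev
currently owns the shared IOMMU, and activation installs that device's pagefault
handler (falling back to mpp_iommu_handle()). A hedged sketch of the expected
calling pattern around running a task; the surrounding run function and the
mpp->iommu_info member are assumed from the driver context, not part of this
commit:

	/* illustrative only: claim the IOMMU before starting the hardware */
	ret = mpp_iommu_dev_activate(mpp->iommu_info, mpp);
	if (ret)
		return ret;

	/* ... program registers and kick off the task ... */

	/* release ownership once the task finishes (or times out) */
	mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);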