forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/video/rockchip/mpp/mpp_iommu.c
@@ -8,24 +8,27 @@
  * Ding Wei, leo.ding@rock-chips.com
  *
  */
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
-#include <asm/dma-iommu.h>
-#endif
 #include <linux/delay.h>
 #include <linux/dma-buf-cache.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
 #include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_iommu.h"
+#include "mpp_common.h"
 
-static struct mpp_dma_buffer *
+struct mpp_dma_buffer *
 mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -66,6 +69,15 @@
 	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
 	dma_buf_detach(buffer->dmabuf, buffer->attach);
 	dma_buf_put(buffer->dmabuf);
+	buffer->dma = NULL;
+	buffer->dmabuf = NULL;
+	buffer->attach = NULL;
+	buffer->sgt = NULL;
+	buffer->copy_sgt = NULL;
+	buffer->iova = 0;
+	buffer->size = 0;
+	buffer->vaddr = NULL;
+	buffer->last_used = 0;
 }
 
 /* Remove the oldest buffer when count more than the setting */
@@ -87,7 +99,7 @@
 			oldest = buffer;
 		}
 	}
-	if (oldest && kref_read(&oldest->ref) <= 1)
+	if (oldest && kref_read(&oldest->ref) == 1)
 		kref_put(&oldest->ref, mpp_dma_release_buffer);
 	mutex_unlock(&dma->list_mutex);
 }
@@ -179,7 +191,8 @@
 	}
 
 	/* remove the oldest before add buffer */
-	mpp_dma_remove_extra_buffer(dma);
+	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+		mpp_dma_remove_extra_buffer(dma);
 
 	/* Check whether in dma session */
 	buffer = mpp_dma_find_buffer_fd(dma, fd);
@@ -193,8 +206,9 @@
 
 	dmabuf = dma_buf_get(fd);
 	if (IS_ERR(dmabuf)) {
-		mpp_err("dma_buf_get fd %d failed\n", fd);
-		return NULL;
+		ret = PTR_ERR(dmabuf);
+		mpp_err("dma_buf_get fd %d failed(%d)\n", fd, ret);
+		return ERR_PTR(ret);
 	}
 	/* A new DMA buffer */
 	mutex_lock(&dma->list_mutex);
@@ -215,15 +229,15 @@
 
 	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
 	if (IS_ERR(attach)) {
-		mpp_err("dma_buf_attach fd %d failed\n", fd);
 		ret = PTR_ERR(attach);
+		mpp_err("dma_buf_attach fd %d failed(%d)\n", fd, ret);
 		goto fail_attach;
 	}
 
 	sgt = dma_buf_map_attachment(attach, buffer->dir);
 	if (IS_ERR(sgt)) {
-		mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
 		ret = PTR_ERR(sgt);
+		mpp_err("dma_buf_map_attachment fd %d failed(%d)\n", fd, ret);
 		goto fail_map;
 	}
 	buffer->iova = sg_dma_address(sgt->sgl);
@@ -233,6 +247,10 @@
 	buffer->dma = dma;
 
 	kref_init(&buffer->ref);
+
+	if (!IS_ENABLED(CONFIG_DMABUF_CACHE))
+		/* Increase the reference for used outside the buffer pool */
+		kref_get(&buffer->ref);
 
 	mutex_lock(&dma->list_mutex);
 	dma->buffer_count++;
@@ -358,25 +376,98 @@
 	return dma;
 }
 
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+void mpp_dma_buf_sync(struct mpp_dma_buffer *buffer, u32 offset, u32 length,
+		      enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = buffer->dma->dev;
+	struct sg_table *sgt = buffer->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
+}
+
 int mpp_iommu_detach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
+	if (!info)
+		return 0;
 
-	iommu_detach_group(domain, group);
-
+	iommu_detach_group(info->domain, info->group);
 	return 0;
 }
 
 int mpp_iommu_attach(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
-	struct iommu_group *group = info->group;
-	int ret;
+	if (!info)
+		return 0;
 
-	ret = iommu_attach_group(domain, group);
-	if (ret)
-		return ret;
+	if (info->domain == iommu_get_domain_for_dev(info->dev))
+		return 0;
+
+	return iommu_attach_group(info->domain, info->group);
+}
+
+static int mpp_iommu_handle(struct iommu_domain *iommu,
+			    struct device *iommu_dev,
+			    unsigned long iova,
+			    int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+
+	if (mpp->cur_task)
+		mpp_task_dump_mem_region(mpp, mpp->cur_task);
+
+	if (mpp->dev_ops && mpp->dev_ops->dump_dev)
+		mpp->dev_ops->dump_dev(mpp);
+	else
+		mpp_task_dump_hw_reg(mpp);
+
+	/*
+	 * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
+	 * Until the pagefault task finish by hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
 
 	return 0;
 }
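
The new mpp_dma_buf_sync() above walks the buffer's scatterlist and converts a caller-supplied byte range (offset, length) into per-segment (sg_offset, size) pairs before calling dma_sync_single_range_for_cpu()/_for_device(). The snippet below is a minimal userspace model of that range-splitting arithmetic only; the segment lengths and the walk_range() helper are invented for illustration and are not part of the patch.

/* Userspace model of the sg-walk arithmetic in mpp_dma_buf_sync().
 * The kernel code syncs each computed piece with
 * dma_sync_single_range_for_cpu()/_for_device() instead of printing it.
 */
#include <stdio.h>

static void walk_range(const unsigned int *seg_len, int nents,
		       unsigned int offset, unsigned int length)
{
	unsigned int len = 0;
	int i;

	for (i = 0; i < nents; i++) {
		unsigned int sg_offset, sg_left, size;

		len += seg_len[i];
		if (len <= offset)
			continue;	/* request starts in a later segment */

		sg_left = len - offset;			/* requested bytes left in this segment */
		sg_offset = seg_len[i] - sg_left;	/* start of the request inside this segment */
		size = (length < sg_left) ? length : sg_left;

		printf("segment %d: sync offset %u, size %u\n", i, sg_offset, size);

		offset += size;
		length -= size;
		if (length == 0)
			break;
	}
}

int main(void)
{
	/* three hypothetical segments of 4 KiB each */
	const unsigned int seg_len[] = { 4096, 4096, 4096 };

	/*
	 * Sync 6000 bytes starting 1000 bytes into the buffer:
	 * segment 0 -> offset 1000, size 3096; segment 1 -> offset 0, size 2904.
	 */
	walk_range(seg_len, 3, 1000, 6000);
	return 0;
}
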
@@ -388,13 +479,11 @@
 	struct device_node *np = NULL;
 	struct platform_device *pdev = NULL;
 	struct mpp_iommu_info *info = NULL;
+	struct iommu_domain *domain = NULL;
+	struct iommu_group *group = NULL;
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
 #endif
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
 	np = of_parse_phandle(dev->of_node, "iommus", 0);
 	if (!np || !of_device_is_available(np)) {
 		mpp_err("failed to get device node\n");
@@ -408,8 +497,8 @@
 		return ERR_PTR(-ENODEV);
 	}
 
-	info->group = iommu_group_get(dev);
-	if (!info->group) {
+	group = iommu_group_get(dev);
+	if (!group) {
 		ret = -EINVAL;
 		goto err_put_pdev;
 	}
@@ -420,36 +509,53 @@
 	 * we re-attach domain here
 	 */
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
-	if (!iommu_group_default_domain(info->group)) {
+	if (!iommu_group_default_domain(group)) {
 		mapping = to_dma_iommu_mapping(dev);
 		WARN_ON(!mapping);
-		info->domain = mapping->domain;
+		domain = mapping->domain;
 	}
 #endif
-	if (!info->domain) {
-		info->domain = iommu_get_domain_for_dev(dev);
-		if (!info->domain) {
+	if (!domain) {
+		domain = iommu_get_domain_for_dev(dev);
+		if (!domain) {
 			ret = -EINVAL;
 			goto err_put_group;
 		}
 	}
 
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_put_group;
+	}
+
+	init_rwsem(&info->rw_sem);
+	spin_lock_init(&info->dev_lock);
 	info->dev = dev;
 	info->pdev = pdev;
-	init_rwsem(&info->rw_sem);
+	info->group = group;
+	info->domain = domain;
+	info->dev_active = NULL;
+	info->irq = platform_get_irq(pdev, 0);
+	info->got_irq = (info->irq < 0) ? false : true;
 
 	return info;
 
 err_put_group:
-	iommu_group_put(info->group);
+	if (group)
+		iommu_group_put(group);
 err_put_pdev:
-	platform_device_put(pdev);
+	if (pdev)
+		platform_device_put(pdev);
 
 	return ERR_PTR(ret);
 }
 
 int mpp_iommu_remove(struct mpp_iommu_info *info)
 {
+	if (!info)
+		return 0;
+
 	iommu_group_put(info->group);
 	platform_device_put(info->pdev);
 
@@ -460,21 +566,80 @@
 {
 	int ret;
 
+	if (!info)
+		return 0;
+	/* call av1 iommu ops */
+	if (IS_ENABLED(CONFIG_ROCKCHIP_MPP_AV1DEC) && info->av1d_iommu) {
+		ret = mpp_av1_iommu_disable(dev);
+		if (ret)
+			return ret;
+		return mpp_av1_iommu_enable(dev);
+	}
 	/* disable iommu */
 	ret = rockchip_iommu_disable(dev);
 	if (ret)
 		return ret;
-
 	/* re-enable iommu */
 	return rockchip_iommu_enable(dev);
 }
 
 int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
 {
-	struct iommu_domain *domain = info->domain;
+	if (!info)
+		return 0;
 
-	if (domain && domain->ops)
-		iommu_flush_tlb_all(domain);
+	if (info->domain && info->domain->ops)
+		iommu_flush_iotlb_all(info->domain);
+
+	return 0;
+}
+
+int mpp_iommu_dev_activate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active || !dev) {
+		dev_err(info->dev, "can not activate %s -> %s\n",
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL,
+			dev ? dev_name(dev->dev) : NULL);
+		ret = -EINVAL;
+	} else {
+		info->dev_active = dev;
+		/* switch domain pagefault handler and arg depending on device */
+		iommu_set_fault_handler(info->domain, dev->fault_handler ?
+					dev->fault_handler : mpp_iommu_handle, dev);
+
+		dev_dbg(info->dev, "activate -> %p %s\n", dev, dev_name(dev->dev));
+	}
+
+	spin_unlock_irqrestore(&info->dev_lock, flags);
+
+	return ret;
+}
+
+int mpp_iommu_dev_deactivate(struct mpp_iommu_info *info, struct mpp_dev *dev)
+{
+	unsigned long flags;
+
+	if (!info)
+		return 0;
+
+	spin_lock_irqsave(&info->dev_lock, flags);
+
+	if (info->dev_active != dev)
+		dev_err(info->dev, "can not deactivate %s when %s activated\n",
+			dev_name(dev->dev),
+			info->dev_active ? dev_name(info->dev_active->dev) : NULL);
+
+	dev_dbg(info->dev, "deactivate %p\n", info->dev_active);
+	info->dev_active = NULL;
+	spin_unlock_irqrestore(&info->dev_lock, flags);
 
 	return 0;
 }
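
The mpp_iommu_dev_activate()/mpp_iommu_dev_deactivate() pair added above lets one mpp_dev at a time own the shared iommu and switches the pagefault handler argument to that device, so the handler dumps the right task on a fault. The sketch below is a hypothetical caller, not the actual call sites in the driver: the function name, the run/wait steps and the mpp->iommu_info field are assumptions for illustration; only the two helpers and the cur_task field come from this patch.

/*
 * Hypothetical caller sketch: bracket a hardware run with the new
 * activate/deactivate helpers so pagefaults are attributed to this device.
 * Error handling and the run step are simplified; not taken from the patch.
 */
static int mpp_run_one_task_sketch(struct mpp_dev *mpp, struct mpp_task *task)
{
	int ret;

	/* claim the shared iommu: installs mpp as the fault-handler argument */
	ret = mpp_iommu_dev_activate(mpp->iommu_info, mpp);
	if (ret)
		return ret;

	mpp->cur_task = task;

	/* ... program registers and start the hardware here (device specific) ... */

	/* ... wait for the irq or the hw timeout ... */

	mpp->cur_task = NULL;

	/* release the iommu so another core can claim it */
	mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);

	return 0;
}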