2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/iommu/tegra-smmu.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/bitops.h>
@@ -15,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 
 #include <soc/tegra/ahb.h>
@@ -22,8 +20,10 @@
 
 struct tegra_smmu_group {
 	struct list_head list;
+	struct tegra_smmu *smmu;
 	const struct tegra_smmu_group_soc *soc;
 	struct iommu_group *group;
+	unsigned int swgroup;
 };
 
 struct tegra_smmu {
@@ -52,6 +52,7 @@
 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
+	spinlock_t lock;
 	u32 *count;
 	struct page **pts;
 	struct page *pd;
@@ -130,6 +131,11 @@
 #define SMMU_PDE_SHIFT 22
 #define SMMU_PTE_SHIFT 12
 
+#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
+#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
+#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
+#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))
+
 #define SMMU_PD_READABLE	(1 << 31)
 #define SMMU_PD_WRITABLE	(1 << 30)
 #define SMMU_PD_NONSECURE	(1 << 29)
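The four helpers above deliberately encode the SMMU's fixed 4 KiB page geometry (SMMU_PTE_SHIFT is 12) rather than the CPU's PAGE_SHIFT, so PFN/physical conversions stay correct even on kernels built with larger CPU pages. A minimal userspace sanity check of the arithmetic, assuming SMMU_SIZE_PT is 4096 as defined elsewhere in this driver:

    /* Standalone check of the macro arithmetic; illustrative only. */
    #include <assert.h>
    #include <stdint.h>

    #define SMMU_SIZE_PT		4096
    #define SMMU_PTE_SHIFT		12
    #define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT - 1))
    #define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
    #define SMMU_PFN_PHYS(x)	((uint64_t)(x) << SMMU_PTE_SHIFT)
    #define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

    int main(void)
    {
    	uint64_t phys = 0x12345678;

    	assert(SMMU_PHYS_PFN(phys) == 0x12345);	/* 4 KiB page frame number */
    	assert(SMMU_OFFSET_IN_PAGE(phys) == 0x678);	/* offset within the page */
    	/* the two halves recombine losslessly */
    	assert(SMMU_PFN_PHYS(SMMU_PHYS_PFN(phys)) + SMMU_OFFSET_IN_PAGE(phys) == phys);
    	return 0;
    }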
@@ -145,8 +151,6 @@
 
 #define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
 				 SMMU_PDE_NONSECURE)
-#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
-				 SMMU_PTE_NONSECURE)
 
 static unsigned int iova_pd_index(unsigned long iova)
 {
@@ -245,7 +249,7 @@
 
 static inline void smmu_flush(struct tegra_smmu *smmu)
 {
-	smmu_readl(smmu, SMMU_CONFIG);
+	smmu_readl(smmu, SMMU_PTB_ASID);
 }
 
 static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
@@ -313,6 +317,8 @@
 		return NULL;
 	}
 
+	spin_lock_init(&as->lock);
+
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
 	as->domain.geometry.aperture_end = 0xffffffff;
@@ -327,6 +333,9 @@
 
 	/* TODO: free page directory and page tables */
 
+	WARN_ON_ONCE(as->use_count);
+	kfree(as->count);
+	kfree(as->pts);
 	kfree(as);
 }
 
@@ -353,6 +362,20 @@
 	unsigned int i;
 	u32 value;
 
+	group = tegra_smmu_find_swgroup(smmu, swgroup);
+	if (group) {
+		value = smmu_readl(smmu, group->reg);
+		value &= ~SMMU_ASID_MASK;
+		value |= SMMU_ASID_VALUE(asid);
+		value |= SMMU_ASID_ENABLE;
+		smmu_writel(smmu, value, group->reg);
+	} else {
+		pr_warn("%s group from swgroup %u not found\n", __func__,
+			swgroup);
+		/* No point moving ahead if group was not found */
+		return;
+	}
+
 	for (i = 0; i < smmu->soc->num_clients; i++) {
 		const struct tegra_mc_client *client = &smmu->soc->clients[i];
 
@@ -362,15 +385,6 @@
 		value = smmu_readl(smmu, client->smmu.reg);
 		value |= BIT(client->smmu.bit);
 		smmu_writel(smmu, value, client->smmu.reg);
-	}
-
-	group = tegra_smmu_find_swgroup(smmu, swgroup);
-	if (group) {
-		value = smmu_readl(smmu, group->reg);
-		value &= ~SMMU_ASID_MASK;
-		value |= SMMU_ASID_VALUE(asid);
-		value |= SMMU_ASID_ENABLE;
-		smmu_writel(smmu, value, group->reg);
 	}
 }
 
@@ -462,7 +476,7 @@
 static int tegra_smmu_attach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct tegra_smmu *smmu = dev->archdata.iommu;
+	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 	struct device_node *np = dev->of_node;
 	struct of_phandle_args args;
@@ -566,18 +580,13 @@
 }
 
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-		       dma_addr_t *dmap)
+		       dma_addr_t *dmap, struct page *page)
 {
 	unsigned int pde = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 
 	if (!as->pts[pde]) {
-		struct page *page;
 		dma_addr_t dma;
-
-		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
-		if (!page)
-			return NULL;
 
 		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
 				   DMA_TO_DEVICE);
@@ -641,7 +650,7 @@
 			       u32 *pte, dma_addr_t pte_dma, u32 val)
 {
 	struct tegra_smmu *smmu = as->smmu;
-	unsigned long offset = offset_in_page(pte);
+	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
 
 	*pte = val;
 
@@ -652,14 +661,61 @@
 	smmu_flush(smmu);
 }
 
-static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot)
+static struct page *as_get_pde_page(struct tegra_smmu_as *as,
+				    unsigned long iova, gfp_t gfp,
+				    unsigned long *flags)
+{
+	unsigned int pde = iova_pd_index(iova);
+	struct page *page = as->pts[pde];
+
+	/* at first check whether allocation needs to be done at all */
+	if (page)
+		return page;
+
+	/*
+	 * In order to prevent exhaustion of the atomic memory pool, we
+	 * allocate page in a sleeping context if GFP flags permit. Hence
+	 * spinlock needs to be unlocked and re-locked after allocation.
+	 */
+	if (!(gfp & __GFP_ATOMIC))
+		spin_unlock_irqrestore(&as->lock, *flags);
+
+	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+
+	if (!(gfp & __GFP_ATOMIC))
+		spin_lock_irqsave(&as->lock, *flags);
+
+	/*
+	 * In a case of blocking allocation, a concurrent mapping may win
+	 * the PDE allocation. In this case the allocated page isn't needed
+	 * if allocation succeeded and the allocation failure isn't fatal.
+	 */
+	if (as->pts[pde]) {
+		if (page)
+			__free_page(page);
+
+		page = as->pts[pde];
+	}
+
+	return page;
+}
+
+static int
+__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
+		 unsigned long *flags)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 	dma_addr_t pte_dma;
+	struct page *page;
+	u32 pte_attrs;
 	u32 *pte;
 
-	pte = as_get_pte(as, iova, &pte_dma);
+	page = as_get_pde_page(as, iova, gfp, flags);
+	if (!page)
+		return -ENOMEM;
+
+	pte = as_get_pte(as, iova, &pte_dma, page);
 	if (!pte)
 		return -ENOMEM;
 
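The comments in as_get_pde_page() describe the core trick: a blocking page allocation must not happen under as->lock, so the lock is dropped, the page is allocated, and the PDE slot is re-checked after relocking, because a concurrent mapper may have populated it in the meantime. A userspace analogue of the same pattern, with hypothetical names (and, unlike the driver, installing the page itself rather than leaving that to a later as_get_pte() step):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slots[1024];

    /* Called with table_lock held; returns with it held. */
    static void *get_slot_page(unsigned int idx)
    {
    	void *page = slots[idx];

    	/* fast path: the slot is already populated */
    	if (page)
    		return page;

    	/* never block while holding the lock */
    	pthread_mutex_unlock(&table_lock);
    	page = calloc(1, 4096);
    	pthread_mutex_lock(&table_lock);

    	/* a concurrent caller may have won the race while we slept */
    	if (slots[idx]) {
    		free(page);	/* free(NULL) is a no-op */
    		return slots[idx];
    	}

    	slots[idx] = page;	/* may still be NULL if calloc failed */
    	return page;
    }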
@@ -667,14 +723,23 @@
 	if (*pte == 0)
 		tegra_smmu_pte_get_use(as, iova);
 
+	pte_attrs = SMMU_PTE_NONSECURE;
+
+	if (prot & IOMMU_READ)
+		pte_attrs |= SMMU_PTE_READABLE;
+
+	if (prot & IOMMU_WRITE)
+		pte_attrs |= SMMU_PTE_WRITABLE;
+
 	tegra_smmu_set_pte(as, iova, pte, pte_dma,
-			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
+			   SMMU_PHYS_PFN(paddr) | pte_attrs);
 
 	return 0;
 }
 
-static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+static size_t
+__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+		   size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 	dma_addr_t pte_dma;
@@ -686,6 +751,33 @@
 
 	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
 	tegra_smmu_pte_put_use(as, iova);
+
+	return size;
+}
+
+static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+	struct tegra_smmu_as *as = to_smmu_as(domain);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&as->lock, flags);
+	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
+	spin_unlock_irqrestore(&as->lock, flags);
+
+	return ret;
+}
+
+static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			       size_t size, struct iommu_iotlb_gather *gather)
+{
+	struct tegra_smmu_as *as = to_smmu_as(domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&as->lock, flags);
+	size = __tegra_smmu_unmap(domain, iova, size, gather);
+	spin_unlock_irqrestore(&as->lock, flags);
 
 	return size;
 }
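With this split, tegra_smmu_map()/tegra_smmu_unmap() own the as->lock critical section, the __-prefixed helpers assume the lock is held, and the gfp argument decides whether as_get_pde_page() may drop the lock to sleep. A sketch of how callers reach both paths through the IOMMU core on kernels of this vintage (the exact iommu_map() signature has changed across kernel versions, so treat this as illustrative):

    #include <linux/iommu.h>
    #include <linux/sizes.h>

    /* Blocking path: GFP_KERNEL lets as_get_pde_page() sleep. */
    static int example_map(struct iommu_domain *domain, unsigned long iova,
    		       phys_addr_t paddr)
    {
    	return iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
    }

    /* Atomic path: GFP_ATOMIC keeps the allocation under the spinlock. */
    static int example_map_atomic(struct iommu_domain *domain, unsigned long iova,
    			      phys_addr_t paddr)
    {
    	return iommu_map_atomic(domain, iova, paddr, SZ_4K, IOMMU_READ);
    }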
@@ -704,7 +796,7 @@
 
 	pfn = *pte & as->smmu->pfn_mask;
 
-	return PFN_PHYS(pfn);
+	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
 }
 
 static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
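Two fixes land in that one line: the conversion now uses the SMMU's own 4 KiB shift instead of PFN_PHYS() (which is based on the CPU's PAGE_SHIFT), and the offset within the page is added back rather than silently dropped. With illustrative values:

    pte pfn = 0x40000, iova = 0x80001abc
    SMMU_PFN_PHYS(0x40000)          = 0x40000000
    SMMU_OFFSET_IN_PAGE(0x80001abc) =      0xabc
    returned phys                   = 0x40000abc

On a 64 KiB-page kernel the old PFN_PHYS(0x40000) would have yielded 0x400000000, and even with 4 KiB CPU pages it returned a page-aligned address for an unaligned iova.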
@@ -745,11 +837,10 @@
 	return 0;
 }
 
-static int tegra_smmu_add_device(struct device *dev)
+static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
 	struct tegra_smmu *smmu = NULL;
-	struct iommu_group *group;
 	struct of_phandle_args args;
 	unsigned int index = 0;
 	int err;
@@ -762,16 +853,14 @@
 			of_node_put(args.np);
 
 			if (err < 0)
-				return err;
+				return ERR_PTR(err);
 
 			/*
 			 * Only a single IOMMU master interface is currently
 			 * supported by the Linux kernel, so abort after the
 			 * first match.
 			 */
-			dev->archdata.iommu = smmu;
-
-			iommu_device_link(&smmu->iommu, dev);
+			dev_iommu_priv_set(dev, smmu);
 
 			break;
 		}
@@ -781,26 +870,14 @@
 	}
 
 	if (!smmu)
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-
-	return 0;
+	return &smmu->iommu;
 }
 
-static void tegra_smmu_remove_device(struct device *dev)
+static void tegra_smmu_release_device(struct device *dev)
 {
-	struct tegra_smmu *smmu = dev->archdata.iommu;
-
-	if (smmu)
-		iommu_device_unlink(&smmu->iommu, dev);
-
-	dev->archdata.iommu = NULL;
-	iommu_group_remove_device(dev);
+	dev_iommu_priv_set(dev, NULL);
 }
 
 static const struct tegra_smmu_group_soc *
@@ -816,22 +893,34 @@
 	return NULL;
 }
 
+static void tegra_smmu_group_release(void *iommu_data)
+{
+	struct tegra_smmu_group *group = iommu_data;
+	struct tegra_smmu *smmu = group->smmu;
+
+	mutex_lock(&smmu->lock);
+	list_del(&group->list);
+	mutex_unlock(&smmu->lock);
+}
+
 static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
 						unsigned int swgroup)
 {
 	const struct tegra_smmu_group_soc *soc;
 	struct tegra_smmu_group *group;
+	struct iommu_group *grp;
 
+	/* Find group_soc associating with swgroup */
 	soc = tegra_smmu_find_group(smmu, swgroup);
-	if (!soc)
-		return NULL;
 
 	mutex_lock(&smmu->lock);
 
+	/* Find existing iommu_group associating with swgroup or group_soc */
 	list_for_each_entry(group, &smmu->groups, list)
-		if (group->soc == soc) {
+		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
+			grp = iommu_group_ref_get(group->group);
 			mutex_unlock(&smmu->lock);
-			return group->group;
+			return grp;
 		}
 
 	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
@@ -841,6 +930,8 @@
 	}
 
 	INIT_LIST_HEAD(&group->list);
+	group->swgroup = swgroup;
+	group->smmu = smmu;
 	group->soc = soc;
 
 	group->group = iommu_group_alloc();
@@ -850,6 +941,9 @@
 		return NULL;
 	}
 
+	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
+	if (soc)
+		iommu_group_set_name(group->group, soc->name);
 	list_add_tail(&group->list, &smmu->groups);
 	mutex_unlock(&smmu->lock);
 
@@ -858,8 +952,8 @@
 
 static struct iommu_group *tegra_smmu_device_group(struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	struct tegra_smmu *smmu = dev->archdata.iommu;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
 	struct iommu_group *group;
 
 	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
@@ -883,8 +977,8 @@
 	.domain_free = tegra_smmu_domain_free,
 	.attach_dev = tegra_smmu_attach_dev,
 	.detach_dev = tegra_smmu_detach_dev,
-	.add_device = tegra_smmu_add_device,
-	.remove_device = tegra_smmu_remove_device,
+	.probe_device = tegra_smmu_probe_device,
+	.release_device = tegra_smmu_release_device,
 	.device_group = tegra_smmu_device_group,
 	.map = tegra_smmu_map,
 	.unmap = tegra_smmu_unmap,
@@ -938,17 +1032,7 @@
 	return 0;
 }
 
-static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
-}
-
-static const struct file_operations tegra_smmu_swgroups_fops = {
-	.open = tegra_smmu_swgroups_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);
 
 static int tegra_smmu_clients_show(struct seq_file *s, void *data)
 {
@@ -976,17 +1060,7 @@
 	return 0;
 }
 
-static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, tegra_smmu_clients_show, inode->i_private);
-}
-
-static const struct file_operations tegra_smmu_clients_fops = {
-	.open = tegra_smmu_clients_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
 
 static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
 {
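Both debugfs conversions rely on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates essentially the boilerplate being deleted. Roughly, for the clients file it expands to:

    static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, tegra_smmu_clients_show, inode->i_private);
    }

    static const struct file_operations tegra_smmu_clients_fops = {
    	.owner		= THIS_MODULE,
    	.open		= tegra_smmu_clients_open,
    	.read		= seq_read,
    	.llseek		= seq_lseek,
    	.release	= single_release,
    };

Note that the generated version also sets .owner, which the hand-rolled file_operations here never did.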
@@ -1014,10 +1088,6 @@
 	u32 value;
 	int err;
 
-	/* This can happen on Tegra20 which doesn't have an SMMU */
-	if (!soc)
-		return NULL;
-
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu)
 		return ERR_PTR(-ENOMEM);
@@ -1027,7 +1097,7 @@
 	 * value. However the IOMMU registration process will attempt to add
 	 * all devices to the IOMMU when bus_set_iommu() is called. In order
 	 * not to rely on global variables to track the IOMMU instance, we
-	 * set it here so that it can be looked up from the .add_device()
+	 * set it here so that it can be looked up from the .probe_device()
 	 * callback via the IOMMU device's .drvdata field.
 	 */
 	mc->smmu = smmu;
@@ -1046,10 +1116,11 @@
 	smmu->dev = dev;
 	smmu->mc = mc;
 
-	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+	smmu->pfn_mask =
+		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
 	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
 		mc->soc->num_address_bits, smmu->pfn_mask);
-	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
 	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
 		smmu->tlb_mask);
 
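Both masks were subtly too wide before. pfn_mask now subtracts the SMMU's PTE shift rather than the CPU's PAGE_SHIFT, and the tlb_mask change matters whenever num_tlb_lines is not a power of two. Assuming, for illustration, a SoC with 48 TLB lines:

    old: (48 << 1) - 1      = 95 = 0b1011111   (admits line numbers far above 47)
    new: (1 << fls(48)) - 1 = 63 = 0b0111111   (tightest power-of-two mask)

For power-of-two line counts such as 32, both formulas give 63, so the change is a no-op there.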