2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/iommu/msm_iommu.c
@@ -1,23 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
+ * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/errno.h>
 #include <linux/io.h>
@@ -32,7 +21,7 @@
 #include <linux/of_iommu.h>
 
 #include <asm/cacheflush.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 
 #include "msm_iommu_hw-8xxx.h"
 #include "msm_iommu.h"
@@ -45,7 +34,7 @@
 /* bitmap of the page sizes currently supported */
 #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
-DEFINE_SPINLOCK(msm_iommu_lock);
+static DEFINE_SPINLOCK(msm_iommu_lock);
 static LIST_HEAD(qcom_iommu_devices);
 static struct iommu_ops msm_iommu_ops;
 
@@ -179,20 +168,22 @@
 	return;
 }
 
-static void __flush_iotlb_sync(void *cookie)
+static void __flush_iotlb_walk(unsigned long iova, size_t size,
+			       size_t granule, void *cookie)
 {
-	/*
-	 * Nothing is needed here, the barrier to guarantee
-	 * completion of the tlb sync operation is implicitly
-	 * taken care when the iommu client does a writel before
-	 * kick starting the other master.
-	 */
+	__flush_iotlb_range(iova, size, granule, false, cookie);
 }
 
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
+			       unsigned long iova, size_t granule, void *cookie)
+{
+	__flush_iotlb_range(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_all = __flush_iotlb,
-	.tlb_add_flush = __flush_iotlb_range,
-	.tlb_sync = __flush_iotlb_sync,
+	.tlb_flush_walk = __flush_iotlb_walk,
+	.tlb_add_page = __flush_iotlb_page,
 };
 
 static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
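
Both new callbacks delegate to __flush_iotlb_range(), which this patch leaves untouched and therefore does not show. Inferred purely from the two call sites above, the helper presumably has the following shape (a sketch, not the driver's actual code; the parameter names, in particular "leaf", are assumptions):

	/* Assumed shape of the existing helper, reconstructed from the call
	 * sites: (iova, size, granule, false, cookie) for a walk flush and
	 * (iova, granule, granule, true, cookie) for a single leaf page.
	 */
	static void __flush_iotlb_range(unsigned long iova, size_t size,
					size_t granule, bool leaf, void *cookie)
	{
		/* per-context TLB invalidation over [iova, iova + size) */
	}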
@@ -281,8 +272,8 @@
 	SET_V2PCFG(base, ctx, 0x3);
 
 	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
-	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
-	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+	SET_TTBR1(base, ctx, 0);
 
 	/* Set prrr and nmrr */
 	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
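
This hunk tracks an io-pgtable rework in which the ARMv7 short-descriptor configuration apparently dropped its two-entry TTBR array in favour of a single table base, leaving TTBR1 unused; hence the driver now programs TTBR1 to zero. A rough sketch of the assumed field layout after that change (the struct name is hypothetical; ttbr/tcr/prrr/nmrr appear in the surrounding code):

	/* Sketch (assumption): the short-descriptor config after the rework. */
	struct arm_v7s_cfg_sketch {
		u32 ttbr;	/* was: u32 ttbr[2]; only TTBR0 remains */
		u32 tcr;
		u32 prrr;
		u32 nmrr;
	};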
@@ -352,11 +343,10 @@
 	spin_lock_init(&priv->pgtlock);
 
 	priv->cfg = (struct io_pgtable_cfg) {
-		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
 		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
-		.tlb = &msm_iommu_gather_ops,
+		.tlb = &msm_iommu_flush_ops,
 		.iommu_dev = priv->dev,
 	};
 
@@ -390,43 +380,23 @@
 	return ret;
 }
 
-static int msm_iommu_add_device(struct device *dev)
+static struct iommu_device *msm_iommu_probe_device(struct device *dev)
 {
 	struct msm_iommu_dev *iommu;
-	struct iommu_group *group;
 	unsigned long flags;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 	iommu = find_iommu_for_dev(dev);
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 
-	if (iommu)
-		iommu_device_link(&iommu->iommu, dev);
-	else
-		return -ENODEV;
+	if (!iommu)
+		return ERR_PTR(-ENODEV);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
-
-	return 0;
+	return &iommu->iommu;
 }
 
-static void msm_iommu_remove_device(struct device *dev)
+static void msm_iommu_release_device(struct device *dev)
 {
-	struct msm_iommu_dev *iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&msm_iommu_lock, flags);
-	iommu = find_iommu_for_dev(dev);
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
-	if (iommu)
-		iommu_device_unlink(&iommu->iommu, dev);
-
-	iommu_group_remove_device(dev);
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
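
The replacement bodies are much smaller because, under the probe_device()/release_device() interface, device-to-IOMMU linking and group setup move out of individual drivers and into the IOMMU core. Roughly, as a sketch of the assumed core-side flow (the real logic lives in drivers/iommu/iommu.c and is more involved):

	static int core_probe_sketch(struct device *dev, const struct iommu_ops *ops)
	{
		/* The driver only resolves dev to its iommu_device, or an ERR_PTR... */
		struct iommu_device *iommu_dev = ops->probe_device(dev);

		if (IS_ERR(iommu_dev))
			return PTR_ERR(iommu_dev);

		/* ...while linking and group allocation happen once, here. */
		iommu_device_link(iommu_dev, dev);
		return 0;
	}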
@@ -459,10 +429,10 @@
 			master->num =
 				msm_iommu_alloc_ctx(iommu->context_map,
 						    0, iommu->ncb);
-				if (IS_ERR_VALUE(master->num)) {
-					ret = -ENODEV;
-					goto fail;
-				}
+			if (IS_ERR_VALUE(master->num)) {
+				ret = -ENODEV;
+				goto fail;
+			}
 			config_mids(iommu, master);
 			__program_context(iommu->base, master->num,
 					  priv);
@@ -506,27 +476,35 @@
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t pa, size_t len, int prot)
+			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
 	return ret;
 }
 
+static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			       size_t size)
+{
+	struct msm_priv *priv = to_msm_priv(domain);
+
+	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
+}
+
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			      size_t len)
+			      size_t len, struct iommu_iotlb_gather *gather)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	len = priv->iop->unmap(priv->iop, iova, len);
+	len = priv->iop->unmap(priv->iop, iova, len, gather);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
 	return len;
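
The added gfp and gather parameters mirror the io_pgtable_ops callback signatures of this kernel generation; a reference sketch of the two callbacks as the calls above assume them (abridged, names per io-pgtable.h):

	struct io_pgtable_ops_sketch {
		int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
		size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
				size_t size, struct iommu_iotlb_gather *gather);
	};

Note that msm_iommu_map() passes GFP_ATOMIC rather than the caller's gfp: the call happens under priv->pgtlock, so a sleeping allocation would be unsafe regardless of what the caller permits.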
@@ -615,14 +593,14 @@
 				    struct msm_iommu_dev **iommu,
 				    struct of_phandle_args *spec)
 {
-	struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
 	int sid;
 
 	if (list_empty(&(*iommu)->ctx_list)) {
 		master = kzalloc(sizeof(*master), GFP_ATOMIC);
 		master->of_node = dev->of_node;
 		list_add(&master->list, &(*iommu)->ctx_list);
-		dev->archdata.iommu = master;
+		dev_iommu_priv_set(dev, master);
 	}
 
 	for (sid = 0; sid < master->num_mids; sid++)
@@ -638,16 +616,19 @@
 static int qcom_iommu_of_xlate(struct device *dev,
 			       struct of_phandle_args *spec)
 {
-	struct msm_iommu_dev *iommu;
+	struct msm_iommu_dev *iommu = NULL, *iter;
 	unsigned long flags;
 	int ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
-	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
-		if (iommu->dev->of_node == spec->np)
+	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+		if (iter->dev->of_node == spec->np) {
+			iommu = iter;
 			break;
+		}
+	}
 
-	if (!iommu || iommu->dev->of_node != spec->np) {
+	if (!iommu) {
 		ret = -ENODEV;
 		goto fail;
 	}
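
The rewritten loop fixes a fragile pattern: when list_for_each_entry() terminates without a match, the cursor is not NULL but a bogus container_of() of the list head, so testing the cursor afterwards inspects junk; the old of_node recheck only papered over that. Capturing the hit in a separate variable is the robust form, e.g. (hypothetical types, a minimal sketch):

	/* Hazard sketch: after the loop, the raw cursor never becomes NULL. */
	struct item { struct list_head node; int id; };

	static struct item *find_item(struct list_head *head, int id)
	{
		struct item *found = NULL, *pos;

		list_for_each_entry(pos, head, node) {
			if (pos->id == id) {
				found = pos;	/* record the match explicitly */
				break;
			}
		}
		return found;	/* NULL on no match, unlike pos */
	}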
@@ -702,9 +683,17 @@
 	.detach_dev = msm_iommu_detach_dev,
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
+	/*
+	 * Nothing is needed here, the barrier to guarantee
+	 * completion of the tlb sync operation is implicitly
+	 * taken care when the iommu client does a writel before
+	 * kick starting the other master.
+	 */
+	.iotlb_sync = NULL,
+	.iotlb_sync_map = msm_iommu_sync_map,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.add_device = msm_iommu_add_device,
-	.remove_device = msm_iommu_remove_device,
+	.probe_device = msm_iommu_probe_device,
+	.release_device = msm_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
@@ -761,7 +750,6 @@
 
 	iommu->irq = platform_get_irq(pdev, 0);
 	if (iommu->irq < 0) {
-		dev_err(iommu->dev, "could not get iommu irq\n");
 		ret = -ENODEV;
 		goto fail;
 	}
@@ -861,14 +849,5 @@
 
 	return ret;
 }
-
-static void __exit msm_iommu_driver_exit(void)
-{
-	platform_driver_unregister(&msm_iommu_driver);
-}
-
 subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
 
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
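
With <linux/module.h> swapped for <linux/init.h> and the module_exit()/MODULE_*() boilerplate removed, the driver becomes builtin-only: it registers at subsys_initcall time and can never be unloaded, so no exit path is needed. What remains is presumably just the registration stub, roughly:

	/* Sketch (assumed, consistent with the diff): the surviving init path. */
	static int __init msm_iommu_driver_init(void)
	{
		return platform_driver_register(&msm_iommu_driver);
	}
	subsys_initcall(msm_iommu_driver_init);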