2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/rknpu/rknpu_mm.c
@@ -236,54 +236,3 @@
 
 	return 0;
 }
-
-dma_addr_t rknpu_iommu_dma_alloc_iova(struct iommu_domain *domain, size_t size,
-				      u64 dma_limit, struct device *dev)
-{
-	struct rknpu_iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
-	unsigned long shift, iova_len, iova = 0;
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-	dma_addr_t limit;
-#endif
-
-	shift = iova_shift(iovad);
-	iova_len = size >> shift;
-	/*
-	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
-	 * will come back to bite us badly, so we have to waste a bit of space
-	 * rounding up anything cacheable to make sure that can't happen. The
-	 * order of the unadjusted size will still match upon freeing.
-	 */
-	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
-		iova_len = roundup_pow_of_two(iova_len);
-
-#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
-	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
-#else
-	if (dev->bus_dma_mask)
-		dma_limit &= dev->bus_dma_mask;
-#endif
-
-	if (domain->geometry.force_aperture)
-		dma_limit =
-			min_t(u64, dma_limit, domain->geometry.aperture_end);
-
-#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
-	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
-#else
-	limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn);
-
-	iova = alloc_iova_fast(iovad, iova_len, limit, true);
-#endif
-
-	return (dma_addr_t)iova << shift;
-}
-
-void rknpu_iommu_dma_free_iova(struct rknpu_iommu_dma_cookie *cookie,
-			       dma_addr_t iova, size_t size)
-{
-	struct iova_domain *iovad = &cookie->iovad;
-
-	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
-}
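
For context, below is a minimal usage sketch of the two helpers removed above, not taken from the driver: rknpu_map_example() and its paddr/size parameters are hypothetical, struct rknpu_iommu_dma_cookie is assumed to come from the driver's own header, and iommu_map() is shown in its five-argument (pre-6.3) form. It reserves an IOVA window with rknpu_iommu_dma_alloc_iova(), points it at a physically contiguous buffer via the generic IOMMU API, and releases the window again on error or teardown.

/*
 * Usage sketch only (assumptions noted above); not part of the patch.
 */
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/dma-mapping.h>

static int rknpu_map_example(struct iommu_domain *domain, struct device *dev,
			     phys_addr_t paddr, size_t size)
{
	struct rknpu_iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t aligned = iova_align(&cookie->iovad, size);
	dma_addr_t iova;
	int ret;

	/* Reserve an IOVA range below the device's DMA mask. */
	iova = rknpu_iommu_dma_alloc_iova(domain, aligned, dma_get_mask(dev),
					  dev);
	if (!iova)
		return -ENOMEM;

	/* Point the reserved range at the physical buffer. */
	ret = iommu_map(domain, iova, paddr, aligned,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		rknpu_iommu_dma_free_iova(cookie, iova, aligned);
		return ret;
	}

	/* ... hand "iova" to the device, then tear the mapping down ... */

	iommu_unmap(domain, iova, aligned);
	rknpu_iommu_dma_free_iova(cookie, iova, aligned);
	return 0;
}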