@@ ... @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
-#include <linux/scatterlist.h>
+#include <linux/dma-map-ops.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/io.h>
 #include <asm/mxcc.h>
 #include <asm/mbus.h>
@@ ... @@
 
 #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
 #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops;
 
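For reference, MKIOPTE() packs a page frame number into the hardware IOPTE format: the pfn is shifted into the page-number field, masked by IOPTE_PAGE, OR'd with the permission bits, and the write-as-zeros bit is forced clear. A standalone userspace sketch of that packing; the mask values below are assumptions taken from arch/sparc/include/asm/iommu_32.h and should be verified against the tree:

#include <assert.h>

/* Assumed IOPTE bit layout (from asm/iommu_32.h; verify before relying on it). */
#define IOPTE_PAGE   0x07ffff00	/* physical page number field */
#define IOPTE_CACHE  0x00000080	/* cacheable */
#define IOPTE_WRITE  0x00000004	/* writeable */
#define IOPTE_VALID  0x00000002	/* entry valid */
#define IOPTE_WAZ    0x00000001	/* write-as-zeros, must stay clear */

#define IOPERM       (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

int main(void)
{
	unsigned long iopte = MKIOPTE(0x12345UL, IOPERM);

	assert((iopte & IOPTE_PAGE) == (0x12345UL << 8));	/* pfn lands in the page field */
	assert(iopte & IOPTE_VALID);
	assert(!(iopte & IOPTE_WAZ));
	return 0;
}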
---|
 static void __init sbus_iommu_init(struct platform_device *op)
 {
@@ ... @@
 		(int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
 
 	op->dev.archdata.iommu = iommu;
+
+	if (flush_page_for_dma_global)
+		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
+	else
+		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
 }
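With dma_ops assigned per device here, the common DMA layer dispatches straight to these routines, which is what lets the global sparc32_dma_ops selection in ld_mmu_iommu() be deleted at the end of this patch. Roughly how the core reaches them (a simplified sketch of the include/linux/dma-mapping.h dispatch, not the verbatim kernel source; the dma-direct fast path and debug hooks are trimmed):

/* Simplified sketch of the generic dispatch, for orientation only. */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* For an SBus device this is now sbus_iommu_map_page_{g,p}flush(). */
	return ops->map_page(dev, page, offset, size, dir, attrs);
}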
---|
 
 static int __init iommu_init(void)
@@ ... @@
 	}
 }
 
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t len, bool per_page_flush)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	iopte_t *iopte, *iopte0;
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	unsigned long off = paddr & ~PAGE_MASK;
+	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(paddr);
 	unsigned int busa, busa0;
-	int i;
+	iopte_t *iopte, *iopte0;
+	int ioptex, i;
+
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return DMA_MAPPING_ERROR;
+
+	/*
+	 * We expect unmapped highmem pages to be not in the cache.
+	 * XXX Is this a good assumption?
+	 * XXX What if someone else unmaps it here and races us?
+	 */
+	if (per_page_flush && !PageHighMem(page)) {
+		unsigned long vaddr, p;
+
+		vaddr = (unsigned long)page_address(page) + offset;
+		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
+			flush_page_for_dma(p);
+	}
 
 	/* page color = pfn of page */
-	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
 	if (ioptex < 0)
 		panic("iommu out");
 	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
@@ ... @@
 	busa = busa0;
 	iopte = iopte0;
 	for (i = 0; i < npages; i++) {
-		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
 		iommu_invalidate_page(iommu->regs, busa);
 		busa += PAGE_SIZE;
 		iopte++;
-		page++;
+		pfn++;
 	}
 
 	iommu_flush_iotlb(iopte0, npages);
-
-	return busa0;
+	return busa0 + off;
 }
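The rounding above deserves a spot check: the byte offset within the first page is added to the length before rounding up, so a transfer that straddles page boundaries reserves enough IOPTEs, and the sub-page offset is added back onto the returned bus address. A standalone userspace check of the same arithmetic (the address and length are arbitrary):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long paddr = 0x12340a34UL;	/* hypothetical physical address */
	unsigned long len = 0x1800;		/* 6 KiB */
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 0xa34 + 0x1800 = 0x2234 bytes touched from the page start -> 3 pages */
	assert(off == 0xa34);
	assert(npages == 3);
	printf("off=%#lx npages=%lu\n", off, npages);
	return 0;
}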
---|
 
-static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
-{
-	unsigned long off;
-	int npages;
-	struct page *page;
-	u32 busa;
-
-	off = (unsigned long)vaddr & ~PAGE_MASK;
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
-	busa = iommu_get_one(dev, page, npages);
-	return busa + off;
-}
-
-static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return iommu_get_scsi_one(dev, vaddr, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, false);
 }
 
-static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+	return __sbus_iommu_map_page(dev, page, offset, len, true);
+}
 
-	while(page < ((unsigned long)(vaddr + len))) {
-		flush_page_for_dma(page);
-		page += PAGE_SIZE;
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs,
+		bool per_page_flush)
+{
+	struct scatterlist *sg;
+	int j;
+
+	for_each_sg(sgl, sg, nents, j) {
+		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, per_page_flush);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return 0;
+		sg->dma_length = sg->length;
 	}
-	return iommu_get_scsi_one(dev, vaddr, len);
+
+	return nents;
 }
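From a driver's point of view these helpers sit behind the generic scatterlist API. A sketch of typical caller-side usage; the identifiers dev, buf0, buf1, len0, and len1 are placeholders, not names from this patch:

/* Sketch of a hypothetical caller mapping a two-entry scatterlist. */
static int example_map(struct device *dev, void *buf0, size_t len0,
		       void *buf1, size_t len1)
{
	struct scatterlist sgl[2];
	int nents;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], buf0, len0);
	sg_set_buf(&sgl[1], buf1, len1);

	nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;	/* __sbus_iommu_map_sg() returned 0 */

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}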
---|
 
-static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	int n;
-
 	flush_page_for_dma(0);
-	while (sz != 0) {
-		--sz;
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
 }
 
-static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page, oldpage = 0;
-	int n, i;
-
-	while(sz != 0) {
-		--sz;
-
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-		/*
-		 * We expect unmapped highmem pages to be not in the cache.
-		 * XXX Is this a good assumption?
-		 * XXX What if someone else unmaps it here and races us?
-		 */
-		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
-			for (i = 0; i < n; i++) {
-				if (page != oldpage) {	/* Already flushed? */
-					flush_page_for_dma(page);
-					oldpage = page;
-				}
-				page += PAGE_SIZE;
-			}
-		}
-
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
 }
 
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	int i;
+	unsigned int busa = dma_addr & PAGE_MASK;
+	unsigned long off = dma_addr & ~PAGE_MASK;
+	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned int i;
 
 	BUG_ON(busa < iommu->start);
-	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
 		iommu_invalidate_page(iommu->regs, busa);
@@ ... @@
 	bit_map_clear(&iommu->usemap, ioptex, npages);
 }
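The matching single-page path from the caller's perspective, again as a sketch with placeholder names (dev, page, len). Note that callers must detect failure with dma_mapping_error() rather than comparing against DMA_MAPPING_ERROR themselves:

/* Sketch: map one page for device writes, then tear the mapping down. */
static int example_map_page(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... let the device DMA into the buffer via 'handle' ... */

	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}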
---|
 
-static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long off;
-	int npages;
+	struct scatterlist *sg;
+	int i;
 
-	off = vaddr & ~PAGE_MASK;
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
-}
-
-static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
-{
-	int n;
-
-	while(sz != 0) {
-		--sz;
-
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+	for_each_sg(sgl, sg, nents, i) {
+		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+				attrs);
 		sg->dma_address = 0x21212121;
-		sg = sg_next(sg);
 	}
 }
 
 #ifdef CONFIG_SBUS
-static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
-			      unsigned long addr, int len)
+static void *sbus_iommu_alloc(struct device *dev, size_t len,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	unsigned long page, end;
+	unsigned long va, addr, page, end, ret;
 	iopte_t *iopte = iommu->page_table;
 	iopte_t *first;
 	int ioptex;
+
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return NULL;
+
+	len = PAGE_ALIGN(len);
+	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
+	if (va == 0)
+		return NULL;
+
+	addr = ret = sparc_dma_alloc_resource(dev, len);
+	if (!addr)
+		goto out_free_pages;
 
 	BUG_ON((va & ~PAGE_MASK) != 0);
 	BUG_ON((addr & ~PAGE_MASK) != 0);
@@ ... @@
 	while(addr < end) {
 		page = va;
 		{
-			pgd_t *pgdp;
 			pmd_t *pmdp;
 			pte_t *ptep;
@@ ... @@
 			else
 				__flush_page_to_ram(page);
 
-			pgdp = pgd_offset(&init_mm, addr);
-			pmdp = pmd_offset(pgdp, addr);
+			pmdp = pmd_off_k(addr);
 			ptep = pte_offset_map(pmdp, addr);
 
 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
@@ ... @@
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 
-	*pba = iommu->start + (ioptex << PAGE_SHIFT);
-	return 0;
+	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
+	return (void *)ret;
+
+out_free_pages:
+	free_pages(va, get_order(len));
+	return NULL;
 }
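This implements the ->alloc hook, reached through dma_alloc_coherent()/dma_alloc_attrs(). Caller-side the pairing looks like the sketch below (size and names are hypothetical; the 256 KiB cap above means larger requests simply fail):

/* Sketch: coherent allocation backed by sbus_iommu_alloc()/sbus_iommu_free(). */
static int example_alloc(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu;

	cpu = dma_alloc_coherent(dev, 8 * PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* cpu is the kernel mapping; dma_handle is the IOMMU bus address. */

	dma_free_coherent(dev, 8 * PAGE_SIZE, cpu, dma_handle);
	return 0;
}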
---|
 
-static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
+static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
+		dma_addr_t busa, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
 	iopte_t *iopte = iommu->page_table;
-	unsigned long end;
+	struct page *page = virt_to_page(cpu_addr);
 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned long end;
+
+	if (!sparc_dma_free_resource(cpu_addr, len))
+		return;
 
 	BUG_ON((busa & ~PAGE_MASK) != 0);
 	BUG_ON((len & ~PAGE_MASK) != 0);
@@ ... @@
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+
+	__free_pages(page, get_order(len));
 }
 #endif
 
-static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
-	.get_scsi_one = iommu_get_scsi_one_gflush,
-	.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
-	.release_scsi_one = iommu_release_scsi_one,
-	.release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area = iommu_map_dma_area,
-	.unmap_dma_area = iommu_unmap_dma_area,
+	.alloc = sbus_iommu_alloc,
+	.free = sbus_iommu_free,
 #endif
+	.map_page = sbus_iommu_map_page_gflush,
+	.unmap_page = sbus_iommu_unmap_page,
+	.map_sg = sbus_iommu_map_sg_gflush,
+	.unmap_sg = sbus_iommu_unmap_sg,
 };
 
-static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
-	.get_scsi_one = iommu_get_scsi_one_pflush,
-	.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
-	.release_scsi_one = iommu_release_scsi_one,
-	.release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area = iommu_map_dma_area,
-	.unmap_dma_area = iommu_unmap_dma_area,
+	.alloc = sbus_iommu_alloc,
+	.free = sbus_iommu_free,
 #endif
+	.map_page = sbus_iommu_map_page_pflush,
+	.unmap_page = sbus_iommu_unmap_page,
+	.map_sg = sbus_iommu_map_sg_pflush,
+	.unmap_sg = sbus_iommu_unmap_sg,
 };
 
 void __init ld_mmu_iommu(void)
 {
-	if (flush_page_for_dma_global) {
-		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		sparc32_dma_ops = &iommu_dma_gflush_ops;
-	} else {
-		sparc32_dma_ops = &iommu_dma_pflush_ops;
-	}
-
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
 		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
---|