@@ -10,12 +10,10 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/iommu-helper.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/vmalloc.h>
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
-
-#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
 
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
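(Annotation, not part of the patch.) The include change follows the dma-mapping header split: <linux/dma-mapping.h> now carries only the driver-facing DMA API, while files that implement struct dma_map_ops are expected to pull in <linux/dma-map-ops.h>. Roughly:

    /* Consumers of the DMA API keep the driver-facing header: */
    #include <linux/dma-mapping.h>  /* dma_map_page(), dma_alloc_coherent(), ... */

    /* Providers of a struct dma_map_ops, such as this file, use: */
    #include <linux/dma-map-ops.h>  /* struct dma_map_ops, dma_common_mmap(), ... */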
@@ -263,13 +261,11 @@
                                   unsigned long start, int size)
 {
         struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-        unsigned long boundary_size;
 
-        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-                              PAGE_SIZE) >> PAGE_SHIFT;
         return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                 start, size, zdev->start_dma >> PAGE_SHIFT,
-                                boundary_size, 0);
+                                dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
+                                0);
 }
 
 static dma_addr_t dma_alloc_address(struct device *dev, int size)
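(Annotation.) dma_get_seg_boundary_nr_pages() replaces the open-coded boundary_size arithmetic with the shared helper from <linux/dma-mapping.h>. A sketch of what the helper computes; since the segment boundary is always a power-of-two mask, this is equivalent to the removed ALIGN(dma_get_seg_boundary(dev) + 1, PAGE_SIZE) >> PAGE_SHIFT:

    static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
                    unsigned int page_shift)
    {
            /* no device: assume the default 4 GiB boundary */
            if (!dev)
                    return (U32_MAX >> page_shift) + 1;
            return (dma_get_seg_boundary(dev) >> page_shift) + 1;
    }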
@@ -301,7 +297,7 @@
 
 out_error:
         spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-        return S390_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
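(Annotation.) DMA_MAPPING_ERROR is the generic error cookie from <linux/dma-mapping.h>; it has the same value as the S390_MAPPING_ERROR constant removed above, so the failure value returned by this function is unchanged:

    #define DMA_MAPPING_ERROR       (~(dma_addr_t)0)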
@@ -349,7 +345,7 @@
         /* This rounds up number of pages based on size and offset */
         nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
         dma_addr = dma_alloc_address(dev, nr_pages);
-        if (dma_addr == S390_MAPPING_ERROR) {
+        if (dma_addr == DMA_MAPPING_ERROR) {
                 ret = -ENOSPC;
                 goto out_err;
         }
@@ -372,7 +368,7 @@
 out_err:
         zpci_err("map error:\n");
         zpci_err_dma(ret, pa);
-        return S390_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -406,7 +402,7 @@
         dma_addr_t map;
 
         size = PAGE_ALIGN(size);
-        page = alloc_pages(flag, get_order(size));
+        page = alloc_pages(flag | __GFP_ZERO, get_order(size));
         if (!page)
                 return NULL;
 
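(Annotation.) dma_alloc_coherent() is documented to hand back zeroed memory, so passing __GFP_ZERO here makes this allocation path provide that guarantee itself instead of leaving it to callers. A hypothetical consumer that relies on it; the names below are illustrative only:

    #include <linux/dma-mapping.h>

    /* Allocate a device-visible ring that is valid without an explicit memset(). */
    static void *alloc_cmd_ring(struct device *dev, size_t size, dma_addr_t *handle)
    {
            return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
    }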
@@ -449,7 +445,7 @@
         int ret;
 
         dma_addr_base = dma_alloc_address(dev, nr_pages);
-        if (dma_addr_base == S390_MAPPING_ERROR)
+        if (dma_addr_base == DMA_MAPPING_ERROR)
                 return -ENOMEM;
 
         dma_addr = dma_addr_base;
@@ -496,7 +492,7 @@
         for (i = 1; i < nr_elements; i++) {
                 s = sg_next(s);
 
-                s->dma_address = S390_MAPPING_ERROR;
+                s->dma_address = DMA_MAPPING_ERROR;
                 s->dma_length = 0;
 
                 if (s->offset || (size & ~PAGE_MASK) ||
@@ -546,11 +542,6 @@
         }
 }
 
-static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == S390_MAPPING_ERROR;
-}
-
 int zpci_dma_init_device(struct zpci_dev *zdev)
 {
         int rc;
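(Annotation.) With the generic DMA_MAPPING_ERROR cookie in use, the per-ops .mapping_error callback and this helper become redundant: dma_mapping_error() in the DMA core compares the returned address against the constant directly. A hypothetical caller-side sketch, unaffected by this patch:

    #include <linux/dma-mapping.h>

    /* map_one_page() is an illustrative name, not something from this file. */
    static int map_one_page(struct device *dev, struct page *page, dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))  /* true iff addr == DMA_MAPPING_ERROR */
                    return -ENOMEM;
            *out = addr;
            return 0;
    }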
@@ -675,16 +666,19 @@
         .unmap_sg       = s390_dma_unmap_sg,
         .map_page       = s390_dma_map_pages,
         .unmap_page     = s390_dma_unmap_pages,
-        .mapping_error  = s390_mapping_error,
+        .mmap           = dma_common_mmap,
+        .get_sgtable    = dma_common_get_sgtable,
+        .alloc_pages    = dma_common_alloc_pages,
+        .free_pages     = dma_common_free_pages,
         /* dma_supported is unconditionally true without a callback */
 };
 EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
 
 static int __init s390_iommu_setup(char *str)
 {
-        if (!strncmp(str, "strict", 6))
+        if (!strcmp(str, "strict"))
                 s390_iommu_strict = 1;
-        return 0;
+        return 1;
 }
 
 __setup("s390_iommu=", s390_iommu_setup);
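(Annotation.) Two things happen in this last hunk. The four dma_common_*() entries wire the generic helpers from the DMA core into s390_pci_dma_ops, so dma_mmap_coherent(), dma_get_sgtable() and the dma_alloc_pages() interface get working fallback implementations instead of returning an error for lack of a callback. The s390_iommu_setup() change fixes the early-parameter handling: a __setup() handler should return 1 once it has consumed its option (returning 0 lets the string fall through as an unhandled parameter), and strcmp() instead of strncmp() stops values such as "strictfoo" from being accepted as "strict". A minimal, hypothetical handler following the same pattern; the parameter name and variable are illustrative only:

    static bool example_strict;

    static int __init example_setup(char *str)
    {
            if (!strcmp(str, "strict"))
                    example_strict = true;
            return 1;  /* option consumed, do not pass it on */
    }
    __setup("example_iommu=", example_setup);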