```diff
@@ -58,12 +58,11 @@
 	bool uncached;
 };
 
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
 #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
 				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
 				| __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 /*
  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
  * to match with the sizes often found in IOMMUs. Using order 4 pages instead
```
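With MID_ORDER_GFP removed, the 64K (order-4) allocations reuse HIGH_ORDER_GFP, so like the 1MB allocations they skip direct reclaim and fail quietly rather than stalling, while LOW_ORDER_GFP drops __GFP_COMP, which has no effect on order-0 pages anyway. For context, the sketch below shows how order_flags[] is paired with the allocation orders; orders[], NUM_ORDERS, and alloc_largest_available() follow the mainline system heap and are assumptions about parts of the file this diff does not show.

```c
/*
 * Sketch of how order_flags[] is consumed (mainline system-heap shape,
 * shown for context only; not part of this change).
 */
static const unsigned int orders[] = {8, 4, 0};	/* 1MB, 64K, 4K */
#define NUM_ORDERS ARRAY_SIZE(orders)

static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		/* With this patch: 1MB -> HIGH, 64K -> HIGH, 4K -> LOW */
		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
```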
```diff
@@ -266,7 +265,7 @@
 	return 0;
 }
 
-static int
+static int __maybe_unused
 system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 					     enum dma_data_direction direction,
 					     unsigned int offset,
```
```diff
@@ -296,7 +295,7 @@
 	return ret;
 }
 
-static int
+static int __maybe_unused
 system_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 					   enum dma_data_direction direction,
 					   unsigned int offset,
```
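Both partial CPU-access helpers gain __maybe_unused because the ops-table entries that reference them are wrapped in CONFIG_DMABUF_PARTIAL in the hunk below; with that option disabled, the functions would be defined but never referenced and would trip -Wunused-function. A minimal illustration of the pattern, using made-up names (struct example_ops, example_helper, and CONFIG_EXAMPLE_OPTION are hypothetical):

```c
struct example_ops {
	int (*helper)(void);
};

/*
 * Without __maybe_unused, a build with CONFIG_EXAMPLE_OPTION disabled would
 * warn that example_helper() is defined but unused, since the only reference
 * to it is compiled out below.
 */
static int __maybe_unused example_helper(void)
{
	return 0;
}

static const struct example_ops example_ops = {
#ifdef CONFIG_EXAMPLE_OPTION
	.helper = example_helper,
#endif
};
```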
```diff
@@ -479,8 +478,10 @@
 	.unmap_dma_buf = system_heap_unmap_dma_buf,
 	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
 	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
+#ifdef CONFIG_DMABUF_PARTIAL
 	.begin_cpu_access_partial = system_heap_dma_buf_begin_cpu_access_partial,
 	.end_cpu_access_partial = system_heap_dma_buf_end_cpu_access_partial,
+#endif
 	.mmap = system_heap_mmap,
 	.vmap = system_heap_vmap,
 	.vunmap = system_heap_vunmap,
```
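The partial hooks are now only wired into the ops table when CONFIG_DMABUF_PARTIAL is enabled, while the full-buffer begin/end hooks stay unconditional. Those unconditional hooks are reached through the standard dma-buf importer API; the sketch below shows that call pattern, where example_cpu_access() and example_touch_buffer() are hypothetical stand-ins for an importer's own code and only dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() are real kernel API.

```c
/*
 * Importer-side sketch: CPU access to a dma-buf is bracketed by
 * begin/end_cpu_access so the exporter (this heap) can do cache maintenance.
 */
static int example_cpu_access(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	example_touch_buffer(dmabuf);	/* hypothetical CPU-side access */

	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}
```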
```diff
@@ -702,9 +703,6 @@
 
 	if (!heap_dev)
 		return -EINVAL;
-
-	/* Set a dma ops(swiotlb) for the heap device. */
-	arch_setup_dma_ops(heap_dev, 0, 0, NULL, 0);
 
 	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
 
```
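With the explicit arch_setup_dma_ops() call dropped, DMA-ops selection for the heap device is left to the core code and only the 64-bit mask setup remains. For reference, dma_coerce_mask_and_coherent() behaves roughly like the sketch below, which mirrors the inline helper in <linux/dma-mapping.h>; example_coerce_mask() is a made-up name.

```c
/*
 * Rough equivalent of dma_coerce_mask_and_coherent(dev, mask): make the
 * streaming mask alias the coherent mask, then set both to `mask`.
 */
static int example_coerce_mask(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
```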