```diff
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * OpenRISC Linux
  *
 ...
  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * DMA mapping callbacks...
- * As alloc_coherent is the only DMA callback being used currently, that's
- * the only thing implemented properly. The rest need looking into...
  */

-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
+#include <linux/pagewalk.h>

 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 ...
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
-       flush_tlb_page(NULL, addr);
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
```
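The old code flushed the TLB with `flush_tlb_page(NULL, addr)`, abusing an interface that expects the VMA a user page is mapped into; these are kernel virtual addresses, so the range-based kernel flush is the appropriate primitive. For reference, the generic shapes of the two interfaces (the per-architecture prototypes live in `asm/tlbflush.h`; this is only a reminder of the API, not OpenRISC-specific code):

```c
/* Flushes the TLB entry for one user page; needs the owning VMA. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);

/* Flushes TLB entries for a range of kernel virtual addresses. */
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
```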
|---|
```diff
 
        return 0;
 }
+
+static const struct mm_walk_ops set_nocache_walk_ops = {
+       .pte_entry = page_set_nocache,
+};
 
 static int
 page_clear_nocache(pte_t *pte, unsigned long addr,
```
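The callbacks are now registered through a file-scope `const struct mm_walk_ops` rather than an on-stack `struct mm_walk`, matching the reworked pagewalk API from `<linux/pagewalk.h>`: the mm, the ops table, and a private pointer are passed to `walk_page_range()` directly, and the caller is expected to hold the mmap lock around the walk. A minimal before/after sketch of the calling convention, using the names from this file:

```c
/* Old pagewalk API: callbacks and mm bundled into an on-stack struct. */
struct mm_walk walk = {
        .pte_entry = page_set_nocache,
        .mm = &init_mm
};
error = walk_page_range(va, va + size, &walk);

/* New pagewalk API: a const ops table, with the mm and a private pointer
 * passed explicitly; the caller takes the mmap lock around the walk. */
mmap_read_lock(&init_mm);
error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops, NULL);
mmap_read_unlock(&init_mm);
```

The same conversion is applied to `page_clear_nocache()` and to the allocation hooks below: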
|---|
```diff
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
-       flush_tlb_page(NULL, addr);
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 
        return 0;
 }
 
-/*
- * Alloc "coherent" memory, which for OpenRISC means simply uncached.
- *
- * This function effectively just calls __get_free_pages, sets the
- * cache-inhibit bit on those pages, and makes sure that the pages are
- * flushed out of the cache before they are used.
- *
- * If the NON_CONSISTENT attribute is set, then this function just
- * returns "normal", cachable memory.
- *
- * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
- * into consideration here, too. All current known implementations of
- * the OR1K support only strongly ordered memory accesses, so that flag
- * is being ignored for now; uncached but write-combined memory is a
- * missing feature of the OR1K.
- */
-void *
-arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
+static const struct mm_walk_ops clear_nocache_walk_ops = {
+       .pte_entry = page_clear_nocache,
+};
+
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
 {
-       unsigned long va;
-       void *page;
-       struct mm_walk walk = {
-               .pte_entry = page_set_nocache,
-               .mm = &init_mm
-       };
+       unsigned long va = (unsigned long)cpu_addr;
+       int error;
 
-       page = alloc_pages_exact(size, gfp);
-       if (!page)
-               return NULL;
+       /*
+        * We need to iterate through the pages, clearing the dcache for
+        * them and setting the cache-inhibit bit.
+        */
+       mmap_read_lock(&init_mm);
+       error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+                       NULL);
+       mmap_read_unlock(&init_mm);
 
-       /* This gives us the real physical address of the first page. */
-       *dma_handle = __pa(page);
-
-       va = (unsigned long)page;
-
-       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
-               /*
-                * We need to iterate through the pages, clearing the dcache for
-                * them and setting the cache-inhibit bit.
-                */
-               if (walk_page_range(va, va + size, &walk)) {
-                       free_pages_exact(page, size);
-                       return NULL;
-               }
-       }
-
-       return (void *)va;
+       if (error)
+               return ERR_PTR(error);
+       return cpu_addr;
 }
 
-void
-arch_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
-       unsigned long va = (unsigned long)vaddr;
-       struct mm_walk walk = {
-               .pte_entry = page_clear_nocache,
-               .mm = &init_mm
-       };
+       unsigned long va = (unsigned long)cpu_addr;
 
-       if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
-               /* walk_page_range shouldn't be able to fail here */
-               WARN_ON(walk_page_range(va, va + size, &walk));
-       }
-
-       free_pages_exact(vaddr, size);
+       mmap_read_lock(&init_mm);
+       /* walk_page_range shouldn't be able to fail here */
+       WARN_ON(walk_page_range(&init_mm, va, va + size,
+                       &clear_nocache_walk_ops, NULL));
+       mmap_read_unlock(&init_mm);
 }
```
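With `arch_dma_alloc()`/`arch_dma_free()` gone, the page allocation, the `dma_handle` computation, and the old `DMA_ATTR_NON_CONSISTENT` special case are no longer the architecture's concern; the architecture only supplies hooks that switch an already-allocated buffer's kernel mapping to uncached and back. A rough sketch of how the generic DMA-direct allocator is expected to drive these hooks (a simplification, not the verbatim kernel/dma/direct.c code):

```c
/* Allocation side (simplified): generic code has already allocated the
 * pages and will compute the dma_handle itself; it only asks the
 * architecture for an uncached view of the buffer. */
void *ret = page_address(page);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
        goto out_free_pages;    /* e.g. the page-table walk failed */

/* Free side (simplified): restore the cached mapping before the pages
 * go back to the allocator. */
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
        arch_dma_clear_uncached(cpu_addr, size);
```

The remaining cache-synchronization hook likewise drops its unused `struct device` argument: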
|---|
```diff
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
                enum dma_data_direction dir)
 {
        unsigned long cl;
```