From 1c055e55a242a33e574e48be530e06770a210dcd Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 19 Feb 2024 03:26:26 +0000
Subject: [PATCH] dma-buf: heaps: cma_heap: remove vendor uncached heap and
 partial CPU-access support

Drop the vendor additions to the CMA heap, bringing the driver back in
line with the upstream implementation:

- remove the "cma-uncached" heap, its deferred-initialization ops, the
  per-buffer/per-attachment "uncached" state, the DMA_ATTR_SKIP_CPU_SYNC
  map attributes and the pgprot_writecombine() mappings;
- remove the begin/end_cpu_access_partial callbacks and fold the cache
  maintenance back into plain begin/end_cpu_access, taking buffer->lock
  around the whole operation;
- remove the CONFIG_NO_GKI get_phys callback;
- include <linux/dma-map-ops.h> instead of <linux/dma-contiguous.h> and
  drop the now-unused <uapi/linux/dma-heap.h> include.
---
 kernel/drivers/dma-buf/heaps/cma_heap.c | 233 ++++------------------------------------------------
 1 file changed, 17 insertions(+), 216 deletions(-)

diff --git a/kernel/drivers/dma-buf/heaps/cma_heap.c b/kernel/drivers/dma-buf/heaps/cma_heap.c
index d89b861..511d678 100644
--- a/kernel/drivers/dma-buf/heaps/cma_heap.c
+++ b/kernel/drivers/dma-buf/heaps/cma_heap.c
@@ -12,7 +12,7 @@
 #include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -21,7 +21,6 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
-#include <uapi/linux/dma-heap.h>
 
 struct cma_heap {
@@ -39,8 +38,6 @@
 	pgoff_t pagecount;
 	int vmap_cnt;
 	void *vaddr;
-
-	bool uncached;
 };
 
 struct dma_heap_attachment {
@@ -48,8 +45,6 @@
 	struct sg_table table;
 	struct list_head list;
 	bool mapped;
-
-	bool uncached;
 };
 
 static int cma_heap_attach(struct dma_buf *dmabuf,
@@ -76,7 +71,6 @@
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
-	a->uncached = buffer->uncached;
 
 	attachment->priv = a;
 
 	mutex_lock(&buffer->lock);
@@ -108,9 +102,6 @@
 	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	if (a->uncached)
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
 	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(-ENOMEM);
@@ -126,93 +117,49 @@
 	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-
-	if (a->uncached)
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
 	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
-static int
-cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
-					  enum dma_data_direction direction,
-					  unsigned int offset,
-					  unsigned int len)
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					     enum dma_data_direction direction)
 {
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	if (buffer->uncached)
-		return 0;
-
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
 		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
 	}
-	if (list_empty(&buffer->attachments)) {
-		phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-		dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
-					phys + offset,
-					len,
-					direction);
-	}
 	mutex_unlock(&buffer->lock);
 
 	return 0;
 }
 
-static int
-cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
-					enum dma_data_direction direction,
-					unsigned int offset,
-					unsigned int len)
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction)
 {
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	if (buffer->uncached)
-		return 0;
-
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
 		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
 	}
-	if (list_empty(&buffer->attachments)) {
-		phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-		dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap),
-					   phys + offset,
-					   len,
-					   direction);
-	}
 	mutex_unlock(&buffer->lock);
 
 	return 0;
-}
-
-static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					     enum dma_data_direction dir)
-{
-	return cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0,
-							 dmabuf->size);
-}
-
-static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					   enum dma_data_direction dir)
-{
-	return cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0,
-						       dmabuf->size);
 }
 
 static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
@@ -240,9 +187,6 @@
 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
 		return -EINVAL;
 
-	if (buffer->uncached)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
 	vma->vm_ops = &dma_heap_vm_ops;
 	vma->vm_private_data = buffer;
 
@@ -252,12 +196,8 @@
 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
 {
 	void *vaddr;
-	pgprot_t pgprot = PAGE_KERNEL;
 
-	if (buffer->uncached)
-		pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
 	if (!vaddr)
 		return ERR_PTR(-ENOMEM);
 
@@ -324,18 +264,16 @@
 	.unmap_dma_buf = cma_heap_unmap_dma_buf,
 	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
 	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
-	.begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
-	.end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
 	.mmap = cma_heap_mmap,
 	.vmap = cma_heap_vmap,
 	.vunmap = cma_heap_vunmap,
 	.release = cma_heap_dma_buf_release,
 };
 
-static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 					 unsigned long len,
 					 unsigned long fd_flags,
-					 unsigned long heap_flags, bool uncached)
+					 unsigned long heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
 	struct cma_heap_buffer *buffer;
@@ -347,13 +285,10 @@
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
 	pgoff_t pg;
-	dma_addr_t dma;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
 		return ERR_PTR(-ENOMEM);
-
-	buffer->uncached = uncached;
 
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
@@ -414,13 +349,6 @@
 		goto free_pages;
 	}
 
-	if (buffer->uncached) {
-		dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
-				   buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-		dma_unmap_page(dma_heap_get_dev(heap), dma,
-			       buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
 	return dmabuf;
 
 free_pages:
@@ -433,107 +361,14 @@
 	return ERR_PTR(ret);
 }
 
-static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
-					 unsigned long len,
-					 unsigned long fd_flags,
-					 unsigned long heap_flags)
-{
-	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
-}
-
-#if IS_ENABLED(CONFIG_NO_GKI)
-static int cma_heap_get_phys(struct dma_heap *heap,
-			     struct dma_heap_phys_data *phys)
-{
-	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
-	struct cma_heap_buffer *buffer;
-	struct dma_buf *dmabuf;
-
-	phys->paddr = (__u64)-1;
-
-	if (IS_ERR_OR_NULL(phys))
-		return -EINVAL;
-
-	dmabuf = dma_buf_get(phys->fd);
-	if (IS_ERR_OR_NULL(dmabuf))
-		return -EBADFD;
-
-	buffer = dmabuf->priv;
-	if (IS_ERR_OR_NULL(buffer))
-		goto err;
-
-	if (buffer->heap != cma_heap)
-		goto err;
-
-	phys->paddr = page_to_phys(buffer->cma_pages);
-
-err:
-	dma_buf_put(dmabuf);
-
-	return (phys->paddr == (__u64)-1) ? -EINVAL : 0;
-}
-#endif
-
 static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
-#if IS_ENABLED(CONFIG_NO_GKI)
-	.get_phys = cma_heap_get_phys,
-#endif
 };
-
-static struct dma_buf *cma_uncached_heap_allocate(struct dma_heap *heap,
-						  unsigned long len,
-						  unsigned long fd_flags,
-						  unsigned long heap_flags)
-{
-	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
-}
-
-static struct dma_buf *cma_uncached_heap_not_initialized(struct dma_heap *heap,
-							 unsigned long len,
-							 unsigned long fd_flags,
-							 unsigned long heap_flags)
-{
-	pr_info("heap %s not initialized\n", dma_heap_get_name(heap));
-	return ERR_PTR(-EBUSY);
-}
-
-static struct dma_heap_ops cma_uncached_heap_ops = {
-	.allocate = cma_uncached_heap_not_initialized,
-};
-
-static int set_heap_dev_dma(struct device *heap_dev)
-{
-	int err = 0;
-
-	if (!heap_dev)
-		return -EINVAL;
-
-	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
-
-	if (!heap_dev->dma_parms) {
-		heap_dev->dma_parms = devm_kzalloc(heap_dev,
-						   sizeof(*heap_dev->dma_parms),
-						   GFP_KERNEL);
-		if (!heap_dev->dma_parms)
-			return -ENOMEM;
-
-		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
-		if (err) {
-			devm_kfree(heap_dev, heap_dev->dma_parms);
-			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
-			return err;
-		}
-	}
-
-	return 0;
-}
 
 static int __add_cma_heap(struct cma *cma, void *data)
 {
-	struct cma_heap *cma_heap, *cma_uncached_heap;
+	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
-	int ret;
 
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
@@ -546,47 +381,13 @@
 
 	cma_heap->heap = dma_heap_add(&exp_info);
 	if (IS_ERR(cma_heap->heap)) {
-		ret = PTR_ERR(cma_heap->heap);
-		goto free_cma_heap;
+		int ret = PTR_ERR(cma_heap->heap);
+
+		kfree(cma_heap);
+		return ret;
 	}
-
-	cma_uncached_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
-	if (!cma_uncached_heap) {
-		ret = -ENOMEM;
-		goto put_cma_heap;
-	}
-
-	cma_uncached_heap->cma = cma;
-
-	exp_info.name = "cma-uncached";
-	exp_info.ops = &cma_uncached_heap_ops;
-	exp_info.priv = cma_uncached_heap;
-
-	cma_uncached_heap->heap = dma_heap_add(&exp_info);
-	if (IS_ERR(cma_uncached_heap->heap)) {
-		ret = PTR_ERR(cma_uncached_heap->heap);
-		goto free_uncached_cma_heap;
-	}
-
-	ret = set_heap_dev_dma(dma_heap_get_dev(cma_uncached_heap->heap));
-	if (ret)
-		goto put_uncached_cma_heap;
-
-	mb(); /* make sure we only set allocate after dma_mask is set */
-	cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;
 
 	return 0;
-
-put_uncached_cma_heap:
-	dma_heap_put(cma_uncached_heap->heap);
-free_uncached_cma_heap:
-	kfree(cma_uncached_heap);
-put_cma_heap:
-	dma_heap_put(cma_heap->heap);
-free_cma_heap:
-	kfree(cma_heap);
-
-	return ret;
 }
 
 static int add_default_cma_heap(void)
-- 
Gitblit v1.6.2
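
As a quick sanity check after applying the patch, the surviving "cma" heap
can be exercised from userspace. The sketch below is illustrative only and
not part of the patch: it assumes the kernel exposes /dev/dma_heap/cma and
that the UAPI headers <linux/dma-heap.h> and <linux/dma-buf.h> are
installed. DMA_HEAP_IOCTL_ALLOC lands in cma_heap_allocate(), mmap() is
served by cma_heap_mmap(), and the DMA_BUF_IOCTL_SYNC bracket drives the
begin/end_cpu_access callbacks reworked above.

	/* Illustrative test, not part of the patch; assumes /dev/dma_heap/cma. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/dma-buf.h>
	#include <linux/dma-heap.h>

	int main(void)
	{
		struct dma_heap_allocation_data alloc = {
			.len = 4096,
			.fd_flags = O_RDWR | O_CLOEXEC,
		};
		struct dma_buf_sync sync = { 0 };
		int heap_fd, ret = 1;
		char *p;

		heap_fd = open("/dev/dma_heap/cma", O_RDONLY | O_CLOEXEC);
		if (heap_fd < 0) {
			perror("open /dev/dma_heap/cma");
			return 1;
		}

		/* Ends up in cma_heap_allocate(). */
		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
			perror("DMA_HEAP_IOCTL_ALLOC");
			close(heap_fd);
			return 1;
		}

		/* Served by cma_heap_mmap(); always a cached mapping now. */
		p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
			 alloc.fd, 0);
		if (p != MAP_FAILED) {
			/* Calls cma_heap_dma_buf_begin_cpu_access(). */
			sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
			ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

			memset(p, 0xa5, alloc.len);

			/* Calls cma_heap_dma_buf_end_cpu_access(). */
			sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
			ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

			munmap(p, alloc.len);
			ret = 0;
		} else {
			perror("mmap");
		}

		close(alloc.fd);
		close(heap_fd);
		return ret;
	}

With the uncached variant gone, every mapping this program obtains is
cached, so coherency with device access depends on the sync bracket rather
than on write-combined mappings.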