/* SPDX-License-Identifier: GPL-2.0 */
/*
 * drivers/staging/android/ion/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_H
#define _ION_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>

#include "../uapi/ion.h"

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type: type of the heap from ion_heap_type enum
 * @id: unique identifier for heap. When allocating, higher numbers
 * will be allocated from first. At allocation these are passed
 * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
 * @name: used for debug purposes
 * @base: base address of heap in physical memory if applicable
 * @size: size of the heap in bytes if applicable
 * @align: required alignment in physical memory if applicable
 * @priv: private info passed from the board file
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
	enum ion_heap_type type;
	unsigned int id;
	const char *name;
	phys_addr_t base;
	size_t size;
	phys_addr_t align;
	void *priv;
};

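/*
 * Example: a board file might describe its heaps with a static table like
 * the sketch below. The heap ids, names, base address, and sizes are purely
 * hypothetical and only illustrate how the fields are filled in.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_SYSTEM,
 *			.id   = 0,
 *			.name = "system",
 *		},
 *		{
 *			.type  = ION_HEAP_TYPE_DMA,
 *			.id    = 1,
 *			.name  = "camera",
 *			.base  = 0x80000000,
 *			.size  = SZ_16M,
 *			.align = SZ_1M,
 *		},
 *	};
 */
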
/**
 * struct ion_buffer - metadata for a particular buffer
 * @node: node in the ion_device buffers tree
 * @list: node in a free list when the buffer is queued for deferred free
 * @dev: back pointer to the ion_device
 * @heap: back pointer to the heap the buffer came from
 * @flags: buffer specific flags
 * @private_flags: internal buffer specific flags
 * @size: size of the buffer
 * @priv_virt: private data to the buffer representable as
 * a void *
 * @lock: protects the buffer's cnt fields
 * @kmap_cnt: number of times the buffer is mapped to the kernel
 * @vaddr: the kernel mapping if kmap_cnt is not zero
 * @sg_table: the sg table for the buffer
 * @attachments: list of dma-buf attachments for this buffer
 */
struct ion_buffer {
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	void *priv_virt;
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	struct sg_table *sg_table;
	struct list_head attachments;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the list of heaps
 * @heaps: priority list of all the heaps in the system
 * @debug_root: debugfs root directory for the device
 * @heap_cnt: number of heaps registered with the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	struct dentry *debug_root;
	int heap_cnt;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate: allocate memory
 * @free: free memory
 * @map_kernel: map memory into the kernel
 * @unmap_kernel: unmap memory from the kernel
 * @map_user: map memory to userspace
 * @shrink: free memory cached by the heap (e.g. in page pools)
 *
 * allocate and map_user return 0 on success, -errno on error.
 * map_kernel returns a pointer on success, ERR_PTR on
 * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being freed must be truly freed back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};

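/*
 * A minimal sketch of an ops table for a hypothetical heap. The allocate
 * and free callbacks (my_heap_allocate/my_heap_free) are assumptions used
 * only for illustration; map_kernel, unmap_kernel, and map_user are wired
 * to the generic helpers declared later in this header, which work for any
 * heap that fills in buffer->sg_table at allocation time.
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate	= my_heap_allocate,
 *		.free		= my_heap_free,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */
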
/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)

/**
 * struct ion_heap - represents a heap in the system
 * @node: node to put the heap on the device's priority list of heaps
 * @dev: back pointer to the ion_device
 * @type: type of heap
 * @ops: ops struct as above
 * @flags: flags
 * @id: id of heap, also indicates priority of this heap when
 * allocating. These are specified by platform data and
 * MUST be unique
 * @name: used for debugging
 * @shrinker: a shrinker for the heap
 * @free_list: free list head if deferred free is used
 * @free_list_size: size of the deferred free list in bytes
 * @free_lock: protects the free list
 * @waitqueue: queue to wait on from deferred free thread
 * @task: task struct of deferred free thread
 * @debug_show: called when heap debug file is read to add any
 * heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
			  void *unused);
};

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @heap: the heap to add
 */
void ion_device_add_heap(struct ion_heap *heap);

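/*
 * Sketch of how a heap driver typically registers itself: fill in an
 * ion_heap with its ops, type, and name, then hand it to
 * ion_device_add_heap(). The my_heap_create()/my_heap_ops names are
 * hypothetical and only illustrate the flow.
 *
 *	static int my_heap_create(void)
 *	{
 *		struct ion_heap *heap;
 *
 *		heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 *		if (!heap)
 *			return -ENOMEM;
 *
 *		heap->ops = &my_heap_ops;
 *		heap->type = ION_HEAP_TYPE_CUSTOM;
 *		heap->name = "my_heap";
 *
 *		ion_device_add_heap(heap);
 *		return 0;
 *	}
 */
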
/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

int ion_alloc(size_t len,
	      unsigned int heap_id_mask,
	      unsigned int flags);

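/*
 * Usage sketch for ion_alloc(). This assumes the mainline ion behaviour,
 * where a successful allocation returns a dma-buf fd and failure returns a
 * negative errno; the heap id (bit 0) and the size are illustrative only.
 *
 *	int fd;
 *
 *	fd = ion_alloc(SZ_1M, 1 << 0, ION_FLAG_CACHED);
 *	if (fd < 0)
 *		return fd;
 */
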
/**
 * ion_heap_init_shrinker
 * @heap: the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op,
 * this function will be called to set up a shrinker that shrinks the freelists
 * and calls the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap: the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

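/*
 * Sketch of how the two init helpers above pair with the heap flags. Whether
 * the ion core or the heap driver makes these calls depends on the ion
 * version; the snippet only illustrates the intended conditions:
 *
 *	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 *		ion_heap_init_deferred_free(heap);
 *
 *	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
 *		ion_heap_init_shrinker(heap);
 */
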
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap: the heap
 * @buffer: the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap: the heap
 * @size: amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed. The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free
 *				list, skipping any heap-specific
 *				pooling or caching mechanisms
 *
 * @heap: the heap
 * @size: amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist immediately.
 * Returns the total amount freed. The total freed may be higher depending
 * on the size of the items in the list, or lower if there is insufficient
 * total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap,
				size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap: the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

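/*
 * Sketch of a reclaim path built from the freelist helpers above; a
 * simplified, hypothetical version of what the core ion shrinker does.
 * Converting between pages and bytes with PAGE_SIZE is the only assumption
 * beyond the declarations in this header.
 *
 *	static unsigned long my_heap_scan(struct ion_heap *heap, int nr_to_scan)
 *	{
 *		size_t to_free = (size_t)nr_to_scan * PAGE_SIZE;
 *		size_t freed;
 *
 *		freed = ion_heap_freelist_shrink(heap, to_free);
 *		return freed / PAGE_SIZE;
 *	}
 */
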
/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap. Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count: number of highmem items in the pool
 * @low_count: number of lowmem items in the pool
 * @high_items: list of highmem items
 * @low_items: list of lowmem items
 * @mutex: lock protecting this struct and especially the count
 * item list
 * @gfp_mask: gfp_mask to use for allocations
 * @order: order of pages in the pool
 * @list: plist node for list of pools
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance benefit
 * on many systems
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);

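/*
 * Usage sketch for the page pool API above: create a pool of order-0 pages,
 * take and return pages, then tear the pool down. The gfp mask is an
 * assumption and error handling is minimal for brevity.
 *
 *	struct ion_page_pool *pool;
 *	struct page *page;
 *
 *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	page = ion_page_pool_alloc(pool);
 *	...
 *	ion_page_pool_free(pool, page);
 *	ion_page_pool_destroy(pool);
 */
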
#ifdef CONFIG_ION_SYSTEM_HEAP
long ion_page_pool_nr_pages(void);
#else
static inline long ion_page_pool_nr_pages(void) { return 0; }
#endif

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool: the pool
 * @gfp_mask: the memory type to reclaim
 * @nr_to_scan: number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan);

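/*
 * Sketch of a heap's ->shrink callback built on ion_page_pool_shrink(),
 * mirroring the pattern used by pool-backed heaps: each pool is asked to
 * give back pages until the scan target is met. my_pools and NUM_MY_POOLS
 * are hypothetical.
 *
 *	static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *				  int nr_to_scan)
 *	{
 *		int nr_total = 0;
 *		int i;
 *
 *		for (i = 0; i < NUM_MY_POOLS; i++)
 *			nr_total += ion_page_pool_shrink(my_pools[i], gfp_mask,
 *							 nr_to_scan);
 *		return nr_total;
 *	}
 */
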
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

int ion_query_heaps(struct ion_heap_query *query);
int ion_get_phys(struct ion_phys_data *phys);

#ifdef CONFIG_ION_MODULE
int ion_add_cma_heaps(void);
int ion_protected_heap_create(void);
int ion_system_heap_create(void);
int ion_system_contig_heap_create(void);
#endif
#endif /* _ION_H */