From cde9070d9970eef1f7ec2360586c802a16230ad8 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:43:50 +0000
Subject: [PATCH] rtl88x2CE_WiFi_linux driver
---
kernel/include/linux/dma-buf.h | 288 +++++++++++++++++++++++++++++++++++---------------------
1 file changed, 179 insertions(+), 109 deletions(-)
diff --git a/kernel/include/linux/dma-buf.h b/kernel/include/linux/dma-buf.h
index 4696b11..0b3c064 100644
--- a/kernel/include/linux/dma-buf.h
+++ b/kernel/include/linux/dma-buf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for dma buffer sharing framework.
*
@@ -8,18 +9,6 @@
* Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
* Daniel Vetter <daniel@ffwll.ch> for their support in creation and
* refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
@@ -32,6 +21,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
+#include <linux/android_kabi.h>
struct device;
struct dma_buf;
@@ -39,18 +29,20 @@
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: [optional] maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @map: [optional] maps a page from the buffer into kernel address space.
- * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
+ /**
+ * @cache_sgt_mapping:
+ *
+ * If true, the framework will cache the first mapping made for each
+ * attachment. This avoids creating mappings for attachments multiple
+ * times.
+ */
+ bool cache_sgt_mapping;
+
/**
* @attach:
*
@@ -91,13 +83,42 @@
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin and lets the exporter know that the
+ * DMA-buf can't be moved any more.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional and should only be used in limited use
+ * cases like scanout and not for temporary pin operations.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -118,6 +139,9 @@
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
@@ -135,9 +159,8 @@
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -146,12 +169,7 @@
/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
* if the call would block.
*/
-#ifdef CONFIG_ARCH_ROCKCHIP
- int (*set_release_callback)(void (*release_callback)(void *data),
- void *data);
- void *(*get_release_callback_data)(void *callback);
- /* after final dma_buf_put() */
-#endif
+
/**
* @release:
*
@@ -191,33 +209,6 @@
* needs to be restarted.
*/
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
-
- /**
- * @begin_cpu_access_umapped:
- *
- * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
- * called with the DMA_BUF_SYNC_START and DMA_BUF_SYNC_USER_MAPPED flags
- * set. It allows the exporter to ensure that the mmap(ed) portions of
- * the buffer are available for cpu access - the exporter might need to
- * allocate or swap-in and pin the backing storage.
- * The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
- * direction (read instead of write) might return stale or even bogus
- * data (e.g. when the exporter needs to copy the data to temporary
- * storage).
- *
- * This callback is optional.
- *
- * Returns:
- *
- * 0 on success or a negative error code on failure. This can for
- * example fail when the backing storage can't be allocated. Can also
- * return -ERESTARTSYS or -EINTR when the call has been interrupted and
- * needs to be restarted.
- */
- int (*begin_cpu_access_umapped)(struct dma_buf *dmabuf,
- enum dma_data_direction);
/**
* @begin_cpu_access_partial:
@@ -274,28 +265,6 @@
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
/**
- * @end_cpu_access_umapped:
- *
- * This is called as result a of the DMA_BUF_IOCTL_SYNC IOCTL being
- * called with the DMA_BUF_SYNC_END and DMA_BUF_SYNC_USER_MAPPED flags
- * set. The exporter can use to limit cache flushing to only those parts
- * of the buffer which are mmap(ed) and to unpin any resources pinned in
- * @begin_cpu_access_umapped.
- * The result of any dma_buf kmap calls after end_cpu_access_umapped is
- * undefined.
- *
- * This callback is optional.
- *
- * Returns:
- *
- * 0 on success or a negative error code on failure. Can return
- * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
- * to be restarted.
- */
- int (*end_cpu_access_umapped)(struct dma_buf *dmabuf,
- enum dma_data_direction);
-
- /**
* @end_cpu_access_partial:
*
* This is called from dma_buf_end_cpu_access_partial() when the
@@ -316,9 +285,6 @@
int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
enum dma_data_direction,
unsigned int offset, unsigned int len);
-
- void *(*map)(struct dma_buf *, unsigned long);
- void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
@@ -388,8 +354,12 @@
* will be populated with the buffer's flags.
*/
int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
+
+ ANDROID_KABI_RESERVE(1);
+ ANDROID_KABI_RESERVE(2);
};
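(Illustrative aside, not part of the patch: with the new @pin/@unpin hooks above, a dynamic exporter's ops table might be wired up roughly as sketched below. The my_exporter_* names are hypothetical placeholders; only the struct members declared in this header are assumed.)

#include <linux/dma-buf.h>

static int my_exporter_pin(struct dma_buf_attachment *attach)
{
	/* Called with attach->dmabuf->resv locked: keep the backing
	 * storage immovable until my_exporter_unpin() runs. */
	return 0;
}

static void my_exporter_unpin(struct dma_buf_attachment *attach)
{
	/* The backing storage may be migrated again after this returns. */
}

static const struct dma_buf_ops my_exporter_ops = {
	/* .cache_sgt_mapping is left false: it is mutually exclusive with @pin */
	.pin	= my_exporter_pin,	/* long-term pins only, e.g. scanout */
	.unpin	= my_exporter_unpin,
	/* the mandatory .map_dma_buf/.unmap_dma_buf (plus .release, .mmap, ...)
	 * callbacks are omitted here for brevity */
};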
+#ifdef CONFIG_DMABUF_CACHE
/**
* dma_buf_destructor - dma-buf destructor function
* @dmabuf: [in] pointer to dma-buf
@@ -401,21 +371,23 @@
* won't be called.
*/
typedef int (*dma_buf_destructor)(struct dma_buf *dmabuf, void *dtor_data);
+#endif
/**
* struct dma_buf - shared buffer object
* @size: size of the buffer
* @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached,
+ * protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap, and accesses to name
+ * vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging.
- * @name_lock: lock to protect name.
- * @ktime: time (in jiffies) at which the buffer was born
+ * @name: userspace-provided name; useful for accounting and debugging,
+ * protected by @resv.
+ * @name_lock: spinlock to protect name access
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -424,6 +396,7 @@
* @poll: for userspace poll support
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
+ * @sysfs_entry: for exposing information about this buffer in sysfs.
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -438,10 +411,6 @@
size_t size;
struct file *file;
struct list_head attachments;
-#ifdef CONFIG_ARCH_ROCKCHIP
- struct list_head release_callbacks;
- struct mutex release_lock;
-#endif
const struct dma_buf_ops *ops;
struct mutex lock;
unsigned vmapping_counter;
@@ -449,13 +418,10 @@
const char *exp_name;
const char *name;
spinlock_t name_lock;
-#if defined(CONFIG_DEBUG_FS)
- ktime_t ktime;
-#endif
struct module *owner;
struct list_head list_node;
void *priv;
- struct reservation_object *resv;
+ struct dma_resv *resv;
/* poll support */
wait_queue_head_t poll;
@@ -466,17 +432,70 @@
__poll_t active;
} cb_excl, cb_shared;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /* for sysfs stats */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
+#ifdef CONFIG_DMABUF_CACHE
dma_buf_destructor dtor;
void *dtor_data;
- atomic_t dent_count;
+ struct mutex cache_lock;
+#endif
+
+ ANDROID_KABI_RESERVE(1);
+ ANDROID_KABI_RESERVE(2);
+};
+
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @allow_peer2peer:
+ *
+ * If this is set to true, the importer must be able to handle peer
+ * resources without struct pages.
+ */
+ bool allow_peer2peer;
+
+ /**
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * If this callback is provided, the framework can avoid pinning the
+ * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
};
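(Illustrative aside, not part of the patch: an importer that can cope with buffer migration would typically provide @move_notify roughly as below. struct my_importer and my_importer_invalidate_mapping() are made-up placeholders.)

#include <linux/dma-buf.h>

static void my_importer_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	/* dmabuf->resv is already locked here; drop the cached mapping and
	 * arrange for it to be re-created at the buffer's new location. */
	my_importer_invalidate_mapping(imp);
}

static const struct dma_buf_attach_ops my_importer_attach_ops = {
	.allow_peer2peer = true,	/* can use peer resources without struct pages */
	.move_notify	 = my_importer_move_notify,
};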
/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
+ * @importer_ops: importer operations for this attachment; if provided,
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
* @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
* through dma_buf_map_attachment.
*
@@ -493,8 +512,16 @@
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ bool peer2peer;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
unsigned long dma_map_attrs;
+
+ ANDROID_KABI_RESERVE(1);
+ ANDROID_KABI_RESERVE(2);
};
/**
@@ -516,8 +543,11 @@
const struct dma_buf_ops *ops;
size_t size;
int flags;
- struct reservation_object *resv;
+ struct dma_resv *resv;
void *priv;
+
+ ANDROID_KABI_RESERVE(1);
+ ANDROID_KABI_RESERVE(2);
};
/**
@@ -545,18 +575,46 @@
get_file(dmabuf->file);
}
-#ifdef CONFIG_ARCH_ROCKCHIP
-int dma_buf_set_release_callback(struct dma_buf *dmabuf,
- void (*callback)(void *), void *data);
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return !!dmabuf->ops->pin;
+}
-void *dma_buf_get_release_callback_data(struct dma_buf *dmabuf,
- void (*callback)(void *));
-#endif
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
+int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
+ void *private), void *private);
+int is_dma_buf_file(struct file *file);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
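(Illustrative aside, not part of the patch: a dynamic importer would attach with dma_buf_dynamic_attach() and take the dma_resv lock around map/unmap, roughly as sketched below. my_import_buffer(), struct my_importer and my_importer_attach_ops are hypothetical; an importer that cannot handle moves would instead bracket long-term use with dma_buf_pin()/dma_buf_unpin().)

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

static int my_import_buffer(struct dma_buf *dmabuf, struct device *dev,
			    struct my_importer *imp)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dmabuf, dev,
					&my_importer_attach_ops, imp);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* Dynamic attachments map and unmap under the dma_resv lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	imp->attach = attach;	/* hypothetical bookkeeping fields */
	imp->sgt = sgt;
	return 0;
}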
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -568,6 +626,7 @@
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
@@ -578,16 +637,16 @@
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
enum dma_data_direction dir,
unsigned int offset, unsigned int len);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);
+#ifdef CONFIG_DMABUF_CACHE
/**
* dma_buf_set_destructor - set the dma-buf's destructor
* @dmabuf: [in] pointer to dma-buf
@@ -601,5 +660,16 @@
dmabuf->dtor = dtor;
dmabuf->dtor_data = dtor_data;
}
+#endif
+
+#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
+void dma_buf_reset_peak_size(void);
+size_t dma_buf_get_peak_size(void);
+size_t dma_buf_get_total_size(void);
+#else
+static inline void dma_buf_reset_peak_size(void) {}
+static inline size_t dma_buf_get_peak_size(void) { return 0; }
+static inline size_t dma_buf_get_total_size(void) { return 0; }
+#endif
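(Illustrative aside, not part of the patch: the Rockchip accounting helpers above could be consumed from a driver's debug path roughly as below; my_dump_dmabuf_stats() is a made-up helper, and the stub versions compile the calls away when CONFIG_RK_DMABUF_DEBUG is disabled.)

#include <linux/dma-buf.h>
#include <linux/printk.h>

static void my_dump_dmabuf_stats(void)
{
	pr_info("dma-buf: total %zu bytes, peak %zu bytes\n",
		dma_buf_get_total_size(), dma_buf_get_peak_size());
	dma_buf_reset_peak_size();	/* start a new measurement window */
}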
#endif /* __DMA_BUF_H__ */
--
Gitblit v1.6.2