From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] dma-buf: switch core to the dma_resv API, add sysfs stats
 and RK debug accounting
---
 kernel/drivers/dma-buf/dma-buf.c | 869 +++++++++++++++++++++++++++------------------------
 1 file changed, 413 insertions(+), 456 deletions(-)

diff --git a/kernel/drivers/dma-buf/dma-buf.c b/kernel/drivers/dma-buf/dma-buf.c
index 0d36aef..ad7ddaa 100644
--- a/kernel/drivers/dma-buf/dma-buf.c
+++ b/kernel/drivers/dma-buf/dma-buf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Framework for buffer objects that can be shared across devices/subsystems.
  *
@@ -8,18 +9,6 @@
  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  * refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
@@ -32,27 +21,16 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/fdtable.h>
-#include <linux/hashtable.h>
-#include <linux/list_sort.h>
 #include <linux/mount.h>
-#include <linux/cache.h>
+#include <linux/pseudo_fs.h>
+#include <linux/sched/task.h>

 #include <uapi/linux/dma-buf.h>
 #include <uapi/linux/magic.h>

-static inline int is_dma_buf_file(struct file *);
-
-#ifdef CONFIG_ARCH_ROCKCHIP
-struct dma_buf_callback {
-	struct list_head list;
-	void (*callback)(void *);
-	void *data;
-};
-#endif
+#include "dma-buf-sysfs-stats.h"

 struct dma_buf_list {
 	struct list_head head;
@@ -60,6 +38,67 @@
 };

 static struct dma_buf_list db_list;
+
+/*
+ * Traverse db_list and invoke @callback on each dma_buf, so the caller
+ * can extract the required info out of each dmabuf.
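+ *
+ * A minimal usage sketch (hypothetical caller code; db_list.lock is held
+ * across each callback invocation, and a non-zero return stops the walk):
+ *
+ *	static int size_cb(const struct dma_buf *dmabuf, void *private)
+ *	{
+ *		*(size_t *)private += dmabuf->size;
+ *		return 0;
+ *	}
+ *
+ *	size_t total = 0;
+ *	int err = get_each_dmabuf(size_cb, &total);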
+ */ +int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, + void *private), void *private) +{ + struct dma_buf *buf; + int ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + list_for_each_entry(buf, &db_list.head, list_node) { + ret = callback(buf, private); + if (ret) + break; + } + mutex_unlock(&db_list.lock); + return ret; +} +EXPORT_SYMBOL_GPL(get_each_dmabuf); + +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) +static size_t db_total_size; +static size_t db_peak_size; + +void dma_buf_reset_peak_size(void) +{ + mutex_lock(&db_list.lock); + db_peak_size = 0; + mutex_unlock(&db_list.lock); +} +EXPORT_SYMBOL_GPL(dma_buf_reset_peak_size); + +size_t dma_buf_get_peak_size(void) +{ + size_t sz; + + mutex_lock(&db_list.lock); + sz = db_peak_size; + mutex_unlock(&db_list.lock); + + return sz; +} +EXPORT_SYMBOL_GPL(dma_buf_get_peak_size); + +size_t dma_buf_get_total_size(void) +{ + size_t sz; + + mutex_lock(&db_list.lock); + sz = db_total_size; + mutex_unlock(&db_list.lock); + + return sz; +} +EXPORT_SYMBOL_GPL(dma_buf_get_total_size); +#endif static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { @@ -80,10 +119,9 @@ static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; -#ifdef CONFIG_ARCH_ROCKCHIP - struct dma_buf_callback *cb, *tmp; -#endif +#ifdef CONFIG_DMABUF_CACHE int dtor_ret = 0; +#endif dmabuf = dentry->d_fsdata; if (unlikely(!dmabuf)) @@ -101,28 +139,19 @@ */ BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); -#ifdef CONFIG_ARCH_ROCKCHIP - mutex_lock(&dmabuf->release_lock); - list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) { - if (cb->callback) - cb->callback(cb->data); - list_del(&cb->list); - kfree(cb); - } - mutex_unlock(&dmabuf->release_lock); -#endif + dma_buf_stats_teardown(dmabuf); +#ifdef CONFIG_DMABUF_CACHE if (dmabuf->dtor) dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data); if (!dtor_ret) +#endif dmabuf->ops->release(dmabuf); - else - pr_warn_ratelimited("Leaking dmabuf %s because destructor failed error:%d\n", - dmabuf->name, dtor_ret); - if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) - reservation_object_fini(dmabuf->resv); + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) + dma_resv_fini(dmabuf->resv); + WARN_ON(!list_empty(&dmabuf->attachments)); module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); @@ -138,6 +167,9 @@ dmabuf = file->private_data; mutex_lock(&db_list.lock); +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) + db_total_size -= dmabuf->size; +#endif list_del(&dmabuf->list_node); mutex_unlock(&db_list.lock); @@ -151,16 +183,20 @@ static struct vfsmount *dma_buf_mnt; -static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type, - int flags, const char *name, void *data) +static int dma_buf_fs_init_context(struct fs_context *fc) { - return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops, - DMA_BUF_MAGIC); + struct pseudo_fs_context *ctx; + + ctx = init_pseudo(fc, DMA_BUF_MAGIC); + if (!ctx) + return -ENOMEM; + ctx->dops = &dma_buf_dentry_ops; + return 0; } static struct file_system_type dma_buf_fs_type = { .name = "dmabuf", - .mount = dma_buf_fs_mount, + .init_fs_context = dma_buf_fs_init_context, .kill_sb = kill_anon_super, }; @@ -212,12 +248,12 @@ } /** - * DOC: fence polling + * DOC: implicit fence polling * * To support cross-device and cross-driver synchronization of buffer access - * implicit fences (represented internally in the kernel with &struct fence) can - * be attached to a &dma_buf. 
The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
+ * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
@@ -247,8 +283,8 @@
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count, seq;
@@ -266,7 +302,7 @@
 		return 0;

 retry:
-	seq = read_seqbegin(&resv->seq);
+	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();

 	fobj = rcu_dereference(resv->fence);
@@ -275,7 +311,7 @@
 	else
 		shared_count = 0;
 	fence_excl = rcu_dereference(resv->fence_excl);
-	if (read_seqretry(&resv->seq, seq)) {
+	if (read_seqcount_retry(&resv->seq, seq)) {
 		rcu_read_unlock();
 		goto retry;
 	}
@@ -363,22 +399,46 @@
 	return events;
 }

+static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
+{
+	spin_lock(&dmabuf->name_lock);
+	kfree(dmabuf->name);
+	dmabuf->name = name;
+	spin_unlock(&dmabuf->name_lock);
+
+	return 0;
+}
+
 /**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It also supports changing the name of the dma-buf when the same piece
+ * of memory is used for multiple purposes between different devices.
 *
- * @dmabuf [in] dmabuf buffer that will be renamed.
- * @buf: [in] A piece of userspace memory that contains the name of
- * the dma-buf.
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @name:   [in] A kernel string containing the new name; it is
+ *          duplicated internally, so the caller retains ownership.
 *
- * Returns 0 on success. If the dma-buf buffer is already attached to
- * devices, return -EBUSY.
+ * Returns 0 on success, or -ENOMEM if the name could not be duplicated.
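+ *
+ * A minimal exporter-side sketch (hypothetical call site; the string is
+ * duplicated with kstrndup(), so a caller-owned buffer is fine):
+ *
+ *	err = dma_buf_set_name(dmabuf, "camera-preview");
+ *	if (err)
+ *		return err;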
* */ -static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) +long dma_buf_set_name(struct dma_buf *dmabuf, const char *name) +{ + long ret = 0; + char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + ret = _dma_buf_set_name(dmabuf, buf); + if (ret) + kfree(buf); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_set_name); + +static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf) { char *name = strndup_user(buf, DMA_BUF_NAME_LEN); long ret = 0; @@ -386,28 +446,12 @@ if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock(&dmabuf->lock); - spin_lock(&dmabuf->name_lock); - if (!list_empty(&dmabuf->attachments)) { - ret = -EBUSY; + ret = _dma_buf_set_name(dmabuf, name); + if (ret) kfree(name); - goto out_unlock; - } - kfree(dmabuf->name); - dmabuf->name = name; -out_unlock: - spin_unlock(&dmabuf->name_lock); - mutex_unlock(&dmabuf->lock); return ret; } - -static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction direction); - - -static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction direction); static long dma_buf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -415,7 +459,7 @@ struct dma_buf *dmabuf; struct dma_buf_sync sync; struct dma_buf_sync_partial sync_p; - enum dma_data_direction dir; + enum dma_data_direction direction; int ret; dmabuf = file->private_data; @@ -430,46 +474,35 @@ switch (sync.flags & DMA_BUF_SYNC_RW) { case DMA_BUF_SYNC_READ: - dir = DMA_FROM_DEVICE; + direction = DMA_FROM_DEVICE; break; case DMA_BUF_SYNC_WRITE: - dir = DMA_TO_DEVICE; + direction = DMA_TO_DEVICE; break; case DMA_BUF_SYNC_RW: - dir = DMA_BIDIRECTIONAL; + direction = DMA_BIDIRECTIONAL; break; default: return -EINVAL; } if (sync.flags & DMA_BUF_SYNC_END) - if (sync.flags & DMA_BUF_SYNC_USER_MAPPED) - ret = dma_buf_end_cpu_access_umapped(dmabuf, - dir); - else - ret = dma_buf_end_cpu_access(dmabuf, dir); + ret = dma_buf_end_cpu_access(dmabuf, direction); else - if (sync.flags & DMA_BUF_SYNC_USER_MAPPED) - ret = dma_buf_begin_cpu_access_umapped(dmabuf, - dir); - else - ret = dma_buf_begin_cpu_access(dmabuf, dir); + ret = dma_buf_begin_cpu_access(dmabuf, direction); return ret; case DMA_BUF_SET_NAME_A: case DMA_BUF_SET_NAME_B: - return dma_buf_set_name(dmabuf, (const char __user *)arg); + return dma_buf_set_name_user(dmabuf, (const char __user *)arg); case DMA_BUF_IOCTL_SYNC_PARTIAL: if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p))) return -EFAULT; if (sync_p.len == 0) - return -EINVAL; - - if ((sync_p.offset % cache_line_size()) || (sync_p.len % cache_line_size())) - return -EINVAL; + return 0; if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len) return -EINVAL; @@ -479,24 +512,24 @@ switch (sync_p.flags & DMA_BUF_SYNC_RW) { case DMA_BUF_SYNC_READ: - dir = DMA_FROM_DEVICE; + direction = DMA_FROM_DEVICE; break; case DMA_BUF_SYNC_WRITE: - dir = DMA_TO_DEVICE; + direction = DMA_TO_DEVICE; break; case DMA_BUF_SYNC_RW: - dir = DMA_BIDIRECTIONAL; + direction = DMA_BIDIRECTIONAL; break; default: return -EINVAL; } if (sync_p.flags & DMA_BUF_SYNC_END) - ret = dma_buf_end_cpu_access_partial(dmabuf, dir, + ret = dma_buf_end_cpu_access_partial(dmabuf, direction, sync_p.offset, sync_p.len); else - ret = dma_buf_begin_cpu_access_partial(dmabuf, dir, + ret = dma_buf_begin_cpu_access_partial(dmabuf, direction, sync_p.offset, sync_p.len); @@ -527,22 +560,22 @@ .llseek = dma_buf_llseek, .poll = dma_buf_poll, 
.unlocked_ioctl = dma_buf_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = dma_buf_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, .show_fdinfo = dma_buf_show_fdinfo, }; /* * is_dma_buf_file - Check if struct file* is associated with dma_buf */ -static inline int is_dma_buf_file(struct file *file) +int is_dma_buf_file(struct file *file) { return file->f_op == &dma_buf_fops; } +EXPORT_SYMBOL_GPL(is_dma_buf_file); static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) { + static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); struct file *file; struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); @@ -552,6 +585,13 @@ inode->i_size = dmabuf->size; inode_set_bytes(inode, dmabuf->size); + /* + * The ->i_ino acquired from get_next_ino() is not unique thus + * not suitable for using it as dentry name by dmabuf stats. + * Override ->i_ino with the unique and dmabuffs specific + * value. + */ + inode->i_ino = atomic64_add_return(1, &dmabuf_inode); file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops); if (IS_ERR(file)) @@ -567,48 +607,16 @@ return file; } -#ifdef CONFIG_ARCH_ROCKCHIP -void *dma_buf_get_release_callback_data(struct dma_buf *dmabuf, - void (*callback)(void *)) +static void dma_buf_set_default_name(struct dma_buf *dmabuf) { - struct dma_buf_callback *cb, *tmp; - void *result = NULL; + char task_comm[TASK_COMM_LEN]; + char *name; - mutex_lock(&dmabuf->release_lock); - list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) { - if (cb->callback == callback) { - result = cb->data; - break; - } - } - mutex_unlock(&dmabuf->release_lock); - - return result; + get_task_comm(task_comm, current->group_leader); + name = kasprintf(GFP_KERNEL, "%d-%s", current->tgid, task_comm); + dma_buf_set_name(dmabuf, name); + kfree(name); } -EXPORT_SYMBOL_GPL(dma_buf_get_release_callback_data); - -int dma_buf_set_release_callback(struct dma_buf *dmabuf, - void (*callback)(void *), void *data) -{ - struct dma_buf_callback *cb; - - if (WARN_ON(dma_buf_get_release_callback_data(dmabuf, callback))) - return -EINVAL; - - cb = kzalloc(sizeof(*cb), GFP_KERNEL); - if (!cb) - return -ENOMEM; - - cb->callback = callback; - cb->data = data; - mutex_lock(&dmabuf->release_lock); - list_add_tail(&cb->list, &dmabuf->release_callbacks); - mutex_unlock(&dmabuf->release_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(dma_buf_set_release_callback); -#endif /** * DOC: dma buf device access @@ -661,13 +669,13 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) { struct dma_buf *dmabuf; - struct reservation_object *resv = exp_info->resv; + struct dma_resv *resv = exp_info->resv; struct file *file; size_t alloc_size = sizeof(struct dma_buf); int ret; if (!exp_info->resv) - alloc_size += sizeof(struct reservation_object); + alloc_size += sizeof(struct dma_resv); else /* prevent &dma_buf[1] == dma_buf->resv */ alloc_size += 1; @@ -679,6 +687,13 @@ || !exp_info->ops->release)) { return ERR_PTR(-EINVAL); } + + if (WARN_ON(exp_info->ops->cache_sgt_mapping && + (exp_info->ops->pin || exp_info->ops->unpin))) + return ERR_PTR(-EINVAL); + + if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) + return ERR_PTR(-EINVAL); if (!try_module_get(exp_info->owner)) return ERR_PTR(-ENOENT); @@ -694,16 +709,17 @@ dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); +#ifdef CONFIG_DMABUF_CACHE + mutex_init(&dmabuf->cache_lock); +#endif init_waitqueue_head(&dmabuf->poll); 
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; -#if defined(CONFIG_DEBUG_FS) - dmabuf->ktime = ktime_get(); -#endif if (!resv) { - resv = (struct reservation_object *)&dmabuf[1]; - reservation_object_init(resv); + resv = (struct dma_resv *)&dmabuf[1]; + dma_resv_init(resv); } dmabuf->resv = resv; @@ -717,19 +733,33 @@ dmabuf->file = file; mutex_init(&dmabuf->lock); - spin_lock_init(&dmabuf->name_lock); INIT_LIST_HEAD(&dmabuf->attachments); -#ifdef CONFIG_ARCH_ROCKCHIP - mutex_init(&dmabuf->release_lock); - INIT_LIST_HEAD(&dmabuf->release_callbacks); -#endif mutex_lock(&db_list.lock); list_add(&dmabuf->list_node, &db_list.head); +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) + db_total_size += dmabuf->size; + db_peak_size = max(db_total_size, db_peak_size); +#endif mutex_unlock(&db_list.lock); + + ret = dma_buf_stats_setup(dmabuf); + if (ret) + goto err_sysfs; + + if (IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)) + dma_buf_set_default_name(dmabuf); return dmabuf; +err_sysfs: + /* + * Set file->f_path.dentry->d_fsdata to NULL so that when + * dma_buf_release() gets invoked by dentry_ops, it exits + * early before calling the release() dma_buf op. + */ + file->f_path.dentry->d_fsdata = NULL; + fput(file); err_dmabuf: kfree(dmabuf); err_module: @@ -808,10 +838,12 @@ EXPORT_SYMBOL_GPL(dma_buf_put); /** - * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, + * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally, * calls attach() of dma_buf_ops to allow device-specific attach functionality - * @dmabuf: [in] buffer to attach device to. - * @dev: [in] device to be attached. + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * @importer_ops: [in] importer operations for the attachment + * @importer_priv: [in] importer private pointer for the attachment * * Returns struct dma_buf_attachment pointer for this attachment. Attachments * must be cleaned up by calling dma_buf_detach(). @@ -825,13 +857,18 @@ * accessible to @dev, and cannot be moved to a more suitable place. This is * indicated with the error code -EBUSY. */ -struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) +struct dma_buf_attachment * +dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, + const struct dma_buf_attach_ops *importer_ops, + void *importer_priv) { struct dma_buf_attachment *attach; int ret; if (WARN_ON(!dmabuf || !dev)) + return ERR_PTR(-EINVAL); + + if (WARN_ON(importer_ops && !importer_ops->move_notify)) return ERR_PTR(-EINVAL); attach = kzalloc(sizeof(*attach), GFP_KERNEL); @@ -840,23 +877,79 @@ attach->dev = dev; attach->dmabuf = dmabuf; - - mutex_lock(&dmabuf->lock); + if (importer_ops) + attach->peer2peer = importer_ops->allow_peer2peer; + attach->importer_ops = importer_ops; + attach->importer_priv = importer_priv; if (dmabuf->ops->attach) { ret = dmabuf->ops->attach(dmabuf, attach); if (ret) goto err_attach; } + dma_resv_lock(dmabuf->resv, NULL); list_add(&attach->node, &dmabuf->attachments); + dma_resv_unlock(dmabuf->resv); - mutex_unlock(&dmabuf->lock); + /* When either the importer or the exporter can't handle dynamic + * mappings we cache the mapping here to avoid issues with the + * reservation object lock. 
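+	 *
+	 * The importer's side of this case, as a rough sketch (hypothetical
+	 * static importer on a dynamic exporter; the sgt cached here is what
+	 * later dma_buf_map_attachment() calls hand back):
+	 *
+	 *	attach = dma_buf_attach(dmabuf, dev);
+	 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);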
+ */ + if (dma_buf_attachment_is_dynamic(attach) != + dma_buf_is_dynamic(dmabuf)) { + struct sg_table *sgt; + + if (dma_buf_is_dynamic(attach->dmabuf)) { + dma_resv_lock(attach->dmabuf->resv, NULL); + ret = dma_buf_pin(attach); + if (ret) + goto err_unlock; + } + + sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL); + if (!sgt) + sgt = ERR_PTR(-ENOMEM); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err_unpin; + } + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_unlock(attach->dmabuf->resv); + attach->sgt = sgt; + attach->dir = DMA_BIDIRECTIONAL; + } + return attach; err_attach: kfree(attach); - mutex_unlock(&dmabuf->lock); return ERR_PTR(ret); + +err_unpin: + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_buf_unpin(attach); + +err_unlock: + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_unlock(attach->dmabuf->resv); + + dma_buf_detach(dmabuf, attach); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach); + +/** + * dma_buf_attach - Wrapper for dma_buf_dynamic_attach + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * + * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static + * mapping. + */ +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); } EXPORT_SYMBOL_GPL(dma_buf_attach); @@ -873,15 +966,65 @@ if (WARN_ON(!dmabuf || !attach)) return; - mutex_lock(&dmabuf->lock); + if (attach->sgt) { + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_lock(attach->dmabuf->resv, NULL); + + dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir); + + if (dma_buf_is_dynamic(attach->dmabuf)) { + dma_buf_unpin(attach); + dma_resv_unlock(attach->dmabuf->resv); + } + } + + dma_resv_lock(dmabuf->resv, NULL); list_del(&attach->node); + dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) dmabuf->ops->detach(dmabuf, attach); - mutex_unlock(&dmabuf->lock); kfree(attach); } EXPORT_SYMBOL_GPL(dma_buf_detach); + +/** + * dma_buf_pin - Lock down the DMA-buf + * + * @attach: [in] attachment which should be pinned + * + * Returns: + * 0 on success, negative error code on failure. + */ +int dma_buf_pin(struct dma_buf_attachment *attach) +{ + struct dma_buf *dmabuf = attach->dmabuf; + int ret = 0; + + dma_resv_assert_held(dmabuf->resv); + + if (dmabuf->ops->pin) + ret = dmabuf->ops->pin(attach); + + return ret; +} +EXPORT_SYMBOL_GPL(dma_buf_pin); + +/** + * dma_buf_unpin - Remove lock from DMA-buf + * + * @attach: [in] attachment which should be unpinned + */ +void dma_buf_unpin(struct dma_buf_attachment *attach) +{ + struct dma_buf *dmabuf = attach->dmabuf; + + dma_resv_assert_held(dmabuf->resv); + + if (dmabuf->ops->unpin) + dmabuf->ops->unpin(attach); +} +EXPORT_SYMBOL_GPL(dma_buf_unpin); /** * dma_buf_map_attachment - Returns the scatterlist table of the attachment; @@ -902,15 +1045,49 @@ enum dma_data_direction direction) { struct sg_table *sg_table; + int r; might_sleep(); if (WARN_ON(!attach || !attach->dmabuf)) return ERR_PTR(-EINVAL); + if (dma_buf_attachment_is_dynamic(attach)) + dma_resv_assert_held(attach->dmabuf->resv); + + if (attach->sgt) { + /* + * Two mappings with different directions for the same + * attachment are not allowed. 
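+		 *
+		 * E.g. with a cached mapping (hypothetical importer code):
+		 *
+		 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+		 *	...
+		 *	sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
+		 *
+		 * the second call returns ERR_PTR(-EBUSY) because the sgt
+		 * cached by the first one covers DMA_TO_DEVICE only.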
+		 */
+		if (attach->dir != direction &&
+		    attach->dir != DMA_BIDIRECTIONAL)
+			return ERR_PTR(-EBUSY);
+
+		return attach->sgt;
+	}
+
+	if (dma_buf_is_dynamic(attach->dmabuf)) {
+		dma_resv_assert_held(attach->dmabuf->resv);
+		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+			r = dma_buf_pin(attach);
+			if (r)
+				return ERR_PTR(r);
+		}
+	}
+
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
+
+	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
+
+	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+		attach->sgt = sg_table;
+		attach->dir = direction;
+	}

 	return sg_table;
 }
@@ -935,10 +1112,42 @@
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;

-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-						direction);
+	if (dma_buf_attachment_is_dynamic(attach))
+		dma_resv_assert_held(attach->dmabuf->resv);
+
+	if (attach->sgt == sg_table)
+		return;
+
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_resv_assert_held(attach->dmabuf->resv);
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+	if (dma_buf_is_dynamic(attach->dmabuf) &&
+	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->importer_ops)
+			attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);

 /**
 * DOC: cpu access
 *
@@ -951,29 +1160,9 @@
 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
 * access.
 *
- * To support dma_buf objects residing in highmem cpu access is page-based
- * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- * returns a pointer in kernel virtual address space. Afterwards the chunk
- * needs to be unmapped again. There is no limit on how often a given chunk
- * can be mapped and unmapped, i.e. the importer does not need to call
- * begin_cpu_access again before mapping the same chunk again.
- *
- * Interfaces::
- *   void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- *   void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- * Implementing the functions is optional for exporters and for importers all
- * the restrictions of using kmap apply.
- *
- * dma_buf kmap calls outside of the range specified in begin_cpu_access are
- * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- * the partial chunks at the beginning and end but may return stale or bogus
- * data outside of the range (in these partial chunks).
- *
- * For some cases the overhead of kmap can be too high, a vmap interface
- * is introduced. This interface should be used very carefully, as vmalloc
- * space is a limited resources on many architectures.
+ * Since most kernel-internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
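+ *
+ * A minimal kernel-internal access sketch (hypothetical importer code,
+ * error handling elided), using the interfaces listed below:
+ *
+ *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
+ *	vaddr = dma_buf_vmap(dmabuf);
+ *	... read the buffer contents through vaddr ...
+ *	dma_buf_vunmap(dmabuf, vaddr);
+ *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);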
* * Interfaces:: * void \*dma_buf_vmap(struct dma_buf \*dmabuf) @@ -1010,8 +1199,7 @@ * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you * want (with the new data being consumed by say the GPU or the scanout - * device). Optionally SYNC_USER_MAPPED can be set to restrict cache - * maintenance to only the parts of the buffer which are mmap(ed). + * device) * - munmap once you don't need the buffer any more * * For correctness and optimal performance, it is always required to use @@ -1050,11 +1238,11 @@ { bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); - struct reservation_object *resv = dmabuf->resv; + struct dma_resv *resv = dmabuf->resv; long ret; /* Wait on any implicit rendering fences */ - ret = reservation_object_wait_timeout_rcu(resv, write, true, + ret = dma_resv_wait_timeout_rcu(resv, write, true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; @@ -1097,27 +1285,6 @@ return ret; } EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); - -static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - int ret = 0; - - if (WARN_ON(!dmabuf)) - return -EINVAL; - - if (dmabuf->ops->begin_cpu_access_umapped) - ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction); - - /* Ensure that all fences are waited upon - but we first allow - * the native handler the chance to do so more efficiently if it - * chooses. A double invocation here will be reasonably cheap no-op. - */ - if (ret == 0) - ret = __dma_buf_begin_cpu_access(dmabuf, direction); - - return ret; -} int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, @@ -1169,19 +1336,6 @@ } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); -static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - int ret = 0; - - WARN_ON(!dmabuf); - - if (dmabuf->ops->end_cpu_access_umapped) - ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction); - - return ret; -} - int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, unsigned int offset, unsigned int len) @@ -1197,44 +1351,6 @@ return ret; } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial); - -/** - * dma_buf_kmap - Map a page of the buffer object into kernel address space. The - * same restrictions as for kmap and friends apply. - * @dmabuf: [in] buffer to map page from. - * @page_num: [in] page in PAGE_SIZE units to map. - * - * This call must always succeed, any necessary preparations that might fail - * need to be done in begin_cpu_access. - */ -void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) -{ - WARN_ON(!dmabuf); - - if (!dmabuf->ops->map) - return NULL; - return dmabuf->ops->map(dmabuf, page_num); -} -EXPORT_SYMBOL_GPL(dma_buf_kmap); - -/** - * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. - * @dmabuf: [in] buffer to unmap page from. - * @page_num: [in] page in PAGE_SIZE units to unmap. - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. - * - * This call must always succeed. 
- */ -void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, - void *vaddr) -{ - WARN_ON(!dmabuf); - - if (dmabuf->ops->unmap) - dmabuf->ops->unmap(dmabuf, page_num, vaddr); -} -EXPORT_SYMBOL_GPL(dma_buf_kunmap); - /** * dma_buf_mmap - Setup up a userspace mmap with the given vma @@ -1395,8 +1511,8 @@ int ret; struct dma_buf *buf_obj; struct dma_buf_attachment *attach_obj; - struct reservation_object *robj; - struct reservation_object_list *fobj; + struct dma_resv *robj; + struct dma_resv_list *fobj; struct dma_fence *fence; unsigned seq; int count = 0, attach_count, shared_count, i; @@ -1408,20 +1524,17 @@ return ret; seq_puts(s, "\nDma-buf Objects:\n"); - seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-60s\t%-8s\n", - "size", "flags", "mode", "count", "exp_name", "ino"); + seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n", + "size", "flags", "mode", "count", "ino"); list_for_each_entry(buf_obj, &db_list.head, list_node) { - ret = mutex_lock_interruptible(&buf_obj->lock); - if (ret) { - seq_puts(s, - "\tERROR locking buffer object: skipping\n"); - continue; - } + ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); + if (ret) + goto error_unlock; spin_lock(&buf_obj->name_lock); - seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-60s\t%08lu\t%s\n", + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, file_count(buf_obj->file), @@ -1432,12 +1545,12 @@ robj = buf_obj->resv; while (true) { - seq = read_seqbegin(&robj->seq); + seq = read_seqcount_begin(&robj->seq); rcu_read_lock(); fobj = rcu_dereference(robj->fence); shared_count = fobj ? fobj->shared_count : 0; fence = rcu_dereference(robj->fence_excl); - if (!read_seqretry(&robj->seq, seq)) + if (!read_seqcount_retry(&robj->seq, seq)) break; rcu_read_unlock(); } @@ -1466,178 +1579,26 @@ seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); attach_count++; } + dma_resv_unlock(buf_obj->resv); seq_printf(s, "Total %d devices attached\n\n", attach_count); count++; size += buf_obj->size; - mutex_unlock(&buf_obj->lock); } seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); mutex_unlock(&db_list.lock); return 0; -} -static int dma_buf_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, dma_buf_debug_show, NULL); -} - -static const struct file_operations dma_buf_debug_fops = { - .open = dma_buf_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -struct dma_info { - struct dma_buf *dmabuf; - struct hlist_node head; -}; - -struct dma_proc { - char name[TASK_COMM_LEN]; - pid_t pid; - size_t size; - struct hlist_head dma_bufs[1 << 10]; - struct list_head head; -}; - -static int get_dma_info(const void *data, struct file *file, unsigned int n) -{ - struct dma_proc *dma_proc; - struct dma_info *dma_info; - - dma_proc = (struct dma_proc *)data; - if (!is_dma_buf_file(file)) - return 0; - - hash_for_each_possible(dma_proc->dma_bufs, dma_info, - head, (unsigned long)file->private_data) { - if (file->private_data == dma_info->dmabuf) - return 0; - } - - dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC); - if (!dma_info) - return -ENOMEM; - - get_file(file); - dma_info->dmabuf = file->private_data; - dma_proc->size += dma_info->dmabuf->size / SZ_1K; - hash_add(dma_proc->dma_bufs, &dma_info->head, - (unsigned long)dma_info->dmabuf); - return 0; -} - -static void write_proc(struct seq_file *s, struct dma_proc *proc) -{ - struct dma_info *tmp; - int i; - - seq_printf(s, "\n%s (PID %d) size: %zu\nDMA 
Buffers:\n", - proc->name, proc->pid, proc->size); - seq_printf(s, "%-8s\t%-60s\t%-8s\t%-8s\t%s\n", - "Name", "Exp_name", "Size (KB)", "Alive (sec)", "Attached Devices"); - - hash_for_each(proc->dma_bufs, i, tmp, head) { - struct dma_buf *dmabuf = tmp->dmabuf; - struct dma_buf_attachment *a; - ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime); - - elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC); - seq_printf(s, "%-8s\t%-60s\t%-8zu\t%-8lld", - dmabuf->name, - dmabuf->exp_name, - dmabuf->size / SZ_1K, - elapmstime); - - list_for_each_entry(a, &dmabuf->attachments, node) { - seq_printf(s, "\t%s", dev_name(a->dev)); - } - seq_printf(s, "\n"); - } -} - -static void free_proc(struct dma_proc *proc) -{ - struct dma_info *tmp; - struct hlist_node *n; - int i; - - hash_for_each_safe(proc->dma_bufs, i, n, tmp, head) { - fput(tmp->dmabuf->file); - hash_del(&tmp->head); - kfree(tmp); - } - kfree(proc); -} - -static int cmp_proc(void *unused, struct list_head *a, struct list_head *b) -{ - struct dma_proc *a_proc, *b_proc; - - a_proc = list_entry(a, struct dma_proc, head); - b_proc = list_entry(b, struct dma_proc, head); - return b_proc->size - a_proc->size; -} - -static int dma_procs_debug_show(struct seq_file *s, void *unused) -{ - struct task_struct *task, *thread; - struct files_struct *files; - int ret = 0; - struct dma_proc *tmp, *n; - LIST_HEAD(plist); - - rcu_read_lock(); - for_each_process(task) { - struct files_struct *group_leader_files = NULL; - - tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); - if (!tmp) { - ret = -ENOMEM; - rcu_read_unlock(); - goto mem_err; - } - hash_init(tmp->dma_bufs); - for_each_thread(task, thread) { - task_lock(thread); - if (unlikely(!group_leader_files)) - group_leader_files = task->group_leader->files; - files = thread->files; - if (files && (group_leader_files != files || - thread == task->group_leader)) - ret = iterate_fd(files, 0, get_dma_info, tmp); - task_unlock(thread); - } - if (ret || hash_empty(tmp->dma_bufs)) - goto skip; - get_task_comm(tmp->name, task); - tmp->pid = task->tgid; - list_add(&tmp->head, &plist); - continue; -skip: - free_proc(tmp); - } - rcu_read_unlock(); - - list_sort(NULL, &plist, cmp_proc); - list_for_each_entry(tmp, &plist, head) - write_proc(s, tmp); - - ret = 0; -mem_err: - list_for_each_entry_safe(tmp, n, &plist, head) { - list_del(&tmp->head); - free_proc(tmp); - } +error_unlock: + mutex_unlock(&db_list.lock); return ret; } -DEFINE_SHOW_ATTRIBUTE(dma_procs_debug); + +DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); static struct dentry *dma_buf_debugfs_dir; @@ -1656,17 +1617,6 @@ NULL, &dma_buf_debug_fops); if (IS_ERR(d)) { pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); - debugfs_remove_recursive(dma_buf_debugfs_dir); - dma_buf_debugfs_dir = NULL; - err = PTR_ERR(d); - return err; - } - - d = debugfs_create_file("bufprocs", 0444, dma_buf_debugfs_dir, - NULL, &dma_procs_debug_fops); - - if (IS_ERR(d)) { - pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n"); debugfs_remove_recursive(dma_buf_debugfs_dir); dma_buf_debugfs_dir = NULL; err = PTR_ERR(d); @@ -1691,6 +1641,12 @@ static int __init dma_buf_init(void) { + int ret; + + ret = dma_buf_init_sysfs_statistics(); + if (ret) + return ret; + dma_buf_mnt = kern_mount(&dma_buf_fs_type); if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); @@ -1706,5 +1662,6 @@ { dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); + dma_buf_uninit_sysfs_statistics(); } __exitcall(dma_buf_deinit); -- Gitblit v1.6.2