From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
---
kernel/drivers/android/binder.c | 2080 ++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 1283 insertions(+), 797 deletions(-)
diff --git a/kernel/drivers/android/binder.c b/kernel/drivers/android/binder.c
index d65097c..16e0ba3 100644
--- a/kernel/drivers/android/binder.c
+++ b/kernel/drivers/android/binder.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
*
* Android IPC Subsystem
*
* Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
/*
@@ -66,20 +57,25 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+#include <linux/task_work.h>
+#include <linux/sizes.h>
+#include <linux/android_vendor.h>
-#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>
+#include <uapi/linux/android/binder.h>
#include <asm/cacheflush.h>
-#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
+#include <trace/hooks/binder.h>
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
@@ -97,15 +93,6 @@
static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
-
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
@@ -174,24 +161,6 @@
#define to_binder_fd_array_object(hdr) \
container_of(hdr, struct binder_fd_array_object, hdr)
-enum binder_stat_types {
- BINDER_STAT_PROC,
- BINDER_STAT_THREAD,
- BINDER_STAT_NODE,
- BINDER_STAT_REF,
- BINDER_STAT_DEATH,
- BINDER_STAT_TRANSACTION,
- BINDER_STAT_TRANSACTION_COMPLETE,
- BINDER_STAT_COUNT
-};
-
-struct binder_stats {
- atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
- atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
- atomic_t obj_created[BINDER_STAT_COUNT];
- atomic_t obj_deleted[BINDER_STAT_COUNT];
-};
-
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
@@ -204,8 +173,32 @@
atomic_inc(&binder_stats.obj_created[type]);
}
-struct binder_transaction_log binder_transaction_log;
-struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log_entry {
+ int debug_id;
+ int debug_id_done;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
+ char context_name[BINDERFS_MAX_NAME + 1];
+};
+
+struct binder_transaction_log {
+ atomic_t cur;
+ bool full;
+ struct binder_transaction_log_entry entry[32];
+};
+
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -227,307 +220,9 @@
return e;
}
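The log added above is a fixed 32-entry ring indexed by an atomic counter; the
body of binder_transaction_log_add() falls outside this hunk's context, but a
minimal sketch of the claim path (assuming the upstream counter-modulo scheme;
not part of this patch) looks like:

	static struct binder_transaction_log_entry *log_claim_entry(
		struct binder_transaction_log *log)
	{
		unsigned int cur = atomic_inc_return(&log->cur);

		if (cur >= ARRAY_SIZE(log->entry))
			log->full = true; /* ring wrapped; old entries overwritten */
		return &log->entry[cur % ARRAY_SIZE(log->entry)];
	}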
-/**
- * struct binder_work - work enqueued on a worklist
- * @entry: node enqueued on list
- * @type: type of work to be performed
- *
- * There are separate work lists for proc, thread, and node (async).
- */
-struct binder_work {
- struct list_head entry;
-
- enum binder_work_type {
- BINDER_WORK_TRANSACTION = 1,
- BINDER_WORK_TRANSACTION_COMPLETE,
- BINDER_WORK_RETURN_ERROR,
- BINDER_WORK_NODE,
- BINDER_WORK_DEAD_BINDER,
- BINDER_WORK_DEAD_BINDER_AND_CLEAR,
- BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
- } type;
-};
-
-struct binder_error {
- struct binder_work work;
- uint32_t cmd;
-};
-
-/**
- * struct binder_node - binder node bookkeeping
- * @debug_id: unique ID for debugging
- * (invariant after initialized)
- * @lock: lock for node fields
- * @work: worklist element for node work
- * (protected by @proc->inner_lock)
- * @rb_node: element for proc->nodes tree
- * (protected by @proc->inner_lock)
- * @dead_node: element for binder_dead_nodes list
- * (protected by binder_dead_nodes_lock)
- * @proc: binder_proc that owns this node
- * (invariant after initialized)
- * @refs: list of references on this node
- * (protected by @lock)
- * @internal_strong_refs: used to take strong references when
- * initiating a transaction
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @local_weak_refs: weak user refs from local process
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @local_strong_refs: strong user refs from local process
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @tmp_refs: temporary kernel refs
- * (protected by @proc->inner_lock while @proc
- * is valid, and by binder_dead_nodes_lock
- * if @proc is NULL. During inc/dec and node release
- * it is also protected by @lock to provide safety
- * as the node dies and @proc becomes NULL)
- * @ptr: userspace pointer for node
- * (invariant, no lock needed)
- * @cookie: userspace cookie for node
- * (invariant, no lock needed)
- * @has_strong_ref: userspace notified of strong ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @pending_strong_ref: userspace has acked notification of strong ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @has_weak_ref: userspace notified of weak ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @pending_weak_ref: userspace has acked notification of weak ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @has_async_transaction: async transaction to node in progress
- * (protected by @lock)
- * @sched_policy: minimum scheduling policy for node
- * (invariant after initialized)
- * @accept_fds: file descriptor operations supported for node
- * (invariant after initialized)
- * @min_priority: minimum scheduling priority
- * (invariant after initialized)
- * @inherit_rt: inherit RT scheduling policy from caller
- * @txn_security_ctx: require sender's security context
- * (invariant after initialized)
- * @async_todo: list of async work items
- * (protected by @proc->inner_lock)
- *
- * Bookkeeping structure for binder nodes.
- */
-struct binder_node {
- int debug_id;
- spinlock_t lock;
- struct binder_work work;
- union {
- struct rb_node rb_node;
- struct hlist_node dead_node;
- };
- struct binder_proc *proc;
- struct hlist_head refs;
- int internal_strong_refs;
- int local_weak_refs;
- int local_strong_refs;
- int tmp_refs;
- binder_uintptr_t ptr;
- binder_uintptr_t cookie;
- struct {
- /*
- * bitfield elements protected by
- * proc inner_lock
- */
- u8 has_strong_ref:1;
- u8 pending_strong_ref:1;
- u8 has_weak_ref:1;
- u8 pending_weak_ref:1;
- };
- struct {
- /*
- * invariant after initialization
- */
- u8 sched_policy:2;
- u8 inherit_rt:1;
- u8 accept_fds:1;
- u8 txn_security_ctx:1;
- u8 min_priority;
- };
- bool has_async_transaction;
- struct list_head async_todo;
-};
-
-struct binder_ref_death {
- /**
- * @work: worklist element for death notifications
- * (protected by inner_lock of the proc that
- * this ref belongs to)
- */
- struct binder_work work;
- binder_uintptr_t cookie;
-};
-
-/**
- * struct binder_ref_data - binder_ref counts and id
- * @debug_id: unique ID for the ref
- * @desc: unique userspace handle for ref
- * @strong: strong ref count (debugging only if not locked)
- * @weak: weak ref count (debugging only if not locked)
- *
- * Structure to hold ref count and ref id information. Since
- * the actual ref can only be accessed with a lock, this structure
- * is used to return information about the ref to callers of
- * ref inc/dec functions.
- */
-struct binder_ref_data {
- int debug_id;
- uint32_t desc;
- int strong;
- int weak;
-};
-
-/**
- * struct binder_ref - struct to track references on nodes
- * @data: binder_ref_data containing id, handle, and current refcounts
- * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
- * @rb_node_node: node for lookup by @node in proc's rb_tree
- * @node_entry: list entry for node->refs list in target node
- * (protected by @node->lock)
- * @proc: binder_proc containing ref
- * @node: binder_node of target node. When cleaning up a
- * ref for deletion in binder_cleanup_ref, a non-NULL
- * @node indicates the node must be freed
- * @death: pointer to death notification (ref_death) if requested
- * (protected by @node->lock)
- *
- * Structure to track references from procA to target node (on procB). This
- * structure is unsafe to access without holding @proc->outer_lock.
- */
-struct binder_ref {
- /* Lookups needed: */
- /* node + proc => ref (transaction) */
- /* desc + proc => ref (transaction, inc/dec ref) */
- /* node => refs + procs (proc exit) */
- struct binder_ref_data data;
- struct rb_node rb_node_desc;
- struct rb_node rb_node_node;
- struct hlist_node node_entry;
- struct binder_proc *proc;
- struct binder_node *node;
- struct binder_ref_death *death;
-};
-
enum binder_deferred_state {
- BINDER_DEFERRED_PUT_FILES = 0x01,
- BINDER_DEFERRED_FLUSH = 0x02,
- BINDER_DEFERRED_RELEASE = 0x04,
-};
-
-/**
- * struct binder_priority - scheduler policy and priority
- * @sched_policy scheduler policy
- * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
- *
- * The binder driver supports inheriting the following scheduler policies:
- * SCHED_NORMAL
- * SCHED_BATCH
- * SCHED_FIFO
- * SCHED_RR
- */
-struct binder_priority {
- unsigned int sched_policy;
- int prio;
-};
-
-/**
- * struct binder_proc - binder process bookkeeping
- * @proc_node: element for binder_procs list
- * @threads: rbtree of binder_threads in this proc
- * (protected by @inner_lock)
- * @nodes: rbtree of binder nodes associated with
- * this proc ordered by node->ptr
- * (protected by @inner_lock)
- * @refs_by_desc: rbtree of refs ordered by ref->desc
- * (protected by @outer_lock)
- * @refs_by_node: rbtree of refs ordered by ref->node
- * (protected by @outer_lock)
- * @waiting_threads: threads currently waiting for proc work
- * (protected by @inner_lock)
- * @pid PID of group_leader of process
- * (invariant after initialized)
- * @tsk task_struct for group_leader of process
- * (invariant after initialized)
- * @files files_struct for process
- * (protected by @files_lock)
- * @files_lock mutex to protect @files
- * @cred struct cred associated with the `struct file`
- * in binder_open()
- * (invariant after initialized)
- * @deferred_work_node: element for binder_deferred_list
- * (protected by binder_deferred_lock)
- * @deferred_work: bitmap of deferred work to perform
- * (protected by binder_deferred_lock)
- * @is_dead: process is dead and awaiting free
- * when outstanding transactions are cleaned up
- * (protected by @inner_lock)
- * @todo: list of work for this process
- * (protected by @inner_lock)
- * @stats: per-process binder statistics
- * (atomics, no lock needed)
- * @delivered_death: list of delivered death notification
- * (protected by @inner_lock)
- * @max_threads: cap on number of binder threads
- * (protected by @inner_lock)
- * @requested_threads: number of binder threads requested but not
- * yet started. In current implementation, can
- * only be 0 or 1.
- * (protected by @inner_lock)
- * @requested_threads_started: number binder threads started
- * (protected by @inner_lock)
- * @tmp_ref: temporary reference to indicate proc is in use
- * (protected by @inner_lock)
- * @default_priority: default scheduler priority
- * (invariant after initialized)
- * @debugfs_entry: debugfs node
- * @alloc: binder allocator bookkeeping
- * @context: binder_context for this proc
- * (invariant after initialized)
- * @inner_lock: can nest under outer_lock and/or node lock
- * @outer_lock: no nesting under innor or node lock
- * Lock order: 1) outer, 2) node, 3) inner
- * @binderfs_entry: process-specific binderfs log file
- *
- * Bookkeeping structure for binder processes
- */
-struct binder_proc {
- struct hlist_node proc_node;
- struct rb_root threads;
- struct rb_root nodes;
- struct rb_root refs_by_desc;
- struct rb_root refs_by_node;
- struct list_head waiting_threads;
- int pid;
- struct task_struct *tsk;
- struct files_struct *files;
- struct mutex files_lock;
- const struct cred *cred;
- struct hlist_node deferred_work_node;
- int deferred_work;
- bool is_dead;
-
- struct list_head todo;
- struct binder_stats stats;
- struct list_head delivered_death;
- int max_threads;
- int requested_threads;
- int requested_threads_started;
- int tmp_ref;
- struct binder_priority default_priority;
- struct dentry *debugfs_entry;
- struct binder_alloc alloc;
- struct binder_context *context;
- spinlock_t inner_lock;
- spinlock_t outer_lock;
- struct dentry *binderfs_entry;
+ BINDER_DEFERRED_FLUSH = 0x01,
+ BINDER_DEFERRED_RELEASE = 0x02,
};
enum {
@@ -540,110 +235,6 @@
};
/**
- * struct binder_thread - binder thread bookkeeping
- * @proc: binder process for this thread
- * (invariant after initialization)
- * @rb_node: element for proc->threads rbtree
- * (protected by @proc->inner_lock)
- * @waiting_thread_node: element for @proc->waiting_threads list
- * (protected by @proc->inner_lock)
- * @pid: PID for this thread
- * (invariant after initialization)
- * @looper: bitmap of looping state
- * (only accessed by this thread)
- * @looper_needs_return: looping thread needs to exit driver
- * (no lock needed)
- * @transaction_stack: stack of in-progress transactions for this thread
- * (protected by @proc->inner_lock)
- * @todo: list of work to do for this thread
- * (protected by @proc->inner_lock)
- * @process_todo: whether work in @todo should be processed
- * (protected by @proc->inner_lock)
- * @return_error: transaction errors reported by this thread
- * (only accessed by this thread)
- * @reply_error: transaction errors reported by target thread
- * (protected by @proc->inner_lock)
- * @wait: wait queue for thread work
- * @stats: per-thread statistics
- * (atomics, no lock needed)
- * @tmp_ref: temporary reference to indicate thread is in use
- * (atomic since @proc->inner_lock cannot
- * always be acquired)
- * @is_dead: thread is dead and awaiting free
- * when outstanding transactions are cleaned up
- * (protected by @proc->inner_lock)
- * @task: struct task_struct for this thread
- *
- * Bookkeeping structure for binder threads.
- */
-struct binder_thread {
- struct binder_proc *proc;
- struct rb_node rb_node;
- struct list_head waiting_thread_node;
- int pid;
- int looper; /* only modified by this thread */
- bool looper_need_return; /* can be written by other thread */
- struct binder_transaction *transaction_stack;
- struct list_head todo;
- bool process_todo;
- struct binder_error return_error;
- struct binder_error reply_error;
- wait_queue_head_t wait;
- struct binder_stats stats;
- atomic_t tmp_ref;
- bool is_dead;
- struct task_struct *task;
-};
-
-struct binder_transaction {
- int debug_id;
- struct binder_work work;
- struct binder_thread *from;
- struct binder_transaction *from_parent;
- struct binder_proc *to_proc;
- struct binder_thread *to_thread;
- struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
-
- struct binder_buffer *buffer;
- unsigned int code;
- unsigned int flags;
- struct binder_priority priority;
- struct binder_priority saved_priority;
- bool set_priority_called;
- kuid_t sender_euid;
- binder_uintptr_t security_ctx;
- /**
- * @lock: protects @from, @to_proc, and @to_thread
- *
- * @from, @to_proc, and @to_thread can be set to NULL
- * during thread teardown
- */
- spinlock_t lock;
-};
-
-/**
- * struct binder_object - union of flat binder object types
- * @hdr: generic object header
- * @fbo: binder object (nodes and refs)
- * @fdo: file descriptor object
- * @bbo: binder buffer pointer
- * @fdao: file descriptor array
- *
- * Used for type-independent object copies
- */
-struct binder_object {
- union {
- struct binder_object_header hdr;
- struct flat_binder_object fbo;
- struct binder_fd_object fdo;
- struct binder_buffer_object bbo;
- struct binder_fd_array_object fdao;
- };
-};
-
-/**
* binder_proc_lock() - Acquire outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
@@ -653,6 +244,7 @@
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
+ __acquires(&proc->outer_lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -668,6 +260,7 @@
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
+ __releases(&proc->outer_lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -683,6 +276,7 @@
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
+ __acquires(&proc->inner_lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -698,6 +292,7 @@
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+ __releases(&proc->inner_lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -713,6 +308,7 @@
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
+ __acquires(&node->lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -728,6 +324,7 @@
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
+ __releases(&node->lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
@@ -744,12 +341,16 @@
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
+ __acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
binder_debug(BINDER_DEBUG_SPINLOCKS,
"%s: line=%d\n", __func__, line);
spin_lock(&node->lock);
if (node->proc)
binder_inner_proc_lock(node->proc);
+ else
+ /* annotation for sparse */
+ __acquire(&node->proc->inner_lock);
}
/**
@@ -761,6 +362,7 @@
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
+ __releases(&node->lock) __releases(&node->proc->inner_lock)
{
struct binder_proc *proc = node->proc;
@@ -768,6 +370,9 @@
"%s: line=%d\n", __func__, line);
if (proc)
binder_inner_proc_unlock(proc);
+ else
+ /* annotation for sparse */
+ __release(&node->proc->inner_lock);
spin_unlock(&node->lock);
}
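The __acquires()/__releases() markers and the paired __acquire()/__release()
calls added above are sparse-only annotations that compile to nothing at
runtime. A minimal sketch of the pattern (illustrative; struct foo and its
valid flag are hypothetical): when a lock is taken on only one branch, the
other branch "acquires" a no-op context so sparse -Wcontext sees balanced
locking on every path:

	static void cond_lock(struct foo *f)
		__acquires(&f->lock)
	{
		if (f->valid)
			spin_lock(&f->lock);
		else
			/* no-op at runtime; balances sparse's lock context */
			__acquire(&f->lock);
	}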
@@ -907,69 +512,14 @@
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
- unsigned long rlim_cur;
- unsigned long irqs;
- int ret;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- ret = -ESRCH;
- goto err;
- }
- if (!lock_task_sighand(proc->tsk, &irqs)) {
- ret = -EMFILE;
- goto err;
- }
- rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
- unlock_task_sighand(proc->tsk, &irqs);
-
- ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
-err:
- mutex_unlock(&proc->files_lock);
- return ret;
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
- struct binder_proc *proc, unsigned int fd, struct file *file)
-{
- mutex_lock(&proc->files_lock);
- if (proc->files)
- __fd_install(proc->files, fd, file);
- mutex_unlock(&proc->files_lock);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
- int retval;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- retval = -ESRCH;
- goto err;
- }
- retval = __close_fd(proc->files, fd);
- /* can't restart close syscall because file table entry was cleared */
- if (unlikely(retval == -ERESTARTSYS ||
- retval == -ERESTARTNOINTR ||
- retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK))
- retval = -EINTR;
-err:
- mutex_unlock(&proc->files_lock);
- return retval;
-}
-
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
+ int ret = 0;
+
+ trace_android_vh_binder_has_work_ilocked(thread, do_proc_work, &ret);
+ if (ret)
+ return true;
return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
@@ -1005,6 +555,7 @@
thread = rb_entry(n, struct binder_thread, rb_node);
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
binder_available_for_proc_work_ilocked(thread)) {
+ trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
if (sync)
wake_up_interruptible_sync(&thread->wait);
else
@@ -1064,6 +615,7 @@
assert_spin_locked(&proc->inner_lock);
if (thread) {
+ trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
if (sync)
wake_up_interruptible_sync(&thread->wait);
else
@@ -1206,6 +758,7 @@
bool inherit_rt)
{
struct binder_priority desired_prio = t->priority;
+ bool skip = false;
if (t->set_priority_called)
return;
@@ -1213,6 +766,10 @@
t->set_priority_called = true;
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
+
+ trace_android_vh_binder_priority_skip(task, &skip);
+ if (skip)
+ return;
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
@@ -1233,6 +790,7 @@
}
binder_set_priority(task, desired_prio);
+ trace_android_vh_binder_set_priority(t, task);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
@@ -1545,10 +1103,14 @@
binder_node_inner_lock(node);
if (!node->proc)
spin_lock(&binder_dead_nodes_lock);
+ else
+ __acquire(&binder_dead_nodes_lock);
node->tmp_refs--;
BUG_ON(node->tmp_refs < 0);
if (!node->proc)
spin_unlock(&binder_dead_nodes_lock);
+ else
+ __release(&binder_dead_nodes_lock);
/*
* Call binder_dec_node() to check if all refcounts are 0
* and cleanup is needed. Calling with strong=0 and internal=1
@@ -1669,6 +1231,7 @@
"%d new ref %d desc %d for node %d\n",
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
node->debug_id);
+ trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id);
binder_node_unlock(node);
return new_ref;
}
@@ -1836,6 +1399,7 @@
*/
static void binder_free_ref(struct binder_ref *ref)
{
+ trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL, ref->data.desc);
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
@@ -1940,6 +1504,18 @@
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
@@ -2051,20 +1627,45 @@
*/
static struct binder_thread *binder_get_txn_from_and_acq_inner(
struct binder_transaction *t)
+ __acquires(&t->from->proc->inner_lock)
{
struct binder_thread *from;
from = binder_get_txn_from(t);
- if (!from)
+ if (!from) {
+ __acquire(&from->proc->inner_lock);
return NULL;
+ }
binder_inner_proc_lock(from->proc);
if (t->from) {
BUG_ON(from != t->from);
return from;
}
binder_inner_proc_unlock(from->proc);
+ __acquire(&from->proc->inner_lock);
binder_thread_dec_tmpref(from);
return NULL;
+}
+
+/**
+ * binder_free_txn_fixups() - free unprocessed fd fixups
+ * @t: binder transaction for t->from
+ *
+ * If the transaction is being torn down prior to being
+ * processed by the target process, free all of the
+ * fd fixups and fput the file structs. It is safe to
+ * call this function after the fixups have been
+ * processed -- in that case, the list will be empty.
+ */
+static void binder_free_txn_fixups(struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ fput(fixup->file);
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
}
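The fixup records consumed above are declared in binder_internal.h rather than
in this file; a sketch of their assumed shape (field names inferred from the
uses here, not quoted from the header):

	struct binder_txn_fd_fixup {
		struct list_head fixup_entry;	/* entry on t->fd_fixups */
		struct file *file;		/* file to install in the target */
		size_t offset;			/* buffer offset where the fd lands */
	};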
static void binder_free_transaction(struct binder_transaction *t)
@@ -2073,6 +1674,12 @@
if (target_proc) {
binder_inner_proc_lock(target_proc);
+ target_proc->outstanding_txns--;
+ if (target_proc->outstanding_txns < 0)
+ pr_warn("%s: Unexpected outstanding_txns %d\n",
+ __func__, target_proc->outstanding_txns);
+ if (!target_proc->outstanding_txns && target_proc->is_frozen)
+ wake_up_interruptible_all(&target_proc->freeze_wait);
if (t->buffer)
t->buffer->transaction = NULL;
binder_inner_proc_unlock(target_proc);
@@ -2081,6 +1688,7 @@
* If the transaction has no target_proc, then
* t->buffer->transaction has already been cleared.
*/
+ binder_free_txn_fixups(t);
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -2123,6 +1731,7 @@
binder_free_transaction(t);
return;
}
+ __release(&target_thread->proc->inner_lock);
next = t->from_parent;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2165,15 +1774,21 @@
/**
* binder_get_object() - gets object and checks for valid metadata
* @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the @buffer at which to validate an object.
* @object: struct binder_object to read into
*
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
* size of that object. Otherwise, it returns zero. The object
* is read into the struct binder_object pointed to by @object.
*/
static size_t binder_get_object(struct binder_proc *proc,
+ const void __user *u,
struct binder_buffer *buffer,
unsigned long offset,
struct binder_object *object)
@@ -2183,11 +1798,16 @@
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- !IS_ALIGNED(offset, sizeof(u32)))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr))
return 0;
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size);
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+ } else {
+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
+ return 0;
+ }
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
@@ -2256,9 +1876,11 @@
return NULL;
buffer_offset = start_offset + sizeof(binder_size_t) * index;
- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
- b, buffer_offset, sizeof(object_offset));
- object_size = binder_get_object(proc, b, object_offset, object);
+ if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ b, buffer_offset,
+ sizeof(object_offset)))
+ return NULL;
+ object_size = binder_get_object(proc, NULL, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
if (object_offsetp)
@@ -2323,7 +1945,8 @@
unsigned long buffer_offset;
struct binder_object last_object;
struct binder_buffer_object *last_bbo;
- size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ size_t object_size = binder_get_object(proc, NULL, b,
+ last_obj_offset,
&last_object);
if (object_size != sizeof(*last_bbo))
return false;
@@ -2337,15 +1960,78 @@
return false;
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
buffer_offset = objects_start_offset +
- sizeof(binder_size_t) * last_bbo->parent,
- binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
- b, buffer_offset,
- sizeof(last_obj_offset));
+ sizeof(binder_size_t) * last_bbo->parent;
+ if (binder_alloc_copy_from_buffer(&proc->alloc,
+ &last_obj_offset,
+ b, buffer_offset,
+ sizeof(last_obj_offset)))
+ return false;
}
return (fixup_offset >= last_min_offset);
}
+/**
+ * struct binder_task_work_cb - for deferred close
+ *
+ * @twork: callback_head for task work
+ * @fd: fd to close
+ *
+ * Structure to pass task work to be handled after
+ * returning from binder_ioctl() via task_work_add().
+ */
+struct binder_task_work_cb {
+ struct callback_head twork;
+ struct file *file;
+};
+
+/**
+ * binder_do_fd_close() - close list of file descriptors
+ * @twork: callback head for task work
+ *
+ * It is not safe to call ksys_close() during the binder_ioctl()
+ * function if there is a chance that binder's own file descriptor
+ * might be closed. This is to meet the requirements for using
+ * fdget() (see comments for __fget_light()). Therefore use
+ * task_work_add() to schedule the close operation once we have
+ * returned from binder_ioctl(). This function is a callback
+ * for that mechanism and does the actual ksys_close() on the
+ * given file descriptor.
+ */
+static void binder_do_fd_close(struct callback_head *twork)
+{
+ struct binder_task_work_cb *twcb = container_of(twork,
+ struct binder_task_work_cb, twork);
+
+ fput(twcb->file);
+ kfree(twcb);
+}
+
+/**
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
+ * @fd: file-descriptor to close
+ *
+ * See comments in binder_do_fd_close(). This function is used to schedule
+ * a file-descriptor to be closed after returning from binder_ioctl().
+ */
+static void binder_deferred_fd_close(int fd)
+{
+ struct binder_task_work_cb *twcb;
+
+ twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+ if (!twcb)
+ return;
+ init_task_work(&twcb->twork, binder_do_fd_close);
+ close_fd_get_file(fd, &twcb->file);
+ if (twcb->file) {
+ filp_close(twcb->file, current->files);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
+ } else {
+ kfree(twcb);
+ }
+}
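The two helpers above follow the standard task_work pattern: wrap the payload
around a callback_head, queue it with task_work_add(), and the callback runs
in process context as the task returns to user mode. A minimal generic sketch
(illustrative; not part of the patch):

	struct deferred_cb {
		struct callback_head twork;
		struct file *file;	/* payload to release later */
	};

	static void deferred_cb_run(struct callback_head *twork)
	{
		struct deferred_cb *cb =
			container_of(twork, struct deferred_cb, twork);

		fput(cb->file);		/* safe: we are out of binder_ioctl() */
		kfree(cb);
	}

	static void deferred_cb_queue(struct file *file)
	{
		struct deferred_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

		if (!cb)
			return;
		cb->file = file;
		init_task_work(&cb->twork, deferred_cb_run);
		/* TWA_RESUME: run on return to user space, no signal */
		task_work_add(current, &cb->twork, TWA_RESUME);
	}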
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_thread *thread,
struct binder_buffer *buffer,
binder_size_t failed_at,
bool is_failure)
@@ -2363,20 +2049,20 @@
binder_dec_node(buffer->target_node, 1, 0);
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
- off_end_offset = is_failure ? failed_at :
+ off_end_offset = is_failure && failed_at ? failed_at :
off_start_offset + buffer->offsets_size;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size;
+ size_t object_size = 0;
struct binder_object object;
binder_size_t object_offset;
- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
- buffer, buffer_offset,
- sizeof(object_offset));
- object_size = binder_get_object(proc, buffer,
- object_offset, &object);
+ if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ buffer, buffer_offset,
+ sizeof(object_offset)))
+ object_size = binder_get_object(proc, NULL, buffer,
+ object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)object_offset, buffer->data_size);
@@ -2424,12 +2110,15 @@
} break;
case BINDER_TYPE_FD: {
- struct binder_fd_object *fp = to_binder_fd_object(hdr);
-
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->fd);
- if (failed_at)
- task_close_fd(proc, fp->fd);
+ /*
+ * No need to close the file here since user-space
+ * closes it for successfully delivered
+ * transactions. For transactions that weren't
+ * delivered, the new fd was never allocated so
+ * there is no need to close and the fput on the
+ * file is done when the transaction is torn
+ * down.
+ */
} break;
case BINDER_TYPE_PTR:
/*
@@ -2445,6 +2134,14 @@
size_t fd_index;
binder_size_t fd_buf_size;
binder_size_t num_valid;
+
+ if (is_failure) {
+ /*
+ * The fd fixups have not been applied so no
+ * fds need to be closed.
+ */
+ continue;
+ }
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
@@ -2485,15 +2182,24 @@
for (fd_index = 0; fd_index < fda->num_fds;
fd_index++) {
u32 fd;
+ int err;
binder_size_t offset = fda_offset +
fd_index * sizeof(fd);
- binder_alloc_copy_from_buffer(&proc->alloc,
- &fd,
- buffer,
- offset,
- sizeof(fd));
- task_close_fd(proc, fd);
+ err = binder_alloc_copy_from_buffer(
+ &proc->alloc, &fd, buffer,
+ offset, sizeof(fd));
+ WARN_ON(err);
+ if (!err) {
+ binder_deferred_fd_close(fd);
+ /*
+ * Need to make sure the thread goes
+ * back to userspace to complete the
+ * deferred close
+ */
+ if (thread)
+ thread->looper_need_return = true;
+ }
}
} break;
default:
@@ -2528,7 +2234,8 @@
ret = -EINVAL;
goto done;
}
- if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
+ if (security_binder_transfer_binder(binder_get_cred(proc),
+ binder_get_cred(target_proc))) {
ret = -EPERM;
goto done;
}
@@ -2574,7 +2281,8 @@
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
+ if (security_binder_transfer_binder(binder_get_cred(proc),
+ binder_get_cred(target_proc))) {
ret = -EPERM;
goto done;
}
@@ -2589,11 +2297,15 @@
fp->cookie = node->cookie;
if (node->proc)
binder_inner_proc_lock(node->proc);
+ else
+ __acquire(&node->proc->inner_lock);
binder_inc_node_nilocked(node,
fp->hdr.type == BINDER_TYPE_BINDER,
0, NULL);
if (node->proc)
binder_inner_proc_unlock(node->proc);
+ else
+ __release(&node->proc->inner_lock);
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
@@ -2626,16 +2338,16 @@
return ret;
}
-static int binder_translate_fd(int fd,
+static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
- int target_fd;
+ struct binder_txn_fd_fixup *fixup;
struct file *file;
- int ret;
+ int ret = 0;
bool target_allows_fd;
if (in_reply_to)
@@ -2658,25 +2370,31 @@
ret = -EBADF;
goto err_fget;
}
- ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
+ ret = security_binder_transfer_file(binder_get_cred(proc),
+ binder_get_cred(target_proc), file);
if (ret < 0) {
ret = -EPERM;
goto err_security;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
+ /*
+ * Add fixup record for this transaction. The allocation
+ * of the fd in the target needs to be done from a
+ * target thread.
+ */
+ fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+ if (!fixup) {
ret = -ENOMEM;
- goto err_get_unused_fd;
+ goto err_alloc;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fd, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
- fd, target_fd);
+ fixup->file = file;
+ fixup->offset = fd_offset;
+ trace_binder_transaction_fd_send(t, fd, fixup->offset);
+ list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
- return target_fd;
+ return ret;
-err_get_unused_fd:
+err_alloc:
err_security:
fput(file);
err_fget:
@@ -2684,17 +2402,266 @@
return ret;
}
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset offset in target buffer to fixup
+ * @skip_size bytes to skip in copy (fixup will be written later)
+ * @fixup_data data to write at fixup offset
+ * @node list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+ binder_size_t offset;
+ size_t skip_size;
+ binder_uintptr_t fixup_data;
+ struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset offset in target buffer
+ * @sender_uaddr user address in source buffer
+ * @length bytes to copy
+ * @node list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+ binder_size_t offset;
+ const void __user *sender_uaddr;
+ size_t length;
+ struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc: binder_alloc associated with @buffer
+ * @buffer: binder buffer in target process
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *tmppf;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < sgc->length) {
+ size_t copy_size;
+ size_t bytes_left = sgc->length - bytes_copied;
+ size_t offset = sgc->offset + bytes_copied;
+
+ /*
+ * We copy up to the fixup (pointed to by pf)
+ */
+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+ : bytes_left;
+ if (!ret && copy_size)
+ ret = binder_alloc_copy_user_to_buffer(
+ alloc, buffer,
+ offset,
+ sgc->sender_uaddr + bytes_copied,
+ copy_size);
+ bytes_copied += copy_size;
+ if (copy_size != bytes_left) {
+ BUG_ON(!pf);
+ /* we stopped at a fixup offset */
+ if (pf->skip_size) {
+ /*
+ * we are just skipping. This is for
+ * BINDER_TYPE_FDA where the translated
+ * fds will be fixed up when we get
+ * to target context.
+ */
+ bytes_copied += pf->skip_size;
+ } else {
+ /* apply the fixup indicated by pf */
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(
+ alloc, buffer,
+ pf->offset,
+ &pf->fixup_data,
+ sizeof(pf->fixup_data));
+ bytes_copied += sizeof(pf->fixup_data);
+ }
+ list_del(&pf->node);
+ kfree(pf);
+ pf = list_first_entry_or_null(pf_head,
+ struct binder_ptr_fixup, node);
+ }
+ }
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ BUG_ON(pf->skip_size == 0);
+ list_del(&pf->node);
+ kfree(pf);
+ }
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
+}
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf, *tmppf;
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ list_del(&pf->node);
+ kfree(pf);
+ }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head: list_head of scatter-gather copy list
+ * @offset: binder buffer offset in target process
+ * @sender_uaddr: user address in source process
+ * @length: bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+ const void __user *sender_uaddr, size_t length)
+{
+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+ if (!bc)
+ return -ENOMEM;
+
+ bc->offset = offset;
+ bc->sender_uaddr = sender_uaddr;
+ bc->length = length;
+ INIT_LIST_HEAD(&bc->node);
+
+ /*
+ * We are guaranteed that the deferred copies are in-order
+ * so just add to the tail.
+ */
+ list_add_tail(&bc->node, sgc_head);
+
+ return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head: list_head of binder ptr fixup list
+ * @offset: binder buffer offset in target process
+ * @fixup: bytes to be copied for fixup
+ * @skip_size: bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+ binder_uintptr_t fixup, size_t skip_size)
+{
+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *tmppf;
+
+ if (!pf)
+ return -ENOMEM;
+
+ pf->offset = offset;
+ pf->fixup_data = fixup;
+ pf->skip_size = skip_size;
+ INIT_LIST_HEAD(&pf->node);
+
+ /* Fixups are *mostly* added in-order, but there are some
+ * exceptions. Look backwards through list for insertion point.
+ */
+ list_for_each_entry_reverse(tmppf, pf_head, node) {
+ if (tmppf->offset < pf->offset) {
+ list_add(&pf->node, &tmppf->node);
+ return 0;
+ }
+ }
+ /*
+ * if we get here, then the new offset is the lowest so
+ * insert at the head
+ */
+ list_add(&pf->node, pf_head);
+ return 0;
+}
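Taken together, the helpers above give binder_transaction() a two-phase copy.
A sketch of the calling shape (hypothetical wrapper; the real caller is
binder_transaction() itself):

	static int example_two_phase(struct binder_alloc *alloc,
				     struct binder_buffer *buf,
				     struct list_head *sgc_head,
				     struct list_head *pf_head,
				     const void __user *sender_uaddr,
				     binder_size_t offset, size_t length,
				     binder_uintptr_t new_ptr)
	{
		int ret;

		/* phase 1: record the sg copy and a pointer fixup inside it */
		ret = binder_defer_copy(sgc_head, offset, sender_uaddr, length);
		if (!ret)
			ret = binder_add_fixup(pf_head, offset, new_ptr, 0);
		if (ret) {
			binder_cleanup_deferred_txn_lists(sgc_head, pf_head);
			return ret;
		}
		/* phase 2: copy user data and apply fixups in one ordered pass */
		return binder_do_deferred_txn_copies(alloc, buf, sgc_head,
						     pf_head);
	}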
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
- binder_size_t fdi, fd_buf_size, num_installed_fds;
+ binder_size_t fdi, fd_buf_size;
binder_size_t fda_offset;
- int target_fd;
+ const void __user *sender_ufda_base;
struct binder_proc *proc = thread->proc;
- struct binder_proc *target_proc = t->to_proc;
+ int ret;
+
+ if (fda->num_fds == 0)
+ return 0;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2718,46 +2685,36 @@
*/
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
fda->parent_offset;
- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+ if (ret)
+ return ret;
+
for (fdi = 0; fdi < fda->num_fds; fdi++) {
u32 fd;
-
binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
- binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
- target_fd = binder_translate_fd(fd, t, thread, in_reply_to);
- if (target_fd < 0)
- goto err_translate_fd_failed;
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, offset,
- &target_fd, sizeof(fd));
+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
+ if (!ret)
+ ret = binder_translate_fd(fd, offset, t, thread,
+ in_reply_to);
+ if (ret)
+ return ret > 0 ? -EINVAL : ret;
}
return 0;
-
-err_translate_fd_failed:
- /*
- * Failed to allocate fd or security error, free fds
- * installed so far.
- */
- num_installed_fds = fdi;
- for (fdi = 0; fdi < num_installed_fds; fdi++) {
- u32 fd;
- binder_size_t offset = fda_offset + fdi * sizeof(fd);
- binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
- task_close_fd(target_proc, fd);
- }
- return target_fd;
}
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+ struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
binder_size_t off_start_offset,
@@ -2803,10 +2760,57 @@
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer));
+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
+}
- return 0;
+/**
+ * binder_can_update_transaction() - Can a txn be superseded by an updated one?
+ * @t1: the pending async txn in the frozen process
+ * @t2: the new async txn to supersede the outdated pending one
+ *
+ * Return: true if t2 can supersede t1
+ * false if t2 can not supersede t1
+ */
+static bool binder_can_update_transaction(struct binder_transaction *t1,
+ struct binder_transaction *t2)
+{
+ if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
+ (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
+ return false;
+ if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
+ t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
+ t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
+ t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
+ return true;
+ return false;
+}
+
+/**
+ * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
+ * @t: new async transaction
+ * @target_list: list to find outdated transaction
+ *
+ * Return: the outdated transaction if found
+ * NULL if no outdated transaction can be found
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static struct binder_transaction *
+binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
+ struct list_head *target_list)
+{
+ struct binder_work *w;
+
+ list_for_each_entry(w, target_list, entry) {
+ struct binder_transaction *t_queued;
+
+ if (w->type != BINDER_WORK_TRANSACTION)
+ continue;
+ t_queued = container_of(w, struct binder_transaction, work);
+ if (binder_can_update_transaction(t_queued, t))
+ return t_queued;
+ }
+ return NULL;
}
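A concrete scenario for the two helpers above (hypothetical, for
illustration):

	/*
	 * A process frozen via the BINDER_FREEZE ioctl keeps receiving
	 * oneway status updates flagged TF_ONE_WAY | TF_UPDATE_TXN with
	 * the same code and target node. Each new update supersedes the
	 * stale one still queued on node->async_todo, so the frozen
	 * process' async buffer space is not exhausted and only the
	 * freshest state is delivered after unfreeze.
	 */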
/**
@@ -2823,10 +2827,11 @@
* If the @thread parameter is not NULL, the transaction is always queued
* to the waitlist of that specific thread.
*
- * Return: true if the transactions was successfully queued
- * false if the target process or thread is dead
+ * Return: 0 if the transaction was successfully queued
+ * BR_DEAD_REPLY if the target process or thread is dead
+ * BR_FROZEN_REPLY if the target process or thread is frozen
*/
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
@@ -2834,6 +2839,7 @@
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
+ struct binder_transaction *t_outdated = NULL;
BUG_ON(!node);
binder_node_lock(node);
@@ -2842,23 +2848,30 @@
if (oneway) {
BUG_ON(thread);
- if (node->has_async_transaction) {
+ if (node->has_async_transaction)
pending_async = true;
- } else {
+ else
node->has_async_transaction = true;
- }
}
binder_inner_proc_lock(proc);
+ if (proc->is_frozen) {
+ proc->sync_recv |= !oneway;
+ proc->async_recv |= oneway;
+ }
- if (proc->is_dead || (thread && thread->is_dead)) {
+ if ((proc->is_frozen && !oneway) || proc->is_dead ||
+ (thread && thread->is_dead)) {
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return false;
+ return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
}
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
+
+ trace_android_vh_binder_proc_transaction(current, proc->tsk,
+ thread ? thread->task : NULL, node->debug_id, t->code, pending_async);
if (thread) {
binder_transaction_priority(thread->task, t, node_prio,
@@ -2867,16 +2880,47 @@
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
+ if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
+ t_outdated = binder_find_outdated_transaction_ilocked(t,
+ &node->async_todo);
+ if (t_outdated) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "txn %d supersedes %d\n",
+ t->debug_id, t_outdated->debug_id);
+ list_del_init(&t_outdated->work.entry);
+ proc->outstanding_txns--;
+ }
+ }
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
+
+ trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
+ thread ? thread->task : NULL, t->code, pending_async, !oneway);
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+ proc->outstanding_txns++;
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return true;
+ /*
+ * To reduce potential contention, free the outdated transaction and
+ * buffer after releasing the locks.
+ */
+ if (t_outdated) {
+ struct binder_buffer *buffer = t_outdated->buffer;
+
+ t_outdated->buffer = NULL;
+ buffer->transaction = NULL;
+ trace_binder_transaction_update_buffer_release(buffer);
+ binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+ kfree(t_outdated);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+
+ return 0;
}
/**
@@ -2934,6 +2978,7 @@
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
+ binder_size_t user_offset = 0;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2948,6 +2993,12 @@
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
+ struct list_head sgc_head;
+ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
+ INIT_LIST_HEAD(&sgc_head);
+ INIT_LIST_HEAD(&pf_head);
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -2957,7 +3008,7 @@
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
- e->context_name = proc->context->name;
+ strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
if (reply) {
binder_inner_proc_lock(proc);
@@ -2991,6 +3042,8 @@
binder_inner_proc_unlock(proc);
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
+ /* annotation for sparse */
+ __release(&target_thread->proc->inner_lock);
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -3012,6 +3065,7 @@
target_proc = target_thread->proc;
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_thread->proc);
+ trace_android_vh_binder_reply(target_proc, proc, thread, tr);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
@@ -3031,8 +3085,8 @@
ref->node, &target_proc,
&return_error);
} else {
- binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ binder_user_error("%d:%d got transaction to invalid handle, %u\n",
+ proc->pid, thread->pid, tr->target.handle);
return_error = BR_FAILED_REPLY;
}
binder_proc_unlock(proc);
@@ -3064,8 +3118,9 @@
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
- if (security_binder_transaction(proc->cred,
- target_proc->cred) < 0) {
+ trace_android_vh_binder_trans(target_proc, proc, thread, tr);
+ if (security_binder_transaction(binder_get_cred(proc),
+ binder_get_cred(target_proc)) < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
@@ -3133,6 +3188,7 @@
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
+ trace_android_rvh_binder_transaction(target_proc, proc, thread, tr);
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -3142,8 +3198,10 @@
return_error_line = __LINE__;
goto err_alloc_t_failed;
}
+ INIT_LIST_HEAD(&t->fd_fixups);
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);
+ trace_android_vh_binder_transaction_init(t);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
@@ -3197,9 +3255,28 @@
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
+ int max_retries = 100;
- security_cred_getsecid(proc->cred, &secid);
+ security_cred_getsecid(binder_get_cred(proc), &secid);
+ retry_alloc:
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret == -ENOMEM && max_retries-- > 0) {
+ struct page *dummy_page;
+
+ /*
+ * security_secid_to_secctx() can fail because of a
+ * GFP_ATOMIC allocation in which case -ENOMEM is
+ * returned. This needs to be retried, but there is
+ * currently no way to tell userspace to retry so we
+ * do it here. We make sure there is still available
+ * memory first and then retry.
+ */
+ dummy_page = alloc_page(GFP_KERNEL);
+ if (dummy_page) {
+ __free_page(dummy_page);
+ goto retry_alloc;
+ }
+ }
if (ret) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
@@ -3234,36 +3311,29 @@
goto err_binder_alloc_buf_failed;
}
if (secctx) {
+ int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, buf_offset,
- secctx, secctx_sz);
+ err = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, buf_offset,
+ secctx, secctx_sz);
+ if (err) {
+ t->security_ctx = 0;
+ WARN_ON(1);
+ }
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
+ t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
trace_binder_transaction_alloc_buf(t->buffer);
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer, 0,
- (const void __user *)
- (uintptr_t)tr->data.ptr.buffer,
- tr->data_size)) {
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
- proc->pid, thread->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -EFAULT;
- return_error_line = __LINE__;
- goto err_copy_data_failed;
- }
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer,
@@ -3308,14 +3378,39 @@
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
+ binder_size_t copy_size;
- binder_alloc_copy_from_buffer(&target_proc->alloc,
- &object_offset,
- t->buffer,
- buffer_offset,
- sizeof(object_offset));
- object_size = binder_get_object(target_proc, t->buffer,
- object_offset, &object);
+ if (binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &object_offset,
+ t->buffer,
+ buffer_offset,
+ sizeof(object_offset))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+
+ /*
+ * Copy the source user buffer up to the next object
+ * that will be processed.
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ copy_size))) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ object_size = binder_get_object(target_proc, user_buffer,
+ t->buffer, object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
@@ -3327,6 +3422,11 @@
return_error_line = __LINE__;
goto err_bad_offset;
}
+ /*
+			 * Set the offset to the next buffer fragment to be
+			 * copied.
+ */
+ user_offset = object_offset + object_size;
hdr = &object.hdr;
off_min = object_offset + object_size;
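From here on the transaction data is copied in fragments: plain data between objects is copied verbatim, while each object is written separately after validation and translation, so a sender racing against the copy cannot swap an object after it has been checked. A standalone sketch of the fragment step, using copy_from_user() in place of binder_alloc_copy_user_to_buffer() (names are illustrative):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch: copy the raw data between the previous object and the next one. */
static int copy_fragment(void *kbuf, const void __user *ubuf,
			 size_t user_offset, size_t object_offset)
{
	size_t copy_size = object_offset - user_offset;

	if (user_offset > object_offset)
		return -EINVAL;	/* object offsets must be ascending */
	if (copy_size &&
	    copy_from_user(kbuf + user_offset, ubuf + user_offset, copy_size))
		return -EFAULT;
	return 0;	/* caller advances user_offset past the object */
}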
@@ -3337,15 +3437,17 @@
fp = to_flat_binder_object(hdr);
ret = binder_translate_binder(fp, t, thread);
- if (ret < 0) {
+
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -3353,37 +3455,42 @@
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
- if (ret < 0) {
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
- int target_fd = binder_translate_fd(fp->fd, t, thread,
- in_reply_to);
+ binder_size_t fd_offset = object_offset +
+ (uintptr_t)&fp->fd - (uintptr_t)fp;
+ int ret = binder_translate_fd(fp->fd, fd_offset, t,
+ thread, in_reply_to);
- if (target_fd < 0) {
+ fp->pad_binder = 0;
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fp, sizeof(*fp))) {
return_error = BR_FAILED_REPLY;
- return_error_param = target_fd;
+ return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- fp->pad_binder = 0;
- fp->fd = target_fd;
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- fp, sizeof(*fp));
} break;
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
+ struct binder_object user_object;
+ size_t user_parent_size;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) /
@@ -3415,11 +3522,35 @@
return_error_line = __LINE__;
goto err_bad_parent;
}
- ret = binder_translate_fd_array(fda, parent, t, thread,
- in_reply_to);
- if (ret < 0) {
+ /*
+ * We need to read the user version of the parent
+				 * object to get the original user offset.
+ */
+ user_parent_size =
+ binder_get_object(proc, user_buffer, t->buffer,
+ parent_offset, &user_object);
+ if (user_parent_size != sizeof(user_object.bbo)) {
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
+ proc->pid, thread->pid,
+ user_parent_size,
+ sizeof(user_object.bbo));
return_error = BR_FAILED_REPLY;
- return_error_param = ret;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(&pf_head, fda,
+ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fda, sizeof(*fda));
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
@@ -3441,19 +3572,14 @@
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer,
- sg_buf_offset,
- (const void __user *)
- (uintptr_t)bp->buffer,
- bp->length)) {
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
- proc->pid, thread->pid);
- return_error_param = -EFAULT;
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+ (const void __user *)(uintptr_t)bp->buffer,
+ bp->length);
+ if (ret) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
return_error_line = __LINE__;
- goto err_copy_data_failed;
+ goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
@@ -3462,20 +3588,22 @@
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
- ret = binder_fixup_parent(t, thread, bp,
+ ret = binder_fixup_parent(&pf_head, t,
+ thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
last_fixup_min_off);
- if (ret < 0) {
+ if (ret < 0 ||
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ bp, sizeof(*bp))) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
- binder_alloc_copy_to_buffer(&target_proc->alloc,
- t->buffer, object_offset,
- bp, sizeof(*bp));
last_fixup_obj_off = object_offset;
last_fixup_min_off = 0;
} break;
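binder_defer_copy() only records the scatter-gather copy here; the data itself is moved later by binder_do_deferred_txn_copies(), after all parent fixups are known, so fixed-up pointer values are never overwritten by a subsequent bulk copy. A sketch of the bookkeeping this implies (the struct and helper names are illustrative, not the patch's own):

#include <linux/list.h>
#include <linux/slab.h>

/* Sketch: one pending sg copy, queued until all fixups are recorded. */
struct sg_copy {
	size_t offset;			 /* destination offset in txn buffer */
	const void __user *sender_uaddr; /* source in sender address space */
	size_t length;
	struct list_head node;
};

static int defer_copy(struct list_head *sgc_head, size_t offset,
		      const void __user *sender_uaddr, size_t length)
{
	struct sg_copy *sgc = kzalloc(sizeof(*sgc), GFP_KERNEL);

	if (!sgc)
		return -ENOMEM;
	sgc->offset = offset;
	sgc->sender_uaddr = sender_uaddr;
	sgc->length = length;
	list_add_tail(&sgc->node, sgc_head);
	return 0;
}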
@@ -3488,21 +3616,51 @@
goto err_bad_object_type;
}
}
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ /* Done processing objects, copy the rest of the buffer */
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ tr->data_size - user_offset)) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+ &sgc_head, &pf_head);
+ if (ret) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ if (t->buffer->oneway_spam_suspect)
+ tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+ else
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
+ return_error = BR_DEAD_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+ target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
+ trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
@@ -3520,7 +3678,9 @@
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
- if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ return_error = binder_proc_transaction(t,
+ target_proc, target_thread);
+ if (return_error) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
@@ -3530,7 +3690,8 @@
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
binder_enqueue_thread_work(thread, tcomplete);
- if (!binder_proc_transaction(t, target_proc, NULL))
+ return_error = binder_proc_transaction(t, target_proc, NULL);
+ if (return_error)
goto err_dead_proc_or_thread;
}
if (target_thread)
@@ -3547,7 +3708,6 @@
return;
err_dead_proc_or_thread:
- return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
@@ -3555,8 +3715,10 @@
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
+ binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
- binder_transaction_buffer_release(target_proc, t->buffer,
+ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
@@ -3613,6 +3775,7 @@
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
+ trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
@@ -3621,6 +3784,52 @@
thread->return_error.cmd = return_error;
binder_enqueue_thread_work(thread, &thread->return_error.work);
}
+}
+
+/**
+ * binder_free_buf() - free the specified buffer
+ * @proc:	binder proc that owns the buffer
+ * @thread:	binder thread performing the buffer release
+ * @buffer:	buffer to be freed
+ * @is_failure:	failed to send transaction
+ *
+ * If the buffer is for an async transaction, enqueue the next async
+ * transaction from the node.
+ *
+ * Cleanup the buffer and free it.
+ */
+static void
+binder_free_buf(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_buffer *buffer, bool is_failure)
+{
+ binder_inner_proc_lock(proc);
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ binder_inner_proc_unlock(proc);
+ if (buffer->async_transaction && buffer->target_node) {
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = false;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
+ }
+ trace_binder_transaction_buffer_release(buffer);
+ binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+ binder_alloc_free_buf(&proc->alloc, buffer);
}
static int binder_thread_write(struct binder_proc *proc,
@@ -3813,35 +4022,7 @@
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
-
- binder_inner_proc_lock(proc);
- if (buffer->transaction) {
- buffer->transaction->buffer = NULL;
- buffer->transaction = NULL;
- }
- binder_inner_proc_unlock(proc);
- if (buffer->async_transaction && buffer->target_node) {
- struct binder_node *buf_node;
- struct binder_work *w;
-
- buf_node = buffer->target_node;
- binder_node_inner_lock(buf_node);
- BUG_ON(!buf_node->has_async_transaction);
- BUG_ON(buf_node->proc != proc);
- w = binder_dequeue_work_head_ilocked(
- &buf_node->async_todo);
- if (!w) {
- buf_node->has_async_transaction = false;
- } else {
- binder_enqueue_work_ilocked(
- w, &proc->todo);
- binder_wakeup_proc_ilocked(proc);
- }
- binder_node_inner_unlock(buf_node);
- }
- trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, 0, false);
- binder_alloc_free_buf(&proc->alloc, buffer);
+ binder_free_buf(proc, thread, buffer, false);
break;
}
@@ -3887,6 +4068,7 @@
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
binder_inner_proc_unlock(proc);
+ trace_android_vh_binder_looper_state_registered(thread, proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -4148,18 +4330,84 @@
if (do_proc_work)
list_add(&thread->waiting_thread_node,
&proc->waiting_threads);
+ trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
binder_inner_proc_unlock(proc);
schedule();
binder_inner_proc_lock(proc);
list_del_init(&thread->waiting_thread_node);
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -EINTR;
break;
}
}
finish_wait(&thread->wait, &wait);
binder_inner_proc_unlock(proc);
freezer_count();
+
+ return ret;
+}
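Returning -EINTR here (and below in binder_ioctl()) instead of -ERESTARTSYS means an interrupted wait is reported to userspace rather than transparently restarted, which matters once processes can be frozen mid-wait. Callers are expected to retry; a userspace sketch (binder_write_read_retry is illustrative):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/*
 * Sketch: BINDER_WRITE_READ updates write_consumed/read_consumed in
 * place, so simply re-issuing the ioctl after EINTR resumes where the
 * interrupted call left off.
 */
static int binder_write_read_retry(int fd, struct binder_write_read *bwr)
{
	int ret;

	do {
		ret = ioctl(fd, BINDER_WRITE_READ, bwr);
	} while (ret < 0 && errno == EINTR);

	return ret;
}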
+
+/**
+ * binder_apply_fd_fixups() - finish fd translation
+ * @proc:	binder_proc associated with @t->buffer
+ * @t: binder transaction with list of fd fixups
+ *
+ * Now that we are in the context of the transaction target
+ * process, we can allocate and install fds. Process the
+ * list of fds to translate and fixup the buffer with the
+ * new fds.
+ *
+ * If we fail to allocate an fd, then free the resources by
+ * fput'ing files that have not been processed and deferring the
+ * close of any fds that have already been allocated.
+ */
+static int binder_apply_fd_fixups(struct binder_proc *proc,
+ struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+ int ret = 0;
+
+ list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+
+ if (fd < 0) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "failed fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ ret = -ENOMEM;
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ trace_binder_transaction_fd_recv(t, fd, fixup->offset);
+ fd_install(fd, fixup->file);
+ fixup->file = NULL;
+ if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+ fixup->offset, &fd,
+ sizeof(u32))) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ if (fixup->file) {
+ fput(fixup->file);
+ } else if (ret) {
+ u32 fd;
+ int err;
+
+ err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+ t->buffer,
+ fixup->offset,
+ sizeof(fd));
+ WARN_ON(err);
+ if (!err)
+ binder_deferred_fd_close(fd);
+ }
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
return ret;
}
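binder_deferred_fd_close(), used in the error path above, is defined elsewhere in the patch; it cannot close the fd synchronously because the close may drop the last reference to a binder file and re-enter the driver. A rough sketch of the task_work deferral it is built on, assuming the notify-mode task_work API (names here are illustrative):

#include <linux/fdtable.h>
#include <linux/slab.h>
#include <linux/task_work.h>

/* Sketch: close an fd from a safe context, on return to userspace. */
struct fd_close_work {
	struct callback_head twork;
	int fd;
};

static void do_fd_close(struct callback_head *twork)
{
	struct fd_close_work *w =
		container_of(twork, struct fd_close_work, twork);

	close_fd(w->fd);	/* runs in task context, no binder locks held */
	kfree(w);
}

static int deferred_fd_close(int fd)
{
	struct fd_close_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	w->fd = fd;
	init_task_work(&w->twork, do_fd_close);
	return task_work_add(current, &w->twork, TWA_RESUME);
}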
@@ -4200,6 +4448,7 @@
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
+ trace_android_vh_binder_restore_priority(NULL, current);
binder_restore_priority(current, proc->default_priority);
}
@@ -4244,6 +4493,7 @@
binder_inner_proc_unlock(proc);
break;
}
+ trace_android_vh_binder_thread_read(&list, proc, thread);
w = binder_dequeue_work_head_ilocked(list);
if (binder_worklist_empty_ilocked(&thread->todo))
thread->process_todo = false;
@@ -4267,9 +4517,14 @@
binder_stat_br(proc, thread, cmd);
} break;
- case BINDER_WORK_TRANSACTION_COMPLETE: {
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+ if (proc->oneway_spam_detection_enabled &&
+ w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+ cmd = BR_ONEWAY_SPAM_SUSPECT;
+ else
+ cmd = BR_TRANSACTION_COMPLETE;
binder_inner_proc_unlock(proc);
- cmd = BR_TRANSACTION_COMPLETE;
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
if (put_user(cmd, (uint32_t __user *)ptr))
@@ -4409,6 +4664,11 @@
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+ default:
+ binder_inner_proc_unlock(proc);
+ pr_err("%d:%d: bad work type %d\n",
+ proc->pid, thread->pid, w->type);
+ break;
}
if (!t)
@@ -4442,10 +4702,39 @@
trd->sender_pid =
task_tgid_nr_ns(sender,
task_active_pid_ns(current));
+ trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
} else {
trd->sender_pid = 0;
}
+ ret = binder_apply_fd_fixups(proc, t);
+ if (ret) {
+ struct binder_buffer *buffer = t->buffer;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ int tid = t->debug_id;
+
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
+ buffer->transaction = NULL;
+ binder_cleanup_transaction(t, "fd fixups failed",
+ BR_FAILED_REPLY);
+ binder_free_buf(proc, thread, buffer, true);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+ proc->pid, thread->pid,
+ oneway ? "async " :
+ (cmd == BR_REPLY ? "reply " : ""),
+ tid, BR_FAILED_REPLY, ret, __LINE__);
+ if (cmd == BR_REPLY) {
+ cmd = BR_FAILED_REPLY;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ binder_stat_br(proc, thread, cmd);
+ break;
+ }
+ continue;
+ }
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
@@ -4654,9 +4943,14 @@
static void binder_free_proc(struct binder_proc *proc)
{
struct binder_device *device;
+ struct binder_proc_ext *eproc =
+ container_of(proc, struct binder_proc_ext, proc);
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ if (proc->outstanding_txns)
+ pr_warn("%s: Unexpected outstanding_txns %d\n",
+ __func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
kfree(proc->context->name);
@@ -4664,9 +4958,10 @@
}
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
- put_cred(proc->cred);
+ put_cred(eproc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
- kfree(proc);
+ trace_android_vh_binder_free_proc(proc);
+ kfree(eproc);
}
static void binder_free_thread(struct binder_thread *thread)
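binder_get_cred() and struct binder_proc_ext are not visible in this hunk; from their use here, the cred has moved out of struct binder_proc into a wrapper so the core struct layout (and thus the KMI) stays unchanged. A sketch of what binder_internal.h presumably provides:

/*
 * Sketch: the wrapper embeds binder_proc as its first member, so
 * container_of() recovers the extension from any proc pointer.
 */
struct binder_proc_ext {
	struct binder_proc proc;
	const struct cred *cred;
};

static inline const struct cred *binder_get_cred(struct binder_proc *proc)
{
	struct binder_proc_ext *eproc =
		container_of(proc, struct binder_proc_ext, proc);

	return eproc->cred;
}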
@@ -4705,6 +5000,8 @@
spin_lock(&t->lock);
if (t->to_thread == thread)
send_reply = t;
+ } else {
+ __acquire(&t->lock);
}
thread->is_dead = true;
@@ -4718,6 +5015,7 @@
(t->to_thread == thread) ? "in" : "out");
if (t->to_thread == thread) {
+ thread->proc->outstanding_txns--;
t->to_proc = NULL;
t->to_thread = NULL;
if (t->buffer) {
@@ -4733,7 +5031,11 @@
spin_unlock(&last_t->lock);
if (t)
spin_lock(&t->lock);
+ else
+ __acquire(&t->lock);
}
+	/* annotation for sparse: the lock was not acquired in the last iteration above */
+ __release(&t->lock);
/*
* If this thread used poll, make sure we remove the waitqueue from any
@@ -4757,6 +5059,7 @@
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(proc, &thread->todo);
+ trace_android_vh_binder_thread_release(proc, thread);
binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -4833,6 +5136,7 @@
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
+ trace_android_vh_binder_read_done(proc, thread);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -4867,7 +5171,7 @@
ret = -EBUSY;
goto out;
}
- ret = security_binder_set_context_mgr(proc->cred);
+ ret = security_binder_set_context_mgr(binder_get_cred(proc));
if (ret < 0)
goto out;
if (uid_valid(context->binder_context_mgr_uid)) {
@@ -4957,6 +5261,100 @@
}
}
binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ struct binder_thread *thread;
+
+ if (proc->outstanding_txns > 0)
+ return true;
+
+ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->transaction_stack)
+ return true;
+ }
+ return false;
+}
+
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ struct binder_proc *target_proc)
+{
+ int ret = 0;
+
+ if (!info->enable) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ return 0;
+ }
+
+ /*
+	 * Freezing the target: prevent new transactions by
+	 * setting the frozen state. If a timeout is specified,
+	 * wait for pending transactions to drain.
+ */
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = true;
+ binder_inner_proc_unlock(target_proc);
+
+ if (info->timeout_ms > 0)
+ ret = wait_event_interruptible_timeout(
+ target_proc->freeze_wait,
+ (!target_proc->outstanding_txns),
+ msecs_to_jiffies(info->timeout_ms));
+
+	/* Check for pending transactions that are waiting for a reply */
+ if (ret >= 0) {
+ binder_inner_proc_lock(target_proc);
+ if (binder_txns_pending_ilocked(target_proc))
+ ret = -EAGAIN;
+ binder_inner_proc_unlock(target_proc);
+ }
+
+ if (ret < 0) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ }
+
+ return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+ struct binder_frozen_status_info *info)
+{
+ struct binder_proc *target_proc;
+ bool found = false;
+ __u32 txns_pending;
+
+ info->sync_recv = 0;
+ info->async_recv = 0;
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info->pid) {
+ found = true;
+ binder_inner_proc_lock(target_proc);
+ txns_pending = binder_txns_pending_ilocked(target_proc);
+ info->sync_recv |= target_proc->sync_recv |
+ (txns_pending << 1);
+ info->async_recv |= target_proc->async_recv;
+ binder_inner_proc_unlock(target_proc);
+ }
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ if (!found)
+ return -EINVAL;
return 0;
}
@@ -5079,6 +5477,96 @@
}
break;
}
+ case BINDER_FREEZE: {
+ struct binder_freeze_info info;
+ struct binder_proc **target_procs = NULL, *target_proc;
+ int target_procs_count = 0, i = 0;
+
+ ret = 0;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info.pid)
+ target_procs_count++;
+ }
+
+ if (target_procs_count == 0) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ target_procs = kcalloc(target_procs_count,
+ sizeof(struct binder_proc *),
+ GFP_KERNEL);
+
+ if (!target_procs) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid != info.pid)
+ continue;
+
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+
+ target_procs[i++] = target_proc;
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ for (i = 0; i < target_procs_count; i++) {
+ if (ret >= 0)
+ ret = binder_ioctl_freeze(&info,
+ target_procs[i]);
+
+ binder_proc_dec_tmpref(target_procs[i]);
+ }
+
+ kfree(target_procs);
+
+ if (ret < 0)
+ goto err;
+ break;
+ }
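A userspace sketch of driving BINDER_FREEZE, assuming the binder_freeze_info UAPI added alongside this code (pid, enable, timeout_ms); -EAGAIN indicates transactions were still pending when the timeout expired:

#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Sketch: freeze or unfreeze every binder context of a given pid. */
static int set_binder_frozen(int binder_fd, __u32 pid, int enable)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = enable ? 1 : 0,
		/* wait up to 1s for outstanding transactions to drain */
		.timeout_ms = enable ? 1000 : 0,
	};

	return ioctl(binder_fd, BINDER_FREEZE, &info);
}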
+ case BINDER_GET_FROZEN_INFO: {
+ struct binder_frozen_status_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_freezer_info(&info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
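As set up in binder_ioctl_get_freezer_info() above, bit 0 of sync_recv reports a sync transaction received while frozen and bit 1 reports transactions still pending; a userspace sketch of decoding the result:

#include <stdio.h>
#include <linux/android/binder.h>

/* Sketch: unpack the two flags multiplexed into sync_recv. */
static void print_frozen_status(const struct binder_frozen_status_info *info)
{
	unsigned int sync_recv = info->sync_recv & 1;
	unsigned int txns_pending = (info->sync_recv >> 1) & 1;

	printf("pid %u: sync_recv=%u txns_pending=%u async_recv=%u\n",
	       info->pid, sync_recv, txns_pending, info->async_recv);
}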
+ case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+ uint32_t enable;
+
+ if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ binder_inner_proc_lock(proc);
+ proc->oneway_spam_detection_enabled = (bool)enable;
+ binder_inner_proc_unlock(proc);
+ break;
+ }
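Enabling the detector is a per-proc boolean; once set, a suspect async buffer surfaces as BR_ONEWAY_SPAM_SUSPECT instead of BR_TRANSACTION_COMPLETE (see the read path above). A userspace sketch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Sketch: opt this process into oneway spam detection. */
static int enable_oneway_spam_detection(int binder_fd)
{
	uint32_t enable = 1;

	return ioctl(binder_fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable);
}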
default:
ret = -EINVAL;
goto err;
@@ -5088,7 +5576,7 @@
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
- if (ret && ret != -ERESTARTSYS)
+ if (ret && ret != -EINTR)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
@@ -5116,7 +5604,6 @@
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
@@ -5132,15 +5619,10 @@
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
- int ret;
struct binder_proc *proc = filp->private_data;
- const char *failure_string;
if (proc->tsk != current->group_leader)
return -EINVAL;
-
- if ((vma->vm_end - vma->vm_start) > SZ_4M)
- vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
@@ -5149,9 +5631,9 @@
(unsigned long)pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
- ret = -EPERM;
- failure_string = "bad vm_flags";
- goto err_bad_arg;
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
+ return -EPERM;
}
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
@@ -5159,40 +5641,32 @@
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- ret = binder_alloc_mmap_handler(&proc->alloc, vma);
- if (ret)
- return ret;
- mutex_lock(&proc->files_lock);
- proc->files = get_files_struct(current);
- mutex_unlock(&proc->files_lock);
- return 0;
-
-err_bad_arg:
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
- proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
- return ret;
+ return binder_alloc_mmap_handler(&proc->alloc, vma);
}
static int binder_open(struct inode *nodp, struct file *filp)
{
- struct binder_proc *proc;
+ struct binder_proc *proc, *itr;
+ struct binder_proc_ext *eproc;
struct binder_device *binder_dev;
struct binderfs_info *info;
struct dentry *binder_binderfs_dir_entry_proc = NULL;
+ bool existing_pid = false;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
- proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
+ proc = &eproc->proc;
if (proc == NULL)
return -ENOMEM;
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
- mutex_init(&proc->files_lock);
- proc->cred = get_cred(filp->f_cred);
+ eproc->cred = get_cred(filp->f_cred);
INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->freeze_wait);
if (binder_supported_policy(current->policy)) {
proc->default_priority.sched_policy = current->policy;
proc->default_priority.prio = current->normal_prio;
@@ -5221,19 +5695,24 @@
filp->private_data = proc;
mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr->pid == proc->pid) {
+ existing_pid = true;
+ break;
+ }
+ }
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
-
- if (binder_debugfs_dir_entry_proc) {
+ trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock);
+ if (binder_debugfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
- * proc debug entries are shared between contexts, so
- * this will fail if the process tries to open the driver
- * again with a different context. The priting code will
- * anyway print all contexts that a given PID has, so this
- * is not a problem.
+ * proc debug entries are shared between contexts.
+		 * Only create for the first PID, to avoid debugfs log spamming.
+		 * The printing code will print all contexts for a given PID
+		 * anyway, so this is not a problem.
*/
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
@@ -5241,19 +5720,16 @@
&proc_fops);
}
- if (binder_binderfs_dir_entry_proc) {
+ if (binder_binderfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
struct dentry *binderfs_entry;
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
* Similar to debugfs, the process specific log file is shared
- * between contexts. If the file has already been created for a
- * process, the following binderfs_create_file() call will
- * fail with error code EEXIST if another context of the same
- * process invoked binder_open(). This is ok since same as
- * debugfs, the log file will contain information on all
- * contexts of a given PID.
+ * between contexts. Only create for the first PID.
+		 * This is ok since, as with debugfs, the log file will contain
+		 * information on all contexts of a given PID.
*/
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
@@ -5263,10 +5739,8 @@
int error;
error = PTR_ERR(binderfs_entry);
- if (error != -EEXIST) {
- pr_warn("Unable to create file %s in binderfs (error %d)\n",
- strbuf, error);
- }
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
}
}
@@ -5391,8 +5865,6 @@
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->files);
-
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -5414,6 +5886,9 @@
proc->tmp_ref++;
proc->is_dead = true;
+ proc->is_frozen = false;
+ proc->sync_recv = false;
+ proc->async_recv = false;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
@@ -5474,7 +5949,6 @@
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
- struct files_struct *files;
int defer;
@@ -5492,23 +5966,11 @@
}
mutex_unlock(&binder_deferred_lock);
- files = NULL;
- if (defer & BINDER_DEFERRED_PUT_FILES) {
- mutex_lock(&proc->files_lock);
- files = proc->files;
- if (files)
- proc->files = NULL;
- mutex_unlock(&proc->files_lock);
- }
-
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
-
- if (files)
- put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5535,6 +5997,7 @@
struct binder_buffer *buffer = t->buffer;
spin_lock(&t->lock);
+ trace_android_vh_binder_print_transaction_info(m, proc, prefix, t);
to_proc = t->to_proc;
seq_printf(m,
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
@@ -5779,7 +6242,9 @@
"BR_FINISHED",
"BR_DEAD_BINDER",
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- "BR_FAILED_REPLY"
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
};
static const char * const binder_command_strings[] = {
@@ -5920,8 +6385,7 @@
print_binder_stats(m, " ", &proc->stats);
}
-
-int binder_state_show(struct seq_file *m, void *unused)
+static int state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -5960,7 +6424,7 @@
return 0;
}
-int binder_stats_show(struct seq_file *m, void *unused)
+static int stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -5976,7 +6440,7 @@
return 0;
}
-int binder_transactions_show(struct seq_file *m, void *unused)
+static int transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -6032,7 +6496,7 @@
"\n" : " (incomplete)\n");
}
-int binder_transaction_log_show(struct seq_file *m, void *unused)
+static int transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
unsigned int log_cur = atomic_read(&log->cur);
@@ -6057,11 +6521,50 @@
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
- .compat_ioctl = binder_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
+};
+
+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(stats);
+DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transaction_log);
+
+const struct binder_debugfs_entry binder_debugfs_entries[] = {
+ {
+ .name = "state",
+ .mode = 0444,
+ .fops = &state_fops,
+ .data = NULL,
+ },
+ {
+ .name = "stats",
+ .mode = 0444,
+ .fops = &stats_fops,
+ .data = NULL,
+ },
+ {
+ .name = "transactions",
+ .mode = 0444,
+ .fops = &transactions_fops,
+ .data = NULL,
+ },
+ {
+ .name = "transaction_log",
+ .mode = 0444,
+ .fops = &transaction_log_fops,
+ .data = &binder_transaction_log,
+ },
+ {
+ .name = "failed_transaction_log",
+ .mode = 0444,
+ .fops = &transaction_log_fops,
+ .data = &binder_transaction_log_failed,
+ },
+ {} /* terminator */
};
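The table is terminated by an empty entry, which suggests the iteration macro used in init below is a simple NULL-name walk; a sketch of what binder_internal.h presumably declares:

/* Sketch: iterate the table above until the empty terminator entry. */
#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)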
static int __init init_binder_device(const char *name)
@@ -6109,36 +6612,18 @@
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
- if (binder_debugfs_dir_entry_root)
+ if (binder_debugfs_dir_entry_root) {
+ const struct binder_debugfs_entry *db_entry;
+
+ binder_for_each_debugfs_entry(db_entry)
+ debugfs_create_file(db_entry->name,
+ db_entry->mode,
+ binder_debugfs_dir_entry_root,
+ db_entry->data,
+ db_entry->fops);
+
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
-
- if (binder_debugfs_dir_entry_root) {
- debugfs_create_file("state",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_state_fops);
- debugfs_create_file("stats",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_stats_fops);
- debugfs_create_file("transactions",
- 0444,
- binder_debugfs_dir_entry_root,
- NULL,
- &binder_transactions_fops);
- debugfs_create_file("transaction_log",
- 0444,
- binder_debugfs_dir_entry_root,
- &binder_transaction_log,
- &binder_transaction_log_fops);
- debugfs_create_file("failed_transaction_log",
- 0444,
- binder_debugfs_dir_entry_root,
- &binder_transaction_log_failed,
- &binder_transaction_log_fops);
}
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
@@ -6186,5 +6671,6 @@
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);
MODULE_LICENSE("GPL v2");
--
Gitblit v1.6.2