2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/android/binder.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* binder.c
  *
  * Android IPC Subsystem
  *
  * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
  */

 /*
@@ -66,20 +57,25 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
 #include <linux/spinlock.h>
 #include <linux/ratelimit.h>
+#include <linux/syscalls.h>
+#include <linux/task_work.h>
+#include <linux/sizes.h>
+#include <linux/android_vendor.h>

-#include <uapi/linux/android/binder.h>
 #include <uapi/linux/sched/types.h>
+#include <uapi/linux/android/binder.h>

 #include <asm/cacheflush.h>

-#include "binder_alloc.h"
 #include "binder_internal.h"
 #include "binder_trace.h"
+#include <trace/hooks/binder.h>

 static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
@@ -97,15 +93,6 @@

 static int proc_show(struct seq_file *m, void *unused);
 DEFINE_SHOW_ATTRIBUTE(proc);
-
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif

 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

@@ -174,24 +161,6 @@
 #define to_binder_fd_array_object(hdr) \
 	container_of(hdr, struct binder_fd_array_object, hdr)

-enum binder_stat_types {
-	BINDER_STAT_PROC,
-	BINDER_STAT_THREAD,
-	BINDER_STAT_NODE,
-	BINDER_STAT_REF,
-	BINDER_STAT_DEATH,
-	BINDER_STAT_TRANSACTION,
-	BINDER_STAT_TRANSACTION_COMPLETE,
-	BINDER_STAT_COUNT
-};
-
-struct binder_stats {
-	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
-	atomic_t obj_created[BINDER_STAT_COUNT];
-	atomic_t obj_deleted[BINDER_STAT_COUNT];
-};
-
 static struct binder_stats binder_stats;

 static inline void binder_stats_deleted(enum binder_stat_types type)
@@ -204,8 +173,32 @@
 	atomic_inc(&binder_stats.obj_created[type]);
 }

-struct binder_transaction_log binder_transaction_log;
-struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log_entry {
+	int debug_id;
+	int debug_id_done;
+	int call_type;
+	int from_proc;
+	int from_thread;
+	int target_handle;
+	int to_proc;
+	int to_thread;
+	int to_node;
+	int data_size;
+	int offsets_size;
+	int return_error_line;
+	uint32_t return_error;
+	uint32_t return_error_param;
+	char context_name[BINDERFS_MAX_NAME + 1];
+};
+
+struct binder_transaction_log {
+	atomic_t cur;
+	bool full;
+	struct binder_transaction_log_entry entry[32];
+};
+
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;

 static struct binder_transaction_log_entry *binder_transaction_log_add(
 	struct binder_transaction_log *log)
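[Note: the transaction log above is a fixed 32-entry ring indexed by an atomic cursor, so writers never take a lock and the oldest records are simply overwritten once `full` is set. The body of binder_transaction_log_add() is not part of this hunk; the following is only a sketch of how such a cursor is typically consumed, inferred from the struct definition, not taken from the patch:

	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	memset(e, 0, sizeof(*e));	/* recycle the slot for the new record */
	return e;
]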
@@ -227,307 +220,9 @@
 	return e;
 }

-/**
- * struct binder_work - work enqueued on a worklist
- * @entry: node enqueued on list
- * @type: type of work to be performed
- *
- * There are separate work lists for proc, thread, and node (async).
- */
-struct binder_work {
-	struct list_head entry;
-
-	enum binder_work_type {
-		BINDER_WORK_TRANSACTION = 1,
-		BINDER_WORK_TRANSACTION_COMPLETE,
-		BINDER_WORK_RETURN_ERROR,
-		BINDER_WORK_NODE,
-		BINDER_WORK_DEAD_BINDER,
-		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
-		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
-	} type;
-};
-
-struct binder_error {
-	struct binder_work work;
-	uint32_t cmd;
-};
-
-/**
- * struct binder_node - binder node bookkeeping
- * @debug_id: unique ID for debugging
- * (invariant after initialized)
- * @lock: lock for node fields
- * @work: worklist element for node work
- * (protected by @proc->inner_lock)
- * @rb_node: element for proc->nodes tree
- * (protected by @proc->inner_lock)
- * @dead_node: element for binder_dead_nodes list
- * (protected by binder_dead_nodes_lock)
- * @proc: binder_proc that owns this node
- * (invariant after initialized)
- * @refs: list of references on this node
- * (protected by @lock)
- * @internal_strong_refs: used to take strong references when
- * initiating a transaction
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @local_weak_refs: weak user refs from local process
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @local_strong_refs: strong user refs from local process
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @tmp_refs: temporary kernel refs
- * (protected by @proc->inner_lock while @proc
- * is valid, and by binder_dead_nodes_lock
- * if @proc is NULL. During inc/dec and node release
- * it is also protected by @lock to provide safety
- * as the node dies and @proc becomes NULL)
- * @ptr: userspace pointer for node
- * (invariant, no lock needed)
- * @cookie: userspace cookie for node
- * (invariant, no lock needed)
- * @has_strong_ref: userspace notified of strong ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @pending_strong_ref: userspace has acked notification of strong ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @has_weak_ref: userspace notified of weak ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @pending_weak_ref: userspace has acked notification of weak ref
- * (protected by @proc->inner_lock if @proc
- * and by @lock)
- * @has_async_transaction: async transaction to node in progress
- * (protected by @lock)
- * @sched_policy: minimum scheduling policy for node
- * (invariant after initialized)
- * @accept_fds: file descriptor operations supported for node
- * (invariant after initialized)
- * @min_priority: minimum scheduling priority
- * (invariant after initialized)
- * @inherit_rt: inherit RT scheduling policy from caller
- * @txn_security_ctx: require sender's security context
- * (invariant after initialized)
- * @async_todo: list of async work items
- * (protected by @proc->inner_lock)
- *
- * Bookkeeping structure for binder nodes.
- */
-struct binder_node {
-	int debug_id;
-	spinlock_t lock;
-	struct binder_work work;
-	union {
-		struct rb_node rb_node;
-		struct hlist_node dead_node;
-	};
-	struct binder_proc *proc;
-	struct hlist_head refs;
-	int internal_strong_refs;
-	int local_weak_refs;
-	int local_strong_refs;
-	int tmp_refs;
-	binder_uintptr_t ptr;
-	binder_uintptr_t cookie;
-	struct {
-		/*
-		 * bitfield elements protected by
-		 * proc inner_lock
-		 */
-		u8 has_strong_ref:1;
-		u8 pending_strong_ref:1;
-		u8 has_weak_ref:1;
-		u8 pending_weak_ref:1;
-	};
-	struct {
-		/*
-		 * invariant after initialization
-		 */
-		u8 sched_policy:2;
-		u8 inherit_rt:1;
-		u8 accept_fds:1;
-		u8 txn_security_ctx:1;
-		u8 min_priority;
-	};
-	bool has_async_transaction;
-	struct list_head async_todo;
-};
-
-struct binder_ref_death {
-	/**
-	 * @work: worklist element for death notifications
-	 * (protected by inner_lock of the proc that
-	 * this ref belongs to)
-	 */
-	struct binder_work work;
-	binder_uintptr_t cookie;
-};
-
-/**
- * struct binder_ref_data - binder_ref counts and id
- * @debug_id: unique ID for the ref
- * @desc: unique userspace handle for ref
- * @strong: strong ref count (debugging only if not locked)
- * @weak: weak ref count (debugging only if not locked)
- *
- * Structure to hold ref count and ref id information. Since
- * the actual ref can only be accessed with a lock, this structure
- * is used to return information about the ref to callers of
- * ref inc/dec functions.
- */
-struct binder_ref_data {
-	int debug_id;
-	uint32_t desc;
-	int strong;
-	int weak;
-};
-
-/**
- * struct binder_ref - struct to track references on nodes
- * @data: binder_ref_data containing id, handle, and current refcounts
- * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
- * @rb_node_node: node for lookup by @node in proc's rb_tree
- * @node_entry: list entry for node->refs list in target node
- * (protected by @node->lock)
- * @proc: binder_proc containing ref
- * @node: binder_node of target node. When cleaning up a
- * ref for deletion in binder_cleanup_ref, a non-NULL
- * @node indicates the node must be freed
- * @death: pointer to death notification (ref_death) if requested
- * (protected by @node->lock)
- *
- * Structure to track references from procA to target node (on procB). This
- * structure is unsafe to access without holding @proc->outer_lock.
- */
-struct binder_ref {
-	/* Lookups needed: */
-	/* node + proc => ref (transaction) */
-	/* desc + proc => ref (transaction, inc/dec ref) */
-	/* node => refs + procs (proc exit) */
-	struct binder_ref_data data;
-	struct rb_node rb_node_desc;
-	struct rb_node rb_node_node;
-	struct hlist_node node_entry;
-	struct binder_proc *proc;
-	struct binder_node *node;
-	struct binder_ref_death *death;
-};
-
 enum binder_deferred_state {
-	BINDER_DEFERRED_PUT_FILES = 0x01,
-	BINDER_DEFERRED_FLUSH = 0x02,
-	BINDER_DEFERRED_RELEASE = 0x04,
-};
-
-/**
- * struct binder_priority - scheduler policy and priority
- * @sched_policy scheduler policy
- * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
- *
- * The binder driver supports inheriting the following scheduler policies:
- * SCHED_NORMAL
- * SCHED_BATCH
- * SCHED_FIFO
- * SCHED_RR
- */
-struct binder_priority {
-	unsigned int sched_policy;
-	int prio;
-};
-
-/**
- * struct binder_proc - binder process bookkeeping
- * @proc_node: element for binder_procs list
- * @threads: rbtree of binder_threads in this proc
- * (protected by @inner_lock)
- * @nodes: rbtree of binder nodes associated with
- * this proc ordered by node->ptr
- * (protected by @inner_lock)
- * @refs_by_desc: rbtree of refs ordered by ref->desc
- * (protected by @outer_lock)
- * @refs_by_node: rbtree of refs ordered by ref->node
- * (protected by @outer_lock)
- * @waiting_threads: threads currently waiting for proc work
- * (protected by @inner_lock)
- * @pid PID of group_leader of process
- * (invariant after initialized)
- * @tsk task_struct for group_leader of process
- * (invariant after initialized)
- * @files files_struct for process
- * (protected by @files_lock)
- * @files_lock mutex to protect @files
- * @cred struct cred associated with the `struct file`
- * in binder_open()
- * (invariant after initialized)
- * @deferred_work_node: element for binder_deferred_list
- * (protected by binder_deferred_lock)
- * @deferred_work: bitmap of deferred work to perform
- * (protected by binder_deferred_lock)
- * @is_dead: process is dead and awaiting free
- * when outstanding transactions are cleaned up
- * (protected by @inner_lock)
- * @todo: list of work for this process
- * (protected by @inner_lock)
- * @stats: per-process binder statistics
- * (atomics, no lock needed)
- * @delivered_death: list of delivered death notification
- * (protected by @inner_lock)
- * @max_threads: cap on number of binder threads
- * (protected by @inner_lock)
- * @requested_threads: number of binder threads requested but not
- * yet started. In current implementation, can
- * only be 0 or 1.
- * (protected by @inner_lock)
- * @requested_threads_started: number binder threads started
- * (protected by @inner_lock)
- * @tmp_ref: temporary reference to indicate proc is in use
- * (protected by @inner_lock)
- * @default_priority: default scheduler priority
- * (invariant after initialized)
- * @debugfs_entry: debugfs node
- * @alloc: binder allocator bookkeeping
- * @context: binder_context for this proc
- * (invariant after initialized)
- * @inner_lock: can nest under outer_lock and/or node lock
- * @outer_lock: no nesting under innor or node lock
- * Lock order: 1) outer, 2) node, 3) inner
- * @binderfs_entry: process-specific binderfs log file
- *
- * Bookkeeping structure for binder processes
- */
-struct binder_proc {
-	struct hlist_node proc_node;
-	struct rb_root threads;
-	struct rb_root nodes;
-	struct rb_root refs_by_desc;
-	struct rb_root refs_by_node;
-	struct list_head waiting_threads;
-	int pid;
-	struct task_struct *tsk;
-	struct files_struct *files;
-	struct mutex files_lock;
-	const struct cred *cred;
-	struct hlist_node deferred_work_node;
-	int deferred_work;
-	bool is_dead;
-
-	struct list_head todo;
-	struct binder_stats stats;
-	struct list_head delivered_death;
-	int max_threads;
-	int requested_threads;
-	int requested_threads_started;
-	int tmp_ref;
-	struct binder_priority default_priority;
-	struct dentry *debugfs_entry;
-	struct binder_alloc alloc;
-	struct binder_context *context;
-	spinlock_t inner_lock;
-	spinlock_t outer_lock;
-	struct dentry *binderfs_entry;
+	BINDER_DEFERRED_FLUSH = 0x01,
+	BINDER_DEFERRED_RELEASE = 0x02,
 };

 enum {
@@ -540,110 +235,6 @@
 };

 /**
- * struct binder_thread - binder thread bookkeeping
- * @proc: binder process for this thread
- * (invariant after initialization)
- * @rb_node: element for proc->threads rbtree
- * (protected by @proc->inner_lock)
- * @waiting_thread_node: element for @proc->waiting_threads list
- * (protected by @proc->inner_lock)
- * @pid: PID for this thread
- * (invariant after initialization)
- * @looper: bitmap of looping state
- * (only accessed by this thread)
- * @looper_needs_return: looping thread needs to exit driver
- * (no lock needed)
- * @transaction_stack: stack of in-progress transactions for this thread
- * (protected by @proc->inner_lock)
- * @todo: list of work to do for this thread
- * (protected by @proc->inner_lock)
- * @process_todo: whether work in @todo should be processed
- * (protected by @proc->inner_lock)
- * @return_error: transaction errors reported by this thread
- * (only accessed by this thread)
- * @reply_error: transaction errors reported by target thread
- * (protected by @proc->inner_lock)
- * @wait: wait queue for thread work
- * @stats: per-thread statistics
- * (atomics, no lock needed)
- * @tmp_ref: temporary reference to indicate thread is in use
- * (atomic since @proc->inner_lock cannot
- * always be acquired)
- * @is_dead: thread is dead and awaiting free
- * when outstanding transactions are cleaned up
- * (protected by @proc->inner_lock)
- * @task: struct task_struct for this thread
- *
- * Bookkeeping structure for binder threads.
- */
-struct binder_thread {
-	struct binder_proc *proc;
-	struct rb_node rb_node;
-	struct list_head waiting_thread_node;
-	int pid;
-	int looper; /* only modified by this thread */
-	bool looper_need_return; /* can be written by other thread */
-	struct binder_transaction *transaction_stack;
-	struct list_head todo;
-	bool process_todo;
-	struct binder_error return_error;
-	struct binder_error reply_error;
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-	atomic_t tmp_ref;
-	bool is_dead;
-	struct task_struct *task;
-};
-
-struct binder_transaction {
-	int debug_id;
-	struct binder_work work;
-	struct binder_thread *from;
-	struct binder_transaction *from_parent;
-	struct binder_proc *to_proc;
-	struct binder_thread *to_thread;
-	struct binder_transaction *to_parent;
-	unsigned need_reply:1;
-	/* unsigned is_dead:1; */ /* not used at the moment */
-
-	struct binder_buffer *buffer;
-	unsigned int code;
-	unsigned int flags;
-	struct binder_priority priority;
-	struct binder_priority saved_priority;
-	bool set_priority_called;
-	kuid_t sender_euid;
-	binder_uintptr_t security_ctx;
-	/**
-	 * @lock: protects @from, @to_proc, and @to_thread
-	 *
-	 * @from, @to_proc, and @to_thread can be set to NULL
-	 * during thread teardown
-	 */
-	spinlock_t lock;
-};
-
-/**
- * struct binder_object - union of flat binder object types
- * @hdr: generic object header
- * @fbo: binder object (nodes and refs)
- * @fdo: file descriptor object
- * @bbo: binder buffer pointer
- * @fdao: file descriptor array
- *
- * Used for type-independent object copies
- */
-struct binder_object {
-	union {
-		struct binder_object_header hdr;
-		struct flat_binder_object fbo;
-		struct binder_fd_object fdo;
-		struct binder_buffer_object bbo;
-		struct binder_fd_array_object fdao;
-	};
-};
-
-/**
  * binder_proc_lock() - Acquire outer lock for given binder_proc
  * @proc: struct binder_proc to acquire
  *
@@ -653,6 +244,7 @@
 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 static void
 _binder_proc_lock(struct binder_proc *proc, int line)
+	__acquires(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -668,6 +260,7 @@
 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 static void
 _binder_proc_unlock(struct binder_proc *proc, int line)
+	__releases(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -683,6 +276,7 @@
 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 static void
 _binder_inner_proc_lock(struct binder_proc *proc, int line)
+	__acquires(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -698,6 +292,7 @@
 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 static void
 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
+	__releases(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -713,6 +308,7 @@
 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 static void
 _binder_node_lock(struct binder_node *node, int line)
+	__acquires(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -728,6 +324,7 @@
 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 static void
 _binder_node_unlock(struct binder_node *node, int line)
+	__releases(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -744,12 +341,16 @@
 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 static void
 _binder_node_inner_lock(struct binder_node *node, int line)
+	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
 	spin_lock(&node->lock);
 	if (node->proc)
 		binder_inner_proc_lock(node->proc);
+	else
+		/* annotation for sparse */
+		__acquire(&node->proc->inner_lock);
 }

 /**
@@ -761,6 +362,7 @@
 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 static void
 _binder_node_inner_unlock(struct binder_node *node, int line)
+	__releases(&node->lock) __releases(&node->proc->inner_lock)
 {
 	struct binder_proc *proc = node->proc;

@@ -768,6 +370,9 @@
 		     "%s: line=%d\n", __func__, line);
 	if (proc)
 		binder_inner_proc_unlock(proc);
+	else
+		/* annotation for sparse */
+		__release(&node->proc->inner_lock);
 	spin_unlock(&node->lock);
 }

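[Note: the __acquires()/__releases() declarations added above are checked by sparse (make C=1); the bare __acquire()/__release() statements balance sparse's lock-context counter on branches where no lock is really taken, such as when node->proc is NULL. A self-contained sketch of the same pattern, using a hypothetical struct foo rather than anything from this patch:

	static void foo_lock_owner(struct foo *f)
		__acquires(&f->owner->lock)
	{
		if (f->owner)
			spin_lock(&f->owner->lock);
		else
			/* annotation only: keep the context count balanced */
			__acquire(&f->owner->lock);
	}

	static void foo_unlock_owner(struct foo *f)
		__releases(&f->owner->lock)
	{
		if (f->owner)
			spin_unlock(&f->owner->lock);
		else
			__release(&f->owner->lock);
	}
]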
@@ -907,69 +512,14 @@
 static void binder_free_proc(struct binder_proc *proc);
 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
-	unsigned long rlim_cur;
-	unsigned long irqs;
-	int ret;
-
-	mutex_lock(&proc->files_lock);
-	if (proc->files == NULL) {
-		ret = -ESRCH;
-		goto err;
-	}
-	if (!lock_task_sighand(proc->tsk, &irqs)) {
-		ret = -EMFILE;
-		goto err;
-	}
-	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
-	unlock_task_sighand(proc->tsk, &irqs);
-
-	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
-err:
-	mutex_unlock(&proc->files_lock);
-	return ret;
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
-	struct binder_proc *proc, unsigned int fd, struct file *file)
-{
-	mutex_lock(&proc->files_lock);
-	if (proc->files)
-		__fd_install(proc->files, fd, file);
-	mutex_unlock(&proc->files_lock);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
-	int retval;
-
-	mutex_lock(&proc->files_lock);
-	if (proc->files == NULL) {
-		retval = -ESRCH;
-		goto err;
-	}
-	retval = __close_fd(proc->files, fd);
-	/* can't restart close syscall because file table entry was cleared */
-	if (unlikely(retval == -ERESTARTSYS ||
-		     retval == -ERESTARTNOINTR ||
-		     retval == -ERESTARTNOHAND ||
-		     retval == -ERESTART_RESTARTBLOCK))
-		retval = -EINTR;
-err:
-	mutex_unlock(&proc->files_lock);
-	return retval;
-}
-
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
+	int ret = 0;
+
+	trace_android_vh_binder_has_work_ilocked(thread, do_proc_work, &ret);
+	if (ret)
+		return true;
 	return thread->process_todo ||
 		thread->looper_need_return ||
 		(do_proc_work &&
@@ -1005,6 +555,7 @@
 		thread = rb_entry(n, struct binder_thread, rb_node);
 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 		    binder_available_for_proc_work_ilocked(thread)) {
+			trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
 			if (sync)
 				wake_up_interruptible_sync(&thread->wait);
 			else
@@ -1064,6 +615,7 @@
 	assert_spin_locked(&proc->inner_lock);

 	if (thread) {
+		trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
 		if (sync)
 			wake_up_interruptible_sync(&thread->wait);
 		else
@@ -1206,6 +758,7 @@
 				    bool inherit_rt)
 {
 	struct binder_priority desired_prio = t->priority;
+	bool skip = false;

 	if (t->set_priority_called)
 		return;
@@ -1213,6 +766,10 @@
 	t->set_priority_called = true;
 	t->saved_priority.sched_policy = task->policy;
 	t->saved_priority.prio = task->normal_prio;
+
+	trace_android_vh_binder_priority_skip(task, &skip);
+	if (skip)
+		return;

 	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
 		desired_prio.prio = NICE_TO_PRIO(0);
@@ -1233,6 +790,7 @@
 	}

 	binder_set_priority(task, desired_prio);
+	trace_android_vh_binder_set_priority(t, task);
 }

 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
@@ -1545,10 +1103,14 @@
 	binder_node_inner_lock(node);
 	if (!node->proc)
 		spin_lock(&binder_dead_nodes_lock);
+	else
+		__acquire(&binder_dead_nodes_lock);
 	node->tmp_refs--;
 	BUG_ON(node->tmp_refs < 0);
 	if (!node->proc)
 		spin_unlock(&binder_dead_nodes_lock);
+	else
+		__release(&binder_dead_nodes_lock);
 	/*
 	 * Call binder_dec_node() to check if all refcounts are 0
 	 * and cleanup is needed. Calling with strong=0 and internal=1
@@ -1669,6 +1231,7 @@
 		     "%d new ref %d desc %d for node %d\n",
 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
 		      node->debug_id);
+	trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id);
 	binder_node_unlock(node);
 	return new_ref;
 }
@@ -1836,6 +1399,8 @@
  */
 static void binder_free_ref(struct binder_ref *ref)
 {
+	trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : NULL,
+					ref->data.desc);
 	if (ref->node)
 		binder_free_node(ref->node);
 	kfree(ref->death);
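[Note: the trace_android_vh_* calls threaded through this patch are Android vendor hooks (declared via the new <trace/hooks/binder.h> include); they are no-ops until a vendor module attaches a handler. A sketch of how a module might attach to the ref-tracking hook above — this assumes the Android common-kernel vendor-hook machinery, and the handler name and exact parameter list are assumptions of this note:

	static void my_binder_new_ref(void *data, struct task_struct *tsk,
				      uint32_t desc, int node_debug_id)
	{
		/* e.g. account the new reference against tsk */
	}

	static int __init my_mod_init(void)
	{
		return register_trace_android_vh_binder_new_ref(my_binder_new_ref,
								NULL);
	}
]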
@@ -1940,6 +1505,18 @@
 	}
 	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	if (ret && ref == new_ref) {
+		/*
+		 * Cleanup the failed reference here as the target
+		 * could now be dead and have already released its
+		 * references by now. Calling on the new reference
+		 * with strong=0 and a tmp_refs will not decrement
+		 * the node. The new_ref gets kfree'd below.
+		 */
+		binder_cleanup_ref_olocked(new_ref);
+		ref = NULL;
+	}
+
 	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
@@ -2051,20 +1628,45 @@
  */
 static struct binder_thread *binder_get_txn_from_and_acq_inner(
 		struct binder_transaction *t)
+	__acquires(&t->from->proc->inner_lock)
 {
 	struct binder_thread *from;

 	from = binder_get_txn_from(t);
-	if (!from)
+	if (!from) {
+		__acquire(&from->proc->inner_lock);
 		return NULL;
+	}
 	binder_inner_proc_lock(from->proc);
 	if (t->from) {
 		BUG_ON(from != t->from);
 		return from;
 	}
 	binder_inner_proc_unlock(from->proc);
+	__acquire(&from->proc->inner_lock);
 	binder_thread_dec_tmpref(from);
 	return NULL;
+}
+
+/**
+ * binder_free_txn_fixups() - free unprocessed fd fixups
+ * @t: binder transaction for t->from
+ *
+ * If the transaction is being torn down prior to being
+ * processed by the target process, free all of the
+ * fd fixups and fput the file structs. It is safe to
+ * call this function after the fixups have been
+ * processed -- in that case, the list will be empty.
+ */
+static void binder_free_txn_fixups(struct binder_transaction *t)
+{
+	struct binder_txn_fd_fixup *fixup, *tmp;
+
+	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+		fput(fixup->file);
+		list_del(&fixup->fixup_entry);
+		kfree(fixup);
+	}
 }

 static void binder_free_transaction(struct binder_transaction *t)
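[Note: binder_free_txn_fixups() drains t->fd_fixups, a list of struct binder_txn_fd_fixup records that carry a pinned struct file from the sender until a target thread can allocate the fd. The record itself is defined outside this diff (in binder_internal.h); its layout is roughly the following, given here as an assumption for reference only:

	struct binder_txn_fd_fixup {
		struct list_head fixup_entry;	/* link in t->fd_fixups */
		struct file *file;		/* sender's file, held until fixup */
		size_t offset;			/* where in t->buffer the new fd goes */
	};
]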
@@ -2073,6 +1675,12 @@

 	if (target_proc) {
 		binder_inner_proc_lock(target_proc);
+		target_proc->outstanding_txns--;
+		if (target_proc->outstanding_txns < 0)
+			pr_warn("%s: Unexpected outstanding_txns %d\n",
+				__func__, target_proc->outstanding_txns);
+		if (!target_proc->outstanding_txns && target_proc->is_frozen)
+			wake_up_interruptible_all(&target_proc->freeze_wait);
 		if (t->buffer)
 			t->buffer->transaction = NULL;
 		binder_inner_proc_unlock(target_proc);
@@ -2081,6 +1689,7 @@
 	 * If the transaction has no target_proc, then
 	 * t->buffer->transaction has already been cleared.
 	 */
+	binder_free_txn_fixups(t);
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2123,6 +1732,7 @@
 		binder_free_transaction(t);
 		return;
 	}
+	__release(&target_thread->proc->inner_lock);
 	next = t->from_parent;

 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2165,15 +1775,21 @@
 /**
  * binder_get_object() - gets object and checks for valid metadata
  * @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
  * @buffer: binder_buffer that we're parsing.
  * @offset: offset in the @buffer at which to validate an object.
  * @object: struct binder_object to read into
  *
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
 * size of that object. Otherwise, it returns zero. The object
 * is read into the struct binder_object pointed to by @object.
 */
 static size_t binder_get_object(struct binder_proc *proc,
+				const void __user *u,
 				struct binder_buffer *buffer,
 				unsigned long offset,
 				struct binder_object *object)
@@ -2183,11 +1799,16 @@
 	size_t object_size = 0;

 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
-	    !IS_ALIGNED(offset, sizeof(u32)))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr))
 		return 0;
-	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
-				      offset, read_size);
+	if (u) {
+		if (copy_from_user(object, u + offset, read_size))
+			return 0;
+	} else {
+		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+						  offset, read_size))
+			return 0;
+	}

 	/* Ok, now see if we read a complete object. */
 	hdr = &object->hdr;
@@ -2256,9 +1877,11 @@
 		return NULL;

 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
-	binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
-				      b, buffer_offset, sizeof(object_offset));
-	object_size = binder_get_object(proc, b, object_offset, object);
+	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+					  b, buffer_offset,
+					  sizeof(object_offset)))
+		return NULL;
+	object_size = binder_get_object(proc, NULL, b, object_offset, object);
 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
 		return NULL;
 	if (object_offsetp)
@@ -2323,7 +1946,8 @@
 		unsigned long buffer_offset;
 		struct binder_object last_object;
 		struct binder_buffer_object *last_bbo;
-		size_t object_size = binder_get_object(proc, b, last_obj_offset,
+		size_t object_size = binder_get_object(proc, NULL, b,
+						       last_obj_offset,
 						       &last_object);
 		if (object_size != sizeof(*last_bbo))
 			return false;
@@ -2337,46 +1961,108 @@
 			return false;
 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
 		buffer_offset = objects_start_offset +
-			sizeof(binder_size_t) * last_bbo->parent,
-		binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
-					      b, buffer_offset,
-					      sizeof(last_obj_offset));
+			sizeof(binder_size_t) * last_bbo->parent;
+		if (binder_alloc_copy_from_buffer(&proc->alloc,
+						  &last_obj_offset,
+						  b, buffer_offset,
+						  sizeof(last_obj_offset)))
+			return false;
 	}
 	return (fixup_offset >= last_min_offset);
 }

+/**
+ * struct binder_task_work_cb - for deferred close
+ *
+ * @twork: callback_head for task work
+ * @fd: fd to close
+ *
+ * Structure to pass task work to be handled after
+ * returning from binder_ioctl() via task_work_add().
+ */
+struct binder_task_work_cb {
+	struct callback_head twork;
+	struct file *file;
+};
+
+/**
+ * binder_do_fd_close() - close list of file descriptors
+ * @twork: callback head for task work
+ *
+ * It is not safe to call ksys_close() during the binder_ioctl()
+ * function if there is a chance that binder's own file descriptor
+ * might be closed. This is to meet the requirements for using
+ * fdget() (see comments for __fget_light()). Therefore use
+ * task_work_add() to schedule the close operation once we have
+ * returned from binder_ioctl(). This function is a callback
+ * for that mechanism and does the actual ksys_close() on the
+ * given file descriptor.
+ */
+static void binder_do_fd_close(struct callback_head *twork)
+{
+	struct binder_task_work_cb *twcb = container_of(twork,
+			struct binder_task_work_cb, twork);
+
+	fput(twcb->file);
+	kfree(twcb);
+}
+
+/**
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
+ * @fd: file-descriptor to close
+ *
+ * See comments in binder_do_fd_close(). This function is used to schedule
+ * a file-descriptor to be closed after returning from binder_ioctl().
+ */
+static void binder_deferred_fd_close(int fd)
+{
+	struct binder_task_work_cb *twcb;
+
+	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+	if (!twcb)
+		return;
+	init_task_work(&twcb->twork, binder_do_fd_close);
+	close_fd_get_file(fd, &twcb->file);
+	if (twcb->file) {
+		filp_close(twcb->file, current->files);
+		task_work_add(current, &twcb->twork, TWA_RESUME);
+	} else {
+		kfree(twcb);
+	}
+}
+
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+					      struct binder_thread *thread,
 					      struct binder_buffer *buffer,
-					      binder_size_t failed_at,
+					      binder_size_t off_end_offset,
 					      bool is_failure)
 {
 	int debug_id = buffer->debug_id;
-	binder_size_t off_start_offset, buffer_offset, off_end_offset;
+	binder_size_t off_start_offset, buffer_offset;

 	binder_debug(BINDER_DEBUG_TRANSACTION,
 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
 		     proc->pid, buffer->debug_id,
 		     buffer->data_size, buffer->offsets_size,
-		     (unsigned long long)failed_at);
+		     (unsigned long long)off_end_offset);

 	if (buffer->target_node)
 		binder_dec_node(buffer->target_node, 1, 0);

 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
-	off_end_offset = is_failure ? failed_at :
-		off_start_offset + buffer->offsets_size;
+
 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
 	     buffer_offset += sizeof(binder_size_t)) {
 		struct binder_object_header *hdr;
-		size_t object_size;
+		size_t object_size = 0;
 		struct binder_object object;
 		binder_size_t object_offset;

-		binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
-					      buffer, buffer_offset,
-					      sizeof(object_offset));
-		object_size = binder_get_object(proc, buffer,
-						object_offset, &object);
+		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+						   buffer, buffer_offset,
+						   sizeof(object_offset)))
+			object_size = binder_get_object(proc, NULL, buffer,
+							object_offset, &object);
 		if (object_size == 0) {
 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
 			       debug_id, (u64)object_offset, buffer->data_size);
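[Note: the deferred-close helpers above exist because closing an fd from inside binder_ioctl() could free the very binder fd the ioctl is running on, violating fdget()'s assumptions. The generic shape of the task_work pattern, reduced to a self-contained sketch with hypothetical names:

	struct deferred_fput {
		struct callback_head twork;
		struct file *file;
	};

	static void deferred_fput_fn(struct callback_head *twork)
	{
		struct deferred_fput *df = container_of(twork,
				struct deferred_fput, twork);

		fput(df->file);	/* runs on return to userspace, after the ioctl */
		kfree(df);
	}

	/* called from process context, e.g. inside an ioctl handler */
	static int defer_fput(struct file *file)
	{
		struct deferred_fput *df = kzalloc(sizeof(*df), GFP_KERNEL);

		if (!df)
			return -ENOMEM;
		df->file = file;
		init_task_work(&df->twork, deferred_fput_fn);
		return task_work_add(current, &df->twork, TWA_RESUME);
	}
]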
@@ -2424,12 +2110,15 @@
 		} break;

 		case BINDER_TYPE_FD: {
-			struct binder_fd_object *fp = to_binder_fd_object(hdr);
-
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        fd %d\n", fp->fd);
-			if (failed_at)
-				task_close_fd(proc, fp->fd);
+			/*
+			 * No need to close the file here since user-space
+			 * closes it for successfully delivered
+			 * transactions. For transactions that weren't
+			 * delivered, the new fd was never allocated so
+			 * there is no need to close and the fput on the
+			 * file is done when the transaction is torn
+			 * down.
+			 */
 		} break;
 		case BINDER_TYPE_PTR:
 			/*
@@ -2445,6 +2134,14 @@
 			size_t fd_index;
 			binder_size_t fd_buf_size;
 			binder_size_t num_valid;
+
+			if (is_failure) {
+				/*
+				 * The fd fixups have not been applied so no
+				 * fds need to be closed.
+				 */
+				continue;
+			}

 			num_valid = (buffer_offset - off_start_offset) /
 					sizeof(binder_size_t);
@@ -2485,15 +2182,24 @@
 			for (fd_index = 0; fd_index < fda->num_fds;
 			     fd_index++) {
 				u32 fd;
+				int err;
 				binder_size_t offset = fda_offset +
 					fd_index * sizeof(fd);

-				binder_alloc_copy_from_buffer(&proc->alloc,
-							      &fd,
-							      buffer,
-							      offset,
-							      sizeof(fd));
-				task_close_fd(proc, fd);
+				err = binder_alloc_copy_from_buffer(
+						&proc->alloc, &fd, buffer,
+						offset, sizeof(fd));
+				WARN_ON(err);
+				if (!err) {
+					binder_deferred_fd_close(fd);
+					/*
+					 * Need to make sure the thread goes
+					 * back to userspace to complete the
+					 * deferred close
+					 */
+					if (thread)
+						thread->looper_need_return = true;
+				}
 			}
 		} break;
 		default:
@@ -2502,6 +2208,21 @@
 			break;
 		}
 	}
+}
+
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+						struct binder_thread *thread,
+						struct binder_buffer *buffer,
+						bool is_failure)
+{
+	binder_size_t off_end_offset;
+
+	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+	off_end_offset += buffer->offsets_size;
+
+	binder_transaction_buffer_release(proc, thread, buffer,
+					  off_end_offset, is_failure);
 }

 static int binder_translate_binder(struct flat_binder_object *fp,
25072228 static int binder_translate_binder(struct flat_binder_object *fp,
....@@ -2528,7 +2249,8 @@
25282249 ret = -EINVAL;
25292250 goto done;
25302251 }
2531
- if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2252
+ if (security_binder_transfer_binder(binder_get_cred(proc),
2253
+ binder_get_cred(target_proc))) {
25322254 ret = -EPERM;
25332255 goto done;
25342256 }
....@@ -2574,7 +2296,8 @@
25742296 proc->pid, thread->pid, fp->handle);
25752297 return -EINVAL;
25762298 }
2577
- if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2299
+ if (security_binder_transfer_binder(binder_get_cred(proc),
2300
+ binder_get_cred(target_proc))) {
25782301 ret = -EPERM;
25792302 goto done;
25802303 }
....@@ -2589,11 +2312,15 @@
25892312 fp->cookie = node->cookie;
25902313 if (node->proc)
25912314 binder_inner_proc_lock(node->proc);
2315
+ else
2316
+ __acquire(&node->proc->inner_lock);
25922317 binder_inc_node_nilocked(node,
25932318 fp->hdr.type == BINDER_TYPE_BINDER,
25942319 0, NULL);
25952320 if (node->proc)
25962321 binder_inner_proc_unlock(node->proc);
2322
+ else
2323
+ __release(&node->proc->inner_lock);
25972324 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
25982325 binder_debug(BINDER_DEBUG_TRANSACTION,
25992326 " ref %d desc %d -> node %d u%016llx\n",
@@ -2626,16 +2353,16 @@
 	return ret;
 }

-static int binder_translate_fd(int fd,
+static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
 			       struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_transaction *in_reply_to)
 {
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
-	int target_fd;
+	struct binder_txn_fd_fixup *fixup;
 	struct file *file;
-	int ret;
+	int ret = 0;
 	bool target_allows_fd;

 	if (in_reply_to)
@@ -2658,25 +2385,31 @@
 		ret = -EBADF;
 		goto err_fget;
 	}
-	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
+	ret = security_binder_transfer_file(binder_get_cred(proc),
+					    binder_get_cred(target_proc), file);
 	if (ret < 0) {
 		ret = -EPERM;
 		goto err_security;
 	}

-	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-	if (target_fd < 0) {
+	/*
+	 * Add fixup record for this transaction. The allocation
+	 * of the fd in the target needs to be done from a
+	 * target thread.
+	 */
+	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+	if (!fixup) {
 		ret = -ENOMEM;
-		goto err_get_unused_fd;
+		goto err_alloc;
 	}
-	task_fd_install(target_proc, target_fd, file);
-	trace_binder_transaction_fd(t, fd, target_fd);
-	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
-		     fd, target_fd);
+	fixup->file = file;
+	fixup->offset = fd_offset;
+	trace_binder_transaction_fd_send(t, fd, fixup->offset);
+	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

-	return target_fd;
+	return ret;

-err_get_unused_fd:
+err_alloc:
 err_security:
 	fput(file);
 err_fget:
@@ -2684,17 +2417,266 @@
 	return ret;
 }

-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset offset in target buffer to fixup
+ * @skip_size bytes to skip in copy (fixup will be written later)
+ * @fixup_data data to write at fixup offset
+ * @node list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+	binder_size_t offset;
+	size_t skip_size;
+	binder_uintptr_t fixup_data;
+	struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset offset in target buffer
+ * @sender_uaddr user address in source buffer
+ * @length bytes to copy
+ * @node list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+	binder_size_t offset;
+	const void __user *sender_uaddr;
+	size_t length;
+	struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc:	binder_alloc associated with @buffer
+ * @buffer:	binder buffer in target process
+ * @sgc_head:	list_head of scatter-gather copy list
+ * @pf_head:	list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 struct list_head *sgc_head,
+					 struct list_head *pf_head)
+{
+	int ret = 0;
+	struct binder_sg_copy *sgc, *tmpsgc;
+	struct binder_ptr_fixup *tmppf;
+	struct binder_ptr_fixup *pf =
+		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+					 node);
+
+	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+		size_t bytes_copied = 0;
+
+		while (bytes_copied < sgc->length) {
+			size_t copy_size;
+			size_t bytes_left = sgc->length - bytes_copied;
+			size_t offset = sgc->offset + bytes_copied;
+
+			/*
+			 * We copy up to the fixup (pointed to by pf)
+			 */
+			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+				       : bytes_left;
+			if (!ret && copy_size)
+				ret = binder_alloc_copy_user_to_buffer(
+						alloc, buffer,
+						offset,
+						sgc->sender_uaddr + bytes_copied,
+						copy_size);
+			bytes_copied += copy_size;
+			if (copy_size != bytes_left) {
+				BUG_ON(!pf);
+				/* we stopped at a fixup offset */
+				if (pf->skip_size) {
+					/*
+					 * we are just skipping. This is for
+					 * BINDER_TYPE_FDA where the translated
+					 * fds will be fixed up when we get
+					 * to target context.
+					 */
+					bytes_copied += pf->skip_size;
+				} else {
+					/* apply the fixup indicated by pf */
+					if (!ret)
+						ret = binder_alloc_copy_to_buffer(
+							alloc, buffer,
+							pf->offset,
+							&pf->fixup_data,
+							sizeof(pf->fixup_data));
+					bytes_copied += sizeof(pf->fixup_data);
+				}
+				list_del(&pf->node);
+				kfree(pf);
+				pf = list_first_entry_or_null(pf_head,
+						struct binder_ptr_fixup, node);
+			}
+		}
+		list_del(&sgc->node);
+		kfree(sgc);
+	}
+	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+		BUG_ON(pf->skip_size == 0);
+		list_del(&pf->node);
+		kfree(pf);
+	}
+	BUG_ON(!list_empty(sgc_head));
+
+	return ret > 0 ? -EINVAL : ret;
+}
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head:	list_head of scatter-gather copy list
+ * @pf_head:	list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+					      struct list_head *pf_head)
+{
+	struct binder_sg_copy *sgc, *tmpsgc;
+	struct binder_ptr_fixup *pf, *tmppf;
+
+	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+		list_del(&sgc->node);
+		kfree(sgc);
+	}
+	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+		list_del(&pf->node);
+		kfree(pf);
+	}
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head:		list_head of scatter-gather copy list
+ * @offset:		binder buffer offset in target process
+ * @sender_uaddr:	user address in source process
+ * @length:		bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+			     const void __user *sender_uaddr, size_t length)
+{
+	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+	if (!bc)
+		return -ENOMEM;
+
+	bc->offset = offset;
+	bc->sender_uaddr = sender_uaddr;
+	bc->length = length;
+	INIT_LIST_HEAD(&bc->node);
+
+	/*
+	 * We are guaranteed that the deferred copies are in-order
+	 * so just add to the tail.
+	 */
+	list_add_tail(&bc->node, sgc_head);
+
+	return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head:	list_head of binder ptr fixup list
+ * @offset:	binder buffer offset in target process
+ * @fixup:	bytes to be copied for fixup
+ * @skip_size:	bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+			    binder_uintptr_t fixup, size_t skip_size)
+{
+	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+	struct binder_ptr_fixup *tmppf;
+
+	if (!pf)
+		return -ENOMEM;
+
+	pf->offset = offset;
+	pf->fixup_data = fixup;
+	pf->skip_size = skip_size;
+	INIT_LIST_HEAD(&pf->node);
+
+	/* Fixups are *mostly* added in-order, but there are some
+	 * exceptions. Look backwards through list for insertion point.
+	 */
+	list_for_each_entry_reverse(tmppf, pf_head, node) {
+		if (tmppf->offset < pf->offset) {
+			list_add(&pf->node, &tmppf->node);
+			return 0;
+		}
+	}
+	/*
+	 * if we get here, then the new offset is the lowest so
+	 * insert at the head
+	 */
+	list_add(&pf->node, pf_head);
+	return 0;
+}
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+				     struct binder_fd_array_object *fda,
+				     const void __user *sender_ubuffer,
 				     struct binder_buffer_object *parent,
+				     struct binder_buffer_object *sender_uparent,
 				     struct binder_transaction *t,
 				     struct binder_thread *thread,
 				     struct binder_transaction *in_reply_to)
 {
-	binder_size_t fdi, fd_buf_size, num_installed_fds;
+	binder_size_t fdi, fd_buf_size;
 	binder_size_t fda_offset;
-	int target_fd;
+	const void __user *sender_ufda_base;
 	struct binder_proc *proc = thread->proc;
-	struct binder_proc *target_proc = t->to_proc;
+	int ret;
+
+	if (fda->num_fds == 0)
+		return 0;

 	fd_buf_size = sizeof(u32) * fda->num_fds;
 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
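[Note: taken together, the helpers above split scatter-gather handling into a record phase and an apply phase, so untranslated sender values are never visible in the target buffer. A sketch of the intended calling sequence inside binder_transaction() — the call sites themselves are outside this diff, so the local names here are illustrative:

	LIST_HEAD(sgc_head);	/* deferred sg copies, naturally in offset order */
	LIST_HEAD(pf_head);	/* pointer fixups, kept sorted by offset */

	/* per BINDER_TYPE_PTR object: queue the payload copy ... */
	ret = binder_defer_copy(&sgc_head, sg_buf_offset,
				(const void __user *)(uintptr_t)bp->buffer,
				bp->length);
	/* ... and queue the rewrite of the parent's pointer field */
	if (!ret)
		ret = binder_add_fixup(&pf_head, parent_offset, bp->buffer, 0);
	/* ... repeat for every object in the transaction ... */

	/* once every object is translated: copy + fix up in one pass */
	if (!ret)
		ret = binder_do_deferred_txn_copies(&target_proc->alloc,
						    t->buffer, &sgc_head,
						    &pf_head);
	if (ret)
		binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
]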
@@ -2718,46 +2700,36 @@
 	 */
 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
 		fda->parent_offset;
-	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+				fda->parent_offset;
+
+	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
+	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+	if (ret)
+		return ret;
+
 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
 		u32 fd;
-
 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
+		binder_size_t sender_uoffset = fdi * sizeof(fd);

-		binder_alloc_copy_from_buffer(&target_proc->alloc,
-					      &fd, t->buffer,
-					      offset, sizeof(fd));
-		target_fd = binder_translate_fd(fd, t, thread, in_reply_to);
-		if (target_fd < 0)
-			goto err_translate_fd_failed;
-		binder_alloc_copy_to_buffer(&target_proc->alloc,
-					    t->buffer, offset,
-					    &target_fd, sizeof(fd));
+		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
+		if (!ret)
+			ret = binder_translate_fd(fd, offset, t, thread,
+						  in_reply_to);
+		if (ret)
+			return ret > 0 ? -EINVAL : ret;
 	}
 	return 0;
-
-err_translate_fd_failed:
-	/*
-	 * Failed to allocate fd or security error, free fds
-	 * installed so far.
-	 */
-	num_installed_fds = fdi;
-	for (fdi = 0; fdi < num_installed_fds; fdi++) {
-		u32 fd;
-		binder_size_t offset = fda_offset + fdi * sizeof(fd);
-		binder_alloc_copy_from_buffer(&target_proc->alloc,
-					      &fd, t->buffer,
-					      offset, sizeof(fd));
-		task_close_fd(target_proc, fd);
-	}
-	return target_fd;
 }

-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+			       struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_buffer_object *bp,
 			       binder_size_t off_start_offset,
@@ -2803,10 +2775,57 @@
 	}
 	buffer_offset = bp->parent_offset +
 		(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
-	binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
-				    &bp->buffer, sizeof(bp->buffer));
+	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
+}

-	return 0;
+/**
+ * binder_can_update_transaction() - Can a txn be superseded by an updated one?
+ * @t1: the pending async txn in the frozen process
+ * @t2: the new async txn to supersede the outdated pending one
+ *
+ * Return: true if t2 can supersede t1
+ *         false if t2 can not supersede t1
+ */
+static bool binder_can_update_transaction(struct binder_transaction *t1,
+					  struct binder_transaction *t2)
+{
+	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
+	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
+		return false;
+	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
+	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
+	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
+	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
+		return true;
+	return false;
+}
+
+/**
+ * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
+ * @t:		 new async transaction
+ * @target_list: list to find outdated transaction
+ *
+ * Return: the outdated transaction if found
+ *         NULL if no outdated transaction can be found
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static struct binder_transaction *
+binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
+					 struct list_head *target_list)
+{
+	struct binder_work *w;
+
+	list_for_each_entry(w, target_list, entry) {
+		struct binder_transaction *t_queued;
+
+		if (w->type != BINDER_WORK_TRANSACTION)
+			continue;
+		t_queued = container_of(w, struct binder_transaction, work);
+		if (binder_can_update_transaction(t_queued, t))
+			return t_queued;
+	}
+	return NULL;
 }

 /**
....@@ -2823,10 +2842,11 @@
28232842 * If the @thread parameter is not NULL, the transaction is always queued
28242843 * to the waitlist of that specific thread.
28252844 *
2826
- * Return: true if the transactions was successfully queued
2827
- * false if the target process or thread is dead
2845
+ * Return: 0 if the transaction was successfully queued
2846
+ * BR_DEAD_REPLY if the target process or thread is dead
2847
+ * BR_FROZEN_REPLY if the target process or thread is frozen
28282848 */
2829
-static bool binder_proc_transaction(struct binder_transaction *t,
2849
+static int binder_proc_transaction(struct binder_transaction *t,
28302850 struct binder_proc *proc,
28312851 struct binder_thread *thread)
28322852 {
....@@ -2834,6 +2854,7 @@
28342854 struct binder_priority node_prio;
28352855 bool oneway = !!(t->flags & TF_ONE_WAY);
28362856 bool pending_async = false;
2857
+ struct binder_transaction *t_outdated = NULL;
28372858
28382859 BUG_ON(!node);
28392860 binder_node_lock(node);
....@@ -2842,23 +2863,31 @@
28422863
28432864 if (oneway) {
28442865 BUG_ON(thread);
2845
- if (node->has_async_transaction) {
2866
+ if (node->has_async_transaction)
28462867 pending_async = true;
2847
- } else {
2868
+ else
28482869 node->has_async_transaction = true;
2849
- }
28502870 }
28512871
28522872 binder_inner_proc_lock(proc);
2873
+ if (proc->is_frozen) {
2874
+ proc->sync_recv |= !oneway;
2875
+ proc->async_recv |= oneway;
2876
+ }
28532877
2854
- if (proc->is_dead || (thread && thread->is_dead)) {
2878
+ if ((proc->is_frozen && !oneway) || proc->is_dead ||
2879
+ (thread && thread->is_dead)) {
28552880 binder_inner_proc_unlock(proc);
28562881 binder_node_unlock(node);
2857
- return false;
2882
+ return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
28582883 }
28592884
28602885 if (!thread && !pending_async)
28612886 thread = binder_select_thread_ilocked(proc);
2887
+
2888
+ trace_android_vh_binder_proc_transaction(current, proc->tsk,
2889
+ thread ? thread->task : NULL, node->debug_id, t->code,
2890
+ pending_async);
28622891
28632892 if (thread) {
28642893 binder_transaction_priority(thread->task, t, node_prio,
....@@ -2867,16 +2896,47 @@
28672896 } else if (!pending_async) {
28682897 binder_enqueue_work_ilocked(&t->work, &proc->todo);
28692898 } else {
2899
+ if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
2900
+ t_outdated = binder_find_outdated_transaction_ilocked(t,
2901
+ &node->async_todo);
2902
+ if (t_outdated) {
2903
+ binder_debug(BINDER_DEBUG_TRANSACTION,
2904
+ "txn %d supersedes %d\n",
2905
+ t->debug_id, t_outdated->debug_id);
2906
+ list_del_init(&t_outdated->work.entry);
2907
+ proc->outstanding_txns--;
2908
+ }
2909
+ }
28702910 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
28712911 }
2912
+
2913
+ trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
2914
+ thread ? thread->task : NULL, t->code, pending_async, !oneway);
28722915
28732916 if (!pending_async)
28742917 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
28752918
2919
+ proc->outstanding_txns++;
28762920 binder_inner_proc_unlock(proc);
28772921 binder_node_unlock(node);
28782922
2879
- return true;
2923
+ /*
2924
+ * To reduce potential contention, free the outdated transaction and
2925
+ * buffer after releasing the locks.
2926
+ */
2927
+ if (t_outdated) {
2928
+ struct binder_buffer *buffer = t_outdated->buffer;
2929
+
2930
+ t_outdated->buffer = NULL;
2931
+ buffer->transaction = NULL;
2932
+ trace_binder_transaction_update_buffer_release(buffer);
2933
+ binder_release_entire_buffer(proc, NULL, buffer, false);
2934
+ binder_alloc_free_buf(&proc->alloc, buffer);
2935
+ kfree(t_outdated);
2936
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
2937
+ }
2938
+
2939
+ return 0;
28802940 }
28812941
28822942 /**
....@@ -2934,6 +2994,7 @@
29342994 binder_size_t off_start_offset, off_end_offset;
29352995 binder_size_t off_min;
29362996 binder_size_t sg_buf_offset, sg_buf_end_offset;
2997
+ binder_size_t user_offset = 0;
29372998 struct binder_proc *target_proc = NULL;
29382999 struct binder_thread *target_thread = NULL;
29393000 struct binder_node *target_node = NULL;
....@@ -2948,6 +3009,12 @@
29483009 int t_debug_id = atomic_inc_return(&binder_last_id);
29493010 char *secctx = NULL;
29503011 u32 secctx_sz = 0;
3012
+ struct list_head sgc_head;
3013
+ struct list_head pf_head;
3014
+ const void __user *user_buffer = (const void __user *)
3015
+ (uintptr_t)tr->data.ptr.buffer;
3016
+ INIT_LIST_HEAD(&sgc_head);
3017
+ INIT_LIST_HEAD(&pf_head);
29513018
29523019 e = binder_transaction_log_add(&binder_transaction_log);
29533020 e->debug_id = t_debug_id;
....@@ -2957,7 +3024,7 @@
29573024 e->target_handle = tr->target.handle;
29583025 e->data_size = tr->data_size;
29593026 e->offsets_size = tr->offsets_size;
2960
- e->context_name = proc->context->name;
3027
+ strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
29613028
29623029 if (reply) {
29633030 binder_inner_proc_lock(proc);
....@@ -2991,6 +3058,8 @@
29913058 binder_inner_proc_unlock(proc);
29923059 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
29933060 if (target_thread == NULL) {
3061
+ /* annotation for sparse */
3062
+ __release(&target_thread->proc->inner_lock);
29943063 return_error = BR_DEAD_REPLY;
29953064 return_error_line = __LINE__;
29963065 goto err_dead_binder;
....@@ -3012,6 +3081,7 @@
30123081 target_proc = target_thread->proc;
30133082 target_proc->tmp_ref++;
30143083 binder_inner_proc_unlock(target_thread->proc);
3084
+ trace_android_vh_binder_reply(target_proc, proc, thread, tr);
30153085 } else {
30163086 if (tr->target.handle) {
30173087 struct binder_ref *ref;
....@@ -3031,8 +3101,8 @@
30313101 ref->node, &target_proc,
30323102 &return_error);
30333103 } else {
3034
- binder_user_error("%d:%d got transaction to invalid handle\n",
3035
- proc->pid, thread->pid);
3104
+ binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3105
+ proc->pid, thread->pid, tr->target.handle);
30363106 return_error = BR_FAILED_REPLY;
30373107 }
30383108 binder_proc_unlock(proc);
....@@ -3064,8 +3134,9 @@
30643134 goto err_dead_binder;
30653135 }
30663136 e->to_node = target_node->debug_id;
3067
- if (security_binder_transaction(proc->cred,
3068
- target_proc->cred) < 0) {
3137
+ trace_android_vh_binder_trans(target_proc, proc, thread, tr);
3138
+ if (security_binder_transaction(binder_get_cred(proc),
3139
+ binder_get_cred(target_proc)) < 0) {
30693140 return_error = BR_FAILED_REPLY;
30703141 return_error_param = -EPERM;
30713142 return_error_line = __LINE__;
....@@ -3133,6 +3204,7 @@
31333204 if (target_thread)
31343205 e->to_thread = target_thread->pid;
31353206 e->to_proc = target_proc->pid;
3207
+ trace_android_rvh_binder_transaction(target_proc, proc, thread, tr);
31363208
31373209 /* TODO: reuse incoming transaction for reply */
31383210 t = kzalloc(sizeof(*t), GFP_KERNEL);
....@@ -3142,8 +3214,10 @@
31423214 return_error_line = __LINE__;
31433215 goto err_alloc_t_failed;
31443216 }
3217
+ INIT_LIST_HEAD(&t->fd_fixups);
31453218 binder_stats_created(BINDER_STAT_TRANSACTION);
31463219 spin_lock_init(&t->lock);
3220
+ trace_android_vh_binder_transaction_init(t);
31473221
31483222 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
31493223 if (tcomplete == NULL) {
....@@ -3197,9 +3271,28 @@
31973271 if (target_node && target_node->txn_security_ctx) {
31983272 u32 secid;
31993273 size_t added_size;
3274
+ int max_retries = 100;
32003275
3201
- security_cred_getsecid(proc->cred, &secid);
3276
+ security_cred_getsecid(binder_get_cred(proc), &secid);
3277
+ retry_alloc:
32023278 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3279
+ if (ret == -ENOMEM && max_retries-- > 0) {
3280
+ struct page *dummy_page;
3281
+
3282
+ /*
3283
+ * security_secid_to_secctx() can fail because of a
3284
+ * GFP_ATOMIC allocation, in which case -ENOMEM is
3285
+ * returned. This needs to be retried, but there is
3286
+ * currently no way to tell userspace to retry, so we
3287
+ * do it here. We first make sure there is still memory
3288
+ * available and then retry.
3289
+ */
3290
+ dummy_page = alloc_page(GFP_KERNEL);
3291
+ if (dummy_page) {
3292
+ __free_page(dummy_page);
3293
+ goto retry_alloc;
3294
+ }
3295
+ }
32033296 if (ret) {
32043297 return_error = BR_FAILED_REPLY;
32053298 return_error_param = ret;
....@@ -3234,36 +3327,29 @@
32343327 goto err_binder_alloc_buf_failed;
32353328 }
32363329 if (secctx) {
3330
+ int err;
32373331 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
32383332 ALIGN(tr->offsets_size, sizeof(void *)) +
32393333 ALIGN(extra_buffers_size, sizeof(void *)) -
32403334 ALIGN(secctx_sz, sizeof(u64));
32413335
32423336 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3243
- binder_alloc_copy_to_buffer(&target_proc->alloc,
3244
- t->buffer, buf_offset,
3245
- secctx, secctx_sz);
3337
+ err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3338
+ t->buffer, buf_offset,
3339
+ secctx, secctx_sz);
3340
+ if (err) {
3341
+ t->security_ctx = 0;
3342
+ WARN_ON(1);
3343
+ }
32463344 security_release_secctx(secctx, secctx_sz);
32473345 secctx = NULL;
32483346 }
32493347 t->buffer->debug_id = t->debug_id;
32503348 t->buffer->transaction = t;
32513349 t->buffer->target_node = target_node;
3350
+ t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
32523351 trace_binder_transaction_alloc_buf(t->buffer);
32533352
3254
- if (binder_alloc_copy_user_to_buffer(
3255
- &target_proc->alloc,
3256
- t->buffer, 0,
3257
- (const void __user *)
3258
- (uintptr_t)tr->data.ptr.buffer,
3259
- tr->data_size)) {
3260
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
3261
- proc->pid, thread->pid);
3262
- return_error = BR_FAILED_REPLY;
3263
- return_error_param = -EFAULT;
3264
- return_error_line = __LINE__;
3265
- goto err_copy_data_failed;
3266
- }
32673353 if (binder_alloc_copy_user_to_buffer(
32683354 &target_proc->alloc,
32693355 t->buffer,
....@@ -3308,14 +3394,39 @@
33083394 size_t object_size;
33093395 struct binder_object object;
33103396 binder_size_t object_offset;
3397
+ binder_size_t copy_size;
33113398
3312
- binder_alloc_copy_from_buffer(&target_proc->alloc,
3313
- &object_offset,
3314
- t->buffer,
3315
- buffer_offset,
3316
- sizeof(object_offset));
3317
- object_size = binder_get_object(target_proc, t->buffer,
3318
- object_offset, &object);
3399
+ if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3400
+ &object_offset,
3401
+ t->buffer,
3402
+ buffer_offset,
3403
+ sizeof(object_offset))) {
3404
+ return_error = BR_FAILED_REPLY;
3405
+ return_error_param = -EINVAL;
3406
+ return_error_line = __LINE__;
3407
+ goto err_bad_offset;
3408
+ }
3409
+
3410
+ /*
3411
+ * Copy the source user buffer up to the next object
3412
+ * that will be processed.
3413
+ */
3414
+ copy_size = object_offset - user_offset;
3415
+ if (copy_size && (user_offset > object_offset ||
3416
+ binder_alloc_copy_user_to_buffer(
3417
+ &target_proc->alloc,
3418
+ t->buffer, user_offset,
3419
+ user_buffer + user_offset,
3420
+ copy_size))) {
3421
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
3422
+ proc->pid, thread->pid);
3423
+ return_error = BR_FAILED_REPLY;
3424
+ return_error_param = -EFAULT;
3425
+ return_error_line = __LINE__;
3426
+ goto err_copy_data_failed;
3427
+ }
3428
+ object_size = binder_get_object(target_proc, user_buffer,
3429
+ t->buffer, object_offset, &object);
33193430 if (object_size == 0 || object_offset < off_min) {
33203431 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
33213432 proc->pid, thread->pid,
....@@ -3327,6 +3438,11 @@
33273438 return_error_line = __LINE__;
33283439 goto err_bad_offset;
33293440 }
3441
+ /*
3442
+ * Set offset to the next buffer fragment to be
3443
+ * copied.
3444
+ */
3445
+ user_offset = object_offset + object_size;
33303446
33313447 hdr = &object.hdr;
33323448 off_min = object_offset + object_size;
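
To make the fragment scheme concrete: for a hypothetical transaction with
data_size = 128 and two flat_binder_object entries (24 bytes each on 64-bit)
at offsets 16 and 72, the target buffer is filled as:

    [0, 16)     copied from user_buffer (data before the first object)
    [16, 40)    written by binder_alloc_copy_to_buffer() after translation
    [40, 72)    copied from user_buffer (data between the objects)
    [72, 96)    written after translation
    [96, 128)   copied from user_buffer (the tail, once the object loop ends)

user_offset advances to 40 and then to 96, so no byte is copied twice and no
untranslated sender object ever lands in the target buffer.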
....@@ -3337,15 +3453,17 @@
33373453
33383454 fp = to_flat_binder_object(hdr);
33393455 ret = binder_translate_binder(fp, t, thread);
3340
- if (ret < 0) {
3456
+
3457
+ if (ret < 0 ||
3458
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
3459
+ t->buffer,
3460
+ object_offset,
3461
+ fp, sizeof(*fp))) {
33413462 return_error = BR_FAILED_REPLY;
33423463 return_error_param = ret;
33433464 return_error_line = __LINE__;
33443465 goto err_translate_failed;
33453466 }
3346
- binder_alloc_copy_to_buffer(&target_proc->alloc,
3347
- t->buffer, object_offset,
3348
- fp, sizeof(*fp));
33493467 } break;
33503468 case BINDER_TYPE_HANDLE:
33513469 case BINDER_TYPE_WEAK_HANDLE: {
....@@ -3353,37 +3471,42 @@
33533471
33543472 fp = to_flat_binder_object(hdr);
33553473 ret = binder_translate_handle(fp, t, thread);
3356
- if (ret < 0) {
3474
+ if (ret < 0 ||
3475
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
3476
+ t->buffer,
3477
+ object_offset,
3478
+ fp, sizeof(*fp))) {
33573479 return_error = BR_FAILED_REPLY;
33583480 return_error_param = ret;
33593481 return_error_line = __LINE__;
33603482 goto err_translate_failed;
33613483 }
3362
- binder_alloc_copy_to_buffer(&target_proc->alloc,
3363
- t->buffer, object_offset,
3364
- fp, sizeof(*fp));
33653484 } break;
33663485
33673486 case BINDER_TYPE_FD: {
33683487 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3369
- int target_fd = binder_translate_fd(fp->fd, t, thread,
3370
- in_reply_to);
3488
+ binder_size_t fd_offset = object_offset +
3489
+ (uintptr_t)&fp->fd - (uintptr_t)fp;
3490
+ int ret = binder_translate_fd(fp->fd, fd_offset, t,
3491
+ thread, in_reply_to);
33713492
3372
- if (target_fd < 0) {
3493
+ fp->pad_binder = 0;
3494
+ if (ret < 0 ||
3495
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
3496
+ t->buffer,
3497
+ object_offset,
3498
+ fp, sizeof(*fp))) {
33733499 return_error = BR_FAILED_REPLY;
3374
- return_error_param = target_fd;
3500
+ return_error_param = ret;
33753501 return_error_line = __LINE__;
33763502 goto err_translate_failed;
33773503 }
3378
- fp->pad_binder = 0;
3379
- fp->fd = target_fd;
3380
- binder_alloc_copy_to_buffer(&target_proc->alloc,
3381
- t->buffer, object_offset,
3382
- fp, sizeof(*fp));
33833504 } break;
33843505 case BINDER_TYPE_FDA: {
33853506 struct binder_object ptr_object;
33863507 binder_size_t parent_offset;
3508
+ struct binder_object user_object;
3509
+ size_t user_parent_size;
33873510 struct binder_fd_array_object *fda =
33883511 to_binder_fd_array_object(hdr);
33893512 size_t num_valid = (buffer_offset - off_start_offset) /
....@@ -3415,11 +3538,35 @@
34153538 return_error_line = __LINE__;
34163539 goto err_bad_parent;
34173540 }
3418
- ret = binder_translate_fd_array(fda, parent, t, thread,
3419
- in_reply_to);
3420
- if (ret < 0) {
3541
+ /*
3542
+ * We need to read the user version of the parent
3543
+ * object to get the original user offset
3544
+ */
3545
+ user_parent_size =
3546
+ binder_get_object(proc, user_buffer, t->buffer,
3547
+ parent_offset, &user_object);
3548
+ if (user_parent_size != sizeof(user_object.bbo)) {
3549
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3550
+ proc->pid, thread->pid,
3551
+ user_parent_size,
3552
+ sizeof(user_object.bbo));
34213553 return_error = BR_FAILED_REPLY;
3422
- return_error_param = ret;
3554
+ return_error_param = -EINVAL;
3555
+ return_error_line = __LINE__;
3556
+ goto err_bad_parent;
3557
+ }
3558
+ ret = binder_translate_fd_array(&pf_head, fda,
3559
+ user_buffer, parent,
3560
+ &user_object.bbo, t,
3561
+ thread, in_reply_to);
3562
+ if (!ret)
3563
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3564
+ t->buffer,
3565
+ object_offset,
3566
+ fda, sizeof(*fda));
3567
+ if (ret) {
3568
+ return_error = BR_FAILED_REPLY;
3569
+ return_error_param = ret > 0 ? -EINVAL : ret;
34233570 return_error_line = __LINE__;
34243571 goto err_translate_failed;
34253572 }
....@@ -3441,19 +3588,14 @@
34413588 return_error_line = __LINE__;
34423589 goto err_bad_offset;
34433590 }
3444
- if (binder_alloc_copy_user_to_buffer(
3445
- &target_proc->alloc,
3446
- t->buffer,
3447
- sg_buf_offset,
3448
- (const void __user *)
3449
- (uintptr_t)bp->buffer,
3450
- bp->length)) {
3451
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3452
- proc->pid, thread->pid);
3453
- return_error_param = -EFAULT;
3591
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3592
+ (const void __user *)(uintptr_t)bp->buffer,
3593
+ bp->length);
3594
+ if (ret) {
34543595 return_error = BR_FAILED_REPLY;
3596
+ return_error_param = ret;
34553597 return_error_line = __LINE__;
3456
- goto err_copy_data_failed;
3598
+ goto err_translate_failed;
34573599 }
34583600 /* Fixup buffer pointer to target proc address space */
34593601 bp->buffer = (uintptr_t)
....@@ -3462,20 +3604,22 @@
34623604
34633605 num_valid = (buffer_offset - off_start_offset) /
34643606 sizeof(binder_size_t);
3465
- ret = binder_fixup_parent(t, thread, bp,
3607
+ ret = binder_fixup_parent(&pf_head, t,
3608
+ thread, bp,
34663609 off_start_offset,
34673610 num_valid,
34683611 last_fixup_obj_off,
34693612 last_fixup_min_off);
3470
- if (ret < 0) {
3613
+ if (ret < 0 ||
3614
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
3615
+ t->buffer,
3616
+ object_offset,
3617
+ bp, sizeof(*bp))) {
34713618 return_error = BR_FAILED_REPLY;
34723619 return_error_param = ret;
34733620 return_error_line = __LINE__;
34743621 goto err_translate_failed;
34753622 }
3476
- binder_alloc_copy_to_buffer(&target_proc->alloc,
3477
- t->buffer, object_offset,
3478
- bp, sizeof(*bp));
34793623 last_fixup_obj_off = object_offset;
34803624 last_fixup_min_off = 0;
34813625 } break;
....@@ -3488,21 +3632,51 @@
34883632 goto err_bad_object_type;
34893633 }
34903634 }
3491
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3635
+ /* Done processing objects, copy the rest of the buffer */
3636
+ if (binder_alloc_copy_user_to_buffer(
3637
+ &target_proc->alloc,
3638
+ t->buffer, user_offset,
3639
+ user_buffer + user_offset,
3640
+ tr->data_size - user_offset)) {
3641
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
3642
+ proc->pid, thread->pid);
3643
+ return_error = BR_FAILED_REPLY;
3644
+ return_error_param = -EFAULT;
3645
+ return_error_line = __LINE__;
3646
+ goto err_copy_data_failed;
3647
+ }
3648
+
3649
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3650
+ &sgc_head, &pf_head);
3651
+ if (ret) {
3652
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3653
+ proc->pid, thread->pid);
3654
+ return_error = BR_FAILED_REPLY;
3655
+ return_error_param = ret;
3656
+ return_error_line = __LINE__;
3657
+ goto err_copy_data_failed;
3658
+ }
3659
+ if (t->buffer->oneway_spam_suspect)
3660
+ tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3661
+ else
3662
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
34923663 t->work.type = BINDER_WORK_TRANSACTION;
34933664
34943665 if (reply) {
34953666 binder_enqueue_thread_work(thread, tcomplete);
34963667 binder_inner_proc_lock(target_proc);
34973668 if (target_thread->is_dead) {
3669
+ return_error = BR_DEAD_REPLY;
34983670 binder_inner_proc_unlock(target_proc);
34993671 goto err_dead_proc_or_thread;
35003672 }
35013673 BUG_ON(t->buffer->async_transaction != 0);
35023674 binder_pop_transaction_ilocked(target_thread, in_reply_to);
35033675 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3676
+ target_proc->outstanding_txns++;
35043677 binder_inner_proc_unlock(target_proc);
35053678 wake_up_interruptible_sync(&target_thread->wait);
3679
+ trace_android_vh_binder_restore_priority(in_reply_to, current);
35063680 binder_restore_priority(current, in_reply_to->saved_priority);
35073681 binder_free_transaction(in_reply_to);
35083682 } else if (!(t->flags & TF_ONE_WAY)) {
....@@ -3520,7 +3694,9 @@
35203694 t->from_parent = thread->transaction_stack;
35213695 thread->transaction_stack = t;
35223696 binder_inner_proc_unlock(proc);
3523
- if (!binder_proc_transaction(t, target_proc, target_thread)) {
3697
+ return_error = binder_proc_transaction(t,
3698
+ target_proc, target_thread);
3699
+ if (return_error) {
35243700 binder_inner_proc_lock(proc);
35253701 binder_pop_transaction_ilocked(thread, t);
35263702 binder_inner_proc_unlock(proc);
....@@ -3530,7 +3706,8 @@
35303706 BUG_ON(target_node == NULL);
35313707 BUG_ON(t->buffer->async_transaction != 1);
35323708 binder_enqueue_thread_work(thread, tcomplete);
3533
- if (!binder_proc_transaction(t, target_proc, NULL))
3709
+ return_error = binder_proc_transaction(t, target_proc, NULL);
3710
+ if (return_error)
35343711 goto err_dead_proc_or_thread;
35353712 }
35363713 if (target_thread)
....@@ -3547,7 +3724,6 @@
35473724 return;
35483725
35493726 err_dead_proc_or_thread:
3550
- return_error = BR_DEAD_REPLY;
35513727 return_error_line = __LINE__;
35523728 binder_dequeue_work(proc, tcomplete);
35533729 err_translate_failed:
....@@ -3555,8 +3731,10 @@
35553731 err_bad_offset:
35563732 err_bad_parent:
35573733 err_copy_data_failed:
3734
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3735
+ binder_free_txn_fixups(t);
35583736 trace_binder_transaction_failed_buffer_release(t->buffer);
3559
- binder_transaction_buffer_release(target_proc, t->buffer,
3737
+ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
35603738 buffer_offset, true);
35613739 if (target_node)
35623740 binder_dec_node_tmpref(target_node);
....@@ -3613,6 +3791,7 @@
36133791
36143792 BUG_ON(thread->return_error.cmd != BR_OK);
36153793 if (in_reply_to) {
3794
+ trace_android_vh_binder_restore_priority(in_reply_to, current);
36163795 binder_restore_priority(current, in_reply_to->saved_priority);
36173796 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
36183797 binder_enqueue_thread_work(thread, &thread->return_error.work);
....@@ -3621,6 +3800,52 @@
36213800 thread->return_error.cmd = return_error;
36223801 binder_enqueue_thread_work(thread, &thread->return_error.work);
36233802 }
3803
+}
3804
+
3805
+/**
3806
+ * binder_free_buf() - free the specified buffer
3807
+ * @proc: binder proc that owns the buffer
3808
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
3809
+ * @is_failure: true if the buffer is being freed after a failed send
3810
+ *
3811
+ * If the buffer is for an async transaction, enqueue the next async
3812
+ * transaction from the node.
3813
+ *
3814
+ * Clean up the buffer and free it.
3815
+ */
3816
+static void
3817
+binder_free_buf(struct binder_proc *proc,
3818
+ struct binder_thread *thread,
3819
+ struct binder_buffer *buffer, bool is_failure)
3820
+{
3821
+ binder_inner_proc_lock(proc);
3822
+ if (buffer->transaction) {
3823
+ buffer->transaction->buffer = NULL;
3824
+ buffer->transaction = NULL;
3825
+ }
3826
+ binder_inner_proc_unlock(proc);
3827
+ if (buffer->async_transaction && buffer->target_node) {
3828
+ struct binder_node *buf_node;
3829
+ struct binder_work *w;
3830
+
3831
+ buf_node = buffer->target_node;
3832
+ binder_node_inner_lock(buf_node);
3833
+ BUG_ON(!buf_node->has_async_transaction);
3834
+ BUG_ON(buf_node->proc != proc);
3835
+ w = binder_dequeue_work_head_ilocked(
3836
+ &buf_node->async_todo);
3837
+ if (!w) {
3838
+ buf_node->has_async_transaction = false;
3839
+ } else {
3840
+ binder_enqueue_work_ilocked(
3841
+ w, &proc->todo);
3842
+ binder_wakeup_proc_ilocked(proc);
3843
+ }
3844
+ binder_node_inner_unlock(buf_node);
3845
+ }
3846
+ trace_binder_transaction_buffer_release(buffer);
3847
+ binder_release_entire_buffer(proc, thread, buffer, is_failure);
3848
+ binder_alloc_free_buf(&proc->alloc, buffer);
36243849 }
36253850
36263851 static int binder_thread_write(struct binder_proc *proc,
....@@ -3813,35 +4038,7 @@
38134038 proc->pid, thread->pid, (u64)data_ptr,
38144039 buffer->debug_id,
38154040 buffer->transaction ? "active" : "finished");
3816
-
3817
- binder_inner_proc_lock(proc);
3818
- if (buffer->transaction) {
3819
- buffer->transaction->buffer = NULL;
3820
- buffer->transaction = NULL;
3821
- }
3822
- binder_inner_proc_unlock(proc);
3823
- if (buffer->async_transaction && buffer->target_node) {
3824
- struct binder_node *buf_node;
3825
- struct binder_work *w;
3826
-
3827
- buf_node = buffer->target_node;
3828
- binder_node_inner_lock(buf_node);
3829
- BUG_ON(!buf_node->has_async_transaction);
3830
- BUG_ON(buf_node->proc != proc);
3831
- w = binder_dequeue_work_head_ilocked(
3832
- &buf_node->async_todo);
3833
- if (!w) {
3834
- buf_node->has_async_transaction = false;
3835
- } else {
3836
- binder_enqueue_work_ilocked(
3837
- w, &proc->todo);
3838
- binder_wakeup_proc_ilocked(proc);
3839
- }
3840
- binder_node_inner_unlock(buf_node);
3841
- }
3842
- trace_binder_transaction_buffer_release(buffer);
3843
- binder_transaction_buffer_release(proc, buffer, 0, false);
3844
- binder_alloc_free_buf(&proc->alloc, buffer);
4041
+ binder_free_buf(proc, thread, buffer, false);
38454042 break;
38464043 }
38474044
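
For reference, the userspace half of this path is a BC_FREE_BUFFER command
carrying the buffer pointer that arrived with the transaction. A sketch,
assuming an initialized binder fd and a binder_transaction_data received
via BR_TRANSACTION (error handling omitted):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    static void free_received_buffer(int binder_fd,
                                     const struct binder_transaction_data *txn)
    {
        struct {
            uint32_t cmd;
            binder_uintptr_t buffer;
        } __attribute__((packed)) wr = {
            BC_FREE_BUFFER,
            txn->data.ptr.buffer,   /* as delivered by the kernel */
        };
        struct binder_write_read bwr = {
            .write_size = sizeof(wr),
            .write_buffer = (binder_uintptr_t)(uintptr_t)&wr,
        };

        ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
    }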
....@@ -3887,6 +4084,7 @@
38874084 }
38884085 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
38894086 binder_inner_proc_unlock(proc);
4087
+ trace_android_vh_binder_looper_state_registered(thread, proc);
38904088 break;
38914089 case BC_ENTER_LOOPER:
38924090 binder_debug(BINDER_DEBUG_THREADS,
....@@ -4148,18 +4346,84 @@
41484346 if (do_proc_work)
41494347 list_add(&thread->waiting_thread_node,
41504348 &proc->waiting_threads);
4349
+ trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
41514350 binder_inner_proc_unlock(proc);
41524351 schedule();
41534352 binder_inner_proc_lock(proc);
41544353 list_del_init(&thread->waiting_thread_node);
41554354 if (signal_pending(current)) {
4156
- ret = -ERESTARTSYS;
4355
+ ret = -EINTR;
41574356 break;
41584357 }
41594358 }
41604359 finish_wait(&thread->wait, &wait);
41614360 binder_inner_proc_unlock(proc);
41624361 freezer_count();
4362
+
4363
+ return ret;
4364
+}
4365
+
4366
+/**
4367
+ * binder_apply_fd_fixups() - finish fd translation
4368
+ * @proc: binder_proc associated with @t->buffer
4369
+ * @t: binder transaction with list of fd fixups
4370
+ *
4371
+ * Now that we are in the context of the transaction target
4372
+ * process, we can allocate and install fds. Process the
4373
+ * list of fds to translate and fixup the buffer with the
4374
+ * new fds.
4375
+ *
4376
+ * If we fail to allocate an fd, then free the resources by
4377
+ * fput'ing files that have not been processed and closing (via
4378
+ * binder_deferred_fd_close()) any fds that have already been installed.
4379
+ */
4380
+static int binder_apply_fd_fixups(struct binder_proc *proc,
4381
+ struct binder_transaction *t)
4382
+{
4383
+ struct binder_txn_fd_fixup *fixup, *tmp;
4384
+ int ret = 0;
4385
+
4386
+ list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4387
+ int fd = get_unused_fd_flags(O_CLOEXEC);
4388
+
4389
+ if (fd < 0) {
4390
+ binder_debug(BINDER_DEBUG_TRANSACTION,
4391
+ "failed fd fixup txn %d fd %d\n",
4392
+ t->debug_id, fd);
4393
+ ret = -ENOMEM;
4394
+ break;
4395
+ }
4396
+ binder_debug(BINDER_DEBUG_TRANSACTION,
4397
+ "fd fixup txn %d fd %d\n",
4398
+ t->debug_id, fd);
4399
+ trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4400
+ fd_install(fd, fixup->file);
4401
+ fixup->file = NULL;
4402
+ if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4403
+ fixup->offset, &fd,
4404
+ sizeof(u32))) {
4405
+ ret = -EINVAL;
4406
+ break;
4407
+ }
4408
+ }
4409
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4410
+ if (fixup->file) {
4411
+ fput(fixup->file);
4412
+ } else if (ret) {
4413
+ u32 fd;
4414
+ int err;
4415
+
4416
+ err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4417
+ t->buffer,
4418
+ fixup->offset,
4419
+ sizeof(fd));
4420
+ WARN_ON(err);
4421
+ if (!err)
4422
+ binder_deferred_fd_close(fd);
4423
+ }
4424
+ list_del(&fixup->fixup_entry);
4425
+ kfree(fixup);
4426
+ }
41634427
41644428 return ret;
41654429 }
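
The allocate-then-install split above is the usual kernel pattern for
publishing a new fd: the number is reserved first, anything that can still
fail happens next, and fd_install() makes the fd visible only as the last
step. Once installed, an fd cannot simply be unwound, which is why the
cleanup loop falls back to binder_deferred_fd_close() for fds that were
already live when a later fixup failed. A generic sketch of the idiom
(illustrative function, not driver code):

    #include <linux/file.h>
    #include <linux/fcntl.h>

    /* Publish @filp to the current process; returns the fd or a -errno. */
    static int publish_file(struct file *filp)
    {
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
            return fd;      /* nothing visible yet, safe to fail */

        /*
         * Any remaining fallible setup goes here; on failure,
         * put_unused_fd(fd) undoes the reservation.
         */

        fd_install(fd, filp);   /* point of no return: fd is live */
        return fd;
    }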
....@@ -4200,6 +4464,7 @@
42004464 wait_event_interruptible(binder_user_error_wait,
42014465 binder_stop_on_user_error < 2);
42024466 }
4467
+ trace_android_vh_binder_restore_priority(NULL, current);
42034468 binder_restore_priority(current, proc->default_priority);
42044469 }
42054470
....@@ -4244,6 +4509,7 @@
42444509 binder_inner_proc_unlock(proc);
42454510 break;
42464511 }
4512
+ trace_android_vh_binder_thread_read(&list, proc, thread);
42474513 w = binder_dequeue_work_head_ilocked(list);
42484514 if (binder_worklist_empty_ilocked(&thread->todo))
42494515 thread->process_todo = false;
....@@ -4267,9 +4533,14 @@
42674533
42684534 binder_stat_br(proc, thread, cmd);
42694535 } break;
4270
- case BINDER_WORK_TRANSACTION_COMPLETE: {
4536
+ case BINDER_WORK_TRANSACTION_COMPLETE:
4537
+ case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4538
+ if (proc->oneway_spam_detection_enabled &&
4539
+ w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4540
+ cmd = BR_ONEWAY_SPAM_SUSPECT;
4541
+ else
4542
+ cmd = BR_TRANSACTION_COMPLETE;
42714543 binder_inner_proc_unlock(proc);
4272
- cmd = BR_TRANSACTION_COMPLETE;
42734544 kfree(w);
42744545 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
42754546 if (put_user(cmd, (uint32_t __user *)ptr))
....@@ -4409,6 +4680,11 @@
44094680 if (cmd == BR_DEAD_BINDER)
44104681 goto done; /* DEAD_BINDER notifications can cause transactions */
44114682 } break;
4683
+ default:
4684
+ binder_inner_proc_unlock(proc);
4685
+ pr_err("%d:%d: bad work type %d\n",
4686
+ proc->pid, thread->pid, w->type);
4687
+ break;
44124688 }
44134689
44144690 if (!t)
....@@ -4442,10 +4718,39 @@
44424718 trd->sender_pid =
44434719 task_tgid_nr_ns(sender,
44444720 task_active_pid_ns(current));
4721
+ trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
44454722 } else {
44464723 trd->sender_pid = 0;
44474724 }
44484725
4726
+ ret = binder_apply_fd_fixups(proc, t);
4727
+ if (ret) {
4728
+ struct binder_buffer *buffer = t->buffer;
4729
+ bool oneway = !!(t->flags & TF_ONE_WAY);
4730
+ int tid = t->debug_id;
4731
+
4732
+ if (t_from)
4733
+ binder_thread_dec_tmpref(t_from);
4734
+ buffer->transaction = NULL;
4735
+ binder_cleanup_transaction(t, "fd fixups failed",
4736
+ BR_FAILED_REPLY);
4737
+ binder_free_buf(proc, thread, buffer, true);
4738
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4739
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4740
+ proc->pid, thread->pid,
4741
+ oneway ? "async " :
4742
+ (cmd == BR_REPLY ? "reply " : ""),
4743
+ tid, BR_FAILED_REPLY, ret, __LINE__);
4744
+ if (cmd == BR_REPLY) {
4745
+ cmd = BR_FAILED_REPLY;
4746
+ if (put_user(cmd, (uint32_t __user *)ptr))
4747
+ return -EFAULT;
4748
+ ptr += sizeof(uint32_t);
4749
+ binder_stat_br(proc, thread, cmd);
4750
+ break;
4751
+ }
4752
+ continue;
4753
+ }
44494754 trd->data_size = t->buffer->data_size;
44504755 trd->offsets_size = t->buffer->offsets_size;
44514756 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
....@@ -4654,9 +4959,14 @@
46544959 static void binder_free_proc(struct binder_proc *proc)
46554960 {
46564961 struct binder_device *device;
4962
+ struct binder_proc_ext *eproc =
4963
+ container_of(proc, struct binder_proc_ext, proc);
46574964
46584965 BUG_ON(!list_empty(&proc->todo));
46594966 BUG_ON(!list_empty(&proc->delivered_death));
4967
+ if (proc->outstanding_txns)
4968
+ pr_warn("%s: Unexpected outstanding_txns %d\n",
4969
+ __func__, proc->outstanding_txns);
46604970 device = container_of(proc->context, struct binder_device, context);
46614971 if (refcount_dec_and_test(&device->ref)) {
46624972 kfree(proc->context->name);
....@@ -4664,9 +4974,10 @@
46644974 }
46654975 binder_alloc_deferred_release(&proc->alloc);
46664976 put_task_struct(proc->tsk);
4667
- put_cred(proc->cred);
4977
+ put_cred(eproc->cred);
46684978 binder_stats_deleted(BINDER_STAT_PROC);
4669
- kfree(proc);
4979
+ trace_android_vh_binder_free_proc(proc);
4980
+ kfree(eproc);
46704981 }
46714982
46724983 static void binder_free_thread(struct binder_thread *thread)
....@@ -4705,6 +5016,8 @@
47055016 spin_lock(&t->lock);
47065017 if (t->to_thread == thread)
47075018 send_reply = t;
5019
+ } else {
5020
+ __acquire(&t->lock);
47085021 }
47095022 thread->is_dead = true;
47105023
....@@ -4718,6 +5031,7 @@
47185031 (t->to_thread == thread) ? "in" : "out");
47195032
47205033 if (t->to_thread == thread) {
5034
+ thread->proc->outstanding_txns--;
47215035 t->to_proc = NULL;
47225036 t->to_thread = NULL;
47235037 if (t->buffer) {
....@@ -4733,7 +5047,11 @@
47335047 spin_unlock(&last_t->lock);
47345048 if (t)
47355049 spin_lock(&t->lock);
5050
+ else
5051
+ __acquire(&t->lock);
47365052 }
5053
+ /* annotation for sparse, lock not acquired in last iteration above */
5054
+ __release(&t->lock);
47375055
47385056 /*
47395057 * If this thread used poll, make sure we remove the waitqueue from any
....@@ -4757,6 +5075,7 @@
47575075 if (send_reply)
47585076 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
47595077 binder_release_work(proc, &thread->todo);
5078
+ trace_android_vh_binder_thread_release(proc, thread);
47605079 binder_thread_dec_tmpref(thread);
47615080 return active_transactions;
47625081 }
....@@ -4833,6 +5152,7 @@
48335152 if (!binder_worklist_empty_ilocked(&proc->todo))
48345153 binder_wakeup_proc_ilocked(proc);
48355154 binder_inner_proc_unlock(proc);
5155
+ trace_android_vh_binder_read_done(proc, thread);
48365156 if (ret < 0) {
48375157 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
48385158 ret = -EFAULT;
....@@ -4867,7 +5187,7 @@
48675187 ret = -EBUSY;
48685188 goto out;
48695189 }
4870
- ret = security_binder_set_context_mgr(proc->cred);
5190
+ ret = security_binder_set_context_mgr(binder_get_cred(proc));
48715191 if (ret < 0)
48725192 goto out;
48735193 if (uid_valid(context->binder_context_mgr_uid)) {
....@@ -4957,6 +5277,100 @@
49575277 }
49585278 }
49595279 binder_inner_proc_unlock(proc);
5280
+
5281
+ return 0;
5282
+}
5283
+
5284
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5285
+{
5286
+ struct rb_node *n;
5287
+ struct binder_thread *thread;
5288
+
5289
+ if (proc->outstanding_txns > 0)
5290
+ return true;
5291
+
5292
+ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5293
+ thread = rb_entry(n, struct binder_thread, rb_node);
5294
+ if (thread->transaction_stack)
5295
+ return true;
5296
+ }
5297
+ return false;
5298
+}
5299
+
5300
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
5301
+ struct binder_proc *target_proc)
5302
+{
5303
+ int ret = 0;
5304
+
5305
+ if (!info->enable) {
5306
+ binder_inner_proc_lock(target_proc);
5307
+ target_proc->sync_recv = false;
5308
+ target_proc->async_recv = false;
5309
+ target_proc->is_frozen = false;
5310
+ binder_inner_proc_unlock(target_proc);
5311
+ return 0;
5312
+ }
5313
+
5314
+ /*
5315
+ * Freezing the target. Prevent new transactions by
5316
+ * setting the frozen state. If a timeout is specified, wait
5317
+ * for transactions to drain.
5318
+ */
5319
+ binder_inner_proc_lock(target_proc);
5320
+ target_proc->sync_recv = false;
5321
+ target_proc->async_recv = false;
5322
+ target_proc->is_frozen = true;
5323
+ binder_inner_proc_unlock(target_proc);
5324
+
5325
+ if (info->timeout_ms > 0)
5326
+ ret = wait_event_interruptible_timeout(
5327
+ target_proc->freeze_wait,
5328
+ (!target_proc->outstanding_txns),
5329
+ msecs_to_jiffies(info->timeout_ms));
5330
+
5331
+ /* Check pending transactions that wait for reply */
5332
+ if (ret >= 0) {
5333
+ binder_inner_proc_lock(target_proc);
5334
+ if (binder_txns_pending_ilocked(target_proc))
5335
+ ret = -EAGAIN;
5336
+ binder_inner_proc_unlock(target_proc);
5337
+ }
5338
+
5339
+ if (ret < 0) {
5340
+ binder_inner_proc_lock(target_proc);
5341
+ target_proc->is_frozen = false;
5342
+ binder_inner_proc_unlock(target_proc);
5343
+ }
5344
+
5345
+ return ret;
5346
+}
5347
+
5348
+static int binder_ioctl_get_freezer_info(
5349
+ struct binder_frozen_status_info *info)
5350
+{
5351
+ struct binder_proc *target_proc;
5352
+ bool found = false;
5353
+ __u32 txns_pending;
5354
+
5355
+ info->sync_recv = 0;
5356
+ info->async_recv = 0;
5357
+
5358
+ mutex_lock(&binder_procs_lock);
5359
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5360
+ if (target_proc->pid == info->pid) {
5361
+ found = true;
5362
+ binder_inner_proc_lock(target_proc);
5363
+ txns_pending = binder_txns_pending_ilocked(target_proc);
5364
+ info->sync_recv |= target_proc->sync_recv |
5365
+ (txns_pending << 1);
5366
+ info->async_recv |= target_proc->async_recv;
5367
+ binder_inner_proc_unlock(target_proc);
5368
+ }
5369
+ }
5370
+ mutex_unlock(&binder_procs_lock);
5371
+
5372
+ if (!found)
5373
+ return -EINVAL;
49605374
49615375 return 0;
49625376 }
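
Taken together, these handlers back a userspace sequence along the
following lines; a sketch assuming the BINDER_FREEZE and
BINDER_GET_FROZEN_INFO uapi referenced by this change, with error handling
abbreviated:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    /* Try to freeze @pid, waiting up to 100 ms for transactions to drain. */
    static int freeze_process(int binder_fd, int pid)
    {
        struct binder_freeze_info info = {
            .pid = pid,
            .enable = 1,
            .timeout_ms = 100,
        };

        if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0)
            return -errno;  /* -EAGAIN: still has outstanding txns */
        return 0;
    }

    /* After thawing, check what the process missed while frozen. */
    static void check_frozen_status(int binder_fd, int pid)
    {
        struct binder_frozen_status_info info = { .pid = pid };

        if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) < 0)
            return;
        if (info.sync_recv & 1)     /* bit 0: a sync txn was rejected */
            fprintf(stderr, "%d: missed a sync transaction\n", pid);
        if (info.sync_recv & 2)     /* bit 1: txns still pending */
            fprintf(stderr, "%d: transactions still pending\n", pid);
        if (info.async_recv)        /* an async txn was queued */
            fprintf(stderr, "%d: received async transactions\n", pid);
    }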
....@@ -5079,6 +5493,96 @@
50795493 }
50805494 break;
50815495 }
5496
+ case BINDER_FREEZE: {
5497
+ struct binder_freeze_info info;
5498
+ struct binder_proc **target_procs = NULL, *target_proc;
5499
+ int target_procs_count = 0, i = 0;
5500
+
5501
+ ret = 0;
5502
+
5503
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
5504
+ ret = -EFAULT;
5505
+ goto err;
5506
+ }
5507
+
5508
+ mutex_lock(&binder_procs_lock);
5509
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5510
+ if (target_proc->pid == info.pid)
5511
+ target_procs_count++;
5512
+ }
5513
+
5514
+ if (target_procs_count == 0) {
5515
+ mutex_unlock(&binder_procs_lock);
5516
+ ret = -EINVAL;
5517
+ goto err;
5518
+ }
5519
+
5520
+ target_procs = kcalloc(target_procs_count,
5521
+ sizeof(struct binder_proc *),
5522
+ GFP_KERNEL);
5523
+
5524
+ if (!target_procs) {
5525
+ mutex_unlock(&binder_procs_lock);
5526
+ ret = -ENOMEM;
5527
+ goto err;
5528
+ }
5529
+
5530
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5531
+ if (target_proc->pid != info.pid)
5532
+ continue;
5533
+
5534
+ binder_inner_proc_lock(target_proc);
5535
+ target_proc->tmp_ref++;
5536
+ binder_inner_proc_unlock(target_proc);
5537
+
5538
+ target_procs[i++] = target_proc;
5539
+ }
5540
+ mutex_unlock(&binder_procs_lock);
5541
+
5542
+ for (i = 0; i < target_procs_count; i++) {
5543
+ if (ret >= 0)
5544
+ ret = binder_ioctl_freeze(&info,
5545
+ target_procs[i]);
5546
+
5547
+ binder_proc_dec_tmpref(target_procs[i]);
5548
+ }
5549
+
5550
+ kfree(target_procs);
5551
+
5552
+ if (ret < 0)
5553
+ goto err;
5554
+ break;
5555
+ }
5556
+ case BINDER_GET_FROZEN_INFO: {
5557
+ struct binder_frozen_status_info info;
5558
+
5559
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
5560
+ ret = -EFAULT;
5561
+ goto err;
5562
+ }
5563
+
5564
+ ret = binder_ioctl_get_freezer_info(&info);
5565
+ if (ret < 0)
5566
+ goto err;
5567
+
5568
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
5569
+ ret = -EFAULT;
5570
+ goto err;
5571
+ }
5572
+ break;
5573
+ }
5574
+ case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5575
+ uint32_t enable;
5576
+
5577
+ if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5578
+ ret = -EFAULT;
5579
+ goto err;
5580
+ }
5581
+ binder_inner_proc_lock(proc);
5582
+ proc->oneway_spam_detection_enabled = (bool)enable;
5583
+ binder_inner_proc_unlock(proc);
5584
+ break;
5585
+ }
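
The corresponding userspace opt-in is a single ioctl; once enabled, the
read loop must also accept BR_ONEWAY_SPAM_SUSPECT wherever it previously
expected only BR_TRANSACTION_COMPLETE (see the binder_thread_read() hunk
above). A sketch:

    uint32_t enable = 1;

    if (ioctl(binder_fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable) < 0)
        perror("BINDER_ENABLE_ONEWAY_SPAM_DETECTION");  /* older kernel? */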
50825586 default:
50835587 ret = -EINVAL;
50845588 goto err;
....@@ -5088,7 +5592,7 @@
50885592 if (thread)
50895593 thread->looper_need_return = false;
50905594 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5091
- if (ret && ret != -ERESTARTSYS)
5595
+ if (ret && ret != -EINTR)
50925596 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
50935597 err_unlocked:
50945598 trace_binder_ioctl_done(ret);
....@@ -5116,7 +5620,6 @@
51165620 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
51175621 (unsigned long)pgprot_val(vma->vm_page_prot));
51185622 binder_alloc_vma_close(&proc->alloc);
5119
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
51205623 }
51215624
51225625 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
....@@ -5132,15 +5635,10 @@
51325635
51335636 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
51345637 {
5135
- int ret;
51365638 struct binder_proc *proc = filp->private_data;
5137
- const char *failure_string;
51385639
51395640 if (proc->tsk != current->group_leader)
51405641 return -EINVAL;
5141
-
5142
- if ((vma->vm_end - vma->vm_start) > SZ_4M)
5143
- vma->vm_end = vma->vm_start + SZ_4M;
51445642
51455643 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
51465644 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
....@@ -5149,9 +5647,9 @@
51495647 (unsigned long)pgprot_val(vma->vm_page_prot));
51505648
51515649 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5152
- ret = -EPERM;
5153
- failure_string = "bad vm_flags";
5154
- goto err_bad_arg;
5650
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5651
+ proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5652
+ return -EPERM;
51555653 }
51565654 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
51575655 vma->vm_flags &= ~VM_MAYWRITE;
....@@ -5159,40 +5657,32 @@
51595657 vma->vm_ops = &binder_vm_ops;
51605658 vma->vm_private_data = proc;
51615659
5162
- ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5163
- if (ret)
5164
- return ret;
5165
- mutex_lock(&proc->files_lock);
5166
- proc->files = get_files_struct(current);
5167
- mutex_unlock(&proc->files_lock);
5168
- return 0;
5169
-
5170
-err_bad_arg:
5171
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5172
- proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5173
- return ret;
5660
+ return binder_alloc_mmap_handler(&proc->alloc, vma);
51745661 }
51755662
51765663 static int binder_open(struct inode *nodp, struct file *filp)
51775664 {
5178
- struct binder_proc *proc;
5665
+ struct binder_proc *proc, *itr;
5666
+ struct binder_proc_ext *eproc;
51795667 struct binder_device *binder_dev;
51805668 struct binderfs_info *info;
51815669 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5670
+ bool existing_pid = false;
51825671
51835672 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
51845673 current->group_leader->pid, current->pid);
51855674
5186
- proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5675
+ eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
5676
+ proc = &eproc->proc;
51875677 if (proc == NULL)
51885678 return -ENOMEM;
51895679 spin_lock_init(&proc->inner_lock);
51905680 spin_lock_init(&proc->outer_lock);
51915681 get_task_struct(current->group_leader);
51925682 proc->tsk = current->group_leader;
5193
- mutex_init(&proc->files_lock);
5194
- proc->cred = get_cred(filp->f_cred);
5683
+ eproc->cred = get_cred(filp->f_cred);
51955684 INIT_LIST_HEAD(&proc->todo);
5685
+ init_waitqueue_head(&proc->freeze_wait);
51965686 if (binder_supported_policy(current->policy)) {
51975687 proc->default_priority.sched_policy = current->policy;
51985688 proc->default_priority.prio = current->normal_prio;
....@@ -5221,19 +5711,24 @@
52215711 filp->private_data = proc;
52225712
52235713 mutex_lock(&binder_procs_lock);
5714
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
5715
+ if (itr->pid == proc->pid) {
5716
+ existing_pid = true;
5717
+ break;
5718
+ }
5719
+ }
52245720 hlist_add_head(&proc->proc_node, &binder_procs);
52255721 mutex_unlock(&binder_procs_lock);
5226
-
5227
- if (binder_debugfs_dir_entry_proc) {
5722
+ trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock);
5723
+ if (binder_debugfs_dir_entry_proc && !existing_pid) {
52285724 char strbuf[11];
52295725
52305726 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
52315727 /*
5232
- * proc debug entries are shared between contexts, so
5233
- * this will fail if the process tries to open the driver
5234
- * again with a different context. The priting code will
5235
- * anyway print all contexts that a given PID has, so this
5236
- * is not a problem.
5728
+ * proc debug entries are shared between contexts.
5729
+ * Only create for the first PID to avoid debugfs log spamming.
5730
+ * The printing code will anyway print all contexts for a given
5731
+ * PID, so this is not a problem.
52375732 */
52385733 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
52395734 binder_debugfs_dir_entry_proc,
....@@ -5241,19 +5736,16 @@
52415736 &proc_fops);
52425737 }
52435738
5244
- if (binder_binderfs_dir_entry_proc) {
5739
+ if (binder_binderfs_dir_entry_proc && !existing_pid) {
52455740 char strbuf[11];
52465741 struct dentry *binderfs_entry;
52475742
52485743 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
52495744 /*
52505745 * Similar to debugfs, the process specific log file is shared
5251
- * between contexts. If the file has already been created for a
5252
- * process, the following binderfs_create_file() call will
5253
- * fail with error code EEXIST if another context of the same
5254
- * process invoked binder_open(). This is ok since same as
5255
- * debugfs, the log file will contain information on all
5256
- * contexts of a given PID.
5746
+ * between contexts. Only create for the first PID.
5747
+ * This is OK since, as with debugfs, the log file will contain
5748
+ * information on all contexts of a given PID.
52575749 */
52585750 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
52595751 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
....@@ -5263,10 +5755,8 @@
52635755 int error;
52645756
52655757 error = PTR_ERR(binderfs_entry);
5266
- if (error != -EEXIST) {
5267
- pr_warn("Unable to create file %s in binderfs (error %d)\n",
5268
- strbuf, error);
5269
- }
5758
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
5759
+ strbuf, error);
52705760 }
52715761 }
52725762
....@@ -5391,8 +5881,6 @@
53915881 struct rb_node *n;
53925882 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
53935883
5394
- BUG_ON(proc->files);
5395
-
53965884 mutex_lock(&binder_procs_lock);
53975885 hlist_del(&proc->proc_node);
53985886 mutex_unlock(&binder_procs_lock);
....@@ -5414,6 +5902,9 @@
54145902 proc->tmp_ref++;
54155903
54165904 proc->is_dead = true;
5905
+ proc->is_frozen = false;
5906
+ proc->sync_recv = false;
5907
+ proc->async_recv = false;
54175908 threads = 0;
54185909 active_transactions = 0;
54195910 while ((n = rb_first(&proc->threads))) {
....@@ -5474,7 +5965,6 @@
54745965 static void binder_deferred_func(struct work_struct *work)
54755966 {
54765967 struct binder_proc *proc;
5477
- struct files_struct *files;
54785968
54795969 int defer;
54805970
....@@ -5492,23 +5982,11 @@
54925982 }
54935983 mutex_unlock(&binder_deferred_lock);
54945984
5495
- files = NULL;
5496
- if (defer & BINDER_DEFERRED_PUT_FILES) {
5497
- mutex_lock(&proc->files_lock);
5498
- files = proc->files;
5499
- if (files)
5500
- proc->files = NULL;
5501
- mutex_unlock(&proc->files_lock);
5502
- }
5503
-
55045985 if (defer & BINDER_DEFERRED_FLUSH)
55055986 binder_deferred_flush(proc);
55065987
55075988 if (defer & BINDER_DEFERRED_RELEASE)
55085989 binder_deferred_release(proc); /* frees proc */
5509
-
5510
- if (files)
5511
- put_files_struct(files);
55125990 } while (proc);
55135991 }
55145992 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
....@@ -5535,6 +6013,7 @@
55356013 struct binder_buffer *buffer = t->buffer;
55366014
55376015 spin_lock(&t->lock);
6016
+ trace_android_vh_binder_print_transaction_info(m, proc, prefix, t);
55386017 to_proc = t->to_proc;
55396018 seq_printf(m,
55406019 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
....@@ -5779,7 +6258,9 @@
57796258 "BR_FINISHED",
57806259 "BR_DEAD_BINDER",
57816260 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5782
- "BR_FAILED_REPLY"
6261
+ "BR_FAILED_REPLY",
6262
+ "BR_FROZEN_REPLY",
6263
+ "BR_ONEWAY_SPAM_SUSPECT",
57836264 };
57846265
57856266 static const char * const binder_command_strings[] = {
....@@ -5920,8 +6401,7 @@
59206401 print_binder_stats(m, " ", &proc->stats);
59216402 }
59226403
5923
-
5924
-int binder_state_show(struct seq_file *m, void *unused)
6404
+static int state_show(struct seq_file *m, void *unused)
59256405 {
59266406 struct binder_proc *proc;
59276407 struct binder_node *node;
....@@ -5960,7 +6440,7 @@
59606440 return 0;
59616441 }
59626442
5963
-int binder_stats_show(struct seq_file *m, void *unused)
6443
+static int stats_show(struct seq_file *m, void *unused)
59646444 {
59656445 struct binder_proc *proc;
59666446
....@@ -5976,7 +6456,7 @@
59766456 return 0;
59776457 }
59786458
5979
-int binder_transactions_show(struct seq_file *m, void *unused)
6459
+static int transactions_show(struct seq_file *m, void *unused)
59806460 {
59816461 struct binder_proc *proc;
59826462
....@@ -6032,7 +6512,7 @@
60326512 "\n" : " (incomplete)\n");
60336513 }
60346514
6035
-int binder_transaction_log_show(struct seq_file *m, void *unused)
6515
+static int transaction_log_show(struct seq_file *m, void *unused)
60366516 {
60376517 struct binder_transaction_log *log = m->private;
60386518 unsigned int log_cur = atomic_read(&log->cur);
....@@ -6057,11 +6537,50 @@
60576537 .owner = THIS_MODULE,
60586538 .poll = binder_poll,
60596539 .unlocked_ioctl = binder_ioctl,
6060
- .compat_ioctl = binder_ioctl,
6540
+ .compat_ioctl = compat_ptr_ioctl,
60616541 .mmap = binder_mmap,
60626542 .open = binder_open,
60636543 .flush = binder_flush,
60646544 .release = binder_release,
6545
+};
6546
+
6547
+DEFINE_SHOW_ATTRIBUTE(state);
6548
+DEFINE_SHOW_ATTRIBUTE(stats);
6549
+DEFINE_SHOW_ATTRIBUTE(transactions);
6550
+DEFINE_SHOW_ATTRIBUTE(transaction_log);
6551
+
6552
+const struct binder_debugfs_entry binder_debugfs_entries[] = {
6553
+ {
6554
+ .name = "state",
6555
+ .mode = 0444,
6556
+ .fops = &state_fops,
6557
+ .data = NULL,
6558
+ },
6559
+ {
6560
+ .name = "stats",
6561
+ .mode = 0444,
6562
+ .fops = &stats_fops,
6563
+ .data = NULL,
6564
+ },
6565
+ {
6566
+ .name = "transactions",
6567
+ .mode = 0444,
6568
+ .fops = &transactions_fops,
6569
+ .data = NULL,
6570
+ },
6571
+ {
6572
+ .name = "transaction_log",
6573
+ .mode = 0444,
6574
+ .fops = &transaction_log_fops,
6575
+ .data = &binder_transaction_log,
6576
+ },
6577
+ {
6578
+ .name = "failed_transaction_log",
6579
+ .mode = 0444,
6580
+ .fops = &transaction_log_fops,
6581
+ .data = &binder_transaction_log_failed,
6582
+ },
6583
+ {} /* terminator */
60656584 };
60666585
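
The {} terminator lets consumers walk the table without a separate length;
binder_for_each_debugfs_entry(), used in init below, is presumably defined
next to struct binder_debugfs_entry in binder_internal.h as something like:

    #define binder_for_each_debugfs_entry(entry)    \
        for ((entry) = binder_debugfs_entries;      \
             (entry)->name;                         \
             (entry)++)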
60676586 static int __init init_binder_device(const char *name)
....@@ -6109,36 +6628,18 @@
61096628 atomic_set(&binder_transaction_log_failed.cur, ~0U);
61106629
61116630 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6112
- if (binder_debugfs_dir_entry_root)
6631
+ if (binder_debugfs_dir_entry_root) {
6632
+ const struct binder_debugfs_entry *db_entry;
6633
+
6634
+ binder_for_each_debugfs_entry(db_entry)
6635
+ debugfs_create_file(db_entry->name,
6636
+ db_entry->mode,
6637
+ binder_debugfs_dir_entry_root,
6638
+ db_entry->data,
6639
+ db_entry->fops);
6640
+
61136641 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
61146642 binder_debugfs_dir_entry_root);
6115
-
6116
- if (binder_debugfs_dir_entry_root) {
6117
- debugfs_create_file("state",
6118
- 0444,
6119
- binder_debugfs_dir_entry_root,
6120
- NULL,
6121
- &binder_state_fops);
6122
- debugfs_create_file("stats",
6123
- 0444,
6124
- binder_debugfs_dir_entry_root,
6125
- NULL,
6126
- &binder_stats_fops);
6127
- debugfs_create_file("transactions",
6128
- 0444,
6129
- binder_debugfs_dir_entry_root,
6130
- NULL,
6131
- &binder_transactions_fops);
6132
- debugfs_create_file("transaction_log",
6133
- 0444,
6134
- binder_debugfs_dir_entry_root,
6135
- &binder_transaction_log,
6136
- &binder_transaction_log_fops);
6137
- debugfs_create_file("failed_transaction_log",
6138
- 0444,
6139
- binder_debugfs_dir_entry_root,
6140
- &binder_transaction_log_failed,
6141
- &binder_transaction_log_fops);
61426643 }
61436644
61446645 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
....@@ -6178,6 +6679,7 @@
61786679
61796680 err_alloc_device_names_failed:
61806681 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6682
+ binder_alloc_shrinker_exit();
61816683
61826684 return ret;
61836685 }
....@@ -6186,5 +6688,6 @@
61866688
61876689 #define CREATE_TRACE_POINTS
61886690 #include "binder_trace.h"
6691
+EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);
61896692
61906693 MODULE_LICENSE("GPL v2");