| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* binder.c |
|---|
| 2 | 3 | * |
|---|
| 3 | 4 | * Android IPC Subsystem |
|---|
| 4 | 5 | * |
|---|
| 5 | 6 | * Copyright (C) 2007-2008 Google, Inc. |
|---|
| 6 | | - * |
|---|
| 7 | | - * This software is licensed under the terms of the GNU General Public |
|---|
| 8 | | - * License version 2, as published by the Free Software Foundation, and |
|---|
| 9 | | - * may be copied, distributed, and modified under those terms. |
|---|
| 10 | | - * |
|---|
| 11 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 14 | | - * GNU General Public License for more details. |
|---|
| 15 | | - * |
|---|
| 16 | 7 | */ |
|---|
| 17 | 8 | |
|---|
| 18 | 9 | /* |
|---|
| .. | .. |
|---|
| 66 | 57 | #include <linux/sched/signal.h> |
|---|
| 67 | 58 | #include <linux/sched/mm.h> |
|---|
| 68 | 59 | #include <linux/seq_file.h> |
|---|
| 60 | +#include <linux/string.h> |
|---|
| 69 | 61 | #include <linux/uaccess.h> |
|---|
| 70 | 62 | #include <linux/pid_namespace.h> |
|---|
| 71 | 63 | #include <linux/security.h> |
|---|
| 72 | 64 | #include <linux/spinlock.h> |
|---|
| 73 | 65 | #include <linux/ratelimit.h> |
|---|
| 66 | +#include <linux/syscalls.h> |
|---|
| 67 | +#include <linux/task_work.h> |
|---|
| 68 | +#include <linux/sizes.h> |
|---|
| 69 | +#include <linux/android_vendor.h> |
|---|
| 74 | 70 | |
|---|
| 75 | | -#include <uapi/linux/android/binder.h> |
|---|
| 76 | 71 | #include <uapi/linux/sched/types.h> |
|---|
| 72 | +#include <uapi/linux/android/binder.h> |
|---|
| 77 | 73 | |
|---|
| 78 | 74 | #include <asm/cacheflush.h> |
|---|
| 79 | 75 | |
|---|
| 80 | | -#include "binder_alloc.h" |
|---|
| 81 | 76 | #include "binder_internal.h" |
|---|
| 82 | 77 | #include "binder_trace.h" |
|---|
| 78 | +#include <trace/hooks/binder.h> |
|---|
| 83 | 79 | |
|---|
| 84 | 80 | static HLIST_HEAD(binder_deferred_list); |
|---|
| 85 | 81 | static DEFINE_MUTEX(binder_deferred_lock); |
|---|
| .. | .. |
|---|
| 97 | 93 | |
|---|
| 98 | 94 | static int proc_show(struct seq_file *m, void *unused); |
|---|
| 99 | 95 | DEFINE_SHOW_ATTRIBUTE(proc); |
|---|
| 100 | | - |
|---|
| 101 | | -/* This is only defined in include/asm-arm/sizes.h */ |
|---|
| 102 | | -#ifndef SZ_1K |
|---|
| 103 | | -#define SZ_1K 0x400 |
|---|
| 104 | | -#endif |
|---|
| 105 | | - |
|---|
| 106 | | -#ifndef SZ_4M |
|---|
| 107 | | -#define SZ_4M 0x400000 |
|---|
| 108 | | -#endif |
|---|
| 109 | 96 | |
|---|
| 110 | 97 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) |
|---|
| 111 | 98 | |
|---|
| .. | .. |
|---|
| 174 | 161 | #define to_binder_fd_array_object(hdr) \ |
|---|
| 175 | 162 | container_of(hdr, struct binder_fd_array_object, hdr) |
|---|
| 176 | 163 | |
|---|
| 177 | | -enum binder_stat_types { |
|---|
| 178 | | - BINDER_STAT_PROC, |
|---|
| 179 | | - BINDER_STAT_THREAD, |
|---|
| 180 | | - BINDER_STAT_NODE, |
|---|
| 181 | | - BINDER_STAT_REF, |
|---|
| 182 | | - BINDER_STAT_DEATH, |
|---|
| 183 | | - BINDER_STAT_TRANSACTION, |
|---|
| 184 | | - BINDER_STAT_TRANSACTION_COMPLETE, |
|---|
| 185 | | - BINDER_STAT_COUNT |
|---|
| 186 | | -}; |
|---|
| 187 | | - |
|---|
| 188 | | -struct binder_stats { |
|---|
| 189 | | - atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; |
|---|
| 190 | | - atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; |
|---|
| 191 | | - atomic_t obj_created[BINDER_STAT_COUNT]; |
|---|
| 192 | | - atomic_t obj_deleted[BINDER_STAT_COUNT]; |
|---|
| 193 | | -}; |
|---|
| 194 | | - |
|---|
| 195 | 164 | static struct binder_stats binder_stats; |
|---|
| 196 | 165 | |
|---|
| 197 | 166 | static inline void binder_stats_deleted(enum binder_stat_types type) |
|---|
| .. | .. |
|---|
| 204 | 173 | atomic_inc(&binder_stats.obj_created[type]); |
|---|
| 205 | 174 | } |
|---|
| 206 | 175 | |
|---|
| 207 | | -struct binder_transaction_log binder_transaction_log; |
|---|
| 208 | | -struct binder_transaction_log binder_transaction_log_failed; |
|---|
| 176 | +struct binder_transaction_log_entry { |
|---|
| 177 | + int debug_id; |
|---|
| 178 | + int debug_id_done; |
|---|
| 179 | + int call_type; |
|---|
| 180 | + int from_proc; |
|---|
| 181 | + int from_thread; |
|---|
| 182 | + int target_handle; |
|---|
| 183 | + int to_proc; |
|---|
| 184 | + int to_thread; |
|---|
| 185 | + int to_node; |
|---|
| 186 | + int data_size; |
|---|
| 187 | + int offsets_size; |
|---|
| 188 | + int return_error_line; |
|---|
| 189 | + uint32_t return_error; |
|---|
| 190 | + uint32_t return_error_param; |
|---|
| 191 | + char context_name[BINDERFS_MAX_NAME + 1]; |
|---|
| 192 | +}; |
|---|
| 193 | + |
|---|
| 194 | +struct binder_transaction_log { |
|---|
| 195 | + atomic_t cur; |
|---|
| 196 | + bool full; |
|---|
| 197 | + struct binder_transaction_log_entry entry[32]; |
|---|
| 198 | +}; |
|---|
| 199 | + |
|---|
| 200 | +static struct binder_transaction_log binder_transaction_log; |
|---|
| 201 | +static struct binder_transaction_log binder_transaction_log_failed; |
|---|
| 209 | 202 | |
|---|
| 210 | 203 | static struct binder_transaction_log_entry *binder_transaction_log_add( |
|---|
| 211 | 204 | struct binder_transaction_log *log) |
|---|
| .. | .. |
|---|
| 227 | 220 | return e; |
|---|
| 228 | 221 | } |
|---|
| 229 | 222 | |
|---|
| 230 | | -/** |
|---|
| 231 | | - * struct binder_work - work enqueued on a worklist |
|---|
| 232 | | - * @entry: node enqueued on list |
|---|
| 233 | | - * @type: type of work to be performed |
|---|
| 234 | | - * |
|---|
| 235 | | - * There are separate work lists for proc, thread, and node (async). |
|---|
| 236 | | - */ |
|---|
| 237 | | -struct binder_work { |
|---|
| 238 | | - struct list_head entry; |
|---|
| 239 | | - |
|---|
| 240 | | - enum binder_work_type { |
|---|
| 241 | | - BINDER_WORK_TRANSACTION = 1, |
|---|
| 242 | | - BINDER_WORK_TRANSACTION_COMPLETE, |
|---|
| 243 | | - BINDER_WORK_RETURN_ERROR, |
|---|
| 244 | | - BINDER_WORK_NODE, |
|---|
| 245 | | - BINDER_WORK_DEAD_BINDER, |
|---|
| 246 | | - BINDER_WORK_DEAD_BINDER_AND_CLEAR, |
|---|
| 247 | | - BINDER_WORK_CLEAR_DEATH_NOTIFICATION, |
|---|
| 248 | | - } type; |
|---|
| 249 | | -}; |
|---|
| 250 | | - |
|---|
| 251 | | -struct binder_error { |
|---|
| 252 | | - struct binder_work work; |
|---|
| 253 | | - uint32_t cmd; |
|---|
| 254 | | -}; |
|---|
| 255 | | - |
|---|
| 256 | | -/** |
|---|
| 257 | | - * struct binder_node - binder node bookkeeping |
|---|
| 258 | | - * @debug_id: unique ID for debugging |
|---|
| 259 | | - * (invariant after initialized) |
|---|
| 260 | | - * @lock: lock for node fields |
|---|
| 261 | | - * @work: worklist element for node work |
|---|
| 262 | | - * (protected by @proc->inner_lock) |
|---|
| 263 | | - * @rb_node: element for proc->nodes tree |
|---|
| 264 | | - * (protected by @proc->inner_lock) |
|---|
| 265 | | - * @dead_node: element for binder_dead_nodes list |
|---|
| 266 | | - * (protected by binder_dead_nodes_lock) |
|---|
| 267 | | - * @proc: binder_proc that owns this node |
|---|
| 268 | | - * (invariant after initialized) |
|---|
| 269 | | - * @refs: list of references on this node |
|---|
| 270 | | - * (protected by @lock) |
|---|
| 271 | | - * @internal_strong_refs: used to take strong references when |
|---|
| 272 | | - * initiating a transaction |
|---|
| 273 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 274 | | - * and by @lock) |
|---|
| 275 | | - * @local_weak_refs: weak user refs from local process |
|---|
| 276 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 277 | | - * and by @lock) |
|---|
| 278 | | - * @local_strong_refs: strong user refs from local process |
|---|
| 279 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 280 | | - * and by @lock) |
|---|
| 281 | | - * @tmp_refs: temporary kernel refs |
|---|
| 282 | | - * (protected by @proc->inner_lock while @proc |
|---|
| 283 | | - * is valid, and by binder_dead_nodes_lock |
|---|
| 284 | | - * if @proc is NULL. During inc/dec and node release |
|---|
| 285 | | - * it is also protected by @lock to provide safety |
|---|
| 286 | | - * as the node dies and @proc becomes NULL) |
|---|
| 287 | | - * @ptr: userspace pointer for node |
|---|
| 288 | | - * (invariant, no lock needed) |
|---|
| 289 | | - * @cookie: userspace cookie for node |
|---|
| 290 | | - * (invariant, no lock needed) |
|---|
| 291 | | - * @has_strong_ref: userspace notified of strong ref |
|---|
| 292 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 293 | | - * and by @lock) |
|---|
| 294 | | - * @pending_strong_ref: userspace has acked notification of strong ref |
|---|
| 295 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 296 | | - * and by @lock) |
|---|
| 297 | | - * @has_weak_ref: userspace notified of weak ref |
|---|
| 298 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 299 | | - * and by @lock) |
|---|
| 300 | | - * @pending_weak_ref: userspace has acked notification of weak ref |
|---|
| 301 | | - * (protected by @proc->inner_lock if @proc |
|---|
| 302 | | - * and by @lock) |
|---|
| 303 | | - * @has_async_transaction: async transaction to node in progress |
|---|
| 304 | | - * (protected by @lock) |
|---|
| 305 | | - * @sched_policy: minimum scheduling policy for node |
|---|
| 306 | | - * (invariant after initialized) |
|---|
| 307 | | - * @accept_fds: file descriptor operations supported for node |
|---|
| 308 | | - * (invariant after initialized) |
|---|
| 309 | | - * @min_priority: minimum scheduling priority |
|---|
| 310 | | - * (invariant after initialized) |
|---|
| 311 | | - * @inherit_rt: inherit RT scheduling policy from caller |
|---|
| 312 | | - * @txn_security_ctx: require sender's security context |
|---|
| 313 | | - * (invariant after initialized) |
|---|
| 314 | | - * @async_todo: list of async work items |
|---|
| 315 | | - * (protected by @proc->inner_lock) |
|---|
| 316 | | - * |
|---|
| 317 | | - * Bookkeeping structure for binder nodes. |
|---|
| 318 | | - */ |
|---|
| 319 | | -struct binder_node { |
|---|
| 320 | | - int debug_id; |
|---|
| 321 | | - spinlock_t lock; |
|---|
| 322 | | - struct binder_work work; |
|---|
| 323 | | - union { |
|---|
| 324 | | - struct rb_node rb_node; |
|---|
| 325 | | - struct hlist_node dead_node; |
|---|
| 326 | | - }; |
|---|
| 327 | | - struct binder_proc *proc; |
|---|
| 328 | | - struct hlist_head refs; |
|---|
| 329 | | - int internal_strong_refs; |
|---|
| 330 | | - int local_weak_refs; |
|---|
| 331 | | - int local_strong_refs; |
|---|
| 332 | | - int tmp_refs; |
|---|
| 333 | | - binder_uintptr_t ptr; |
|---|
| 334 | | - binder_uintptr_t cookie; |
|---|
| 335 | | - struct { |
|---|
| 336 | | - /* |
|---|
| 337 | | - * bitfield elements protected by |
|---|
| 338 | | - * proc inner_lock |
|---|
| 339 | | - */ |
|---|
| 340 | | - u8 has_strong_ref:1; |
|---|
| 341 | | - u8 pending_strong_ref:1; |
|---|
| 342 | | - u8 has_weak_ref:1; |
|---|
| 343 | | - u8 pending_weak_ref:1; |
|---|
| 344 | | - }; |
|---|
| 345 | | - struct { |
|---|
| 346 | | - /* |
|---|
| 347 | | - * invariant after initialization |
|---|
| 348 | | - */ |
|---|
| 349 | | - u8 sched_policy:2; |
|---|
| 350 | | - u8 inherit_rt:1; |
|---|
| 351 | | - u8 accept_fds:1; |
|---|
| 352 | | - u8 txn_security_ctx:1; |
|---|
| 353 | | - u8 min_priority; |
|---|
| 354 | | - }; |
|---|
| 355 | | - bool has_async_transaction; |
|---|
| 356 | | - struct list_head async_todo; |
|---|
| 357 | | -}; |
|---|
| 358 | | - |
|---|
| 359 | | -struct binder_ref_death { |
|---|
| 360 | | - /** |
|---|
| 361 | | - * @work: worklist element for death notifications |
|---|
| 362 | | - * (protected by inner_lock of the proc that |
|---|
| 363 | | - * this ref belongs to) |
|---|
| 364 | | - */ |
|---|
| 365 | | - struct binder_work work; |
|---|
| 366 | | - binder_uintptr_t cookie; |
|---|
| 367 | | -}; |
|---|
| 368 | | - |
|---|
| 369 | | -/** |
|---|
| 370 | | - * struct binder_ref_data - binder_ref counts and id |
|---|
| 371 | | - * @debug_id: unique ID for the ref |
|---|
| 372 | | - * @desc: unique userspace handle for ref |
|---|
| 373 | | - * @strong: strong ref count (debugging only if not locked) |
|---|
| 374 | | - * @weak: weak ref count (debugging only if not locked) |
|---|
| 375 | | - * |
|---|
| 376 | | - * Structure to hold ref count and ref id information. Since |
|---|
| 377 | | - * the actual ref can only be accessed with a lock, this structure |
|---|
| 378 | | - * is used to return information about the ref to callers of |
|---|
| 379 | | - * ref inc/dec functions. |
|---|
| 380 | | - */ |
|---|
| 381 | | -struct binder_ref_data { |
|---|
| 382 | | - int debug_id; |
|---|
| 383 | | - uint32_t desc; |
|---|
| 384 | | - int strong; |
|---|
| 385 | | - int weak; |
|---|
| 386 | | -}; |
|---|
| 387 | | - |
|---|
| 388 | | -/** |
|---|
| 389 | | - * struct binder_ref - struct to track references on nodes |
|---|
| 390 | | - * @data: binder_ref_data containing id, handle, and current refcounts |
|---|
| 391 | | - * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree |
|---|
| 392 | | - * @rb_node_node: node for lookup by @node in proc's rb_tree |
|---|
| 393 | | - * @node_entry: list entry for node->refs list in target node |
|---|
| 394 | | - * (protected by @node->lock) |
|---|
| 395 | | - * @proc: binder_proc containing ref |
|---|
| 396 | | - * @node: binder_node of target node. When cleaning up a |
|---|
| 397 | | - * ref for deletion in binder_cleanup_ref, a non-NULL |
|---|
| 398 | | - * @node indicates the node must be freed |
|---|
| 399 | | - * @death: pointer to death notification (ref_death) if requested |
|---|
| 400 | | - * (protected by @node->lock) |
|---|
| 401 | | - * |
|---|
| 402 | | - * Structure to track references from procA to target node (on procB). This |
|---|
| 403 | | - * structure is unsafe to access without holding @proc->outer_lock. |
|---|
| 404 | | - */ |
|---|
| 405 | | -struct binder_ref { |
|---|
| 406 | | - /* Lookups needed: */ |
|---|
| 407 | | - /* node + proc => ref (transaction) */ |
|---|
| 408 | | - /* desc + proc => ref (transaction, inc/dec ref) */ |
|---|
| 409 | | - /* node => refs + procs (proc exit) */ |
|---|
| 410 | | - struct binder_ref_data data; |
|---|
| 411 | | - struct rb_node rb_node_desc; |
|---|
| 412 | | - struct rb_node rb_node_node; |
|---|
| 413 | | - struct hlist_node node_entry; |
|---|
| 414 | | - struct binder_proc *proc; |
|---|
| 415 | | - struct binder_node *node; |
|---|
| 416 | | - struct binder_ref_death *death; |
|---|
| 417 | | -}; |
|---|
| 418 | | - |
|---|
| 419 | 223 | enum binder_deferred_state { |
|---|
| 420 | | - BINDER_DEFERRED_PUT_FILES = 0x01, |
|---|
| 421 | | - BINDER_DEFERRED_FLUSH = 0x02, |
|---|
| 422 | | - BINDER_DEFERRED_RELEASE = 0x04, |
|---|
| 423 | | -}; |
|---|
| 424 | | - |
|---|
| 425 | | -/** |
|---|
| 426 | | - * struct binder_priority - scheduler policy and priority |
|---|
| 427 | | - * @sched_policy scheduler policy |
|---|
| 428 | | - * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT |
|---|
| 429 | | - * |
|---|
| 430 | | - * The binder driver supports inheriting the following scheduler policies: |
|---|
| 431 | | - * SCHED_NORMAL |
|---|
| 432 | | - * SCHED_BATCH |
|---|
| 433 | | - * SCHED_FIFO |
|---|
| 434 | | - * SCHED_RR |
|---|
| 435 | | - */ |
|---|
| 436 | | -struct binder_priority { |
|---|
| 437 | | - unsigned int sched_policy; |
|---|
| 438 | | - int prio; |
|---|
| 439 | | -}; |
|---|
| 440 | | - |
|---|
| 441 | | -/** |
|---|
| 442 | | - * struct binder_proc - binder process bookkeeping |
|---|
| 443 | | - * @proc_node: element for binder_procs list |
|---|
| 444 | | - * @threads: rbtree of binder_threads in this proc |
|---|
| 445 | | - * (protected by @inner_lock) |
|---|
| 446 | | - * @nodes: rbtree of binder nodes associated with |
|---|
| 447 | | - * this proc ordered by node->ptr |
|---|
| 448 | | - * (protected by @inner_lock) |
|---|
| 449 | | - * @refs_by_desc: rbtree of refs ordered by ref->desc |
|---|
| 450 | | - * (protected by @outer_lock) |
|---|
| 451 | | - * @refs_by_node: rbtree of refs ordered by ref->node |
|---|
| 452 | | - * (protected by @outer_lock) |
|---|
| 453 | | - * @waiting_threads: threads currently waiting for proc work |
|---|
| 454 | | - * (protected by @inner_lock) |
|---|
| 455 | | - * @pid PID of group_leader of process |
|---|
| 456 | | - * (invariant after initialized) |
|---|
| 457 | | - * @tsk task_struct for group_leader of process |
|---|
| 458 | | - * (invariant after initialized) |
|---|
| 459 | | - * @files files_struct for process |
|---|
| 460 | | - * (protected by @files_lock) |
|---|
| 461 | | - * @files_lock mutex to protect @files |
|---|
| 462 | | - * @cred struct cred associated with the `struct file` |
|---|
| 463 | | - * in binder_open() |
|---|
| 464 | | - * (invariant after initialized) |
|---|
| 465 | | - * @deferred_work_node: element for binder_deferred_list |
|---|
| 466 | | - * (protected by binder_deferred_lock) |
|---|
| 467 | | - * @deferred_work: bitmap of deferred work to perform |
|---|
| 468 | | - * (protected by binder_deferred_lock) |
|---|
| 469 | | - * @is_dead: process is dead and awaiting free |
|---|
| 470 | | - * when outstanding transactions are cleaned up |
|---|
| 471 | | - * (protected by @inner_lock) |
|---|
| 472 | | - * @todo: list of work for this process |
|---|
| 473 | | - * (protected by @inner_lock) |
|---|
| 474 | | - * @stats: per-process binder statistics |
|---|
| 475 | | - * (atomics, no lock needed) |
|---|
| 476 | | - * @delivered_death: list of delivered death notification |
|---|
| 477 | | - * (protected by @inner_lock) |
|---|
| 478 | | - * @max_threads: cap on number of binder threads |
|---|
| 479 | | - * (protected by @inner_lock) |
|---|
| 480 | | - * @requested_threads: number of binder threads requested but not |
|---|
| 481 | | - * yet started. In current implementation, can |
|---|
| 482 | | - * only be 0 or 1. |
|---|
| 483 | | - * (protected by @inner_lock) |
|---|
| 484 | | - * @requested_threads_started: number binder threads started |
|---|
| 485 | | - * (protected by @inner_lock) |
|---|
| 486 | | - * @tmp_ref: temporary reference to indicate proc is in use |
|---|
| 487 | | - * (protected by @inner_lock) |
|---|
| 488 | | - * @default_priority: default scheduler priority |
|---|
| 489 | | - * (invariant after initialized) |
|---|
| 490 | | - * @debugfs_entry: debugfs node |
|---|
| 491 | | - * @alloc: binder allocator bookkeeping |
|---|
| 492 | | - * @context: binder_context for this proc |
|---|
| 493 | | - * (invariant after initialized) |
|---|
| 494 | | - * @inner_lock: can nest under outer_lock and/or node lock |
|---|
| 495 | | - * @outer_lock: no nesting under innor or node lock |
|---|
| 496 | | - * Lock order: 1) outer, 2) node, 3) inner |
|---|
| 497 | | - * @binderfs_entry: process-specific binderfs log file |
|---|
| 498 | | - * |
|---|
| 499 | | - * Bookkeeping structure for binder processes |
|---|
| 500 | | - */ |
|---|
| 501 | | -struct binder_proc { |
|---|
| 502 | | - struct hlist_node proc_node; |
|---|
| 503 | | - struct rb_root threads; |
|---|
| 504 | | - struct rb_root nodes; |
|---|
| 505 | | - struct rb_root refs_by_desc; |
|---|
| 506 | | - struct rb_root refs_by_node; |
|---|
| 507 | | - struct list_head waiting_threads; |
|---|
| 508 | | - int pid; |
|---|
| 509 | | - struct task_struct *tsk; |
|---|
| 510 | | - struct files_struct *files; |
|---|
| 511 | | - struct mutex files_lock; |
|---|
| 512 | | - const struct cred *cred; |
|---|
| 513 | | - struct hlist_node deferred_work_node; |
|---|
| 514 | | - int deferred_work; |
|---|
| 515 | | - bool is_dead; |
|---|
| 516 | | - |
|---|
| 517 | | - struct list_head todo; |
|---|
| 518 | | - struct binder_stats stats; |
|---|
| 519 | | - struct list_head delivered_death; |
|---|
| 520 | | - int max_threads; |
|---|
| 521 | | - int requested_threads; |
|---|
| 522 | | - int requested_threads_started; |
|---|
| 523 | | - int tmp_ref; |
|---|
| 524 | | - struct binder_priority default_priority; |
|---|
| 525 | | - struct dentry *debugfs_entry; |
|---|
| 526 | | - struct binder_alloc alloc; |
|---|
| 527 | | - struct binder_context *context; |
|---|
| 528 | | - spinlock_t inner_lock; |
|---|
| 529 | | - spinlock_t outer_lock; |
|---|
| 530 | | - struct dentry *binderfs_entry; |
|---|
| 224 | + BINDER_DEFERRED_FLUSH = 0x01, |
|---|
| 225 | + BINDER_DEFERRED_RELEASE = 0x02, |
|---|
| 531 | 226 | }; |
|---|
| 532 | 227 | |
|---|
| 533 | 228 | enum { |
|---|
| .. | .. |
|---|
| 540 | 235 | }; |
|---|
| 541 | 236 | |
|---|
| 542 | 237 | /** |
|---|
| 543 | | - * struct binder_thread - binder thread bookkeeping |
|---|
| 544 | | - * @proc: binder process for this thread |
|---|
| 545 | | - * (invariant after initialization) |
|---|
| 546 | | - * @rb_node: element for proc->threads rbtree |
|---|
| 547 | | - * (protected by @proc->inner_lock) |
|---|
| 548 | | - * @waiting_thread_node: element for @proc->waiting_threads list |
|---|
| 549 | | - * (protected by @proc->inner_lock) |
|---|
| 550 | | - * @pid: PID for this thread |
|---|
| 551 | | - * (invariant after initialization) |
|---|
| 552 | | - * @looper: bitmap of looping state |
|---|
| 553 | | - * (only accessed by this thread) |
|---|
| 554 | | - * @looper_needs_return: looping thread needs to exit driver |
|---|
| 555 | | - * (no lock needed) |
|---|
| 556 | | - * @transaction_stack: stack of in-progress transactions for this thread |
|---|
| 557 | | - * (protected by @proc->inner_lock) |
|---|
| 558 | | - * @todo: list of work to do for this thread |
|---|
| 559 | | - * (protected by @proc->inner_lock) |
|---|
| 560 | | - * @process_todo: whether work in @todo should be processed |
|---|
| 561 | | - * (protected by @proc->inner_lock) |
|---|
| 562 | | - * @return_error: transaction errors reported by this thread |
|---|
| 563 | | - * (only accessed by this thread) |
|---|
| 564 | | - * @reply_error: transaction errors reported by target thread |
|---|
| 565 | | - * (protected by @proc->inner_lock) |
|---|
| 566 | | - * @wait: wait queue for thread work |
|---|
| 567 | | - * @stats: per-thread statistics |
|---|
| 568 | | - * (atomics, no lock needed) |
|---|
| 569 | | - * @tmp_ref: temporary reference to indicate thread is in use |
|---|
| 570 | | - * (atomic since @proc->inner_lock cannot |
|---|
| 571 | | - * always be acquired) |
|---|
| 572 | | - * @is_dead: thread is dead and awaiting free |
|---|
| 573 | | - * when outstanding transactions are cleaned up |
|---|
| 574 | | - * (protected by @proc->inner_lock) |
|---|
| 575 | | - * @task: struct task_struct for this thread |
|---|
| 576 | | - * |
|---|
| 577 | | - * Bookkeeping structure for binder threads. |
|---|
| 578 | | - */ |
|---|
| 579 | | -struct binder_thread { |
|---|
| 580 | | - struct binder_proc *proc; |
|---|
| 581 | | - struct rb_node rb_node; |
|---|
| 582 | | - struct list_head waiting_thread_node; |
|---|
| 583 | | - int pid; |
|---|
| 584 | | - int looper; /* only modified by this thread */ |
|---|
| 585 | | - bool looper_need_return; /* can be written by other thread */ |
|---|
| 586 | | - struct binder_transaction *transaction_stack; |
|---|
| 587 | | - struct list_head todo; |
|---|
| 588 | | - bool process_todo; |
|---|
| 589 | | - struct binder_error return_error; |
|---|
| 590 | | - struct binder_error reply_error; |
|---|
| 591 | | - wait_queue_head_t wait; |
|---|
| 592 | | - struct binder_stats stats; |
|---|
| 593 | | - atomic_t tmp_ref; |
|---|
| 594 | | - bool is_dead; |
|---|
| 595 | | - struct task_struct *task; |
|---|
| 596 | | -}; |
|---|
| 597 | | - |
|---|
| 598 | | -struct binder_transaction { |
|---|
| 599 | | - int debug_id; |
|---|
| 600 | | - struct binder_work work; |
|---|
| 601 | | - struct binder_thread *from; |
|---|
| 602 | | - struct binder_transaction *from_parent; |
|---|
| 603 | | - struct binder_proc *to_proc; |
|---|
| 604 | | - struct binder_thread *to_thread; |
|---|
| 605 | | - struct binder_transaction *to_parent; |
|---|
| 606 | | - unsigned need_reply:1; |
|---|
| 607 | | - /* unsigned is_dead:1; */ /* not used at the moment */ |
|---|
| 608 | | - |
|---|
| 609 | | - struct binder_buffer *buffer; |
|---|
| 610 | | - unsigned int code; |
|---|
| 611 | | - unsigned int flags; |
|---|
| 612 | | - struct binder_priority priority; |
|---|
| 613 | | - struct binder_priority saved_priority; |
|---|
| 614 | | - bool set_priority_called; |
|---|
| 615 | | - kuid_t sender_euid; |
|---|
| 616 | | - binder_uintptr_t security_ctx; |
|---|
| 617 | | - /** |
|---|
| 618 | | - * @lock: protects @from, @to_proc, and @to_thread |
|---|
| 619 | | - * |
|---|
| 620 | | - * @from, @to_proc, and @to_thread can be set to NULL |
|---|
| 621 | | - * during thread teardown |
|---|
| 622 | | - */ |
|---|
| 623 | | - spinlock_t lock; |
|---|
| 624 | | -}; |
|---|
| 625 | | - |
|---|
| 626 | | -/** |
|---|
| 627 | | - * struct binder_object - union of flat binder object types |
|---|
| 628 | | - * @hdr: generic object header |
|---|
| 629 | | - * @fbo: binder object (nodes and refs) |
|---|
| 630 | | - * @fdo: file descriptor object |
|---|
| 631 | | - * @bbo: binder buffer pointer |
|---|
| 632 | | - * @fdao: file descriptor array |
|---|
| 633 | | - * |
|---|
| 634 | | - * Used for type-independent object copies |
|---|
| 635 | | - */ |
|---|
| 636 | | -struct binder_object { |
|---|
| 637 | | - union { |
|---|
| 638 | | - struct binder_object_header hdr; |
|---|
| 639 | | - struct flat_binder_object fbo; |
|---|
| 640 | | - struct binder_fd_object fdo; |
|---|
| 641 | | - struct binder_buffer_object bbo; |
|---|
| 642 | | - struct binder_fd_array_object fdao; |
|---|
| 643 | | - }; |
|---|
| 644 | | -}; |
|---|
| 645 | | - |
|---|
| 646 | | -/** |
|---|
| 647 | 238 | * binder_proc_lock() - Acquire outer lock for given binder_proc |
|---|
| 648 | 239 | * @proc: struct binder_proc to acquire |
|---|
| 649 | 240 | * |
|---|
| .. | .. |
|---|
| 653 | 244 | #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) |
|---|
| 654 | 245 | static void |
|---|
| 655 | 246 | _binder_proc_lock(struct binder_proc *proc, int line) |
|---|
| 247 | + __acquires(&proc->outer_lock) |
|---|
| 656 | 248 | { |
|---|
| 657 | 249 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 658 | 250 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 668 | 260 | #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) |
|---|
| 669 | 261 | static void |
|---|
| 670 | 262 | _binder_proc_unlock(struct binder_proc *proc, int line) |
|---|
| 263 | + __releases(&proc->outer_lock) |
|---|
| 671 | 264 | { |
|---|
| 672 | 265 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 673 | 266 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 683 | 276 | #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) |
|---|
| 684 | 277 | static void |
|---|
| 685 | 278 | _binder_inner_proc_lock(struct binder_proc *proc, int line) |
|---|
| 279 | + __acquires(&proc->inner_lock) |
|---|
| 686 | 280 | { |
|---|
| 687 | 281 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 688 | 282 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 698 | 292 | #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) |
|---|
| 699 | 293 | static void |
|---|
| 700 | 294 | _binder_inner_proc_unlock(struct binder_proc *proc, int line) |
|---|
| 295 | + __releases(&proc->inner_lock) |
|---|
| 701 | 296 | { |
|---|
| 702 | 297 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 703 | 298 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 713 | 308 | #define binder_node_lock(node) _binder_node_lock(node, __LINE__) |
|---|
| 714 | 309 | static void |
|---|
| 715 | 310 | _binder_node_lock(struct binder_node *node, int line) |
|---|
| 311 | + __acquires(&node->lock) |
|---|
| 716 | 312 | { |
|---|
| 717 | 313 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 718 | 314 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 728 | 324 | #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) |
|---|
| 729 | 325 | static void |
|---|
| 730 | 326 | _binder_node_unlock(struct binder_node *node, int line) |
|---|
| 327 | + __releases(&node->lock) |
|---|
| 731 | 328 | { |
|---|
| 732 | 329 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 733 | 330 | "%s: line=%d\n", __func__, line); |
|---|
| .. | .. |
|---|
| 744 | 341 | #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) |
|---|
| 745 | 342 | static void |
|---|
| 746 | 343 | _binder_node_inner_lock(struct binder_node *node, int line) |
|---|
| 344 | + __acquires(&node->lock) __acquires(&node->proc->inner_lock) |
|---|
| 747 | 345 | { |
|---|
| 748 | 346 | binder_debug(BINDER_DEBUG_SPINLOCKS, |
|---|
| 749 | 347 | "%s: line=%d\n", __func__, line); |
|---|
| 750 | 348 | spin_lock(&node->lock); |
|---|
| 751 | 349 | if (node->proc) |
|---|
| 752 | 350 | binder_inner_proc_lock(node->proc); |
|---|
| 351 | + else |
|---|
| 352 | + /* annotation for sparse */ |
|---|
| 353 | + __acquire(&node->proc->inner_lock); |
|---|
| 753 | 354 | } |
|---|
| 754 | 355 | |
|---|
| 755 | 356 | /** |
|---|
| .. | .. |
|---|
| 761 | 362 | #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) |
|---|
| 762 | 363 | static void |
|---|
| 763 | 364 | _binder_node_inner_unlock(struct binder_node *node, int line) |
|---|
| 365 | + __releases(&node->lock) __releases(&node->proc->inner_lock) |
|---|
| 764 | 366 | { |
|---|
| 765 | 367 | struct binder_proc *proc = node->proc; |
|---|
| 766 | 368 | |
|---|
| .. | .. |
|---|
| 768 | 370 | "%s: line=%d\n", __func__, line); |
|---|
| 769 | 371 | if (proc) |
|---|
| 770 | 372 | binder_inner_proc_unlock(proc); |
|---|
| 373 | + else |
|---|
| 374 | + /* annotation for sparse */ |
|---|
| 375 | + __release(&node->proc->inner_lock); |
|---|
| 771 | 376 | spin_unlock(&node->lock); |
|---|
| 772 | 377 | } |
|---|
| 773 | 378 | |
|---|
| .. | .. |
|---|
| 907 | 512 | static void binder_free_proc(struct binder_proc *proc); |
|---|
| 908 | 513 | static void binder_inc_node_tmpref_ilocked(struct binder_node *node); |
|---|
| 909 | 514 | |
|---|
| 910 | | -static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) |
|---|
| 911 | | -{ |
|---|
| 912 | | - unsigned long rlim_cur; |
|---|
| 913 | | - unsigned long irqs; |
|---|
| 914 | | - int ret; |
|---|
| 915 | | - |
|---|
| 916 | | - mutex_lock(&proc->files_lock); |
|---|
| 917 | | - if (proc->files == NULL) { |
|---|
| 918 | | - ret = -ESRCH; |
|---|
| 919 | | - goto err; |
|---|
| 920 | | - } |
|---|
| 921 | | - if (!lock_task_sighand(proc->tsk, &irqs)) { |
|---|
| 922 | | - ret = -EMFILE; |
|---|
| 923 | | - goto err; |
|---|
| 924 | | - } |
|---|
| 925 | | - rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); |
|---|
| 926 | | - unlock_task_sighand(proc->tsk, &irqs); |
|---|
| 927 | | - |
|---|
| 928 | | - ret = __alloc_fd(proc->files, 0, rlim_cur, flags); |
|---|
| 929 | | -err: |
|---|
| 930 | | - mutex_unlock(&proc->files_lock); |
|---|
| 931 | | - return ret; |
|---|
| 932 | | -} |
|---|
| 933 | | - |
|---|
| 934 | | -/* |
|---|
| 935 | | - * copied from fd_install |
|---|
| 936 | | - */ |
|---|
| 937 | | -static void task_fd_install( |
|---|
| 938 | | - struct binder_proc *proc, unsigned int fd, struct file *file) |
|---|
| 939 | | -{ |
|---|
| 940 | | - mutex_lock(&proc->files_lock); |
|---|
| 941 | | - if (proc->files) |
|---|
| 942 | | - __fd_install(proc->files, fd, file); |
|---|
| 943 | | - mutex_unlock(&proc->files_lock); |
|---|
| 944 | | -} |
|---|
| 945 | | - |
|---|
| 946 | | -/* |
|---|
| 947 | | - * copied from sys_close |
|---|
| 948 | | - */ |
|---|
| 949 | | -static long task_close_fd(struct binder_proc *proc, unsigned int fd) |
|---|
| 950 | | -{ |
|---|
| 951 | | - int retval; |
|---|
| 952 | | - |
|---|
| 953 | | - mutex_lock(&proc->files_lock); |
|---|
| 954 | | - if (proc->files == NULL) { |
|---|
| 955 | | - retval = -ESRCH; |
|---|
| 956 | | - goto err; |
|---|
| 957 | | - } |
|---|
| 958 | | - retval = __close_fd(proc->files, fd); |
|---|
| 959 | | - /* can't restart close syscall because file table entry was cleared */ |
|---|
| 960 | | - if (unlikely(retval == -ERESTARTSYS || |
|---|
| 961 | | - retval == -ERESTARTNOINTR || |
|---|
| 962 | | - retval == -ERESTARTNOHAND || |
|---|
| 963 | | - retval == -ERESTART_RESTARTBLOCK)) |
|---|
| 964 | | - retval = -EINTR; |
|---|
| 965 | | -err: |
|---|
| 966 | | - mutex_unlock(&proc->files_lock); |
|---|
| 967 | | - return retval; |
|---|
| 968 | | -} |
|---|
| 969 | | - |
|---|
| 970 | 515 | static bool binder_has_work_ilocked(struct binder_thread *thread, |
|---|
| 971 | 516 | bool do_proc_work) |
|---|
| 972 | 517 | { |
|---|
| 518 | + int ret = 0; |
|---|
| 519 | + |
|---|
| 520 | + trace_android_vh_binder_has_work_ilocked(thread, do_proc_work, &ret); |
|---|
| 521 | + if (ret) |
|---|
| 522 | + return true; |
|---|
| 973 | 523 | return thread->process_todo || |
|---|
| 974 | 524 | thread->looper_need_return || |
|---|
| 975 | 525 | (do_proc_work && |
|---|
| .. | .. |
|---|
| 1005 | 555 | thread = rb_entry(n, struct binder_thread, rb_node); |
|---|
| 1006 | 556 | if (thread->looper & BINDER_LOOPER_STATE_POLL && |
|---|
| 1007 | 557 | binder_available_for_proc_work_ilocked(thread)) { |
|---|
| 558 | + trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc); |
|---|
| 1008 | 559 | if (sync) |
|---|
| 1009 | 560 | wake_up_interruptible_sync(&thread->wait); |
|---|
| 1010 | 561 | else |
|---|
| .. | .. |
|---|
| 1064 | 615 | assert_spin_locked(&proc->inner_lock); |
|---|
| 1065 | 616 | |
|---|
| 1066 | 617 | if (thread) { |
|---|
| 618 | + trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc); |
|---|
| 1067 | 619 | if (sync) |
|---|
| 1068 | 620 | wake_up_interruptible_sync(&thread->wait); |
|---|
| 1069 | 621 | else |
|---|
| .. | .. |
|---|
| 1206 | 758 | bool inherit_rt) |
|---|
| 1207 | 759 | { |
|---|
| 1208 | 760 | struct binder_priority desired_prio = t->priority; |
|---|
| 761 | + bool skip = false; |
|---|
| 1209 | 762 | |
|---|
| 1210 | 763 | if (t->set_priority_called) |
|---|
| 1211 | 764 | return; |
|---|
| .. | .. |
|---|
| 1213 | 766 | t->set_priority_called = true; |
|---|
| 1214 | 767 | t->saved_priority.sched_policy = task->policy; |
|---|
| 1215 | 768 | t->saved_priority.prio = task->normal_prio; |
|---|
| 769 | + |
|---|
| 770 | + trace_android_vh_binder_priority_skip(task, &skip); |
|---|
| 771 | + if (skip) |
|---|
| 772 | + return; |
|---|
| 1216 | 773 | |
|---|
| 1217 | 774 | if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) { |
|---|
| 1218 | 775 | desired_prio.prio = NICE_TO_PRIO(0); |
|---|
| .. | .. |
|---|
| 1233 | 790 | } |
|---|
| 1234 | 791 | |
|---|
| 1235 | 792 | binder_set_priority(task, desired_prio); |
|---|
| 793 | + trace_android_vh_binder_set_priority(t, task); |
|---|
| 1236 | 794 | } |
|---|
| 1237 | 795 | |
|---|
| 1238 | 796 | static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, |
|---|
| .. | .. |
|---|
| 1545 | 1103 | binder_node_inner_lock(node); |
|---|
| 1546 | 1104 | if (!node->proc) |
|---|
| 1547 | 1105 | spin_lock(&binder_dead_nodes_lock); |
|---|
| 1106 | + else |
|---|
| 1107 | + __acquire(&binder_dead_nodes_lock); |
|---|
| 1548 | 1108 | node->tmp_refs--; |
|---|
| 1549 | 1109 | BUG_ON(node->tmp_refs < 0); |
|---|
| 1550 | 1110 | if (!node->proc) |
|---|
| 1551 | 1111 | spin_unlock(&binder_dead_nodes_lock); |
|---|
| 1112 | + else |
|---|
| 1113 | + __release(&binder_dead_nodes_lock); |
|---|
| 1552 | 1114 | /* |
|---|
| 1553 | 1115 | * Call binder_dec_node() to check if all refcounts are 0 |
|---|
| 1554 | 1116 | * and cleanup is needed. Calling with strong=0 and internal=1 |
|---|
| .. | .. |
|---|
| 1669 | 1231 | "%d new ref %d desc %d for node %d\n", |
|---|
| 1670 | 1232 | proc->pid, new_ref->data.debug_id, new_ref->data.desc, |
|---|
| 1671 | 1233 | node->debug_id); |
|---|
| 1234 | + trace_android_vh_binder_new_ref(proc->tsk, new_ref->data.desc, new_ref->node->debug_id); |
|---|
| 1672 | 1235 | binder_node_unlock(node); |
|---|
| 1673 | 1236 | return new_ref; |
|---|
| 1674 | 1237 | } |
|---|
| .. | .. |
|---|
| 1836 | 1399 | */ |
|---|
| 1837 | 1400 | static void binder_free_ref(struct binder_ref *ref) |
|---|
| 1838 | 1401 | { |
|---|
| 1402 | + trace_android_vh_binder_del_ref(ref->proc ? ref->proc->tsk : 0, ref->data.desc); |
|---|
| 1839 | 1403 | if (ref->node) |
|---|
| 1840 | 1404 | binder_free_node(ref->node); |
|---|
| 1841 | 1405 | kfree(ref->death); |
|---|
| .. | .. |
|---|
| 1940 | 1504 | } |
|---|
| 1941 | 1505 | ret = binder_inc_ref_olocked(ref, strong, target_list); |
|---|
| 1942 | 1506 | *rdata = ref->data; |
|---|
| 1507 | + if (ret && ref == new_ref) { |
|---|
| 1508 | + /* |
|---|
| 1509 | + * Cleanup the failed reference here as the target |
|---|
| 1510 | + * could now be dead and have already released its |
|---|
| 1511 | + * references by now. Calling on the new reference |
|---|
| 1512 | + * with strong=0 and a tmp_refs will not decrement |
|---|
| 1513 | + * the node. The new_ref gets kfree'd below. |
|---|
| 1514 | + */ |
|---|
| 1515 | + binder_cleanup_ref_olocked(new_ref); |
|---|
| 1516 | + ref = NULL; |
|---|
| 1517 | + } |
|---|
| 1518 | + |
|---|
| 1943 | 1519 | binder_proc_unlock(proc); |
|---|
| 1944 | 1520 | if (new_ref && ref != new_ref) |
|---|
| 1945 | 1521 | /* |
|---|
| .. | .. |
|---|
| 2051 | 1627 | */ |
|---|
| 2052 | 1628 | static struct binder_thread *binder_get_txn_from_and_acq_inner( |
|---|
| 2053 | 1629 | struct binder_transaction *t) |
|---|
| 1630 | + __acquires(&t->from->proc->inner_lock) |
|---|
| 2054 | 1631 | { |
|---|
| 2055 | 1632 | struct binder_thread *from; |
|---|
| 2056 | 1633 | |
|---|
| 2057 | 1634 | from = binder_get_txn_from(t); |
|---|
| 2058 | | - if (!from) |
|---|
| 1635 | + if (!from) { |
|---|
| 1636 | + __acquire(&from->proc->inner_lock); |
|---|
| 2059 | 1637 | return NULL; |
|---|
| 1638 | + } |
|---|
| 2060 | 1639 | binder_inner_proc_lock(from->proc); |
|---|
| 2061 | 1640 | if (t->from) { |
|---|
| 2062 | 1641 | BUG_ON(from != t->from); |
|---|
| 2063 | 1642 | return from; |
|---|
| 2064 | 1643 | } |
|---|
| 2065 | 1644 | binder_inner_proc_unlock(from->proc); |
|---|
| 1645 | + __acquire(&from->proc->inner_lock); |
|---|
| 2066 | 1646 | binder_thread_dec_tmpref(from); |
|---|
| 2067 | 1647 | return NULL; |
|---|
| 1648 | +} |
|---|
| 1649 | + |
|---|
| 1650 | +/** |
|---|
| 1651 | + * binder_free_txn_fixups() - free unprocessed fd fixups |
|---|
| 1652 | + * @t: binder transaction for t->from |
|---|
| 1653 | + * |
|---|
| 1654 | + * If the transaction is being torn down prior to being |
|---|
| 1655 | + * processed by the target process, free all of the |
|---|
| 1656 | + * fd fixups and fput the file structs. It is safe to |
|---|
| 1657 | + * call this function after the fixups have been |
|---|
| 1658 | + * processed -- in that case, the list will be empty. |
|---|
| 1659 | + */ |
|---|
| 1660 | +static void binder_free_txn_fixups(struct binder_transaction *t) |
|---|
| 1661 | +{ |
|---|
| 1662 | + struct binder_txn_fd_fixup *fixup, *tmp; |
|---|
| 1663 | + |
|---|
| 1664 | + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { |
|---|
| 1665 | + fput(fixup->file); |
|---|
| 1666 | + list_del(&fixup->fixup_entry); |
|---|
| 1667 | + kfree(fixup); |
|---|
| 1668 | + } |
|---|
| 2068 | 1669 | } |
|---|
| 2069 | 1670 | |
|---|
| 2070 | 1671 | static void binder_free_transaction(struct binder_transaction *t) |
|---|
| .. | .. |
|---|
| 2073 | 1674 | |
|---|
| 2074 | 1675 | if (target_proc) { |
|---|
| 2075 | 1676 | binder_inner_proc_lock(target_proc); |
|---|
| 1677 | + target_proc->outstanding_txns--; |
|---|
| 1678 | + if (target_proc->outstanding_txns < 0) |
|---|
| 1679 | + pr_warn("%s: Unexpected outstanding_txns %d\n", |
|---|
| 1680 | + __func__, target_proc->outstanding_txns); |
|---|
| 1681 | + if (!target_proc->outstanding_txns && target_proc->is_frozen) |
|---|
| 1682 | + wake_up_interruptible_all(&target_proc->freeze_wait); |
|---|
| 2076 | 1683 | if (t->buffer) |
|---|
| 2077 | 1684 | t->buffer->transaction = NULL; |
|---|
| 2078 | 1685 | binder_inner_proc_unlock(target_proc); |
|---|
| .. | .. |
|---|
| 2081 | 1688 | * If the transaction has no target_proc, then |
|---|
| 2082 | 1689 | * t->buffer->transaction has already been cleared. |
|---|
| 2083 | 1690 | */ |
|---|
| 1691 | + binder_free_txn_fixups(t); |
|---|
| 2084 | 1692 | kfree(t); |
|---|
| 2085 | 1693 | binder_stats_deleted(BINDER_STAT_TRANSACTION); |
|---|
| 2086 | 1694 | } |
|---|
| .. | .. |
|---|
| 2123 | 1731 | binder_free_transaction(t); |
|---|
| 2124 | 1732 | return; |
|---|
| 2125 | 1733 | } |
|---|
| 1734 | + __release(&target_thread->proc->inner_lock); |
|---|
| 2126 | 1735 | next = t->from_parent; |
|---|
| 2127 | 1736 | |
|---|
| 2128 | 1737 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
|---|
| .. | .. |
|---|
| 2165 | 1774 | /** |
|---|
| 2166 | 1775 | * binder_get_object() - gets object and checks for valid metadata |
|---|
| 2167 | 1776 | * @proc: binder_proc owning the buffer |
|---|
| 1777 | + * @u: sender's user pointer to base of buffer |
|---|
| 2168 | 1778 | * @buffer: binder_buffer that we're parsing. |
|---|
| 2169 | 1779 | * @offset: offset in the @buffer at which to validate an object. |
|---|
| 2170 | 1780 | * @object: struct binder_object to read into |
|---|
| 2171 | 1781 | * |
|---|
| 2172 | | - * Return: If there's a valid metadata object at @offset in @buffer, the |
|---|
| 1782 | + * Copy the binder object at the given offset into @object. If @u is |
|---|
| 1783 | + * provided then the copy is from the sender's buffer. If not, then |
|---|
| 1784 | + * it is copied from the target's @buffer. |
|---|
| 1785 | + * |
|---|
| 1786 | + * Return: If there's a valid metadata object at @offset, the |
|---|
| 2173 | 1787 | * size of that object. Otherwise, it returns zero. The object |
|---|
| 2174 | 1788 | * is read into the struct binder_object pointed to by @object. |
|---|
| 2175 | 1789 | */ |
|---|
| 2176 | 1790 | static size_t binder_get_object(struct binder_proc *proc, |
|---|
| 1791 | + const void __user *u, |
|---|
| 2177 | 1792 | struct binder_buffer *buffer, |
|---|
| 2178 | 1793 | unsigned long offset, |
|---|
| 2179 | 1794 | struct binder_object *object) |
|---|
| .. | .. |
|---|
| 2183 | 1798 | size_t object_size = 0; |
|---|
| 2184 | 1799 | |
|---|
| 2185 | 1800 | read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); |
|---|
| 2186 | | - if (offset > buffer->data_size || read_size < sizeof(*hdr) || |
|---|
| 2187 | | - !IS_ALIGNED(offset, sizeof(u32))) |
|---|
| 1801 | + if (offset > buffer->data_size || read_size < sizeof(*hdr)) |
|---|
| 2188 | 1802 | return 0; |
|---|
| 2189 | | - binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, |
|---|
| 2190 | | - offset, read_size); |
|---|
| 1803 | + if (u) { |
|---|
| 1804 | + if (copy_from_user(object, u + offset, read_size)) |
|---|
| 1805 | + return 0; |
|---|
| 1806 | + } else { |
|---|
| 1807 | + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, |
|---|
| 1808 | + offset, read_size)) |
|---|
| 1809 | + return 0; |
|---|
| 1810 | + } |
|---|
| 2191 | 1811 | |
|---|
| 2192 | 1812 | /* Ok, now see if we read a complete object. */ |
|---|
| 2193 | 1813 | hdr = &object->hdr; |
|---|
| .. | .. |
|---|
| 2256 | 1876 | return NULL; |
|---|
| 2257 | 1877 | |
|---|
| 2258 | 1878 | buffer_offset = start_offset + sizeof(binder_size_t) * index; |
|---|
| 2259 | | - binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, |
|---|
| 2260 | | - b, buffer_offset, sizeof(object_offset)); |
|---|
| 2261 | | - object_size = binder_get_object(proc, b, object_offset, object); |
|---|
| 1879 | + if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, |
|---|
| 1880 | + b, buffer_offset, |
|---|
| 1881 | + sizeof(object_offset))) |
|---|
| 1882 | + return NULL; |
|---|
| 1883 | + object_size = binder_get_object(proc, NULL, b, object_offset, object); |
|---|
| 2262 | 1884 | if (!object_size || object->hdr.type != BINDER_TYPE_PTR) |
|---|
| 2263 | 1885 | return NULL; |
|---|
| 2264 | 1886 | if (object_offsetp) |
|---|
| .. | .. |
|---|
| 2323 | 1945 | unsigned long buffer_offset; |
|---|
| 2324 | 1946 | struct binder_object last_object; |
|---|
| 2325 | 1947 | struct binder_buffer_object *last_bbo; |
|---|
| 2326 | | - size_t object_size = binder_get_object(proc, b, last_obj_offset, |
|---|
| 1948 | + size_t object_size = binder_get_object(proc, NULL, b, |
|---|
| 1949 | + last_obj_offset, |
|---|
| 2327 | 1950 | &last_object); |
|---|
| 2328 | 1951 | if (object_size != sizeof(*last_bbo)) |
|---|
| 2329 | 1952 | return false; |
|---|
| .. | .. |
|---|
| 2337 | 1960 | return false; |
|---|
| 2338 | 1961 | last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); |
|---|
| 2339 | 1962 | buffer_offset = objects_start_offset + |
|---|
| 2340 | | - sizeof(binder_size_t) * last_bbo->parent, |
|---|
| 2341 | | - binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset, |
|---|
| 2342 | | - b, buffer_offset, |
|---|
| 2343 | | - sizeof(last_obj_offset)); |
|---|
| 1963 | + sizeof(binder_size_t) * last_bbo->parent; |
|---|
| 1964 | + if (binder_alloc_copy_from_buffer(&proc->alloc, |
|---|
| 1965 | + &last_obj_offset, |
|---|
| 1966 | + b, buffer_offset, |
|---|
| 1967 | + sizeof(last_obj_offset))) |
|---|
| 1968 | + return false; |
|---|
| 2344 | 1969 | } |
|---|
| 2345 | 1970 | return (fixup_offset >= last_min_offset); |
|---|
| 2346 | 1971 | } |
|---|
| 2347 | 1972 | |
|---|
| 1973 | +/** |
|---|
| 1974 | + * struct binder_task_work_cb - for deferred close |
|---|
| 1975 | + * |
|---|
| 1976 | + * @twork: callback_head for task work |
|---|
| 1977 | + * @fd: fd to close |
|---|
| 1978 | + * |
|---|
| 1979 | + * Structure to pass task work to be handled after |
|---|
| 1980 | + * returning from binder_ioctl() via task_work_add(). |
|---|
| 1981 | + */ |
|---|
| 1982 | +struct binder_task_work_cb { |
|---|
| 1983 | + struct callback_head twork; |
|---|
| 1984 | + struct file *file; |
|---|
| 1985 | +}; |
|---|
| 1986 | + |
|---|
| 1987 | +/** |
|---|
| 1988 | + * binder_do_fd_close() - close list of file descriptors |
|---|
| 1989 | + * @twork: callback head for task work |
|---|
| 1990 | + * |
|---|
| 1991 | + * It is not safe to call ksys_close() during the binder_ioctl() |
|---|
| 1992 | + * function if there is a chance that binder's own file descriptor |
|---|
| 1993 | + * might be closed. This is to meet the requirements for using |
|---|
| 1994 | + * fdget() (see comments for __fget_light()). Therefore use |
|---|
| 1995 | + * task_work_add() to schedule the close operation once we have |
|---|
| 1996 | + * returned from binder_ioctl(). This function is a callback |
|---|
| 1997 | + * for that mechanism and does the actual ksys_close() on the |
|---|
| 1998 | + * given file descriptor. |
|---|
| 1999 | + */ |
|---|
| 2000 | +static void binder_do_fd_close(struct callback_head *twork) |
|---|
| 2001 | +{ |
|---|
| 2002 | + struct binder_task_work_cb *twcb = container_of(twork, |
|---|
| 2003 | + struct binder_task_work_cb, twork); |
|---|
| 2004 | + |
|---|
| 2005 | + fput(twcb->file); |
|---|
| 2006 | + kfree(twcb); |
|---|
| 2007 | +} |
|---|
| 2008 | + |
|---|
| 2009 | +/** |
|---|
| 2010 | + * binder_deferred_fd_close() - schedule a close for the given file-descriptor |
|---|
| 2011 | + * @fd: file-descriptor to close |
|---|
| 2012 | + * |
|---|
| 2013 | + * See comments in binder_do_fd_close(). This function is used to schedule |
|---|
| 2014 | + * a file-descriptor to be closed after returning from binder_ioctl(). |
|---|
| 2015 | + */ |
|---|
| 2016 | +static void binder_deferred_fd_close(int fd) |
|---|
| 2017 | +{ |
|---|
| 2018 | + struct binder_task_work_cb *twcb; |
|---|
| 2019 | + |
|---|
| 2020 | + twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); |
|---|
| 2021 | + if (!twcb) |
|---|
| 2022 | + return; |
|---|
| 2023 | + init_task_work(&twcb->twork, binder_do_fd_close); |
|---|
| 2024 | + close_fd_get_file(fd, &twcb->file); |
|---|
| 2025 | + if (twcb->file) { |
|---|
| 2026 | + filp_close(twcb->file, current->files); |
|---|
| 2027 | + task_work_add(current, &twcb->twork, TWA_RESUME); |
|---|
| 2028 | + } else { |
|---|
| 2029 | + kfree(twcb); |
|---|
| 2030 | + } |
|---|
| 2031 | +} |
|---|
| 2032 | + |
|---|
| 2348 | 2033 | static void binder_transaction_buffer_release(struct binder_proc *proc, |
|---|
| 2034 | + struct binder_thread *thread, |
|---|
| 2349 | 2035 | struct binder_buffer *buffer, |
|---|
| 2350 | 2036 | binder_size_t failed_at, |
|---|
| 2351 | 2037 | bool is_failure) |
|---|
| .. | .. |
|---|
| 2363 | 2049 | binder_dec_node(buffer->target_node, 1, 0); |
|---|
| 2364 | 2050 | |
|---|
| 2365 | 2051 | off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); |
|---|
| 2366 | | - off_end_offset = is_failure ? failed_at : |
|---|
| 2052 | + off_end_offset = is_failure && failed_at ? failed_at : |
|---|
| 2367 | 2053 | off_start_offset + buffer->offsets_size; |
|---|
| 2368 | 2054 | for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; |
|---|
| 2369 | 2055 | buffer_offset += sizeof(binder_size_t)) { |
|---|
| 2370 | 2056 | struct binder_object_header *hdr; |
|---|
| 2371 | | - size_t object_size; |
|---|
| 2057 | + size_t object_size = 0; |
|---|
| 2372 | 2058 | struct binder_object object; |
|---|
| 2373 | 2059 | binder_size_t object_offset; |
|---|
| 2374 | 2060 | |
|---|
| 2375 | | - binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, |
|---|
| 2376 | | - buffer, buffer_offset, |
|---|
| 2377 | | - sizeof(object_offset)); |
|---|
| 2378 | | - object_size = binder_get_object(proc, buffer, |
|---|
| 2379 | | - object_offset, &object); |
|---|
| 2061 | + if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, |
|---|
| 2062 | + buffer, buffer_offset, |
|---|
| 2063 | + sizeof(object_offset))) |
|---|
| 2064 | + object_size = binder_get_object(proc, NULL, buffer, |
|---|
| 2065 | + object_offset, &object); |
|---|
| 2380 | 2066 | if (object_size == 0) { |
|---|
| 2381 | 2067 | pr_err("transaction release %d bad object at offset %lld, size %zd\n", |
|---|
| 2382 | 2068 | debug_id, (u64)object_offset, buffer->data_size); |
|---|
| .. | .. |
|---|
| 2424 | 2110 | } break; |
|---|
| 2425 | 2111 | |
|---|
| 2426 | 2112 | case BINDER_TYPE_FD: { |
|---|
| 2427 | | - struct binder_fd_object *fp = to_binder_fd_object(hdr); |
|---|
| 2428 | | - |
|---|
| 2429 | | - binder_debug(BINDER_DEBUG_TRANSACTION, |
|---|
| 2430 | | - " fd %d\n", fp->fd); |
|---|
| 2431 | | - if (failed_at) |
|---|
| 2432 | | - task_close_fd(proc, fp->fd); |
|---|
| 2113 | + /* |
|---|
| 2114 | + * No need to close the file here since user-space |
|---|
| 2115 | + * closes it for for successfully delivered |
|---|
| 2116 | + * transactions. For transactions that weren't |
|---|
| 2117 | + * delivered, the new fd was never allocated so |
|---|
| 2118 | + * there is no need to close and the fput on the |
|---|
| 2119 | + * file is done when the transaction is torn |
|---|
| 2120 | + * down. |
|---|
| 2121 | + */ |
|---|
| 2433 | 2122 | } break; |
|---|
| 2434 | 2123 | case BINDER_TYPE_PTR: |
|---|
| 2435 | 2124 | /* |
|---|
| .. | .. |
|---|
| 2445 | 2134 | size_t fd_index; |
|---|
| 2446 | 2135 | binder_size_t fd_buf_size; |
|---|
| 2447 | 2136 | binder_size_t num_valid; |
|---|
| 2137 | + |
|---|
| 2138 | + if (is_failure) { |
|---|
| 2139 | + /* |
|---|
| 2140 | + * The fd fixups have not been applied so no |
|---|
| 2141 | + * fds need to be closed. |
|---|
| 2142 | + */ |
|---|
| 2143 | + continue; |
|---|
| 2144 | + } |
|---|
| 2448 | 2145 | |
|---|
| 2449 | 2146 | num_valid = (buffer_offset - off_start_offset) / |
|---|
| 2450 | 2147 | sizeof(binder_size_t); |
|---|
| .. | .. |
|---|
| 2485 | 2182 | for (fd_index = 0; fd_index < fda->num_fds; |
|---|
| 2486 | 2183 | fd_index++) { |
|---|
| 2487 | 2184 | u32 fd; |
|---|
| 2185 | + int err; |
|---|
| 2488 | 2186 | binder_size_t offset = fda_offset + |
|---|
| 2489 | 2187 | fd_index * sizeof(fd); |
|---|
| 2490 | 2188 | |
|---|
| 2491 | | - binder_alloc_copy_from_buffer(&proc->alloc, |
|---|
| 2492 | | - &fd, |
|---|
| 2493 | | - buffer, |
|---|
| 2494 | | - offset, |
|---|
| 2495 | | - sizeof(fd)); |
|---|
| 2496 | | - task_close_fd(proc, fd); |
|---|
| 2189 | + err = binder_alloc_copy_from_buffer( |
|---|
| 2190 | + &proc->alloc, &fd, buffer, |
|---|
| 2191 | + offset, sizeof(fd)); |
|---|
| 2192 | + WARN_ON(err); |
|---|
| 2193 | + if (!err) { |
|---|
| 2194 | + binder_deferred_fd_close(fd); |
|---|
| 2195 | + /* |
|---|
| 2196 | + * Need to make sure the thread goes |
|---|
| 2197 | + * back to userspace to complete the |
|---|
| 2198 | + * deferred close |
|---|
| 2199 | + */ |
|---|
| 2200 | + if (thread) |
|---|
| 2201 | + thread->looper_need_return = true; |
|---|
| 2202 | + } |
|---|
| 2497 | 2203 | } |
|---|
| 2498 | 2204 | } break; |
|---|
| 2499 | 2205 | default: |
|---|
| .. | .. |
|---|
| 2528 | 2234 | ret = -EINVAL; |
|---|
| 2529 | 2235 | goto done; |
|---|
| 2530 | 2236 | } |
|---|
| 2531 | | - if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { |
|---|
| 2237 | + if (security_binder_transfer_binder(binder_get_cred(proc), |
|---|
| 2238 | + binder_get_cred(target_proc))) { |
|---|
| 2532 | 2239 | ret = -EPERM; |
|---|
| 2533 | 2240 | goto done; |
|---|
| 2534 | 2241 | } |
|---|
| .. | .. |
|---|
| 2574 | 2281 | proc->pid, thread->pid, fp->handle); |
|---|
| 2575 | 2282 | return -EINVAL; |
|---|
| 2576 | 2283 | } |
|---|
| 2577 | | - if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { |
|---|
| 2284 | + if (security_binder_transfer_binder(binder_get_cred(proc), |
|---|
| 2285 | + binder_get_cred(target_proc))) { |
|---|
| 2578 | 2286 | ret = -EPERM; |
|---|
| 2579 | 2287 | goto done; |
|---|
| 2580 | 2288 | } |
|---|
| .. | .. |
|---|
| 2589 | 2297 | fp->cookie = node->cookie; |
|---|
| 2590 | 2298 | if (node->proc) |
|---|
| 2591 | 2299 | binder_inner_proc_lock(node->proc); |
|---|
| 2300 | + else |
|---|
| 2301 | + __acquire(&node->proc->inner_lock); |
|---|
| 2592 | 2302 | binder_inc_node_nilocked(node, |
|---|
| 2593 | 2303 | fp->hdr.type == BINDER_TYPE_BINDER, |
|---|
| 2594 | 2304 | 0, NULL); |
|---|
| 2595 | 2305 | if (node->proc) |
|---|
| 2596 | 2306 | binder_inner_proc_unlock(node->proc); |
|---|
| 2307 | + else |
|---|
| 2308 | + __release(&node->proc->inner_lock); |
|---|
| 2597 | 2309 | trace_binder_transaction_ref_to_node(t, node, &src_rdata); |
|---|
| 2598 | 2310 | binder_debug(BINDER_DEBUG_TRANSACTION, |
|---|
| 2599 | 2311 | " ref %d desc %d -> node %d u%016llx\n", |
|---|
| .. | .. |
|---|
| 2626 | 2338 | return ret; |
|---|
| 2627 | 2339 | } |
|---|
| 2628 | 2340 | |
|---|
| 2629 | | -static int binder_translate_fd(int fd, |
|---|
| 2341 | +static int binder_translate_fd(u32 fd, binder_size_t fd_offset, |
|---|
| 2630 | 2342 | struct binder_transaction *t, |
|---|
| 2631 | 2343 | struct binder_thread *thread, |
|---|
| 2632 | 2344 | struct binder_transaction *in_reply_to) |
|---|
| 2633 | 2345 | { |
|---|
| 2634 | 2346 | struct binder_proc *proc = thread->proc; |
|---|
| 2635 | 2347 | struct binder_proc *target_proc = t->to_proc; |
|---|
| 2636 | | - int target_fd; |
|---|
| 2348 | + struct binder_txn_fd_fixup *fixup; |
|---|
| 2637 | 2349 | struct file *file; |
|---|
| 2638 | | - int ret; |
|---|
| 2350 | + int ret = 0; |
|---|
| 2639 | 2351 | bool target_allows_fd; |
|---|
| 2640 | 2352 | |
|---|
| 2641 | 2353 | if (in_reply_to) |
|---|
| .. | .. |
|---|
| 2658 | 2370 | ret = -EBADF; |
|---|
| 2659 | 2371 | goto err_fget; |
|---|
| 2660 | 2372 | } |
|---|
| 2661 | | - ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); |
|---|
| 2373 | + ret = security_binder_transfer_file(binder_get_cred(proc), |
|---|
| 2374 | + binder_get_cred(target_proc), file); |
|---|
| 2662 | 2375 | if (ret < 0) { |
|---|
| 2663 | 2376 | ret = -EPERM; |
|---|
| 2664 | 2377 | goto err_security; |
|---|
| 2665 | 2378 | } |
|---|
| 2666 | 2379 | |
|---|
| 2667 | | - target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); |
|---|
| 2668 | | - if (target_fd < 0) { |
|---|
| 2380 | + /* |
|---|
| 2381 | + * Add fixup record for this transaction. The allocation |
|---|
| 2382 | + * of the fd in the target needs to be done from a |
|---|
| 2383 | + * target thread. |
|---|
| 2384 | + */ |
|---|
| 2385 | + fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); |
|---|
| 2386 | + if (!fixup) { |
|---|
| 2669 | 2387 | ret = -ENOMEM; |
|---|
| 2670 | | - goto err_get_unused_fd; |
|---|
| 2388 | + goto err_alloc; |
|---|
| 2671 | 2389 | } |
|---|
| 2672 | | - task_fd_install(target_proc, target_fd, file); |
|---|
| 2673 | | - trace_binder_transaction_fd(t, fd, target_fd); |
|---|
| 2674 | | - binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", |
|---|
| 2675 | | - fd, target_fd); |
|---|
| 2390 | + fixup->file = file; |
|---|
| 2391 | + fixup->offset = fd_offset; |
|---|
| 2392 | + trace_binder_transaction_fd_send(t, fd, fixup->offset); |
|---|
| 2393 | + list_add_tail(&fixup->fixup_entry, &t->fd_fixups); |
|---|
| 2676 | 2394 | |
|---|
| 2677 | | - return target_fd; |
|---|
| 2395 | + return ret; |
|---|
| 2678 | 2396 | |
|---|
| 2679 | | -err_get_unused_fd: |
|---|
| 2397 | +err_alloc: |
|---|
| 2680 | 2398 | err_security: |
|---|
| 2681 | 2399 | fput(file); |
|---|
| 2682 | 2400 | err_fget: |
|---|
| .. | .. |
|---|
| 2684 | 2402 | return ret; |
|---|
| 2685 | 2403 | } |
|---|
| 2686 | 2404 | |
|---|
| 2687 | | -static int binder_translate_fd_array(struct binder_fd_array_object *fda, |
|---|
| 2405 | +/** |
|---|
| 2406 | + * struct binder_ptr_fixup - data to be fixed-up in target buffer |
|---|
| 2407 | + * @offset offset in target buffer to fixup |
|---|
| 2408 | + * @skip_size bytes to skip in copy (fixup will be written later) |
|---|
| 2409 | + * @fixup_data data to write at fixup offset |
|---|
| 2410 | + * @node list node |
|---|
| 2411 | + * |
|---|
| 2412 | + * This is used for the pointer fixup list (pf) which is created and consumed |
|---|
| 2413 | + * during binder_transaction() and is only accessed locally. No |
|---|
| 2414 | + * locking is necessary. |
|---|
| 2415 | + * |
|---|
| 2416 | + * The list is ordered by @offset. |
|---|
| 2417 | + */ |
|---|
| 2418 | +struct binder_ptr_fixup { |
|---|
| 2419 | + binder_size_t offset; |
|---|
| 2420 | + size_t skip_size; |
|---|
| 2421 | + binder_uintptr_t fixup_data; |
|---|
| 2422 | + struct list_head node; |
|---|
| 2423 | +}; |
|---|
| 2424 | + |
|---|
| 2425 | +/** |
|---|
| 2426 | + * struct binder_sg_copy - scatter-gather data to be copied |
|---|
| 2427 | + * @offset offset in target buffer |
|---|
| 2428 | + * @sender_uaddr user address in source buffer |
|---|
| 2429 | + * @length bytes to copy |
|---|
| 2430 | + * @node list node |
|---|
| 2431 | + * |
|---|
| 2432 | + * This is used for the sg copy list (sgc) which is created and consumed |
|---|
| 2433 | + * during binder_transaction() and is only accessed locally. No |
|---|
| 2434 | + * locking is necessary. |
|---|
| 2435 | + * |
|---|
| 2436 | + * The list is ordered by @offset. |
|---|
| 2437 | + */ |
|---|
| 2438 | +struct binder_sg_copy { |
|---|
| 2439 | + binder_size_t offset; |
|---|
| 2440 | + const void __user *sender_uaddr; |
|---|
| 2441 | + size_t length; |
|---|
| 2442 | + struct list_head node; |
|---|
| 2443 | +}; |
|---|
| 2444 | + |
|---|
| 2445 | +/** |
|---|
| 2446 | + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data |
|---|
| 2447 | + * @alloc: binder_alloc associated with @buffer |
|---|
| 2448 | + * @buffer: binder buffer in target process |
|---|
| 2449 | + * @sgc_head: list_head of scatter-gather copy list |
|---|
| 2450 | + * @pf_head: list_head of pointer fixup list |
|---|
| 2451 | + * |
|---|
| 2452 | + * Processes all elements of @sgc_head, applying fixups from @pf_head |
|---|
| 2453 | + * and copying the scatter-gather data from the source process' user |
|---|
| 2454 | + * buffer to the target's buffer. It is expected that the list creation |
|---|
| 2455 | + * and processing all occurs during binder_transaction() so these lists |
|---|
| 2456 | + * are only accessed in local context. |
|---|
| 2457 | + * |
|---|
| 2458 | + * Return: 0=success, else -errno |
|---|
| 2459 | + */ |
|---|
| 2460 | +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, |
|---|
| 2461 | + struct binder_buffer *buffer, |
|---|
| 2462 | + struct list_head *sgc_head, |
|---|
| 2463 | + struct list_head *pf_head) |
|---|
| 2464 | +{ |
|---|
| 2465 | + int ret = 0; |
|---|
| 2466 | + struct binder_sg_copy *sgc, *tmpsgc; |
|---|
| 2467 | + struct binder_ptr_fixup *tmppf; |
|---|
| 2468 | + struct binder_ptr_fixup *pf = |
|---|
| 2469 | + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, |
|---|
| 2470 | + node); |
|---|
| 2471 | + |
|---|
| 2472 | + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { |
|---|
| 2473 | + size_t bytes_copied = 0; |
|---|
| 2474 | + |
|---|
| 2475 | + while (bytes_copied < sgc->length) { |
|---|
| 2476 | + size_t copy_size; |
|---|
| 2477 | + size_t bytes_left = sgc->length - bytes_copied; |
|---|
| 2478 | + size_t offset = sgc->offset + bytes_copied; |
|---|
| 2479 | + |
|---|
| 2480 | + /* |
|---|
| 2481 | + * We copy up to the fixup (pointed to by pf) |
|---|
| 2482 | + */ |
|---|
| 2483 | + copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) |
|---|
| 2484 | + : bytes_left; |
|---|
| 2485 | + if (!ret && copy_size) |
|---|
| 2486 | + ret = binder_alloc_copy_user_to_buffer( |
|---|
| 2487 | + alloc, buffer, |
|---|
| 2488 | + offset, |
|---|
| 2489 | + sgc->sender_uaddr + bytes_copied, |
|---|
| 2490 | + copy_size); |
|---|
| 2491 | + bytes_copied += copy_size; |
|---|
| 2492 | + if (copy_size != bytes_left) { |
|---|
| 2493 | + BUG_ON(!pf); |
|---|
| 2494 | + /* we stopped at a fixup offset */ |
|---|
| 2495 | + if (pf->skip_size) { |
|---|
| 2496 | + /* |
|---|
| 2497 | + * we are just skipping. This is for |
|---|
| 2498 | + * BINDER_TYPE_FDA where the translated |
|---|
| 2499 | + * fds will be fixed up when we get |
|---|
| 2500 | + * to target context. |
|---|
| 2501 | + */ |
|---|
| 2502 | + bytes_copied += pf->skip_size; |
|---|
| 2503 | + } else { |
|---|
| 2504 | + /* apply the fixup indicated by pf */ |
|---|
| 2505 | + if (!ret) |
|---|
| 2506 | + ret = binder_alloc_copy_to_buffer( |
|---|
| 2507 | + alloc, buffer, |
|---|
| 2508 | + pf->offset, |
|---|
| 2509 | + &pf->fixup_data, |
|---|
| 2510 | + sizeof(pf->fixup_data)); |
|---|
| 2511 | + bytes_copied += sizeof(pf->fixup_data); |
|---|
| 2512 | + } |
|---|
| 2513 | + list_del(&pf->node); |
|---|
| 2514 | + kfree(pf); |
|---|
| 2515 | + pf = list_first_entry_or_null(pf_head, |
|---|
| 2516 | + struct binder_ptr_fixup, node); |
|---|
| 2517 | + } |
|---|
| 2518 | + } |
|---|
| 2519 | + list_del(&sgc->node); |
|---|
| 2520 | + kfree(sgc); |
|---|
| 2521 | + } |
|---|
| 2522 | + list_for_each_entry_safe(pf, tmppf, pf_head, node) { |
|---|
| 2523 | + BUG_ON(pf->skip_size == 0); |
|---|
| 2524 | + list_del(&pf->node); |
|---|
| 2525 | + kfree(pf); |
|---|
| 2526 | + } |
|---|
| 2527 | + BUG_ON(!list_empty(sgc_head)); |
|---|
| 2528 | + |
|---|
| 2529 | + return ret > 0 ? -EINVAL : ret; |
|---|
| 2530 | +} |
|---|
| 2531 | + |
|---|
| 2532 | +/** |
|---|
| 2533 | + * binder_cleanup_deferred_txn_lists() - free specified lists |
|---|
| 2534 | + * @sgc_head: list_head of scatter-gather copy list |
|---|
| 2535 | + * @pf_head: list_head of pointer fixup list |
|---|
| 2536 | + * |
|---|
| 2537 | + * Called to clean up @sgc_head and @pf_head if there is an |
|---|
| 2538 | + * error. |
|---|
| 2539 | + */ |
|---|
| 2540 | +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, |
|---|
| 2541 | + struct list_head *pf_head) |
|---|
| 2542 | +{ |
|---|
| 2543 | + struct binder_sg_copy *sgc, *tmpsgc; |
|---|
| 2544 | + struct binder_ptr_fixup *pf, *tmppf; |
|---|
| 2545 | + |
|---|
| 2546 | + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { |
|---|
| 2547 | + list_del(&sgc->node); |
|---|
| 2548 | + kfree(sgc); |
|---|
| 2549 | + } |
|---|
| 2550 | + list_for_each_entry_safe(pf, tmppf, pf_head, node) { |
|---|
| 2551 | + list_del(&pf->node); |
|---|
| 2552 | + kfree(pf); |
|---|
| 2553 | + } |
|---|
| 2554 | +} |
|---|
| 2555 | + |
|---|
| 2556 | +/** |
|---|
| 2557 | + * binder_defer_copy() - queue a scatter-gather buffer for copy |
|---|
| 2558 | + * @sgc_head: list_head of scatter-gather copy list |
|---|
| 2559 | + * @offset: binder buffer offset in target process |
|---|
| 2560 | + * @sender_uaddr: user address in source process |
|---|
| 2561 | + * @length: bytes to copy |
|---|
| 2562 | + * |
|---|
| 2563 | + * Specify a scatter-gather block to be copied. The actual copy must |
|---|
| 2564 | + * be deferred until all the needed fixups are identified and queued. |
|---|
| 2565 | + * Then the copy and fixups are done together so un-translated values |
|---|
| 2566 | + * from the source are never visible in the target buffer. |
|---|
| 2567 | + * |
|---|
| 2568 | + * We are guaranteed that repeated calls to this function will have |
|---|
| 2569 | + * monotonically increasing @offset values so the list will naturally |
|---|
| 2570 | + * be ordered. |
|---|
| 2571 | + * |
|---|
| 2572 | + * Return: 0=success, else -errno |
|---|
| 2573 | + */ |
|---|
| 2574 | +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, |
|---|
| 2575 | + const void __user *sender_uaddr, size_t length) |
|---|
| 2576 | +{ |
|---|
| 2577 | + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); |
|---|
| 2578 | + |
|---|
| 2579 | + if (!bc) |
|---|
| 2580 | + return -ENOMEM; |
|---|
| 2581 | + |
|---|
| 2582 | + bc->offset = offset; |
|---|
| 2583 | + bc->sender_uaddr = sender_uaddr; |
|---|
| 2584 | + bc->length = length; |
|---|
| 2585 | + INIT_LIST_HEAD(&bc->node); |
|---|
| 2586 | + |
|---|
| 2587 | + /* |
|---|
| 2588 | + * We are guaranteed that the deferred copies are in-order |
|---|
| 2589 | + * so just add to the tail. |
|---|
| 2590 | + */ |
|---|
| 2591 | + list_add_tail(&bc->node, sgc_head); |
|---|
| 2592 | + |
|---|
| 2593 | + return 0; |
|---|
| 2594 | +} |
|---|
| 2595 | + |
|---|
| 2596 | +/** |
|---|
| 2597 | + * binder_add_fixup() - queue a fixup to be applied to sg copy |
|---|
| 2598 | + * @pf_head: list_head of binder ptr fixup list |
|---|
| 2599 | + * @offset: binder buffer offset in target process |
|---|
| 2600 | + * @fixup: bytes to be copied for fixup |
|---|
| 2601 | + * @skip_size: bytes to skip when copying (fixup will be applied later) |
|---|
| 2602 | + * |
|---|
| 2603 | + * Add the specified fixup to a list ordered by @offset. When copying |
|---|
| 2604 | + * the scatter-gather buffers, the fixup will be copied instead of |
|---|
| 2605 | + * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup |
|---|
| 2606 | + * will be applied later (in target process context), so we just skip |
|---|
| 2607 | + * the bytes specified by @skip_size. If @skip_size is 0, we copy the |
|---|
| 2608 | + * value in @fixup. |
|---|
| 2609 | + * |
|---|
| 2610 | + * This function is called *mostly* in @offset order, but there are |
|---|
| 2611 | + * exceptions. Since out-of-order inserts are relatively uncommon, |
|---|
| 2612 | + * we insert the new element by searching backward from the tail of |
|---|
| 2613 | + * the list. |
|---|
| 2614 | + * |
|---|
| 2615 | + * Return: 0=success, else -errno |
|---|
| 2616 | + */ |
|---|
| 2617 | +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, |
|---|
| 2618 | + binder_uintptr_t fixup, size_t skip_size) |
|---|
| 2619 | +{ |
|---|
| 2620 | + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); |
|---|
| 2621 | + struct binder_ptr_fixup *tmppf; |
|---|
| 2622 | + |
|---|
| 2623 | + if (!pf) |
|---|
| 2624 | + return -ENOMEM; |
|---|
| 2625 | + |
|---|
| 2626 | + pf->offset = offset; |
|---|
| 2627 | + pf->fixup_data = fixup; |
|---|
| 2628 | + pf->skip_size = skip_size; |
|---|
| 2629 | + INIT_LIST_HEAD(&pf->node); |
|---|
| 2630 | + |
|---|
| 2631 | + /* Fixups are *mostly* added in-order, but there are some |
|---|
| 2632 | + * exceptions. Look backwards through list for insertion point. |
|---|
| 2633 | + */ |
|---|
| 2634 | + list_for_each_entry_reverse(tmppf, pf_head, node) { |
|---|
| 2635 | + if (tmppf->offset < pf->offset) { |
|---|
| 2636 | + list_add(&pf->node, &tmppf->node); |
|---|
| 2637 | + return 0; |
|---|
| 2638 | + } |
|---|
| 2639 | + } |
|---|
| 2640 | + /* |
|---|
| 2641 | + * if we get here, then the new offset is the lowest so |
|---|
| 2642 | + * insert at the head |
|---|
| 2643 | + */ |
|---|
| 2644 | + list_add(&pf->node, pf_head); |
|---|
| 2645 | + return 0; |
|---|
| 2646 | +} |
|---|
| 2647 | + |
|---|
| 2648 | +static int binder_translate_fd_array(struct list_head *pf_head, |
|---|
| 2649 | + struct binder_fd_array_object *fda, |
|---|
| 2650 | + const void __user *sender_ubuffer, |
|---|
| 2688 | 2651 | struct binder_buffer_object *parent, |
|---|
| 2652 | + struct binder_buffer_object *sender_uparent, |
|---|
| 2689 | 2653 | struct binder_transaction *t, |
|---|
| 2690 | 2654 | struct binder_thread *thread, |
|---|
| 2691 | 2655 | struct binder_transaction *in_reply_to) |
|---|
| 2692 | 2656 | { |
|---|
| 2693 | | - binder_size_t fdi, fd_buf_size, num_installed_fds; |
|---|
| 2657 | + binder_size_t fdi, fd_buf_size; |
|---|
| 2694 | 2658 | binder_size_t fda_offset; |
|---|
| 2695 | | - int target_fd; |
|---|
| 2659 | + const void __user *sender_ufda_base; |
|---|
| 2696 | 2660 | struct binder_proc *proc = thread->proc; |
|---|
| 2697 | | - struct binder_proc *target_proc = t->to_proc; |
|---|
| 2661 | + int ret; |
|---|
| 2662 | + |
|---|
| 2663 | + if (fda->num_fds == 0) |
|---|
| 2664 | + return 0; |
|---|
| 2698 | 2665 | |
|---|
| 2699 | 2666 | fd_buf_size = sizeof(u32) * fda->num_fds; |
|---|
| 2700 | 2667 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { |
|---|
| .. | .. |
|---|
| 2718 | 2685 | */ |
|---|
| 2719 | 2686 | fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + |
|---|
| 2720 | 2687 | fda->parent_offset; |
|---|
| 2721 | | - if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { |
|---|
| 2688 | + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + |
|---|
| 2689 | + fda->parent_offset; |
|---|
| 2690 | + |
|---|
| 2691 | + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || |
|---|
| 2692 | + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { |
|---|
| 2722 | 2693 | binder_user_error("%d:%d parent offset not aligned correctly.\n", |
|---|
| 2723 | 2694 | proc->pid, thread->pid); |
|---|
| 2724 | 2695 | return -EINVAL; |
|---|
| 2725 | 2696 | } |
|---|
| 2697 | + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); |
|---|
| 2698 | + if (ret) |
|---|
| 2699 | + return ret; |
|---|
| 2700 | + |
|---|
| 2726 | 2701 | for (fdi = 0; fdi < fda->num_fds; fdi++) { |
|---|
| 2727 | 2702 | u32 fd; |
|---|
| 2728 | | - |
|---|
| 2729 | 2703 | binder_size_t offset = fda_offset + fdi * sizeof(fd); |
|---|
| 2704 | + binder_size_t sender_uoffset = fdi * sizeof(fd); |
|---|
| 2730 | 2705 | |
|---|
| 2731 | | - binder_alloc_copy_from_buffer(&target_proc->alloc, |
|---|
| 2732 | | - &fd, t->buffer, |
|---|
| 2733 | | - offset, sizeof(fd)); |
|---|
| 2734 | | - target_fd = binder_translate_fd(fd, t, thread, in_reply_to); |
|---|
| 2735 | | - if (target_fd < 0) |
|---|
| 2736 | | - goto err_translate_fd_failed; |
|---|
| 2737 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 2738 | | - t->buffer, offset, |
|---|
| 2739 | | - &target_fd, sizeof(fd)); |
|---|
| 2706 | + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); |
|---|
| 2707 | + if (!ret) |
|---|
| 2708 | + ret = binder_translate_fd(fd, offset, t, thread, |
|---|
| 2709 | + in_reply_to); |
|---|
| 2710 | + if (ret) |
|---|
| 2711 | + return ret > 0 ? -EINVAL : ret; |
|---|
| 2740 | 2712 | } |
|---|
| 2741 | 2713 | return 0; |
|---|
| 2742 | | - |
|---|
| 2743 | | -err_translate_fd_failed: |
|---|
| 2744 | | - /* |
|---|
| 2745 | | - * Failed to allocate fd or security error, free fds |
|---|
| 2746 | | - * installed so far. |
|---|
| 2747 | | - */ |
|---|
| 2748 | | - num_installed_fds = fdi; |
|---|
| 2749 | | - for (fdi = 0; fdi < num_installed_fds; fdi++) { |
|---|
| 2750 | | - u32 fd; |
|---|
| 2751 | | - binder_size_t offset = fda_offset + fdi * sizeof(fd); |
|---|
| 2752 | | - binder_alloc_copy_from_buffer(&target_proc->alloc, |
|---|
| 2753 | | - &fd, t->buffer, |
|---|
| 2754 | | - offset, sizeof(fd)); |
|---|
| 2755 | | - task_close_fd(target_proc, fd); |
|---|
| 2756 | | - } |
|---|
| 2757 | | - return target_fd; |
|---|
| 2758 | 2714 | } |
|---|
| 2759 | 2715 | |
|---|
| 2760 | | -static int binder_fixup_parent(struct binder_transaction *t, |
|---|
| 2716 | +static int binder_fixup_parent(struct list_head *pf_head, |
|---|
| 2717 | + struct binder_transaction *t, |
|---|
| 2761 | 2718 | struct binder_thread *thread, |
|---|
| 2762 | 2719 | struct binder_buffer_object *bp, |
|---|
| 2763 | 2720 | binder_size_t off_start_offset, |
|---|
| .. | .. |
|---|
| 2803 | 2760 | } |
|---|
| 2804 | 2761 | buffer_offset = bp->parent_offset + |
|---|
| 2805 | 2762 | (uintptr_t)parent->buffer - (uintptr_t)b->user_data; |
|---|
| 2806 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, |
|---|
| 2807 | | - &bp->buffer, sizeof(bp->buffer)); |
|---|
| 2763 | + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); |
|---|
| 2764 | +} |
|---|
| 2808 | 2765 | |
|---|
| 2809 | | - return 0; |
|---|
| 2766 | +/** |
|---|
| 2767 | + * binder_can_update_transaction() - Can a txn be superseded by an updated one? |
|---|
| 2768 | + * @t1: the pending async txn in the frozen process |
|---|
| 2769 | + * @t2: the new async txn to supersede the outdated pending one |
|---|
| 2770 | + * |
|---|
| 2771 | + * Return: true if t2 can supersede t1 |
|---|
| 2772 | + * false if t2 can not supersede t1 |
|---|
| 2773 | + */ |
|---|
| 2774 | +static bool binder_can_update_transaction(struct binder_transaction *t1, |
|---|
| 2775 | + struct binder_transaction *t2) |
|---|
| 2776 | +{ |
|---|
| 2777 | + if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != |
|---|
| 2778 | + (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) |
|---|
| 2779 | + return false; |
|---|
| 2780 | + if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && |
|---|
| 2781 | + t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && |
|---|
| 2782 | + t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && |
|---|
| 2783 | + t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) |
|---|
| 2784 | + return true; |
|---|
| 2785 | + return false; |
|---|
| 2786 | +} |
|---|
| 2787 | + |
|---|
| 2788 | +/** |
|---|
| 2789 | + * binder_find_outdated_transaction_ilocked() - Find the outdated transaction |
|---|
| 2790 | + * @t: new async transaction |
|---|
| 2791 | + * @target_list: list to find outdated transaction |
|---|
| 2792 | + * |
|---|
| 2793 | + * Return: the outdated transaction if found |
|---|
| 2794 | + * NULL if no outdated transacton can be found |
|---|
| 2795 | + * |
|---|
| 2796 | + * Requires the proc->inner_lock to be held. |
|---|
| 2797 | + */ |
|---|
| 2798 | +static struct binder_transaction * |
|---|
| 2799 | +binder_find_outdated_transaction_ilocked(struct binder_transaction *t, |
|---|
| 2800 | + struct list_head *target_list) |
|---|
| 2801 | +{ |
|---|
| 2802 | + struct binder_work *w; |
|---|
| 2803 | + |
|---|
| 2804 | + list_for_each_entry(w, target_list, entry) { |
|---|
| 2805 | + struct binder_transaction *t_queued; |
|---|
| 2806 | + |
|---|
| 2807 | + if (w->type != BINDER_WORK_TRANSACTION) |
|---|
| 2808 | + continue; |
|---|
| 2809 | + t_queued = container_of(w, struct binder_transaction, work); |
|---|
| 2810 | + if (binder_can_update_transaction(t_queued, t)) |
|---|
| 2811 | + return t_queued; |
|---|
| 2812 | + } |
|---|
| 2813 | + return NULL; |
|---|
| 2810 | 2814 | } |
|---|
| 2811 | 2815 | |
|---|
| 2812 | 2816 | /** |
|---|
| .. | .. |
|---|
| 2823 | 2827 | * If the @thread parameter is not NULL, the transaction is always queued |
|---|
| 2824 | 2828 | * to the waitlist of that specific thread. |
|---|
| 2825 | 2829 | * |
|---|
| 2826 | | - * Return: true if the transactions was successfully queued |
|---|
| 2827 | | - * false if the target process or thread is dead |
|---|
| 2830 | + * Return: 0 if the transaction was successfully queued |
|---|
| 2831 | + * BR_DEAD_REPLY if the target process or thread is dead |
|---|
| 2832 | + * BR_FROZEN_REPLY if the target process or thread is frozen |
|---|
| 2828 | 2833 | */ |
|---|
| 2829 | | -static bool binder_proc_transaction(struct binder_transaction *t, |
|---|
| 2834 | +static int binder_proc_transaction(struct binder_transaction *t, |
|---|
| 2830 | 2835 | struct binder_proc *proc, |
|---|
| 2831 | 2836 | struct binder_thread *thread) |
|---|
| 2832 | 2837 | { |
|---|
| .. | .. |
|---|
| 2834 | 2839 | struct binder_priority node_prio; |
|---|
| 2835 | 2840 | bool oneway = !!(t->flags & TF_ONE_WAY); |
|---|
| 2836 | 2841 | bool pending_async = false; |
|---|
| 2842 | + struct binder_transaction *t_outdated = NULL; |
|---|
| 2837 | 2843 | |
|---|
| 2838 | 2844 | BUG_ON(!node); |
|---|
| 2839 | 2845 | binder_node_lock(node); |
|---|
| .. | .. |
|---|
| 2842 | 2848 | |
|---|
| 2843 | 2849 | if (oneway) { |
|---|
| 2844 | 2850 | BUG_ON(thread); |
|---|
| 2845 | | - if (node->has_async_transaction) { |
|---|
| 2851 | + if (node->has_async_transaction) |
|---|
| 2846 | 2852 | pending_async = true; |
|---|
| 2847 | | - } else { |
|---|
| 2853 | + else |
|---|
| 2848 | 2854 | node->has_async_transaction = true; |
|---|
| 2849 | | - } |
|---|
| 2850 | 2855 | } |
|---|
| 2851 | 2856 | |
|---|
| 2852 | 2857 | binder_inner_proc_lock(proc); |
|---|
| 2858 | + if (proc->is_frozen) { |
|---|
| 2859 | + proc->sync_recv |= !oneway; |
|---|
| 2860 | + proc->async_recv |= oneway; |
|---|
| 2861 | + } |
|---|
| 2853 | 2862 | |
|---|
| 2854 | | - if (proc->is_dead || (thread && thread->is_dead)) { |
|---|
| 2863 | + if ((proc->is_frozen && !oneway) || proc->is_dead || |
|---|
| 2864 | + (thread && thread->is_dead)) { |
|---|
| 2855 | 2865 | binder_inner_proc_unlock(proc); |
|---|
| 2856 | 2866 | binder_node_unlock(node); |
|---|
| 2857 | | - return false; |
|---|
| 2867 | + return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; |
|---|
| 2858 | 2868 | } |
|---|
| 2859 | 2869 | |
|---|
| 2860 | 2870 | if (!thread && !pending_async) |
|---|
| 2861 | 2871 | thread = binder_select_thread_ilocked(proc); |
|---|
| 2872 | + |
|---|
| 2873 | + trace_android_vh_binder_proc_transaction(current, proc->tsk, |
|---|
| 2874 | + thread ? thread->task : 0, node->debug_id, t->code, pending_async); |
|---|
| 2862 | 2875 | |
|---|
| 2863 | 2876 | if (thread) { |
|---|
| 2864 | 2877 | binder_transaction_priority(thread->task, t, node_prio, |
|---|
| .. | .. |
|---|
| 2867 | 2880 | } else if (!pending_async) { |
|---|
| 2868 | 2881 | binder_enqueue_work_ilocked(&t->work, &proc->todo); |
|---|
| 2869 | 2882 | } else { |
|---|
| 2883 | + if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) { |
|---|
| 2884 | + t_outdated = binder_find_outdated_transaction_ilocked(t, |
|---|
| 2885 | + &node->async_todo); |
|---|
| 2886 | + if (t_outdated) { |
|---|
| 2887 | + binder_debug(BINDER_DEBUG_TRANSACTION, |
|---|
| 2888 | + "txn %d supersedes %d\n", |
|---|
| 2889 | + t->debug_id, t_outdated->debug_id); |
|---|
| 2890 | + list_del_init(&t_outdated->work.entry); |
|---|
| 2891 | + proc->outstanding_txns--; |
|---|
| 2892 | + } |
|---|
| 2893 | + } |
|---|
| 2870 | 2894 | binder_enqueue_work_ilocked(&t->work, &node->async_todo); |
|---|
| 2871 | 2895 | } |
|---|
| 2896 | + |
|---|
| 2897 | + trace_android_vh_binder_proc_transaction_end(current, proc->tsk, |
|---|
| 2898 | + thread ? thread->task : NULL, t->code, pending_async, !oneway); |
|---|
| 2872 | 2899 | |
|---|
| 2873 | 2900 | if (!pending_async) |
|---|
| 2874 | 2901 | binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); |
|---|
| 2875 | 2902 | |
|---|
| 2903 | + proc->outstanding_txns++; |
|---|
| 2876 | 2904 | binder_inner_proc_unlock(proc); |
|---|
| 2877 | 2905 | binder_node_unlock(node); |
|---|
| 2878 | 2906 | |
|---|
| 2879 | | - return true; |
|---|
| 2907 | + /* |
|---|
| 2908 | + * To reduce potential contention, free the outdated transaction and |
|---|
| 2909 | + * buffer after releasing the locks. |
|---|
| 2910 | + */ |
|---|
| 2911 | + if (t_outdated) { |
|---|
| 2912 | + struct binder_buffer *buffer = t_outdated->buffer; |
|---|
| 2913 | + |
|---|
| 2914 | + t_outdated->buffer = NULL; |
|---|
| 2915 | + buffer->transaction = NULL; |
|---|
| 2916 | + trace_binder_transaction_update_buffer_release(buffer); |
|---|
| 2917 | + binder_transaction_buffer_release(proc, NULL, buffer, 0, 0); |
|---|
| 2918 | + binder_alloc_free_buf(&proc->alloc, buffer); |
|---|
| 2919 | + kfree(t_outdated); |
|---|
| 2920 | + binder_stats_deleted(BINDER_STAT_TRANSACTION); |
|---|
| 2921 | + } |
|---|
| 2922 | + |
|---|
| 2923 | + return 0; |
|---|
| 2880 | 2924 | } |
|---|
| 2881 | 2925 | |
|---|
| 2882 | 2926 | /** |
|---|
| .. | .. |
|---|
| 2934 | 2978 | binder_size_t off_start_offset, off_end_offset; |
|---|
| 2935 | 2979 | binder_size_t off_min; |
|---|
| 2936 | 2980 | binder_size_t sg_buf_offset, sg_buf_end_offset; |
|---|
| 2981 | + binder_size_t user_offset = 0; |
|---|
| 2937 | 2982 | struct binder_proc *target_proc = NULL; |
|---|
| 2938 | 2983 | struct binder_thread *target_thread = NULL; |
|---|
| 2939 | 2984 | struct binder_node *target_node = NULL; |
|---|
| .. | .. |
|---|
| 2948 | 2993 | int t_debug_id = atomic_inc_return(&binder_last_id); |
|---|
| 2949 | 2994 | char *secctx = NULL; |
|---|
| 2950 | 2995 | u32 secctx_sz = 0; |
|---|
| 2996 | + struct list_head sgc_head; |
|---|
| 2997 | + struct list_head pf_head; |
|---|
| 2998 | + const void __user *user_buffer = (const void __user *) |
|---|
| 2999 | + (uintptr_t)tr->data.ptr.buffer; |
|---|
| 3000 | + INIT_LIST_HEAD(&sgc_head); |
|---|
| 3001 | + INIT_LIST_HEAD(&pf_head); |
|---|
| 2951 | 3002 | |
|---|
| 2952 | 3003 | e = binder_transaction_log_add(&binder_transaction_log); |
|---|
| 2953 | 3004 | e->debug_id = t_debug_id; |
|---|
| .. | .. |
|---|
| 2957 | 3008 | e->target_handle = tr->target.handle; |
|---|
| 2958 | 3009 | e->data_size = tr->data_size; |
|---|
| 2959 | 3010 | e->offsets_size = tr->offsets_size; |
|---|
| 2960 | | - e->context_name = proc->context->name; |
|---|
| 3011 | + strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); |
|---|
| 2961 | 3012 | |
|---|
| 2962 | 3013 | if (reply) { |
|---|
| 2963 | 3014 | binder_inner_proc_lock(proc); |
|---|
| .. | .. |
|---|
| 2991 | 3042 | binder_inner_proc_unlock(proc); |
|---|
| 2992 | 3043 | target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); |
|---|
| 2993 | 3044 | if (target_thread == NULL) { |
|---|
| 3045 | + /* annotation for sparse */ |
|---|
| 3046 | + __release(&target_thread->proc->inner_lock); |
|---|
| 2994 | 3047 | return_error = BR_DEAD_REPLY; |
|---|
| 2995 | 3048 | return_error_line = __LINE__; |
|---|
| 2996 | 3049 | goto err_dead_binder; |
|---|
| .. | .. |
|---|
| 3012 | 3065 | target_proc = target_thread->proc; |
|---|
| 3013 | 3066 | target_proc->tmp_ref++; |
|---|
| 3014 | 3067 | binder_inner_proc_unlock(target_thread->proc); |
|---|
| 3068 | + trace_android_vh_binder_reply(target_proc, proc, thread, tr); |
|---|
| 3015 | 3069 | } else { |
|---|
| 3016 | 3070 | if (tr->target.handle) { |
|---|
| 3017 | 3071 | struct binder_ref *ref; |
|---|
| .. | .. |
|---|
| 3031 | 3085 | ref->node, &target_proc, |
|---|
| 3032 | 3086 | &return_error); |
|---|
| 3033 | 3087 | } else { |
|---|
| 3034 | | - binder_user_error("%d:%d got transaction to invalid handle\n", |
|---|
| 3035 | | - proc->pid, thread->pid); |
|---|
| 3088 | + binder_user_error("%d:%d got transaction to invalid handle, %u\n", |
|---|
| 3089 | + proc->pid, thread->pid, tr->target.handle); |
|---|
| 3036 | 3090 | return_error = BR_FAILED_REPLY; |
|---|
| 3037 | 3091 | } |
|---|
| 3038 | 3092 | binder_proc_unlock(proc); |
|---|
| .. | .. |
|---|
| 3064 | 3118 | goto err_dead_binder; |
|---|
| 3065 | 3119 | } |
|---|
| 3066 | 3120 | e->to_node = target_node->debug_id; |
|---|
| 3067 | | - if (security_binder_transaction(proc->cred, |
|---|
| 3068 | | - target_proc->cred) < 0) { |
|---|
| 3121 | + trace_android_vh_binder_trans(target_proc, proc, thread, tr); |
|---|
| 3122 | + if (security_binder_transaction(binder_get_cred(proc), |
|---|
| 3123 | + binder_get_cred(target_proc)) < 0) { |
|---|
| 3069 | 3124 | return_error = BR_FAILED_REPLY; |
|---|
| 3070 | 3125 | return_error_param = -EPERM; |
|---|
| 3071 | 3126 | return_error_line = __LINE__; |
|---|
| .. | .. |
|---|
| 3133 | 3188 | if (target_thread) |
|---|
| 3134 | 3189 | e->to_thread = target_thread->pid; |
|---|
| 3135 | 3190 | e->to_proc = target_proc->pid; |
|---|
| 3191 | + trace_android_rvh_binder_transaction(target_proc, proc, thread, tr); |
|---|
| 3136 | 3192 | |
|---|
| 3137 | 3193 | /* TODO: reuse incoming transaction for reply */ |
|---|
| 3138 | 3194 | t = kzalloc(sizeof(*t), GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 3142 | 3198 | return_error_line = __LINE__; |
|---|
| 3143 | 3199 | goto err_alloc_t_failed; |
|---|
| 3144 | 3200 | } |
|---|
| 3201 | + INIT_LIST_HEAD(&t->fd_fixups); |
|---|
| 3145 | 3202 | binder_stats_created(BINDER_STAT_TRANSACTION); |
|---|
| 3146 | 3203 | spin_lock_init(&t->lock); |
|---|
| 3204 | + trace_android_vh_binder_transaction_init(t); |
|---|
| 3147 | 3205 | |
|---|
| 3148 | 3206 | tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); |
|---|
| 3149 | 3207 | if (tcomplete == NULL) { |
|---|
| .. | .. |
|---|
| 3197 | 3255 | if (target_node && target_node->txn_security_ctx) { |
|---|
| 3198 | 3256 | u32 secid; |
|---|
| 3199 | 3257 | size_t added_size; |
|---|
| 3258 | + int max_retries = 100; |
|---|
| 3200 | 3259 | |
|---|
| 3201 | | - security_cred_getsecid(proc->cred, &secid); |
|---|
| 3260 | + security_cred_getsecid(binder_get_cred(proc), &secid); |
|---|
| 3261 | + retry_alloc: |
|---|
| 3202 | 3262 | ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); |
|---|
| 3263 | + if (ret == -ENOMEM && max_retries-- > 0) { |
|---|
| 3264 | + struct page *dummy_page; |
|---|
| 3265 | + |
|---|
| 3266 | + /* |
|---|
| 3267 | + * security_secid_to_secctx() can fail because of a |
|---|
| 3268 | + * GFP_ATOMIC allocation in which case -ENOMEM is |
|---|
| 3269 | + * returned. This needs to be retried, but there is |
|---|
| 3270 | + * currently no way to tell userspace to retry so we |
|---|
| 3271 | + * do it here. We make sure there is still available |
|---|
| 3272 | + * memory first and then retry. |
|---|
| 3273 | + */ |
|---|
| 3274 | + dummy_page = alloc_page(GFP_KERNEL); |
|---|
| 3275 | + if (dummy_page) { |
|---|
| 3276 | + __free_page(dummy_page); |
|---|
| 3277 | + goto retry_alloc; |
|---|
| 3278 | + } |
|---|
| 3279 | + } |
|---|
| 3203 | 3280 | if (ret) { |
|---|
| 3204 | 3281 | return_error = BR_FAILED_REPLY; |
|---|
| 3205 | 3282 | return_error_param = ret; |
|---|
| .. | .. |
|---|
| 3234 | 3311 | goto err_binder_alloc_buf_failed; |
|---|
| 3235 | 3312 | } |
|---|
| 3236 | 3313 | if (secctx) { |
|---|
| 3314 | + int err; |
|---|
| 3237 | 3315 | size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + |
|---|
| 3238 | 3316 | ALIGN(tr->offsets_size, sizeof(void *)) + |
|---|
| 3239 | 3317 | ALIGN(extra_buffers_size, sizeof(void *)) - |
|---|
| 3240 | 3318 | ALIGN(secctx_sz, sizeof(u64)); |
|---|
| 3241 | 3319 | |
|---|
| 3242 | 3320 | t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; |
|---|
| 3243 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3244 | | - t->buffer, buf_offset, |
|---|
| 3245 | | - secctx, secctx_sz); |
|---|
| 3321 | + err = binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3322 | + t->buffer, buf_offset, |
|---|
| 3323 | + secctx, secctx_sz); |
|---|
| 3324 | + if (err) { |
|---|
| 3325 | + t->security_ctx = 0; |
|---|
| 3326 | + WARN_ON(1); |
|---|
| 3327 | + } |
|---|
| 3246 | 3328 | security_release_secctx(secctx, secctx_sz); |
|---|
| 3247 | 3329 | secctx = NULL; |
|---|
| 3248 | 3330 | } |
|---|
| 3249 | 3331 | t->buffer->debug_id = t->debug_id; |
|---|
| 3250 | 3332 | t->buffer->transaction = t; |
|---|
| 3251 | 3333 | t->buffer->target_node = target_node; |
|---|
| 3334 | + t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); |
|---|
| 3252 | 3335 | trace_binder_transaction_alloc_buf(t->buffer); |
|---|
| 3253 | 3336 | |
|---|
| 3254 | | - if (binder_alloc_copy_user_to_buffer( |
|---|
| 3255 | | - &target_proc->alloc, |
|---|
| 3256 | | - t->buffer, 0, |
|---|
| 3257 | | - (const void __user *) |
|---|
| 3258 | | - (uintptr_t)tr->data.ptr.buffer, |
|---|
| 3259 | | - tr->data_size)) { |
|---|
| 3260 | | - binder_user_error("%d:%d got transaction with invalid data ptr\n", |
|---|
| 3261 | | - proc->pid, thread->pid); |
|---|
| 3262 | | - return_error = BR_FAILED_REPLY; |
|---|
| 3263 | | - return_error_param = -EFAULT; |
|---|
| 3264 | | - return_error_line = __LINE__; |
|---|
| 3265 | | - goto err_copy_data_failed; |
|---|
| 3266 | | - } |
|---|
| 3267 | 3337 | if (binder_alloc_copy_user_to_buffer( |
|---|
| 3268 | 3338 | &target_proc->alloc, |
|---|
| 3269 | 3339 | t->buffer, |
|---|
| .. | .. |
|---|
| 3308 | 3378 | size_t object_size; |
|---|
| 3309 | 3379 | struct binder_object object; |
|---|
| 3310 | 3380 | binder_size_t object_offset; |
|---|
| 3381 | + binder_size_t copy_size; |
|---|
| 3311 | 3382 | |
|---|
| 3312 | | - binder_alloc_copy_from_buffer(&target_proc->alloc, |
|---|
| 3313 | | - &object_offset, |
|---|
| 3314 | | - t->buffer, |
|---|
| 3315 | | - buffer_offset, |
|---|
| 3316 | | - sizeof(object_offset)); |
|---|
| 3317 | | - object_size = binder_get_object(target_proc, t->buffer, |
|---|
| 3318 | | - object_offset, &object); |
|---|
| 3383 | + if (binder_alloc_copy_from_buffer(&target_proc->alloc, |
|---|
| 3384 | + &object_offset, |
|---|
| 3385 | + t->buffer, |
|---|
| 3386 | + buffer_offset, |
|---|
| 3387 | + sizeof(object_offset))) { |
|---|
| 3388 | + return_error = BR_FAILED_REPLY; |
|---|
| 3389 | + return_error_param = -EINVAL; |
|---|
| 3390 | + return_error_line = __LINE__; |
|---|
| 3391 | + goto err_bad_offset; |
|---|
| 3392 | + } |
|---|
| 3393 | + |
|---|
| 3394 | + /* |
|---|
| 3395 | + * Copy the source user buffer up to the next object |
|---|
| 3396 | + * that will be processed. |
|---|
| 3397 | + */ |
|---|
| 3398 | + copy_size = object_offset - user_offset; |
|---|
| 3399 | + if (copy_size && (user_offset > object_offset || |
|---|
| 3400 | + binder_alloc_copy_user_to_buffer( |
|---|
| 3401 | + &target_proc->alloc, |
|---|
| 3402 | + t->buffer, user_offset, |
|---|
| 3403 | + user_buffer + user_offset, |
|---|
| 3404 | + copy_size))) { |
|---|
| 3405 | + binder_user_error("%d:%d got transaction with invalid data ptr\n", |
|---|
| 3406 | + proc->pid, thread->pid); |
|---|
| 3407 | + return_error = BR_FAILED_REPLY; |
|---|
| 3408 | + return_error_param = -EFAULT; |
|---|
| 3409 | + return_error_line = __LINE__; |
|---|
| 3410 | + goto err_copy_data_failed; |
|---|
| 3411 | + } |
|---|
| 3412 | + object_size = binder_get_object(target_proc, user_buffer, |
|---|
| 3413 | + t->buffer, object_offset, &object); |
|---|
| 3319 | 3414 | if (object_size == 0 || object_offset < off_min) { |
|---|
| 3320 | 3415 | binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", |
|---|
| 3321 | 3416 | proc->pid, thread->pid, |
|---|
| .. | .. |
|---|
| 3327 | 3422 | return_error_line = __LINE__; |
|---|
| 3328 | 3423 | goto err_bad_offset; |
|---|
| 3329 | 3424 | } |
|---|
| 3425 | + /* |
|---|
| 3426 | + * Set offset to the next buffer fragment to be |
|---|
| 3427 | + * copied |
|---|
| 3428 | + */ |
|---|
| 3429 | + user_offset = object_offset + object_size; |
|---|
| 3330 | 3430 | |
|---|
| 3331 | 3431 | hdr = &object.hdr; |
|---|
| 3332 | 3432 | off_min = object_offset + object_size; |
|---|
| .. | .. |
|---|
| 3337 | 3437 | |
|---|
| 3338 | 3438 | fp = to_flat_binder_object(hdr); |
|---|
| 3339 | 3439 | ret = binder_translate_binder(fp, t, thread); |
|---|
| 3340 | | - if (ret < 0) { |
|---|
| 3440 | + |
|---|
| 3441 | + if (ret < 0 || |
|---|
| 3442 | + binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3443 | + t->buffer, |
|---|
| 3444 | + object_offset, |
|---|
| 3445 | + fp, sizeof(*fp))) { |
|---|
| 3341 | 3446 | return_error = BR_FAILED_REPLY; |
|---|
| 3342 | 3447 | return_error_param = ret; |
|---|
| 3343 | 3448 | return_error_line = __LINE__; |
|---|
| 3344 | 3449 | goto err_translate_failed; |
|---|
| 3345 | 3450 | } |
|---|
| 3346 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3347 | | - t->buffer, object_offset, |
|---|
| 3348 | | - fp, sizeof(*fp)); |
|---|
| 3349 | 3451 | } break; |
|---|
| 3350 | 3452 | case BINDER_TYPE_HANDLE: |
|---|
| 3351 | 3453 | case BINDER_TYPE_WEAK_HANDLE: { |
|---|
| .. | .. |
|---|
| 3353 | 3455 | |
|---|
| 3354 | 3456 | fp = to_flat_binder_object(hdr); |
|---|
| 3355 | 3457 | ret = binder_translate_handle(fp, t, thread); |
|---|
| 3356 | | - if (ret < 0) { |
|---|
| 3458 | + if (ret < 0 || |
|---|
| 3459 | + binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3460 | + t->buffer, |
|---|
| 3461 | + object_offset, |
|---|
| 3462 | + fp, sizeof(*fp))) { |
|---|
| 3357 | 3463 | return_error = BR_FAILED_REPLY; |
|---|
| 3358 | 3464 | return_error_param = ret; |
|---|
| 3359 | 3465 | return_error_line = __LINE__; |
|---|
| 3360 | 3466 | goto err_translate_failed; |
|---|
| 3361 | 3467 | } |
|---|
| 3362 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3363 | | - t->buffer, object_offset, |
|---|
| 3364 | | - fp, sizeof(*fp)); |
|---|
| 3365 | 3468 | } break; |
|---|
| 3366 | 3469 | |
|---|
| 3367 | 3470 | case BINDER_TYPE_FD: { |
|---|
| 3368 | 3471 | struct binder_fd_object *fp = to_binder_fd_object(hdr); |
|---|
| 3369 | | - int target_fd = binder_translate_fd(fp->fd, t, thread, |
|---|
| 3370 | | - in_reply_to); |
|---|
| 3472 | + binder_size_t fd_offset = object_offset + |
|---|
| 3473 | + (uintptr_t)&fp->fd - (uintptr_t)fp; |
|---|
| 3474 | + int ret = binder_translate_fd(fp->fd, fd_offset, t, |
|---|
| 3475 | + thread, in_reply_to); |
|---|
| 3371 | 3476 | |
|---|
| 3372 | | - if (target_fd < 0) { |
|---|
| 3477 | + fp->pad_binder = 0; |
|---|
| 3478 | + if (ret < 0 || |
|---|
| 3479 | + binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3480 | + t->buffer, |
|---|
| 3481 | + object_offset, |
|---|
| 3482 | + fp, sizeof(*fp))) { |
|---|
| 3373 | 3483 | return_error = BR_FAILED_REPLY; |
|---|
| 3374 | | - return_error_param = target_fd; |
|---|
| 3484 | + return_error_param = ret; |
|---|
| 3375 | 3485 | return_error_line = __LINE__; |
|---|
| 3376 | 3486 | goto err_translate_failed; |
|---|
| 3377 | 3487 | } |
|---|
| 3378 | | - fp->pad_binder = 0; |
|---|
| 3379 | | - fp->fd = target_fd; |
|---|
| 3380 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3381 | | - t->buffer, object_offset, |
|---|
| 3382 | | - fp, sizeof(*fp)); |
|---|
| 3383 | 3488 | } break; |
|---|
| 3384 | 3489 | case BINDER_TYPE_FDA: { |
|---|
| 3385 | 3490 | struct binder_object ptr_object; |
|---|
| 3386 | 3491 | binder_size_t parent_offset; |
|---|
| 3492 | + struct binder_object user_object; |
|---|
| 3493 | + size_t user_parent_size; |
|---|
| 3387 | 3494 | struct binder_fd_array_object *fda = |
|---|
| 3388 | 3495 | to_binder_fd_array_object(hdr); |
|---|
| 3389 | 3496 | size_t num_valid = (buffer_offset - off_start_offset) / |
|---|
| .. | .. |
|---|
| 3415 | 3522 | return_error_line = __LINE__; |
|---|
| 3416 | 3523 | goto err_bad_parent; |
|---|
| 3417 | 3524 | } |
|---|
| 3418 | | - ret = binder_translate_fd_array(fda, parent, t, thread, |
|---|
| 3419 | | - in_reply_to); |
|---|
| 3420 | | - if (ret < 0) { |
|---|
| 3525 | + /* |
|---|
| 3526 | + * We need to read the user version of the parent |
|---|
| 3527 | + * object to get the original user offset |
|---|
| 3528 | + */ |
|---|
| 3529 | + user_parent_size = |
|---|
| 3530 | + binder_get_object(proc, user_buffer, t->buffer, |
|---|
| 3531 | + parent_offset, &user_object); |
|---|
| 3532 | + if (user_parent_size != sizeof(user_object.bbo)) { |
|---|
| 3533 | + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", |
|---|
| 3534 | + proc->pid, thread->pid, |
|---|
| 3535 | + user_parent_size, |
|---|
| 3536 | + sizeof(user_object.bbo)); |
|---|
| 3421 | 3537 | return_error = BR_FAILED_REPLY; |
|---|
| 3422 | | - return_error_param = ret; |
|---|
| 3538 | + return_error_param = -EINVAL; |
|---|
| 3539 | + return_error_line = __LINE__; |
|---|
| 3540 | + goto err_bad_parent; |
|---|
| 3541 | + } |
|---|
| 3542 | + ret = binder_translate_fd_array(&pf_head, fda, |
|---|
| 3543 | + user_buffer, parent, |
|---|
| 3544 | + &user_object.bbo, t, |
|---|
| 3545 | + thread, in_reply_to); |
|---|
| 3546 | + if (!ret) |
|---|
| 3547 | + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3548 | + t->buffer, |
|---|
| 3549 | + object_offset, |
|---|
| 3550 | + fda, sizeof(*fda)); |
|---|
| 3551 | + if (ret) { |
|---|
| 3552 | + return_error = BR_FAILED_REPLY; |
|---|
| 3553 | + return_error_param = ret > 0 ? -EINVAL : ret; |
|---|
| 3423 | 3554 | return_error_line = __LINE__; |
|---|
| 3424 | 3555 | goto err_translate_failed; |
|---|
| 3425 | 3556 | } |
|---|
| .. | .. |
|---|
| 3441 | 3572 | return_error_line = __LINE__; |
|---|
| 3442 | 3573 | goto err_bad_offset; |
|---|
| 3443 | 3574 | } |
|---|
| 3444 | | - if (binder_alloc_copy_user_to_buffer( |
|---|
| 3445 | | - &target_proc->alloc, |
|---|
| 3446 | | - t->buffer, |
|---|
| 3447 | | - sg_buf_offset, |
|---|
| 3448 | | - (const void __user *) |
|---|
| 3449 | | - (uintptr_t)bp->buffer, |
|---|
| 3450 | | - bp->length)) { |
|---|
| 3451 | | - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", |
|---|
| 3452 | | - proc->pid, thread->pid); |
|---|
| 3453 | | - return_error_param = -EFAULT; |
|---|
| 3575 | + ret = binder_defer_copy(&sgc_head, sg_buf_offset, |
|---|
| 3576 | + (const void __user *)(uintptr_t)bp->buffer, |
|---|
| 3577 | + bp->length); |
|---|
| 3578 | + if (ret) { |
|---|
| 3454 | 3579 | return_error = BR_FAILED_REPLY; |
|---|
| 3580 | + return_error_param = ret; |
|---|
| 3455 | 3581 | return_error_line = __LINE__; |
|---|
| 3456 | | - goto err_copy_data_failed; |
|---|
| 3582 | + goto err_translate_failed; |
|---|
| 3457 | 3583 | } |
|---|
| 3458 | 3584 | /* Fixup buffer pointer to target proc address space */ |
|---|
| 3459 | 3585 | bp->buffer = (uintptr_t) |
|---|
| .. | .. |
|---|
| 3462 | 3588 | |
|---|
| 3463 | 3589 | num_valid = (buffer_offset - off_start_offset) / |
|---|
| 3464 | 3590 | sizeof(binder_size_t); |
|---|
| 3465 | | - ret = binder_fixup_parent(t, thread, bp, |
|---|
| 3591 | + ret = binder_fixup_parent(&pf_head, t, |
|---|
| 3592 | + thread, bp, |
|---|
| 3466 | 3593 | off_start_offset, |
|---|
| 3467 | 3594 | num_valid, |
|---|
| 3468 | 3595 | last_fixup_obj_off, |
|---|
| 3469 | 3596 | last_fixup_min_off); |
|---|
| 3470 | | - if (ret < 0) { |
|---|
| 3597 | + if (ret < 0 || |
|---|
| 3598 | + binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3599 | + t->buffer, |
|---|
| 3600 | + object_offset, |
|---|
| 3601 | + bp, sizeof(*bp))) { |
|---|
| 3471 | 3602 | return_error = BR_FAILED_REPLY; |
|---|
| 3472 | 3603 | return_error_param = ret; |
|---|
| 3473 | 3604 | return_error_line = __LINE__; |
|---|
| 3474 | 3605 | goto err_translate_failed; |
|---|
| 3475 | 3606 | } |
|---|
| 3476 | | - binder_alloc_copy_to_buffer(&target_proc->alloc, |
|---|
| 3477 | | - t->buffer, object_offset, |
|---|
| 3478 | | - bp, sizeof(*bp)); |
|---|
| 3479 | 3607 | last_fixup_obj_off = object_offset; |
|---|
| 3480 | 3608 | last_fixup_min_off = 0; |
|---|
| 3481 | 3609 | } break; |
|---|
| .. | .. |
|---|
| 3488 | 3616 | goto err_bad_object_type; |
|---|
| 3489 | 3617 | } |
|---|
| 3490 | 3618 | } |
|---|
| 3491 | | - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; |
|---|
| 3619 | + /* Done processing objects, copy the rest of the buffer */ |
|---|
| 3620 | + if (binder_alloc_copy_user_to_buffer( |
|---|
| 3621 | + &target_proc->alloc, |
|---|
| 3622 | + t->buffer, user_offset, |
|---|
| 3623 | + user_buffer + user_offset, |
|---|
| 3624 | + tr->data_size - user_offset)) { |
|---|
| 3625 | + binder_user_error("%d:%d got transaction with invalid data ptr\n", |
|---|
| 3626 | + proc->pid, thread->pid); |
|---|
| 3627 | + return_error = BR_FAILED_REPLY; |
|---|
| 3628 | + return_error_param = -EFAULT; |
|---|
| 3629 | + return_error_line = __LINE__; |
|---|
| 3630 | + goto err_copy_data_failed; |
|---|
| 3631 | + } |
|---|
| 3632 | + |
|---|
| 3633 | + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, |
|---|
| 3634 | + &sgc_head, &pf_head); |
|---|
| 3635 | + if (ret) { |
|---|
| 3636 | + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", |
|---|
| 3637 | + proc->pid, thread->pid); |
|---|
| 3638 | + return_error = BR_FAILED_REPLY; |
|---|
| 3639 | + return_error_param = ret; |
|---|
| 3640 | + return_error_line = __LINE__; |
|---|
| 3641 | + goto err_copy_data_failed; |
|---|
| 3642 | + } |
|---|
| 3643 | + if (t->buffer->oneway_spam_suspect) |
|---|
| 3644 | + tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; |
|---|
| 3645 | + else |
|---|
| 3646 | + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; |
|---|
| 3492 | 3647 | t->work.type = BINDER_WORK_TRANSACTION; |
|---|
| 3493 | 3648 | |
|---|
| 3494 | 3649 | if (reply) { |
|---|
| 3495 | 3650 | binder_enqueue_thread_work(thread, tcomplete); |
|---|
| 3496 | 3651 | binder_inner_proc_lock(target_proc); |
|---|
| 3497 | 3652 | if (target_thread->is_dead) { |
|---|
| 3653 | + return_error = BR_DEAD_REPLY; |
|---|
| 3498 | 3654 | binder_inner_proc_unlock(target_proc); |
|---|
| 3499 | 3655 | goto err_dead_proc_or_thread; |
|---|
| 3500 | 3656 | } |
|---|
| 3501 | 3657 | BUG_ON(t->buffer->async_transaction != 0); |
|---|
| 3502 | 3658 | binder_pop_transaction_ilocked(target_thread, in_reply_to); |
|---|
| 3503 | 3659 | binder_enqueue_thread_work_ilocked(target_thread, &t->work); |
|---|
| 3660 | + target_proc->outstanding_txns++; |
|---|
| 3504 | 3661 | binder_inner_proc_unlock(target_proc); |
|---|
| 3505 | 3662 | wake_up_interruptible_sync(&target_thread->wait); |
|---|
| 3663 | + trace_android_vh_binder_restore_priority(in_reply_to, current); |
|---|
| 3506 | 3664 | binder_restore_priority(current, in_reply_to->saved_priority); |
|---|
| 3507 | 3665 | binder_free_transaction(in_reply_to); |
|---|
| 3508 | 3666 | } else if (!(t->flags & TF_ONE_WAY)) { |
|---|
| .. | .. |
|---|
| 3520 | 3678 | t->from_parent = thread->transaction_stack; |
|---|
| 3521 | 3679 | thread->transaction_stack = t; |
|---|
| 3522 | 3680 | binder_inner_proc_unlock(proc); |
|---|
| 3523 | | - if (!binder_proc_transaction(t, target_proc, target_thread)) { |
|---|
| 3681 | + return_error = binder_proc_transaction(t, |
|---|
| 3682 | + target_proc, target_thread); |
|---|
| 3683 | + if (return_error) { |
|---|
| 3524 | 3684 | binder_inner_proc_lock(proc); |
|---|
| 3525 | 3685 | binder_pop_transaction_ilocked(thread, t); |
|---|
| 3526 | 3686 | binder_inner_proc_unlock(proc); |
|---|
| .. | .. |
|---|
| 3530 | 3690 | BUG_ON(target_node == NULL); |
|---|
| 3531 | 3691 | BUG_ON(t->buffer->async_transaction != 1); |
|---|
| 3532 | 3692 | binder_enqueue_thread_work(thread, tcomplete); |
|---|
| 3533 | | - if (!binder_proc_transaction(t, target_proc, NULL)) |
|---|
| 3693 | + return_error = binder_proc_transaction(t, target_proc, NULL); |
|---|
| 3694 | + if (return_error) |
|---|
| 3534 | 3695 | goto err_dead_proc_or_thread; |
|---|
| 3535 | 3696 | } |
|---|
| 3536 | 3697 | if (target_thread) |
|---|
| .. | .. |
|---|
| 3547 | 3708 | return; |
|---|
| 3548 | 3709 | |
|---|
| 3549 | 3710 | err_dead_proc_or_thread: |
|---|
| 3550 | | - return_error = BR_DEAD_REPLY; |
|---|
| 3551 | 3711 | return_error_line = __LINE__; |
|---|
| 3552 | 3712 | binder_dequeue_work(proc, tcomplete); |
|---|
| 3553 | 3713 | err_translate_failed: |
|---|
| .. | .. |
|---|
| 3555 | 3715 | err_bad_offset: |
|---|
| 3556 | 3716 | err_bad_parent: |
|---|
| 3557 | 3717 | err_copy_data_failed: |
|---|
| 3718 | + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); |
|---|
| 3719 | + binder_free_txn_fixups(t); |
|---|
| 3558 | 3720 | trace_binder_transaction_failed_buffer_release(t->buffer); |
|---|
| 3559 | | - binder_transaction_buffer_release(target_proc, t->buffer, |
|---|
| 3721 | + binder_transaction_buffer_release(target_proc, NULL, t->buffer, |
|---|
| 3560 | 3722 | buffer_offset, true); |
|---|
| 3561 | 3723 | if (target_node) |
|---|
| 3562 | 3724 | binder_dec_node_tmpref(target_node); |
|---|
| .. | .. |
|---|
| 3613 | 3775 | |
|---|
| 3614 | 3776 | BUG_ON(thread->return_error.cmd != BR_OK); |
|---|
| 3615 | 3777 | if (in_reply_to) { |
|---|
| 3778 | + trace_android_vh_binder_restore_priority(in_reply_to, current); |
|---|
| 3616 | 3779 | binder_restore_priority(current, in_reply_to->saved_priority); |
|---|
| 3617 | 3780 | thread->return_error.cmd = BR_TRANSACTION_COMPLETE; |
|---|
| 3618 | 3781 | binder_enqueue_thread_work(thread, &thread->return_error.work); |
|---|
| .. | .. |
|---|
| 3621 | 3784 | thread->return_error.cmd = return_error; |
|---|
| 3622 | 3785 | binder_enqueue_thread_work(thread, &thread->return_error.work); |
|---|
| 3623 | 3786 | } |
|---|
| 3787 | +} |
|---|
| 3788 | + |
|---|
| 3789 | +/**
|---|
| 3790 | + * binder_free_buf() - free the specified buffer
|---|
| 3791 | + * @proc: binder proc that owns buffer
|---|
| 3792 | + * @thread: binder thread performing the buffer release
|---|
| 3793 | + * @buffer: buffer to be freed
|---|
| 3794 | + * @is_failure: failed to send transaction
|---|
| 3795 | + *
|---|
| 3796 | + * If buffer for an async transaction, enqueue the next async
|---|
| 3797 | + * transaction from the node.
|---|
| 3798 | + * Cleanup buffer and free it.
|---|
| 3799 | + */
|---|
| 3800 | +static void
|---|
| 3801 | +binder_free_buf(struct binder_proc *proc,
|---|
| 3802 | + struct binder_thread *thread,
|---|
| 3803 | + struct binder_buffer *buffer, bool is_failure)
|---|
| 3804 | +{
|---|
| 3805 | + binder_inner_proc_lock(proc);
|---|
| 3806 | + if (buffer->transaction) { /* detach buffer from a still-live transaction */
|---|
| 3807 | + buffer->transaction->buffer = NULL;
|---|
| 3808 | + buffer->transaction = NULL;
|---|
| 3809 | + }
|---|
| 3810 | + binder_inner_proc_unlock(proc);
|---|
| 3811 | + if (buffer->async_transaction && buffer->target_node) {
|---|
| 3812 | + struct binder_node *buf_node;
|---|
| 3813 | + struct binder_work *w;
|---|
| 3814 | +
|---|
| 3815 | + buf_node = buffer->target_node;
|---|
| 3816 | + binder_node_inner_lock(buf_node);
|---|
| 3817 | + BUG_ON(!buf_node->has_async_transaction);
|---|
| 3818 | + BUG_ON(buf_node->proc != proc);
|---|
| 3819 | + w = binder_dequeue_work_head_ilocked(
|---|
| 3820 | + &buf_node->async_todo);
|---|
| 3821 | + if (!w) {
|---|
| 3822 | + buf_node->has_async_transaction = false; /* async queue drained */
|---|
| 3823 | + } else {
|---|
| 3824 | + binder_enqueue_work_ilocked(
|---|
| 3825 | + w, &proc->todo);
|---|
| 3826 | + binder_wakeup_proc_ilocked(proc); /* kick off next queued async txn */
|---|
| 3827 | + }
|---|
| 3828 | + binder_node_inner_unlock(buf_node);
|---|
| 3829 | + }
|---|
| 3830 | + trace_binder_transaction_buffer_release(buffer);
|---|
| 3831 | + binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
|---|
| 3832 | + binder_alloc_free_buf(&proc->alloc, buffer);
|---|
| 3624 | 3833 | }
|---|
| 3625 | 3834 | |
|---|
| 3626 | 3835 | static int binder_thread_write(struct binder_proc *proc, |
|---|
| .. | .. |
|---|
| 3813 | 4022 | proc->pid, thread->pid, (u64)data_ptr, |
|---|
| 3814 | 4023 | buffer->debug_id, |
|---|
| 3815 | 4024 | buffer->transaction ? "active" : "finished"); |
|---|
| 3816 | | - |
|---|
| 3817 | | - binder_inner_proc_lock(proc); |
|---|
| 3818 | | - if (buffer->transaction) { |
|---|
| 3819 | | - buffer->transaction->buffer = NULL; |
|---|
| 3820 | | - buffer->transaction = NULL; |
|---|
| 3821 | | - } |
|---|
| 3822 | | - binder_inner_proc_unlock(proc); |
|---|
| 3823 | | - if (buffer->async_transaction && buffer->target_node) { |
|---|
| 3824 | | - struct binder_node *buf_node; |
|---|
| 3825 | | - struct binder_work *w; |
|---|
| 3826 | | - |
|---|
| 3827 | | - buf_node = buffer->target_node; |
|---|
| 3828 | | - binder_node_inner_lock(buf_node); |
|---|
| 3829 | | - BUG_ON(!buf_node->has_async_transaction); |
|---|
| 3830 | | - BUG_ON(buf_node->proc != proc); |
|---|
| 3831 | | - w = binder_dequeue_work_head_ilocked( |
|---|
| 3832 | | - &buf_node->async_todo); |
|---|
| 3833 | | - if (!w) { |
|---|
| 3834 | | - buf_node->has_async_transaction = false; |
|---|
| 3835 | | - } else { |
|---|
| 3836 | | - binder_enqueue_work_ilocked( |
|---|
| 3837 | | - w, &proc->todo); |
|---|
| 3838 | | - binder_wakeup_proc_ilocked(proc); |
|---|
| 3839 | | - } |
|---|
| 3840 | | - binder_node_inner_unlock(buf_node); |
|---|
| 3841 | | - } |
|---|
| 3842 | | - trace_binder_transaction_buffer_release(buffer); |
|---|
| 3843 | | - binder_transaction_buffer_release(proc, buffer, 0, false); |
|---|
| 3844 | | - binder_alloc_free_buf(&proc->alloc, buffer); |
|---|
| 4025 | + binder_free_buf(proc, thread, buffer, false); |
|---|
| 3845 | 4026 | break; |
|---|
| 3846 | 4027 | } |
|---|
| 3847 | 4028 | |
|---|
| .. | .. |
|---|
| 3887 | 4068 | } |
|---|
| 3888 | 4069 | thread->looper |= BINDER_LOOPER_STATE_REGISTERED; |
|---|
| 3889 | 4070 | binder_inner_proc_unlock(proc); |
|---|
| 4071 | + trace_android_vh_binder_looper_state_registered(thread, proc); |
|---|
| 3890 | 4072 | break; |
|---|
| 3891 | 4073 | case BC_ENTER_LOOPER: |
|---|
| 3892 | 4074 | binder_debug(BINDER_DEBUG_THREADS, |
|---|
| .. | .. |
|---|
| 4148 | 4330 | if (do_proc_work) |
|---|
| 4149 | 4331 | list_add(&thread->waiting_thread_node, |
|---|
| 4150 | 4332 | &proc->waiting_threads); |
|---|
| 4333 | + trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc); |
|---|
| 4151 | 4334 | binder_inner_proc_unlock(proc); |
|---|
| 4152 | 4335 | schedule(); |
|---|
| 4153 | 4336 | binder_inner_proc_lock(proc); |
|---|
| 4154 | 4337 | list_del_init(&thread->waiting_thread_node); |
|---|
| 4155 | 4338 | if (signal_pending(current)) { |
|---|
| 4156 | | - ret = -ERESTARTSYS; |
|---|
| 4339 | + ret = -EINTR; |
|---|
| 4157 | 4340 | break; |
|---|
| 4158 | 4341 | } |
|---|
| 4159 | 4342 | } |
|---|
| 4160 | 4343 | finish_wait(&thread->wait, &wait); |
|---|
| 4161 | 4344 | binder_inner_proc_unlock(proc); |
|---|
| 4162 | 4345 | freezer_count(); |
|---|
| 4346 | + |
|---|
| 4347 | + return ret; |
|---|
| 4348 | +} |
|---|
| 4349 | + |
|---|
| 4350 | +/**
|---|
| 4351 | + * binder_apply_fd_fixups() - finish fd translation
|---|
| 4352 | + * @proc: binder_proc associated with @t->buffer
|---|
| 4353 | + * @t: binder transaction with list of fd fixups
|---|
| 4354 | + *
|---|
| 4355 | + * Now that we are in the context of the transaction target
|---|
| 4356 | + * process, we can allocate and install fds. Process the
|---|
| 4357 | + * list of fds to translate and fixup the buffer with the
|---|
| 4358 | + * new fds.
|---|
| 4359 | + *
|---|
| 4360 | + * If we fail to allocate an fd, then free the resources by
|---|
| 4361 | + * fput'ing files that have not been processed and closing (via
|---|
| 4362 | + * binder_deferred_fd_close) any fds that have already been allocated.
|---|
| 4363 | + */
|---|
| 4364 | +static int binder_apply_fd_fixups(struct binder_proc *proc,
|---|
| 4365 | + struct binder_transaction *t)
|---|
| 4366 | +{
|---|
| 4367 | + struct binder_txn_fd_fixup *fixup, *tmp;
|---|
| 4368 | + int ret = 0;
|---|
| 4369 | +
|---|
| 4370 | + list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
|---|
| 4371 | + int fd = get_unused_fd_flags(O_CLOEXEC); /* reserve fd in this (target) process */
|---|
| 4372 | +
|---|
| 4373 | + if (fd < 0) {
|---|
| 4374 | + binder_debug(BINDER_DEBUG_TRANSACTION,
|---|
| 4375 | + "failed fd fixup txn %d fd %d\n",
|---|
| 4376 | + t->debug_id, fd);
|---|
| 4377 | + ret = -ENOMEM; /* NOTE: specific error from get_unused_fd_flags is not propagated */
|---|
| 4378 | + break;
|---|
| 4379 | + }
|---|
| 4380 | + binder_debug(BINDER_DEBUG_TRANSACTION,
|---|
| 4381 | + "fd fixup txn %d fd %d\n",
|---|
| 4382 | + t->debug_id, fd);
|---|
| 4383 | + trace_binder_transaction_fd_recv(t, fd, fixup->offset);
|---|
| 4384 | + fd_install(fd, fixup->file);
|---|
| 4385 | + fixup->file = NULL; /* file reference now owned by the fd table */
|---|
| 4386 | + if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
|---|
| 4387 | + fixup->offset, &fd,
|---|
| 4388 | + sizeof(u32))) { /* patch translated fd into the buffer */
|---|
| 4389 | + ret = -EINVAL;
|---|
| 4390 | + break;
|---|
| 4391 | + }
|---|
| 4392 | + }
|---|
| 4393 | + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { /* 2nd pass: undo on error, always free */
|---|
| 4394 | + if (fixup->file) {
|---|
| 4395 | + fput(fixup->file); /* never installed: drop our file reference */
|---|
| 4396 | + } else if (ret) {
|---|
| 4397 | + u32 fd;
|---|
| 4398 | + int err;
|---|
| 4399 | +
|---|
| 4400 | + err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
|---|
| 4401 | + t->buffer,
|---|
| 4402 | + fixup->offset,
|---|
| 4403 | + sizeof(fd)); /* read back the fd we installed earlier */
|---|
| 4404 | + WARN_ON(err);
|---|
| 4405 | + if (!err)
|---|
| 4406 | + binder_deferred_fd_close(fd); /* close fds installed before the failure */
|---|
| 4407 | + }
|---|
| 4408 | + list_del(&fixup->fixup_entry);
|---|
| 4409 | + kfree(fixup);
|---|
| 4410 | + }
|---|
| 4163 | 4411 |
|---|
| 4164 | 4412 | return ret;
|---|
| 4165 | 4413 | }
|---|
| .. | .. |
|---|
| 4200 | 4448 | wait_event_interruptible(binder_user_error_wait, |
|---|
| 4201 | 4449 | binder_stop_on_user_error < 2); |
|---|
| 4202 | 4450 | } |
|---|
| 4451 | + trace_android_vh_binder_restore_priority(NULL, current); |
|---|
| 4203 | 4452 | binder_restore_priority(current, proc->default_priority); |
|---|
| 4204 | 4453 | } |
|---|
| 4205 | 4454 | |
|---|
| .. | .. |
|---|
| 4244 | 4493 | binder_inner_proc_unlock(proc); |
|---|
| 4245 | 4494 | break; |
|---|
| 4246 | 4495 | } |
|---|
| 4496 | + trace_android_vh_binder_thread_read(&list, proc, thread); |
|---|
| 4247 | 4497 | w = binder_dequeue_work_head_ilocked(list); |
|---|
| 4248 | 4498 | if (binder_worklist_empty_ilocked(&thread->todo)) |
|---|
| 4249 | 4499 | thread->process_todo = false; |
|---|
| .. | .. |
|---|
| 4267 | 4517 | |
|---|
| 4268 | 4518 | binder_stat_br(proc, thread, cmd); |
|---|
| 4269 | 4519 | } break; |
|---|
| 4270 | | - case BINDER_WORK_TRANSACTION_COMPLETE: { |
|---|
| 4520 | + case BINDER_WORK_TRANSACTION_COMPLETE: |
|---|
| 4521 | + case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: { |
|---|
| 4522 | + if (proc->oneway_spam_detection_enabled && |
|---|
| 4523 | + w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) |
|---|
| 4524 | + cmd = BR_ONEWAY_SPAM_SUSPECT; |
|---|
| 4525 | + else |
|---|
| 4526 | + cmd = BR_TRANSACTION_COMPLETE; |
|---|
| 4271 | 4527 | binder_inner_proc_unlock(proc); |
|---|
| 4272 | | - cmd = BR_TRANSACTION_COMPLETE; |
|---|
| 4273 | 4528 | kfree(w); |
|---|
| 4274 | 4529 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); |
|---|
| 4275 | 4530 | if (put_user(cmd, (uint32_t __user *)ptr)) |
|---|
| .. | .. |
|---|
| 4409 | 4664 | if (cmd == BR_DEAD_BINDER) |
|---|
| 4410 | 4665 | goto done; /* DEAD_BINDER notifications can cause transactions */ |
|---|
| 4411 | 4666 | } break; |
|---|
| 4667 | + default: |
|---|
| 4668 | + binder_inner_proc_unlock(proc); |
|---|
| 4669 | + pr_err("%d:%d: bad work type %d\n", |
|---|
| 4670 | + proc->pid, thread->pid, w->type); |
|---|
| 4671 | + break; |
|---|
| 4412 | 4672 | } |
|---|
| 4413 | 4673 | |
|---|
| 4414 | 4674 | if (!t) |
|---|
| .. | .. |
|---|
| 4442 | 4702 | trd->sender_pid = |
|---|
| 4443 | 4703 | task_tgid_nr_ns(sender, |
|---|
| 4444 | 4704 | task_active_pid_ns(current)); |
|---|
| 4705 | + trace_android_vh_sync_txn_recvd(thread->task, t_from->task); |
|---|
| 4445 | 4706 | } else { |
|---|
| 4446 | 4707 | trd->sender_pid = 0; |
|---|
| 4447 | 4708 | } |
|---|
| 4448 | 4709 | |
|---|
| 4710 | + ret = binder_apply_fd_fixups(proc, t); |
|---|
| 4711 | + if (ret) { |
|---|
| 4712 | + struct binder_buffer *buffer = t->buffer; |
|---|
| 4713 | + bool oneway = !!(t->flags & TF_ONE_WAY); |
|---|
| 4714 | + int tid = t->debug_id; |
|---|
| 4715 | + |
|---|
| 4716 | + if (t_from) |
|---|
| 4717 | + binder_thread_dec_tmpref(t_from); |
|---|
| 4718 | + buffer->transaction = NULL; |
|---|
| 4719 | + binder_cleanup_transaction(t, "fd fixups failed", |
|---|
| 4720 | + BR_FAILED_REPLY); |
|---|
| 4721 | + binder_free_buf(proc, thread, buffer, true); |
|---|
| 4722 | + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
|---|
| 4723 | + "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", |
|---|
| 4724 | + proc->pid, thread->pid, |
|---|
| 4725 | + oneway ? "async " : |
|---|
| 4726 | + (cmd == BR_REPLY ? "reply " : ""), |
|---|
| 4727 | + tid, BR_FAILED_REPLY, ret, __LINE__); |
|---|
| 4728 | + if (cmd == BR_REPLY) { |
|---|
| 4729 | + cmd = BR_FAILED_REPLY; |
|---|
| 4730 | + if (put_user(cmd, (uint32_t __user *)ptr)) |
|---|
| 4731 | + return -EFAULT; |
|---|
| 4732 | + ptr += sizeof(uint32_t); |
|---|
| 4733 | + binder_stat_br(proc, thread, cmd); |
|---|
| 4734 | + break; |
|---|
| 4735 | + } |
|---|
| 4736 | + continue; |
|---|
| 4737 | + } |
|---|
| 4449 | 4738 | trd->data_size = t->buffer->data_size; |
|---|
| 4450 | 4739 | trd->offsets_size = t->buffer->offsets_size; |
|---|
| 4451 | 4740 | trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; |
|---|
| .. | .. |
|---|
| 4654 | 4943 | static void binder_free_proc(struct binder_proc *proc) |
|---|
| 4655 | 4944 | { |
|---|
| 4656 | 4945 | struct binder_device *device; |
|---|
| 4946 | + struct binder_proc_ext *eproc = |
|---|
| 4947 | + container_of(proc, struct binder_proc_ext, proc); |
|---|
| 4657 | 4948 | |
|---|
| 4658 | 4949 | BUG_ON(!list_empty(&proc->todo)); |
|---|
| 4659 | 4950 | BUG_ON(!list_empty(&proc->delivered_death)); |
|---|
| 4951 | + if (proc->outstanding_txns) |
|---|
| 4952 | + pr_warn("%s: Unexpected outstanding_txns %d\n", |
|---|
| 4953 | + __func__, proc->outstanding_txns); |
|---|
| 4660 | 4954 | device = container_of(proc->context, struct binder_device, context); |
|---|
| 4661 | 4955 | if (refcount_dec_and_test(&device->ref)) { |
|---|
| 4662 | 4956 | kfree(proc->context->name); |
|---|
| .. | .. |
|---|
| 4664 | 4958 | } |
|---|
| 4665 | 4959 | binder_alloc_deferred_release(&proc->alloc); |
|---|
| 4666 | 4960 | put_task_struct(proc->tsk); |
|---|
| 4667 | | - put_cred(proc->cred); |
|---|
| 4961 | + put_cred(eproc->cred); |
|---|
| 4668 | 4962 | binder_stats_deleted(BINDER_STAT_PROC); |
|---|
| 4669 | | - kfree(proc); |
|---|
| 4963 | + trace_android_vh_binder_free_proc(proc); |
|---|
| 4964 | + kfree(eproc); |
|---|
| 4670 | 4965 | } |
|---|
| 4671 | 4966 | |
|---|
| 4672 | 4967 | static void binder_free_thread(struct binder_thread *thread) |
|---|
| .. | .. |
|---|
| 4705 | 5000 | spin_lock(&t->lock); |
|---|
| 4706 | 5001 | if (t->to_thread == thread) |
|---|
| 4707 | 5002 | send_reply = t; |
|---|
| 5003 | + } else { |
|---|
| 5004 | + __acquire(&t->lock); |
|---|
| 4708 | 5005 | } |
|---|
| 4709 | 5006 | thread->is_dead = true; |
|---|
| 4710 | 5007 | |
|---|
| .. | .. |
|---|
| 4718 | 5015 | (t->to_thread == thread) ? "in" : "out"); |
|---|
| 4719 | 5016 | |
|---|
| 4720 | 5017 | if (t->to_thread == thread) { |
|---|
| 5018 | + thread->proc->outstanding_txns--; |
|---|
| 4721 | 5019 | t->to_proc = NULL; |
|---|
| 4722 | 5020 | t->to_thread = NULL; |
|---|
| 4723 | 5021 | if (t->buffer) { |
|---|
| .. | .. |
|---|
| 4733 | 5031 | spin_unlock(&last_t->lock); |
|---|
| 4734 | 5032 | if (t) |
|---|
| 4735 | 5033 | spin_lock(&t->lock); |
|---|
| 5034 | + else |
|---|
| 5035 | + __acquire(&t->lock); |
|---|
| 4736 | 5036 | } |
|---|
| 5037 | + /* annotation for sparse, lock not acquired in last iteration above */ |
|---|
| 5038 | + __release(&t->lock); |
|---|
| 4737 | 5039 | |
|---|
| 4738 | 5040 | /* |
|---|
| 4739 | 5041 | * If this thread used poll, make sure we remove the waitqueue from any |
|---|
| .. | .. |
|---|
| 4757 | 5059 | if (send_reply) |
|---|
| 4758 | 5060 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); |
|---|
| 4759 | 5061 | binder_release_work(proc, &thread->todo); |
|---|
| 5062 | + trace_android_vh_binder_thread_release(proc, thread); |
|---|
| 4760 | 5063 | binder_thread_dec_tmpref(thread); |
|---|
| 4761 | 5064 | return active_transactions; |
|---|
| 4762 | 5065 | } |
|---|
| .. | .. |
|---|
| 4833 | 5136 | if (!binder_worklist_empty_ilocked(&proc->todo)) |
|---|
| 4834 | 5137 | binder_wakeup_proc_ilocked(proc); |
|---|
| 4835 | 5138 | binder_inner_proc_unlock(proc); |
|---|
| 5139 | + trace_android_vh_binder_read_done(proc, thread); |
|---|
| 4836 | 5140 | if (ret < 0) { |
|---|
| 4837 | 5141 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) |
|---|
| 4838 | 5142 | ret = -EFAULT; |
|---|
| .. | .. |
|---|
| 4867 | 5171 | ret = -EBUSY; |
|---|
| 4868 | 5172 | goto out; |
|---|
| 4869 | 5173 | } |
|---|
| 4870 | | - ret = security_binder_set_context_mgr(proc->cred); |
|---|
| 5174 | + ret = security_binder_set_context_mgr(binder_get_cred(proc)); |
|---|
| 4871 | 5175 | if (ret < 0) |
|---|
| 4872 | 5176 | goto out; |
|---|
| 4873 | 5177 | if (uid_valid(context->binder_context_mgr_uid)) { |
|---|
| .. | .. |
|---|
| 4957 | 5261 | } |
|---|
| 4958 | 5262 | } |
|---|
| 4959 | 5263 | binder_inner_proc_unlock(proc); |
|---|
| 5264 | + |
|---|
| 5265 | + return 0; |
|---|
| 5266 | +} |
|---|
| 5267 | + |
|---|
| 5268 | +static bool binder_txns_pending_ilocked(struct binder_proc *proc) /* caller must hold the proc inner lock */
|---|
| 5269 | +{
|---|
| 5270 | + struct rb_node *n;
|---|
| 5271 | + struct binder_thread *thread;
|---|
| 5272 | +
|---|
| 5273 | + if (proc->outstanding_txns > 0) /* transactions delivered but not yet completed */
|---|
| 5274 | + return true;
|---|
| 5275 | +
|---|
| 5276 | + for (n = rb_first(&proc->threads); n; n = rb_next(n)) { /* scan every thread of this proc */
|---|
| 5277 | + thread = rb_entry(n, struct binder_thread, rb_node);
|---|
| 5278 | + if (thread->transaction_stack) /* thread still inside a transaction */
|---|
| 5279 | + return true;
|---|
| 5280 | + }
|---|
| 5281 | + return false;
|---|
| 5282 | +}
|---|
| 5283 | + |
|---|
| 5284 | +static int binder_ioctl_freeze(struct binder_freeze_info *info,
|---|
| 5285 | + struct binder_proc *target_proc)
|---|
| 5286 | +{
|---|
| 5287 | + int ret = 0;
|---|
| 5288 | +
|---|
| 5289 | + if (!info->enable) { /* unfreeze: reset freeze state and pending-recv flags */
|---|
| 5290 | + binder_inner_proc_lock(target_proc);
|---|
| 5291 | + target_proc->sync_recv = false;
|---|
| 5292 | + target_proc->async_recv = false;
|---|
| 5293 | + target_proc->is_frozen = false;
|---|
| 5294 | + binder_inner_proc_unlock(target_proc);
|---|
| 5295 | + return 0;
|---|
| 5296 | + }
|---|
| 5297 | +
|---|
| 5298 | + /*
|---|
| 5299 | + * Freezing the target. Prevent new transactions by
|---|
| 5300 | + * setting frozen state. If timeout specified, wait
|---|
| 5301 | + * for transactions to drain.
|---|
| 5302 | + */
|---|
| 5303 | + binder_inner_proc_lock(target_proc);
|---|
| 5304 | + target_proc->sync_recv = false;
|---|
| 5305 | + target_proc->async_recv = false;
|---|
| 5306 | + target_proc->is_frozen = true;
|---|
| 5307 | + binder_inner_proc_unlock(target_proc);
|---|
| 5308 | +
|---|
| 5309 | + if (info->timeout_ms > 0)
|---|
| 5310 | + ret = wait_event_interruptible_timeout(
|---|
| 5311 | + target_proc->freeze_wait,
|---|
| 5312 | + (!target_proc->outstanding_txns),
|---|
| 5313 | + msecs_to_jiffies(info->timeout_ms)); /* >0 drained, 0 timed out, <0 interrupted */
|---|
| 5314 | +
|---|
| 5315 | + /* Check pending transactions that wait for reply */
|---|
| 5316 | + if (ret >= 0) {
|---|
| 5317 | + binder_inner_proc_lock(target_proc);
|---|
| 5318 | + if (binder_txns_pending_ilocked(target_proc))
|---|
| 5319 | + ret = -EAGAIN; /* still busy; userspace may retry */
|---|
| 5320 | + binder_inner_proc_unlock(target_proc);
|---|
| 5321 | + }
|---|
| 5322 | +
|---|
| 5323 | + if (ret < 0) { /* freeze failed: roll back the frozen flag */
|---|
| 5324 | + binder_inner_proc_lock(target_proc);
|---|
| 5325 | + target_proc->is_frozen = false;
|---|
| 5326 | + binder_inner_proc_unlock(target_proc);
|---|
| 5327 | + }
|---|
| 5328 | +
|---|
| 5329 | + return ret; /* may be positive (jiffies left); the BINDER_FREEZE caller treats >= 0 as success */
|---|
| 5330 | +}
|---|
| 5331 | + |
|---|
| 5332 | +static int binder_ioctl_get_freezer_info(
|---|
| 5333 | + struct binder_frozen_status_info *info)
|---|
| 5334 | +{
|---|
| 5335 | + struct binder_proc *target_proc;
|---|
| 5336 | + bool found = false;
|---|
| 5337 | + __u32 txns_pending;
|---|
| 5338 | +
|---|
| 5339 | + info->sync_recv = 0; /* results are OR-accumulated across all procs with info->pid */
|---|
| 5340 | + info->async_recv = 0;
|---|
| 5341 | +
|---|
| 5342 | + mutex_lock(&binder_procs_lock);
|---|
| 5343 | + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { /* a pid may own several binder_procs */
|---|
| 5344 | + if (target_proc->pid == info->pid) {
|---|
| 5345 | + found = true;
|---|
| 5346 | + binder_inner_proc_lock(target_proc);
|---|
| 5347 | + txns_pending = binder_txns_pending_ilocked(target_proc);
|---|
| 5348 | + info->sync_recv |= target_proc->sync_recv |
|---|
| 5349 | + (txns_pending << 1); /* bit 1 of sync_recv reports pending txns */
|---|
| 5350 | + info->async_recv |= target_proc->async_recv;
|---|
| 5351 | + binder_inner_proc_unlock(target_proc);
|---|
| 5352 | + }
|---|
| 5353 | + }
|---|
| 5354 | + mutex_unlock(&binder_procs_lock);
|---|
| 5355 | +
|---|
| 5356 | + if (!found) /* no binder_proc matched the requested pid */
|---|
| 5357 | + return -EINVAL;
|---|
| 4960 | 5358 |
|---|
| 4961 | 5359 | return 0;
|---|
| 4962 | 5360 | }
|---|
| .. | .. |
|---|
| 5079 | 5477 | } |
|---|
| 5080 | 5478 | break; |
|---|
| 5081 | 5479 | } |
|---|
| 5480 | + case BINDER_FREEZE: { |
|---|
| 5481 | + struct binder_freeze_info info; |
|---|
| 5482 | + struct binder_proc **target_procs = NULL, *target_proc; |
|---|
| 5483 | + int target_procs_count = 0, i = 0; |
|---|
| 5484 | + |
|---|
| 5485 | + ret = 0; |
|---|
| 5486 | + |
|---|
| 5487 | + if (copy_from_user(&info, ubuf, sizeof(info))) { |
|---|
| 5488 | + ret = -EFAULT; |
|---|
| 5489 | + goto err; |
|---|
| 5490 | + } |
|---|
| 5491 | + |
|---|
| 5492 | + mutex_lock(&binder_procs_lock); |
|---|
| 5493 | + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { |
|---|
| 5494 | + if (target_proc->pid == info.pid) |
|---|
| 5495 | + target_procs_count++; |
|---|
| 5496 | + } |
|---|
| 5497 | + |
|---|
| 5498 | + if (target_procs_count == 0) { |
|---|
| 5499 | + mutex_unlock(&binder_procs_lock); |
|---|
| 5500 | + ret = -EINVAL; |
|---|
| 5501 | + goto err; |
|---|
| 5502 | + } |
|---|
| 5503 | + |
|---|
| 5504 | + target_procs = kcalloc(target_procs_count, |
|---|
| 5505 | + sizeof(struct binder_proc *), |
|---|
| 5506 | + GFP_KERNEL); |
|---|
| 5507 | + |
|---|
| 5508 | + if (!target_procs) { |
|---|
| 5509 | + mutex_unlock(&binder_procs_lock); |
|---|
| 5510 | + ret = -ENOMEM; |
|---|
| 5511 | + goto err; |
|---|
| 5512 | + } |
|---|
| 5513 | + |
|---|
| 5514 | + hlist_for_each_entry(target_proc, &binder_procs, proc_node) { |
|---|
| 5515 | + if (target_proc->pid != info.pid) |
|---|
| 5516 | + continue; |
|---|
| 5517 | + |
|---|
| 5518 | + binder_inner_proc_lock(target_proc); |
|---|
| 5519 | + target_proc->tmp_ref++; |
|---|
| 5520 | + binder_inner_proc_unlock(target_proc); |
|---|
| 5521 | + |
|---|
| 5522 | + target_procs[i++] = target_proc; |
|---|
| 5523 | + } |
|---|
| 5524 | + mutex_unlock(&binder_procs_lock); |
|---|
| 5525 | + |
|---|
| 5526 | + for (i = 0; i < target_procs_count; i++) { |
|---|
| 5527 | + if (ret >= 0) |
|---|
| 5528 | + ret = binder_ioctl_freeze(&info, |
|---|
| 5529 | + target_procs[i]); |
|---|
| 5530 | + |
|---|
| 5531 | + binder_proc_dec_tmpref(target_procs[i]); |
|---|
| 5532 | + } |
|---|
| 5533 | + |
|---|
| 5534 | + kfree(target_procs); |
|---|
| 5535 | + |
|---|
| 5536 | + if (ret < 0) |
|---|
| 5537 | + goto err; |
|---|
| 5538 | + break; |
|---|
| 5539 | + } |
|---|
| 5540 | + case BINDER_GET_FROZEN_INFO: { |
|---|
| 5541 | + struct binder_frozen_status_info info; |
|---|
| 5542 | + |
|---|
| 5543 | + if (copy_from_user(&info, ubuf, sizeof(info))) { |
|---|
| 5544 | + ret = -EFAULT; |
|---|
| 5545 | + goto err; |
|---|
| 5546 | + } |
|---|
| 5547 | + |
|---|
| 5548 | + ret = binder_ioctl_get_freezer_info(&info); |
|---|
| 5549 | + if (ret < 0) |
|---|
| 5550 | + goto err; |
|---|
| 5551 | + |
|---|
| 5552 | + if (copy_to_user(ubuf, &info, sizeof(info))) { |
|---|
| 5553 | + ret = -EFAULT; |
|---|
| 5554 | + goto err; |
|---|
| 5555 | + } |
|---|
| 5556 | + break; |
|---|
| 5557 | + } |
|---|
| 5558 | + case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: { |
|---|
| 5559 | + uint32_t enable; |
|---|
| 5560 | + |
|---|
| 5561 | + if (copy_from_user(&enable, ubuf, sizeof(enable))) { |
|---|
| 5562 | + ret = -EFAULT; |
|---|
| 5563 | + goto err; |
|---|
| 5564 | + } |
|---|
| 5565 | + binder_inner_proc_lock(proc); |
|---|
| 5566 | + proc->oneway_spam_detection_enabled = (bool)enable; |
|---|
| 5567 | + binder_inner_proc_unlock(proc); |
|---|
| 5568 | + break; |
|---|
| 5569 | + } |
|---|
| 5082 | 5570 | default: |
|---|
| 5083 | 5571 | ret = -EINVAL; |
|---|
| 5084 | 5572 | goto err; |
|---|
| .. | .. |
|---|
| 5088 | 5576 | if (thread) |
|---|
| 5089 | 5577 | thread->looper_need_return = false; |
|---|
| 5090 | 5578 | wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
|---|
| 5091 | | - if (ret && ret != -ERESTARTSYS) |
|---|
| 5579 | + if (ret && ret != -EINTR) |
|---|
| 5092 | 5580 | pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); |
|---|
| 5093 | 5581 | err_unlocked: |
|---|
| 5094 | 5582 | trace_binder_ioctl_done(ret); |
|---|
| .. | .. |
|---|
| 5116 | 5604 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
|---|
| 5117 | 5605 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
|---|
| 5118 | 5606 | binder_alloc_vma_close(&proc->alloc); |
|---|
| 5119 | | - binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); |
|---|
| 5120 | 5607 | } |
|---|
| 5121 | 5608 | |
|---|
| 5122 | 5609 | static vm_fault_t binder_vm_fault(struct vm_fault *vmf) |
|---|
| .. | .. |
|---|
| 5132 | 5619 | |
|---|
| 5133 | 5620 | static int binder_mmap(struct file *filp, struct vm_area_struct *vma) |
|---|
| 5134 | 5621 | { |
|---|
| 5135 | | - int ret; |
|---|
| 5136 | 5622 | struct binder_proc *proc = filp->private_data; |
|---|
| 5137 | | - const char *failure_string; |
|---|
| 5138 | 5623 | |
|---|
| 5139 | 5624 | if (proc->tsk != current->group_leader) |
|---|
| 5140 | 5625 | return -EINVAL; |
|---|
| 5141 | | - |
|---|
| 5142 | | - if ((vma->vm_end - vma->vm_start) > SZ_4M) |
|---|
| 5143 | | - vma->vm_end = vma->vm_start + SZ_4M; |
|---|
| 5144 | 5626 | |
|---|
| 5145 | 5627 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, |
|---|
| 5146 | 5628 | "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", |
|---|
| .. | .. |
|---|
| 5149 | 5631 | (unsigned long)pgprot_val(vma->vm_page_prot)); |
|---|
| 5150 | 5632 | |
|---|
| 5151 | 5633 | if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { |
|---|
| 5152 | | - ret = -EPERM; |
|---|
| 5153 | | - failure_string = "bad vm_flags"; |
|---|
| 5154 | | - goto err_bad_arg; |
|---|
| 5634 | + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, |
|---|
| 5635 | + proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); |
|---|
| 5636 | + return -EPERM; |
|---|
| 5155 | 5637 | } |
|---|
| 5156 | 5638 | vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; |
|---|
| 5157 | 5639 | vma->vm_flags &= ~VM_MAYWRITE; |
|---|
| .. | .. |
|---|
| 5159 | 5641 | vma->vm_ops = &binder_vm_ops; |
|---|
| 5160 | 5642 | vma->vm_private_data = proc; |
|---|
| 5161 | 5643 | |
|---|
| 5162 | | - ret = binder_alloc_mmap_handler(&proc->alloc, vma); |
|---|
| 5163 | | - if (ret) |
|---|
| 5164 | | - return ret; |
|---|
| 5165 | | - mutex_lock(&proc->files_lock); |
|---|
| 5166 | | - proc->files = get_files_struct(current); |
|---|
| 5167 | | - mutex_unlock(&proc->files_lock); |
|---|
| 5168 | | - return 0; |
|---|
| 5169 | | - |
|---|
| 5170 | | -err_bad_arg: |
|---|
| 5171 | | - pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, |
|---|
| 5172 | | - proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); |
|---|
| 5173 | | - return ret; |
|---|
| 5644 | + return binder_alloc_mmap_handler(&proc->alloc, vma); |
|---|
| 5174 | 5645 | } |
|---|
| 5175 | 5646 | |
|---|
| 5176 | 5647 | static int binder_open(struct inode *nodp, struct file *filp) |
|---|
| 5177 | 5648 | { |
|---|
| 5178 | | - struct binder_proc *proc; |
|---|
| 5649 | + struct binder_proc *proc, *itr; |
|---|
| 5650 | + struct binder_proc_ext *eproc; |
|---|
| 5179 | 5651 | struct binder_device *binder_dev; |
|---|
| 5180 | 5652 | struct binderfs_info *info; |
|---|
| 5181 | 5653 | struct dentry *binder_binderfs_dir_entry_proc = NULL; |
|---|
| 5654 | + bool existing_pid = false; |
|---|
| 5182 | 5655 | |
|---|
| 5183 | 5656 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, |
|---|
| 5184 | 5657 | current->group_leader->pid, current->pid); |
|---|
| 5185 | 5658 | |
|---|
| 5186 | | - proc = kzalloc(sizeof(*proc), GFP_KERNEL); |
|---|
| 5659 | + eproc = kzalloc(sizeof(*eproc), GFP_KERNEL); |
|---|
| 5660 | + proc = &eproc->proc; |
|---|
| 5187 | 5661 | if (proc == NULL) |
|---|
| 5188 | 5662 | return -ENOMEM; |
|---|
| 5189 | 5663 | spin_lock_init(&proc->inner_lock); |
|---|
| 5190 | 5664 | spin_lock_init(&proc->outer_lock); |
|---|
| 5191 | 5665 | get_task_struct(current->group_leader); |
|---|
| 5192 | 5666 | proc->tsk = current->group_leader; |
|---|
| 5193 | | - mutex_init(&proc->files_lock); |
|---|
| 5194 | | - proc->cred = get_cred(filp->f_cred); |
|---|
| 5667 | + eproc->cred = get_cred(filp->f_cred); |
|---|
| 5195 | 5668 | INIT_LIST_HEAD(&proc->todo); |
|---|
| 5669 | + init_waitqueue_head(&proc->freeze_wait); |
|---|
| 5196 | 5670 | if (binder_supported_policy(current->policy)) { |
|---|
| 5197 | 5671 | proc->default_priority.sched_policy = current->policy; |
|---|
| 5198 | 5672 | proc->default_priority.prio = current->normal_prio; |
|---|
| .. | .. |
|---|
| 5221 | 5695 | filp->private_data = proc; |
|---|
| 5222 | 5696 | |
|---|
| 5223 | 5697 | mutex_lock(&binder_procs_lock); |
|---|
| 5698 | + hlist_for_each_entry(itr, &binder_procs, proc_node) { |
|---|
| 5699 | + if (itr->pid == proc->pid) { |
|---|
| 5700 | + existing_pid = true; |
|---|
| 5701 | + break; |
|---|
| 5702 | + } |
|---|
| 5703 | + } |
|---|
| 5224 | 5704 | hlist_add_head(&proc->proc_node, &binder_procs); |
|---|
| 5225 | 5705 | mutex_unlock(&binder_procs_lock); |
|---|
| 5226 | | - |
|---|
| 5227 | | - if (binder_debugfs_dir_entry_proc) { |
|---|
| 5706 | + trace_android_vh_binder_preset(&binder_procs, &binder_procs_lock); |
|---|
| 5707 | + if (binder_debugfs_dir_entry_proc && !existing_pid) { |
|---|
| 5228 | 5708 | char strbuf[11]; |
|---|
| 5229 | 5709 | |
|---|
| 5230 | 5710 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
|---|
| 5231 | 5711 | /* |
|---|
| 5232 | | - * proc debug entries are shared between contexts, so |
|---|
| 5233 | | - * this will fail if the process tries to open the driver |
|---|
| 5234 | | - * again with a different context. The priting code will |
|---|
| 5235 | | - * anyway print all contexts that a given PID has, so this |
|---|
| 5236 | | - * is not a problem. |
|---|
| 5712 | + * proc debug entries are shared between contexts. |
|---|
| 5713 | + * Only create for the first PID to avoid debugfs log spamming |
|---|
| 5714 | + * The printing code will anyway print all contexts for a given |
|---|
| 5715 | + * PID so this is not a problem. |
|---|
| 5237 | 5716 | */ |
|---|
| 5238 | 5717 | proc->debugfs_entry = debugfs_create_file(strbuf, 0444, |
|---|
| 5239 | 5718 | binder_debugfs_dir_entry_proc, |
|---|
| .. | .. |
|---|
| 5241 | 5720 | &proc_fops); |
|---|
| 5242 | 5721 | } |
|---|
| 5243 | 5722 | |
|---|
| 5244 | | - if (binder_binderfs_dir_entry_proc) { |
|---|
| 5723 | + if (binder_binderfs_dir_entry_proc && !existing_pid) { |
|---|
| 5245 | 5724 | char strbuf[11]; |
|---|
| 5246 | 5725 | struct dentry *binderfs_entry; |
|---|
| 5247 | 5726 | |
|---|
| 5248 | 5727 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
|---|
| 5249 | 5728 | /* |
|---|
| 5250 | 5729 | * Similar to debugfs, the process specific log file is shared |
|---|
| 5251 | | - * between contexts. If the file has already been created for a |
|---|
| 5252 | | - * process, the following binderfs_create_file() call will |
|---|
| 5253 | | - * fail with error code EEXIST if another context of the same |
|---|
| 5254 | | - * process invoked binder_open(). This is ok since same as |
|---|
| 5255 | | - * debugfs, the log file will contain information on all |
|---|
| 5256 | | - * contexts of a given PID. |
|---|
| 5730 | + * between contexts. Only create for the first PID. |
|---|
| 5731 | + * This is ok since same as debugfs, the log file will contain |
|---|
| 5732 | + * information on all contexts of a given PID. |
|---|
| 5257 | 5733 | */ |
|---|
| 5258 | 5734 | binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, |
|---|
| 5259 | 5735 | strbuf, &proc_fops, (void *)(unsigned long)proc->pid); |
|---|
| .. | .. |
|---|
| 5263 | 5739 | int error; |
|---|
| 5264 | 5740 | |
|---|
| 5265 | 5741 | error = PTR_ERR(binderfs_entry); |
|---|
| 5266 | | - if (error != -EEXIST) { |
|---|
| 5267 | | - pr_warn("Unable to create file %s in binderfs (error %d)\n", |
|---|
| 5268 | | - strbuf, error); |
|---|
| 5269 | | - } |
|---|
| 5742 | + pr_warn("Unable to create file %s in binderfs (error %d)\n", |
|---|
| 5743 | + strbuf, error); |
|---|
| 5270 | 5744 | } |
|---|
| 5271 | 5745 | } |
|---|
| 5272 | 5746 | |
|---|
| .. | .. |
|---|
| 5391 | 5865 | struct rb_node *n; |
|---|
| 5392 | 5866 | int threads, nodes, incoming_refs, outgoing_refs, active_transactions; |
|---|
| 5393 | 5867 | |
|---|
| 5394 | | - BUG_ON(proc->files); |
|---|
| 5395 | | - |
|---|
| 5396 | 5868 | mutex_lock(&binder_procs_lock); |
|---|
| 5397 | 5869 | hlist_del(&proc->proc_node); |
|---|
| 5398 | 5870 | mutex_unlock(&binder_procs_lock); |
|---|
| .. | .. |
|---|
| 5414 | 5886 | proc->tmp_ref++; |
|---|
| 5415 | 5887 | |
|---|
| 5416 | 5888 | proc->is_dead = true; |
|---|
| 5889 | + proc->is_frozen = false; |
|---|
| 5890 | + proc->sync_recv = false; |
|---|
| 5891 | + proc->async_recv = false; |
|---|
| 5417 | 5892 | threads = 0; |
|---|
| 5418 | 5893 | active_transactions = 0; |
|---|
| 5419 | 5894 | while ((n = rb_first(&proc->threads))) { |
|---|
| .. | .. |
|---|
| 5474 | 5949 | static void binder_deferred_func(struct work_struct *work) |
|---|
| 5475 | 5950 | { |
|---|
| 5476 | 5951 | struct binder_proc *proc; |
|---|
| 5477 | | - struct files_struct *files; |
|---|
| 5478 | 5952 | |
|---|
| 5479 | 5953 | int defer; |
|---|
| 5480 | 5954 | |
|---|
| .. | .. |
|---|
| 5492 | 5966 | } |
|---|
| 5493 | 5967 | mutex_unlock(&binder_deferred_lock); |
|---|
| 5494 | 5968 | |
|---|
| 5495 | | - files = NULL; |
|---|
| 5496 | | - if (defer & BINDER_DEFERRED_PUT_FILES) { |
|---|
| 5497 | | - mutex_lock(&proc->files_lock); |
|---|
| 5498 | | - files = proc->files; |
|---|
| 5499 | | - if (files) |
|---|
| 5500 | | - proc->files = NULL; |
|---|
| 5501 | | - mutex_unlock(&proc->files_lock); |
|---|
| 5502 | | - } |
|---|
| 5503 | | - |
|---|
| 5504 | 5969 | if (defer & BINDER_DEFERRED_FLUSH) |
|---|
| 5505 | 5970 | binder_deferred_flush(proc); |
|---|
| 5506 | 5971 | |
|---|
| 5507 | 5972 | if (defer & BINDER_DEFERRED_RELEASE) |
|---|
| 5508 | 5973 | binder_deferred_release(proc); /* frees proc */ |
|---|
| 5509 | | - |
|---|
| 5510 | | - if (files) |
|---|
| 5511 | | - put_files_struct(files); |
|---|
| 5512 | 5974 | } while (proc); |
|---|
| 5513 | 5975 | } |
|---|
| 5514 | 5976 | static DECLARE_WORK(binder_deferred_work, binder_deferred_func); |
|---|
| .. | .. |
|---|
| 5535 | 5997 | struct binder_buffer *buffer = t->buffer; |
|---|
| 5536 | 5998 | |
|---|
| 5537 | 5999 | spin_lock(&t->lock); |
|---|
| 6000 | + trace_android_vh_binder_print_transaction_info(m, proc, prefix, t); |
|---|
| 5538 | 6001 | to_proc = t->to_proc; |
|---|
| 5539 | 6002 | seq_printf(m, |
|---|
| 5540 | 6003 | "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", |
|---|
| .. | .. |
|---|
| 5779 | 6242 | "BR_FINISHED", |
|---|
| 5780 | 6243 | "BR_DEAD_BINDER", |
|---|
| 5781 | 6244 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", |
|---|
| 5782 | | - "BR_FAILED_REPLY" |
|---|
| 6245 | + "BR_FAILED_REPLY", |
|---|
| 6246 | + "BR_FROZEN_REPLY", |
|---|
| 6247 | + "BR_ONEWAY_SPAM_SUSPECT", |
|---|
| 5783 | 6248 | }; |
|---|
| 5784 | 6249 | |
|---|
| 5785 | 6250 | static const char * const binder_command_strings[] = { |
|---|
| .. | .. |
|---|
| 5920 | 6385 | print_binder_stats(m, " ", &proc->stats); |
|---|
| 5921 | 6386 | } |
|---|
| 5922 | 6387 | |
|---|
| 5923 | | - |
|---|
| 5924 | | -int binder_state_show(struct seq_file *m, void *unused) |
|---|
| 6388 | +static int state_show(struct seq_file *m, void *unused) |
|---|
| 5925 | 6389 | { |
|---|
| 5926 | 6390 | struct binder_proc *proc; |
|---|
| 5927 | 6391 | struct binder_node *node; |
|---|
| .. | .. |
|---|
| 5960 | 6424 | return 0; |
|---|
| 5961 | 6425 | } |
|---|
| 5962 | 6426 | |
|---|
| 5963 | | -int binder_stats_show(struct seq_file *m, void *unused) |
|---|
| 6427 | +static int stats_show(struct seq_file *m, void *unused) |
|---|
| 5964 | 6428 | { |
|---|
| 5965 | 6429 | struct binder_proc *proc; |
|---|
| 5966 | 6430 | |
|---|
| .. | .. |
|---|
| 5976 | 6440 | return 0; |
|---|
| 5977 | 6441 | } |
|---|
| 5978 | 6442 | |
|---|
| 5979 | | -int binder_transactions_show(struct seq_file *m, void *unused) |
|---|
| 6443 | +static int transactions_show(struct seq_file *m, void *unused) |
|---|
| 5980 | 6444 | { |
|---|
| 5981 | 6445 | struct binder_proc *proc; |
|---|
| 5982 | 6446 | |
|---|
| .. | .. |
|---|
| 6032 | 6496 | "\n" : " (incomplete)\n"); |
|---|
| 6033 | 6497 | } |
|---|
| 6034 | 6498 | |
|---|
| 6035 | | -int binder_transaction_log_show(struct seq_file *m, void *unused) |
|---|
| 6499 | +static int transaction_log_show(struct seq_file *m, void *unused) |
|---|
| 6036 | 6500 | { |
|---|
| 6037 | 6501 | struct binder_transaction_log *log = m->private; |
|---|
| 6038 | 6502 | unsigned int log_cur = atomic_read(&log->cur); |
|---|
| .. | .. |
|---|
| 6057 | 6521 | .owner = THIS_MODULE, |
|---|
| 6058 | 6522 | .poll = binder_poll, |
|---|
| 6059 | 6523 | .unlocked_ioctl = binder_ioctl, |
|---|
| 6060 | | - .compat_ioctl = binder_ioctl, |
|---|
| 6524 | + .compat_ioctl = compat_ptr_ioctl, |
|---|
| 6061 | 6525 | .mmap = binder_mmap, |
|---|
| 6062 | 6526 | .open = binder_open, |
|---|
| 6063 | 6527 | .flush = binder_flush, |
|---|
| 6064 | 6528 | .release = binder_release, |
|---|
| 6529 | +}; |
|---|
| 6530 | + |
|---|
| 6531 | +DEFINE_SHOW_ATTRIBUTE(state); |
|---|
| 6532 | +DEFINE_SHOW_ATTRIBUTE(stats); |
|---|
| 6533 | +DEFINE_SHOW_ATTRIBUTE(transactions); |
|---|
| 6534 | +DEFINE_SHOW_ATTRIBUTE(transaction_log); |
|---|
| 6535 | + |
|---|
| 6536 | +const struct binder_debugfs_entry binder_debugfs_entries[] = { |
|---|
| 6537 | + { |
|---|
| 6538 | + .name = "state", |
|---|
| 6539 | + .mode = 0444, |
|---|
| 6540 | + .fops = &state_fops, |
|---|
| 6541 | + .data = NULL, |
|---|
| 6542 | + }, |
|---|
| 6543 | + { |
|---|
| 6544 | + .name = "stats", |
|---|
| 6545 | + .mode = 0444, |
|---|
| 6546 | + .fops = &stats_fops, |
|---|
| 6547 | + .data = NULL, |
|---|
| 6548 | + }, |
|---|
| 6549 | + { |
|---|
| 6550 | + .name = "transactions", |
|---|
| 6551 | + .mode = 0444, |
|---|
| 6552 | + .fops = &transactions_fops, |
|---|
| 6553 | + .data = NULL, |
|---|
| 6554 | + }, |
|---|
| 6555 | + { |
|---|
| 6556 | + .name = "transaction_log", |
|---|
| 6557 | + .mode = 0444, |
|---|
| 6558 | + .fops = &transaction_log_fops, |
|---|
| 6559 | + .data = &binder_transaction_log, |
|---|
| 6560 | + }, |
|---|
| 6561 | + { |
|---|
| 6562 | + .name = "failed_transaction_log", |
|---|
| 6563 | + .mode = 0444, |
|---|
| 6564 | + .fops = &transaction_log_fops, |
|---|
| 6565 | + .data = &binder_transaction_log_failed, |
|---|
| 6566 | + }, |
|---|
| 6567 | + {} /* terminator */ |
|---|
| 6065 | 6568 | }; |
|---|
| 6066 | 6569 | |
|---|
| 6067 | 6570 | static int __init init_binder_device(const char *name) |
|---|
| .. | .. |
|---|
| 6109 | 6612 | atomic_set(&binder_transaction_log_failed.cur, ~0U); |
|---|
| 6110 | 6613 | |
|---|
| 6111 | 6614 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); |
|---|
| 6112 | | - if (binder_debugfs_dir_entry_root) |
|---|
| 6615 | + if (binder_debugfs_dir_entry_root) { |
|---|
| 6616 | + const struct binder_debugfs_entry *db_entry; |
|---|
| 6617 | + |
|---|
| 6618 | + binder_for_each_debugfs_entry(db_entry) |
|---|
| 6619 | + debugfs_create_file(db_entry->name, |
|---|
| 6620 | + db_entry->mode, |
|---|
| 6621 | + binder_debugfs_dir_entry_root, |
|---|
| 6622 | + db_entry->data, |
|---|
| 6623 | + db_entry->fops); |
|---|
| 6624 | + |
|---|
| 6113 | 6625 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", |
|---|
| 6114 | 6626 | binder_debugfs_dir_entry_root); |
|---|
| 6115 | | - |
|---|
| 6116 | | - if (binder_debugfs_dir_entry_root) { |
|---|
| 6117 | | - debugfs_create_file("state", |
|---|
| 6118 | | - 0444, |
|---|
| 6119 | | - binder_debugfs_dir_entry_root, |
|---|
| 6120 | | - NULL, |
|---|
| 6121 | | - &binder_state_fops); |
|---|
| 6122 | | - debugfs_create_file("stats", |
|---|
| 6123 | | - 0444, |
|---|
| 6124 | | - binder_debugfs_dir_entry_root, |
|---|
| 6125 | | - NULL, |
|---|
| 6126 | | - &binder_stats_fops); |
|---|
| 6127 | | - debugfs_create_file("transactions", |
|---|
| 6128 | | - 0444, |
|---|
| 6129 | | - binder_debugfs_dir_entry_root, |
|---|
| 6130 | | - NULL, |
|---|
| 6131 | | - &binder_transactions_fops); |
|---|
| 6132 | | - debugfs_create_file("transaction_log", |
|---|
| 6133 | | - 0444, |
|---|
| 6134 | | - binder_debugfs_dir_entry_root, |
|---|
| 6135 | | - &binder_transaction_log, |
|---|
| 6136 | | - &binder_transaction_log_fops); |
|---|
| 6137 | | - debugfs_create_file("failed_transaction_log", |
|---|
| 6138 | | - 0444, |
|---|
| 6139 | | - binder_debugfs_dir_entry_root, |
|---|
| 6140 | | - &binder_transaction_log_failed, |
|---|
| 6141 | | - &binder_transaction_log_fops); |
|---|
| 6142 | 6627 | } |
|---|
| 6143 | 6628 | |
|---|
| 6144 | 6629 | if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && |
|---|
| .. | .. |
|---|
| 6186 | 6671 | |
|---|
| 6187 | 6672 | #define CREATE_TRACE_POINTS |
|---|
| 6188 | 6673 | #include "binder_trace.h" |
|---|
| 6674 | +EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received); |
|---|
| 6189 | 6675 | |
|---|
| 6190 | 6676 | MODULE_LICENSE("GPL v2"); |
|---|