hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/android/binder_internal.h
@@ -12,6 +12,8 @@
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/uidgid.h>
+#include <uapi/linux/android/binderfs.h>
+#include "binder_alloc.h"
 
 struct binder_context {
         struct binder_node *binder_context_mgr_node;
@@ -105,42 +107,495 @@
 }
 #endif
 
-int binder_stats_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_stats);
+struct binder_debugfs_entry {
+        const char *name;
+        umode_t mode;
+        const struct file_operations *fops;
+        void *data;
+};
 
-int binder_state_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_state);
+extern const struct binder_debugfs_entry binder_debugfs_entries[];
 
-int binder_transactions_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_transactions);
+#define binder_for_each_debugfs_entry(entry)       \
+        for ((entry) = binder_debugfs_entries;     \
+             (entry)->name;                        \
+             (entry)++)
 
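/*
 * Editor's sketch (not part of this patch): one way the entry table and
 * iterator above can be consumed at init time.  The parent dentry name
 * and the helper name are assumptions for illustration; requires
 * <linux/debugfs.h>.
 */
static struct dentry *binder_debugfs_dir_entry_root;   /* assumed parent */

static void __init binder_create_debugfs_entries(void)
{
        const struct binder_debugfs_entry *db_entry;

        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

        binder_for_each_debugfs_entry(db_entry)
                debugfs_create_file(db_entry->name,
                                    db_entry->mode,
                                    binder_debugfs_dir_entry_root,
                                    db_entry->data,
                                    db_entry->fops);
}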
-int binder_transaction_log_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
+enum binder_stat_types {
+        BINDER_STAT_PROC,
+        BINDER_STAT_THREAD,
+        BINDER_STAT_NODE,
+        BINDER_STAT_REF,
+        BINDER_STAT_DEATH,
+        BINDER_STAT_TRANSACTION,
+        BINDER_STAT_TRANSACTION_COMPLETE,
+        BINDER_STAT_COUNT
+};
 
-struct binder_transaction_log_entry {
+struct binder_stats {
+        atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
+        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+        atomic_t obj_created[BINDER_STAT_COUNT];
+        atomic_t obj_deleted[BINDER_STAT_COUNT];
+};
+
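/*
 * Editor's sketch (not part of this patch): the counters above are plain
 * atomics, so creation/deletion events can be recorded without a lock.
 * The global instance and helper names below are assumptions for
 * illustration (binder.c keeps equivalent helpers).
 */
static struct binder_stats binder_stats;

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}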
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
+struct binder_work {
+        struct list_head entry;
+
+        enum binder_work_type {
+                BINDER_WORK_TRANSACTION = 1,
+                BINDER_WORK_TRANSACTION_COMPLETE,
+                BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
+                BINDER_WORK_RETURN_ERROR,
+                BINDER_WORK_NODE,
+                BINDER_WORK_DEAD_BINDER,
+                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+        } type;
+};
+
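/*
 * Editor's sketch (not part of this patch): binder_work is an intrusive
 * list element tagged with a type; consumers recover the containing
 * object with container_of() after examining the tag.  The handler name
 * is an assumption for illustration.
 */
static void binder_handle_work(struct binder_work *w)
{
        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
                struct binder_transaction *t;

                t = container_of(w, struct binder_transaction, work);
                /* ... deliver transaction t to the reader ... */
                break;
        }
        case BINDER_WORK_RETURN_ERROR: {
                struct binder_error *e;

                e = container_of(w, struct binder_error, work);
                /* ... report e->cmd to userspace ... */
                break;
        }
        default:
                break;
        }
}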
+struct binder_error {
+        struct binder_work work;
+        uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ *          (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ *          (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ *          (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ *          (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ *          (invariant after initialized)
+ * @refs: list of references on this node
+ *          (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ *          initiating a transaction
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @tmp_refs: temporary kernel refs
+ *          (protected by @proc->inner_lock while @proc
+ *          is valid, and by binder_dead_nodes_lock
+ *          if @proc is NULL. During inc/dec and node release
+ *          it is also protected by @lock to provide safety
+ *          as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ *          (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ *          (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ *          (protected by @proc->inner_lock if @proc
+ *          and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ *          (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ *          (invariant after initialized)
+ * @accept_fds: file descriptor operations supported for node
+ *          (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ *          (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
+ * @txn_security_ctx: require sender's security context
+ *          (invariant after initialized)
+ * @async_todo: list of async work items
+ *          (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
+struct binder_node {
         int debug_id;
-        int debug_id_done;
-        int call_type;
-        int from_proc;
-        int from_thread;
-        int target_handle;
-        int to_proc;
-        int to_thread;
-        int to_node;
-        int data_size;
-        int offsets_size;
-        int return_error_line;
-        uint32_t return_error;
-        uint32_t return_error_param;
-        const char *context_name;
+        spinlock_t lock;
+        struct binder_work work;
+        union {
+                struct rb_node rb_node;
+                struct hlist_node dead_node;
+        };
+        struct binder_proc *proc;
+        struct hlist_head refs;
+        int internal_strong_refs;
+        int local_weak_refs;
+        int local_strong_refs;
+        int tmp_refs;
+        binder_uintptr_t ptr;
+        binder_uintptr_t cookie;
+        struct {
+                /*
+                 * bitfield elements protected by
+                 * proc inner_lock
+                 */
+                u8 has_strong_ref:1;
+                u8 pending_strong_ref:1;
+                u8 has_weak_ref:1;
+                u8 pending_weak_ref:1;
+        };
+        struct {
+                /*
+                 * invariant after initialization
+                 */
+                u8 sched_policy:2;
+                u8 inherit_rt:1;
+                u8 accept_fds:1;
+                u8 txn_security_ctx:1;
+                u8 min_priority;
+        };
+        bool has_async_transaction;
+        struct list_head async_todo;
 };
 
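/*
 * Editor's sketch (not part of this patch): the rb_node/dead_node union
 * mirrors the node lifecycle -- a node lives in proc->nodes while its
 * process is alive, and the same storage links it onto a global
 * dead-nodes list afterwards.  The list/lock names and helper are
 * assumptions for illustration, and the real teardown additionally
 * holds @node->lock across the transition.
 */
static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static void binder_node_orphan(struct binder_proc *proc,
                               struct binder_node *node)
{
        spin_lock(&proc->inner_lock);
        rb_erase(&node->rb_node, &proc->nodes);
        spin_unlock(&proc->inner_lock);

        spin_lock(&binder_dead_nodes_lock);
        hlist_add_head(&node->dead_node, &binder_dead_nodes);
        spin_unlock(&binder_dead_nodes_lock);
}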
-struct binder_transaction_log {
-        atomic_t cur;
-        bool full;
-        struct binder_transaction_log_entry entry[32];
+struct binder_ref_death {
+        /**
+         * @work: worklist element for death notifications
+         *          (protected by inner_lock of the proc that
+         *          this ref belongs to)
+         */
+        struct binder_work work;
+        binder_uintptr_t cookie;
 };
 
-extern struct binder_transaction_log binder_transaction_log;
-extern struct binder_transaction_log binder_transaction_log_failed;
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+        int debug_id;
+        uint32_t desc;
+        int strong;
+        int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ *          (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ *          ref for deletion in binder_cleanup_ref, a non-NULL
+ *          @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ *          (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
+struct binder_ref {
+        /* Lookups needed: */
+        /*   node + proc => ref (transaction) */
+        /*   desc + proc => ref (transaction, inc/dec ref) */
+        /*   node => refs + procs (proc exit) */
+        struct binder_ref_data data;
+        struct rb_node rb_node_desc;
+        struct rb_node rb_node_node;
+        struct hlist_node node_entry;
+        struct binder_proc *proc;
+        struct binder_node *node;
+        struct binder_ref_death *death;
+};
+
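/*
 * Editor's sketch (not part of this patch): a ref is looked up by its
 * userspace handle through the proc's refs_by_desc rbtree; per the
 * comment above, the caller is expected to hold @proc->outer_lock.
 * The helper name is an assumption for illustration.
 */
static struct binder_ref *binder_get_ref_by_desc(struct binder_proc *proc,
                                                 u32 desc)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc)
                        n = n->rb_left;
                else if (desc > ref->data.desc)
                        n = n->rb_right;
                else
                        return ref;
        }
        return NULL;
}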
+/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy: scheduler policy
+ * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+        unsigned int sched_policy;
+        int prio;
+};
+
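/*
 * Editor's sketch (not part of this patch): @prio uses the kernel's
 * unified priority scale, so capturing a task's current settings is
 * roughly the conversion below.  The helper name is an assumption, and
 * the driver's own conversion may differ in detail.
 */
static void binder_priority_from_task(struct task_struct *task,
                                      struct binder_priority *prio)
{
        prio->sched_policy = task->policy;

        if (task->policy == SCHED_FIFO || task->policy == SCHED_RR)
                prio->prio = MAX_RT_PRIO - 1 - task->rt_priority; /* 0..99 */
        else
                prio->prio = NICE_TO_PRIO(task_nice(task));       /* 100..139 */
}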
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ *          (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ *          this proc ordered by node->ptr
+ *          (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ *          (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ *          (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ *          (protected by @inner_lock)
+ * @pid: PID of group_leader of process
+ *          (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ *          (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ *          (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ *          (protected by binder_deferred_lock)
+ * @outstanding_txns: number of transactions to be transmitted before
+ *          processes in freeze_wait are woken up
+ *          (protected by @inner_lock)
+ * @is_dead: process is dead and awaiting free
+ *          when outstanding transactions are cleaned up
+ *          (protected by @inner_lock)
+ * @is_frozen: process is frozen and unable to service
+ *          binder transactions
+ *          (protected by @inner_lock)
+ * @sync_recv: process received sync transactions since last frozen
+ *          bit 0: received sync transaction after being frozen
+ *          bit 1: new pending sync transaction during freezing
+ *          (protected by @inner_lock)
+ * @async_recv: process received async transactions since last frozen
+ *          (protected by @inner_lock)
+ * @freeze_wait: waitqueue of processes waiting for all outstanding
+ *          transactions to be processed
+ *          (protected by @inner_lock)
+ * @todo: list of work for this process
+ *          (protected by @inner_lock)
+ * @stats: per-process binder statistics
+ *          (atomics, no lock needed)
+ * @delivered_death: list of delivered death notifications
+ *          (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ *          (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ *          yet started. In current implementation, can
+ *          only be 0 or 1.
+ *          (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ *          (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ *          (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ *          (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ *          (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ *          Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry: process-specific binderfs log file
+ * @oneway_spam_detection_enabled: whether oneway spam detection
+ *          is enabled for this process
+ *
+ * Bookkeeping structure for binder processes.
+ */
+struct binder_proc {
+        struct hlist_node proc_node;
+        struct rb_root threads;
+        struct rb_root nodes;
+        struct rb_root refs_by_desc;
+        struct rb_root refs_by_node;
+        struct list_head waiting_threads;
+        int pid;
+        struct task_struct *tsk;
+        struct hlist_node deferred_work_node;
+        int deferred_work;
+        int outstanding_txns;
+        bool is_dead;
+        bool is_frozen;
+        bool sync_recv;
+        bool async_recv;
+        wait_queue_head_t freeze_wait;
+
+        struct list_head todo;
+        struct binder_stats stats;
+        struct list_head delivered_death;
+        int max_threads;
+        int requested_threads;
+        int requested_threads_started;
+        int tmp_ref;
+        struct binder_priority default_priority;
+        struct dentry *debugfs_entry;
+        struct binder_alloc alloc;
+        struct binder_context *context;
+        spinlock_t inner_lock;
+        spinlock_t outer_lock;
+        struct dentry *binderfs_entry;
+        bool oneway_spam_detection_enabled;
+};
+
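/*
 * Editor's sketch (not part of this patch): when more than one of the
 * locks documented above must be held, they are taken in the documented
 * order -- 1) proc->outer_lock, 2) node->lock, 3) proc->inner_lock --
 * and released in reverse.  The function name is an assumption for
 * illustration.
 */
static void binder_touch_node_locked(struct binder_proc *proc,
                                     struct binder_node *node)
{
        spin_lock(&proc->outer_lock);
        spin_lock(&node->lock);
        spin_lock(&proc->inner_lock);

        /* ... access fields guarded by all three locks ... */

        spin_unlock(&proc->inner_lock);
        spin_unlock(&node->lock);
        spin_unlock(&proc->outer_lock);
}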
+/**
+ * struct binder_proc_ext - binder process bookkeeping
+ * @proc: the embedded binder_proc
+ * @cred: struct cred associated with the `struct file`
+ *          in binder_open()
+ *          (invariant after initialized)
+ *
+ * Extended binder_proc -- needed to add the "cred" field without
+ * changing the KMI for binder_proc.
+ */
+struct binder_proc_ext {
+        struct binder_proc proc;
+        const struct cred *cred;
+};
+
+static inline const struct cred *binder_get_cred(struct binder_proc *proc)
+{
+        struct binder_proc_ext *eproc;
+
+        eproc = container_of(proc, struct binder_proc_ext, proc);
+        return eproc->cred;
+}
+
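/*
 * Editor's sketch (not part of this patch): binder_get_cred() is only
 * valid because every binder_proc is allocated embedded in a
 * binder_proc_ext, roughly as an open path would do.  The helper name
 * is an assumption for illustration; requires <linux/slab.h> and
 * <linux/cred.h>.
 */
static struct binder_proc *binder_proc_alloc(const struct cred *cred)
{
        struct binder_proc_ext *eproc;

        eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
        if (!eproc)
                return NULL;

        eproc->cred = get_cred(cred);   /* take our own reference */
        return &eproc->proc;
}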
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ *          (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ *          (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ *          (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ *          (invariant after initialization)
+ * @looper: bitmap of looping state
+ *          (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit driver
+ *          (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ *          (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ *          (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ *          (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ *          (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ *          (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ *          (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ *          (atomic since @proc->inner_lock cannot
+ *          always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ *          when outstanding transactions are cleaned up
+ *          (protected by @proc->inner_lock)
+ * @task: struct task_struct for this thread
+ *
+ * Bookkeeping structure for binder threads.
+ */
+struct binder_thread {
+        struct binder_proc *proc;
+        struct rb_node rb_node;
+        struct list_head waiting_thread_node;
+        int pid;
+        int looper;              /* only modified by this thread */
+        bool looper_need_return; /* can be written by other thread */
+        struct binder_transaction *transaction_stack;
+        struct list_head todo;
+        bool process_todo;
+        struct binder_error return_error;
+        struct binder_error reply_error;
+        wait_queue_head_t wait;
+        struct binder_stats stats;
+        atomic_t tmp_ref;
+        bool is_dead;
+        struct task_struct *task;
+};
+
+/**
+ * struct binder_txn_fd_fixup - transaction fd fixup list element
+ * @fixup_entry: list entry
+ * @file: struct file to be associated with new fd
+ * @offset: offset in buffer data to this fixup
+ *
+ * List element for fd fixups in a transaction. Since file
+ * descriptors need to be allocated in the context of the
+ * target process, we pass each fd to be processed in this
+ * struct.
+ */
+struct binder_txn_fd_fixup {
+        struct list_head fixup_entry;
+        struct file *file;
+        size_t offset;
+};
+
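/*
 * Editor's sketch (not part of this patch): the fixup list is consumed
 * in the context of the target process, where each file is given a new
 * fd; the resulting fd value is then patched into the transaction
 * buffer at fixup->offset (buffer patching and error unwinding elided).
 * The function name is an assumption for illustration.
 */
static int binder_install_txn_fds(struct binder_transaction *t)
{
        struct binder_txn_fd_fixup *fixup, *tmp;
        int ret = 0;

        list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
                int fd = get_unused_fd_flags(O_CLOEXEC);

                if (fd < 0) {
                        ret = fd;
                        break;
                }
                fd_install(fd, fixup->file);    /* consumes the file reference */
                list_del(&fixup->fixup_entry);
                kfree(fixup);
        }
        return ret;
}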
+struct binder_transaction {
+        int debug_id;
+        struct binder_work work;
+        struct binder_thread *from;
+        struct binder_transaction *from_parent;
+        struct binder_proc *to_proc;
+        struct binder_thread *to_thread;
+        struct binder_transaction *to_parent;
+        unsigned need_reply:1;
+        /* unsigned is_dead:1; */       /* not used at the moment */
+
+        struct binder_buffer *buffer;
+        unsigned int code;
+        unsigned int flags;
+        struct binder_priority priority;
+        struct binder_priority saved_priority;
+        bool set_priority_called;
+        kuid_t sender_euid;
+        struct list_head fd_fixups;
+        binder_uintptr_t security_ctx;
+        /**
+         * @lock: protects @from, @to_proc, and @to_thread
+         *
+         * @from, @to_proc, and @to_thread can be set to NULL
+         * during thread teardown
+         */
+        spinlock_t lock;
+        ANDROID_VENDOR_DATA(1);
+        ANDROID_OEM_DATA_ARRAY(1, 2);
+};
+
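/*
 * Editor's sketch (not part of this patch): per the @lock comment above,
 * @from may be cleared concurrently during thread teardown, so a reader
 * samples it under the transaction lock and pins the thread with its
 * tmp_ref (binder.c has an equivalent helper for this).
 */
static struct binder_thread *binder_txn_sender(struct binder_transaction *t)
{
        struct binder_thread *from;

        spin_lock(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);     /* keep the thread alive */
        spin_unlock(&t->lock);

        return from;
}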
+/**
+ * struct binder_object - union of flat binder object types
+ * @hdr: generic object header
+ * @fbo: binder object (nodes and refs)
+ * @fdo: file descriptor object
+ * @bbo: binder buffer pointer
+ * @fdao: file descriptor array
+ *
+ * Used for type-independent object copies
+ */
+struct binder_object {
+        union {
+                struct binder_object_header hdr;
+                struct flat_binder_object fbo;
+                struct binder_fd_object fdo;
+                struct binder_buffer_object bbo;
+                struct binder_fd_array_object fdao;
+        };
+};
+
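/*
 * Editor's sketch (not part of this patch): every member of the union
 * starts with a binder_object_header, so a type-independent copy can
 * read the header first and derive how many bytes of the union are
 * valid.  The helper name is an assumption for illustration.
 */
static size_t binder_object_size(const struct binder_object_header *hdr)
{
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER:
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE:
                return sizeof(struct flat_binder_object);
        case BINDER_TYPE_FD:
                return sizeof(struct binder_fd_object);
        case BINDER_TYPE_PTR:
                return sizeof(struct binder_buffer_object);
        case BINDER_TYPE_FDA:
                return sizeof(struct binder_fd_array_object);
        default:
                return 0;       /* unknown/invalid object type */
        }
}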
 #endif /* _LINUX_BINDER_INTERNAL_H */