xref: /linux/drivers/android/binder.c (revision 9a6b55ac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate the
33  * required lock in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
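
/*
 * Illustrative sketch of the ordering above (editorial example, not
 * driver code; the helpers are defined further down in this file):
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */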
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 
70 #include <uapi/linux/android/binder.h>
71 #include <uapi/linux/android/binderfs.h>
72 
73 #include <asm/cacheflush.h>
74 
75 #include "binder_alloc.h"
76 #include "binder_internal.h"
77 #include "binder_trace.h"
78 
79 static HLIST_HEAD(binder_deferred_list);
80 static DEFINE_MUTEX(binder_deferred_lock);
81 
82 static HLIST_HEAD(binder_devices);
83 static HLIST_HEAD(binder_procs);
84 static DEFINE_MUTEX(binder_procs_lock);
85 
86 static HLIST_HEAD(binder_dead_nodes);
87 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
88 
89 static struct dentry *binder_debugfs_dir_entry_root;
90 static struct dentry *binder_debugfs_dir_entry_proc;
91 static atomic_t binder_last_id;
92 
93 static int proc_show(struct seq_file *m, void *unused);
94 DEFINE_SHOW_ATTRIBUTE(proc);
95 
96 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
97 
98 enum {
99 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
100 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
101 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
102 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
103 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
104 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
105 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
106 	BINDER_DEBUG_USER_REFS              = 1U << 7,
107 	BINDER_DEBUG_THREADS                = 1U << 8,
108 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
109 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
110 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
111 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
112 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
113 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
114 };
115 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
116 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
117 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
118 
119 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
120 module_param_named(devices, binder_devices_param, charp, 0444);
121 
122 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
123 static int binder_stop_on_user_error;
124 
125 static int binder_set_stop_on_user_error(const char *val,
126 					 const struct kernel_param *kp)
127 {
128 	int ret;
129 
130 	ret = param_set_int(val, kp);
131 	if (binder_stop_on_user_error < 2)
132 		wake_up(&binder_user_error_wait);
133 	return ret;
134 }
135 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
136 	param_get_int, &binder_stop_on_user_error, 0644);
137 
138 #define binder_debug(mask, x...) \
139 	do { \
140 		if (binder_debug_mask & mask) \
141 			pr_info_ratelimited(x); \
142 	} while (0)
143 
144 #define binder_user_error(x...) \
145 	do { \
146 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
147 			pr_info_ratelimited(x); \
148 		if (binder_stop_on_user_error) \
149 			binder_stop_on_user_error = 2; \
150 	} while (0)
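
/*
 * Illustrative usage (editorial sketch; variables are hypothetical):
 * both macros take printk-style arguments and are rate-limited:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d: open\n", proc->pid);
 *	binder_user_error("%d: refcount change on invalid ref %d\n",
 *			  proc->pid, debug_id);
 */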
151 
152 #define to_flat_binder_object(hdr) \
153 	container_of(hdr, struct flat_binder_object, hdr)
154 
155 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
156 
157 #define to_binder_buffer_object(hdr) \
158 	container_of(hdr, struct binder_buffer_object, hdr)
159 
160 #define to_binder_fd_array_object(hdr) \
161 	container_of(hdr, struct binder_fd_array_object, hdr)
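
/*
 * Illustrative sketch (editorial): these helpers upcast a validated
 * generic object header to its concrete type, typically after
 * dispatching on hdr->type (BINDER_TYPE_* from the binder uapi):
 *
 *	struct flat_binder_object *fp;
 *	struct binder_fd_object *fdo;
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		fp = to_flat_binder_object(hdr);
 *		break;
 *	case BINDER_TYPE_FD:
 *		fdo = to_binder_fd_object(hdr);
 *		break;
 *	}
 */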
162 
163 enum binder_stat_types {
164 	BINDER_STAT_PROC,
165 	BINDER_STAT_THREAD,
166 	BINDER_STAT_NODE,
167 	BINDER_STAT_REF,
168 	BINDER_STAT_DEATH,
169 	BINDER_STAT_TRANSACTION,
170 	BINDER_STAT_TRANSACTION_COMPLETE,
171 	BINDER_STAT_COUNT
172 };
173 
174 struct binder_stats {
175 	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
176 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
177 	atomic_t obj_created[BINDER_STAT_COUNT];
178 	atomic_t obj_deleted[BINDER_STAT_COUNT];
179 };
180 
181 static struct binder_stats binder_stats;
182 
183 static inline void binder_stats_deleted(enum binder_stat_types type)
184 {
185 	atomic_inc(&binder_stats.obj_deleted[type]);
186 }
187 
188 static inline void binder_stats_created(enum binder_stat_types type)
189 {
190 	atomic_inc(&binder_stats.obj_created[type]);
191 }
192 
193 struct binder_transaction_log binder_transaction_log;
194 struct binder_transaction_log binder_transaction_log_failed;
195 
196 static struct binder_transaction_log_entry *binder_transaction_log_add(
197 	struct binder_transaction_log *log)
198 {
199 	struct binder_transaction_log_entry *e;
200 	unsigned int cur = atomic_inc_return(&log->cur);
201 
202 	if (cur >= ARRAY_SIZE(log->entry))
203 		log->full = true;
204 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
205 	WRITE_ONCE(e->debug_id_done, 0);
206 	/*
207 	 * write-barrier to synchronize access to e->debug_id_done.
208 	 * We make sure the initialized 0 value is seen before
209 	 * the other fields are zeroed by memset().
210 	 */
211 	smp_wmb();
212 	memset(e, 0, sizeof(*e));
213 	return e;
214 }
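
/*
 * Reader-side note (editorial sketch): consumers of the log are
 * expected to pair the smp_wmb() above with an smp_rmb(): sample
 * debug_id_done, issue smp_rmb(), then read the entry fields,
 * flagging the entry as incomplete if debug_id_done did not match
 * the entry's debug_id afterwards.
 */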
215 
216 /**
217  * struct binder_work - work enqueued on a worklist
218  * @entry:             node enqueued on list
219  * @type:              type of work to be performed
220  *
221  * There are separate work lists for proc, thread, and node (async).
222  */
223 struct binder_work {
224 	struct list_head entry;
225 
226 	enum {
227 		BINDER_WORK_TRANSACTION = 1,
228 		BINDER_WORK_TRANSACTION_COMPLETE,
229 		BINDER_WORK_RETURN_ERROR,
230 		BINDER_WORK_NODE,
231 		BINDER_WORK_DEAD_BINDER,
232 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
233 		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
234 	} type;
235 };
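
/*
 * Work items are embedded in their owning structures; a handler that
 * pops a struct binder_work recovers the container based on @type,
 * e.g. (editorial sketch):
 *
 *	struct binder_node *node =
 *		container_of(w, struct binder_node, work);
 */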
236 
237 struct binder_error {
238 	struct binder_work work;
239 	uint32_t cmd;
240 };
241 
242 /**
243  * struct binder_node - binder node bookkeeping
244  * @debug_id:             unique ID for debugging
245  *                        (invariant after initialized)
246  * @lock:                 lock for node fields
247  * @work:                 worklist element for node work
248  *                        (protected by @proc->inner_lock)
249  * @rb_node:              element for proc->nodes tree
250  *                        (protected by @proc->inner_lock)
251  * @dead_node:            element for binder_dead_nodes list
252  *                        (protected by binder_dead_nodes_lock)
253  * @proc:                 binder_proc that owns this node
254  *                        (invariant after initialized)
255  * @refs:                 list of references on this node
256  *                        (protected by @lock)
257  * @internal_strong_refs: used to take strong references when
258  *                        initiating a transaction
259  *                        (protected by @proc->inner_lock if @proc
260  *                        and by @lock)
261  * @local_weak_refs:      weak user refs from local process
262  *                        (protected by @proc->inner_lock if @proc
263  *                        and by @lock)
264  * @local_strong_refs:    strong user refs from local process
265  *                        (protected by @proc->inner_lock if @proc
266  *                        and by @lock)
267  * @tmp_refs:             temporary kernel refs
268  *                        (protected by @proc->inner_lock while @proc
269  *                        is valid, and by binder_dead_nodes_lock
270  *                        if @proc is NULL. During inc/dec and node release
271  *                        it is also protected by @lock to provide safety
272  *                        as the node dies and @proc becomes NULL)
273  * @ptr:                  userspace pointer for node
274  *                        (invariant, no lock needed)
275  * @cookie:               userspace cookie for node
276  *                        (invariant, no lock needed)
277  * @has_strong_ref:       userspace notified of strong ref
278  *                        (protected by @proc->inner_lock if @proc
279  *                        and by @lock)
280  * @pending_strong_ref:   userspace has acked notification of strong ref
281  *                        (protected by @proc->inner_lock if @proc
282  *                        and by @lock)
283  * @has_weak_ref:         userspace notified of weak ref
284  *                        (protected by @proc->inner_lock if @proc
285  *                        and by @lock)
286  * @pending_weak_ref:     userspace has acked notification of weak ref
287  *                        (protected by @proc->inner_lock if @proc
288  *                        and by @lock)
289  * @has_async_transaction: async transaction to node in progress
290  *                        (protected by @lock)
291  * @accept_fds:           file descriptor operations supported for node
292  *                        (invariant after initialized)
293  * @min_priority:         minimum scheduling priority
294  *                        (invariant after initialized)
295  * @txn_security_ctx:     require sender's security context
296  *                        (invariant after initialized)
297  * @async_todo:           list of async work items
298  *                        (protected by @proc->inner_lock)
299  *
300  * Bookkeeping structure for binder nodes.
301  */
302 struct binder_node {
303 	int debug_id;
304 	spinlock_t lock;
305 	struct binder_work work;
306 	union {
307 		struct rb_node rb_node;
308 		struct hlist_node dead_node;
309 	};
310 	struct binder_proc *proc;
311 	struct hlist_head refs;
312 	int internal_strong_refs;
313 	int local_weak_refs;
314 	int local_strong_refs;
315 	int tmp_refs;
316 	binder_uintptr_t ptr;
317 	binder_uintptr_t cookie;
318 	struct {
319 		/*
320 		 * bitfield elements protected by
321 		 * proc inner_lock
322 		 */
323 		u8 has_strong_ref:1;
324 		u8 pending_strong_ref:1;
325 		u8 has_weak_ref:1;
326 		u8 pending_weak_ref:1;
327 	};
328 	struct {
329 		/*
330 		 * invariant after initialization
331 		 */
332 		u8 accept_fds:1;
333 		u8 txn_security_ctx:1;
334 		u8 min_priority;
335 	};
336 	bool has_async_transaction;
337 	struct list_head async_todo;
338 };
339 
340 struct binder_ref_death {
341 	/**
342 	 * @work: worklist element for death notifications
343 	 *        (protected by inner_lock of the proc that
344 	 *        this ref belongs to)
345 	 */
346 	struct binder_work work;
347 	binder_uintptr_t cookie;
348 };
349 
350 /**
351  * struct binder_ref_data - binder_ref counts and id
352  * @debug_id:        unique ID for the ref
353  * @desc:            unique userspace handle for ref
354  * @strong:          strong ref count (debugging only if not locked)
355  * @weak:            weak ref count (debugging only if not locked)
356  *
357  * Structure to hold ref count and ref id information. Since
358  * the actual ref can only be accessed with a lock, this structure
359  * is used to return information about the ref to callers of
360  * ref inc/dec functions.
361  */
362 struct binder_ref_data {
363 	int debug_id;
364 	uint32_t desc;
365 	int strong;
366 	int weak;
367 };
368 
369 /**
370  * struct binder_ref - struct to track references on nodes
371  * @data:        binder_ref_data containing id, handle, and current refcounts
372  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
373  * @rb_node_node: node for lookup by @node in proc's rb_tree
374  * @node_entry:  list entry for node->refs list in target node
375  *               (protected by @node->lock)
376  * @proc:        binder_proc containing ref
377  * @node:        binder_node of target node. When cleaning up a
378  *               ref for deletion in binder_cleanup_ref, a non-NULL
379  *               @node indicates the node must be freed
380  * @death:       pointer to death notification (ref_death) if requested
381  *               (protected by @node->lock)
382  *
383  * Structure to track references from procA to target node (on procB). This
384  * structure is unsafe to access without holding @proc->outer_lock.
385  */
386 struct binder_ref {
387 	/* Lookups needed: */
388 	/*   node + proc => ref (transaction) */
389 	/*   desc + proc => ref (transaction, inc/dec ref) */
390 	/*   node => refs + procs (proc exit) */
391 	struct binder_ref_data data;
392 	struct rb_node rb_node_desc;
393 	struct rb_node rb_node_node;
394 	struct hlist_node node_entry;
395 	struct binder_proc *proc;
396 	struct binder_node *node;
397 	struct binder_ref_death *death;
398 };
399 
400 enum binder_deferred_state {
401 	BINDER_DEFERRED_FLUSH        = 0x01,
402 	BINDER_DEFERRED_RELEASE      = 0x02,
403 };
404 
405 /**
406  * struct binder_proc - binder process bookkeeping
407  * @proc_node:            element for binder_procs list
408  * @threads:              rbtree of binder_threads in this proc
409  *                        (protected by @inner_lock)
410  * @nodes:                rbtree of binder nodes associated with
411  *                        this proc ordered by node->ptr
412  *                        (protected by @inner_lock)
413  * @refs_by_desc:         rbtree of refs ordered by ref->desc
414  *                        (protected by @outer_lock)
415  * @refs_by_node:         rbtree of refs ordered by ref->node
416  *                        (protected by @outer_lock)
417  * @waiting_threads:      threads currently waiting for proc work
418  *                        (protected by @inner_lock)
419  * @pid:                  PID of group_leader of process
420  *                        (invariant after initialized)
421  * @tsk:                  task_struct for group_leader of process
422  *                        (invariant after initialized)
423  * @deferred_work_node:   element for binder_deferred_list
424  *                        (protected by binder_deferred_lock)
425  * @deferred_work:        bitmap of deferred work to perform
426  *                        (protected by binder_deferred_lock)
427  * @is_dead:              process is dead and awaiting free
428  *                        when outstanding transactions are cleaned up
429  *                        (protected by @inner_lock)
430  * @todo:                 list of work for this process
431  *                        (protected by @inner_lock)
432  * @stats:                per-process binder statistics
433  *                        (atomics, no lock needed)
434  * @delivered_death:      list of delivered death notifications
435  *                        (protected by @inner_lock)
436  * @max_threads:          cap on number of binder threads
437  *                        (protected by @inner_lock)
438  * @requested_threads:    number of binder threads requested but not
439  *                        yet started. In current implementation, can
440  *                        only be 0 or 1.
441  *                        (protected by @inner_lock)
442  * @requested_threads_started: number of binder threads started
443  *                        (protected by @inner_lock)
444  * @tmp_ref:              temporary reference to indicate proc is in use
445  *                        (protected by @inner_lock)
446  * @default_priority:     default scheduler priority
447  *                        (invariant after initialized)
448  * @debugfs_entry:        debugfs node
449  * @alloc:                binder allocator bookkeeping
450  * @context:              binder_context for this proc
451  *                        (invariant after initialized)
452  * @inner_lock:           can nest under outer_lock and/or node lock
453  * @outer_lock:           no nesting under inner or node lock
454  *                        Lock order: 1) outer, 2) node, 3) inner
455  * @binderfs_entry:       process-specific binderfs log file
456  *
457  * Bookkeeping structure for binder processes
458  */
459 struct binder_proc {
460 	struct hlist_node proc_node;
461 	struct rb_root threads;
462 	struct rb_root nodes;
463 	struct rb_root refs_by_desc;
464 	struct rb_root refs_by_node;
465 	struct list_head waiting_threads;
466 	int pid;
467 	struct task_struct *tsk;
468 	struct hlist_node deferred_work_node;
469 	int deferred_work;
470 	bool is_dead;
471 
472 	struct list_head todo;
473 	struct binder_stats stats;
474 	struct list_head delivered_death;
475 	int max_threads;
476 	int requested_threads;
477 	int requested_threads_started;
478 	int tmp_ref;
479 	long default_priority;
480 	struct dentry *debugfs_entry;
481 	struct binder_alloc alloc;
482 	struct binder_context *context;
483 	spinlock_t inner_lock;
484 	spinlock_t outer_lock;
485 	struct dentry *binderfs_entry;
486 };
487 
488 enum {
489 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
490 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
491 	BINDER_LOOPER_STATE_EXITED      = 0x04,
492 	BINDER_LOOPER_STATE_INVALID     = 0x08,
493 	BINDER_LOOPER_STATE_WAITING     = 0x10,
494 	BINDER_LOOPER_STATE_POLL        = 0x20,
495 };
496 
497 /**
498  * struct binder_thread - binder thread bookkeeping
499  * @proc:                 binder process for this thread
500  *                        (invariant after initialization)
501  * @rb_node:              element for proc->threads rbtree
502  *                        (protected by @proc->inner_lock)
503  * @waiting_thread_node:  element for @proc->waiting_threads list
504  *                        (protected by @proc->inner_lock)
505  * @pid:                  PID for this thread
506  *                        (invariant after initialization)
507  * @looper:               bitmap of looping state
508  *                        (only accessed by this thread)
509  * @looper_need_return:   looping thread needs to exit driver
510  *                        (no lock needed)
511  * @transaction_stack:    stack of in-progress transactions for this thread
512  *                        (protected by @proc->inner_lock)
513  * @todo:                 list of work to do for this thread
514  *                        (protected by @proc->inner_lock)
515  * @process_todo:         whether work in @todo should be processed
516  *                        (protected by @proc->inner_lock)
517  * @return_error:         transaction errors reported by this thread
518  *                        (only accessed by this thread)
519  * @reply_error:          transaction errors reported by target thread
520  *                        (protected by @proc->inner_lock)
521  * @wait:                 wait queue for thread work
522  * @stats:                per-thread statistics
523  *                        (atomics, no lock needed)
524  * @tmp_ref:              temporary reference to indicate thread is in use
525  *                        (atomic since @proc->inner_lock cannot
526  *                        always be acquired)
527  * @is_dead:              thread is dead and awaiting free
528  *                        when outstanding transactions are cleaned up
529  *                        (protected by @proc->inner_lock)
530  *
531  * Bookkeeping structure for binder threads.
532  */
533 struct binder_thread {
534 	struct binder_proc *proc;
535 	struct rb_node rb_node;
536 	struct list_head waiting_thread_node;
537 	int pid;
538 	int looper;              /* only modified by this thread */
539 	bool looper_need_return; /* can be written by other thread */
540 	struct binder_transaction *transaction_stack;
541 	struct list_head todo;
542 	bool process_todo;
543 	struct binder_error return_error;
544 	struct binder_error reply_error;
545 	wait_queue_head_t wait;
546 	struct binder_stats stats;
547 	atomic_t tmp_ref;
548 	bool is_dead;
549 };
550 
551 /**
552  * struct binder_txn_fd_fixup - transaction fd fixup list element
553  * @fixup_entry:          list entry
554  * @file:                 struct file to be associated with new fd
555  * @offset:               offset in buffer data to this fixup
556  *
557  * List element for fd fixups in a transaction. Since file
558  * descriptors need to be allocated in the context of the
559  * target process, we pass each fd to be processed in this
560  * struct.
561  */
562 struct binder_txn_fd_fixup {
563 	struct list_head fixup_entry;
564 	struct file *file;
565 	size_t offset;
566 };
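
/*
 * Illustrative sketch of target-side processing (editorial, error
 * handling omitted): the list is drained in the context of the
 * receiving process, where fds can legally be installed:
 *
 *	struct binder_txn_fd_fixup *fixup, *tmp;
 *
 *	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
 *		// get an unused fd in this process, install
 *		// fixup->file, and patch the buffer at fixup->offset
 *	}
 */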
567 
568 struct binder_transaction {
569 	int debug_id;
570 	struct binder_work work;
571 	struct binder_thread *from;
572 	struct binder_transaction *from_parent;
573 	struct binder_proc *to_proc;
574 	struct binder_thread *to_thread;
575 	struct binder_transaction *to_parent;
576 	unsigned need_reply:1;
577 	/* unsigned is_dead:1; */	/* not used at the moment */
578 
579 	struct binder_buffer *buffer;
580 	unsigned int	code;
581 	unsigned int	flags;
582 	long	priority;
583 	long	saved_priority;
584 	kuid_t	sender_euid;
585 	struct list_head fd_fixups;
586 	binder_uintptr_t security_ctx;
587 	/**
588 	 * @lock:  protects @from, @to_proc, and @to_thread
589 	 *
590 	 * @from, @to_proc, and @to_thread can be set to NULL
591 	 * during thread teardown
592 	 */
593 	spinlock_t lock;
594 };
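
/*
 * Illustrative sketch (editorial): fields covered by @lock must be
 * sampled under it, since teardown can NULL them concurrently:
 *
 *	struct binder_thread *from;
 *
 *	spin_lock(&t->lock);
 *	from = t->from;
 *	if (from)
 *		atomic_inc(&from->tmp_ref);
 *	spin_unlock(&t->lock);
 */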
595 
596 /**
597  * struct binder_object - union of flat binder object types
598  * @hdr:   generic object header
599  * @fbo:   binder object (nodes and refs)
600  * @fdo:   file descriptor object
601  * @bbo:   binder buffer pointer
602  * @fdao:  file descriptor array
603  *
604  * Used for type-independent object copies
605  */
606 struct binder_object {
607 	union {
608 		struct binder_object_header hdr;
609 		struct flat_binder_object fbo;
610 		struct binder_fd_object fdo;
611 		struct binder_buffer_object bbo;
612 		struct binder_fd_array_object fdao;
613 	};
614 };
615 
616 /**
617  * binder_proc_lock() - Acquire outer lock for given binder_proc
618  * @proc:         struct binder_proc to acquire
619  *
620  * Acquires proc->outer_lock. Used to protect binder_ref
621  * structures associated with the given proc.
622  */
623 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
624 static void
625 _binder_proc_lock(struct binder_proc *proc, int line)
626 	__acquires(&proc->outer_lock)
627 {
628 	binder_debug(BINDER_DEBUG_SPINLOCKS,
629 		     "%s: line=%d\n", __func__, line);
630 	spin_lock(&proc->outer_lock);
631 }
632 
633 /**
634  * binder_proc_unlock() - Release outer lock for given binder_proc
635  * @proc:         struct binder_proc being released
636  *
637  * Release lock acquired via binder_proc_lock()
638  */
639 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
640 static void
641 _binder_proc_unlock(struct binder_proc *proc, int line)
642 	__releases(&proc->outer_lock)
643 {
644 	binder_debug(BINDER_DEBUG_SPINLOCKS,
645 		     "%s: line=%d\n", __func__, line);
646 	spin_unlock(&proc->outer_lock);
647 }
648 
649 /**
650  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
651  * @proc:         struct binder_proc to acquire
652  *
653  * Acquires proc->inner_lock. Used to protect todo lists
654  */
655 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
656 static void
657 _binder_inner_proc_lock(struct binder_proc *proc, int line)
658 	__acquires(&proc->inner_lock)
659 {
660 	binder_debug(BINDER_DEBUG_SPINLOCKS,
661 		     "%s: line=%d\n", __func__, line);
662 	spin_lock(&proc->inner_lock);
663 }
664 
665 /**
666  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
667  * @proc:         struct binder_proc being released
668  *
669  * Release lock acquired via binder_inner_proc_lock()
670  */
671 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
672 static void
673 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
674 	__releases(&proc->inner_lock)
675 {
676 	binder_debug(BINDER_DEBUG_SPINLOCKS,
677 		     "%s: line=%d\n", __func__, line);
678 	spin_unlock(&proc->inner_lock);
679 }
680 
681 /**
682  * binder_node_lock() - Acquire spinlock for given binder_node
683  * @node:         struct binder_node to acquire
684  *
685  * Acquires node->lock. Used to protect binder_node fields
686  */
687 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
688 static void
689 _binder_node_lock(struct binder_node *node, int line)
690 	__acquires(&node->lock)
691 {
692 	binder_debug(BINDER_DEBUG_SPINLOCKS,
693 		     "%s: line=%d\n", __func__, line);
694 	spin_lock(&node->lock);
695 }
696 
697 /**
698  * binder_node_unlock() - Release spinlock for given binder_node
699  * @node:         struct binder_node being released
700  *
701  * Release lock acquired via binder_node_lock()
702  */
703 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
704 static void
705 _binder_node_unlock(struct binder_node *node, int line)
706 	__releases(&node->lock)
707 {
708 	binder_debug(BINDER_DEBUG_SPINLOCKS,
709 		     "%s: line=%d\n", __func__, line);
710 	spin_unlock(&node->lock);
711 }
712 
713 /**
714  * binder_node_inner_lock() - Acquire node and inner locks
715  * @node:         struct binder_node to acquire
716  *
717  * Acquires node->lock. If node->proc is non-NULL, also acquires
718  * node->proc->inner_lock. Used to protect binder_node fields
719  */
720 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
721 static void
722 _binder_node_inner_lock(struct binder_node *node, int line)
723 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
724 {
725 	binder_debug(BINDER_DEBUG_SPINLOCKS,
726 		     "%s: line=%d\n", __func__, line);
727 	spin_lock(&node->lock);
728 	if (node->proc)
729 		binder_inner_proc_lock(node->proc);
730 	else
731 		/* annotation for sparse */
732 		__acquire(&node->proc->inner_lock);
733 }
734 
735 /**
736  * binder_node_inner_unlock() - Release node and inner locks
737  * @node:         struct binder_node being released
738  *
739  * Release locks acquired via binder_node_inner_lock()
740  */
741 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
742 static void
743 _binder_node_inner_unlock(struct binder_node *node, int line)
744 	__releases(&node->lock) __releases(&node->proc->inner_lock)
745 {
746 	struct binder_proc *proc = node->proc;
747 
748 	binder_debug(BINDER_DEBUG_SPINLOCKS,
749 		     "%s: line=%d\n", __func__, line);
750 	if (proc)
751 		binder_inner_proc_unlock(proc);
752 	else
753 		/* annotation for sparse */
754 		__release(&node->proc->inner_lock);
755 	spin_unlock(&node->lock);
756 }
757 
758 static bool binder_worklist_empty_ilocked(struct list_head *list)
759 {
760 	return list_empty(list);
761 }
762 
763 /**
764  * binder_worklist_empty() - Check if no items on the work list
765  * @proc:       binder_proc associated with list
766  * @list:	list to check
767  *
768  * Return: true if there are no items on list, else false
769  */
770 static bool binder_worklist_empty(struct binder_proc *proc,
771 				  struct list_head *list)
772 {
773 	bool ret;
774 
775 	binder_inner_proc_lock(proc);
776 	ret = binder_worklist_empty_ilocked(list);
777 	binder_inner_proc_unlock(proc);
778 	return ret;
779 }
780 
781 /**
782  * binder_enqueue_work_ilocked() - Add an item to the work list
783  * @work:         struct binder_work to add to list
784  * @target_list:  list to add work to
785  *
786  * Adds the work to the specified list. Asserts that work
787  * is not already on a list.
788  *
789  * Requires the proc->inner_lock to be held.
790  */
791 static void
792 binder_enqueue_work_ilocked(struct binder_work *work,
793 			   struct list_head *target_list)
794 {
795 	BUG_ON(target_list == NULL);
796 	BUG_ON(work->entry.next && !list_empty(&work->entry));
797 	list_add_tail(&work->entry, target_list);
798 }
799 
800 /**
801  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
802  * @thread:       thread to queue work to
803  * @work:         struct binder_work to add to list
804  *
805  * Adds the work to the todo list of the thread. Doesn't set the process_todo
806  * flag, which means that (if it wasn't already set) the thread will go to
807  * sleep without handling this work when it calls read.
808  *
809  * Requires the proc->inner_lock to be held.
810  */
811 static void
812 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
813 					    struct binder_work *work)
814 {
815 	WARN_ON(!list_empty(&thread->waiting_thread_node));
816 	binder_enqueue_work_ilocked(work, &thread->todo);
817 }
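
/*
 * Usage note (editorial): binder_inc_node_nilocked() below queues
 * BINDER_WORK_NODE through this deferred variant, so a pending node
 * ref update rides along with the thread's next wakeup instead of
 * forcing one by itself.
 */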
818 
819 /**
820  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
821  * @thread:       thread to queue work to
822  * @work:         struct binder_work to add to list
823  *
824  * Adds the work to the todo list of the thread, and enables processing
825  * of the todo queue.
826  *
827  * Requires the proc->inner_lock to be held.
828  */
829 static void
830 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
831 				   struct binder_work *work)
832 {
833 	WARN_ON(!list_empty(&thread->waiting_thread_node));
834 	binder_enqueue_work_ilocked(work, &thread->todo);
835 	thread->process_todo = true;
836 }
837 
838 /**
839  * binder_enqueue_thread_work() - Add an item to the thread work list
840  * @thread:       thread to queue work to
841  * @work:         struct binder_work to add to list
842  *
843  * Adds the work to the todo list of the thread, and enables processing
844  * of the todo queue.
845  */
846 static void
847 binder_enqueue_thread_work(struct binder_thread *thread,
848 			   struct binder_work *work)
849 {
850 	binder_inner_proc_lock(thread->proc);
851 	binder_enqueue_thread_work_ilocked(thread, work);
852 	binder_inner_proc_unlock(thread->proc);
853 }
854 
855 static void
856 binder_dequeue_work_ilocked(struct binder_work *work)
857 {
858 	list_del_init(&work->entry);
859 }
860 
861 /**
862  * binder_dequeue_work() - Removes an item from the work list
863  * @proc:         binder_proc associated with list
864  * @work:         struct binder_work to remove from list
865  *
866  * Removes the specified work item from whatever list it is on.
867  * Can safely be called if work is not on any list.
868  */
869 static void
870 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
871 {
872 	binder_inner_proc_lock(proc);
873 	binder_dequeue_work_ilocked(work);
874 	binder_inner_proc_unlock(proc);
875 }
876 
877 static struct binder_work *binder_dequeue_work_head_ilocked(
878 					struct list_head *list)
879 {
880 	struct binder_work *w;
881 
882 	w = list_first_entry_or_null(list, struct binder_work, entry);
883 	if (w)
884 		list_del_init(&w->entry);
885 	return w;
886 }
887 
888 /**
889  * binder_dequeue_work_head() - Dequeues the item at head of list
890  * @proc:         binder_proc associated with list
891  * @list:         list to dequeue head
892  *
893  * Removes the head of the list if there are items on the list
894  *
895  * Return: pointer to the dequeued binder_work, or NULL if list was empty
896  */
897 static struct binder_work *binder_dequeue_work_head(
898 					struct binder_proc *proc,
899 					struct list_head *list)
900 {
901 	struct binder_work *w;
902 
903 	binder_inner_proc_lock(proc);
904 	w = binder_dequeue_work_head_ilocked(list);
905 	binder_inner_proc_unlock(proc);
906 	return w;
907 }
908 
909 static void
910 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
911 static void binder_free_thread(struct binder_thread *thread);
912 static void binder_free_proc(struct binder_proc *proc);
913 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
914 
915 static bool binder_has_work_ilocked(struct binder_thread *thread,
916 				    bool do_proc_work)
917 {
918 	return thread->process_todo ||
919 		thread->looper_need_return ||
920 		(do_proc_work &&
921 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
922 }
923 
924 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
925 {
926 	bool has_work;
927 
928 	binder_inner_proc_lock(thread->proc);
929 	has_work = binder_has_work_ilocked(thread, do_proc_work);
930 	binder_inner_proc_unlock(thread->proc);
931 
932 	return has_work;
933 }
934 
935 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
936 {
937 	return !thread->transaction_stack &&
938 		binder_worklist_empty_ilocked(&thread->todo) &&
939 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
940 				   BINDER_LOOPER_STATE_REGISTERED));
941 }
942 
943 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
944 					       bool sync)
945 {
946 	struct rb_node *n;
947 	struct binder_thread *thread;
948 
949 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
950 		thread = rb_entry(n, struct binder_thread, rb_node);
951 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
952 		    binder_available_for_proc_work_ilocked(thread)) {
953 			if (sync)
954 				wake_up_interruptible_sync(&thread->wait);
955 			else
956 				wake_up_interruptible(&thread->wait);
957 		}
958 	}
959 }
960 
961 /**
962  * binder_select_thread_ilocked() - selects a thread for doing proc work.
963  * @proc:	process to select a thread from
964  *
965  * Note that calling this function moves the thread off the waiting_threads
966  * list, so it can only be woken up by the caller of this function, or a
967  * signal. Therefore, callers *should* always wake up the thread this function
968  * returns.
969  *
970  * Return:	If there's a thread currently waiting for process work,
971  *		returns that thread. Otherwise returns NULL.
972  */
973 static struct binder_thread *
974 binder_select_thread_ilocked(struct binder_proc *proc)
975 {
976 	struct binder_thread *thread;
977 
978 	assert_spin_locked(&proc->inner_lock);
979 	thread = list_first_entry_or_null(&proc->waiting_threads,
980 					  struct binder_thread,
981 					  waiting_thread_node);
982 
983 	if (thread)
984 		list_del_init(&thread->waiting_thread_node);
985 
986 	return thread;
987 }
988 
989 /**
990  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
991  * @proc:	process to wake up a thread in
992  * @thread:	specific thread to wake-up (may be NULL)
993  * @sync:	whether to do a synchronous wake-up
994  *
995  * This function wakes up a thread in the @proc process.
996  * The caller may provide a specific thread to wake-up in
997  * the @thread parameter. If @thread is NULL, this function
998  * will wake up threads that have called poll().
999  *
1000  * Note that for this function to work as expected, callers
1001  * should first call binder_select_thread() to find a thread
1002  * to handle the work (if they don't have a thread already),
1003  * and pass the result into the @thread parameter.
1004  */
1005 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1006 					 struct binder_thread *thread,
1007 					 bool sync)
1008 {
1009 	assert_spin_locked(&proc->inner_lock);
1010 
1011 	if (thread) {
1012 		if (sync)
1013 			wake_up_interruptible_sync(&thread->wait);
1014 		else
1015 			wake_up_interruptible(&thread->wait);
1016 		return;
1017 	}
1018 
1019 	/* Didn't find a thread waiting for proc work; this can happen
1020 	 * in two scenarios:
1021 	 * 1. All threads are busy handling transactions
1022 	 *    In that case, one of those threads should call back into
1023 	 *    the kernel driver soon and pick up this work.
1024 	 * 2. Threads are using the (e)poll interface, in which case
1025 	 *    they may be blocked on the waitqueue without having been
1026 	 *    added to waiting_threads. For this case, we just iterate
1027 	 *    over all threads not handling transaction work, and
1028 	 *    wake them all up. We wake all because we don't know whether
1029 	 *    a thread that called into (e)poll is handling non-binder
1030 	 *    work currently.
1031 	 */
1032 	binder_wakeup_poll_threads_ilocked(proc, sync);
1033 }
1034 
1035 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1036 {
1037 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1038 
1039 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1040 }
1041 
1042 static void binder_set_nice(long nice)
1043 {
1044 	long min_nice;
1045 
1046 	if (can_nice(current, nice)) {
1047 		set_user_nice(current, nice);
1048 		return;
1049 	}
1050 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1051 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1052 		     "%d: nice value %ld not allowed use %ld instead\n",
1053 		      current->pid, nice, min_nice);
1054 	set_user_nice(current, min_nice);
1055 	if (min_nice <= MAX_NICE)
1056 		return;
1057 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1058 }
1059 
1060 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1061 						   binder_uintptr_t ptr)
1062 {
1063 	struct rb_node *n = proc->nodes.rb_node;
1064 	struct binder_node *node;
1065 
1066 	assert_spin_locked(&proc->inner_lock);
1067 
1068 	while (n) {
1069 		node = rb_entry(n, struct binder_node, rb_node);
1070 
1071 		if (ptr < node->ptr)
1072 			n = n->rb_left;
1073 		else if (ptr > node->ptr)
1074 			n = n->rb_right;
1075 		else {
1076 			/*
1077 			 * take an implicit weak reference
1078 			 * to ensure node stays alive until
1079 			 * call to binder_put_node()
1080 			 */
1081 			binder_inc_node_tmpref_ilocked(node);
1082 			return node;
1083 		}
1084 	}
1085 	return NULL;
1086 }
1087 
1088 static struct binder_node *binder_get_node(struct binder_proc *proc,
1089 					   binder_uintptr_t ptr)
1090 {
1091 	struct binder_node *node;
1092 
1093 	binder_inner_proc_lock(proc);
1094 	node = binder_get_node_ilocked(proc, ptr);
1095 	binder_inner_proc_unlock(proc);
1096 	return node;
1097 }
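
/*
 * Illustrative usage (editorial sketch): the implicit tmp_ref taken
 * by binder_get_node() must be balanced with binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		// use node
 *		binder_put_node(node);
 *	}
 */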
1098 
1099 static struct binder_node *binder_init_node_ilocked(
1100 						struct binder_proc *proc,
1101 						struct binder_node *new_node,
1102 						struct flat_binder_object *fp)
1103 {
1104 	struct rb_node **p = &proc->nodes.rb_node;
1105 	struct rb_node *parent = NULL;
1106 	struct binder_node *node;
1107 	binder_uintptr_t ptr = fp ? fp->binder : 0;
1108 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1109 	__u32 flags = fp ? fp->flags : 0;
1110 
1111 	assert_spin_locked(&proc->inner_lock);
1112 
1113 	while (*p) {
1115 		parent = *p;
1116 		node = rb_entry(parent, struct binder_node, rb_node);
1117 
1118 		if (ptr < node->ptr)
1119 			p = &(*p)->rb_left;
1120 		else if (ptr > node->ptr)
1121 			p = &(*p)->rb_right;
1122 		else {
1123 			/*
1124 			 * A matching node is already in
1125 			 * the rb tree. Abandon the init
1126 			 * and return it.
1127 			 */
1128 			binder_inc_node_tmpref_ilocked(node);
1129 			return node;
1130 		}
1131 	}
1132 	node = new_node;
1133 	binder_stats_created(BINDER_STAT_NODE);
1134 	node->tmp_refs++;
1135 	rb_link_node(&node->rb_node, parent, p);
1136 	rb_insert_color(&node->rb_node, &proc->nodes);
1137 	node->debug_id = atomic_inc_return(&binder_last_id);
1138 	node->proc = proc;
1139 	node->ptr = ptr;
1140 	node->cookie = cookie;
1141 	node->work.type = BINDER_WORK_NODE;
1142 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1143 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1144 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1145 	spin_lock_init(&node->lock);
1146 	INIT_LIST_HEAD(&node->work.entry);
1147 	INIT_LIST_HEAD(&node->async_todo);
1148 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1149 		     "%d:%d node %d u%016llx c%016llx created\n",
1150 		     proc->pid, current->pid, node->debug_id,
1151 		     (u64)node->ptr, (u64)node->cookie);
1152 
1153 	return node;
1154 }
1155 
1156 static struct binder_node *binder_new_node(struct binder_proc *proc,
1157 					   struct flat_binder_object *fp)
1158 {
1159 	struct binder_node *node;
1160 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1161 
1162 	if (!new_node)
1163 		return NULL;
1164 	binder_inner_proc_lock(proc);
1165 	node = binder_init_node_ilocked(proc, new_node, fp);
1166 	binder_inner_proc_unlock(proc);
1167 	if (node != new_node)
1168 		/*
1169 		 * The node was already added by another thread
1170 		 */
1171 		kfree(new_node);
1172 
1173 	return node;
1174 }
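
/*
 * Design note (editorial): the node is allocated with GFP_KERNEL
 * before inner_lock is taken, since a spinlock may not be held
 * across a sleeping allocation. If another thread races us and
 * inserts a node for the same ptr first, binder_init_node_ilocked()
 * returns the winner and the loser's allocation is freed here.
 */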
1175 
1176 static void binder_free_node(struct binder_node *node)
1177 {
1178 	kfree(node);
1179 	binder_stats_deleted(BINDER_STAT_NODE);
1180 }
1181 
1182 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1183 				    int internal,
1184 				    struct list_head *target_list)
1185 {
1186 	struct binder_proc *proc = node->proc;
1187 
1188 	assert_spin_locked(&node->lock);
1189 	if (proc)
1190 		assert_spin_locked(&proc->inner_lock);
1191 	if (strong) {
1192 		if (internal) {
1193 			if (target_list == NULL &&
1194 			    node->internal_strong_refs == 0 &&
1195 			    !(node->proc &&
1196 			      node == node->proc->context->binder_context_mgr_node &&
1197 			      node->has_strong_ref)) {
1198 				pr_err("invalid inc strong node for %d\n",
1199 					node->debug_id);
1200 				return -EINVAL;
1201 			}
1202 			node->internal_strong_refs++;
1203 		} else
1204 			node->local_strong_refs++;
1205 		if (!node->has_strong_ref && target_list) {
1206 			struct binder_thread *thread = container_of(target_list,
1207 						    struct binder_thread, todo);
1208 			binder_dequeue_work_ilocked(&node->work);
1209 			BUG_ON(&thread->todo != target_list);
1210 			binder_enqueue_deferred_thread_work_ilocked(thread,
1211 								   &node->work);
1212 		}
1213 	} else {
1214 		if (!internal)
1215 			node->local_weak_refs++;
1216 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1217 			if (target_list == NULL) {
1218 				pr_err("invalid inc weak node for %d\n",
1219 					node->debug_id);
1220 				return -EINVAL;
1221 			}
1222 			/*
1223 			 * See comment above
1224 			 */
1225 			binder_enqueue_work_ilocked(&node->work, target_list);
1226 		}
1227 	}
1228 	return 0;
1229 }
1230 
1231 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1232 			   struct list_head *target_list)
1233 {
1234 	int ret;
1235 
1236 	binder_node_inner_lock(node);
1237 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1238 	binder_node_inner_unlock(node);
1239 
1240 	return ret;
1241 }
1242 
1243 static bool binder_dec_node_nilocked(struct binder_node *node,
1244 				     int strong, int internal)
1245 {
1246 	struct binder_proc *proc = node->proc;
1247 
1248 	assert_spin_locked(&node->lock);
1249 	if (proc)
1250 		assert_spin_locked(&proc->inner_lock);
1251 	if (strong) {
1252 		if (internal)
1253 			node->internal_strong_refs--;
1254 		else
1255 			node->local_strong_refs--;
1256 		if (node->local_strong_refs || node->internal_strong_refs)
1257 			return false;
1258 	} else {
1259 		if (!internal)
1260 			node->local_weak_refs--;
1261 		if (node->local_weak_refs || node->tmp_refs ||
1262 				!hlist_empty(&node->refs))
1263 			return false;
1264 	}
1265 
1266 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1267 		if (list_empty(&node->work.entry)) {
1268 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1269 			binder_wakeup_proc_ilocked(proc);
1270 		}
1271 	} else {
1272 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1273 		    !node->local_weak_refs && !node->tmp_refs) {
1274 			if (proc) {
1275 				binder_dequeue_work_ilocked(&node->work);
1276 				rb_erase(&node->rb_node, &proc->nodes);
1277 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1278 					     "refless node %d deleted\n",
1279 					     node->debug_id);
1280 			} else {
1281 				BUG_ON(!list_empty(&node->work.entry));
1282 				spin_lock(&binder_dead_nodes_lock);
1283 				/*
1284 				 * tmp_refs could have changed so
1285 				 * check it again
1286 				 */
1287 				if (node->tmp_refs) {
1288 					spin_unlock(&binder_dead_nodes_lock);
1289 					return false;
1290 				}
1291 				hlist_del(&node->dead_node);
1292 				spin_unlock(&binder_dead_nodes_lock);
1293 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1294 					     "dead node %d deleted\n",
1295 					     node->debug_id);
1296 			}
1297 			return true;
1298 		}
1299 	}
1300 	return false;
1301 }
1302 
1303 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1304 {
1305 	bool free_node;
1306 
1307 	binder_node_inner_lock(node);
1308 	free_node = binder_dec_node_nilocked(node, strong, internal);
1309 	binder_node_inner_unlock(node);
1310 	if (free_node)
1311 		binder_free_node(node);
1312 }
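
/*
 * Design note (editorial): binder_dec_node_nilocked() only reports
 * that the node became unreferenced; the actual binder_free_node()
 * happens here, after the locks are dropped, because node->lock is
 * embedded in the object being freed.
 */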
1313 
1314 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1315 {
1316 	/*
1317 	 * No call to binder_inc_node() is needed since we
1318 	 * don't need to inform userspace of any changes to
1319 	 * tmp_refs
1320 	 */
1321 	node->tmp_refs++;
1322 }
1323 
1324 /**
1325  * binder_inc_node_tmpref() - take a temporary reference on node
1326  * @node:	node to reference
1327  *
1328  * Take reference on node to prevent the node from being freed
1329  * while referenced only by a local variable. The inner lock is
1330  * needed to serialize with the node work on the queue (which
1331  * isn't needed after the node is dead). If the node is dead
1332  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1333  * node->tmp_refs against dead-node-only cases where the node
1334  * lock cannot be acquired (e.g. traversing the dead node list to
1335  * print nodes)
1336  */
1337 static void binder_inc_node_tmpref(struct binder_node *node)
1338 {
1339 	binder_node_lock(node);
1340 	if (node->proc)
1341 		binder_inner_proc_lock(node->proc);
1342 	else
1343 		spin_lock(&binder_dead_nodes_lock);
1344 	binder_inc_node_tmpref_ilocked(node);
1345 	if (node->proc)
1346 		binder_inner_proc_unlock(node->proc);
1347 	else
1348 		spin_unlock(&binder_dead_nodes_lock);
1349 	binder_node_unlock(node);
1350 }
1351 
1352 /**
1353  * binder_dec_node_tmpref() - remove a temporary reference on node
1354  * @node:	node to reference
1355  *
1356  * Release temporary reference on node taken via binder_inc_node_tmpref()
1357  */
1358 static void binder_dec_node_tmpref(struct binder_node *node)
1359 {
1360 	bool free_node;
1361 
1362 	binder_node_inner_lock(node);
1363 	if (!node->proc)
1364 		spin_lock(&binder_dead_nodes_lock);
1365 	else
1366 		__acquire(&binder_dead_nodes_lock);
1367 	node->tmp_refs--;
1368 	BUG_ON(node->tmp_refs < 0);
1369 	if (!node->proc)
1370 		spin_unlock(&binder_dead_nodes_lock);
1371 	else
1372 		__release(&binder_dead_nodes_lock);
1373 	/*
1374 	 * Call binder_dec_node() to check if all refcounts are 0
1375 	 * and cleanup is needed. Calling with strong=0 and internal=1
1376 	 * causes no actual reference to be released in binder_dec_node().
1377 	 * If that changes, a change is needed here too.
1378 	 */
1379 	free_node = binder_dec_node_nilocked(node, 0, 1);
1380 	binder_node_inner_unlock(node);
1381 	if (free_node)
1382 		binder_free_node(node);
1383 }
1384 
1385 static void binder_put_node(struct binder_node *node)
1386 {
1387 	binder_dec_node_tmpref(node);
1388 }
1389 
1390 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1391 						 u32 desc, bool need_strong_ref)
1392 {
1393 	struct rb_node *n = proc->refs_by_desc.rb_node;
1394 	struct binder_ref *ref;
1395 
1396 	while (n) {
1397 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1398 
1399 		if (desc < ref->data.desc) {
1400 			n = n->rb_left;
1401 		} else if (desc > ref->data.desc) {
1402 			n = n->rb_right;
1403 		} else if (need_strong_ref && !ref->data.strong) {
1404 			binder_user_error("tried to use weak ref as strong ref\n");
1405 			return NULL;
1406 		} else {
1407 			return ref;
1408 		}
1409 	}
1410 	return NULL;
1411 }
1412 
1413 /**
1414  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1415  * @proc:	binder_proc that owns the ref
1416  * @node:	binder_node of target
1417  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1418  *
1419  * Look up the ref for the given node and return it if it exists
1420  *
1421  * If it doesn't exist and the caller provides a newly allocated
1422  * ref, initialize the fields of the newly allocated ref and insert
1423  * into the given proc rb_trees and node refs list.
1424  *
1425  * Return:	the ref for node. It is possible that another thread
1426  *		allocated/initialized the ref first in which case the
1427  *		returned ref would be different than the passed-in
1428  *		new_ref. new_ref must be kfree'd by the caller in
1429  *		this case.
1430  */
1431 static struct binder_ref *binder_get_ref_for_node_olocked(
1432 					struct binder_proc *proc,
1433 					struct binder_node *node,
1434 					struct binder_ref *new_ref)
1435 {
1436 	struct binder_context *context = proc->context;
1437 	struct rb_node **p = &proc->refs_by_node.rb_node;
1438 	struct rb_node *parent = NULL;
1439 	struct binder_ref *ref;
1440 	struct rb_node *n;
1441 
1442 	while (*p) {
1443 		parent = *p;
1444 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1445 
1446 		if (node < ref->node)
1447 			p = &(*p)->rb_left;
1448 		else if (node > ref->node)
1449 			p = &(*p)->rb_right;
1450 		else
1451 			return ref;
1452 	}
1453 	if (!new_ref)
1454 		return NULL;
1455 
1456 	binder_stats_created(BINDER_STAT_REF);
1457 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1458 	new_ref->proc = proc;
1459 	new_ref->node = node;
1460 	rb_link_node(&new_ref->rb_node_node, parent, p);
1461 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1462 
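	/*
	 * Pick the lowest available descriptor: 0 is reserved for the
	 * ref to the context manager node. refs_by_desc is walked in
	 * ascending desc order, stopping at the first gap.
	 */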
1463 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1464 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1465 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1466 		if (ref->data.desc > new_ref->data.desc)
1467 			break;
1468 		new_ref->data.desc = ref->data.desc + 1;
1469 	}
1470 
1471 	p = &proc->refs_by_desc.rb_node;
1472 	while (*p) {
1473 		parent = *p;
1474 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1475 
1476 		if (new_ref->data.desc < ref->data.desc)
1477 			p = &(*p)->rb_left;
1478 		else if (new_ref->data.desc > ref->data.desc)
1479 			p = &(*p)->rb_right;
1480 		else
1481 			BUG();
1482 	}
1483 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1484 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1485 
1486 	binder_node_lock(node);
1487 	hlist_add_head(&new_ref->node_entry, &node->refs);
1488 
1489 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1490 		     "%d new ref %d desc %d for node %d\n",
1491 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1492 		      node->debug_id);
1493 	binder_node_unlock(node);
1494 	return new_ref;
1495 }
1496 
1497 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498 {
1499 	bool delete_node = false;
1500 
1501 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502 		     "%d delete ref %d desc %d for node %d\n",
1503 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504 		      ref->node->debug_id);
1505 
1506 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508 
1509 	binder_node_inner_lock(ref->node);
1510 	if (ref->data.strong)
1511 		binder_dec_node_nilocked(ref->node, 1, 1);
1512 
1513 	hlist_del(&ref->node_entry);
1514 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515 	binder_node_inner_unlock(ref->node);
1516 	/*
1517 	 * Clear ref->node unless we want the caller to free the node
1518 	 */
1519 	if (!delete_node) {
1520 		/*
1521 		 * The caller uses ref->node to determine
1522 		 * whether the node needs to be freed. Clear
1523 		 * it since the node is still alive.
1524 		 */
1525 		ref->node = NULL;
1526 	}
1527 
1528 	if (ref->death) {
1529 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530 			     "%d delete ref %d desc %d has death notification\n",
1531 			      ref->proc->pid, ref->data.debug_id,
1532 			      ref->data.desc);
1533 		binder_dequeue_work(ref->proc, &ref->death->work);
1534 		binder_stats_deleted(BINDER_STAT_DEATH);
1535 	}
1536 	binder_stats_deleted(BINDER_STAT_REF);
1537 }
1538 
1539 /**
1540  * binder_inc_ref_olocked() - increment the ref for given handle
1541  * @ref:         ref to be incremented
1542  * @strong:      if true, strong increment, else weak
1543  * @target_list: list to queue node work on
1544  *
1545  * Increment the ref. @ref->proc->outer_lock must be held on entry
1546  *
1547  * Return: 0, if successful, else errno
1548  */
1549 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1550 				  struct list_head *target_list)
1551 {
1552 	int ret;
1553 
1554 	if (strong) {
1555 		if (ref->data.strong == 0) {
1556 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1557 			if (ret)
1558 				return ret;
1559 		}
1560 		ref->data.strong++;
1561 	} else {
1562 		if (ref->data.weak == 0) {
1563 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1564 			if (ret)
1565 				return ret;
1566 		}
1567 		ref->data.weak++;
1568 	}
1569 	return 0;
1570 }
1571 
1572 /**
1573  * binder_dec_ref() - dec the ref for given handle
1574  * @ref:	ref to be decremented
1575  * @strong:	if true, strong decrement, else weak
1576  *
1577  * Decrement the ref.
1578  *
1579  * Return: true if ref is cleaned up and ready to be freed
1580  */
1581 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1582 {
1583 	if (strong) {
1584 		if (ref->data.strong == 0) {
1585 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1586 					  ref->proc->pid, ref->data.debug_id,
1587 					  ref->data.desc, ref->data.strong,
1588 					  ref->data.weak);
1589 			return false;
1590 		}
1591 		ref->data.strong--;
1592 		if (ref->data.strong == 0)
1593 			binder_dec_node(ref->node, strong, 1);
1594 	} else {
1595 		if (ref->data.weak == 0) {
1596 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1597 					  ref->proc->pid, ref->data.debug_id,
1598 					  ref->data.desc, ref->data.strong,
1599 					  ref->data.weak);
1600 			return false;
1601 		}
1602 		ref->data.weak--;
1603 	}
1604 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1605 		binder_cleanup_ref_olocked(ref);
1606 		return true;
1607 	}
1608 	return false;
1609 }
1610 
1611 /**
1612  * binder_get_node_from_ref() - get the node from the given proc/desc
1613  * @proc:	proc containing the ref
1614  * @desc:	the handle associated with the ref
1615  * @need_strong_ref: if true, only return node if ref is strong
1616  * @rdata:	the id/refcount data for the ref
1617  *
1618  * Given a proc and ref handle, return the associated binder_node
1619  *
1620  * Return: a binder_node or NULL if not found or not strong when strong required
1621  */
1622 static struct binder_node *binder_get_node_from_ref(
1623 		struct binder_proc *proc,
1624 		u32 desc, bool need_strong_ref,
1625 		struct binder_ref_data *rdata)
1626 {
1627 	struct binder_node *node;
1628 	struct binder_ref *ref;
1629 
1630 	binder_proc_lock(proc);
1631 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1632 	if (!ref)
1633 		goto err_no_ref;
1634 	node = ref->node;
1635 	/*
1636 	 * Take an implicit reference on the node to ensure
1637 	 * it stays alive until the call to binder_put_node()
1638 	 */
1639 	binder_inc_node_tmpref(node);
1640 	if (rdata)
1641 		*rdata = ref->data;
1642 	binder_proc_unlock(proc);
1643 
1644 	return node;
1645 
1646 err_no_ref:
1647 	binder_proc_unlock(proc);
1648 	return NULL;
1649 }
1650 
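/*
 * Illustrative sketch only (not driver code): the tmpref taken above
 * must be balanced with binder_put_node() once the caller is done with
 * the node; "desc" is a hypothetical handle value.
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, desc, true, &rdata);
 *	if (node) {
 *		// ... use node and rdata ...
 *		binder_put_node(node);
 *	}
 */
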
1651 /**
1652  * binder_free_ref() - free the binder_ref
1653  * @ref:	ref to free
1654  *
1655  * Free the binder_ref. Free the binder_node indicated by ref->node
1656  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1657  */
1658 static void binder_free_ref(struct binder_ref *ref)
1659 {
1660 	if (ref->node)
1661 		binder_free_node(ref->node);
1662 	kfree(ref->death);
1663 	kfree(ref);
1664 }
1665 
1666 /**
1667  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1668  * @proc:	proc containing the ref
1669  * @desc:	the handle associated with the ref
1670  * @increment:	true=inc reference, false=dec reference
1671  * @strong:	true=strong reference, false=weak reference
1672  * @rdata:	the id/refcount data for the ref
1673  *
1674  * Given a proc and ref handle, increment or decrement the ref
1675  * according to "increment" arg.
1676  *
1677  * Return: 0 if successful, else errno
1678  */
1679 static int binder_update_ref_for_handle(struct binder_proc *proc,
1680 		uint32_t desc, bool increment, bool strong,
1681 		struct binder_ref_data *rdata)
1682 {
1683 	int ret = 0;
1684 	struct binder_ref *ref;
1685 	bool delete_ref = false;
1686 
1687 	binder_proc_lock(proc);
1688 	ref = binder_get_ref_olocked(proc, desc, strong);
1689 	if (!ref) {
1690 		ret = -EINVAL;
1691 		goto err_no_ref;
1692 	}
1693 	if (increment)
1694 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1695 	else
1696 		delete_ref = binder_dec_ref_olocked(ref, strong);
1697 
1698 	if (rdata)
1699 		*rdata = ref->data;
1700 	binder_proc_unlock(proc);
1701 
1702 	if (delete_ref)
1703 		binder_free_ref(ref);
1704 	return ret;
1705 
1706 err_no_ref:
1707 	binder_proc_unlock(proc);
1708 	return ret;
1709 }
1710 
1711 /**
1712  * binder_dec_ref_for_handle() - dec the ref for given handle
1713  * @proc:	proc containing the ref
1714  * @desc:	the handle associated with the ref
1715  * @strong:	true=strong reference, false=weak reference
1716  * @rdata:	the id/refcount data for the ref
1717  *
1718  * Just calls binder_update_ref_for_handle() to decrement the ref.
1719  *
1720  * Return: 0 if successful, else errno
1721  */
1722 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1723 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1724 {
1725 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1726 }
1727 
1728 
1729 /**
1730  * binder_inc_ref_for_node() - increment the ref for given proc/node
1731  * @proc:	 proc containing the ref
1732  * @node:	 target node
1733  * @strong:	 true=strong reference, false=weak reference
1734  * @target_list: worklist to use if node is incremented
1735  * @rdata:	 the id/refcount data for the ref
1736  *
1737  * Given a proc and node, increment the ref. Create the ref if it
1738  * doesn't already exist
1739  *
1740  * Return: 0 if successful, else errno
1741  */
1742 static int binder_inc_ref_for_node(struct binder_proc *proc,
1743 			struct binder_node *node,
1744 			bool strong,
1745 			struct list_head *target_list,
1746 			struct binder_ref_data *rdata)
1747 {
1748 	struct binder_ref *ref;
1749 	struct binder_ref *new_ref = NULL;
1750 	int ret = 0;
1751 
1752 	binder_proc_lock(proc);
1753 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1754 	if (!ref) {
1755 		binder_proc_unlock(proc);
1756 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1757 		if (!new_ref)
1758 			return -ENOMEM;
1759 		binder_proc_lock(proc);
1760 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1761 	}
1762 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1763 	*rdata = ref->data;
1764 	binder_proc_unlock(proc);
1765 	if (new_ref && ref != new_ref)
1766 		/*
1767 		 * Another thread created the ref first so
1768 		 * free the one we allocated
1769 		 */
1770 		kfree(new_ref);
1771 	return ret;
1772 }
1773 
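/*
 * The unlock/alloc/relock dance above is the usual way to perform a
 * sleeping allocation for an object reached under a spinlock. A generic
 * sketch of the idiom (illustrative only; lookup()/lookup_or_insert()
 * are hypothetical helpers):
 *
 *	spin_lock(&lock);
 *	obj = lookup(key);
 *	if (!obj) {
 *		spin_unlock(&lock);
 *		new = kzalloc(sizeof(*new), GFP_KERNEL); // may sleep
 *		spin_lock(&lock);
 *		obj = lookup_or_insert(key, new); // recheck under the lock
 *	}
 *	spin_unlock(&lock);
 *	if (new && obj != new)
 *		kfree(new); // another thread won the race to insert
 */
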
1774 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1775 					   struct binder_transaction *t)
1776 {
1777 	BUG_ON(!target_thread);
1778 	assert_spin_locked(&target_thread->proc->inner_lock);
1779 	BUG_ON(target_thread->transaction_stack != t);
1780 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1781 	target_thread->transaction_stack =
1782 		target_thread->transaction_stack->from_parent;
1783 	t->from = NULL;
1784 }
1785 
1786 /**
1787  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1788  * @thread:	thread to decrement
1789  *
1790  * A thread needs to be kept alive while being used to create or
1791  * handle a transaction. binder_get_txn_from() is used to safely
1792  * extract t->from from a binder_transaction and keep the thread
1793  * indicated by t->from from being freed. When done with that
1794  * binder_thread, this function is called to decrement the
1795  * tmp_ref and free if appropriate (thread has been released
1796  * and no transaction being processed by the driver)
1797  */
1798 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1799 {
1800 	/*
1801 	 * tmp_ref is atomic so it can be updated concurrently; under the
1802 	 * inner lock, the thread is freed once dead with tmp_ref at zero
1803 	 */
1804 	binder_inner_proc_lock(thread->proc);
1805 	atomic_dec(&thread->tmp_ref);
1806 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1807 		binder_inner_proc_unlock(thread->proc);
1808 		binder_free_thread(thread);
1809 		return;
1810 	}
1811 	binder_inner_proc_unlock(thread->proc);
1812 }
1813 
1814 /**
1815  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1816  * @proc:	proc to decrement
1817  *
1818  * A binder_proc needs to be kept alive while being used to create or
1819  * handle a transaction. proc->tmp_ref is incremented when
1820  * creating a new transaction or the binder_proc is currently in-use
1821  * by threads that are being released. When done with the binder_proc,
1822  * this function is called to decrement the counter and free the
1823  * proc if appropriate (proc has been released, all threads have
1824  * been released and not currently in use to process a transaction).
1825  */
1826 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1827 {
1828 	binder_inner_proc_lock(proc);
1829 	proc->tmp_ref--;
1830 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1831 			!proc->tmp_ref) {
1832 		binder_inner_proc_unlock(proc);
1833 		binder_free_proc(proc);
1834 		return;
1835 	}
1836 	binder_inner_proc_unlock(proc);
1837 }
1838 
1839 /**
1840  * binder_get_txn_from() - safely extract the "from" thread in transaction
1841  * @t:	binder transaction for t->from
1842  *
1843  * Atomically return the "from" thread and increment the tmp_ref
1844  * count for the thread to ensure it stays alive until
1845  * binder_thread_dec_tmpref() is called.
1846  *
1847  * Return: the value of t->from
1848  */
1849 static struct binder_thread *binder_get_txn_from(
1850 		struct binder_transaction *t)
1851 {
1852 	struct binder_thread *from;
1853 
1854 	spin_lock(&t->lock);
1855 	from = t->from;
1856 	if (from)
1857 		atomic_inc(&from->tmp_ref);
1858 	spin_unlock(&t->lock);
1859 	return from;
1860 }
1861 
1862 /**
1863  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1864  * @t:	binder transaction for t->from
1865  *
1866  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1867  * to guarantee that the thread cannot be released while operating on it.
1868  * The caller must call binder_inner_proc_unlock() to release the inner lock
1869  * as well as call binder_thread_dec_tmpref() to release the reference.
1870  *
1871  * Return: the value of t->from
1872  */
1873 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1874 		struct binder_transaction *t)
1875 	__acquires(&t->from->proc->inner_lock)
1876 {
1877 	struct binder_thread *from;
1878 
1879 	from = binder_get_txn_from(t);
1880 	if (!from) {
1881 		__acquire(&from->proc->inner_lock);
1882 		return NULL;
1883 	}
1884 	binder_inner_proc_lock(from->proc);
1885 	if (t->from) {
1886 		BUG_ON(from != t->from);
1887 		return from;
1888 	}
1889 	binder_inner_proc_unlock(from->proc);
1890 	__acquire(&from->proc->inner_lock);
1891 	binder_thread_dec_tmpref(from);
1892 	return NULL;
1893 }
1894 
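/*
 * Illustrative sketch only: a successful call above must be balanced
 * with an inner-lock release and a tmpref drop, exactly as
 * binder_send_failed_reply() below does:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// ... operate on target_thread under proc->inner_lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */
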
1895 /**
1896  * binder_free_txn_fixups() - free unprocessed fd fixups
1897  * @t:	binder transaction whose fd fixups are to be freed
1898  *
1899  * If the transaction is being torn down prior to being
1900  * processed by the target process, free all of the
1901  * fd fixups and fput the file structs. It is safe to
1902  * call this function after the fixups have been
1903  * processed -- in that case, the list will be empty.
1904  */
1905 static void binder_free_txn_fixups(struct binder_transaction *t)
1906 {
1907 	struct binder_txn_fd_fixup *fixup, *tmp;
1908 
1909 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1910 		fput(fixup->file);
1911 		list_del(&fixup->fixup_entry);
1912 		kfree(fixup);
1913 	}
1914 }
1915 
1916 static void binder_free_transaction(struct binder_transaction *t)
1917 {
1918 	struct binder_proc *target_proc = t->to_proc;
1919 
1920 	if (target_proc) {
1921 		binder_inner_proc_lock(target_proc);
1922 		if (t->buffer)
1923 			t->buffer->transaction = NULL;
1924 		binder_inner_proc_unlock(target_proc);
1925 	}
1926 	/*
1927 	 * If the transaction has no target_proc, then
1928 	 * t->buffer->transaction has already been cleared.
1929 	 */
1930 	binder_free_txn_fixups(t);
1931 	kfree(t);
1932 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1933 }
1934 
1935 static void binder_send_failed_reply(struct binder_transaction *t,
1936 				     uint32_t error_code)
1937 {
1938 	struct binder_thread *target_thread;
1939 	struct binder_transaction *next;
1940 
1941 	BUG_ON(t->flags & TF_ONE_WAY);
1942 	while (1) {
1943 		target_thread = binder_get_txn_from_and_acq_inner(t);
1944 		if (target_thread) {
1945 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1946 				     "send failed reply for transaction %d to %d:%d\n",
1947 				      t->debug_id,
1948 				      target_thread->proc->pid,
1949 				      target_thread->pid);
1950 
1951 			binder_pop_transaction_ilocked(target_thread, t);
1952 			if (target_thread->reply_error.cmd == BR_OK) {
1953 				target_thread->reply_error.cmd = error_code;
1954 				binder_enqueue_thread_work_ilocked(
1955 					target_thread,
1956 					&target_thread->reply_error.work);
1957 				wake_up_interruptible(&target_thread->wait);
1958 			} else {
1959 				/*
1960 				 * Cannot get here for normal operation, but
1961 				 * we can if multiple synchronous transactions
1962 				 * are sent without blocking for responses.
1963 				 * Just ignore the 2nd error in this case.
1964 				 */
1965 				pr_warn("Unexpected reply error: %u\n",
1966 					target_thread->reply_error.cmd);
1967 			}
1968 			binder_inner_proc_unlock(target_thread->proc);
1969 			binder_thread_dec_tmpref(target_thread);
1970 			binder_free_transaction(t);
1971 			return;
1972 		} else {
1973 			__release(&target_thread->proc->inner_lock);
1974 		}
1975 		next = t->from_parent;
1976 
1977 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1978 			     "send failed reply for transaction %d, target dead\n",
1979 			     t->debug_id);
1980 
1981 		binder_free_transaction(t);
1982 		if (next == NULL) {
1983 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1984 				     "reply failed, no target thread at root\n");
1985 			return;
1986 		}
1987 		t = next;
1988 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1989 			     "reply failed, no target thread -- retry %d\n",
1990 			      t->debug_id);
1991 	}
1992 }
1993 
1994 /**
1995  * binder_cleanup_transaction() - cleans up undelivered transaction
1996  * @t:		transaction that needs to be cleaned up
1997  * @reason:	reason the transaction wasn't delivered
1998  * @error_code:	error to return to caller (if synchronous call)
1999  */
2000 static void binder_cleanup_transaction(struct binder_transaction *t,
2001 				       const char *reason,
2002 				       uint32_t error_code)
2003 {
2004 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2005 		binder_send_failed_reply(t, error_code);
2006 	} else {
2007 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2008 			"undelivered transaction %d, %s\n",
2009 			t->debug_id, reason);
2010 		binder_free_transaction(t);
2011 	}
2012 }
2013 
2014 /**
2015  * binder_get_object() - gets object and checks for valid metadata
2016  * @proc:	binder_proc owning the buffer
2017  * @buffer:	binder_buffer that we're parsing.
2018  * @offset:	offset in the @buffer at which to validate an object.
2019  * @object:	struct binder_object to read into
2020  *
2021  * Return:	If there's a valid metadata object at @offset in @buffer, the
2022  *		size of that object. Otherwise, it returns zero. The object
2023  *		is read into the struct binder_object pointed to by @object.
2024  */
2025 static size_t binder_get_object(struct binder_proc *proc,
2026 				struct binder_buffer *buffer,
2027 				unsigned long offset,
2028 				struct binder_object *object)
2029 {
2030 	size_t read_size;
2031 	struct binder_object_header *hdr;
2032 	size_t object_size = 0;
2033 
2034 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2035 	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2036 	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2037 					  offset, read_size))
2038 		return 0;
2039 
2040 	/* Ok, now see if we read a complete object. */
2041 	hdr = &object->hdr;
2042 	switch (hdr->type) {
2043 	case BINDER_TYPE_BINDER:
2044 	case BINDER_TYPE_WEAK_BINDER:
2045 	case BINDER_TYPE_HANDLE:
2046 	case BINDER_TYPE_WEAK_HANDLE:
2047 		object_size = sizeof(struct flat_binder_object);
2048 		break;
2049 	case BINDER_TYPE_FD:
2050 		object_size = sizeof(struct binder_fd_object);
2051 		break;
2052 	case BINDER_TYPE_PTR:
2053 		object_size = sizeof(struct binder_buffer_object);
2054 		break;
2055 	case BINDER_TYPE_FDA:
2056 		object_size = sizeof(struct binder_fd_array_object);
2057 		break;
2058 	default:
2059 		return 0;
2060 	}
2061 	if (offset <= buffer->data_size - object_size &&
2062 	    buffer->data_size >= object_size)
2063 		return object_size;
2064 	else
2065 		return 0;
2066 }
2067 
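/*
 * Illustrative sketch only: binder_get_object() is typically driven by
 * the offset array that follows the data section of a transaction
 * buffer, as in binder_transaction_buffer_release() and
 * binder_transaction() below. "off_start"/"off_end" are hypothetical
 * bounds of the offset array:
 *
 *	for (off = off_start; off < off_end; off += sizeof(binder_size_t)) {
 *		if (binder_alloc_copy_from_buffer(&proc->alloc,
 *						  &object_offset, buffer,
 *						  off, sizeof(object_offset)))
 *			break; // bad offset entry
 *		if (!binder_get_object(proc, buffer, object_offset, &object))
 *			break; // invalid or truncated object
 *		// ... dispatch on object.hdr.type ...
 *	}
 */
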
2068 /**
2069  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2070  * @proc:	binder_proc owning the buffer
2071  * @b:		binder_buffer containing the object
2072  * @object:	struct binder_object to read into
2073  * @index:	index in offset array at which the binder_buffer_object is
2074  *		located
2075  * @start_offset: points to the start of the offset array
2076  * @object_offsetp: offset of @object read from @b
2077  * @num_valid:	the number of valid offsets in the offset array
2078  *
2079  * Return:	If @index is within the valid range of the offset array
2080  *		described by @start_offset and @num_valid, and if there's a valid
2081  *		binder_buffer_object at the offset found in index @index
2082  *		of the offset array, that object is returned. Otherwise,
2083  *		%NULL is returned.
2084  *		Note that the offset found in index @index itself is not
2085  *		verified; this function assumes that @num_valid elements
2086  *		from @start_offset were previously verified to have valid offsets.
2087  *		If @object_offsetp is non-NULL, then the offset within
2088  *		@b is written to it.
2089  */
2090 static struct binder_buffer_object *binder_validate_ptr(
2091 						struct binder_proc *proc,
2092 						struct binder_buffer *b,
2093 						struct binder_object *object,
2094 						binder_size_t index,
2095 						binder_size_t start_offset,
2096 						binder_size_t *object_offsetp,
2097 						binder_size_t num_valid)
2098 {
2099 	size_t object_size;
2100 	binder_size_t object_offset;
2101 	unsigned long buffer_offset;
2102 
2103 	if (index >= num_valid)
2104 		return NULL;
2105 
2106 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2107 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2108 					  b, buffer_offset,
2109 					  sizeof(object_offset)))
2110 		return NULL;
2111 	object_size = binder_get_object(proc, b, object_offset, object);
2112 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2113 		return NULL;
2114 	if (object_offsetp)
2115 		*object_offsetp = object_offset;
2116 
2117 	return &object->bbo;
2118 }
2119 
2120 /**
2121  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2122  * @proc:		binder_proc owning the buffer
2123  * @b:			transaction buffer
2124  * @objects_start_offset: offset to start of objects buffer
2125  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2126  * @fixup_offset:	start offset in @buffer to fix up
2127  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2128  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2129  *
2130  * Return:		%true if a fixup in buffer @b at offset @fixup_offset
2131  *			is allowed.
2132  *
2133  * For safety reasons, we only allow fixups inside a buffer to happen
2134  * at increasing offsets; additionally, we only allow fixup on the last
2135  * buffer object that was verified, or one of its parents.
2136  *
2137  * Example of what is allowed:
2138  *
2139  * A
2140  *   B (parent = A, offset = 0)
2141  *   C (parent = A, offset = 16)
2142  *     D (parent = C, offset = 0)
2143  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2144  *
2145  * Examples of what is not allowed:
2146  *
2147  * Decreasing offsets within the same parent:
2148  * A
2149  *   C (parent = A, offset = 16)
2150  *   B (parent = A, offset = 0) // decreasing offset within A
2151  *
2152  * Referring to a parent that wasn't the last object or any of its parents:
2153  * A
2154  *   B (parent = A, offset = 0)
2155  *   C (parent = A, offset = 16)
2156  *     D (parent = B, offset = 0) // B is not the last object (C)
2157  *                                // or any of C's parents
2158  */
2159 static bool binder_validate_fixup(struct binder_proc *proc,
2160 				  struct binder_buffer *b,
2161 				  binder_size_t objects_start_offset,
2162 				  binder_size_t buffer_obj_offset,
2163 				  binder_size_t fixup_offset,
2164 				  binder_size_t last_obj_offset,
2165 				  binder_size_t last_min_offset)
2166 {
2167 	if (!last_obj_offset) {
2168 		/* No objects verified yet, so there is nothing to fix up in */
2169 		return false;
2170 	}
2171 
2172 	while (last_obj_offset != buffer_obj_offset) {
2173 		unsigned long buffer_offset;
2174 		struct binder_object last_object;
2175 		struct binder_buffer_object *last_bbo;
2176 		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2177 						       &last_object);
2178 		if (object_size != sizeof(*last_bbo))
2179 			return false;
2180 
2181 		last_bbo = &last_object.bbo;
2182 		/*
2183 		 * Safe to retrieve the parent of last_obj, since it
2184 		 * was already previously verified by the driver.
2185 		 */
2186 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2187 			return false;
2188 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2189 		buffer_offset = objects_start_offset +
2190 			sizeof(binder_size_t) * last_bbo->parent;
2191 		if (binder_alloc_copy_from_buffer(&proc->alloc,
2192 						  &last_obj_offset,
2193 						  b, buffer_offset,
2194 						  sizeof(last_obj_offset)))
2195 			return false;
2196 	}
2197 	return (fixup_offset >= last_min_offset);
2198 }
2199 
2200 /**
2201  * struct binder_task_work_cb - for deferred close
2202  *
2203  * @twork:                callback_head for task work
2204  * @fd:                   fd to close
2205  *
2206  * Structure to pass task work to be handled after
2207  * returning from binder_ioctl() via task_work_add().
2208  */
2209 struct binder_task_work_cb {
2210 	struct callback_head twork;
2211 	struct file *file;
2212 };
2213 
2214 /**
2215  * binder_do_fd_close() - close list of file descriptors
2216  * @twork:	callback head for task work
2217  *
2218  * It is not safe to call ksys_close() during the binder_ioctl()
2219  * function if there is a chance that binder's own file descriptor
2220  * might be closed. This is to meet the requirements for using
2221  * fdget() (see comments for __fget_light()). Therefore use
2222  * task_work_add() to schedule the close operation once we have
2223  * returned from binder_ioctl(). This function is a callback
2224  * for that mechanism and does the final fput() on the file whose
2225  * descriptor was detached by binder_deferred_fd_close().
2226  */
2227 static void binder_do_fd_close(struct callback_head *twork)
2228 {
2229 	struct binder_task_work_cb *twcb = container_of(twork,
2230 			struct binder_task_work_cb, twork);
2231 
2232 	fput(twcb->file);
2233 	kfree(twcb);
2234 }
2235 
2236 /**
2237  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2238  * @fd:		file-descriptor to close
2239  *
2240  * See comments in binder_do_fd_close(). This function is used to schedule
2241  * a file-descriptor to be closed after returning from binder_ioctl().
2242  */
2243 static void binder_deferred_fd_close(int fd)
2244 {
2245 	struct binder_task_work_cb *twcb;
2246 
2247 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2248 	if (!twcb)
2249 		return;
2250 	init_task_work(&twcb->twork, binder_do_fd_close);
2251 	__close_fd_get_file(fd, &twcb->file);
2252 	if (twcb->file)
2253 		task_work_add(current, &twcb->twork, true);
2254 	else
2255 		kfree(twcb);
2256 }
2257 
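/*
 * Flow sketch (illustrative): binder_deferred_fd_close() detaches the fd
 * from the task's file table immediately via __close_fd_get_file(), then
 * defers the final fput() until the task returns to user space:
 *
 *	binder_deferred_fd_close(fd)
 *		-> __close_fd_get_file(fd, &twcb->file) // fd is gone now
 *		-> task_work_add(current, &twcb->twork, true)
 *	// ... on return from binder_ioctl() ...
 *	binder_do_fd_close(&twcb->twork)
 *		-> fput(twcb->file)
 */
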
2258 static void binder_transaction_buffer_release(struct binder_proc *proc,
2259 					      struct binder_buffer *buffer,
2260 					      binder_size_t failed_at,
2261 					      bool is_failure)
2262 {
2263 	int debug_id = buffer->debug_id;
2264 	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2265 
2266 	binder_debug(BINDER_DEBUG_TRANSACTION,
2267 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2268 		     proc->pid, buffer->debug_id,
2269 		     buffer->data_size, buffer->offsets_size,
2270 		     (unsigned long long)failed_at);
2271 
2272 	if (buffer->target_node)
2273 		binder_dec_node(buffer->target_node, 1, 0);
2274 
2275 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2276 	off_end_offset = is_failure ? failed_at :
2277 				off_start_offset + buffer->offsets_size;
2278 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2279 	     buffer_offset += sizeof(binder_size_t)) {
2280 		struct binder_object_header *hdr;
2281 		size_t object_size = 0;
2282 		struct binder_object object;
2283 		binder_size_t object_offset;
2284 
2285 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2286 						   buffer, buffer_offset,
2287 						   sizeof(object_offset)))
2288 			object_size = binder_get_object(proc, buffer,
2289 							object_offset, &object);
2290 		if (object_size == 0) {
2291 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2292 			       debug_id, (u64)object_offset, buffer->data_size);
2293 			continue;
2294 		}
2295 		hdr = &object.hdr;
2296 		switch (hdr->type) {
2297 		case BINDER_TYPE_BINDER:
2298 		case BINDER_TYPE_WEAK_BINDER: {
2299 			struct flat_binder_object *fp;
2300 			struct binder_node *node;
2301 
2302 			fp = to_flat_binder_object(hdr);
2303 			node = binder_get_node(proc, fp->binder);
2304 			if (node == NULL) {
2305 				pr_err("transaction release %d bad node %016llx\n",
2306 				       debug_id, (u64)fp->binder);
2307 				break;
2308 			}
2309 			binder_debug(BINDER_DEBUG_TRANSACTION,
2310 				     "        node %d u%016llx\n",
2311 				     node->debug_id, (u64)node->ptr);
2312 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2313 					0);
2314 			binder_put_node(node);
2315 		} break;
2316 		case BINDER_TYPE_HANDLE:
2317 		case BINDER_TYPE_WEAK_HANDLE: {
2318 			struct flat_binder_object *fp;
2319 			struct binder_ref_data rdata;
2320 			int ret;
2321 
2322 			fp = to_flat_binder_object(hdr);
2323 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2324 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2325 
2326 			if (ret) {
2327 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2328 				 debug_id, fp->handle, ret);
2329 				break;
2330 			}
2331 			binder_debug(BINDER_DEBUG_TRANSACTION,
2332 				     "        ref %d desc %d\n",
2333 				     rdata.debug_id, rdata.desc);
2334 		} break;
2335 
2336 		case BINDER_TYPE_FD: {
2337 			/*
2338 			 * No need to close the file here since user-space
2339 			 * closes it for successfully delivered
2340 			 * transactions. For transactions that weren't
2341 			 * delivered, the new fd was never allocated so
2342 			 * there is no need to close and the fput on the
2343 			 * file is done when the transaction is torn
2344 			 * down.
2345 			 */
2346 			WARN_ON(failed_at &&
2347 				proc->tsk == current->group_leader);
2348 		} break;
2349 		case BINDER_TYPE_PTR:
2350 			/*
2351 			 * Nothing to do here, this will get cleaned up when the
2352 			 * transaction buffer gets freed
2353 			 */
2354 			break;
2355 		case BINDER_TYPE_FDA: {
2356 			struct binder_fd_array_object *fda;
2357 			struct binder_buffer_object *parent;
2358 			struct binder_object ptr_object;
2359 			binder_size_t fda_offset;
2360 			size_t fd_index;
2361 			binder_size_t fd_buf_size;
2362 			binder_size_t num_valid;
2363 
2364 			if (proc->tsk != current->group_leader) {
2365 				/*
2366 				 * Nothing to do if running in sender context
2367 				 * The fd fixups have not been applied so no
2368 				 * fds need to be closed.
2369 				 */
2370 				continue;
2371 			}
2372 
2373 			num_valid = (buffer_offset - off_start_offset) /
2374 						sizeof(binder_size_t);
2375 			fda = to_binder_fd_array_object(hdr);
2376 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2377 						     fda->parent,
2378 						     off_start_offset,
2379 						     NULL,
2380 						     num_valid);
2381 			if (!parent) {
2382 				pr_err("transaction release %d bad parent offset\n",
2383 				       debug_id);
2384 				continue;
2385 			}
2386 			fd_buf_size = sizeof(u32) * fda->num_fds;
2387 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2388 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2389 				       debug_id, (u64)fda->num_fds);
2390 				continue;
2391 			}
2392 			if (fd_buf_size > parent->length ||
2393 			    fda->parent_offset > parent->length - fd_buf_size) {
2394 				/* No space for all file descriptors here. */
2395 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2396 				       debug_id, (u64)fda->num_fds);
2397 				continue;
2398 			}
2399 			/*
2400 			 * the source data for binder_buffer_object is visible
2401 			 * to user-space and the @buffer element is the user
2402 			 * pointer to the buffer_object containing the fd_array.
2403 			 * Convert the address to an offset relative to
2404 			 * the base of the transaction buffer.
2405 			 */
2406 			fda_offset =
2407 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2408 			    fda->parent_offset;
2409 			for (fd_index = 0; fd_index < fda->num_fds;
2410 			     fd_index++) {
2411 				u32 fd;
2412 				int err;
2413 				binder_size_t offset = fda_offset +
2414 					fd_index * sizeof(fd);
2415 
2416 				err = binder_alloc_copy_from_buffer(
2417 						&proc->alloc, &fd, buffer,
2418 						offset, sizeof(fd));
2419 				WARN_ON(err);
2420 				if (!err)
2421 					binder_deferred_fd_close(fd);
2422 			}
2423 		} break;
2424 		default:
2425 			pr_err("transaction release %d bad object type %x\n",
2426 				debug_id, hdr->type);
2427 			break;
2428 		}
2429 	}
2430 }
2431 
2432 static int binder_translate_binder(struct flat_binder_object *fp,
2433 				   struct binder_transaction *t,
2434 				   struct binder_thread *thread)
2435 {
2436 	struct binder_node *node;
2437 	struct binder_proc *proc = thread->proc;
2438 	struct binder_proc *target_proc = t->to_proc;
2439 	struct binder_ref_data rdata;
2440 	int ret = 0;
2441 
2442 	node = binder_get_node(proc, fp->binder);
2443 	if (!node) {
2444 		node = binder_new_node(proc, fp);
2445 		if (!node)
2446 			return -ENOMEM;
2447 	}
2448 	if (fp->cookie != node->cookie) {
2449 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2450 				  proc->pid, thread->pid, (u64)fp->binder,
2451 				  node->debug_id, (u64)fp->cookie,
2452 				  (u64)node->cookie);
2453 		ret = -EINVAL;
2454 		goto done;
2455 	}
2456 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2457 		ret = -EPERM;
2458 		goto done;
2459 	}
2460 
2461 	ret = binder_inc_ref_for_node(target_proc, node,
2462 			fp->hdr.type == BINDER_TYPE_BINDER,
2463 			&thread->todo, &rdata);
2464 	if (ret)
2465 		goto done;
2466 
2467 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2468 		fp->hdr.type = BINDER_TYPE_HANDLE;
2469 	else
2470 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2471 	fp->binder = 0;
2472 	fp->handle = rdata.desc;
2473 	fp->cookie = 0;
2474 
2475 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2476 	binder_debug(BINDER_DEBUG_TRANSACTION,
2477 		     "        node %d u%016llx -> ref %d desc %d\n",
2478 		     node->debug_id, (u64)node->ptr,
2479 		     rdata.debug_id, rdata.desc);
2480 done:
2481 	binder_put_node(node);
2482 	return ret;
2483 }
2484 
2485 static int binder_translate_handle(struct flat_binder_object *fp,
2486 				   struct binder_transaction *t,
2487 				   struct binder_thread *thread)
2488 {
2489 	struct binder_proc *proc = thread->proc;
2490 	struct binder_proc *target_proc = t->to_proc;
2491 	struct binder_node *node;
2492 	struct binder_ref_data src_rdata;
2493 	int ret = 0;
2494 
2495 	node = binder_get_node_from_ref(proc, fp->handle,
2496 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2497 	if (!node) {
2498 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2499 				  proc->pid, thread->pid, fp->handle);
2500 		return -EINVAL;
2501 	}
2502 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2503 		ret = -EPERM;
2504 		goto done;
2505 	}
2506 
2507 	binder_node_lock(node);
2508 	if (node->proc == target_proc) {
2509 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2510 			fp->hdr.type = BINDER_TYPE_BINDER;
2511 		else
2512 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2513 		fp->binder = node->ptr;
2514 		fp->cookie = node->cookie;
2515 		if (node->proc)
2516 			binder_inner_proc_lock(node->proc);
2517 		else
2518 			__acquire(&node->proc->inner_lock);
2519 		binder_inc_node_nilocked(node,
2520 					 fp->hdr.type == BINDER_TYPE_BINDER,
2521 					 0, NULL);
2522 		if (node->proc)
2523 			binder_inner_proc_unlock(node->proc);
2524 		else
2525 			__release(&node->proc->inner_lock);
2526 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2527 		binder_debug(BINDER_DEBUG_TRANSACTION,
2528 			     "        ref %d desc %d -> node %d u%016llx\n",
2529 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2530 			     (u64)node->ptr);
2531 		binder_node_unlock(node);
2532 	} else {
2533 		struct binder_ref_data dest_rdata;
2534 
2535 		binder_node_unlock(node);
2536 		ret = binder_inc_ref_for_node(target_proc, node,
2537 				fp->hdr.type == BINDER_TYPE_HANDLE,
2538 				NULL, &dest_rdata);
2539 		if (ret)
2540 			goto done;
2541 
2542 		fp->binder = 0;
2543 		fp->handle = dest_rdata.desc;
2544 		fp->cookie = 0;
2545 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2546 						    &dest_rdata);
2547 		binder_debug(BINDER_DEBUG_TRANSACTION,
2548 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2549 			     src_rdata.debug_id, src_rdata.desc,
2550 			     dest_rdata.debug_id, dest_rdata.desc,
2551 			     node->debug_id);
2552 	}
2553 done:
2554 	binder_put_node(node);
2555 	return ret;
2556 }
2557 
2558 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2559 			       struct binder_transaction *t,
2560 			       struct binder_thread *thread,
2561 			       struct binder_transaction *in_reply_to)
2562 {
2563 	struct binder_proc *proc = thread->proc;
2564 	struct binder_proc *target_proc = t->to_proc;
2565 	struct binder_txn_fd_fixup *fixup;
2566 	struct file *file;
2567 	int ret = 0;
2568 	bool target_allows_fd;
2569 
2570 	if (in_reply_to)
2571 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2572 	else
2573 		target_allows_fd = t->buffer->target_node->accept_fds;
2574 	if (!target_allows_fd) {
2575 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2576 				  proc->pid, thread->pid,
2577 				  in_reply_to ? "reply" : "transaction",
2578 				  fd);
2579 		ret = -EPERM;
2580 		goto err_fd_not_accepted;
2581 	}
2582 
2583 	file = fget(fd);
2584 	if (!file) {
2585 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2586 				  proc->pid, thread->pid, fd);
2587 		ret = -EBADF;
2588 		goto err_fget;
2589 	}
2590 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2591 	if (ret < 0) {
2592 		ret = -EPERM;
2593 		goto err_security;
2594 	}
2595 
2596 	/*
2597 	 * Add fixup record for this transaction. The allocation
2598 	 * of the fd in the target needs to be done from a
2599 	 * target thread.
2600 	 */
2601 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2602 	if (!fixup) {
2603 		ret = -ENOMEM;
2604 		goto err_alloc;
2605 	}
2606 	fixup->file = file;
2607 	fixup->offset = fd_offset;
2608 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2609 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2610 
2611 	return ret;
2612 
2613 err_alloc:
2614 err_security:
2615 	fput(file);
2616 err_fget:
2617 err_fd_not_accepted:
2618 	return ret;
2619 }
2620 
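/*
 * Note (illustrative): binder_translate_fd() does not install an fd in
 * the target process here; it only queues a fixup on the transaction.
 * The target-side fd is allocated later, in target context, when the
 * transaction is consumed:
 *
 *	binder_translate_fd(fd, off, t, thread, in_reply_to)
 *		-> list_add_tail(&fixup->fixup_entry, &t->fd_fixups)
 *	// ... later, when the target reads the transaction ...
 *	binder_apply_fd_fixups(proc, t) // allocates fds and patches buffer
 */
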
2621 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2622 				     struct binder_buffer_object *parent,
2623 				     struct binder_transaction *t,
2624 				     struct binder_thread *thread,
2625 				     struct binder_transaction *in_reply_to)
2626 {
2627 	binder_size_t fdi, fd_buf_size;
2628 	binder_size_t fda_offset;
2629 	struct binder_proc *proc = thread->proc;
2630 	struct binder_proc *target_proc = t->to_proc;
2631 
2632 	fd_buf_size = sizeof(u32) * fda->num_fds;
2633 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2634 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2635 				  proc->pid, thread->pid, (u64)fda->num_fds);
2636 		return -EINVAL;
2637 	}
2638 	if (fd_buf_size > parent->length ||
2639 	    fda->parent_offset > parent->length - fd_buf_size) {
2640 		/* No space for all file descriptors here. */
2641 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2642 				  proc->pid, thread->pid, (u64)fda->num_fds);
2643 		return -EINVAL;
2644 	}
2645 	/*
2646 	 * the source data for binder_buffer_object is visible
2647 	 * to user-space and the @buffer element is the user
2648 	 * pointer to the buffer_object containing the fd_array.
2649 	 * Convert the address to an offset relative to
2650 	 * the base of the transaction buffer.
2651 	 */
2652 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2653 		fda->parent_offset;
2654 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2655 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2656 				  proc->pid, thread->pid);
2657 		return -EINVAL;
2658 	}
2659 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2660 		u32 fd;
2661 		int ret;
2662 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2663 
2664 		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2665 						    &fd, t->buffer,
2666 						    offset, sizeof(fd));
2667 		if (!ret)
2668 			ret = binder_translate_fd(fd, offset, t, thread,
2669 						  in_reply_to);
2670 		if (ret < 0)
2671 			return ret;
2672 	}
2673 	return 0;
2674 }
2675 
2676 static int binder_fixup_parent(struct binder_transaction *t,
2677 			       struct binder_thread *thread,
2678 			       struct binder_buffer_object *bp,
2679 			       binder_size_t off_start_offset,
2680 			       binder_size_t num_valid,
2681 			       binder_size_t last_fixup_obj_off,
2682 			       binder_size_t last_fixup_min_off)
2683 {
2684 	struct binder_buffer_object *parent;
2685 	struct binder_buffer *b = t->buffer;
2686 	struct binder_proc *proc = thread->proc;
2687 	struct binder_proc *target_proc = t->to_proc;
2688 	struct binder_object object;
2689 	binder_size_t buffer_offset;
2690 	binder_size_t parent_offset;
2691 
2692 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2693 		return 0;
2694 
2695 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2696 				     off_start_offset, &parent_offset,
2697 				     num_valid);
2698 	if (!parent) {
2699 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2700 				  proc->pid, thread->pid);
2701 		return -EINVAL;
2702 	}
2703 
2704 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2705 				   parent_offset, bp->parent_offset,
2706 				   last_fixup_obj_off,
2707 				   last_fixup_min_off)) {
2708 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2709 				  proc->pid, thread->pid);
2710 		return -EINVAL;
2711 	}
2712 
2713 	if (parent->length < sizeof(binder_uintptr_t) ||
2714 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2715 		/* No space for a pointer here! */
2716 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2717 				  proc->pid, thread->pid);
2718 		return -EINVAL;
2719 	}
2720 	buffer_offset = bp->parent_offset +
2721 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2722 	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2723 					&bp->buffer, sizeof(bp->buffer))) {
2724 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2725 				  proc->pid, thread->pid);
2726 		return -EINVAL;
2727 	}
2728 
2729 	return 0;
2730 }
2731 
2732 /**
2733  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2734  * @t:		transaction to send
2735  * @proc:	process to send the transaction to
2736  * @thread:	thread in @proc to send the transaction to (may be NULL)
2737  *
2738  * This function queues a transaction to the specified process. It will try
2739  * to find a thread in the target process to handle the transaction and
2740  * wake it up. If no thread is found, the work is queued to the proc
2741  * waitqueue.
2742  *
2743  * If the @thread parameter is not NULL, the transaction is always queued
2744  * to the waitlist of that specific thread.
2745  *
2746  * Return:	true if the transaction was successfully queued
2747  *		false if the target process or thread is dead
2748  */
2749 static bool binder_proc_transaction(struct binder_transaction *t,
2750 				    struct binder_proc *proc,
2751 				    struct binder_thread *thread)
2752 {
2753 	struct binder_node *node = t->buffer->target_node;
2754 	bool oneway = !!(t->flags & TF_ONE_WAY);
2755 	bool pending_async = false;
2756 
2757 	BUG_ON(!node);
2758 	binder_node_lock(node);
2759 	if (oneway) {
2760 		BUG_ON(thread);
2761 		if (node->has_async_transaction) {
2762 			pending_async = true;
2763 		} else {
2764 			node->has_async_transaction = true;
2765 		}
2766 	}
2767 
2768 	binder_inner_proc_lock(proc);
2769 
2770 	if (proc->is_dead || (thread && thread->is_dead)) {
2771 		binder_inner_proc_unlock(proc);
2772 		binder_node_unlock(node);
2773 		return false;
2774 	}
2775 
2776 	if (!thread && !pending_async)
2777 		thread = binder_select_thread_ilocked(proc);
2778 
2779 	if (thread)
2780 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2781 	else if (!pending_async)
2782 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2783 	else
2784 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2785 
2786 	if (!pending_async)
2787 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2788 
2789 	binder_inner_proc_unlock(proc);
2790 	binder_node_unlock(node);
2791 
2792 	return true;
2793 }
2794 
2795 /**
2796  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2797  * @node:         struct binder_node for which to get refs
2798  * @procp:        returns @node->proc if valid
2799  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2800  *
2801  * User-space normally keeps the node alive when creating a transaction
2802  * since it has a reference to the target. The local strong ref keeps it
2803  * alive if the sending process dies before the target process processes
2804  * the transaction. If the source process is malicious or has a reference
2805  * counting bug, relying on the local strong ref can fail.
2806  *
2807  * Since user-space can cause the local strong ref to go away, we also take
2808  * a tmpref on the node to ensure it survives while we are constructing
2809  * the transaction. We also need a tmpref on the proc while we are
2810  * constructing the transaction, so we take that here as well.
2811  *
2812  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2813  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2814  * target proc has died, @error is set to BR_DEAD_REPLY.
2815  */
2816 static struct binder_node *binder_get_node_refs_for_txn(
2817 		struct binder_node *node,
2818 		struct binder_proc **procp,
2819 		uint32_t *error)
2820 {
2821 	struct binder_node *target_node = NULL;
2822 
2823 	binder_node_inner_lock(node);
2824 	if (node->proc) {
2825 		target_node = node;
2826 		binder_inc_node_nilocked(node, 1, 0, NULL);
2827 		binder_inc_node_tmpref_ilocked(node);
2828 		node->proc->tmp_ref++;
2829 		*procp = node->proc;
2830 	} else
2831 		*error = BR_DEAD_REPLY;
2832 	binder_node_inner_unlock(node);
2833 
2834 	return target_node;
2835 }
2836 
2837 static void binder_transaction(struct binder_proc *proc,
2838 			       struct binder_thread *thread,
2839 			       struct binder_transaction_data *tr, int reply,
2840 			       binder_size_t extra_buffers_size)
2841 {
2842 	int ret;
2843 	struct binder_transaction *t;
2844 	struct binder_work *w;
2845 	struct binder_work *tcomplete;
2846 	binder_size_t buffer_offset = 0;
2847 	binder_size_t off_start_offset, off_end_offset;
2848 	binder_size_t off_min;
2849 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2850 	struct binder_proc *target_proc = NULL;
2851 	struct binder_thread *target_thread = NULL;
2852 	struct binder_node *target_node = NULL;
2853 	struct binder_transaction *in_reply_to = NULL;
2854 	struct binder_transaction_log_entry *e;
2855 	uint32_t return_error = 0;
2856 	uint32_t return_error_param = 0;
2857 	uint32_t return_error_line = 0;
2858 	binder_size_t last_fixup_obj_off = 0;
2859 	binder_size_t last_fixup_min_off = 0;
2860 	struct binder_context *context = proc->context;
2861 	int t_debug_id = atomic_inc_return(&binder_last_id);
2862 	char *secctx = NULL;
2863 	u32 secctx_sz = 0;
2864 
2865 	e = binder_transaction_log_add(&binder_transaction_log);
2866 	e->debug_id = t_debug_id;
2867 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2868 	e->from_proc = proc->pid;
2869 	e->from_thread = thread->pid;
2870 	e->target_handle = tr->target.handle;
2871 	e->data_size = tr->data_size;
2872 	e->offsets_size = tr->offsets_size;
2873 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2874 
2875 	if (reply) {
2876 		binder_inner_proc_lock(proc);
2877 		in_reply_to = thread->transaction_stack;
2878 		if (in_reply_to == NULL) {
2879 			binder_inner_proc_unlock(proc);
2880 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2881 					  proc->pid, thread->pid);
2882 			return_error = BR_FAILED_REPLY;
2883 			return_error_param = -EPROTO;
2884 			return_error_line = __LINE__;
2885 			goto err_empty_call_stack;
2886 		}
2887 		if (in_reply_to->to_thread != thread) {
2888 			spin_lock(&in_reply_to->lock);
2889 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2890 				proc->pid, thread->pid, in_reply_to->debug_id,
2891 				in_reply_to->to_proc ?
2892 				in_reply_to->to_proc->pid : 0,
2893 				in_reply_to->to_thread ?
2894 				in_reply_to->to_thread->pid : 0);
2895 			spin_unlock(&in_reply_to->lock);
2896 			binder_inner_proc_unlock(proc);
2897 			return_error = BR_FAILED_REPLY;
2898 			return_error_param = -EPROTO;
2899 			return_error_line = __LINE__;
2900 			in_reply_to = NULL;
2901 			goto err_bad_call_stack;
2902 		}
2903 		thread->transaction_stack = in_reply_to->to_parent;
2904 		binder_inner_proc_unlock(proc);
2905 		binder_set_nice(in_reply_to->saved_priority);
2906 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2907 		if (target_thread == NULL) {
2908 			/* annotation for sparse */
2909 			__release(&target_thread->proc->inner_lock);
2910 			return_error = BR_DEAD_REPLY;
2911 			return_error_line = __LINE__;
2912 			goto err_dead_binder;
2913 		}
2914 		if (target_thread->transaction_stack != in_reply_to) {
2915 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2916 				proc->pid, thread->pid,
2917 				target_thread->transaction_stack ?
2918 				target_thread->transaction_stack->debug_id : 0,
2919 				in_reply_to->debug_id);
2920 			binder_inner_proc_unlock(target_thread->proc);
2921 			return_error = BR_FAILED_REPLY;
2922 			return_error_param = -EPROTO;
2923 			return_error_line = __LINE__;
2924 			in_reply_to = NULL;
2925 			target_thread = NULL;
2926 			goto err_dead_binder;
2927 		}
2928 		target_proc = target_thread->proc;
2929 		target_proc->tmp_ref++;
2930 		binder_inner_proc_unlock(target_thread->proc);
2931 	} else {
2932 		if (tr->target.handle) {
2933 			struct binder_ref *ref;
2934 
2935 			/*
2936 			 * There must already be a strong ref
2937 			 * on this node, so do a strong
2938 			 * increment on the node to ensure it
2939 			 * stays alive until the transaction is
2940 			 * done.
2941 			 */
2942 			binder_proc_lock(proc);
2943 			ref = binder_get_ref_olocked(proc, tr->target.handle,
2944 						     true);
2945 			if (ref) {
2946 				target_node = binder_get_node_refs_for_txn(
2947 						ref->node, &target_proc,
2948 						&return_error);
2949 			} else {
2950 				binder_user_error("%d:%d got transaction to invalid handle\n",
2951 						  proc->pid, thread->pid);
2952 				return_error = BR_FAILED_REPLY;
2953 			}
2954 			binder_proc_unlock(proc);
2955 		} else {
2956 			mutex_lock(&context->context_mgr_node_lock);
2957 			target_node = context->binder_context_mgr_node;
2958 			if (target_node)
2959 				target_node = binder_get_node_refs_for_txn(
2960 						target_node, &target_proc,
2961 						&return_error);
2962 			else
2963 				return_error = BR_DEAD_REPLY;
2964 			mutex_unlock(&context->context_mgr_node_lock);
2965 			if (target_node && target_proc->pid == proc->pid) {
2966 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2967 						  proc->pid, thread->pid);
2968 				return_error = BR_FAILED_REPLY;
2969 				return_error_param = -EINVAL;
2970 				return_error_line = __LINE__;
2971 				goto err_invalid_target_handle;
2972 			}
2973 		}
2974 		if (!target_node) {
2975 			/*
2976 			 * return_error is set above
2977 			 */
2978 			return_error_param = -EINVAL;
2979 			return_error_line = __LINE__;
2980 			goto err_dead_binder;
2981 		}
2982 		e->to_node = target_node->debug_id;
2983 		if (security_binder_transaction(proc->tsk,
2984 						target_proc->tsk) < 0) {
2985 			return_error = BR_FAILED_REPLY;
2986 			return_error_param = -EPERM;
2987 			return_error_line = __LINE__;
2988 			goto err_invalid_target_handle;
2989 		}
2990 		binder_inner_proc_lock(proc);
2991 
2992 		w = list_first_entry_or_null(&thread->todo,
2993 					     struct binder_work, entry);
2994 		if (!(tr->flags & TF_ONE_WAY) && w &&
2995 		    w->type == BINDER_WORK_TRANSACTION) {
2996 			/*
2997 			 * Do not allow new outgoing transaction from a
2998 			 * thread that has a transaction at the head of
2999 			 * its todo list. Only need to check the head
3000 			 * because binder_select_thread_ilocked picks a
3001 			 * thread from proc->waiting_threads to enqueue
3002 			 * the transaction, and nothing is queued to the
3003 			 * todo list while the thread is on waiting_threads.
3004 			 */
3005 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3006 					  proc->pid, thread->pid);
3007 			binder_inner_proc_unlock(proc);
3008 			return_error = BR_FAILED_REPLY;
3009 			return_error_param = -EPROTO;
3010 			return_error_line = __LINE__;
3011 			goto err_bad_todo_list;
3012 		}
3013 
3014 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3015 			struct binder_transaction *tmp;
3016 
3017 			tmp = thread->transaction_stack;
3018 			if (tmp->to_thread != thread) {
3019 				spin_lock(&tmp->lock);
3020 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3021 					proc->pid, thread->pid, tmp->debug_id,
3022 					tmp->to_proc ? tmp->to_proc->pid : 0,
3023 					tmp->to_thread ?
3024 					tmp->to_thread->pid : 0);
3025 				spin_unlock(&tmp->lock);
3026 				binder_inner_proc_unlock(proc);
3027 				return_error = BR_FAILED_REPLY;
3028 				return_error_param = -EPROTO;
3029 				return_error_line = __LINE__;
3030 				goto err_bad_call_stack;
3031 			}
3032 			while (tmp) {
3033 				struct binder_thread *from;
3034 
3035 				spin_lock(&tmp->lock);
3036 				from = tmp->from;
3037 				if (from && from->proc == target_proc) {
3038 					atomic_inc(&from->tmp_ref);
3039 					target_thread = from;
3040 					spin_unlock(&tmp->lock);
3041 					break;
3042 				}
3043 				spin_unlock(&tmp->lock);
3044 				tmp = tmp->from_parent;
3045 			}
3046 		}
3047 		binder_inner_proc_unlock(proc);
3048 	}
3049 	if (target_thread)
3050 		e->to_thread = target_thread->pid;
3051 	e->to_proc = target_proc->pid;
3052 
3053 	/* TODO: reuse incoming transaction for reply */
3054 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3055 	if (t == NULL) {
3056 		return_error = BR_FAILED_REPLY;
3057 		return_error_param = -ENOMEM;
3058 		return_error_line = __LINE__;
3059 		goto err_alloc_t_failed;
3060 	}
3061 	INIT_LIST_HEAD(&t->fd_fixups);
3062 	binder_stats_created(BINDER_STAT_TRANSACTION);
3063 	spin_lock_init(&t->lock);
3064 
3065 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3066 	if (tcomplete == NULL) {
3067 		return_error = BR_FAILED_REPLY;
3068 		return_error_param = -ENOMEM;
3069 		return_error_line = __LINE__;
3070 		goto err_alloc_tcomplete_failed;
3071 	}
3072 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3073 
3074 	t->debug_id = t_debug_id;
3075 
3076 	if (reply)
3077 		binder_debug(BINDER_DEBUG_TRANSACTION,
3078 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3079 			     proc->pid, thread->pid, t->debug_id,
3080 			     target_proc->pid, target_thread->pid,
3081 			     (u64)tr->data.ptr.buffer,
3082 			     (u64)tr->data.ptr.offsets,
3083 			     (u64)tr->data_size, (u64)tr->offsets_size,
3084 			     (u64)extra_buffers_size);
3085 	else
3086 		binder_debug(BINDER_DEBUG_TRANSACTION,
3087 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3088 			     proc->pid, thread->pid, t->debug_id,
3089 			     target_proc->pid, target_node->debug_id,
3090 			     (u64)tr->data.ptr.buffer,
3091 			     (u64)tr->data.ptr.offsets,
3092 			     (u64)tr->data_size, (u64)tr->offsets_size,
3093 			     (u64)extra_buffers_size);
3094 
3095 	if (!reply && !(tr->flags & TF_ONE_WAY))
3096 		t->from = thread;
3097 	else
3098 		t->from = NULL;
3099 	t->sender_euid = task_euid(proc->tsk);
3100 	t->to_proc = target_proc;
3101 	t->to_thread = target_thread;
3102 	t->code = tr->code;
3103 	t->flags = tr->flags;
3104 	t->priority = task_nice(current);
3105 
3106 	if (target_node && target_node->txn_security_ctx) {
3107 		u32 secid;
3108 		size_t added_size;
3109 
3110 		security_task_getsecid(proc->tsk, &secid);
3111 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3112 		if (ret) {
3113 			return_error = BR_FAILED_REPLY;
3114 			return_error_param = ret;
3115 			return_error_line = __LINE__;
3116 			goto err_get_secctx_failed;
3117 		}
3118 		added_size = ALIGN(secctx_sz, sizeof(u64));
3119 		extra_buffers_size += added_size;
3120 		if (extra_buffers_size < added_size) {
3121 			/* integer overflow of extra_buffers_size */
3122 			return_error = BR_FAILED_REPLY;
3123 			return_error_param = -EINVAL;
3124 			return_error_line = __LINE__;
3125 			goto err_bad_extra_size;
3126 		}
3127 	}
3128 
3129 	trace_binder_transaction(reply, t, target_node);
3130 
3131 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3132 		tr->offsets_size, extra_buffers_size,
3133 		!reply && (t->flags & TF_ONE_WAY));
3134 	if (IS_ERR(t->buffer)) {
3135 		/*
3136 		 * -ESRCH indicates VMA cleared. The target is dying.
3137 		 */
3138 		return_error_param = PTR_ERR(t->buffer);
3139 		return_error = return_error_param == -ESRCH ?
3140 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3141 		return_error_line = __LINE__;
3142 		t->buffer = NULL;
3143 		goto err_binder_alloc_buf_failed;
3144 	}
3145 	if (secctx) {
3146 		int err;
3147 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3148 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3149 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3150 				    ALIGN(secctx_sz, sizeof(u64));
3151 
3152 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3153 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3154 						  t->buffer, buf_offset,
3155 						  secctx, secctx_sz);
3156 		if (err) {
3157 			t->security_ctx = 0;
3158 			WARN_ON(1);
3159 		}
3160 		security_release_secctx(secctx, secctx_sz);
3161 		secctx = NULL;
3162 	}
3163 	t->buffer->debug_id = t->debug_id;
3164 	t->buffer->transaction = t;
3165 	t->buffer->target_node = target_node;
3166 	trace_binder_transaction_alloc_buf(t->buffer);
3167 
3168 	if (binder_alloc_copy_user_to_buffer(
3169 				&target_proc->alloc,
3170 				t->buffer, 0,
3171 				(const void __user *)
3172 					(uintptr_t)tr->data.ptr.buffer,
3173 				tr->data_size)) {
3174 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3175 				proc->pid, thread->pid);
3176 		return_error = BR_FAILED_REPLY;
3177 		return_error_param = -EFAULT;
3178 		return_error_line = __LINE__;
3179 		goto err_copy_data_failed;
3180 	}
3181 	if (binder_alloc_copy_user_to_buffer(
3182 				&target_proc->alloc,
3183 				t->buffer,
3184 				ALIGN(tr->data_size, sizeof(void *)),
3185 				(const void __user *)
3186 					(uintptr_t)tr->data.ptr.offsets,
3187 				tr->offsets_size)) {
3188 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3189 				proc->pid, thread->pid);
3190 		return_error = BR_FAILED_REPLY;
3191 		return_error_param = -EFAULT;
3192 		return_error_line = __LINE__;
3193 		goto err_copy_data_failed;
3194 	}
3195 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3196 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3197 				proc->pid, thread->pid, (u64)tr->offsets_size);
3198 		return_error = BR_FAILED_REPLY;
3199 		return_error_param = -EINVAL;
3200 		return_error_line = __LINE__;
3201 		goto err_bad_offset;
3202 	}
3203 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3204 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3205 				  proc->pid, thread->pid,
3206 				  (u64)extra_buffers_size);
3207 		return_error = BR_FAILED_REPLY;
3208 		return_error_param = -EINVAL;
3209 		return_error_line = __LINE__;
3210 		goto err_bad_offset;
3211 	}
3212 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3213 	buffer_offset = off_start_offset;
3214 	off_end_offset = off_start_offset + tr->offsets_size;
3215 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3216 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3217 		ALIGN(secctx_sz, sizeof(u64));
3218 	off_min = 0;
3219 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3220 	     buffer_offset += sizeof(binder_size_t)) {
3221 		struct binder_object_header *hdr;
3222 		size_t object_size;
3223 		struct binder_object object;
3224 		binder_size_t object_offset;
3225 
3226 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3227 						  &object_offset,
3228 						  t->buffer,
3229 						  buffer_offset,
3230 						  sizeof(object_offset))) {
3231 			return_error = BR_FAILED_REPLY;
3232 			return_error_param = -EINVAL;
3233 			return_error_line = __LINE__;
3234 			goto err_bad_offset;
3235 		}
3236 		object_size = binder_get_object(target_proc, t->buffer,
3237 						object_offset, &object);
3238 		if (object_size == 0 || object_offset < off_min) {
3239 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3240 					  proc->pid, thread->pid,
3241 					  (u64)object_offset,
3242 					  (u64)off_min,
3243 					  (u64)t->buffer->data_size);
3244 			return_error = BR_FAILED_REPLY;
3245 			return_error_param = -EINVAL;
3246 			return_error_line = __LINE__;
3247 			goto err_bad_offset;
3248 		}
3249 
3250 		hdr = &object.hdr;
3251 		off_min = object_offset + object_size;
3252 		switch (hdr->type) {
3253 		case BINDER_TYPE_BINDER:
3254 		case BINDER_TYPE_WEAK_BINDER: {
3255 			struct flat_binder_object *fp;
3256 
3257 			fp = to_flat_binder_object(hdr);
3258 			ret = binder_translate_binder(fp, t, thread);
3259 
3260 			if (ret < 0 ||
3261 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3262 							t->buffer,
3263 							object_offset,
3264 							fp, sizeof(*fp))) {
3265 				return_error = BR_FAILED_REPLY;
3266 				return_error_param = ret;
3267 				return_error_line = __LINE__;
3268 				goto err_translate_failed;
3269 			}
3270 		} break;
3271 		case BINDER_TYPE_HANDLE:
3272 		case BINDER_TYPE_WEAK_HANDLE: {
3273 			struct flat_binder_object *fp;
3274 
3275 			fp = to_flat_binder_object(hdr);
3276 			ret = binder_translate_handle(fp, t, thread);
3277 			if (ret < 0 ||
3278 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3279 							t->buffer,
3280 							object_offset,
3281 							fp, sizeof(*fp))) {
3282 				return_error = BR_FAILED_REPLY;
3283 				return_error_param = ret;
3284 				return_error_line = __LINE__;
3285 				goto err_translate_failed;
3286 			}
3287 		} break;
3288 
3289 		case BINDER_TYPE_FD: {
3290 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3291 			binder_size_t fd_offset = object_offset +
3292 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3293 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3294 						      thread, in_reply_to);
3295 
3296 			fp->pad_binder = 0;
3297 			if (ret < 0 ||
3298 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3299 							t->buffer,
3300 							object_offset,
3301 							fp, sizeof(*fp))) {
3302 				return_error = BR_FAILED_REPLY;
3303 				return_error_param = ret;
3304 				return_error_line = __LINE__;
3305 				goto err_translate_failed;
3306 			}
3307 		} break;
3308 		case BINDER_TYPE_FDA: {
3309 			struct binder_object ptr_object;
3310 			binder_size_t parent_offset;
3311 			struct binder_fd_array_object *fda =
3312 				to_binder_fd_array_object(hdr);
3313 			size_t num_valid = (buffer_offset - off_start_offset) /
3314 						sizeof(binder_size_t);
3315 			struct binder_buffer_object *parent =
3316 				binder_validate_ptr(target_proc, t->buffer,
3317 						    &ptr_object, fda->parent,
3318 						    off_start_offset,
3319 						    &parent_offset,
3320 						    num_valid);
3321 			if (!parent) {
3322 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3323 						  proc->pid, thread->pid);
3324 				return_error = BR_FAILED_REPLY;
3325 				return_error_param = -EINVAL;
3326 				return_error_line = __LINE__;
3327 				goto err_bad_parent;
3328 			}
3329 			if (!binder_validate_fixup(target_proc, t->buffer,
3330 						   off_start_offset,
3331 						   parent_offset,
3332 						   fda->parent_offset,
3333 						   last_fixup_obj_off,
3334 						   last_fixup_min_off)) {
3335 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3336 						  proc->pid, thread->pid);
3337 				return_error = BR_FAILED_REPLY;
3338 				return_error_param = -EINVAL;
3339 				return_error_line = __LINE__;
3340 				goto err_bad_parent;
3341 			}
3342 			ret = binder_translate_fd_array(fda, parent, t, thread,
3343 							in_reply_to);
3344 			if (ret < 0) {
3345 				return_error = BR_FAILED_REPLY;
3346 				return_error_param = ret;
3347 				return_error_line = __LINE__;
3348 				goto err_translate_failed;
3349 			}
3350 			last_fixup_obj_off = parent_offset;
3351 			last_fixup_min_off =
3352 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3353 		} break;
3354 		case BINDER_TYPE_PTR: {
3355 			struct binder_buffer_object *bp =
3356 				to_binder_buffer_object(hdr);
3357 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3358 			size_t num_valid;
3359 
3360 			if (bp->length > buf_left) {
3361 				binder_user_error("%d:%d got transaction with too large buffer\n",
3362 						  proc->pid, thread->pid);
3363 				return_error = BR_FAILED_REPLY;
3364 				return_error_param = -EINVAL;
3365 				return_error_line = __LINE__;
3366 				goto err_bad_offset;
3367 			}
3368 			if (binder_alloc_copy_user_to_buffer(
3369 						&target_proc->alloc,
3370 						t->buffer,
3371 						sg_buf_offset,
3372 						(const void __user *)
3373 							(uintptr_t)bp->buffer,
3374 						bp->length)) {
3375 				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3376 						  proc->pid, thread->pid);
3377 				return_error = BR_FAILED_REPLY;
3378 				return_error_param = -EFAULT;
3379 				return_error_line = __LINE__;
3380 				goto err_copy_data_failed;
3381 			}
3382 			/* Fixup buffer pointer to target proc address space */
3383 			bp->buffer = (uintptr_t)
3384 				t->buffer->user_data + sg_buf_offset;
3385 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3386 
3387 			num_valid = (buffer_offset - off_start_offset) /
3388 					sizeof(binder_size_t);
3389 			ret = binder_fixup_parent(t, thread, bp,
3390 						  off_start_offset,
3391 						  num_valid,
3392 						  last_fixup_obj_off,
3393 						  last_fixup_min_off);
3394 			if (ret < 0 ||
3395 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3396 							t->buffer,
3397 							object_offset,
3398 							bp, sizeof(*bp))) {
3399 				return_error = BR_FAILED_REPLY;
3400 				return_error_param = ret;
3401 				return_error_line = __LINE__;
3402 				goto err_translate_failed;
3403 			}
3404 			last_fixup_obj_off = object_offset;
3405 			last_fixup_min_off = 0;
3406 		} break;
3407 		default:
3408 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3409 				proc->pid, thread->pid, hdr->type);
3410 			return_error = BR_FAILED_REPLY;
3411 			return_error_param = -EINVAL;
3412 			return_error_line = __LINE__;
3413 			goto err_bad_object_type;
3414 		}
3415 	}
3416 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3417 	t->work.type = BINDER_WORK_TRANSACTION;
3418 
3419 	if (reply) {
3420 		binder_enqueue_thread_work(thread, tcomplete);
3421 		binder_inner_proc_lock(target_proc);
3422 		if (target_thread->is_dead) {
3423 			binder_inner_proc_unlock(target_proc);
3424 			goto err_dead_proc_or_thread;
3425 		}
3426 		BUG_ON(t->buffer->async_transaction != 0);
3427 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3428 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3429 		binder_inner_proc_unlock(target_proc);
3430 		wake_up_interruptible_sync(&target_thread->wait);
3431 		binder_free_transaction(in_reply_to);
3432 	} else if (!(t->flags & TF_ONE_WAY)) {
3433 		BUG_ON(t->buffer->async_transaction != 0);
3434 		binder_inner_proc_lock(proc);
3435 		/*
3436 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3437 		 * userspace immediately; this allows the target process to
3438 		 * immediately start processing this transaction, reducing
3439 		 * latency. We will then return the TRANSACTION_COMPLETE when
3440 		 * the target replies (or there is an error).
3441 		 */
3442 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3443 		t->need_reply = 1;
3444 		t->from_parent = thread->transaction_stack;
3445 		thread->transaction_stack = t;
3446 		binder_inner_proc_unlock(proc);
3447 		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3448 			binder_inner_proc_lock(proc);
3449 			binder_pop_transaction_ilocked(thread, t);
3450 			binder_inner_proc_unlock(proc);
3451 			goto err_dead_proc_or_thread;
3452 		}
3453 	} else {
3454 		BUG_ON(target_node == NULL);
3455 		BUG_ON(t->buffer->async_transaction != 1);
3456 		binder_enqueue_thread_work(thread, tcomplete);
3457 		if (!binder_proc_transaction(t, target_proc, NULL))
3458 			goto err_dead_proc_or_thread;
3459 	}
3460 	if (target_thread)
3461 		binder_thread_dec_tmpref(target_thread);
3462 	binder_proc_dec_tmpref(target_proc);
3463 	if (target_node)
3464 		binder_dec_node_tmpref(target_node);
3465 	/*
3466 	 * write barrier to synchronize with initialization
3467 	 * of log entry
3468 	 */
3469 	smp_wmb();
3470 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3471 	return;
3472 
3473 err_dead_proc_or_thread:
3474 	return_error = BR_DEAD_REPLY;
3475 	return_error_line = __LINE__;
3476 	binder_dequeue_work(proc, tcomplete);
3477 err_translate_failed:
3478 err_bad_object_type:
3479 err_bad_offset:
3480 err_bad_parent:
3481 err_copy_data_failed:
3482 	binder_free_txn_fixups(t);
3483 	trace_binder_transaction_failed_buffer_release(t->buffer);
3484 	binder_transaction_buffer_release(target_proc, t->buffer,
3485 					  buffer_offset, true);
3486 	if (target_node)
3487 		binder_dec_node_tmpref(target_node);
3488 	target_node = NULL;
3489 	t->buffer->transaction = NULL;
3490 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3491 err_binder_alloc_buf_failed:
3492 err_bad_extra_size:
3493 	if (secctx)
3494 		security_release_secctx(secctx, secctx_sz);
3495 err_get_secctx_failed:
3496 	kfree(tcomplete);
3497 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3498 err_alloc_tcomplete_failed:
3499 	kfree(t);
3500 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3501 err_alloc_t_failed:
3502 err_bad_todo_list:
3503 err_bad_call_stack:
3504 err_empty_call_stack:
3505 err_dead_binder:
3506 err_invalid_target_handle:
3507 	if (target_thread)
3508 		binder_thread_dec_tmpref(target_thread);
3509 	if (target_proc)
3510 		binder_proc_dec_tmpref(target_proc);
3511 	if (target_node) {
3512 		binder_dec_node(target_node, 1, 0);
3513 		binder_dec_node_tmpref(target_node);
3514 	}
3515 
3516 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3517 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3518 		     proc->pid, thread->pid, return_error, return_error_param,
3519 		     (u64)tr->data_size, (u64)tr->offsets_size,
3520 		     return_error_line);
3521 
3522 	{
3523 		struct binder_transaction_log_entry *fe;
3524 
3525 		e->return_error = return_error;
3526 		e->return_error_param = return_error_param;
3527 		e->return_error_line = return_error_line;
3528 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3529 		*fe = *e;
3530 		/*
3531 		 * write barrier to synchronize with initialization
3532 		 * of log entry
3533 		 */
3534 		smp_wmb();
3535 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3536 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3537 	}
3538 
3539 	BUG_ON(thread->return_error.cmd != BR_OK);
3540 	if (in_reply_to) {
3541 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3542 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3543 		binder_send_failed_reply(in_reply_to, return_error);
3544 	} else {
3545 		thread->return_error.cmd = return_error;
3546 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3547 	}
3548 }
3549 
3550 /**
3551  * binder_free_buf() - free the specified buffer
3552  * @proc:	binder proc that owns buffer
3553  * @buffer:	buffer to be freed
3554  *
3555  * If the buffer is for an async transaction, enqueue the next
3556  * async transaction from the node.
3557  *
3558  * Clean up the buffer and free it.
3559  */
3560 static void
3561 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3562 {
3563 	binder_inner_proc_lock(proc);
3564 	if (buffer->transaction) {
3565 		buffer->transaction->buffer = NULL;
3566 		buffer->transaction = NULL;
3567 	}
3568 	binder_inner_proc_unlock(proc);
3569 	if (buffer->async_transaction && buffer->target_node) {
3570 		struct binder_node *buf_node;
3571 		struct binder_work *w;
3572 
3573 		buf_node = buffer->target_node;
3574 		binder_node_inner_lock(buf_node);
3575 		BUG_ON(!buf_node->has_async_transaction);
3576 		BUG_ON(buf_node->proc != proc);
3577 		w = binder_dequeue_work_head_ilocked(
3578 				&buf_node->async_todo);
3579 		if (!w) {
3580 			buf_node->has_async_transaction = false;
3581 		} else {
3582 			binder_enqueue_work_ilocked(
3583 					w, &proc->todo);
3584 			binder_wakeup_proc_ilocked(proc);
3585 		}
3586 		binder_node_inner_unlock(buf_node);
3587 	}
3588 	trace_binder_transaction_buffer_release(buffer);
3589 	binder_transaction_buffer_release(proc, buffer, 0, false);
3590 	binder_alloc_free_buf(&proc->alloc, buffer);
3591 }
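
/*
 * Illustrative user-space counterpart (not driver code, names are
 * hypothetical): once a client has consumed the payload of a received
 * transaction, it hands the buffer back with BC_FREE_BUFFER, which is
 * what ultimately runs binder_free_buf() above. A minimal sketch,
 * assuming the UAPI types from <uapi/linux/android/binder.h>:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) fb = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buffer = trd.data.ptr.buffer,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(fb),
 *		.write_buffer = (binder_uintptr_t)&fb,
 *	};
 *
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * "trd" is the binder_transaction_data received in an earlier
 * BR_TRANSACTION and "binder_fd" the open binder device; both are
 * assumptions of this sketch, not names used by the driver.
 */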
3592 
3593 static int binder_thread_write(struct binder_proc *proc,
3594 			struct binder_thread *thread,
3595 			binder_uintptr_t binder_buffer, size_t size,
3596 			binder_size_t *consumed)
3597 {
3598 	uint32_t cmd;
3599 	struct binder_context *context = proc->context;
3600 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3601 	void __user *ptr = buffer + *consumed;
3602 	void __user *end = buffer + size;
3603 
3604 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3605 		int ret;
3606 
3607 		if (get_user(cmd, (uint32_t __user *)ptr))
3608 			return -EFAULT;
3609 		ptr += sizeof(uint32_t);
3610 		trace_binder_command(cmd);
3611 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3612 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3613 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3614 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3615 		}
3616 		switch (cmd) {
3617 		case BC_INCREFS:
3618 		case BC_ACQUIRE:
3619 		case BC_RELEASE:
3620 		case BC_DECREFS: {
3621 			uint32_t target;
3622 			const char *debug_string;
3623 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3624 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3625 			struct binder_ref_data rdata;
3626 
3627 			if (get_user(target, (uint32_t __user *)ptr))
3628 				return -EFAULT;
3629 
3630 			ptr += sizeof(uint32_t);
3631 			ret = -1;
3632 			if (increment && !target) {
3633 				struct binder_node *ctx_mgr_node;
3634 				mutex_lock(&context->context_mgr_node_lock);
3635 				ctx_mgr_node = context->binder_context_mgr_node;
3636 				if (ctx_mgr_node)
3637 					ret = binder_inc_ref_for_node(
3638 							proc, ctx_mgr_node,
3639 							strong, NULL, &rdata);
3640 				mutex_unlock(&context->context_mgr_node_lock);
3641 			}
3642 			if (ret)
3643 				ret = binder_update_ref_for_handle(
3644 						proc, target, increment, strong,
3645 						&rdata);
3646 			if (!ret && rdata.desc != target) {
3647 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3648 					proc->pid, thread->pid,
3649 					target, rdata.desc);
3650 			}
3651 			switch (cmd) {
3652 			case BC_INCREFS:
3653 				debug_string = "IncRefs";
3654 				break;
3655 			case BC_ACQUIRE:
3656 				debug_string = "Acquire";
3657 				break;
3658 			case BC_RELEASE:
3659 				debug_string = "Release";
3660 				break;
3661 			case BC_DECREFS:
3662 			default:
3663 				debug_string = "DecRefs";
3664 				break;
3665 			}
3666 			if (ret) {
3667 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3668 					proc->pid, thread->pid, debug_string,
3669 					strong, target, ret);
3670 				break;
3671 			}
3672 			binder_debug(BINDER_DEBUG_USER_REFS,
3673 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3674 				     proc->pid, thread->pid, debug_string,
3675 				     rdata.debug_id, rdata.desc, rdata.strong,
3676 				     rdata.weak);
3677 			break;
3678 		}
3679 		case BC_INCREFS_DONE:
3680 		case BC_ACQUIRE_DONE: {
3681 			binder_uintptr_t node_ptr;
3682 			binder_uintptr_t cookie;
3683 			struct binder_node *node;
3684 			bool free_node;
3685 
3686 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3687 				return -EFAULT;
3688 			ptr += sizeof(binder_uintptr_t);
3689 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3690 				return -EFAULT;
3691 			ptr += sizeof(binder_uintptr_t);
3692 			node = binder_get_node(proc, node_ptr);
3693 			if (node == NULL) {
3694 				binder_user_error("%d:%d %s u%016llx no match\n",
3695 					proc->pid, thread->pid,
3696 					cmd == BC_INCREFS_DONE ?
3697 					"BC_INCREFS_DONE" :
3698 					"BC_ACQUIRE_DONE",
3699 					(u64)node_ptr);
3700 				break;
3701 			}
3702 			if (cookie != node->cookie) {
3703 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3704 					proc->pid, thread->pid,
3705 					cmd == BC_INCREFS_DONE ?
3706 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3707 					(u64)node_ptr, node->debug_id,
3708 					(u64)cookie, (u64)node->cookie);
3709 				binder_put_node(node);
3710 				break;
3711 			}
3712 			binder_node_inner_lock(node);
3713 			if (cmd == BC_ACQUIRE_DONE) {
3714 				if (node->pending_strong_ref == 0) {
3715 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3716 						proc->pid, thread->pid,
3717 						node->debug_id);
3718 					binder_node_inner_unlock(node);
3719 					binder_put_node(node);
3720 					break;
3721 				}
3722 				node->pending_strong_ref = 0;
3723 			} else {
3724 				if (node->pending_weak_ref == 0) {
3725 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3726 						proc->pid, thread->pid,
3727 						node->debug_id);
3728 					binder_node_inner_unlock(node);
3729 					binder_put_node(node);
3730 					break;
3731 				}
3732 				node->pending_weak_ref = 0;
3733 			}
3734 			free_node = binder_dec_node_nilocked(node,
3735 					cmd == BC_ACQUIRE_DONE, 0);
3736 			WARN_ON(free_node);
3737 			binder_debug(BINDER_DEBUG_USER_REFS,
3738 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3739 				     proc->pid, thread->pid,
3740 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3741 				     node->debug_id, node->local_strong_refs,
3742 				     node->local_weak_refs, node->tmp_refs);
3743 			binder_node_inner_unlock(node);
3744 			binder_put_node(node);
3745 			break;
3746 		}
3747 		case BC_ATTEMPT_ACQUIRE:
3748 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3749 			return -EINVAL;
3750 		case BC_ACQUIRE_RESULT:
3751 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3752 			return -EINVAL;
3753 
3754 		case BC_FREE_BUFFER: {
3755 			binder_uintptr_t data_ptr;
3756 			struct binder_buffer *buffer;
3757 
3758 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3759 				return -EFAULT;
3760 			ptr += sizeof(binder_uintptr_t);
3761 
3762 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3763 							      data_ptr);
3764 			if (IS_ERR_OR_NULL(buffer)) {
3765 				if (PTR_ERR(buffer) == -EPERM) {
3766 					binder_user_error(
3767 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3768 						proc->pid, thread->pid,
3769 						(u64)data_ptr);
3770 				} else {
3771 					binder_user_error(
3772 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3773 						proc->pid, thread->pid,
3774 						(u64)data_ptr);
3775 				}
3776 				break;
3777 			}
3778 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3779 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3780 				     proc->pid, thread->pid, (u64)data_ptr,
3781 				     buffer->debug_id,
3782 				     buffer->transaction ? "active" : "finished");
3783 			binder_free_buf(proc, buffer);
3784 			break;
3785 		}
3786 
3787 		case BC_TRANSACTION_SG:
3788 		case BC_REPLY_SG: {
3789 			struct binder_transaction_data_sg tr;
3790 
3791 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3792 				return -EFAULT;
3793 			ptr += sizeof(tr);
3794 			binder_transaction(proc, thread, &tr.transaction_data,
3795 					   cmd == BC_REPLY_SG, tr.buffers_size);
3796 			break;
3797 		}
3798 		case BC_TRANSACTION:
3799 		case BC_REPLY: {
3800 			struct binder_transaction_data tr;
3801 
3802 			if (copy_from_user(&tr, ptr, sizeof(tr)))
3803 				return -EFAULT;
3804 			ptr += sizeof(tr);
3805 			binder_transaction(proc, thread, &tr,
3806 					   cmd == BC_REPLY, 0);
3807 			break;
3808 		}
3809 
3810 		case BC_REGISTER_LOOPER:
3811 			binder_debug(BINDER_DEBUG_THREADS,
3812 				     "%d:%d BC_REGISTER_LOOPER\n",
3813 				     proc->pid, thread->pid);
3814 			binder_inner_proc_lock(proc);
3815 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3816 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3817 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3818 					proc->pid, thread->pid);
3819 			} else if (proc->requested_threads == 0) {
3820 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3821 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3822 					proc->pid, thread->pid);
3823 			} else {
3824 				proc->requested_threads--;
3825 				proc->requested_threads_started++;
3826 			}
3827 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3828 			binder_inner_proc_unlock(proc);
3829 			break;
3830 		case BC_ENTER_LOOPER:
3831 			binder_debug(BINDER_DEBUG_THREADS,
3832 				     "%d:%d BC_ENTER_LOOPER\n",
3833 				     proc->pid, thread->pid);
3834 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3835 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3836 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3837 					proc->pid, thread->pid);
3838 			}
3839 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3840 			break;
3841 		case BC_EXIT_LOOPER:
3842 			binder_debug(BINDER_DEBUG_THREADS,
3843 				     "%d:%d BC_EXIT_LOOPER\n",
3844 				     proc->pid, thread->pid);
3845 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3846 			break;
3847 
3848 		case BC_REQUEST_DEATH_NOTIFICATION:
3849 		case BC_CLEAR_DEATH_NOTIFICATION: {
3850 			uint32_t target;
3851 			binder_uintptr_t cookie;
3852 			struct binder_ref *ref;
3853 			struct binder_ref_death *death = NULL;
3854 
3855 			if (get_user(target, (uint32_t __user *)ptr))
3856 				return -EFAULT;
3857 			ptr += sizeof(uint32_t);
3858 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3859 				return -EFAULT;
3860 			ptr += sizeof(binder_uintptr_t);
3861 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3862 				/*
3863 				 * Allocate memory for death notification
3864 				 * before taking lock
3865 				 */
3866 				death = kzalloc(sizeof(*death), GFP_KERNEL);
3867 				if (death == NULL) {
3868 					WARN_ON(thread->return_error.cmd !=
3869 						BR_OK);
3870 					thread->return_error.cmd = BR_ERROR;
3871 					binder_enqueue_thread_work(
3872 						thread,
3873 						&thread->return_error.work);
3874 					binder_debug(
3875 						BINDER_DEBUG_FAILED_TRANSACTION,
3876 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3877 						proc->pid, thread->pid);
3878 					break;
3879 				}
3880 			}
3881 			binder_proc_lock(proc);
3882 			ref = binder_get_ref_olocked(proc, target, false);
3883 			if (ref == NULL) {
3884 				binder_user_error("%d:%d %s invalid ref %d\n",
3885 					proc->pid, thread->pid,
3886 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3887 					"BC_REQUEST_DEATH_NOTIFICATION" :
3888 					"BC_CLEAR_DEATH_NOTIFICATION",
3889 					target);
3890 				binder_proc_unlock(proc);
3891 				kfree(death);
3892 				break;
3893 			}
3894 
3895 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3896 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3897 				     proc->pid, thread->pid,
3898 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3899 				     "BC_REQUEST_DEATH_NOTIFICATION" :
3900 				     "BC_CLEAR_DEATH_NOTIFICATION",
3901 				     (u64)cookie, ref->data.debug_id,
3902 				     ref->data.desc, ref->data.strong,
3903 				     ref->data.weak, ref->node->debug_id);
3904 
3905 			binder_node_lock(ref->node);
3906 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3907 				if (ref->death) {
3908 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3909 						proc->pid, thread->pid);
3910 					binder_node_unlock(ref->node);
3911 					binder_proc_unlock(proc);
3912 					kfree(death);
3913 					break;
3914 				}
3915 				binder_stats_created(BINDER_STAT_DEATH);
3916 				INIT_LIST_HEAD(&death->work.entry);
3917 				death->cookie = cookie;
3918 				ref->death = death;
3919 				if (ref->node->proc == NULL) {
3920 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3921 
3922 					binder_inner_proc_lock(proc);
3923 					binder_enqueue_work_ilocked(
3924 						&ref->death->work, &proc->todo);
3925 					binder_wakeup_proc_ilocked(proc);
3926 					binder_inner_proc_unlock(proc);
3927 				}
3928 			} else {
3929 				if (ref->death == NULL) {
3930 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3931 						proc->pid, thread->pid);
3932 					binder_node_unlock(ref->node);
3933 					binder_proc_unlock(proc);
3934 					break;
3935 				}
3936 				death = ref->death;
3937 				if (death->cookie != cookie) {
3938 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3939 						proc->pid, thread->pid,
3940 						(u64)death->cookie,
3941 						(u64)cookie);
3942 					binder_node_unlock(ref->node);
3943 					binder_proc_unlock(proc);
3944 					break;
3945 				}
3946 				ref->death = NULL;
3947 				binder_inner_proc_lock(proc);
3948 				if (list_empty(&death->work.entry)) {
3949 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3950 					if (thread->looper &
3951 					    (BINDER_LOOPER_STATE_REGISTERED |
3952 					     BINDER_LOOPER_STATE_ENTERED))
3953 						binder_enqueue_thread_work_ilocked(
3954 								thread,
3955 								&death->work);
3956 					else {
3957 						binder_enqueue_work_ilocked(
3958 								&death->work,
3959 								&proc->todo);
3960 						binder_wakeup_proc_ilocked(
3961 								proc);
3962 					}
3963 				} else {
3964 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3965 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3966 				}
3967 				binder_inner_proc_unlock(proc);
3968 			}
3969 			binder_node_unlock(ref->node);
3970 			binder_proc_unlock(proc);
3971 		} break;
3972 		case BC_DEAD_BINDER_DONE: {
3973 			struct binder_work *w;
3974 			binder_uintptr_t cookie;
3975 			struct binder_ref_death *death = NULL;
3976 
3977 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3978 				return -EFAULT;
3979 
3980 			ptr += sizeof(cookie);
3981 			binder_inner_proc_lock(proc);
3982 			list_for_each_entry(w, &proc->delivered_death,
3983 					    entry) {
3984 				struct binder_ref_death *tmp_death =
3985 					container_of(w,
3986 						     struct binder_ref_death,
3987 						     work);
3988 
3989 				if (tmp_death->cookie == cookie) {
3990 					death = tmp_death;
3991 					break;
3992 				}
3993 			}
3994 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3995 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3996 				     proc->pid, thread->pid, (u64)cookie,
3997 				     death);
3998 			if (death == NULL) {
3999 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4000 					proc->pid, thread->pid, (u64)cookie);
4001 				binder_inner_proc_unlock(proc);
4002 				break;
4003 			}
4004 			binder_dequeue_work_ilocked(&death->work);
4005 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4006 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4007 				if (thread->looper &
4008 					(BINDER_LOOPER_STATE_REGISTERED |
4009 					 BINDER_LOOPER_STATE_ENTERED))
4010 					binder_enqueue_thread_work_ilocked(
4011 						thread, &death->work);
4012 				else {
4013 					binder_enqueue_work_ilocked(
4014 							&death->work,
4015 							&proc->todo);
4016 					binder_wakeup_proc_ilocked(proc);
4017 				}
4018 			}
4019 			binder_inner_proc_unlock(proc);
4020 		} break;
4021 
4022 		default:
4023 			pr_err("%d:%d unknown command %d\n",
4024 			       proc->pid, thread->pid, cmd);
4025 			return -EINVAL;
4026 		}
4027 		*consumed = ptr - buffer;
4028 	}
4029 	return 0;
4030 }
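
/*
 * The write loop above consumes a packed command stream: each entry is
 * a 32-bit BC_* code immediately followed by that command's payload.
 * Illustrative sketch (not driver code) of a user-space buffer carrying
 * two commands, assuming the UAPI layouts:
 *
 *	unsigned char buf[2 * sizeof(uint32_t) + sizeof(binder_uintptr_t)];
 *	unsigned char *p = buf;
 *
 *	*(uint32_t *)p = BC_ENTER_LOOPER;	no payload
 *	p += sizeof(uint32_t);
 *	*(uint32_t *)p = BC_FREE_BUFFER;	payload: buffer address
 *	p += sizeof(uint32_t);
 *	*(binder_uintptr_t *)p = buffer_ptr;
 *
 * "buffer_ptr" is a hypothetical address previously received in a
 * BR_TRANSACTION. Note that *consumed is advanced after each command,
 * so a buffer rejected mid-stream can be resubmitted from where
 * parsing stopped.
 */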
4031 
4032 static void binder_stat_br(struct binder_proc *proc,
4033 			   struct binder_thread *thread, uint32_t cmd)
4034 {
4035 	trace_binder_return(cmd);
4036 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4037 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4038 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4039 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4040 	}
4041 }
4042 
4043 static int binder_put_node_cmd(struct binder_proc *proc,
4044 			       struct binder_thread *thread,
4045 			       void __user **ptrp,
4046 			       binder_uintptr_t node_ptr,
4047 			       binder_uintptr_t node_cookie,
4048 			       int node_debug_id,
4049 			       uint32_t cmd, const char *cmd_name)
4050 {
4051 	void __user *ptr = *ptrp;
4052 
4053 	if (put_user(cmd, (uint32_t __user *)ptr))
4054 		return -EFAULT;
4055 	ptr += sizeof(uint32_t);
4056 
4057 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4058 		return -EFAULT;
4059 	ptr += sizeof(binder_uintptr_t);
4060 
4061 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4062 		return -EFAULT;
4063 	ptr += sizeof(binder_uintptr_t);
4064 
4065 	binder_stat_br(proc, thread, cmd);
4066 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4067 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4068 		     (u64)node_ptr, (u64)node_cookie);
4069 
4070 	*ptrp = ptr;
4071 	return 0;
4072 }
4073 
4074 static int binder_wait_for_work(struct binder_thread *thread,
4075 				bool do_proc_work)
4076 {
4077 	DEFINE_WAIT(wait);
4078 	struct binder_proc *proc = thread->proc;
4079 	int ret = 0;
4080 
4081 	freezer_do_not_count();
4082 	binder_inner_proc_lock(proc);
4083 	for (;;) {
4084 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4085 		if (binder_has_work_ilocked(thread, do_proc_work))
4086 			break;
4087 		if (do_proc_work)
4088 			list_add(&thread->waiting_thread_node,
4089 				 &proc->waiting_threads);
4090 		binder_inner_proc_unlock(proc);
4091 		schedule();
4092 		binder_inner_proc_lock(proc);
4093 		list_del_init(&thread->waiting_thread_node);
4094 		if (signal_pending(current)) {
4095 			ret = -ERESTARTSYS;
4096 			break;
4097 		}
4098 	}
4099 	finish_wait(&thread->wait, &wait);
4100 	binder_inner_proc_unlock(proc);
4101 	freezer_count();
4102 
4103 	return ret;
4104 }
4105 
4106 /**
4107  * binder_apply_fd_fixups() - finish fd translation
4108  * @proc:	binder_proc associated with @t->buffer
4109  * @t:	binder transaction with list of fd fixups
4110  *
4111  * Now that we are in the context of the transaction target
4112  * process, we can allocate and install fds. Process the
4113  * list of fds to translate and fix up the buffer with the
4114  * new fds.
4115  *
4116  * If we fail to allocate an fd, then free the resources by
4117  * fput'ing files that have not been processed and closing
4118  * (via binder_deferred_fd_close()) any fds already installed.
4119  */
4120 static int binder_apply_fd_fixups(struct binder_proc *proc,
4121 				  struct binder_transaction *t)
4122 {
4123 	struct binder_txn_fd_fixup *fixup, *tmp;
4124 	int ret = 0;
4125 
4126 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4127 		int fd = get_unused_fd_flags(O_CLOEXEC);
4128 
4129 		if (fd < 0) {
4130 			binder_debug(BINDER_DEBUG_TRANSACTION,
4131 				     "failed fd fixup txn %d fd %d\n",
4132 				     t->debug_id, fd);
4133 			ret = -ENOMEM;
4134 			break;
4135 		}
4136 		binder_debug(BINDER_DEBUG_TRANSACTION,
4137 			     "fd fixup txn %d fd %d\n",
4138 			     t->debug_id, fd);
4139 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4140 		fd_install(fd, fixup->file);
4141 		fixup->file = NULL;
4142 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4143 						fixup->offset, &fd,
4144 						sizeof(u32))) {
4145 			ret = -EINVAL;
4146 			break;
4147 		}
4148 	}
4149 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4150 		if (fixup->file) {
4151 			fput(fixup->file);
4152 		} else if (ret) {
4153 			u32 fd;
4154 			int err;
4155 
4156 			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4157 							    t->buffer,
4158 							    fixup->offset,
4159 							    sizeof(fd));
4160 			WARN_ON(err);
4161 			if (!err)
4162 				binder_deferred_fd_close(fd);
4163 		}
4164 		list_del(&fixup->fixup_entry);
4165 		kfree(fixup);
4166 	}
4167 
4168 	return ret;
4169 }
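
/*
 * Worked example (illustrative values only): suppose the sender passed
 * fd 7 in a BINDER_TYPE_FD object whose fd slot sits at offset X of the
 * payload; binder_translate_fd() stashed the struct file and recorded X
 * in a binder_txn_fd_fixup. Here, in the receiver's context:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);	say this returns 12
 *	fd_install(12, file);			12 now refers to the file
 *	write 12 over the u32 at offset X	receiver sees its own fd
 *
 * so the receiver's copy of the object carries a descriptor valid in
 * its own fd table rather than the sender's 7.
 */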
4170 
4171 static int binder_thread_read(struct binder_proc *proc,
4172 			      struct binder_thread *thread,
4173 			      binder_uintptr_t binder_buffer, size_t size,
4174 			      binder_size_t *consumed, int non_block)
4175 {
4176 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4177 	void __user *ptr = buffer + *consumed;
4178 	void __user *end = buffer + size;
4179 
4180 	int ret = 0;
4181 	int wait_for_proc_work;
4182 
4183 	if (*consumed == 0) {
4184 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4185 			return -EFAULT;
4186 		ptr += sizeof(uint32_t);
4187 	}
4188 
4189 retry:
4190 	binder_inner_proc_lock(proc);
4191 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4192 	binder_inner_proc_unlock(proc);
4193 
4194 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4195 
4196 	trace_binder_wait_for_work(wait_for_proc_work,
4197 				   !!thread->transaction_stack,
4198 				   !binder_worklist_empty(proc, &thread->todo));
4199 	if (wait_for_proc_work) {
4200 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4201 					BINDER_LOOPER_STATE_ENTERED))) {
4202 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4203 				proc->pid, thread->pid, thread->looper);
4204 			wait_event_interruptible(binder_user_error_wait,
4205 						 binder_stop_on_user_error < 2);
4206 		}
4207 		binder_set_nice(proc->default_priority);
4208 	}
4209 
4210 	if (non_block) {
4211 		if (!binder_has_work(thread, wait_for_proc_work))
4212 			ret = -EAGAIN;
4213 	} else {
4214 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4215 	}
4216 
4217 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4218 
4219 	if (ret)
4220 		return ret;
4221 
4222 	while (1) {
4223 		uint32_t cmd;
4224 		struct binder_transaction_data_secctx tr;
4225 		struct binder_transaction_data *trd = &tr.transaction_data;
4226 		struct binder_work *w = NULL;
4227 		struct list_head *list = NULL;
4228 		struct binder_transaction *t = NULL;
4229 		struct binder_thread *t_from;
4230 		size_t trsize = sizeof(*trd);
4231 
4232 		binder_inner_proc_lock(proc);
4233 		if (!binder_worklist_empty_ilocked(&thread->todo))
4234 			list = &thread->todo;
4235 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4236 			   wait_for_proc_work)
4237 			list = &proc->todo;
4238 		else {
4239 			binder_inner_proc_unlock(proc);
4240 
4241 			/* no data added */
4242 			if (ptr - buffer == 4 && !thread->looper_need_return)
4243 				goto retry;
4244 			break;
4245 		}
4246 
4247 		if (end - ptr < sizeof(tr) + 4) { /* tr plus 32-bit cmd */
4248 			binder_inner_proc_unlock(proc);
4249 			break;
4250 		}
4251 		w = binder_dequeue_work_head_ilocked(list);
4252 		if (binder_worklist_empty_ilocked(&thread->todo))
4253 			thread->process_todo = false;
4254 
4255 		switch (w->type) {
4256 		case BINDER_WORK_TRANSACTION: {
4257 			binder_inner_proc_unlock(proc);
4258 			t = container_of(w, struct binder_transaction, work);
4259 		} break;
4260 		case BINDER_WORK_RETURN_ERROR: {
4261 			struct binder_error *e = container_of(
4262 					w, struct binder_error, work);
4263 
4264 			WARN_ON(e->cmd == BR_OK);
4265 			binder_inner_proc_unlock(proc);
4266 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4267 				return -EFAULT;
4268 			cmd = e->cmd;
4269 			e->cmd = BR_OK;
4270 			ptr += sizeof(uint32_t);
4271 
4272 			binder_stat_br(proc, thread, cmd);
4273 		} break;
4274 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4275 			binder_inner_proc_unlock(proc);
4276 			cmd = BR_TRANSACTION_COMPLETE;
4277 			kfree(w);
4278 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4279 			if (put_user(cmd, (uint32_t __user *)ptr))
4280 				return -EFAULT;
4281 			ptr += sizeof(uint32_t);
4282 
4283 			binder_stat_br(proc, thread, cmd);
4284 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4285 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4286 				     proc->pid, thread->pid);
4287 		} break;
4288 		case BINDER_WORK_NODE: {
4289 			struct binder_node *node = container_of(w, struct binder_node, work);
4290 			int strong, weak;
4291 			binder_uintptr_t node_ptr = node->ptr;
4292 			binder_uintptr_t node_cookie = node->cookie;
4293 			int node_debug_id = node->debug_id;
4294 			int has_weak_ref;
4295 			int has_strong_ref;
4296 			void __user *orig_ptr = ptr;
4297 
4298 			BUG_ON(proc != node->proc);
4299 			strong = node->internal_strong_refs ||
4300 					node->local_strong_refs;
4301 			weak = !hlist_empty(&node->refs) ||
4302 					node->local_weak_refs ||
4303 					node->tmp_refs || strong;
4304 			has_strong_ref = node->has_strong_ref;
4305 			has_weak_ref = node->has_weak_ref;
4306 
4307 			if (weak && !has_weak_ref) {
4308 				node->has_weak_ref = 1;
4309 				node->pending_weak_ref = 1;
4310 				node->local_weak_refs++;
4311 			}
4312 			if (strong && !has_strong_ref) {
4313 				node->has_strong_ref = 1;
4314 				node->pending_strong_ref = 1;
4315 				node->local_strong_refs++;
4316 			}
4317 			if (!strong && has_strong_ref)
4318 				node->has_strong_ref = 0;
4319 			if (!weak && has_weak_ref)
4320 				node->has_weak_ref = 0;
4321 			if (!weak && !strong) {
4322 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4323 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4324 					     proc->pid, thread->pid,
4325 					     node_debug_id,
4326 					     (u64)node_ptr,
4327 					     (u64)node_cookie);
4328 				rb_erase(&node->rb_node, &proc->nodes);
4329 				binder_inner_proc_unlock(proc);
4330 				binder_node_lock(node);
4331 				/*
4332 				 * Acquire the node lock before freeing the
4333 				 * node to serialize with other threads that
4334 				 * may have been holding the node lock while
4335 				 * decrementing this node (avoids race where
4336 				 * this thread frees while the other thread
4337 				 * is unlocking the node after the final
4338 				 * decrement)
4339 				 */
4340 				binder_node_unlock(node);
4341 				binder_free_node(node);
4342 			} else
4343 				binder_inner_proc_unlock(proc);
4344 
4345 			if (weak && !has_weak_ref)
4346 				ret = binder_put_node_cmd(
4347 						proc, thread, &ptr, node_ptr,
4348 						node_cookie, node_debug_id,
4349 						BR_INCREFS, "BR_INCREFS");
4350 			if (!ret && strong && !has_strong_ref)
4351 				ret = binder_put_node_cmd(
4352 						proc, thread, &ptr, node_ptr,
4353 						node_cookie, node_debug_id,
4354 						BR_ACQUIRE, "BR_ACQUIRE");
4355 			if (!ret && !strong && has_strong_ref)
4356 				ret = binder_put_node_cmd(
4357 						proc, thread, &ptr, node_ptr,
4358 						node_cookie, node_debug_id,
4359 						BR_RELEASE, "BR_RELEASE");
4360 			if (!ret && !weak && has_weak_ref)
4361 				ret = binder_put_node_cmd(
4362 						proc, thread, &ptr, node_ptr,
4363 						node_cookie, node_debug_id,
4364 						BR_DECREFS, "BR_DECREFS");
4365 			if (orig_ptr == ptr)
4366 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4367 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4368 					     proc->pid, thread->pid,
4369 					     node_debug_id,
4370 					     (u64)node_ptr,
4371 					     (u64)node_cookie);
4372 			if (ret)
4373 				return ret;
4374 		} break;
4375 		case BINDER_WORK_DEAD_BINDER:
4376 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4377 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4378 			struct binder_ref_death *death;
4379 			uint32_t cmd;
4380 			binder_uintptr_t cookie;
4381 
4382 			death = container_of(w, struct binder_ref_death, work);
4383 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4384 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4385 			else
4386 				cmd = BR_DEAD_BINDER;
4387 			cookie = death->cookie;
4388 
4389 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4390 				     "%d:%d %s %016llx\n",
4391 				      proc->pid, thread->pid,
4392 				      cmd == BR_DEAD_BINDER ?
4393 				      "BR_DEAD_BINDER" :
4394 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4395 				      (u64)cookie);
4396 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4397 				binder_inner_proc_unlock(proc);
4398 				kfree(death);
4399 				binder_stats_deleted(BINDER_STAT_DEATH);
4400 			} else {
4401 				binder_enqueue_work_ilocked(
4402 						w, &proc->delivered_death);
4403 				binder_inner_proc_unlock(proc);
4404 			}
4405 			if (put_user(cmd, (uint32_t __user *)ptr))
4406 				return -EFAULT;
4407 			ptr += sizeof(uint32_t);
4408 			if (put_user(cookie,
4409 				     (binder_uintptr_t __user *)ptr))
4410 				return -EFAULT;
4411 			ptr += sizeof(binder_uintptr_t);
4412 			binder_stat_br(proc, thread, cmd);
4413 			if (cmd == BR_DEAD_BINDER)
4414 				goto done; /* DEAD_BINDER notifications can cause transactions */
4415 		} break;
4416 		default:
4417 			binder_inner_proc_unlock(proc);
4418 			pr_err("%d:%d: bad work type %d\n",
4419 			       proc->pid, thread->pid, w->type);
4420 			break;
4421 		}
4422 
4423 		if (!t)
4424 			continue;
4425 
4426 		BUG_ON(t->buffer == NULL);
4427 		if (t->buffer->target_node) {
4428 			struct binder_node *target_node = t->buffer->target_node;
4429 
4430 			trd->target.ptr = target_node->ptr;
4431 			trd->cookie =  target_node->cookie;
4432 			t->saved_priority = task_nice(current);
4433 			if (t->priority < target_node->min_priority &&
4434 			    !(t->flags & TF_ONE_WAY))
4435 				binder_set_nice(t->priority);
4436 			else if (!(t->flags & TF_ONE_WAY) ||
4437 				 t->saved_priority > target_node->min_priority)
4438 				binder_set_nice(target_node->min_priority);
4439 			cmd = BR_TRANSACTION;
4440 		} else {
4441 			trd->target.ptr = 0;
4442 			trd->cookie = 0;
4443 			cmd = BR_REPLY;
4444 		}
4445 		trd->code = t->code;
4446 		trd->flags = t->flags;
4447 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4448 
4449 		t_from = binder_get_txn_from(t);
4450 		if (t_from) {
4451 			struct task_struct *sender = t_from->proc->tsk;
4452 
4453 			trd->sender_pid =
4454 				task_tgid_nr_ns(sender,
4455 						task_active_pid_ns(current));
4456 		} else {
4457 			trd->sender_pid = 0;
4458 		}
4459 
4460 		ret = binder_apply_fd_fixups(proc, t);
4461 		if (ret) {
4462 			struct binder_buffer *buffer = t->buffer;
4463 			bool oneway = !!(t->flags & TF_ONE_WAY);
4464 			int tid = t->debug_id;
4465 
4466 			if (t_from)
4467 				binder_thread_dec_tmpref(t_from);
4468 			buffer->transaction = NULL;
4469 			binder_cleanup_transaction(t, "fd fixups failed",
4470 						   BR_FAILED_REPLY);
4471 			binder_free_buf(proc, buffer);
4472 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4473 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4474 				     proc->pid, thread->pid,
4475 				     oneway ? "async " :
4476 					(cmd == BR_REPLY ? "reply " : ""),
4477 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4478 			if (cmd == BR_REPLY) {
4479 				cmd = BR_FAILED_REPLY;
4480 				if (put_user(cmd, (uint32_t __user *)ptr))
4481 					return -EFAULT;
4482 				ptr += sizeof(uint32_t);
4483 				binder_stat_br(proc, thread, cmd);
4484 				break;
4485 			}
4486 			continue;
4487 		}
4488 		trd->data_size = t->buffer->data_size;
4489 		trd->offsets_size = t->buffer->offsets_size;
4490 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4491 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4492 					ALIGN(t->buffer->data_size,
4493 					    sizeof(void *));
4494 
4495 		tr.secctx = t->security_ctx;
4496 		if (t->security_ctx) {
4497 			cmd = BR_TRANSACTION_SEC_CTX;
4498 			trsize = sizeof(tr);
4499 		}
4500 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4501 			if (t_from)
4502 				binder_thread_dec_tmpref(t_from);
4503 
4504 			binder_cleanup_transaction(t, "put_user failed",
4505 						   BR_FAILED_REPLY);
4506 
4507 			return -EFAULT;
4508 		}
4509 		ptr += sizeof(uint32_t);
4510 		if (copy_to_user(ptr, &tr, trsize)) {
4511 			if (t_from)
4512 				binder_thread_dec_tmpref(t_from);
4513 
4514 			binder_cleanup_transaction(t, "copy_to_user failed",
4515 						   BR_FAILED_REPLY);
4516 
4517 			return -EFAULT;
4518 		}
4519 		ptr += trsize;
4520 
4521 		trace_binder_transaction_received(t);
4522 		binder_stat_br(proc, thread, cmd);
4523 		binder_debug(BINDER_DEBUG_TRANSACTION,
4524 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4525 			     proc->pid, thread->pid,
4526 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4527 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4528 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4529 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4530 			     t_from ? t_from->pid : 0, cmd,
4531 			     t->buffer->data_size, t->buffer->offsets_size,
4532 			     (u64)trd->data.ptr.buffer,
4533 			     (u64)trd->data.ptr.offsets);
4534 
4535 		if (t_from)
4536 			binder_thread_dec_tmpref(t_from);
4537 		t->buffer->allow_user_free = 1;
4538 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4539 			binder_inner_proc_lock(thread->proc);
4540 			t->to_parent = thread->transaction_stack;
4541 			t->to_thread = thread;
4542 			thread->transaction_stack = t;
4543 			binder_inner_proc_unlock(thread->proc);
4544 		} else {
4545 			binder_free_transaction(t);
4546 		}
4547 		break;
4548 	}
4549 
4550 done:
4551 
4552 	*consumed = ptr - buffer;
4553 	binder_inner_proc_lock(proc);
4554 	if (proc->requested_threads == 0 &&
4555 	    list_empty(&thread->proc->waiting_threads) &&
4556 	    proc->requested_threads_started < proc->max_threads &&
4557 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4558 	     BINDER_LOOPER_STATE_ENTERED))
4559 	    /* the user-space code fails to spawn a new thread if we leave this out */) {
4560 		proc->requested_threads++;
4561 		binder_inner_proc_unlock(proc);
4562 		binder_debug(BINDER_DEBUG_THREADS,
4563 			     "%d:%d BR_SPAWN_LOOPER\n",
4564 			     proc->pid, thread->pid);
4565 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4566 			return -EFAULT;
4567 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4568 	} else
4569 		binder_inner_proc_unlock(proc);
4570 	return 0;
4571 }
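
/*
 * Illustrative user-space parsing of the read buffer filled above (not
 * driver code). The buffer is a packed stream of BR_* codes, each
 * followed by its payload; a fresh read begins with BR_NOOP. A minimal
 * sketch, assuming the UAPI types:
 *
 *	unsigned char *p = read_buf;
 *	unsigned char *end = read_buf + bwr.read_consumed;
 *
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data *trd = (void *)p;
 *			p += sizeof(*trd);
 *			handle_transaction(trd);
 *			break;
 *		}
 *		...handle every other BR_* code, including BR_SPAWN_LOOPER...
 *		}
 *	}
 *
 * "read_buf", "bwr" and "handle_transaction" are hypothetical names;
 * this is a sketch of the protocol, not a complete client.
 */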
4572 
4573 static void binder_release_work(struct binder_proc *proc,
4574 				struct list_head *list)
4575 {
4576 	struct binder_work *w;
4577 
4578 	while (1) {
4579 		w = binder_dequeue_work_head(proc, list);
4580 		if (!w)
4581 			return;
4582 
4583 		switch (w->type) {
4584 		case BINDER_WORK_TRANSACTION: {
4585 			struct binder_transaction *t;
4586 
4587 			t = container_of(w, struct binder_transaction, work);
4588 
4589 			binder_cleanup_transaction(t, "process died.",
4590 						   BR_DEAD_REPLY);
4591 		} break;
4592 		case BINDER_WORK_RETURN_ERROR: {
4593 			struct binder_error *e = container_of(
4594 					w, struct binder_error, work);
4595 
4596 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4597 				"undelivered TRANSACTION_ERROR: %u\n",
4598 				e->cmd);
4599 		} break;
4600 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4601 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4602 				"undelivered TRANSACTION_COMPLETE\n");
4603 			kfree(w);
4604 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4605 		} break;
4606 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4607 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4608 			struct binder_ref_death *death;
4609 
4610 			death = container_of(w, struct binder_ref_death, work);
4611 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4612 				"undelivered death notification, %016llx\n",
4613 				(u64)death->cookie);
4614 			kfree(death);
4615 			binder_stats_deleted(BINDER_STAT_DEATH);
4616 		} break;
4617 		default:
4618 			pr_err("unexpected work type, %d, not freed\n",
4619 			       w->type);
4620 			break;
4621 		}
4622 	}
4623 
4624 }
4625 
4626 static struct binder_thread *binder_get_thread_ilocked(
4627 		struct binder_proc *proc, struct binder_thread *new_thread)
4628 {
4629 	struct binder_thread *thread = NULL;
4630 	struct rb_node *parent = NULL;
4631 	struct rb_node **p = &proc->threads.rb_node;
4632 
4633 	while (*p) {
4634 		parent = *p;
4635 		thread = rb_entry(parent, struct binder_thread, rb_node);
4636 
4637 		if (current->pid < thread->pid)
4638 			p = &(*p)->rb_left;
4639 		else if (current->pid > thread->pid)
4640 			p = &(*p)->rb_right;
4641 		else
4642 			return thread;
4643 	}
4644 	if (!new_thread)
4645 		return NULL;
4646 	thread = new_thread;
4647 	binder_stats_created(BINDER_STAT_THREAD);
4648 	thread->proc = proc;
4649 	thread->pid = current->pid;
4650 	atomic_set(&thread->tmp_ref, 0);
4651 	init_waitqueue_head(&thread->wait);
4652 	INIT_LIST_HEAD(&thread->todo);
4653 	rb_link_node(&thread->rb_node, parent, p);
4654 	rb_insert_color(&thread->rb_node, &proc->threads);
4655 	thread->looper_need_return = true;
4656 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4657 	thread->return_error.cmd = BR_OK;
4658 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4659 	thread->reply_error.cmd = BR_OK;
4660 	INIT_LIST_HEAD(&thread->waiting_thread_node);
4661 	return thread;
4662 }
4663 
4664 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4665 {
4666 	struct binder_thread *thread;
4667 	struct binder_thread *new_thread;
4668 
4669 	binder_inner_proc_lock(proc);
4670 	thread = binder_get_thread_ilocked(proc, NULL);
4671 	binder_inner_proc_unlock(proc);
4672 	if (!thread) {
4673 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4674 		if (new_thread == NULL)
4675 			return NULL;
4676 		binder_inner_proc_lock(proc);
4677 		thread = binder_get_thread_ilocked(proc, new_thread);
4678 		binder_inner_proc_unlock(proc);
4679 		if (thread != new_thread)
4680 			kfree(new_thread);
4681 	}
4682 	return thread;
4683 }
4684 
4685 static void binder_free_proc(struct binder_proc *proc)
4686 {
4687 	BUG_ON(!list_empty(&proc->todo));
4688 	BUG_ON(!list_empty(&proc->delivered_death));
4689 	binder_alloc_deferred_release(&proc->alloc);
4690 	put_task_struct(proc->tsk);
4691 	binder_stats_deleted(BINDER_STAT_PROC);
4692 	kfree(proc);
4693 }
4694 
4695 static void binder_free_thread(struct binder_thread *thread)
4696 {
4697 	BUG_ON(!list_empty(&thread->todo));
4698 	binder_stats_deleted(BINDER_STAT_THREAD);
4699 	binder_proc_dec_tmpref(thread->proc);
4700 	kfree(thread);
4701 }
4702 
4703 static int binder_thread_release(struct binder_proc *proc,
4704 				 struct binder_thread *thread)
4705 {
4706 	struct binder_transaction *t;
4707 	struct binder_transaction *send_reply = NULL;
4708 	int active_transactions = 0;
4709 	struct binder_transaction *last_t = NULL;
4710 
4711 	binder_inner_proc_lock(thread->proc);
4712 	/*
4713 	 * take a ref on the proc so it survives
4714 	 * after we remove this thread from proc->threads.
4715 	 * The corresponding decrement happens when we
4716 	 * actually free the thread in binder_free_thread()
4717 	 */
4718 	proc->tmp_ref++;
4719 	/*
4720 	 * take a ref on this thread to ensure it
4721 	 * survives while we are releasing it
4722 	 */
4723 	atomic_inc(&thread->tmp_ref);
4724 	rb_erase(&thread->rb_node, &proc->threads);
4725 	t = thread->transaction_stack;
4726 	if (t) {
4727 		spin_lock(&t->lock);
4728 		if (t->to_thread == thread)
4729 			send_reply = t;
4730 	} else {
4731 		__acquire(&t->lock);
4732 	}
4733 	thread->is_dead = true;
4734 
4735 	while (t) {
4736 		last_t = t;
4737 		active_transactions++;
4738 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4739 			     "release %d:%d transaction %d %s, still active\n",
4740 			      proc->pid, thread->pid,
4741 			     t->debug_id,
4742 			     (t->to_thread == thread) ? "in" : "out");
4743 
4744 		if (t->to_thread == thread) {
4745 			t->to_proc = NULL;
4746 			t->to_thread = NULL;
4747 			if (t->buffer) {
4748 				t->buffer->transaction = NULL;
4749 				t->buffer = NULL;
4750 			}
4751 			t = t->to_parent;
4752 		} else if (t->from == thread) {
4753 			t->from = NULL;
4754 			t = t->from_parent;
4755 		} else
4756 			BUG();
4757 		spin_unlock(&last_t->lock);
4758 		if (t)
4759 			spin_lock(&t->lock);
4760 		else
4761 			__acquire(&t->lock);
4762 	}
4763 	/* annotation for sparse, lock not acquired in last iteration above */
4764 	__release(&t->lock);
4765 
4766 	/*
4767 	 * If this thread used poll, make sure we remove the waitqueue
4768 	 * from any epoll data structures holding it with POLLFREE.
4769 	 * waitqueue_active() is safe to use here because we're holding
4770 	 * the inner lock.
4771 	 */
4772 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4773 	    waitqueue_active(&thread->wait)) {
4774 		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4775 	}
4776 
4777 	binder_inner_proc_unlock(thread->proc);
4778 
4779 	/*
4780 	 * This is needed to avoid races between wake_up_poll() above and
4781 	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4782 	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4783 	 * lock, so we can be sure it's done after calling synchronize_rcu().
4784 	 */
4785 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4786 		synchronize_rcu();
4787 
4788 	if (send_reply)
4789 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4790 	binder_release_work(proc, &thread->todo);
4791 	binder_thread_dec_tmpref(thread);
4792 	return active_transactions;
4793 }
4794 
4795 static __poll_t binder_poll(struct file *filp,
4796 				struct poll_table_struct *wait)
4797 {
4798 	struct binder_proc *proc = filp->private_data;
4799 	struct binder_thread *thread = NULL;
4800 	bool wait_for_proc_work;
4801 
4802 	thread = binder_get_thread(proc);
4803 	if (!thread)
4804 		return POLLERR;
4805 
4806 	binder_inner_proc_lock(thread->proc);
4807 	thread->looper |= BINDER_LOOPER_STATE_POLL;
4808 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4809 
4810 	binder_inner_proc_unlock(thread->proc);
4811 
4812 	poll_wait(filp, &thread->wait, wait);
4813 
4814 	if (binder_has_work(thread, wait_for_proc_work))
4815 		return EPOLLIN;
4816 
4817 	return 0;
4818 }
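
/*
 * Illustrative user-space usage (not driver code): a thread may sleep
 * in poll(2) or epoll(7) instead of blocking inside BINDER_WRITE_READ;
 * EPOLLIN from the function above means a subsequent read will find
 * work. A minimal sketch with a hypothetical drain helper:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_binder_commands(binder_fd);
 */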
4819 
4820 static int binder_ioctl_write_read(struct file *filp,
4821 				unsigned int cmd, unsigned long arg,
4822 				struct binder_thread *thread)
4823 {
4824 	int ret = 0;
4825 	struct binder_proc *proc = filp->private_data;
4826 	unsigned int size = _IOC_SIZE(cmd);
4827 	void __user *ubuf = (void __user *)arg;
4828 	struct binder_write_read bwr;
4829 
4830 	if (size != sizeof(struct binder_write_read)) {
4831 		ret = -EINVAL;
4832 		goto out;
4833 	}
4834 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4835 		ret = -EFAULT;
4836 		goto out;
4837 	}
4838 	binder_debug(BINDER_DEBUG_READ_WRITE,
4839 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4840 		     proc->pid, thread->pid,
4841 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4842 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4843 
4844 	if (bwr.write_size > 0) {
4845 		ret = binder_thread_write(proc, thread,
4846 					  bwr.write_buffer,
4847 					  bwr.write_size,
4848 					  &bwr.write_consumed);
4849 		trace_binder_write_done(ret);
4850 		if (ret < 0) {
4851 			bwr.read_consumed = 0;
4852 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4853 				ret = -EFAULT;
4854 			goto out;
4855 		}
4856 	}
4857 	if (bwr.read_size > 0) {
4858 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4859 					 bwr.read_size,
4860 					 &bwr.read_consumed,
4861 					 filp->f_flags & O_NONBLOCK);
4862 		trace_binder_read_done(ret);
4863 		binder_inner_proc_lock(proc);
4864 		if (!binder_worklist_empty_ilocked(&proc->todo))
4865 			binder_wakeup_proc_ilocked(proc);
4866 		binder_inner_proc_unlock(proc);
4867 		if (ret < 0) {
4868 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4869 				ret = -EFAULT;
4870 			goto out;
4871 		}
4872 	}
4873 	binder_debug(BINDER_DEBUG_READ_WRITE,
4874 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4875 		     proc->pid, thread->pid,
4876 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4877 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4878 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4879 		ret = -EFAULT;
4880 		goto out;
4881 	}
4882 out:
4883 	return ret;
4884 }
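
/*
 * Illustrative user-space invocation (not driver code): one
 * BINDER_WRITE_READ ioctl can both submit commands and wait for
 * returned work. A minimal sketch, assuming the UAPI types; the
 * designated initializer leaves the *_consumed fields at zero, which
 * the driver then updates in place:
 *
 *	struct binder_write_read bwr = {
 *		.write_size = write_len,
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.read_size = sizeof(read_buf),
 *		.read_buffer = (binder_uintptr_t)read_buf,
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -errno;
 *
 * "write_buf", "write_len", "read_buf" and "binder_fd" are
 * hypothetical names for this sketch.
 */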
4885 
4886 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4887 				    struct flat_binder_object *fbo)
4888 {
4889 	int ret = 0;
4890 	struct binder_proc *proc = filp->private_data;
4891 	struct binder_context *context = proc->context;
4892 	struct binder_node *new_node;
4893 	kuid_t curr_euid = current_euid();
4894 
4895 	mutex_lock(&context->context_mgr_node_lock);
4896 	if (context->binder_context_mgr_node) {
4897 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4898 		ret = -EBUSY;
4899 		goto out;
4900 	}
4901 	ret = security_binder_set_context_mgr(proc->tsk);
4902 	if (ret < 0)
4903 		goto out;
4904 	if (uid_valid(context->binder_context_mgr_uid)) {
4905 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4906 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4907 			       from_kuid(&init_user_ns, curr_euid),
4908 			       from_kuid(&init_user_ns,
4909 					 context->binder_context_mgr_uid));
4910 			ret = -EPERM;
4911 			goto out;
4912 		}
4913 	} else {
4914 		context->binder_context_mgr_uid = curr_euid;
4915 	}
4916 	new_node = binder_new_node(proc, fbo);
4917 	if (!new_node) {
4918 		ret = -ENOMEM;
4919 		goto out;
4920 	}
4921 	binder_node_lock(new_node);
4922 	new_node->local_weak_refs++;
4923 	new_node->local_strong_refs++;
4924 	new_node->has_strong_ref = 1;
4925 	new_node->has_weak_ref = 1;
4926 	context->binder_context_mgr_node = new_node;
4927 	binder_node_unlock(new_node);
4928 	binder_put_node(new_node);
4929 out:
4930 	mutex_unlock(&context->context_mgr_node_lock);
4931 	return ret;
4932 }
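
/*
 * Editor's note (illustrative): exactly one process per context may become
 * the context manager (handle 0); on Android this is servicemanager. A
 * minimal sketch, assuming an open binder fd "bfd":
 *
 *	// legacy form, no flat_binder_object (fbo == NULL above):
 *	ioctl(bfd, BINDER_SET_CONTEXT_MGR, 0);
 *
 *	// extended form, which can carry flags such as
 *	// FLAT_BINDER_FLAG_TXN_SECURITY_CTX:
 *	struct flat_binder_object fbo = { .flags = 0 };
 *	ioctl(bfd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 */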
4933 
4934 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4935 		struct binder_node_info_for_ref *info)
4936 {
4937 	struct binder_node *node;
4938 	struct binder_context *context = proc->context;
4939 	__u32 handle = info->handle;
4940 
4941 	if (info->strong_count || info->weak_count || info->reserved1 ||
4942 	    info->reserved2 || info->reserved3) {
4943 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4944 				  proc->pid);
4945 		return -EINVAL;
4946 	}
4947 
4948 	/* This ioctl may only be used by the context manager */
4949 	mutex_lock(&context->context_mgr_node_lock);
4950 	if (!context->binder_context_mgr_node ||
4951 		context->binder_context_mgr_node->proc != proc) {
4952 		mutex_unlock(&context->context_mgr_node_lock);
4953 		return -EPERM;
4954 	}
4955 	mutex_unlock(&context->context_mgr_node_lock);
4956 
4957 	node = binder_get_node_from_ref(proc, handle, true, NULL);
4958 	if (!node)
4959 		return -EINVAL;
4960 
4961 	info->strong_count = node->local_strong_refs +
4962 		node->internal_strong_refs;
4963 	info->weak_count = node->local_weak_refs;
4964 
4965 	binder_put_node(node);
4966 
4967 	return 0;
4968 }
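
/*
 * Editor's note (illustrative): only the context manager may use this, and
 * every field except "handle" must be zero on input. A minimal sketch,
 * assuming "h" is a valid ref handle in the calling process:
 *
 *	struct binder_node_info_for_ref info = { .handle = h };
 *
 *	if (ioctl(bfd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0)
 *		// info.strong_count / info.weak_count are now filled in
 */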
4969 
4970 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4971 				struct binder_node_debug_info *info)
4972 {
4973 	struct rb_node *n;
4974 	binder_uintptr_t ptr = info->ptr;
4975 
4976 	memset(info, 0, sizeof(*info));
4977 
4978 	binder_inner_proc_lock(proc);
4979 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4980 		struct binder_node *node = rb_entry(n, struct binder_node,
4981 						    rb_node);
4982 		if (node->ptr > ptr) {
4983 			info->ptr = node->ptr;
4984 			info->cookie = node->cookie;
4985 			info->has_strong_ref = node->has_strong_ref;
4986 			info->has_weak_ref = node->has_weak_ref;
4987 			break;
4988 		}
4989 	}
4990 	binder_inner_proc_unlock(proc);
4991 
4992 	return 0;
4993 }
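
/*
 * Editor's note (illustrative): info->ptr is a cursor; each call returns the
 * first node with a greater ptr, or an all-zero struct when the walk is
 * done. A minimal sketch of the intended iteration:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *
 *	for (;;) {
 *		if (ioctl(bfd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0 ||
 *		    !info.ptr)
 *			break;
 *		printf("node u%016llx c%016llx\n",
 *		       (unsigned long long)info.ptr,
 *		       (unsigned long long)info.cookie);
 *	}
 */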
4994 
4995 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4996 {
4997 	int ret;
4998 	struct binder_proc *proc = filp->private_data;
4999 	struct binder_thread *thread;
5000 	unsigned int size = _IOC_SIZE(cmd);
5001 	void __user *ubuf = (void __user *)arg;
5002 
5003 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5004 			proc->pid, current->pid, cmd, arg);*/
5005 
5006 	binder_selftest_alloc(&proc->alloc);
5007 
5008 	trace_binder_ioctl(cmd, arg);
5009 
5010 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5011 	if (ret)
5012 		goto err_unlocked;
5013 
5014 	thread = binder_get_thread(proc);
5015 	if (thread == NULL) {
5016 		ret = -ENOMEM;
5017 		goto err;
5018 	}
5019 
5020 	switch (cmd) {
5021 	case BINDER_WRITE_READ:
5022 		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5023 		if (ret)
5024 			goto err;
5025 		break;
5026 	case BINDER_SET_MAX_THREADS: {
5027 		int max_threads;
5028 
5029 		if (copy_from_user(&max_threads, ubuf,
5030 				   sizeof(max_threads))) {
5031 			ret = -EFAULT;
5032 			goto err;
5033 		}
5034 		binder_inner_proc_lock(proc);
5035 		proc->max_threads = max_threads;
5036 		binder_inner_proc_unlock(proc);
5037 		break;
5038 	}
5039 	case BINDER_SET_CONTEXT_MGR_EXT: {
5040 		struct flat_binder_object fbo;
5041 
5042 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5043 			ret = -EFAULT;
5044 			goto err;
5045 		}
5046 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5047 		if (ret)
5048 			goto err;
5049 		break;
5050 	}
5051 	case BINDER_SET_CONTEXT_MGR:
5052 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5053 		if (ret)
5054 			goto err;
5055 		break;
5056 	case BINDER_THREAD_EXIT:
5057 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5058 			     proc->pid, thread->pid);
5059 		binder_thread_release(proc, thread);
5060 		thread = NULL;
5061 		break;
5062 	case BINDER_VERSION: {
5063 		struct binder_version __user *ver = ubuf;
5064 
5065 		if (size != sizeof(struct binder_version)) {
5066 			ret = -EINVAL;
5067 			goto err;
5068 		}
5069 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5070 			     &ver->protocol_version)) {
5071 			ret = -EFAULT;
5072 			goto err;
5073 		}
5074 		break;
5075 	}
5076 	case BINDER_GET_NODE_INFO_FOR_REF: {
5077 		struct binder_node_info_for_ref info;
5078 
5079 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5080 			ret = -EFAULT;
5081 			goto err;
5082 		}
5083 
5084 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5085 		if (ret < 0)
5086 			goto err;
5087 
5088 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5089 			ret = -EFAULT;
5090 			goto err;
5091 		}
5092 
5093 		break;
5094 	}
5095 	case BINDER_GET_NODE_DEBUG_INFO: {
5096 		struct binder_node_debug_info info;
5097 
5098 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5099 			ret = -EFAULT;
5100 			goto err;
5101 		}
5102 
5103 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5104 		if (ret < 0)
5105 			goto err;
5106 
5107 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5108 			ret = -EFAULT;
5109 			goto err;
5110 		}
5111 		break;
5112 	}
5113 	default:
5114 		ret = -EINVAL;
5115 		goto err;
5116 	}
5117 	ret = 0;
5118 err:
5119 	if (thread)
5120 		thread->looper_need_return = false;
5121 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5122 	if (ret && ret != -ERESTARTSYS)
5123 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5124 err_unlocked:
5125 	trace_binder_ioctl_done(ret);
5126 	return ret;
5127 }
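
/*
 * Editor's note (illustrative): most setup ioctls dispatched above are
 * one-liners from user space. A minimal sketch of the version handshake and
 * thread-pool sizing, assuming an open binder fd "bfd":
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	if (ioctl(bfd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// incompatible driver
 *	ioctl(bfd, BINDER_SET_MAX_THREADS, &max_threads);
 */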
5128 
5129 static void binder_vma_open(struct vm_area_struct *vma)
5130 {
5131 	struct binder_proc *proc = vma->vm_private_data;
5132 
5133 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5134 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5135 		     proc->pid, vma->vm_start, vma->vm_end,
5136 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5137 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5138 }
5139 
5140 static void binder_vma_close(struct vm_area_struct *vma)
5141 {
5142 	struct binder_proc *proc = vma->vm_private_data;
5143 
5144 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5145 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5146 		     proc->pid, vma->vm_start, vma->vm_end,
5147 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5148 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5149 	binder_alloc_vma_close(&proc->alloc);
5150 }
5151 
5152 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5153 {
5154 	return VM_FAULT_SIGBUS;
5155 }
5156 
5157 static const struct vm_operations_struct binder_vm_ops = {
5158 	.open = binder_vma_open,
5159 	.close = binder_vma_close,
5160 	.fault = binder_vm_fault,
5161 };
5162 
5163 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5164 {
5165 	int ret;
5166 	struct binder_proc *proc = filp->private_data;
5167 	const char *failure_string;
5168 
5169 	if (proc->tsk != current->group_leader)
5170 		return -EINVAL;
5171 
5172 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5173 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5174 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5175 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5176 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5177 
5178 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5179 		ret = -EPERM;
5180 		failure_string = "bad vm_flags";
5181 		goto err_bad_arg;
5182 	}
5183 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5184 	vma->vm_flags &= ~VM_MAYWRITE;
5185 
5186 	vma->vm_ops = &binder_vm_ops;
5187 	vma->vm_private_data = proc;
5188 
5189 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5190 	if (ret)
5191 		return ret;
5192 	return 0;
5193 
5194 err_bad_arg:
5195 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5196 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5197 	return ret;
5198 }
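
/*
 * Editor's note (illustrative): the mapping only receives transaction data,
 * so it must not be writable (FORBIDDEN_MMAP_FLAGS above, and VM_MAYWRITE is
 * cleared so mprotect() cannot add write access later). A minimal sketch;
 * the length is a hint that binder_alloc caps (4 MiB at this revision):
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, bfd, 0);
 */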
5199 
5200 static int binder_open(struct inode *nodp, struct file *filp)
5201 {
5202 	struct binder_proc *proc;
5203 	struct binder_device *binder_dev;
5204 	struct binderfs_info *info;
5205 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5206 
5207 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5208 		     current->group_leader->pid, current->pid);
5209 
5210 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5211 	if (proc == NULL)
5212 		return -ENOMEM;
5213 	spin_lock_init(&proc->inner_lock);
5214 	spin_lock_init(&proc->outer_lock);
5215 	get_task_struct(current->group_leader);
5216 	proc->tsk = current->group_leader;
5217 	INIT_LIST_HEAD(&proc->todo);
5218 	proc->default_priority = task_nice(current);
5219 	/* binderfs stashes devices in i_private */
5220 	if (is_binderfs_device(nodp)) {
5221 		binder_dev = nodp->i_private;
5222 		info = nodp->i_sb->s_fs_info;
5223 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5224 	} else {
5225 		binder_dev = container_of(filp->private_data,
5226 					  struct binder_device, miscdev);
5227 	}
5228 	proc->context = &binder_dev->context;
5229 	binder_alloc_init(&proc->alloc);
5230 
5231 	binder_stats_created(BINDER_STAT_PROC);
5232 	proc->pid = current->group_leader->pid;
5233 	INIT_LIST_HEAD(&proc->delivered_death);
5234 	INIT_LIST_HEAD(&proc->waiting_threads);
5235 	filp->private_data = proc;
5236 
5237 	mutex_lock(&binder_procs_lock);
5238 	hlist_add_head(&proc->proc_node, &binder_procs);
5239 	mutex_unlock(&binder_procs_lock);
5240 
5241 	if (binder_debugfs_dir_entry_proc) {
5242 		char strbuf[11];
5243 
5244 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5245 		/*
5246 		 * proc debug entries are shared between contexts, so
5247 		 * this will fail if the process tries to open the driver
5248 		 * again with a different context. The printing code will
5249 		 * print all contexts that a given PID has anyway, so this
5250 		 * is not a problem.
5251 		 */
5252 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5253 			binder_debugfs_dir_entry_proc,
5254 			(void *)(unsigned long)proc->pid,
5255 			&proc_fops);
5256 	}
5257 
5258 	if (binder_binderfs_dir_entry_proc) {
5259 		char strbuf[11];
5260 		struct dentry *binderfs_entry;
5261 
5262 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5263 		/*
5264 		 * Similar to debugfs, the process specific log file is shared
5265 		 * between contexts. If the file has already been created for a
5266 		 * process, the following binderfs_create_file() call will
5267 		 * fail with error code EEXIST if another context of the same
5268 		 * process invoked binder_open(). This is ok since, as with
5269 		 * debugfs, the log file will contain information on all
5270 		 * contexts of a given PID.
5271 		 */
5272 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5273 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5274 		if (!IS_ERR(binderfs_entry)) {
5275 			proc->binderfs_entry = binderfs_entry;
5276 		} else {
5277 			int error;
5278 
5279 			error = PTR_ERR(binderfs_entry);
5280 			if (error != -EEXIST) {
5281 				pr_warn("Unable to create file %s in binderfs (error %d)\n",
5282 					strbuf, error);
5283 			}
5284 		}
5285 	}
5286 
5287 	return 0;
5288 }
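
/*
 * Editor's note (illustrative): a rough user-space bring-up sequence,
 * similar in spirit to what libbinder's ProcessState does; error handling
 * omitted:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/android/binder.h>
 *
 *	int bfd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *
 *	ioctl(bfd, BINDER_VERSION, &vers);		// sanity check
 *	mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, bfd, 0);
 *	// then BC_ENTER_LOOPER via BINDER_WRITE_READ (see sketch above)
 */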
5289 
5290 static int binder_flush(struct file *filp, fl_owner_t id)
5291 {
5292 	struct binder_proc *proc = filp->private_data;
5293 
5294 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5295 
5296 	return 0;
5297 }
5298 
5299 static void binder_deferred_flush(struct binder_proc *proc)
5300 {
5301 	struct rb_node *n;
5302 	int wake_count = 0;
5303 
5304 	binder_inner_proc_lock(proc);
5305 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5306 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5307 
5308 		thread->looper_need_return = true;
5309 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5310 			wake_up_interruptible(&thread->wait);
5311 			wake_count++;
5312 		}
5313 	}
5314 	binder_inner_proc_unlock(proc);
5315 
5316 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5317 		     "binder_flush: %d woke %d threads\n", proc->pid,
5318 		     wake_count);
5319 }
5320 
5321 static int binder_release(struct inode *nodp, struct file *filp)
5322 {
5323 	struct binder_proc *proc = filp->private_data;
5324 
5325 	debugfs_remove(proc->debugfs_entry);
5326 
5327 	if (proc->binderfs_entry) {
5328 		binderfs_remove_file(proc->binderfs_entry);
5329 		proc->binderfs_entry = NULL;
5330 	}
5331 
5332 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5333 
5334 	return 0;
5335 }
5336 
5337 static int binder_node_release(struct binder_node *node, int refs)
5338 {
5339 	struct binder_ref *ref;
5340 	int death = 0;
5341 	struct binder_proc *proc = node->proc;
5342 
5343 	binder_release_work(proc, &node->async_todo);
5344 
5345 	binder_node_lock(node);
5346 	binder_inner_proc_lock(proc);
5347 	binder_dequeue_work_ilocked(&node->work);
5348 	/*
5349 	 * The caller must have taken a temporary ref on the node.
5350 	 */
5351 	BUG_ON(!node->tmp_refs);
5352 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5353 		binder_inner_proc_unlock(proc);
5354 		binder_node_unlock(node);
5355 		binder_free_node(node);
5356 
5357 		return refs;
5358 	}
5359 
5360 	node->proc = NULL;
5361 	node->local_strong_refs = 0;
5362 	node->local_weak_refs = 0;
5363 	binder_inner_proc_unlock(proc);
5364 
5365 	spin_lock(&binder_dead_nodes_lock);
5366 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5367 	spin_unlock(&binder_dead_nodes_lock);
5368 
5369 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5370 		refs++;
5371 		/*
5372 		 * Need the node lock to synchronize
5373 		 * with new notification requests and the
5374 		 * inner lock to synchronize with queued
5375 		 * death notifications.
5376 		 */
5377 		binder_inner_proc_lock(ref->proc);
5378 		if (!ref->death) {
5379 			binder_inner_proc_unlock(ref->proc);
5380 			continue;
5381 		}
5382 
5383 		death++;
5384 
5385 		BUG_ON(!list_empty(&ref->death->work.entry));
5386 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5387 		binder_enqueue_work_ilocked(&ref->death->work,
5388 					    &ref->proc->todo);
5389 		binder_wakeup_proc_ilocked(ref->proc);
5390 		binder_inner_proc_unlock(ref->proc);
5391 	}
5392 
5393 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5394 		     "node %d now dead, refs %d, death %d\n",
5395 		     node->debug_id, refs, death);
5396 	binder_node_unlock(node);
5397 	binder_put_node(node);
5398 
5399 	return refs;
5400 }
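
/*
 * Editor's note (illustrative): the BINDER_WORK_DEAD_BINDER queued above is
 * what user space observes as BR_DEAD_BINDER, after having asked for it. A
 * minimal sketch of the command side, assuming "h" is a ref handle and
 * "my_proxy" is a hypothetical user-space object used as the cookie; the
 * buffer goes out through bwr.write_buffer as in the BINDER_WRITE_READ
 * sketch:
 *
 *	struct __attribute__((packed)) {
 *		uint32_t cmd;
 *		struct binder_handle_cookie hc;
 *	} req = {
 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *		.hc = { .handle = h, .cookie = (binder_uintptr_t)my_proxy },
 *	};
 *	// a later read pass returns BR_DEAD_BINDER with the cookie; user
 *	// space must acknowledge it with BC_DEAD_BINDER_DONE.
 */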
5401 
5402 static void binder_deferred_release(struct binder_proc *proc)
5403 {
5404 	struct binder_context *context = proc->context;
5405 	struct rb_node *n;
5406 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5407 
5408 	mutex_lock(&binder_procs_lock);
5409 	hlist_del(&proc->proc_node);
5410 	mutex_unlock(&binder_procs_lock);
5411 
5412 	mutex_lock(&context->context_mgr_node_lock);
5413 	if (context->binder_context_mgr_node &&
5414 	    context->binder_context_mgr_node->proc == proc) {
5415 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5416 			     "%s: %d context_mgr_node gone\n",
5417 			     __func__, proc->pid);
5418 		context->binder_context_mgr_node = NULL;
5419 	}
5420 	mutex_unlock(&context->context_mgr_node_lock);
5421 	binder_inner_proc_lock(proc);
5422 	/*
5423 	 * Make sure proc stays alive after we
5424 	 * remove all the threads
5425 	 */
5426 	proc->tmp_ref++;
5427 
5428 	proc->is_dead = true;
5429 	threads = 0;
5430 	active_transactions = 0;
5431 	while ((n = rb_first(&proc->threads))) {
5432 		struct binder_thread *thread;
5433 
5434 		thread = rb_entry(n, struct binder_thread, rb_node);
5435 		binder_inner_proc_unlock(proc);
5436 		threads++;
5437 		active_transactions += binder_thread_release(proc, thread);
5438 		binder_inner_proc_lock(proc);
5439 	}
5440 
5441 	nodes = 0;
5442 	incoming_refs = 0;
5443 	while ((n = rb_first(&proc->nodes))) {
5444 		struct binder_node *node;
5445 
5446 		node = rb_entry(n, struct binder_node, rb_node);
5447 		nodes++;
5448 		/*
5449 		 * take a temporary ref on the node before
5450 		 * calling binder_node_release() which will either
5451 		 * kfree() the node or call binder_put_node()
5452 		 */
5453 		binder_inc_node_tmpref_ilocked(node);
5454 		rb_erase(&node->rb_node, &proc->nodes);
5455 		binder_inner_proc_unlock(proc);
5456 		incoming_refs = binder_node_release(node, incoming_refs);
5457 		binder_inner_proc_lock(proc);
5458 	}
5459 	binder_inner_proc_unlock(proc);
5460 
5461 	outgoing_refs = 0;
5462 	binder_proc_lock(proc);
5463 	while ((n = rb_first(&proc->refs_by_desc))) {
5464 		struct binder_ref *ref;
5465 
5466 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5467 		outgoing_refs++;
5468 		binder_cleanup_ref_olocked(ref);
5469 		binder_proc_unlock(proc);
5470 		binder_free_ref(ref);
5471 		binder_proc_lock(proc);
5472 	}
5473 	binder_proc_unlock(proc);
5474 
5475 	binder_release_work(proc, &proc->todo);
5476 	binder_release_work(proc, &proc->delivered_death);
5477 
5478 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5479 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5480 		     __func__, proc->pid, threads, nodes, incoming_refs,
5481 		     outgoing_refs, active_transactions);
5482 
5483 	binder_proc_dec_tmpref(proc);
5484 }
5485 
5486 static void binder_deferred_func(struct work_struct *work)
5487 {
5488 	struct binder_proc *proc;
5489 
5490 	int defer;
5491 
5492 	do {
5493 		mutex_lock(&binder_deferred_lock);
5494 		if (!hlist_empty(&binder_deferred_list)) {
5495 			proc = hlist_entry(binder_deferred_list.first,
5496 					struct binder_proc, deferred_work_node);
5497 			hlist_del_init(&proc->deferred_work_node);
5498 			defer = proc->deferred_work;
5499 			proc->deferred_work = 0;
5500 		} else {
5501 			proc = NULL;
5502 			defer = 0;
5503 		}
5504 		mutex_unlock(&binder_deferred_lock);
5505 
5506 		if (defer & BINDER_DEFERRED_FLUSH)
5507 			binder_deferred_flush(proc);
5508 
5509 		if (defer & BINDER_DEFERRED_RELEASE)
5510 			binder_deferred_release(proc); /* frees proc */
5511 	} while (proc);
5512 }
5513 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5514 
5515 static void
5516 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5517 {
5518 	mutex_lock(&binder_deferred_lock);
5519 	proc->deferred_work |= defer;
5520 	if (hlist_unhashed(&proc->deferred_work_node)) {
5521 		hlist_add_head(&proc->deferred_work_node,
5522 				&binder_deferred_list);
5523 		schedule_work(&binder_deferred_work);
5524 	}
5525 	mutex_unlock(&binder_deferred_lock);
5526 }
5527 
5528 static void print_binder_transaction_ilocked(struct seq_file *m,
5529 					     struct binder_proc *proc,
5530 					     const char *prefix,
5531 					     struct binder_transaction *t)
5532 {
5533 	struct binder_proc *to_proc;
5534 	struct binder_buffer *buffer = t->buffer;
5535 
5536 	spin_lock(&t->lock);
5537 	to_proc = t->to_proc;
5538 	seq_printf(m,
5539 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5540 		   prefix, t->debug_id, t,
5541 		   t->from ? t->from->proc->pid : 0,
5542 		   t->from ? t->from->pid : 0,
5543 		   to_proc ? to_proc->pid : 0,
5544 		   t->to_thread ? t->to_thread->pid : 0,
5545 		   t->code, t->flags, t->priority, t->need_reply);
5546 	spin_unlock(&t->lock);
5547 
5548 	if (proc != to_proc) {
5549 		/*
5550 		 * Can only safely deref buffer if we are holding the
5551 		 * correct proc inner lock for this node
5552 		 */
5553 		seq_puts(m, "\n");
5554 		return;
5555 	}
5556 
5557 	if (buffer == NULL) {
5558 		seq_puts(m, " buffer free\n");
5559 		return;
5560 	}
5561 	if (buffer->target_node)
5562 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5563 	seq_printf(m, " size %zd:%zd data %pK\n",
5564 		   buffer->data_size, buffer->offsets_size,
5565 		   buffer->user_data);
5566 }
5567 
5568 static void print_binder_work_ilocked(struct seq_file *m,
5569 				     struct binder_proc *proc,
5570 				     const char *prefix,
5571 				     const char *transaction_prefix,
5572 				     struct binder_work *w)
5573 {
5574 	struct binder_node *node;
5575 	struct binder_transaction *t;
5576 
5577 	switch (w->type) {
5578 	case BINDER_WORK_TRANSACTION:
5579 		t = container_of(w, struct binder_transaction, work);
5580 		print_binder_transaction_ilocked(
5581 				m, proc, transaction_prefix, t);
5582 		break;
5583 	case BINDER_WORK_RETURN_ERROR: {
5584 		struct binder_error *e = container_of(
5585 				w, struct binder_error, work);
5586 
5587 		seq_printf(m, "%stransaction error: %u\n",
5588 			   prefix, e->cmd);
5589 	} break;
5590 	case BINDER_WORK_TRANSACTION_COMPLETE:
5591 		seq_printf(m, "%stransaction complete\n", prefix);
5592 		break;
5593 	case BINDER_WORK_NODE:
5594 		node = container_of(w, struct binder_node, work);
5595 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5596 			   prefix, node->debug_id,
5597 			   (u64)node->ptr, (u64)node->cookie);
5598 		break;
5599 	case BINDER_WORK_DEAD_BINDER:
5600 		seq_printf(m, "%shas dead binder\n", prefix);
5601 		break;
5602 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5603 		seq_printf(m, "%shas cleared dead binder\n", prefix);
5604 		break;
5605 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5606 		seq_printf(m, "%shas cleared death notification\n", prefix);
5607 		break;
5608 	default:
5609 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5610 		break;
5611 	}
5612 }
5613 
5614 static void print_binder_thread_ilocked(struct seq_file *m,
5615 					struct binder_thread *thread,
5616 					int print_always)
5617 {
5618 	struct binder_transaction *t;
5619 	struct binder_work *w;
5620 	size_t start_pos = m->count;
5621 	size_t header_pos;
5622 
5623 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5624 			thread->pid, thread->looper,
5625 			thread->looper_need_return,
5626 			atomic_read(&thread->tmp_ref));
5627 	header_pos = m->count;
5628 	t = thread->transaction_stack;
5629 	while (t) {
5630 		if (t->from == thread) {
5631 			print_binder_transaction_ilocked(m, thread->proc,
5632 					"    outgoing transaction", t);
5633 			t = t->from_parent;
5634 		} else if (t->to_thread == thread) {
5635 			print_binder_transaction_ilocked(m, thread->proc,
5636 						 "    incoming transaction", t);
5637 			t = t->to_parent;
5638 		} else {
5639 			print_binder_transaction_ilocked(m, thread->proc,
5640 					"    bad transaction", t);
5641 			t = NULL;
5642 		}
5643 	}
5644 	list_for_each_entry(w, &thread->todo, entry) {
5645 		print_binder_work_ilocked(m, thread->proc, "    ",
5646 					  "    pending transaction", w);
5647 	}
5648 	if (!print_always && m->count == header_pos)
5649 		m->count = start_pos;
5650 }
5651 
5652 static void print_binder_node_nilocked(struct seq_file *m,
5653 				       struct binder_node *node)
5654 {
5655 	struct binder_ref *ref;
5656 	struct binder_work *w;
5657 	int count;
5658 
5659 	count = 0;
5660 	hlist_for_each_entry(ref, &node->refs, node_entry)
5661 		count++;
5662 
5663 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5664 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5665 		   node->has_strong_ref, node->has_weak_ref,
5666 		   node->local_strong_refs, node->local_weak_refs,
5667 		   node->internal_strong_refs, count, node->tmp_refs);
5668 	if (count) {
5669 		seq_puts(m, " proc");
5670 		hlist_for_each_entry(ref, &node->refs, node_entry)
5671 			seq_printf(m, " %d", ref->proc->pid);
5672 	}
5673 	seq_puts(m, "\n");
5674 	if (node->proc) {
5675 		list_for_each_entry(w, &node->async_todo, entry)
5676 			print_binder_work_ilocked(m, node->proc, "    ",
5677 					  "    pending async transaction", w);
5678 	}
5679 }
5680 
5681 static void print_binder_ref_olocked(struct seq_file *m,
5682 				     struct binder_ref *ref)
5683 {
5684 	binder_node_lock(ref->node);
5685 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5686 		   ref->data.debug_id, ref->data.desc,
5687 		   ref->node->proc ? "" : "dead ",
5688 		   ref->node->debug_id, ref->data.strong,
5689 		   ref->data.weak, ref->death);
5690 	binder_node_unlock(ref->node);
5691 }
5692 
5693 static void print_binder_proc(struct seq_file *m,
5694 			      struct binder_proc *proc, int print_all)
5695 {
5696 	struct binder_work *w;
5697 	struct rb_node *n;
5698 	size_t start_pos = m->count;
5699 	size_t header_pos;
5700 	struct binder_node *last_node = NULL;
5701 
5702 	seq_printf(m, "proc %d\n", proc->pid);
5703 	seq_printf(m, "context %s\n", proc->context->name);
5704 	header_pos = m->count;
5705 
5706 	binder_inner_proc_lock(proc);
5707 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5708 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5709 						rb_node), print_all);
5710 
5711 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5712 		struct binder_node *node = rb_entry(n, struct binder_node,
5713 						    rb_node);
5714 		if (!print_all && !node->has_async_transaction)
5715 			continue;
5716 
5717 		/*
5718 		 * take a temporary reference on the node so it
5719 		 * survives and isn't removed from the tree
5720 		 * while we print it.
5721 		 */
5722 		binder_inc_node_tmpref_ilocked(node);
5723 		/* Need to drop inner lock to take node lock */
5724 		binder_inner_proc_unlock(proc);
5725 		if (last_node)
5726 			binder_put_node(last_node);
5727 		binder_node_inner_lock(node);
5728 		print_binder_node_nilocked(m, node);
5729 		binder_node_inner_unlock(node);
5730 		last_node = node;
5731 		binder_inner_proc_lock(proc);
5732 	}
5733 	binder_inner_proc_unlock(proc);
5734 	if (last_node)
5735 		binder_put_node(last_node);
5736 
5737 	if (print_all) {
5738 		binder_proc_lock(proc);
5739 		for (n = rb_first(&proc->refs_by_desc);
5740 		     n != NULL;
5741 		     n = rb_next(n))
5742 			print_binder_ref_olocked(m, rb_entry(n,
5743 							    struct binder_ref,
5744 							    rb_node_desc));
5745 		binder_proc_unlock(proc);
5746 	}
5747 	binder_alloc_print_allocated(m, &proc->alloc);
5748 	binder_inner_proc_lock(proc);
5749 	list_for_each_entry(w, &proc->todo, entry)
5750 		print_binder_work_ilocked(m, proc, "  ",
5751 					  "  pending transaction", w);
5752 	list_for_each_entry(w, &proc->delivered_death, entry) {
5753 		seq_puts(m, "  has delivered dead binder\n");
5754 		break;
5755 	}
5756 	binder_inner_proc_unlock(proc);
5757 	if (!print_all && m->count == header_pos)
5758 		m->count = start_pos;
5759 }
5760 
5761 static const char * const binder_return_strings[] = {
5762 	"BR_ERROR",
5763 	"BR_OK",
5764 	"BR_TRANSACTION",
5765 	"BR_REPLY",
5766 	"BR_ACQUIRE_RESULT",
5767 	"BR_DEAD_REPLY",
5768 	"BR_TRANSACTION_COMPLETE",
5769 	"BR_INCREFS",
5770 	"BR_ACQUIRE",
5771 	"BR_RELEASE",
5772 	"BR_DECREFS",
5773 	"BR_ATTEMPT_ACQUIRE",
5774 	"BR_NOOP",
5775 	"BR_SPAWN_LOOPER",
5776 	"BR_FINISHED",
5777 	"BR_DEAD_BINDER",
5778 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5779 	"BR_FAILED_REPLY"
5780 };
5781 
5782 static const char * const binder_command_strings[] = {
5783 	"BC_TRANSACTION",
5784 	"BC_REPLY",
5785 	"BC_ACQUIRE_RESULT",
5786 	"BC_FREE_BUFFER",
5787 	"BC_INCREFS",
5788 	"BC_ACQUIRE",
5789 	"BC_RELEASE",
5790 	"BC_DECREFS",
5791 	"BC_INCREFS_DONE",
5792 	"BC_ACQUIRE_DONE",
5793 	"BC_ATTEMPT_ACQUIRE",
5794 	"BC_REGISTER_LOOPER",
5795 	"BC_ENTER_LOOPER",
5796 	"BC_EXIT_LOOPER",
5797 	"BC_REQUEST_DEATH_NOTIFICATION",
5798 	"BC_CLEAR_DEATH_NOTIFICATION",
5799 	"BC_DEAD_BINDER_DONE",
5800 	"BC_TRANSACTION_SG",
5801 	"BC_REPLY_SG",
5802 };
5803 
5804 static const char * const binder_objstat_strings[] = {
5805 	"proc",
5806 	"thread",
5807 	"node",
5808 	"ref",
5809 	"death",
5810 	"transaction",
5811 	"transaction_complete"
5812 };
5813 
5814 static void print_binder_stats(struct seq_file *m, const char *prefix,
5815 			       struct binder_stats *stats)
5816 {
5817 	int i;
5818 
5819 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5820 		     ARRAY_SIZE(binder_command_strings));
5821 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5822 		int temp = atomic_read(&stats->bc[i]);
5823 
5824 		if (temp)
5825 			seq_printf(m, "%s%s: %d\n", prefix,
5826 				   binder_command_strings[i], temp);
5827 	}
5828 
5829 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5830 		     ARRAY_SIZE(binder_return_strings));
5831 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5832 		int temp = atomic_read(&stats->br[i]);
5833 
5834 		if (temp)
5835 			seq_printf(m, "%s%s: %d\n", prefix,
5836 				   binder_return_strings[i], temp);
5837 	}
5838 
5839 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5840 		     ARRAY_SIZE(binder_objstat_strings));
5841 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5842 		     ARRAY_SIZE(stats->obj_deleted));
5843 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5844 		int created = atomic_read(&stats->obj_created[i]);
5845 		int deleted = atomic_read(&stats->obj_deleted[i]);
5846 
5847 		if (created || deleted)
5848 			seq_printf(m, "%s%s: active %d total %d\n",
5849 				prefix,
5850 				binder_objstat_strings[i],
5851 				created - deleted,
5852 				created);
5853 	}
5854 }
5855 
5856 static void print_binder_proc_stats(struct seq_file *m,
5857 				    struct binder_proc *proc)
5858 {
5859 	struct binder_work *w;
5860 	struct binder_thread *thread;
5861 	struct rb_node *n;
5862 	int count, strong, weak, ready_threads;
5863 	size_t free_async_space =
5864 		binder_alloc_get_free_async_space(&proc->alloc);
5865 
5866 	seq_printf(m, "proc %d\n", proc->pid);
5867 	seq_printf(m, "context %s\n", proc->context->name);
5868 	count = 0;
5869 	ready_threads = 0;
5870 	binder_inner_proc_lock(proc);
5871 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5872 		count++;
5873 
5874 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5875 		ready_threads++;
5876 
5877 	seq_printf(m, "  threads: %d\n", count);
5878 	seq_printf(m, "  requested threads: %d+%d/%d\n"
5879 			"  ready threads %d\n"
5880 			"  free async space %zd\n", proc->requested_threads,
5881 			proc->requested_threads_started, proc->max_threads,
5882 			ready_threads,
5883 			free_async_space);
5884 	count = 0;
5885 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5886 		count++;
5887 	binder_inner_proc_unlock(proc);
5888 	seq_printf(m, "  nodes: %d\n", count);
5889 	count = 0;
5890 	strong = 0;
5891 	weak = 0;
5892 	binder_proc_lock(proc);
5893 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5894 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5895 						  rb_node_desc);
5896 		count++;
5897 		strong += ref->data.strong;
5898 		weak += ref->data.weak;
5899 	}
5900 	binder_proc_unlock(proc);
5901 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5902 
5903 	count = binder_alloc_get_allocated_count(&proc->alloc);
5904 	seq_printf(m, "  buffers: %d\n", count);
5905 
5906 	binder_alloc_print_pages(m, &proc->alloc);
5907 
5908 	count = 0;
5909 	binder_inner_proc_lock(proc);
5910 	list_for_each_entry(w, &proc->todo, entry) {
5911 		if (w->type == BINDER_WORK_TRANSACTION)
5912 			count++;
5913 	}
5914 	binder_inner_proc_unlock(proc);
5915 	seq_printf(m, "  pending transactions: %d\n", count);
5916 
5917 	print_binder_stats(m, "  ", &proc->stats);
5918 }
5919 
5921 int binder_state_show(struct seq_file *m, void *unused)
5922 {
5923 	struct binder_proc *proc;
5924 	struct binder_node *node;
5925 	struct binder_node *last_node = NULL;
5926 
5927 	seq_puts(m, "binder state:\n");
5928 
5929 	spin_lock(&binder_dead_nodes_lock);
5930 	if (!hlist_empty(&binder_dead_nodes))
5931 		seq_puts(m, "dead nodes:\n");
5932 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5933 		/*
5934 		 * take a temporary reference on the node so it
5935 		 * survives and isn't removed from the list
5936 		 * while we print it.
5937 		 */
5938 		node->tmp_refs++;
5939 		spin_unlock(&binder_dead_nodes_lock);
5940 		if (last_node)
5941 			binder_put_node(last_node);
5942 		binder_node_lock(node);
5943 		print_binder_node_nilocked(m, node);
5944 		binder_node_unlock(node);
5945 		last_node = node;
5946 		spin_lock(&binder_dead_nodes_lock);
5947 	}
5948 	spin_unlock(&binder_dead_nodes_lock);
5949 	if (last_node)
5950 		binder_put_node(last_node);
5951 
5952 	mutex_lock(&binder_procs_lock);
5953 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5954 		print_binder_proc(m, proc, 1);
5955 	mutex_unlock(&binder_procs_lock);
5956 
5957 	return 0;
5958 }
5959 
5960 int binder_stats_show(struct seq_file *m, void *unused)
5961 {
5962 	struct binder_proc *proc;
5963 
5964 	seq_puts(m, "binder stats:\n");
5965 
5966 	print_binder_stats(m, "", &binder_stats);
5967 
5968 	mutex_lock(&binder_procs_lock);
5969 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5970 		print_binder_proc_stats(m, proc);
5971 	mutex_unlock(&binder_procs_lock);
5972 
5973 	return 0;
5974 }
5975 
5976 int binder_transactions_show(struct seq_file *m, void *unused)
5977 {
5978 	struct binder_proc *proc;
5979 
5980 	seq_puts(m, "binder transactions:\n");
5981 	mutex_lock(&binder_procs_lock);
5982 	hlist_for_each_entry(proc, &binder_procs, proc_node)
5983 		print_binder_proc(m, proc, 0);
5984 	mutex_unlock(&binder_procs_lock);
5985 
5986 	return 0;
5987 }
5988 
5989 static int proc_show(struct seq_file *m, void *unused)
5990 {
5991 	struct binder_proc *itr;
5992 	int pid = (unsigned long)m->private;
5993 
5994 	mutex_lock(&binder_procs_lock);
5995 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5996 		if (itr->pid == pid) {
5997 			seq_puts(m, "binder proc state:\n");
5998 			print_binder_proc(m, itr, 1);
5999 		}
6000 	}
6001 	mutex_unlock(&binder_procs_lock);
6002 
6003 	return 0;
6004 }
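
/*
 * Editor's note: the show functions above back the files this driver
 * creates in binder_init() below, typically visible as (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/binder/state
 *	/sys/kernel/debug/binder/stats
 *	/sys/kernel/debug/binder/transactions
 *	/sys/kernel/debug/binder/transaction_log
 *	/sys/kernel/debug/binder/failed_transaction_log
 *	/sys/kernel/debug/binder/proc/<pid>
 */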
6005 
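/*
 * Editor's note: the double read of debug_id_done below is a seqlock-like
 * protocol. The writer publishes a finished entry with an smp_wmb() before
 * setting debug_id_done; reading the id once before and once after the
 * fields (with the paired smp_rmb()s) lets this lockless reader detect an
 * entry that was overwritten mid-print and flag it "(incomplete)".
 */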
6006 static void print_binder_transaction_log_entry(struct seq_file *m,
6007 					struct binder_transaction_log_entry *e)
6008 {
6009 	int debug_id = READ_ONCE(e->debug_id_done);
6010 	/*
6011 	 * read barrier to guarantee debug_id_done read before
6012 	 * we print the log values
6013 	 */
6014 	smp_rmb();
6015 	seq_printf(m,
6016 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6017 		   e->debug_id, (e->call_type == 2) ? "reply" :
6018 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6019 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6020 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6021 		   e->return_error, e->return_error_param,
6022 		   e->return_error_line);
6023 	/*
6024 	 * read-barrier to guarantee read of debug_id_done after
6025 	 * done printing the fields of the entry
6026 	 */
6027 	smp_rmb();
6028 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6029 			"\n" : " (incomplete)\n");
6030 }
6031 
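/*
 * Editor's note, a worked example of the ring arithmetic below: with
 * ARRAY_SIZE(log->entry) == 32 and log->cur == 40 (so the log has wrapped),
 * count starts at 41, the scan begins at 41 % 32 == 9 (the oldest entry),
 * count is clamped to 32, and entries print oldest-first, ending at index
 * 40 % 32 == 8, the most recent.
 */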
6032 int binder_transaction_log_show(struct seq_file *m, void *unused)
6033 {
6034 	struct binder_transaction_log *log = m->private;
6035 	unsigned int log_cur = atomic_read(&log->cur);
6036 	unsigned int count;
6037 	unsigned int cur;
6038 	int i;
6039 
6040 	count = log_cur + 1;
6041 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6042 		0 : count % ARRAY_SIZE(log->entry);
6043 	if (count > ARRAY_SIZE(log->entry) || log->full)
6044 		count = ARRAY_SIZE(log->entry);
6045 	for (i = 0; i < count; i++) {
6046 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6047 
6048 		print_binder_transaction_log_entry(m, &log->entry[index]);
6049 	}
6050 	return 0;
6051 }
6052 
6053 const struct file_operations binder_fops = {
6054 	.owner = THIS_MODULE,
6055 	.poll = binder_poll,
6056 	.unlocked_ioctl = binder_ioctl,
6057 	.compat_ioctl = compat_ptr_ioctl,
6058 	.mmap = binder_mmap,
6059 	.open = binder_open,
6060 	.flush = binder_flush,
6061 	.release = binder_release,
6062 };
6063 
6064 static int __init init_binder_device(const char *name)
6065 {
6066 	int ret;
6067 	struct binder_device *binder_device;
6068 
6069 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6070 	if (!binder_device)
6071 		return -ENOMEM;
6072 
6073 	binder_device->miscdev.fops = &binder_fops;
6074 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6075 	binder_device->miscdev.name = name;
6076 
6077 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6078 	binder_device->context.name = name;
6079 	mutex_init(&binder_device->context.context_mgr_node_lock);
6080 
6081 	ret = misc_register(&binder_device->miscdev);
6082 	if (ret < 0) {
6083 		kfree(binder_device);
6084 		return ret;
6085 	}
6086 
6087 	hlist_add_head(&binder_device->hlist, &binder_devices);
6088 
6089 	return ret;
6090 }
6091 
6092 static int __init binder_init(void)
6093 {
6094 	int ret;
6095 	char *device_name, *device_tmp;
6096 	struct binder_device *device;
6097 	struct hlist_node *tmp;
6098 	char *device_names = NULL;
6099 
6100 	ret = binder_alloc_shrinker_init();
6101 	if (ret)
6102 		return ret;
6103 
6104 	atomic_set(&binder_transaction_log.cur, ~0U);
6105 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6106 
6107 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6108 	if (binder_debugfs_dir_entry_root)
6109 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6110 						 binder_debugfs_dir_entry_root);
6111 
6112 	if (binder_debugfs_dir_entry_root) {
6113 		debugfs_create_file("state",
6114 				    0444,
6115 				    binder_debugfs_dir_entry_root,
6116 				    NULL,
6117 				    &binder_state_fops);
6118 		debugfs_create_file("stats",
6119 				    0444,
6120 				    binder_debugfs_dir_entry_root,
6121 				    NULL,
6122 				    &binder_stats_fops);
6123 		debugfs_create_file("transactions",
6124 				    0444,
6125 				    binder_debugfs_dir_entry_root,
6126 				    NULL,
6127 				    &binder_transactions_fops);
6128 		debugfs_create_file("transaction_log",
6129 				    0444,
6130 				    binder_debugfs_dir_entry_root,
6131 				    &binder_transaction_log,
6132 				    &binder_transaction_log_fops);
6133 		debugfs_create_file("failed_transaction_log",
6134 				    0444,
6135 				    binder_debugfs_dir_entry_root,
6136 				    &binder_transaction_log_failed,
6137 				    &binder_transaction_log_fops);
6138 	}
6139 
6140 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6141 	    strcmp(binder_devices_param, "") != 0) {
6142 		/*
6143 		 * Copy the module parameter string, because we don't want to
6144 		 * tokenize it in-place.
6145 		 */
6146 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6147 		if (!device_names) {
6148 			ret = -ENOMEM;
6149 			goto err_alloc_device_names_failed;
6150 		}
6151 
6152 		device_tmp = device_names;
6153 		while ((device_name = strsep(&device_tmp, ","))) {
6154 			ret = init_binder_device(device_name);
6155 			if (ret)
6156 				goto err_init_binder_device_failed;
6157 		}
6158 	}
6159 
6160 	ret = init_binderfs();
6161 	if (ret)
6162 		goto err_init_binder_device_failed;
6163 
6164 	return ret;
6165 
6166 err_init_binder_device_failed:
6167 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6168 		misc_deregister(&device->miscdev);
6169 		hlist_del(&device->hlist);
6170 		kfree(device);
6171 	}
6172 
6173 	kfree(device_names);
6174 
6175 err_alloc_device_names_failed:
6176 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6177 
6178 	return ret;
6179 }
6180 
6181 device_initcall(binder_init);
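
/*
 * Editor's note: with CONFIG_ANDROID_BINDERFS disabled, the device list
 * parsed in binder_init() comes from CONFIG_ANDROID_BINDER_DEVICES and can
 * be overridden on the kernel command line; assuming the usual
 * "binder.devices" module-parameter naming, a typical Android setting is:
 *
 *	binder.devices=binder,hwbinder,vndbinder
 */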
6182 
6183 #define CREATE_TRACE_POINTS
6184 #include "binder_trace.h"
6185 
6186 MODULE_LICENSE("GPL v2");
6187