xref: /linux/drivers/android/binder.c (revision 84b9b44b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8 
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires node->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69 
70 #include <uapi/linux/android/binder.h>
71 
72 #include <linux/cacheflush.h>
73 
74 #include "binder_internal.h"
75 #include "binder_trace.h"
76 
77 static HLIST_HEAD(binder_deferred_list);
78 static DEFINE_MUTEX(binder_deferred_lock);
79 
80 static HLIST_HEAD(binder_devices);
81 static HLIST_HEAD(binder_procs);
82 static DEFINE_MUTEX(binder_procs_lock);
83 
84 static HLIST_HEAD(binder_dead_nodes);
85 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86 
87 static struct dentry *binder_debugfs_dir_entry_root;
88 static struct dentry *binder_debugfs_dir_entry_proc;
89 static atomic_t binder_last_id;
90 
91 static int proc_show(struct seq_file *m, void *unused);
92 DEFINE_SHOW_ATTRIBUTE(proc);
93 
94 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
95 
96 enum {
97 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
98 	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
99 	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
100 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
101 	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
102 	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
103 	BINDER_DEBUG_READ_WRITE             = 1U << 6,
104 	BINDER_DEBUG_USER_REFS              = 1U << 7,
105 	BINDER_DEBUG_THREADS                = 1U << 8,
106 	BINDER_DEBUG_TRANSACTION            = 1U << 9,
107 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
108 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
109 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
110 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
111 	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
112 };
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
116 
117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118 module_param_named(devices, binder_devices_param, charp, 0444);
119 
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
122 
123 static int binder_set_stop_on_user_error(const char *val,
124 					 const struct kernel_param *kp)
125 {
126 	int ret;
127 
128 	ret = param_set_int(val, kp);
129 	if (binder_stop_on_user_error < 2)
130 		wake_up(&binder_user_error_wait);
131 	return ret;
132 }
133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134 	param_get_int, &binder_stop_on_user_error, 0644);
135 
136 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
137 {
138 	struct va_format vaf;
139 	va_list args;
140 
141 	if (binder_debug_mask & mask) {
142 		va_start(args, format);
143 		vaf.va = &args;
144 		vaf.fmt = format;
145 		pr_info_ratelimited("%pV", &vaf);
146 		va_end(args);
147 	}
148 }
149 
150 #define binder_txn_error(x...) \
151 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
152 
153 static __printf(1, 2) void binder_user_error(const char *format, ...)
154 {
155 	struct va_format vaf;
156 	va_list args;
157 
158 	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
159 		va_start(args, format);
160 		vaf.va = &args;
161 		vaf.fmt = format;
162 		pr_info_ratelimited("%pV", &vaf);
163 		va_end(args);
164 	}
165 
166 	if (binder_stop_on_user_error)
167 		binder_stop_on_user_error = 2;
168 }
169 
170 #define binder_set_extended_error(ee, _id, _command, _param) \
171 	do { \
172 		(ee)->id = _id; \
173 		(ee)->command = _command; \
174 		(ee)->param = _param; \
175 	} while (0)
176 
177 #define to_flat_binder_object(hdr) \
178 	container_of(hdr, struct flat_binder_object, hdr)
179 
180 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
181 
182 #define to_binder_buffer_object(hdr) \
183 	container_of(hdr, struct binder_buffer_object, hdr)
184 
185 #define to_binder_fd_array_object(hdr) \
186 	container_of(hdr, struct binder_fd_array_object, hdr)
187 
188 static struct binder_stats binder_stats;
189 
190 static inline void binder_stats_deleted(enum binder_stat_types type)
191 {
192 	atomic_inc(&binder_stats.obj_deleted[type]);
193 }
194 
195 static inline void binder_stats_created(enum binder_stat_types type)
196 {
197 	atomic_inc(&binder_stats.obj_created[type]);
198 }
199 
200 struct binder_transaction_log_entry {
201 	int debug_id;
202 	int debug_id_done;
203 	int call_type;
204 	int from_proc;
205 	int from_thread;
206 	int target_handle;
207 	int to_proc;
208 	int to_thread;
209 	int to_node;
210 	int data_size;
211 	int offsets_size;
212 	int return_error_line;
213 	uint32_t return_error;
214 	uint32_t return_error_param;
215 	char context_name[BINDERFS_MAX_NAME + 1];
216 };
217 
218 struct binder_transaction_log {
219 	atomic_t cur;
220 	bool full;
221 	struct binder_transaction_log_entry entry[32];
222 };
223 
224 static struct binder_transaction_log binder_transaction_log;
225 static struct binder_transaction_log binder_transaction_log_failed;
226 
227 static struct binder_transaction_log_entry *binder_transaction_log_add(
228 	struct binder_transaction_log *log)
229 {
230 	struct binder_transaction_log_entry *e;
231 	unsigned int cur = atomic_inc_return(&log->cur);
232 
233 	if (cur >= ARRAY_SIZE(log->entry))
234 		log->full = true;
235 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
236 	WRITE_ONCE(e->debug_id_done, 0);
237 	/*
238 	 * write-barrier to synchronize access to e->debug_id_done.
239 	 * We make sure the initialized 0 value is seen before
240 	 * memset() other fields are zeroed by memset.
241 	 */
242 	smp_wmb();
243 	memset(e, 0, sizeof(*e));
244 	return e;
245 }
246 
247 enum binder_deferred_state {
248 	BINDER_DEFERRED_FLUSH        = 0x01,
249 	BINDER_DEFERRED_RELEASE      = 0x02,
250 };
251 
252 enum {
253 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
254 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
255 	BINDER_LOOPER_STATE_EXITED      = 0x04,
256 	BINDER_LOOPER_STATE_INVALID     = 0x08,
257 	BINDER_LOOPER_STATE_WAITING     = 0x10,
258 	BINDER_LOOPER_STATE_POLL        = 0x20,
259 };
260 
261 /**
262  * binder_proc_lock() - Acquire outer lock for given binder_proc
263  * @proc:         struct binder_proc to acquire
264  *
265  * Acquires proc->outer_lock. Used to protect binder_ref
266  * structures associated with the given proc.
267  */
268 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
269 static void
270 _binder_proc_lock(struct binder_proc *proc, int line)
271 	__acquires(&proc->outer_lock)
272 {
273 	binder_debug(BINDER_DEBUG_SPINLOCKS,
274 		     "%s: line=%d\n", __func__, line);
275 	spin_lock(&proc->outer_lock);
276 }
277 
278 /**
279  * binder_proc_unlock() - Release spinlock for given binder_proc
280  * @proc:                struct binder_proc to acquire
281  *
282  * Release lock acquired via binder_proc_lock()
283  */
284 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
285 static void
286 _binder_proc_unlock(struct binder_proc *proc, int line)
287 	__releases(&proc->outer_lock)
288 {
289 	binder_debug(BINDER_DEBUG_SPINLOCKS,
290 		     "%s: line=%d\n", __func__, line);
291 	spin_unlock(&proc->outer_lock);
292 }
293 
294 /**
295  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
296  * @proc:         struct binder_proc to acquire
297  *
298  * Acquires proc->inner_lock. Used to protect todo lists
299  */
300 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
301 static void
302 _binder_inner_proc_lock(struct binder_proc *proc, int line)
303 	__acquires(&proc->inner_lock)
304 {
305 	binder_debug(BINDER_DEBUG_SPINLOCKS,
306 		     "%s: line=%d\n", __func__, line);
307 	spin_lock(&proc->inner_lock);
308 }
309 
310 /**
311  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
312  * @proc:         struct binder_proc to acquire
313  *
314  * Release lock acquired via binder_inner_proc_lock()
315  */
316 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
317 static void
318 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
319 	__releases(&proc->inner_lock)
320 {
321 	binder_debug(BINDER_DEBUG_SPINLOCKS,
322 		     "%s: line=%d\n", __func__, line);
323 	spin_unlock(&proc->inner_lock);
324 }
325 
326 /**
327  * binder_node_lock() - Acquire spinlock for given binder_node
328  * @node:         struct binder_node to acquire
329  *
330  * Acquires node->lock. Used to protect binder_node fields
331  */
332 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
333 static void
334 _binder_node_lock(struct binder_node *node, int line)
335 	__acquires(&node->lock)
336 {
337 	binder_debug(BINDER_DEBUG_SPINLOCKS,
338 		     "%s: line=%d\n", __func__, line);
339 	spin_lock(&node->lock);
340 }
341 
342 /**
343  * binder_node_unlock() - Release spinlock for given binder_proc
344  * @node:         struct binder_node to acquire
345  *
346  * Release lock acquired via binder_node_lock()
347  */
348 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
349 static void
350 _binder_node_unlock(struct binder_node *node, int line)
351 	__releases(&node->lock)
352 {
353 	binder_debug(BINDER_DEBUG_SPINLOCKS,
354 		     "%s: line=%d\n", __func__, line);
355 	spin_unlock(&node->lock);
356 }
357 
358 /**
359  * binder_node_inner_lock() - Acquire node and inner locks
360  * @node:         struct binder_node to acquire
361  *
362  * Acquires node->lock. If node->proc also acquires
363  * proc->inner_lock. Used to protect binder_node fields
364  */
365 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
366 static void
367 _binder_node_inner_lock(struct binder_node *node, int line)
368 	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
369 {
370 	binder_debug(BINDER_DEBUG_SPINLOCKS,
371 		     "%s: line=%d\n", __func__, line);
372 	spin_lock(&node->lock);
373 	if (node->proc)
374 		binder_inner_proc_lock(node->proc);
375 	else
376 		/* annotation for sparse */
377 		__acquire(&node->proc->inner_lock);
378 }
379 
380 /**
381  * binder_node_inner_unlock() - Release node and inner locks
382  * @node:         struct binder_node to acquire
383  *
384  * Release lock acquired via binder_node_lock()
385  */
386 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
387 static void
388 _binder_node_inner_unlock(struct binder_node *node, int line)
389 	__releases(&node->lock) __releases(&node->proc->inner_lock)
390 {
391 	struct binder_proc *proc = node->proc;
392 
393 	binder_debug(BINDER_DEBUG_SPINLOCKS,
394 		     "%s: line=%d\n", __func__, line);
395 	if (proc)
396 		binder_inner_proc_unlock(proc);
397 	else
398 		/* annotation for sparse */
399 		__release(&node->proc->inner_lock);
400 	spin_unlock(&node->lock);
401 }
402 
403 static bool binder_worklist_empty_ilocked(struct list_head *list)
404 {
405 	return list_empty(list);
406 }
407 
408 /**
409  * binder_worklist_empty() - Check if no items on the work list
410  * @proc:       binder_proc associated with list
411  * @list:	list to check
412  *
413  * Return: true if there are no items on list, else false
414  */
415 static bool binder_worklist_empty(struct binder_proc *proc,
416 				  struct list_head *list)
417 {
418 	bool ret;
419 
420 	binder_inner_proc_lock(proc);
421 	ret = binder_worklist_empty_ilocked(list);
422 	binder_inner_proc_unlock(proc);
423 	return ret;
424 }
425 
426 /**
427  * binder_enqueue_work_ilocked() - Add an item to the work list
428  * @work:         struct binder_work to add to list
429  * @target_list:  list to add work to
430  *
431  * Adds the work to the specified list. Asserts that work
432  * is not already on a list.
433  *
434  * Requires the proc->inner_lock to be held.
435  */
436 static void
437 binder_enqueue_work_ilocked(struct binder_work *work,
438 			   struct list_head *target_list)
439 {
440 	BUG_ON(target_list == NULL);
441 	BUG_ON(work->entry.next && !list_empty(&work->entry));
442 	list_add_tail(&work->entry, target_list);
443 }
444 
445 /**
446  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
447  * @thread:       thread to queue work to
448  * @work:         struct binder_work to add to list
449  *
450  * Adds the work to the todo list of the thread. Doesn't set the process_todo
451  * flag, which means that (if it wasn't already set) the thread will go to
452  * sleep without handling this work when it calls read.
453  *
454  * Requires the proc->inner_lock to be held.
455  */
456 static void
457 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
458 					    struct binder_work *work)
459 {
460 	WARN_ON(!list_empty(&thread->waiting_thread_node));
461 	binder_enqueue_work_ilocked(work, &thread->todo);
462 }
463 
464 /**
465  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
466  * @thread:       thread to queue work to
467  * @work:         struct binder_work to add to list
468  *
469  * Adds the work to the todo list of the thread, and enables processing
470  * of the todo queue.
471  *
472  * Requires the proc->inner_lock to be held.
473  */
474 static void
475 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
476 				   struct binder_work *work)
477 {
478 	WARN_ON(!list_empty(&thread->waiting_thread_node));
479 	binder_enqueue_work_ilocked(work, &thread->todo);
480 	thread->process_todo = true;
481 }
482 
483 /**
484  * binder_enqueue_thread_work() - Add an item to the thread work list
485  * @thread:       thread to queue work to
486  * @work:         struct binder_work to add to list
487  *
488  * Adds the work to the todo list of the thread, and enables processing
489  * of the todo queue.
490  */
491 static void
492 binder_enqueue_thread_work(struct binder_thread *thread,
493 			   struct binder_work *work)
494 {
495 	binder_inner_proc_lock(thread->proc);
496 	binder_enqueue_thread_work_ilocked(thread, work);
497 	binder_inner_proc_unlock(thread->proc);
498 }
499 
500 static void
501 binder_dequeue_work_ilocked(struct binder_work *work)
502 {
503 	list_del_init(&work->entry);
504 }
505 
506 /**
507  * binder_dequeue_work() - Removes an item from the work list
508  * @proc:         binder_proc associated with list
509  * @work:         struct binder_work to remove from list
510  *
511  * Removes the specified work item from whatever list it is on.
512  * Can safely be called if work is not on any list.
513  */
514 static void
515 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
516 {
517 	binder_inner_proc_lock(proc);
518 	binder_dequeue_work_ilocked(work);
519 	binder_inner_proc_unlock(proc);
520 }
521 
522 static struct binder_work *binder_dequeue_work_head_ilocked(
523 					struct list_head *list)
524 {
525 	struct binder_work *w;
526 
527 	w = list_first_entry_or_null(list, struct binder_work, entry);
528 	if (w)
529 		list_del_init(&w->entry);
530 	return w;
531 }
532 
533 static void
534 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
535 static void binder_free_thread(struct binder_thread *thread);
536 static void binder_free_proc(struct binder_proc *proc);
537 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
538 
539 static bool binder_has_work_ilocked(struct binder_thread *thread,
540 				    bool do_proc_work)
541 {
542 	return thread->process_todo ||
543 		thread->looper_need_return ||
544 		(do_proc_work &&
545 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
546 }
547 
548 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
549 {
550 	bool has_work;
551 
552 	binder_inner_proc_lock(thread->proc);
553 	has_work = binder_has_work_ilocked(thread, do_proc_work);
554 	binder_inner_proc_unlock(thread->proc);
555 
556 	return has_work;
557 }
558 
559 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
560 {
561 	return !thread->transaction_stack &&
562 		binder_worklist_empty_ilocked(&thread->todo) &&
563 		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
564 				   BINDER_LOOPER_STATE_REGISTERED));
565 }
566 
567 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
568 					       bool sync)
569 {
570 	struct rb_node *n;
571 	struct binder_thread *thread;
572 
573 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
574 		thread = rb_entry(n, struct binder_thread, rb_node);
575 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
576 		    binder_available_for_proc_work_ilocked(thread)) {
577 			if (sync)
578 				wake_up_interruptible_sync(&thread->wait);
579 			else
580 				wake_up_interruptible(&thread->wait);
581 		}
582 	}
583 }
584 
585 /**
586  * binder_select_thread_ilocked() - selects a thread for doing proc work.
587  * @proc:	process to select a thread from
588  *
589  * Note that calling this function moves the thread off the waiting_threads
590  * list, so it can only be woken up by the caller of this function, or a
591  * signal. Therefore, callers *should* always wake up the thread this function
592  * returns.
593  *
594  * Return:	If there's a thread currently waiting for process work,
595  *		returns that thread. Otherwise returns NULL.
596  */
597 static struct binder_thread *
598 binder_select_thread_ilocked(struct binder_proc *proc)
599 {
600 	struct binder_thread *thread;
601 
602 	assert_spin_locked(&proc->inner_lock);
603 	thread = list_first_entry_or_null(&proc->waiting_threads,
604 					  struct binder_thread,
605 					  waiting_thread_node);
606 
607 	if (thread)
608 		list_del_init(&thread->waiting_thread_node);
609 
610 	return thread;
611 }
612 
613 /**
614  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
615  * @proc:	process to wake up a thread in
616  * @thread:	specific thread to wake-up (may be NULL)
617  * @sync:	whether to do a synchronous wake-up
618  *
619  * This function wakes up a thread in the @proc process.
620  * The caller may provide a specific thread to wake-up in
621  * the @thread parameter. If @thread is NULL, this function
622  * will wake up threads that have called poll().
623  *
624  * Note that for this function to work as expected, callers
625  * should first call binder_select_thread() to find a thread
626  * to handle the work (if they don't have a thread already),
627  * and pass the result into the @thread parameter.
628  */
629 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
630 					 struct binder_thread *thread,
631 					 bool sync)
632 {
633 	assert_spin_locked(&proc->inner_lock);
634 
635 	if (thread) {
636 		if (sync)
637 			wake_up_interruptible_sync(&thread->wait);
638 		else
639 			wake_up_interruptible(&thread->wait);
640 		return;
641 	}
642 
643 	/* Didn't find a thread waiting for proc work; this can happen
644 	 * in two scenarios:
645 	 * 1. All threads are busy handling transactions
646 	 *    In that case, one of those threads should call back into
647 	 *    the kernel driver soon and pick up this work.
648 	 * 2. Threads are using the (e)poll interface, in which case
649 	 *    they may be blocked on the waitqueue without having been
650 	 *    added to waiting_threads. For this case, we just iterate
651 	 *    over all threads not handling transaction work, and
652 	 *    wake them all up. We wake all because we don't know whether
653 	 *    a thread that called into (e)poll is handling non-binder
654 	 *    work currently.
655 	 */
656 	binder_wakeup_poll_threads_ilocked(proc, sync);
657 }
658 
659 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
660 {
661 	struct binder_thread *thread = binder_select_thread_ilocked(proc);
662 
663 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
664 }
665 
666 static void binder_set_nice(long nice)
667 {
668 	long min_nice;
669 
670 	if (can_nice(current, nice)) {
671 		set_user_nice(current, nice);
672 		return;
673 	}
674 	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
675 	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
676 		     "%d: nice value %ld not allowed use %ld instead\n",
677 		      current->pid, nice, min_nice);
678 	set_user_nice(current, min_nice);
679 	if (min_nice <= MAX_NICE)
680 		return;
681 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
682 }
683 
684 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
685 						   binder_uintptr_t ptr)
686 {
687 	struct rb_node *n = proc->nodes.rb_node;
688 	struct binder_node *node;
689 
690 	assert_spin_locked(&proc->inner_lock);
691 
692 	while (n) {
693 		node = rb_entry(n, struct binder_node, rb_node);
694 
695 		if (ptr < node->ptr)
696 			n = n->rb_left;
697 		else if (ptr > node->ptr)
698 			n = n->rb_right;
699 		else {
700 			/*
701 			 * take an implicit weak reference
702 			 * to ensure node stays alive until
703 			 * call to binder_put_node()
704 			 */
705 			binder_inc_node_tmpref_ilocked(node);
706 			return node;
707 		}
708 	}
709 	return NULL;
710 }
711 
712 static struct binder_node *binder_get_node(struct binder_proc *proc,
713 					   binder_uintptr_t ptr)
714 {
715 	struct binder_node *node;
716 
717 	binder_inner_proc_lock(proc);
718 	node = binder_get_node_ilocked(proc, ptr);
719 	binder_inner_proc_unlock(proc);
720 	return node;
721 }
722 
723 static struct binder_node *binder_init_node_ilocked(
724 						struct binder_proc *proc,
725 						struct binder_node *new_node,
726 						struct flat_binder_object *fp)
727 {
728 	struct rb_node **p = &proc->nodes.rb_node;
729 	struct rb_node *parent = NULL;
730 	struct binder_node *node;
731 	binder_uintptr_t ptr = fp ? fp->binder : 0;
732 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
733 	__u32 flags = fp ? fp->flags : 0;
734 
735 	assert_spin_locked(&proc->inner_lock);
736 
737 	while (*p) {
738 
739 		parent = *p;
740 		node = rb_entry(parent, struct binder_node, rb_node);
741 
742 		if (ptr < node->ptr)
743 			p = &(*p)->rb_left;
744 		else if (ptr > node->ptr)
745 			p = &(*p)->rb_right;
746 		else {
747 			/*
748 			 * A matching node is already in
749 			 * the rb tree. Abandon the init
750 			 * and return it.
751 			 */
752 			binder_inc_node_tmpref_ilocked(node);
753 			return node;
754 		}
755 	}
756 	node = new_node;
757 	binder_stats_created(BINDER_STAT_NODE);
758 	node->tmp_refs++;
759 	rb_link_node(&node->rb_node, parent, p);
760 	rb_insert_color(&node->rb_node, &proc->nodes);
761 	node->debug_id = atomic_inc_return(&binder_last_id);
762 	node->proc = proc;
763 	node->ptr = ptr;
764 	node->cookie = cookie;
765 	node->work.type = BINDER_WORK_NODE;
766 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
767 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
768 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
769 	spin_lock_init(&node->lock);
770 	INIT_LIST_HEAD(&node->work.entry);
771 	INIT_LIST_HEAD(&node->async_todo);
772 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
773 		     "%d:%d node %d u%016llx c%016llx created\n",
774 		     proc->pid, current->pid, node->debug_id,
775 		     (u64)node->ptr, (u64)node->cookie);
776 
777 	return node;
778 }
779 
780 static struct binder_node *binder_new_node(struct binder_proc *proc,
781 					   struct flat_binder_object *fp)
782 {
783 	struct binder_node *node;
784 	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
785 
786 	if (!new_node)
787 		return NULL;
788 	binder_inner_proc_lock(proc);
789 	node = binder_init_node_ilocked(proc, new_node, fp);
790 	binder_inner_proc_unlock(proc);
791 	if (node != new_node)
792 		/*
793 		 * The node was already added by another thread
794 		 */
795 		kfree(new_node);
796 
797 	return node;
798 }
799 
800 static void binder_free_node(struct binder_node *node)
801 {
802 	kfree(node);
803 	binder_stats_deleted(BINDER_STAT_NODE);
804 }
805 
806 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
807 				    int internal,
808 				    struct list_head *target_list)
809 {
810 	struct binder_proc *proc = node->proc;
811 
812 	assert_spin_locked(&node->lock);
813 	if (proc)
814 		assert_spin_locked(&proc->inner_lock);
815 	if (strong) {
816 		if (internal) {
817 			if (target_list == NULL &&
818 			    node->internal_strong_refs == 0 &&
819 			    !(node->proc &&
820 			      node == node->proc->context->binder_context_mgr_node &&
821 			      node->has_strong_ref)) {
822 				pr_err("invalid inc strong node for %d\n",
823 					node->debug_id);
824 				return -EINVAL;
825 			}
826 			node->internal_strong_refs++;
827 		} else
828 			node->local_strong_refs++;
829 		if (!node->has_strong_ref && target_list) {
830 			struct binder_thread *thread = container_of(target_list,
831 						    struct binder_thread, todo);
832 			binder_dequeue_work_ilocked(&node->work);
833 			BUG_ON(&thread->todo != target_list);
834 			binder_enqueue_deferred_thread_work_ilocked(thread,
835 								   &node->work);
836 		}
837 	} else {
838 		if (!internal)
839 			node->local_weak_refs++;
840 		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
841 			if (target_list == NULL) {
842 				pr_err("invalid inc weak node for %d\n",
843 					node->debug_id);
844 				return -EINVAL;
845 			}
846 			/*
847 			 * See comment above
848 			 */
849 			binder_enqueue_work_ilocked(&node->work, target_list);
850 		}
851 	}
852 	return 0;
853 }
854 
855 static int binder_inc_node(struct binder_node *node, int strong, int internal,
856 			   struct list_head *target_list)
857 {
858 	int ret;
859 
860 	binder_node_inner_lock(node);
861 	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
862 	binder_node_inner_unlock(node);
863 
864 	return ret;
865 }
866 
867 static bool binder_dec_node_nilocked(struct binder_node *node,
868 				     int strong, int internal)
869 {
870 	struct binder_proc *proc = node->proc;
871 
872 	assert_spin_locked(&node->lock);
873 	if (proc)
874 		assert_spin_locked(&proc->inner_lock);
875 	if (strong) {
876 		if (internal)
877 			node->internal_strong_refs--;
878 		else
879 			node->local_strong_refs--;
880 		if (node->local_strong_refs || node->internal_strong_refs)
881 			return false;
882 	} else {
883 		if (!internal)
884 			node->local_weak_refs--;
885 		if (node->local_weak_refs || node->tmp_refs ||
886 				!hlist_empty(&node->refs))
887 			return false;
888 	}
889 
890 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
891 		if (list_empty(&node->work.entry)) {
892 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
893 			binder_wakeup_proc_ilocked(proc);
894 		}
895 	} else {
896 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
897 		    !node->local_weak_refs && !node->tmp_refs) {
898 			if (proc) {
899 				binder_dequeue_work_ilocked(&node->work);
900 				rb_erase(&node->rb_node, &proc->nodes);
901 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
902 					     "refless node %d deleted\n",
903 					     node->debug_id);
904 			} else {
905 				BUG_ON(!list_empty(&node->work.entry));
906 				spin_lock(&binder_dead_nodes_lock);
907 				/*
908 				 * tmp_refs could have changed so
909 				 * check it again
910 				 */
911 				if (node->tmp_refs) {
912 					spin_unlock(&binder_dead_nodes_lock);
913 					return false;
914 				}
915 				hlist_del(&node->dead_node);
916 				spin_unlock(&binder_dead_nodes_lock);
917 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
918 					     "dead node %d deleted\n",
919 					     node->debug_id);
920 			}
921 			return true;
922 		}
923 	}
924 	return false;
925 }
926 
927 static void binder_dec_node(struct binder_node *node, int strong, int internal)
928 {
929 	bool free_node;
930 
931 	binder_node_inner_lock(node);
932 	free_node = binder_dec_node_nilocked(node, strong, internal);
933 	binder_node_inner_unlock(node);
934 	if (free_node)
935 		binder_free_node(node);
936 }
937 
938 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
939 {
940 	/*
941 	 * No call to binder_inc_node() is needed since we
942 	 * don't need to inform userspace of any changes to
943 	 * tmp_refs
944 	 */
945 	node->tmp_refs++;
946 }
947 
948 /**
949  * binder_inc_node_tmpref() - take a temporary reference on node
950  * @node:	node to reference
951  *
952  * Take reference on node to prevent the node from being freed
953  * while referenced only by a local variable. The inner lock is
954  * needed to serialize with the node work on the queue (which
955  * isn't needed after the node is dead). If the node is dead
956  * (node->proc is NULL), use binder_dead_nodes_lock to protect
957  * node->tmp_refs against dead-node-only cases where the node
958  * lock cannot be acquired (eg traversing the dead node list to
959  * print nodes)
960  */
961 static void binder_inc_node_tmpref(struct binder_node *node)
962 {
963 	binder_node_lock(node);
964 	if (node->proc)
965 		binder_inner_proc_lock(node->proc);
966 	else
967 		spin_lock(&binder_dead_nodes_lock);
968 	binder_inc_node_tmpref_ilocked(node);
969 	if (node->proc)
970 		binder_inner_proc_unlock(node->proc);
971 	else
972 		spin_unlock(&binder_dead_nodes_lock);
973 	binder_node_unlock(node);
974 }
975 
976 /**
977  * binder_dec_node_tmpref() - remove a temporary reference on node
978  * @node:	node to reference
979  *
980  * Release temporary reference on node taken via binder_inc_node_tmpref()
981  */
982 static void binder_dec_node_tmpref(struct binder_node *node)
983 {
984 	bool free_node;
985 
986 	binder_node_inner_lock(node);
987 	if (!node->proc)
988 		spin_lock(&binder_dead_nodes_lock);
989 	else
990 		__acquire(&binder_dead_nodes_lock);
991 	node->tmp_refs--;
992 	BUG_ON(node->tmp_refs < 0);
993 	if (!node->proc)
994 		spin_unlock(&binder_dead_nodes_lock);
995 	else
996 		__release(&binder_dead_nodes_lock);
997 	/*
998 	 * Call binder_dec_node() to check if all refcounts are 0
999 	 * and cleanup is needed. Calling with strong=0 and internal=1
1000 	 * causes no actual reference to be released in binder_dec_node().
1001 	 * If that changes, a change is needed here too.
1002 	 */
1003 	free_node = binder_dec_node_nilocked(node, 0, 1);
1004 	binder_node_inner_unlock(node);
1005 	if (free_node)
1006 		binder_free_node(node);
1007 }
1008 
1009 static void binder_put_node(struct binder_node *node)
1010 {
1011 	binder_dec_node_tmpref(node);
1012 }
1013 
1014 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1015 						 u32 desc, bool need_strong_ref)
1016 {
1017 	struct rb_node *n = proc->refs_by_desc.rb_node;
1018 	struct binder_ref *ref;
1019 
1020 	while (n) {
1021 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1022 
1023 		if (desc < ref->data.desc) {
1024 			n = n->rb_left;
1025 		} else if (desc > ref->data.desc) {
1026 			n = n->rb_right;
1027 		} else if (need_strong_ref && !ref->data.strong) {
1028 			binder_user_error("tried to use weak ref as strong ref\n");
1029 			return NULL;
1030 		} else {
1031 			return ref;
1032 		}
1033 	}
1034 	return NULL;
1035 }
1036 
1037 /**
1038  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1039  * @proc:	binder_proc that owns the ref
1040  * @node:	binder_node of target
1041  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1042  *
1043  * Look up the ref for the given node and return it if it exists
1044  *
1045  * If it doesn't exist and the caller provides a newly allocated
1046  * ref, initialize the fields of the newly allocated ref and insert
1047  * into the given proc rb_trees and node refs list.
1048  *
1049  * Return:	the ref for node. It is possible that another thread
1050  *		allocated/initialized the ref first in which case the
1051  *		returned ref would be different than the passed-in
1052  *		new_ref. new_ref must be kfree'd by the caller in
1053  *		this case.
1054  */
1055 static struct binder_ref *binder_get_ref_for_node_olocked(
1056 					struct binder_proc *proc,
1057 					struct binder_node *node,
1058 					struct binder_ref *new_ref)
1059 {
1060 	struct binder_context *context = proc->context;
1061 	struct rb_node **p = &proc->refs_by_node.rb_node;
1062 	struct rb_node *parent = NULL;
1063 	struct binder_ref *ref;
1064 	struct rb_node *n;
1065 
1066 	while (*p) {
1067 		parent = *p;
1068 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1069 
1070 		if (node < ref->node)
1071 			p = &(*p)->rb_left;
1072 		else if (node > ref->node)
1073 			p = &(*p)->rb_right;
1074 		else
1075 			return ref;
1076 	}
1077 	if (!new_ref)
1078 		return NULL;
1079 
1080 	binder_stats_created(BINDER_STAT_REF);
1081 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1082 	new_ref->proc = proc;
1083 	new_ref->node = node;
1084 	rb_link_node(&new_ref->rb_node_node, parent, p);
1085 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1086 
1087 	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1088 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1089 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1090 		if (ref->data.desc > new_ref->data.desc)
1091 			break;
1092 		new_ref->data.desc = ref->data.desc + 1;
1093 	}
1094 
1095 	p = &proc->refs_by_desc.rb_node;
1096 	while (*p) {
1097 		parent = *p;
1098 		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1099 
1100 		if (new_ref->data.desc < ref->data.desc)
1101 			p = &(*p)->rb_left;
1102 		else if (new_ref->data.desc > ref->data.desc)
1103 			p = &(*p)->rb_right;
1104 		else
1105 			BUG();
1106 	}
1107 	rb_link_node(&new_ref->rb_node_desc, parent, p);
1108 	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1109 
1110 	binder_node_lock(node);
1111 	hlist_add_head(&new_ref->node_entry, &node->refs);
1112 
1113 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1114 		     "%d new ref %d desc %d for node %d\n",
1115 		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1116 		      node->debug_id);
1117 	binder_node_unlock(node);
1118 	return new_ref;
1119 }
1120 
1121 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1122 {
1123 	bool delete_node = false;
1124 
1125 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1126 		     "%d delete ref %d desc %d for node %d\n",
1127 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1128 		      ref->node->debug_id);
1129 
1130 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1131 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1132 
1133 	binder_node_inner_lock(ref->node);
1134 	if (ref->data.strong)
1135 		binder_dec_node_nilocked(ref->node, 1, 1);
1136 
1137 	hlist_del(&ref->node_entry);
1138 	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1139 	binder_node_inner_unlock(ref->node);
1140 	/*
1141 	 * Clear ref->node unless we want the caller to free the node
1142 	 */
1143 	if (!delete_node) {
1144 		/*
1145 		 * The caller uses ref->node to determine
1146 		 * whether the node needs to be freed. Clear
1147 		 * it since the node is still alive.
1148 		 */
1149 		ref->node = NULL;
1150 	}
1151 
1152 	if (ref->death) {
1153 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1154 			     "%d delete ref %d desc %d has death notification\n",
1155 			      ref->proc->pid, ref->data.debug_id,
1156 			      ref->data.desc);
1157 		binder_dequeue_work(ref->proc, &ref->death->work);
1158 		binder_stats_deleted(BINDER_STAT_DEATH);
1159 	}
1160 	binder_stats_deleted(BINDER_STAT_REF);
1161 }
1162 
1163 /**
1164  * binder_inc_ref_olocked() - increment the ref for given handle
1165  * @ref:         ref to be incremented
1166  * @strong:      if true, strong increment, else weak
1167  * @target_list: list to queue node work on
1168  *
1169  * Increment the ref. @ref->proc->outer_lock must be held on entry
1170  *
1171  * Return: 0, if successful, else errno
1172  */
1173 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1174 				  struct list_head *target_list)
1175 {
1176 	int ret;
1177 
1178 	if (strong) {
1179 		if (ref->data.strong == 0) {
1180 			ret = binder_inc_node(ref->node, 1, 1, target_list);
1181 			if (ret)
1182 				return ret;
1183 		}
1184 		ref->data.strong++;
1185 	} else {
1186 		if (ref->data.weak == 0) {
1187 			ret = binder_inc_node(ref->node, 0, 1, target_list);
1188 			if (ret)
1189 				return ret;
1190 		}
1191 		ref->data.weak++;
1192 	}
1193 	return 0;
1194 }
1195 
1196 /**
1197  * binder_dec_ref_olocked() - dec the ref for given handle
1198  * @ref:	ref to be decremented
1199  * @strong:	if true, strong decrement, else weak
1200  *
1201  * Decrement the ref.
1202  *
1203  * Return: %true if ref is cleaned up and ready to be freed.
1204  */
1205 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1206 {
1207 	if (strong) {
1208 		if (ref->data.strong == 0) {
1209 			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1210 					  ref->proc->pid, ref->data.debug_id,
1211 					  ref->data.desc, ref->data.strong,
1212 					  ref->data.weak);
1213 			return false;
1214 		}
1215 		ref->data.strong--;
1216 		if (ref->data.strong == 0)
1217 			binder_dec_node(ref->node, strong, 1);
1218 	} else {
1219 		if (ref->data.weak == 0) {
1220 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1221 					  ref->proc->pid, ref->data.debug_id,
1222 					  ref->data.desc, ref->data.strong,
1223 					  ref->data.weak);
1224 			return false;
1225 		}
1226 		ref->data.weak--;
1227 	}
1228 	if (ref->data.strong == 0 && ref->data.weak == 0) {
1229 		binder_cleanup_ref_olocked(ref);
1230 		return true;
1231 	}
1232 	return false;
1233 }
1234 
1235 /**
1236  * binder_get_node_from_ref() - get the node from the given proc/desc
1237  * @proc:	proc containing the ref
1238  * @desc:	the handle associated with the ref
1239  * @need_strong_ref: if true, only return node if ref is strong
1240  * @rdata:	the id/refcount data for the ref
1241  *
1242  * Given a proc and ref handle, return the associated binder_node
1243  *
1244  * Return: a binder_node or NULL if not found or not strong when strong required
1245  */
1246 static struct binder_node *binder_get_node_from_ref(
1247 		struct binder_proc *proc,
1248 		u32 desc, bool need_strong_ref,
1249 		struct binder_ref_data *rdata)
1250 {
1251 	struct binder_node *node;
1252 	struct binder_ref *ref;
1253 
1254 	binder_proc_lock(proc);
1255 	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1256 	if (!ref)
1257 		goto err_no_ref;
1258 	node = ref->node;
1259 	/*
1260 	 * Take an implicit reference on the node to ensure
1261 	 * it stays alive until the call to binder_put_node()
1262 	 */
1263 	binder_inc_node_tmpref(node);
1264 	if (rdata)
1265 		*rdata = ref->data;
1266 	binder_proc_unlock(proc);
1267 
1268 	return node;
1269 
1270 err_no_ref:
1271 	binder_proc_unlock(proc);
1272 	return NULL;
1273 }
1274 
1275 /**
1276  * binder_free_ref() - free the binder_ref
1277  * @ref:	ref to free
1278  *
1279  * Free the binder_ref. Free the binder_node indicated by ref->node
1280  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1281  */
1282 static void binder_free_ref(struct binder_ref *ref)
1283 {
1284 	if (ref->node)
1285 		binder_free_node(ref->node);
1286 	kfree(ref->death);
1287 	kfree(ref);
1288 }
1289 
1290 /**
1291  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1292  * @proc:	proc containing the ref
1293  * @desc:	the handle associated with the ref
1294  * @increment:	true=inc reference, false=dec reference
1295  * @strong:	true=strong reference, false=weak reference
1296  * @rdata:	the id/refcount data for the ref
1297  *
1298  * Given a proc and ref handle, increment or decrement the ref
1299  * according to "increment" arg.
1300  *
1301  * Return: 0 if successful, else errno
1302  */
1303 static int binder_update_ref_for_handle(struct binder_proc *proc,
1304 		uint32_t desc, bool increment, bool strong,
1305 		struct binder_ref_data *rdata)
1306 {
1307 	int ret = 0;
1308 	struct binder_ref *ref;
1309 	bool delete_ref = false;
1310 
1311 	binder_proc_lock(proc);
1312 	ref = binder_get_ref_olocked(proc, desc, strong);
1313 	if (!ref) {
1314 		ret = -EINVAL;
1315 		goto err_no_ref;
1316 	}
1317 	if (increment)
1318 		ret = binder_inc_ref_olocked(ref, strong, NULL);
1319 	else
1320 		delete_ref = binder_dec_ref_olocked(ref, strong);
1321 
1322 	if (rdata)
1323 		*rdata = ref->data;
1324 	binder_proc_unlock(proc);
1325 
1326 	if (delete_ref)
1327 		binder_free_ref(ref);
1328 	return ret;
1329 
1330 err_no_ref:
1331 	binder_proc_unlock(proc);
1332 	return ret;
1333 }
1334 
1335 /**
1336  * binder_dec_ref_for_handle() - dec the ref for given handle
1337  * @proc:	proc containing the ref
1338  * @desc:	the handle associated with the ref
1339  * @strong:	true=strong reference, false=weak reference
1340  * @rdata:	the id/refcount data for the ref
1341  *
1342  * Just calls binder_update_ref_for_handle() to decrement the ref.
1343  *
1344  * Return: 0 if successful, else errno
1345  */
1346 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1347 		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1348 {
1349 	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1350 }
1351 
1352 
1353 /**
1354  * binder_inc_ref_for_node() - increment the ref for given proc/node
1355  * @proc:	 proc containing the ref
1356  * @node:	 target node
1357  * @strong:	 true=strong reference, false=weak reference
1358  * @target_list: worklist to use if node is incremented
1359  * @rdata:	 the id/refcount data for the ref
1360  *
1361  * Given a proc and node, increment the ref. Create the ref if it
1362  * doesn't already exist
1363  *
1364  * Return: 0 if successful, else errno
1365  */
1366 static int binder_inc_ref_for_node(struct binder_proc *proc,
1367 			struct binder_node *node,
1368 			bool strong,
1369 			struct list_head *target_list,
1370 			struct binder_ref_data *rdata)
1371 {
1372 	struct binder_ref *ref;
1373 	struct binder_ref *new_ref = NULL;
1374 	int ret = 0;
1375 
1376 	binder_proc_lock(proc);
1377 	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1378 	if (!ref) {
1379 		binder_proc_unlock(proc);
1380 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1381 		if (!new_ref)
1382 			return -ENOMEM;
1383 		binder_proc_lock(proc);
1384 		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1385 	}
1386 	ret = binder_inc_ref_olocked(ref, strong, target_list);
1387 	*rdata = ref->data;
1388 	if (ret && ref == new_ref) {
1389 		/*
1390 		 * Cleanup the failed reference here as the target
1391 		 * could now be dead and have already released its
1392 		 * references by now. Calling on the new reference
1393 		 * with strong=0 and a tmp_refs will not decrement
1394 		 * the node. The new_ref gets kfree'd below.
1395 		 */
1396 		binder_cleanup_ref_olocked(new_ref);
1397 		ref = NULL;
1398 	}
1399 
1400 	binder_proc_unlock(proc);
1401 	if (new_ref && ref != new_ref)
1402 		/*
1403 		 * Another thread created the ref first so
1404 		 * free the one we allocated
1405 		 */
1406 		kfree(new_ref);
1407 	return ret;
1408 }
1409 
1410 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1411 					   struct binder_transaction *t)
1412 {
1413 	BUG_ON(!target_thread);
1414 	assert_spin_locked(&target_thread->proc->inner_lock);
1415 	BUG_ON(target_thread->transaction_stack != t);
1416 	BUG_ON(target_thread->transaction_stack->from != target_thread);
1417 	target_thread->transaction_stack =
1418 		target_thread->transaction_stack->from_parent;
1419 	t->from = NULL;
1420 }
1421 
1422 /**
1423  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1424  * @thread:	thread to decrement
1425  *
1426  * A thread needs to be kept alive while being used to create or
1427  * handle a transaction. binder_get_txn_from() is used to safely
1428  * extract t->from from a binder_transaction and keep the thread
1429  * indicated by t->from from being freed. When done with that
1430  * binder_thread, this function is called to decrement the
1431  * tmp_ref and free if appropriate (thread has been released
1432  * and no transaction being processed by the driver)
1433  */
1434 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1435 {
1436 	/*
1437 	 * atomic is used to protect the counter value while
1438 	 * it cannot reach zero or thread->is_dead is false
1439 	 */
1440 	binder_inner_proc_lock(thread->proc);
1441 	atomic_dec(&thread->tmp_ref);
1442 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1443 		binder_inner_proc_unlock(thread->proc);
1444 		binder_free_thread(thread);
1445 		return;
1446 	}
1447 	binder_inner_proc_unlock(thread->proc);
1448 }
1449 
1450 /**
1451  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1452  * @proc:	proc to decrement
1453  *
1454  * A binder_proc needs to be kept alive while being used to create or
1455  * handle a transaction. proc->tmp_ref is incremented when
1456  * creating a new transaction or the binder_proc is currently in-use
1457  * by threads that are being released. When done with the binder_proc,
1458  * this function is called to decrement the counter and free the
1459  * proc if appropriate (proc has been released, all threads have
1460  * been released and not currenly in-use to process a transaction).
1461  */
1462 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1463 {
1464 	binder_inner_proc_lock(proc);
1465 	proc->tmp_ref--;
1466 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1467 			!proc->tmp_ref) {
1468 		binder_inner_proc_unlock(proc);
1469 		binder_free_proc(proc);
1470 		return;
1471 	}
1472 	binder_inner_proc_unlock(proc);
1473 }
1474 
1475 /**
1476  * binder_get_txn_from() - safely extract the "from" thread in transaction
1477  * @t:	binder transaction for t->from
1478  *
1479  * Atomically return the "from" thread and increment the tmp_ref
1480  * count for the thread to ensure it stays alive until
1481  * binder_thread_dec_tmpref() is called.
1482  *
1483  * Return: the value of t->from
1484  */
1485 static struct binder_thread *binder_get_txn_from(
1486 		struct binder_transaction *t)
1487 {
1488 	struct binder_thread *from;
1489 
1490 	spin_lock(&t->lock);
1491 	from = t->from;
1492 	if (from)
1493 		atomic_inc(&from->tmp_ref);
1494 	spin_unlock(&t->lock);
1495 	return from;
1496 }
1497 
1498 /**
1499  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1500  * @t:	binder transaction for t->from
1501  *
1502  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1503  * to guarantee that the thread cannot be released while operating on it.
1504  * The caller must call binder_inner_proc_unlock() to release the inner lock
1505  * as well as call binder_dec_thread_txn() to release the reference.
1506  *
1507  * Return: the value of t->from
1508  */
1509 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1510 		struct binder_transaction *t)
1511 	__acquires(&t->from->proc->inner_lock)
1512 {
1513 	struct binder_thread *from;
1514 
1515 	from = binder_get_txn_from(t);
1516 	if (!from) {
1517 		__acquire(&from->proc->inner_lock);
1518 		return NULL;
1519 	}
1520 	binder_inner_proc_lock(from->proc);
1521 	if (t->from) {
1522 		BUG_ON(from != t->from);
1523 		return from;
1524 	}
1525 	binder_inner_proc_unlock(from->proc);
1526 	__acquire(&from->proc->inner_lock);
1527 	binder_thread_dec_tmpref(from);
1528 	return NULL;
1529 }
1530 
1531 /**
1532  * binder_free_txn_fixups() - free unprocessed fd fixups
1533  * @t:	binder transaction for t->from
1534  *
1535  * If the transaction is being torn down prior to being
1536  * processed by the target process, free all of the
1537  * fd fixups and fput the file structs. It is safe to
1538  * call this function after the fixups have been
1539  * processed -- in that case, the list will be empty.
1540  */
1541 static void binder_free_txn_fixups(struct binder_transaction *t)
1542 {
1543 	struct binder_txn_fd_fixup *fixup, *tmp;
1544 
1545 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1546 		fput(fixup->file);
1547 		if (fixup->target_fd >= 0)
1548 			put_unused_fd(fixup->target_fd);
1549 		list_del(&fixup->fixup_entry);
1550 		kfree(fixup);
1551 	}
1552 }
1553 
1554 static void binder_txn_latency_free(struct binder_transaction *t)
1555 {
1556 	int from_proc, from_thread, to_proc, to_thread;
1557 
1558 	spin_lock(&t->lock);
1559 	from_proc = t->from ? t->from->proc->pid : 0;
1560 	from_thread = t->from ? t->from->pid : 0;
1561 	to_proc = t->to_proc ? t->to_proc->pid : 0;
1562 	to_thread = t->to_thread ? t->to_thread->pid : 0;
1563 	spin_unlock(&t->lock);
1564 
1565 	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1566 }
1567 
1568 static void binder_free_transaction(struct binder_transaction *t)
1569 {
1570 	struct binder_proc *target_proc = t->to_proc;
1571 
1572 	if (target_proc) {
1573 		binder_inner_proc_lock(target_proc);
1574 		target_proc->outstanding_txns--;
1575 		if (target_proc->outstanding_txns < 0)
1576 			pr_warn("%s: Unexpected outstanding_txns %d\n",
1577 				__func__, target_proc->outstanding_txns);
1578 		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1579 			wake_up_interruptible_all(&target_proc->freeze_wait);
1580 		if (t->buffer)
1581 			t->buffer->transaction = NULL;
1582 		binder_inner_proc_unlock(target_proc);
1583 	}
1584 	if (trace_binder_txn_latency_free_enabled())
1585 		binder_txn_latency_free(t);
1586 	/*
1587 	 * If the transaction has no target_proc, then
1588 	 * t->buffer->transaction has already been cleared.
1589 	 */
1590 	binder_free_txn_fixups(t);
1591 	kfree(t);
1592 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1593 }
1594 
1595 static void binder_send_failed_reply(struct binder_transaction *t,
1596 				     uint32_t error_code)
1597 {
1598 	struct binder_thread *target_thread;
1599 	struct binder_transaction *next;
1600 
1601 	BUG_ON(t->flags & TF_ONE_WAY);
1602 	while (1) {
1603 		target_thread = binder_get_txn_from_and_acq_inner(t);
1604 		if (target_thread) {
1605 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1606 				     "send failed reply for transaction %d to %d:%d\n",
1607 				      t->debug_id,
1608 				      target_thread->proc->pid,
1609 				      target_thread->pid);
1610 
1611 			binder_pop_transaction_ilocked(target_thread, t);
1612 			if (target_thread->reply_error.cmd == BR_OK) {
1613 				target_thread->reply_error.cmd = error_code;
1614 				binder_enqueue_thread_work_ilocked(
1615 					target_thread,
1616 					&target_thread->reply_error.work);
1617 				wake_up_interruptible(&target_thread->wait);
1618 			} else {
1619 				/*
1620 				 * Cannot get here for normal operation, but
1621 				 * we can if multiple synchronous transactions
1622 				 * are sent without blocking for responses.
1623 				 * Just ignore the 2nd error in this case.
1624 				 */
1625 				pr_warn("Unexpected reply error: %u\n",
1626 					target_thread->reply_error.cmd);
1627 			}
1628 			binder_inner_proc_unlock(target_thread->proc);
1629 			binder_thread_dec_tmpref(target_thread);
1630 			binder_free_transaction(t);
1631 			return;
1632 		}
1633 		__release(&target_thread->proc->inner_lock);
1634 		next = t->from_parent;
1635 
1636 		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1637 			     "send failed reply for transaction %d, target dead\n",
1638 			     t->debug_id);
1639 
1640 		binder_free_transaction(t);
1641 		if (next == NULL) {
1642 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1643 				     "reply failed, no target thread at root\n");
1644 			return;
1645 		}
1646 		t = next;
1647 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1648 			     "reply failed, no target thread -- retry %d\n",
1649 			      t->debug_id);
1650 	}
1651 }
1652 
1653 /**
1654  * binder_cleanup_transaction() - cleans up undelivered transaction
1655  * @t:		transaction that needs to be cleaned up
1656  * @reason:	reason the transaction wasn't delivered
1657  * @error_code:	error to return to caller (if synchronous call)
1658  */
1659 static void binder_cleanup_transaction(struct binder_transaction *t,
1660 				       const char *reason,
1661 				       uint32_t error_code)
1662 {
1663 	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1664 		binder_send_failed_reply(t, error_code);
1665 	} else {
1666 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1667 			"undelivered transaction %d, %s\n",
1668 			t->debug_id, reason);
1669 		binder_free_transaction(t);
1670 	}
1671 }
1672 
1673 /**
1674  * binder_get_object() - gets object and checks for valid metadata
1675  * @proc:	binder_proc owning the buffer
1676  * @u:		sender's user pointer to base of buffer
1677  * @buffer:	binder_buffer that we're parsing.
1678  * @offset:	offset in the @buffer at which to validate an object.
1679  * @object:	struct binder_object to read into
1680  *
1681  * Copy the binder object at the given offset into @object. If @u is
1682  * provided then the copy is from the sender's buffer. If not, then
1683  * it is copied from the target's @buffer.
1684  *
1685  * Return:	If there's a valid metadata object at @offset, the
1686  *		size of that object. Otherwise, it returns zero. The object
1687  *		is read into the struct binder_object pointed to by @object.
1688  */
1689 static size_t binder_get_object(struct binder_proc *proc,
1690 				const void __user *u,
1691 				struct binder_buffer *buffer,
1692 				unsigned long offset,
1693 				struct binder_object *object)
1694 {
1695 	size_t read_size;
1696 	struct binder_object_header *hdr;
1697 	size_t object_size = 0;
1698 
1699 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1700 	if (offset > buffer->data_size || read_size < sizeof(*hdr))
1701 		return 0;
1702 	if (u) {
1703 		if (copy_from_user(object, u + offset, read_size))
1704 			return 0;
1705 	} else {
1706 		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1707 						  offset, read_size))
1708 			return 0;
1709 	}
1710 
1711 	/* Ok, now see if we read a complete object. */
1712 	hdr = &object->hdr;
1713 	switch (hdr->type) {
1714 	case BINDER_TYPE_BINDER:
1715 	case BINDER_TYPE_WEAK_BINDER:
1716 	case BINDER_TYPE_HANDLE:
1717 	case BINDER_TYPE_WEAK_HANDLE:
1718 		object_size = sizeof(struct flat_binder_object);
1719 		break;
1720 	case BINDER_TYPE_FD:
1721 		object_size = sizeof(struct binder_fd_object);
1722 		break;
1723 	case BINDER_TYPE_PTR:
1724 		object_size = sizeof(struct binder_buffer_object);
1725 		break;
1726 	case BINDER_TYPE_FDA:
1727 		object_size = sizeof(struct binder_fd_array_object);
1728 		break;
1729 	default:
1730 		return 0;
1731 	}
1732 	if (offset <= buffer->data_size - object_size &&
1733 	    buffer->data_size >= object_size)
1734 		return object_size;
1735 	else
1736 		return 0;
1737 }
1738 
1739 /**
1740  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1741  * @proc:	binder_proc owning the buffer
1742  * @b:		binder_buffer containing the object
1743  * @object:	struct binder_object to read into
1744  * @index:	index in offset array at which the binder_buffer_object is
1745  *		located
1746  * @start_offset: points to the start of the offset array
1747  * @object_offsetp: offset of @object read from @b
1748  * @num_valid:	the number of valid offsets in the offset array
1749  *
1750  * Return:	If @index is within the valid range of the offset array
1751  *		described by @start and @num_valid, and if there's a valid
1752  *		binder_buffer_object at the offset found in index @index
1753  *		of the offset array, that object is returned. Otherwise,
1754  *		%NULL is returned.
1755  *		Note that the offset found in index @index itself is not
1756  *		verified; this function assumes that @num_valid elements
1757  *		from @start were previously verified to have valid offsets.
1758  *		If @object_offsetp is non-NULL, then the offset within
1759  *		@b is written to it.
1760  */
1761 static struct binder_buffer_object *binder_validate_ptr(
1762 						struct binder_proc *proc,
1763 						struct binder_buffer *b,
1764 						struct binder_object *object,
1765 						binder_size_t index,
1766 						binder_size_t start_offset,
1767 						binder_size_t *object_offsetp,
1768 						binder_size_t num_valid)
1769 {
1770 	size_t object_size;
1771 	binder_size_t object_offset;
1772 	unsigned long buffer_offset;
1773 
1774 	if (index >= num_valid)
1775 		return NULL;
1776 
1777 	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1778 	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1779 					  b, buffer_offset,
1780 					  sizeof(object_offset)))
1781 		return NULL;
1782 	object_size = binder_get_object(proc, NULL, b, object_offset, object);
1783 	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1784 		return NULL;
1785 	if (object_offsetp)
1786 		*object_offsetp = object_offset;
1787 
1788 	return &object->bbo;
1789 }
1790 
1791 /**
1792  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1793  * @proc:		binder_proc owning the buffer
1794  * @b:			transaction buffer
1795  * @objects_start_offset: offset to start of objects buffer
1796  * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1797  * @fixup_offset:	start offset in @buffer to fix up
1798  * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1799  * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1800  *
1801  * Return:		%true if a fixup in buffer @buffer at offset @offset is
1802  *			allowed.
1803  *
1804  * For safety reasons, we only allow fixups inside a buffer to happen
1805  * at increasing offsets; additionally, we only allow fixup on the last
1806  * buffer object that was verified, or one of its parents.
1807  *
1808  * Example of what is allowed:
1809  *
1810  * A
1811  *   B (parent = A, offset = 0)
1812  *   C (parent = A, offset = 16)
1813  *     D (parent = C, offset = 0)
1814  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1815  *
1816  * Examples of what is not allowed:
1817  *
1818  * Decreasing offsets within the same parent:
1819  * A
1820  *   C (parent = A, offset = 16)
1821  *   B (parent = A, offset = 0) // decreasing offset within A
1822  *
1823  * Referring to a parent that wasn't the last object or any of its parents:
1824  * A
1825  *   B (parent = A, offset = 0)
1826  *   C (parent = A, offset = 0)
1827  *   C (parent = A, offset = 16)
1828  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1829  */
1830 static bool binder_validate_fixup(struct binder_proc *proc,
1831 				  struct binder_buffer *b,
1832 				  binder_size_t objects_start_offset,
1833 				  binder_size_t buffer_obj_offset,
1834 				  binder_size_t fixup_offset,
1835 				  binder_size_t last_obj_offset,
1836 				  binder_size_t last_min_offset)
1837 {
1838 	if (!last_obj_offset) {
1839 		/* Nothing to fix up in */
1840 		return false;
1841 	}
1842 
1843 	while (last_obj_offset != buffer_obj_offset) {
1844 		unsigned long buffer_offset;
1845 		struct binder_object last_object;
1846 		struct binder_buffer_object *last_bbo;
1847 		size_t object_size = binder_get_object(proc, NULL, b,
1848 						       last_obj_offset,
1849 						       &last_object);
1850 		if (object_size != sizeof(*last_bbo))
1851 			return false;
1852 
1853 		last_bbo = &last_object.bbo;
1854 		/*
1855 		 * Safe to retrieve the parent of last_obj, since it
1856 		 * was already previously verified by the driver.
1857 		 */
1858 		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1859 			return false;
1860 		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1861 		buffer_offset = objects_start_offset +
1862 			sizeof(binder_size_t) * last_bbo->parent;
1863 		if (binder_alloc_copy_from_buffer(&proc->alloc,
1864 						  &last_obj_offset,
1865 						  b, buffer_offset,
1866 						  sizeof(last_obj_offset)))
1867 			return false;
1868 	}
1869 	return (fixup_offset >= last_min_offset);
1870 }
1871 
1872 /**
1873  * struct binder_task_work_cb - for deferred close
1874  *
1875  * @twork:                callback_head for task work
1876  * @fd:                   fd to close
1877  *
1878  * Structure to pass task work to be handled after
1879  * returning from binder_ioctl() via task_work_add().
1880  */
1881 struct binder_task_work_cb {
1882 	struct callback_head twork;
1883 	struct file *file;
1884 };
1885 
1886 /**
1887  * binder_do_fd_close() - close list of file descriptors
1888  * @twork:	callback head for task work
1889  *
1890  * It is not safe to call ksys_close() during the binder_ioctl()
1891  * function if there is a chance that binder's own file descriptor
1892  * might be closed. This is to meet the requirements for using
1893  * fdget() (see comments for __fget_light()). Therefore use
1894  * task_work_add() to schedule the close operation once we have
1895  * returned from binder_ioctl(). This function is a callback
1896  * for that mechanism and does the actual ksys_close() on the
1897  * given file descriptor.
1898  */
1899 static void binder_do_fd_close(struct callback_head *twork)
1900 {
1901 	struct binder_task_work_cb *twcb = container_of(twork,
1902 			struct binder_task_work_cb, twork);
1903 
1904 	fput(twcb->file);
1905 	kfree(twcb);
1906 }
1907 
1908 /**
1909  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1910  * @fd:		file-descriptor to close
1911  *
1912  * See comments in binder_do_fd_close(). This function is used to schedule
1913  * a file-descriptor to be closed after returning from binder_ioctl().
1914  */
1915 static void binder_deferred_fd_close(int fd)
1916 {
1917 	struct binder_task_work_cb *twcb;
1918 
1919 	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1920 	if (!twcb)
1921 		return;
1922 	init_task_work(&twcb->twork, binder_do_fd_close);
1923 	twcb->file = close_fd_get_file(fd);
1924 	if (twcb->file) {
1925 		// pin it until binder_do_fd_close(); see comments there
1926 		get_file(twcb->file);
1927 		filp_close(twcb->file, current->files);
1928 		task_work_add(current, &twcb->twork, TWA_RESUME);
1929 	} else {
1930 		kfree(twcb);
1931 	}
1932 }
1933 
1934 static void binder_transaction_buffer_release(struct binder_proc *proc,
1935 					      struct binder_thread *thread,
1936 					      struct binder_buffer *buffer,
1937 					      binder_size_t off_end_offset,
1938 					      bool is_failure)
1939 {
1940 	int debug_id = buffer->debug_id;
1941 	binder_size_t off_start_offset, buffer_offset;
1942 
1943 	binder_debug(BINDER_DEBUG_TRANSACTION,
1944 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1945 		     proc->pid, buffer->debug_id,
1946 		     buffer->data_size, buffer->offsets_size,
1947 		     (unsigned long long)off_end_offset);
1948 
1949 	if (buffer->target_node)
1950 		binder_dec_node(buffer->target_node, 1, 0);
1951 
1952 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1953 
1954 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1955 	     buffer_offset += sizeof(binder_size_t)) {
1956 		struct binder_object_header *hdr;
1957 		size_t object_size = 0;
1958 		struct binder_object object;
1959 		binder_size_t object_offset;
1960 
1961 		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1962 						   buffer, buffer_offset,
1963 						   sizeof(object_offset)))
1964 			object_size = binder_get_object(proc, NULL, buffer,
1965 							object_offset, &object);
1966 		if (object_size == 0) {
1967 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1968 			       debug_id, (u64)object_offset, buffer->data_size);
1969 			continue;
1970 		}
1971 		hdr = &object.hdr;
1972 		switch (hdr->type) {
1973 		case BINDER_TYPE_BINDER:
1974 		case BINDER_TYPE_WEAK_BINDER: {
1975 			struct flat_binder_object *fp;
1976 			struct binder_node *node;
1977 
1978 			fp = to_flat_binder_object(hdr);
1979 			node = binder_get_node(proc, fp->binder);
1980 			if (node == NULL) {
1981 				pr_err("transaction release %d bad node %016llx\n",
1982 				       debug_id, (u64)fp->binder);
1983 				break;
1984 			}
1985 			binder_debug(BINDER_DEBUG_TRANSACTION,
1986 				     "        node %d u%016llx\n",
1987 				     node->debug_id, (u64)node->ptr);
1988 			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1989 					0);
1990 			binder_put_node(node);
1991 		} break;
1992 		case BINDER_TYPE_HANDLE:
1993 		case BINDER_TYPE_WEAK_HANDLE: {
1994 			struct flat_binder_object *fp;
1995 			struct binder_ref_data rdata;
1996 			int ret;
1997 
1998 			fp = to_flat_binder_object(hdr);
1999 			ret = binder_dec_ref_for_handle(proc, fp->handle,
2000 				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2001 
2002 			if (ret) {
2003 				pr_err("transaction release %d bad handle %d, ret = %d\n",
2004 				 debug_id, fp->handle, ret);
2005 				break;
2006 			}
2007 			binder_debug(BINDER_DEBUG_TRANSACTION,
2008 				     "        ref %d desc %d\n",
2009 				     rdata.debug_id, rdata.desc);
2010 		} break;
2011 
2012 		case BINDER_TYPE_FD: {
2013 			/*
2014 			 * No need to close the file here since user-space
2015 			 * closes it for successfully delivered
2016 			 * transactions. For transactions that weren't
2017 			 * delivered, the new fd was never allocated so
2018 			 * there is no need to close and the fput on the
2019 			 * file is done when the transaction is torn
2020 			 * down.
2021 			 */
2022 		} break;
2023 		case BINDER_TYPE_PTR:
2024 			/*
2025 			 * Nothing to do here, this will get cleaned up when the
2026 			 * transaction buffer gets freed
2027 			 */
2028 			break;
2029 		case BINDER_TYPE_FDA: {
2030 			struct binder_fd_array_object *fda;
2031 			struct binder_buffer_object *parent;
2032 			struct binder_object ptr_object;
2033 			binder_size_t fda_offset;
2034 			size_t fd_index;
2035 			binder_size_t fd_buf_size;
2036 			binder_size_t num_valid;
2037 
2038 			if (is_failure) {
2039 				/*
2040 				 * The fd fixups have not been applied so no
2041 				 * fds need to be closed.
2042 				 */
2043 				continue;
2044 			}
2045 
2046 			num_valid = (buffer_offset - off_start_offset) /
2047 						sizeof(binder_size_t);
2048 			fda = to_binder_fd_array_object(hdr);
2049 			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2050 						     fda->parent,
2051 						     off_start_offset,
2052 						     NULL,
2053 						     num_valid);
2054 			if (!parent) {
2055 				pr_err("transaction release %d bad parent offset\n",
2056 				       debug_id);
2057 				continue;
2058 			}
2059 			fd_buf_size = sizeof(u32) * fda->num_fds;
2060 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2061 				pr_err("transaction release %d invalid number of fds (%lld)\n",
2062 				       debug_id, (u64)fda->num_fds);
2063 				continue;
2064 			}
2065 			if (fd_buf_size > parent->length ||
2066 			    fda->parent_offset > parent->length - fd_buf_size) {
2067 				/* No space for all file descriptors here. */
2068 				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2069 				       debug_id, (u64)fda->num_fds);
2070 				continue;
2071 			}
2072 			/*
2073 			 * the source data for binder_buffer_object is visible
2074 			 * to user-space and the @buffer element is the user
2075 			 * pointer to the buffer_object containing the fd_array.
2076 			 * Convert the address to an offset relative to
2077 			 * the base of the transaction buffer.
2078 			 */
2079 			fda_offset =
2080 			    (parent->buffer - (uintptr_t)buffer->user_data) +
2081 			    fda->parent_offset;
2082 			for (fd_index = 0; fd_index < fda->num_fds;
2083 			     fd_index++) {
2084 				u32 fd;
2085 				int err;
2086 				binder_size_t offset = fda_offset +
2087 					fd_index * sizeof(fd);
2088 
2089 				err = binder_alloc_copy_from_buffer(
2090 						&proc->alloc, &fd, buffer,
2091 						offset, sizeof(fd));
2092 				WARN_ON(err);
2093 				if (!err) {
2094 					binder_deferred_fd_close(fd);
2095 					/*
2096 					 * Need to make sure the thread goes
2097 					 * back to userspace to complete the
2098 					 * deferred close
2099 					 */
2100 					if (thread)
2101 						thread->looper_need_return = true;
2102 				}
2103 			}
2104 		} break;
2105 		default:
2106 			pr_err("transaction release %d bad object type %x\n",
2107 				debug_id, hdr->type);
2108 			break;
2109 		}
2110 	}
2111 }
2112 
2113 /* Clean up all the objects in the buffer */
2114 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2115 						struct binder_thread *thread,
2116 						struct binder_buffer *buffer,
2117 						bool is_failure)
2118 {
2119 	binder_size_t off_end_offset;
2120 
2121 	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2122 	off_end_offset += buffer->offsets_size;
2123 
2124 	binder_transaction_buffer_release(proc, thread, buffer,
2125 					  off_end_offset, is_failure);
2126 }
2127 
2128 static int binder_translate_binder(struct flat_binder_object *fp,
2129 				   struct binder_transaction *t,
2130 				   struct binder_thread *thread)
2131 {
2132 	struct binder_node *node;
2133 	struct binder_proc *proc = thread->proc;
2134 	struct binder_proc *target_proc = t->to_proc;
2135 	struct binder_ref_data rdata;
2136 	int ret = 0;
2137 
2138 	node = binder_get_node(proc, fp->binder);
2139 	if (!node) {
2140 		node = binder_new_node(proc, fp);
2141 		if (!node)
2142 			return -ENOMEM;
2143 	}
2144 	if (fp->cookie != node->cookie) {
2145 		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2146 				  proc->pid, thread->pid, (u64)fp->binder,
2147 				  node->debug_id, (u64)fp->cookie,
2148 				  (u64)node->cookie);
2149 		ret = -EINVAL;
2150 		goto done;
2151 	}
2152 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2153 		ret = -EPERM;
2154 		goto done;
2155 	}
2156 
2157 	ret = binder_inc_ref_for_node(target_proc, node,
2158 			fp->hdr.type == BINDER_TYPE_BINDER,
2159 			&thread->todo, &rdata);
2160 	if (ret)
2161 		goto done;
2162 
2163 	if (fp->hdr.type == BINDER_TYPE_BINDER)
2164 		fp->hdr.type = BINDER_TYPE_HANDLE;
2165 	else
2166 		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2167 	fp->binder = 0;
2168 	fp->handle = rdata.desc;
2169 	fp->cookie = 0;
2170 
2171 	trace_binder_transaction_node_to_ref(t, node, &rdata);
2172 	binder_debug(BINDER_DEBUG_TRANSACTION,
2173 		     "        node %d u%016llx -> ref %d desc %d\n",
2174 		     node->debug_id, (u64)node->ptr,
2175 		     rdata.debug_id, rdata.desc);
2176 done:
2177 	binder_put_node(node);
2178 	return ret;
2179 }
2180 
2181 static int binder_translate_handle(struct flat_binder_object *fp,
2182 				   struct binder_transaction *t,
2183 				   struct binder_thread *thread)
2184 {
2185 	struct binder_proc *proc = thread->proc;
2186 	struct binder_proc *target_proc = t->to_proc;
2187 	struct binder_node *node;
2188 	struct binder_ref_data src_rdata;
2189 	int ret = 0;
2190 
2191 	node = binder_get_node_from_ref(proc, fp->handle,
2192 			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2193 	if (!node) {
2194 		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2195 				  proc->pid, thread->pid, fp->handle);
2196 		return -EINVAL;
2197 	}
2198 	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2199 		ret = -EPERM;
2200 		goto done;
2201 	}
2202 
2203 	binder_node_lock(node);
2204 	if (node->proc == target_proc) {
2205 		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2206 			fp->hdr.type = BINDER_TYPE_BINDER;
2207 		else
2208 			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2209 		fp->binder = node->ptr;
2210 		fp->cookie = node->cookie;
2211 		if (node->proc)
2212 			binder_inner_proc_lock(node->proc);
2213 		else
2214 			__acquire(&node->proc->inner_lock);
2215 		binder_inc_node_nilocked(node,
2216 					 fp->hdr.type == BINDER_TYPE_BINDER,
2217 					 0, NULL);
2218 		if (node->proc)
2219 			binder_inner_proc_unlock(node->proc);
2220 		else
2221 			__release(&node->proc->inner_lock);
2222 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2223 		binder_debug(BINDER_DEBUG_TRANSACTION,
2224 			     "        ref %d desc %d -> node %d u%016llx\n",
2225 			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2226 			     (u64)node->ptr);
2227 		binder_node_unlock(node);
2228 	} else {
2229 		struct binder_ref_data dest_rdata;
2230 
2231 		binder_node_unlock(node);
2232 		ret = binder_inc_ref_for_node(target_proc, node,
2233 				fp->hdr.type == BINDER_TYPE_HANDLE,
2234 				NULL, &dest_rdata);
2235 		if (ret)
2236 			goto done;
2237 
2238 		fp->binder = 0;
2239 		fp->handle = dest_rdata.desc;
2240 		fp->cookie = 0;
2241 		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2242 						    &dest_rdata);
2243 		binder_debug(BINDER_DEBUG_TRANSACTION,
2244 			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2245 			     src_rdata.debug_id, src_rdata.desc,
2246 			     dest_rdata.debug_id, dest_rdata.desc,
2247 			     node->debug_id);
2248 	}
2249 done:
2250 	binder_put_node(node);
2251 	return ret;
2252 }
2253 
2254 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2255 			       struct binder_transaction *t,
2256 			       struct binder_thread *thread,
2257 			       struct binder_transaction *in_reply_to)
2258 {
2259 	struct binder_proc *proc = thread->proc;
2260 	struct binder_proc *target_proc = t->to_proc;
2261 	struct binder_txn_fd_fixup *fixup;
2262 	struct file *file;
2263 	int ret = 0;
2264 	bool target_allows_fd;
2265 
2266 	if (in_reply_to)
2267 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2268 	else
2269 		target_allows_fd = t->buffer->target_node->accept_fds;
2270 	if (!target_allows_fd) {
2271 		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2272 				  proc->pid, thread->pid,
2273 				  in_reply_to ? "reply" : "transaction",
2274 				  fd);
2275 		ret = -EPERM;
2276 		goto err_fd_not_accepted;
2277 	}
2278 
2279 	file = fget(fd);
2280 	if (!file) {
2281 		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2282 				  proc->pid, thread->pid, fd);
2283 		ret = -EBADF;
2284 		goto err_fget;
2285 	}
2286 	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2287 	if (ret < 0) {
2288 		ret = -EPERM;
2289 		goto err_security;
2290 	}
2291 
2292 	/*
2293 	 * Add fixup record for this transaction. The allocation
2294 	 * of the fd in the target needs to be done from a
2295 	 * target thread.
2296 	 */
2297 	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2298 	if (!fixup) {
2299 		ret = -ENOMEM;
2300 		goto err_alloc;
2301 	}
2302 	fixup->file = file;
2303 	fixup->offset = fd_offset;
2304 	fixup->target_fd = -1;
2305 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2306 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2307 
2308 	return ret;
2309 
2310 err_alloc:
2311 err_security:
2312 	fput(file);
2313 err_fget:
2314 err_fd_not_accepted:
2315 	return ret;
2316 }
2317 
2318 /**
2319  * struct binder_ptr_fixup - data to be fixed-up in target buffer
2320  * @offset	offset in target buffer to fixup
2321  * @skip_size	bytes to skip in copy (fixup will be written later)
2322  * @fixup_data	data to write at fixup offset
2323  * @node	list node
2324  *
2325  * This is used for the pointer fixup list (pf) which is created and consumed
2326  * during binder_transaction() and is only accessed locally. No
2327  * locking is necessary.
2328  *
2329  * The list is ordered by @offset.
2330  */
2331 struct binder_ptr_fixup {
2332 	binder_size_t offset;
2333 	size_t skip_size;
2334 	binder_uintptr_t fixup_data;
2335 	struct list_head node;
2336 };
2337 
2338 /**
2339  * struct binder_sg_copy - scatter-gather data to be copied
2340  * @offset		offset in target buffer
2341  * @sender_uaddr	user address in source buffer
2342  * @length		bytes to copy
2343  * @node		list node
2344  *
2345  * This is used for the sg copy list (sgc) which is created and consumed
2346  * during binder_transaction() and is only accessed locally. No
2347  * locking is necessary.
2348  *
2349  * The list is ordered by @offset.
2350  */
2351 struct binder_sg_copy {
2352 	binder_size_t offset;
2353 	const void __user *sender_uaddr;
2354 	size_t length;
2355 	struct list_head node;
2356 };
2357 
2358 /**
2359  * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2360  * @alloc:	binder_alloc associated with @buffer
2361  * @buffer:	binder buffer in target process
2362  * @sgc_head:	list_head of scatter-gather copy list
2363  * @pf_head:	list_head of pointer fixup list
2364  *
2365  * Processes all elements of @sgc_head, applying fixups from @pf_head
2366  * and copying the scatter-gather data from the source process' user
2367  * buffer to the target's buffer. It is expected that the list creation
2368  * and processing all occurs during binder_transaction() so these lists
2369  * are only accessed in local context.
2370  *
2371  * Return: 0=success, else -errno
2372  */
2373 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2374 					 struct binder_buffer *buffer,
2375 					 struct list_head *sgc_head,
2376 					 struct list_head *pf_head)
2377 {
2378 	int ret = 0;
2379 	struct binder_sg_copy *sgc, *tmpsgc;
2380 	struct binder_ptr_fixup *tmppf;
2381 	struct binder_ptr_fixup *pf =
2382 		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2383 					 node);
2384 
2385 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2386 		size_t bytes_copied = 0;
2387 
2388 		while (bytes_copied < sgc->length) {
2389 			size_t copy_size;
2390 			size_t bytes_left = sgc->length - bytes_copied;
2391 			size_t offset = sgc->offset + bytes_copied;
2392 
2393 			/*
2394 			 * We copy up to the fixup (pointed to by pf)
2395 			 */
2396 			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2397 				       : bytes_left;
2398 			if (!ret && copy_size)
2399 				ret = binder_alloc_copy_user_to_buffer(
2400 						alloc, buffer,
2401 						offset,
2402 						sgc->sender_uaddr + bytes_copied,
2403 						copy_size);
2404 			bytes_copied += copy_size;
2405 			if (copy_size != bytes_left) {
2406 				BUG_ON(!pf);
2407 				/* we stopped at a fixup offset */
2408 				if (pf->skip_size) {
2409 					/*
2410 					 * we are just skipping. This is for
2411 					 * BINDER_TYPE_FDA where the translated
2412 					 * fds will be fixed up when we get
2413 					 * to target context.
2414 					 */
2415 					bytes_copied += pf->skip_size;
2416 				} else {
2417 					/* apply the fixup indicated by pf */
2418 					if (!ret)
2419 						ret = binder_alloc_copy_to_buffer(
2420 							alloc, buffer,
2421 							pf->offset,
2422 							&pf->fixup_data,
2423 							sizeof(pf->fixup_data));
2424 					bytes_copied += sizeof(pf->fixup_data);
2425 				}
2426 				list_del(&pf->node);
2427 				kfree(pf);
2428 				pf = list_first_entry_or_null(pf_head,
2429 						struct binder_ptr_fixup, node);
2430 			}
2431 		}
2432 		list_del(&sgc->node);
2433 		kfree(sgc);
2434 	}
2435 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2436 		BUG_ON(pf->skip_size == 0);
2437 		list_del(&pf->node);
2438 		kfree(pf);
2439 	}
2440 	BUG_ON(!list_empty(sgc_head));
2441 
2442 	return ret > 0 ? -EINVAL : ret;
2443 }
2444 
2445 /**
2446  * binder_cleanup_deferred_txn_lists() - free specified lists
2447  * @sgc_head:	list_head of scatter-gather copy list
2448  * @pf_head:	list_head of pointer fixup list
2449  *
2450  * Called to clean up @sgc_head and @pf_head if there is an
2451  * error.
2452  */
2453 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2454 					      struct list_head *pf_head)
2455 {
2456 	struct binder_sg_copy *sgc, *tmpsgc;
2457 	struct binder_ptr_fixup *pf, *tmppf;
2458 
2459 	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2460 		list_del(&sgc->node);
2461 		kfree(sgc);
2462 	}
2463 	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2464 		list_del(&pf->node);
2465 		kfree(pf);
2466 	}
2467 }
2468 
2469 /**
2470  * binder_defer_copy() - queue a scatter-gather buffer for copy
2471  * @sgc_head:		list_head of scatter-gather copy list
2472  * @offset:		binder buffer offset in target process
2473  * @sender_uaddr:	user address in source process
2474  * @length:		bytes to copy
2475  *
2476  * Specify a scatter-gather block to be copied. The actual copy must
2477  * be deferred until all the needed fixups are identified and queued.
2478  * Then the copy and fixups are done together so un-translated values
2479  * from the source are never visible in the target buffer.
2480  *
2481  * We are guaranteed that repeated calls to this function will have
2482  * monotonically increasing @offset values so the list will naturally
2483  * be ordered.
2484  *
2485  * Return: 0=success, else -errno
2486  */
2487 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2488 			     const void __user *sender_uaddr, size_t length)
2489 {
2490 	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2491 
2492 	if (!bc)
2493 		return -ENOMEM;
2494 
2495 	bc->offset = offset;
2496 	bc->sender_uaddr = sender_uaddr;
2497 	bc->length = length;
2498 	INIT_LIST_HEAD(&bc->node);
2499 
2500 	/*
2501 	 * We are guaranteed that the deferred copies are in-order
2502 	 * so just add to the tail.
2503 	 */
2504 	list_add_tail(&bc->node, sgc_head);
2505 
2506 	return 0;
2507 }
2508 
2509 /**
2510  * binder_add_fixup() - queue a fixup to be applied to sg copy
2511  * @pf_head:	list_head of binder ptr fixup list
2512  * @offset:	binder buffer offset in target process
2513  * @fixup:	bytes to be copied for fixup
2514  * @skip_size:	bytes to skip when copying (fixup will be applied later)
2515  *
2516  * Add the specified fixup to a list ordered by @offset. When copying
2517  * the scatter-gather buffers, the fixup will be copied instead of
2518  * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2519  * will be applied later (in target process context), so we just skip
2520  * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2521  * value in @fixup.
2522  *
2523  * This function is called *mostly* in @offset order, but there are
2524  * exceptions. Since out-of-order inserts are relatively uncommon,
2525  * we insert the new element by searching backward from the tail of
2526  * the list.
2527  *
2528  * Return: 0=success, else -errno
2529  */
2530 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2531 			    binder_uintptr_t fixup, size_t skip_size)
2532 {
2533 	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2534 	struct binder_ptr_fixup *tmppf;
2535 
2536 	if (!pf)
2537 		return -ENOMEM;
2538 
2539 	pf->offset = offset;
2540 	pf->fixup_data = fixup;
2541 	pf->skip_size = skip_size;
2542 	INIT_LIST_HEAD(&pf->node);
2543 
2544 	/* Fixups are *mostly* added in-order, but there are some
2545 	 * exceptions. Look backwards through list for insertion point.
2546 	 */
2547 	list_for_each_entry_reverse(tmppf, pf_head, node) {
2548 		if (tmppf->offset < pf->offset) {
2549 			list_add(&pf->node, &tmppf->node);
2550 			return 0;
2551 		}
2552 	}
2553 	/*
2554 	 * if we get here, then the new offset is the lowest so
2555 	 * insert at the head
2556 	 */
2557 	list_add(&pf->node, pf_head);
2558 	return 0;
2559 }
2560 
2561 static int binder_translate_fd_array(struct list_head *pf_head,
2562 				     struct binder_fd_array_object *fda,
2563 				     const void __user *sender_ubuffer,
2564 				     struct binder_buffer_object *parent,
2565 				     struct binder_buffer_object *sender_uparent,
2566 				     struct binder_transaction *t,
2567 				     struct binder_thread *thread,
2568 				     struct binder_transaction *in_reply_to)
2569 {
2570 	binder_size_t fdi, fd_buf_size;
2571 	binder_size_t fda_offset;
2572 	const void __user *sender_ufda_base;
2573 	struct binder_proc *proc = thread->proc;
2574 	int ret;
2575 
2576 	if (fda->num_fds == 0)
2577 		return 0;
2578 
2579 	fd_buf_size = sizeof(u32) * fda->num_fds;
2580 	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2581 		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2582 				  proc->pid, thread->pid, (u64)fda->num_fds);
2583 		return -EINVAL;
2584 	}
2585 	if (fd_buf_size > parent->length ||
2586 	    fda->parent_offset > parent->length - fd_buf_size) {
2587 		/* No space for all file descriptors here. */
2588 		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2589 				  proc->pid, thread->pid, (u64)fda->num_fds);
2590 		return -EINVAL;
2591 	}
2592 	/*
2593 	 * the source data for binder_buffer_object is visible
2594 	 * to user-space and the @buffer element is the user
2595 	 * pointer to the buffer_object containing the fd_array.
2596 	 * Convert the address to an offset relative to
2597 	 * the base of the transaction buffer.
2598 	 */
2599 	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2600 		fda->parent_offset;
2601 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2602 				fda->parent_offset;
2603 
2604 	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2605 	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2606 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2607 				  proc->pid, thread->pid);
2608 		return -EINVAL;
2609 	}
2610 	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2611 	if (ret)
2612 		return ret;
2613 
2614 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2615 		u32 fd;
2616 		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2617 		binder_size_t sender_uoffset = fdi * sizeof(fd);
2618 
2619 		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2620 		if (!ret)
2621 			ret = binder_translate_fd(fd, offset, t, thread,
2622 						  in_reply_to);
2623 		if (ret)
2624 			return ret > 0 ? -EINVAL : ret;
2625 	}
2626 	return 0;
2627 }
2628 
2629 static int binder_fixup_parent(struct list_head *pf_head,
2630 			       struct binder_transaction *t,
2631 			       struct binder_thread *thread,
2632 			       struct binder_buffer_object *bp,
2633 			       binder_size_t off_start_offset,
2634 			       binder_size_t num_valid,
2635 			       binder_size_t last_fixup_obj_off,
2636 			       binder_size_t last_fixup_min_off)
2637 {
2638 	struct binder_buffer_object *parent;
2639 	struct binder_buffer *b = t->buffer;
2640 	struct binder_proc *proc = thread->proc;
2641 	struct binder_proc *target_proc = t->to_proc;
2642 	struct binder_object object;
2643 	binder_size_t buffer_offset;
2644 	binder_size_t parent_offset;
2645 
2646 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2647 		return 0;
2648 
2649 	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2650 				     off_start_offset, &parent_offset,
2651 				     num_valid);
2652 	if (!parent) {
2653 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2654 				  proc->pid, thread->pid);
2655 		return -EINVAL;
2656 	}
2657 
2658 	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2659 				   parent_offset, bp->parent_offset,
2660 				   last_fixup_obj_off,
2661 				   last_fixup_min_off)) {
2662 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2663 				  proc->pid, thread->pid);
2664 		return -EINVAL;
2665 	}
2666 
2667 	if (parent->length < sizeof(binder_uintptr_t) ||
2668 	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2669 		/* No space for a pointer here! */
2670 		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2671 				  proc->pid, thread->pid);
2672 		return -EINVAL;
2673 	}
2674 	buffer_offset = bp->parent_offset +
2675 			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2676 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2677 }
2678 
2679 /**
2680  * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2681  * @t1: the pending async txn in the frozen process
2682  * @t2: the new async txn to supersede the outdated pending one
2683  *
2684  * Return:  true if t2 can supersede t1
2685  *          false if t2 can not supersede t1
2686  */
2687 static bool binder_can_update_transaction(struct binder_transaction *t1,
2688 					  struct binder_transaction *t2)
2689 {
2690 	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2691 	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2692 		return false;
2693 	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2694 	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2695 	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2696 	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2697 		return true;
2698 	return false;
2699 }
2700 
2701 /**
2702  * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2703  * @t:		 new async transaction
2704  * @target_list: list to find outdated transaction
2705  *
2706  * Return: the outdated transaction if found
2707  *         NULL if no outdated transacton can be found
2708  *
2709  * Requires the proc->inner_lock to be held.
2710  */
2711 static struct binder_transaction *
2712 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2713 					 struct list_head *target_list)
2714 {
2715 	struct binder_work *w;
2716 
2717 	list_for_each_entry(w, target_list, entry) {
2718 		struct binder_transaction *t_queued;
2719 
2720 		if (w->type != BINDER_WORK_TRANSACTION)
2721 			continue;
2722 		t_queued = container_of(w, struct binder_transaction, work);
2723 		if (binder_can_update_transaction(t_queued, t))
2724 			return t_queued;
2725 	}
2726 	return NULL;
2727 }
2728 
2729 /**
2730  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2731  * @t:		transaction to send
2732  * @proc:	process to send the transaction to
2733  * @thread:	thread in @proc to send the transaction to (may be NULL)
2734  *
2735  * This function queues a transaction to the specified process. It will try
2736  * to find a thread in the target process to handle the transaction and
2737  * wake it up. If no thread is found, the work is queued to the proc
2738  * waitqueue.
2739  *
2740  * If the @thread parameter is not NULL, the transaction is always queued
2741  * to the waitlist of that specific thread.
2742  *
2743  * Return:	0 if the transaction was successfully queued
2744  *		BR_DEAD_REPLY if the target process or thread is dead
2745  *		BR_FROZEN_REPLY if the target process or thread is frozen and
2746  *			the sync transaction was rejected
2747  *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2748  *		and the async transaction was successfully queued
2749  */
2750 static int binder_proc_transaction(struct binder_transaction *t,
2751 				    struct binder_proc *proc,
2752 				    struct binder_thread *thread)
2753 {
2754 	struct binder_node *node = t->buffer->target_node;
2755 	bool oneway = !!(t->flags & TF_ONE_WAY);
2756 	bool pending_async = false;
2757 	struct binder_transaction *t_outdated = NULL;
2758 	bool frozen = false;
2759 
2760 	BUG_ON(!node);
2761 	binder_node_lock(node);
2762 	if (oneway) {
2763 		BUG_ON(thread);
2764 		if (node->has_async_transaction)
2765 			pending_async = true;
2766 		else
2767 			node->has_async_transaction = true;
2768 	}
2769 
2770 	binder_inner_proc_lock(proc);
2771 	if (proc->is_frozen) {
2772 		frozen = true;
2773 		proc->sync_recv |= !oneway;
2774 		proc->async_recv |= oneway;
2775 	}
2776 
2777 	if ((frozen && !oneway) || proc->is_dead ||
2778 			(thread && thread->is_dead)) {
2779 		binder_inner_proc_unlock(proc);
2780 		binder_node_unlock(node);
2781 		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2782 	}
2783 
2784 	if (!thread && !pending_async)
2785 		thread = binder_select_thread_ilocked(proc);
2786 
2787 	if (thread) {
2788 		binder_enqueue_thread_work_ilocked(thread, &t->work);
2789 	} else if (!pending_async) {
2790 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2791 	} else {
2792 		if ((t->flags & TF_UPDATE_TXN) && frozen) {
2793 			t_outdated = binder_find_outdated_transaction_ilocked(t,
2794 									      &node->async_todo);
2795 			if (t_outdated) {
2796 				binder_debug(BINDER_DEBUG_TRANSACTION,
2797 					     "txn %d supersedes %d\n",
2798 					     t->debug_id, t_outdated->debug_id);
2799 				list_del_init(&t_outdated->work.entry);
2800 				proc->outstanding_txns--;
2801 			}
2802 		}
2803 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2804 	}
2805 
2806 	if (!pending_async)
2807 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2808 
2809 	proc->outstanding_txns++;
2810 	binder_inner_proc_unlock(proc);
2811 	binder_node_unlock(node);
2812 
2813 	/*
2814 	 * To reduce potential contention, free the outdated transaction and
2815 	 * buffer after releasing the locks.
2816 	 */
2817 	if (t_outdated) {
2818 		struct binder_buffer *buffer = t_outdated->buffer;
2819 
2820 		t_outdated->buffer = NULL;
2821 		buffer->transaction = NULL;
2822 		trace_binder_transaction_update_buffer_release(buffer);
2823 		binder_release_entire_buffer(proc, NULL, buffer, false);
2824 		binder_alloc_free_buf(&proc->alloc, buffer);
2825 		kfree(t_outdated);
2826 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
2827 	}
2828 
2829 	if (oneway && frozen)
2830 		return BR_TRANSACTION_PENDING_FROZEN;
2831 
2832 	return 0;
2833 }
2834 
2835 /**
2836  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2837  * @node:         struct binder_node for which to get refs
2838  * @procp:        returns @node->proc if valid
2839  * @error:        if no @procp then returns BR_DEAD_REPLY
2840  *
2841  * User-space normally keeps the node alive when creating a transaction
2842  * since it has a reference to the target. The local strong ref keeps it
2843  * alive if the sending process dies before the target process processes
2844  * the transaction. If the source process is malicious or has a reference
2845  * counting bug, relying on the local strong ref can fail.
2846  *
2847  * Since user-space can cause the local strong ref to go away, we also take
2848  * a tmpref on the node to ensure it survives while we are constructing
2849  * the transaction. We also need a tmpref on the proc while we are
2850  * constructing the transaction, so we take that here as well.
2851  *
2852  * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2853  * Also sets @procp if valid. If the @node->proc is NULL indicating that the
2854  * target proc has died, @error is set to BR_DEAD_REPLY.
2855  */
2856 static struct binder_node *binder_get_node_refs_for_txn(
2857 		struct binder_node *node,
2858 		struct binder_proc **procp,
2859 		uint32_t *error)
2860 {
2861 	struct binder_node *target_node = NULL;
2862 
2863 	binder_node_inner_lock(node);
2864 	if (node->proc) {
2865 		target_node = node;
2866 		binder_inc_node_nilocked(node, 1, 0, NULL);
2867 		binder_inc_node_tmpref_ilocked(node);
2868 		node->proc->tmp_ref++;
2869 		*procp = node->proc;
2870 	} else
2871 		*error = BR_DEAD_REPLY;
2872 	binder_node_inner_unlock(node);
2873 
2874 	return target_node;
2875 }
2876 
2877 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2878 				      uint32_t command, int32_t param)
2879 {
2880 	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2881 
2882 	if (!from) {
2883 		/* annotation for sparse */
2884 		__release(&from->proc->inner_lock);
2885 		return;
2886 	}
2887 
2888 	/* don't override existing errors */
2889 	if (from->ee.command == BR_OK)
2890 		binder_set_extended_error(&from->ee, id, command, param);
2891 	binder_inner_proc_unlock(from->proc);
2892 	binder_thread_dec_tmpref(from);
2893 }
2894 
2895 static void binder_transaction(struct binder_proc *proc,
2896 			       struct binder_thread *thread,
2897 			       struct binder_transaction_data *tr, int reply,
2898 			       binder_size_t extra_buffers_size)
2899 {
2900 	int ret;
2901 	struct binder_transaction *t;
2902 	struct binder_work *w;
2903 	struct binder_work *tcomplete;
2904 	binder_size_t buffer_offset = 0;
2905 	binder_size_t off_start_offset, off_end_offset;
2906 	binder_size_t off_min;
2907 	binder_size_t sg_buf_offset, sg_buf_end_offset;
2908 	binder_size_t user_offset = 0;
2909 	struct binder_proc *target_proc = NULL;
2910 	struct binder_thread *target_thread = NULL;
2911 	struct binder_node *target_node = NULL;
2912 	struct binder_transaction *in_reply_to = NULL;
2913 	struct binder_transaction_log_entry *e;
2914 	uint32_t return_error = 0;
2915 	uint32_t return_error_param = 0;
2916 	uint32_t return_error_line = 0;
2917 	binder_size_t last_fixup_obj_off = 0;
2918 	binder_size_t last_fixup_min_off = 0;
2919 	struct binder_context *context = proc->context;
2920 	int t_debug_id = atomic_inc_return(&binder_last_id);
2921 	char *secctx = NULL;
2922 	u32 secctx_sz = 0;
2923 	struct list_head sgc_head;
2924 	struct list_head pf_head;
2925 	const void __user *user_buffer = (const void __user *)
2926 				(uintptr_t)tr->data.ptr.buffer;
2927 	INIT_LIST_HEAD(&sgc_head);
2928 	INIT_LIST_HEAD(&pf_head);
2929 
2930 	e = binder_transaction_log_add(&binder_transaction_log);
2931 	e->debug_id = t_debug_id;
2932 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2933 	e->from_proc = proc->pid;
2934 	e->from_thread = thread->pid;
2935 	e->target_handle = tr->target.handle;
2936 	e->data_size = tr->data_size;
2937 	e->offsets_size = tr->offsets_size;
2938 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2939 
2940 	binder_inner_proc_lock(proc);
2941 	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2942 	binder_inner_proc_unlock(proc);
2943 
2944 	if (reply) {
2945 		binder_inner_proc_lock(proc);
2946 		in_reply_to = thread->transaction_stack;
2947 		if (in_reply_to == NULL) {
2948 			binder_inner_proc_unlock(proc);
2949 			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2950 					  proc->pid, thread->pid);
2951 			return_error = BR_FAILED_REPLY;
2952 			return_error_param = -EPROTO;
2953 			return_error_line = __LINE__;
2954 			goto err_empty_call_stack;
2955 		}
2956 		if (in_reply_to->to_thread != thread) {
2957 			spin_lock(&in_reply_to->lock);
2958 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2959 				proc->pid, thread->pid, in_reply_to->debug_id,
2960 				in_reply_to->to_proc ?
2961 				in_reply_to->to_proc->pid : 0,
2962 				in_reply_to->to_thread ?
2963 				in_reply_to->to_thread->pid : 0);
2964 			spin_unlock(&in_reply_to->lock);
2965 			binder_inner_proc_unlock(proc);
2966 			return_error = BR_FAILED_REPLY;
2967 			return_error_param = -EPROTO;
2968 			return_error_line = __LINE__;
2969 			in_reply_to = NULL;
2970 			goto err_bad_call_stack;
2971 		}
2972 		thread->transaction_stack = in_reply_to->to_parent;
2973 		binder_inner_proc_unlock(proc);
2974 		binder_set_nice(in_reply_to->saved_priority);
2975 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2976 		if (target_thread == NULL) {
2977 			/* annotation for sparse */
2978 			__release(&target_thread->proc->inner_lock);
2979 			binder_txn_error("%d:%d reply target not found\n",
2980 				thread->pid, proc->pid);
2981 			return_error = BR_DEAD_REPLY;
2982 			return_error_line = __LINE__;
2983 			goto err_dead_binder;
2984 		}
2985 		if (target_thread->transaction_stack != in_reply_to) {
2986 			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2987 				proc->pid, thread->pid,
2988 				target_thread->transaction_stack ?
2989 				target_thread->transaction_stack->debug_id : 0,
2990 				in_reply_to->debug_id);
2991 			binder_inner_proc_unlock(target_thread->proc);
2992 			return_error = BR_FAILED_REPLY;
2993 			return_error_param = -EPROTO;
2994 			return_error_line = __LINE__;
2995 			in_reply_to = NULL;
2996 			target_thread = NULL;
2997 			goto err_dead_binder;
2998 		}
2999 		target_proc = target_thread->proc;
3000 		target_proc->tmp_ref++;
3001 		binder_inner_proc_unlock(target_thread->proc);
3002 	} else {
3003 		if (tr->target.handle) {
3004 			struct binder_ref *ref;
3005 
3006 			/*
3007 			 * There must already be a strong ref
3008 			 * on this node. If so, do a strong
3009 			 * increment on the node to ensure it
3010 			 * stays alive until the transaction is
3011 			 * done.
3012 			 */
3013 			binder_proc_lock(proc);
3014 			ref = binder_get_ref_olocked(proc, tr->target.handle,
3015 						     true);
3016 			if (ref) {
3017 				target_node = binder_get_node_refs_for_txn(
3018 						ref->node, &target_proc,
3019 						&return_error);
3020 			} else {
3021 				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3022 						  proc->pid, thread->pid, tr->target.handle);
3023 				return_error = BR_FAILED_REPLY;
3024 			}
3025 			binder_proc_unlock(proc);
3026 		} else {
3027 			mutex_lock(&context->context_mgr_node_lock);
3028 			target_node = context->binder_context_mgr_node;
3029 			if (target_node)
3030 				target_node = binder_get_node_refs_for_txn(
3031 						target_node, &target_proc,
3032 						&return_error);
3033 			else
3034 				return_error = BR_DEAD_REPLY;
3035 			mutex_unlock(&context->context_mgr_node_lock);
3036 			if (target_node && target_proc->pid == proc->pid) {
3037 				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3038 						  proc->pid, thread->pid);
3039 				return_error = BR_FAILED_REPLY;
3040 				return_error_param = -EINVAL;
3041 				return_error_line = __LINE__;
3042 				goto err_invalid_target_handle;
3043 			}
3044 		}
3045 		if (!target_node) {
3046 			binder_txn_error("%d:%d cannot find target node\n",
3047 				thread->pid, proc->pid);
3048 			/*
3049 			 * return_error is set above
3050 			 */
3051 			return_error_param = -EINVAL;
3052 			return_error_line = __LINE__;
3053 			goto err_dead_binder;
3054 		}
3055 		e->to_node = target_node->debug_id;
3056 		if (WARN_ON(proc == target_proc)) {
3057 			binder_txn_error("%d:%d self transactions not allowed\n",
3058 				thread->pid, proc->pid);
3059 			return_error = BR_FAILED_REPLY;
3060 			return_error_param = -EINVAL;
3061 			return_error_line = __LINE__;
3062 			goto err_invalid_target_handle;
3063 		}
3064 		if (security_binder_transaction(proc->cred,
3065 						target_proc->cred) < 0) {
3066 			binder_txn_error("%d:%d transaction credentials failed\n",
3067 				thread->pid, proc->pid);
3068 			return_error = BR_FAILED_REPLY;
3069 			return_error_param = -EPERM;
3070 			return_error_line = __LINE__;
3071 			goto err_invalid_target_handle;
3072 		}
3073 		binder_inner_proc_lock(proc);
3074 
3075 		w = list_first_entry_or_null(&thread->todo,
3076 					     struct binder_work, entry);
3077 		if (!(tr->flags & TF_ONE_WAY) && w &&
3078 		    w->type == BINDER_WORK_TRANSACTION) {
3079 			/*
3080 			 * Do not allow new outgoing transaction from a
3081 			 * thread that has a transaction at the head of
3082 			 * its todo list. Only need to check the head
3083 			 * because binder_select_thread_ilocked picks a
3084 			 * thread from proc->waiting_threads to enqueue
3085 			 * the transaction, and nothing is queued to the
3086 			 * todo list while the thread is on waiting_threads.
3087 			 */
3088 			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3089 					  proc->pid, thread->pid);
3090 			binder_inner_proc_unlock(proc);
3091 			return_error = BR_FAILED_REPLY;
3092 			return_error_param = -EPROTO;
3093 			return_error_line = __LINE__;
3094 			goto err_bad_todo_list;
3095 		}
3096 
3097 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3098 			struct binder_transaction *tmp;
3099 
3100 			tmp = thread->transaction_stack;
3101 			if (tmp->to_thread != thread) {
3102 				spin_lock(&tmp->lock);
3103 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3104 					proc->pid, thread->pid, tmp->debug_id,
3105 					tmp->to_proc ? tmp->to_proc->pid : 0,
3106 					tmp->to_thread ?
3107 					tmp->to_thread->pid : 0);
3108 				spin_unlock(&tmp->lock);
3109 				binder_inner_proc_unlock(proc);
3110 				return_error = BR_FAILED_REPLY;
3111 				return_error_param = -EPROTO;
3112 				return_error_line = __LINE__;
3113 				goto err_bad_call_stack;
3114 			}
3115 			while (tmp) {
3116 				struct binder_thread *from;
3117 
3118 				spin_lock(&tmp->lock);
3119 				from = tmp->from;
3120 				if (from && from->proc == target_proc) {
3121 					atomic_inc(&from->tmp_ref);
3122 					target_thread = from;
3123 					spin_unlock(&tmp->lock);
3124 					break;
3125 				}
3126 				spin_unlock(&tmp->lock);
3127 				tmp = tmp->from_parent;
3128 			}
3129 		}
3130 		binder_inner_proc_unlock(proc);
3131 	}
3132 	if (target_thread)
3133 		e->to_thread = target_thread->pid;
3134 	e->to_proc = target_proc->pid;
3135 
3136 	/* TODO: reuse incoming transaction for reply */
3137 	t = kzalloc(sizeof(*t), GFP_KERNEL);
3138 	if (t == NULL) {
3139 		binder_txn_error("%d:%d cannot allocate transaction\n",
3140 			thread->pid, proc->pid);
3141 		return_error = BR_FAILED_REPLY;
3142 		return_error_param = -ENOMEM;
3143 		return_error_line = __LINE__;
3144 		goto err_alloc_t_failed;
3145 	}
3146 	INIT_LIST_HEAD(&t->fd_fixups);
3147 	binder_stats_created(BINDER_STAT_TRANSACTION);
3148 	spin_lock_init(&t->lock);
3149 
3150 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3151 	if (tcomplete == NULL) {
3152 		binder_txn_error("%d:%d cannot allocate work for transaction\n",
3153 			thread->pid, proc->pid);
3154 		return_error = BR_FAILED_REPLY;
3155 		return_error_param = -ENOMEM;
3156 		return_error_line = __LINE__;
3157 		goto err_alloc_tcomplete_failed;
3158 	}
3159 	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3160 
3161 	t->debug_id = t_debug_id;
3162 
3163 	if (reply)
3164 		binder_debug(BINDER_DEBUG_TRANSACTION,
3165 			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3166 			     proc->pid, thread->pid, t->debug_id,
3167 			     target_proc->pid, target_thread->pid,
3168 			     (u64)tr->data.ptr.buffer,
3169 			     (u64)tr->data.ptr.offsets,
3170 			     (u64)tr->data_size, (u64)tr->offsets_size,
3171 			     (u64)extra_buffers_size);
3172 	else
3173 		binder_debug(BINDER_DEBUG_TRANSACTION,
3174 			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3175 			     proc->pid, thread->pid, t->debug_id,
3176 			     target_proc->pid, target_node->debug_id,
3177 			     (u64)tr->data.ptr.buffer,
3178 			     (u64)tr->data.ptr.offsets,
3179 			     (u64)tr->data_size, (u64)tr->offsets_size,
3180 			     (u64)extra_buffers_size);
3181 
3182 	if (!reply && !(tr->flags & TF_ONE_WAY))
3183 		t->from = thread;
3184 	else
3185 		t->from = NULL;
3186 	t->sender_euid = task_euid(proc->tsk);
3187 	t->to_proc = target_proc;
3188 	t->to_thread = target_thread;
3189 	t->code = tr->code;
3190 	t->flags = tr->flags;
3191 	t->priority = task_nice(current);
3192 
3193 	if (target_node && target_node->txn_security_ctx) {
3194 		u32 secid;
3195 		size_t added_size;
3196 
3197 		security_cred_getsecid(proc->cred, &secid);
3198 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3199 		if (ret) {
3200 			binder_txn_error("%d:%d failed to get security context\n",
3201 				thread->pid, proc->pid);
3202 			return_error = BR_FAILED_REPLY;
3203 			return_error_param = ret;
3204 			return_error_line = __LINE__;
3205 			goto err_get_secctx_failed;
3206 		}
3207 		added_size = ALIGN(secctx_sz, sizeof(u64));
3208 		extra_buffers_size += added_size;
3209 		if (extra_buffers_size < added_size) {
3210 			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3211 				thread->pid, proc->pid);
3212 			return_error = BR_FAILED_REPLY;
3213 			return_error_param = -EINVAL;
3214 			return_error_line = __LINE__;
3215 			goto err_bad_extra_size;
3216 		}
3217 	}
3218 
3219 	trace_binder_transaction(reply, t, target_node);
3220 
3221 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3222 		tr->offsets_size, extra_buffers_size,
3223 		!reply && (t->flags & TF_ONE_WAY), current->tgid);
3224 	if (IS_ERR(t->buffer)) {
3225 		char *s;
3226 
3227 		ret = PTR_ERR(t->buffer);
3228 		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3229 			: (ret == -ENOSPC) ? ": no space left"
3230 			: (ret == -ENOMEM) ? ": memory allocation failed"
3231 			: "";
3232 		binder_txn_error("cannot allocate buffer%s", s);
3233 
3234 		return_error_param = PTR_ERR(t->buffer);
3235 		return_error = return_error_param == -ESRCH ?
3236 			BR_DEAD_REPLY : BR_FAILED_REPLY;
3237 		return_error_line = __LINE__;
3238 		t->buffer = NULL;
3239 		goto err_binder_alloc_buf_failed;
3240 	}
3241 	if (secctx) {
3242 		int err;
3243 		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3244 				    ALIGN(tr->offsets_size, sizeof(void *)) +
3245 				    ALIGN(extra_buffers_size, sizeof(void *)) -
3246 				    ALIGN(secctx_sz, sizeof(u64));
3247 
3248 		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3249 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3250 						  t->buffer, buf_offset,
3251 						  secctx, secctx_sz);
3252 		if (err) {
3253 			t->security_ctx = 0;
3254 			WARN_ON(1);
3255 		}
3256 		security_release_secctx(secctx, secctx_sz);
3257 		secctx = NULL;
3258 	}
3259 	t->buffer->debug_id = t->debug_id;
3260 	t->buffer->transaction = t;
3261 	t->buffer->target_node = target_node;
3262 	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3263 	trace_binder_transaction_alloc_buf(t->buffer);
3264 
3265 	if (binder_alloc_copy_user_to_buffer(
3266 				&target_proc->alloc,
3267 				t->buffer,
3268 				ALIGN(tr->data_size, sizeof(void *)),
3269 				(const void __user *)
3270 					(uintptr_t)tr->data.ptr.offsets,
3271 				tr->offsets_size)) {
3272 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3273 				proc->pid, thread->pid);
3274 		return_error = BR_FAILED_REPLY;
3275 		return_error_param = -EFAULT;
3276 		return_error_line = __LINE__;
3277 		goto err_copy_data_failed;
3278 	}
3279 	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3280 		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3281 				proc->pid, thread->pid, (u64)tr->offsets_size);
3282 		return_error = BR_FAILED_REPLY;
3283 		return_error_param = -EINVAL;
3284 		return_error_line = __LINE__;
3285 		goto err_bad_offset;
3286 	}
3287 	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3288 		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3289 				  proc->pid, thread->pid,
3290 				  (u64)extra_buffers_size);
3291 		return_error = BR_FAILED_REPLY;
3292 		return_error_param = -EINVAL;
3293 		return_error_line = __LINE__;
3294 		goto err_bad_offset;
3295 	}
3296 	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3297 	buffer_offset = off_start_offset;
3298 	off_end_offset = off_start_offset + tr->offsets_size;
3299 	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3300 	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3301 		ALIGN(secctx_sz, sizeof(u64));
3302 	off_min = 0;
3303 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3304 	     buffer_offset += sizeof(binder_size_t)) {
3305 		struct binder_object_header *hdr;
3306 		size_t object_size;
3307 		struct binder_object object;
3308 		binder_size_t object_offset;
3309 		binder_size_t copy_size;
3310 
3311 		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3312 						  &object_offset,
3313 						  t->buffer,
3314 						  buffer_offset,
3315 						  sizeof(object_offset))) {
3316 			binder_txn_error("%d:%d copy offset from buffer failed\n",
3317 				thread->pid, proc->pid);
3318 			return_error = BR_FAILED_REPLY;
3319 			return_error_param = -EINVAL;
3320 			return_error_line = __LINE__;
3321 			goto err_bad_offset;
3322 		}
3323 
3324 		/*
3325 		 * Copy the source user buffer up to the next object
3326 		 * that will be processed.
3327 		 */
3328 		copy_size = object_offset - user_offset;
3329 		if (copy_size && (user_offset > object_offset ||
3330 				binder_alloc_copy_user_to_buffer(
3331 					&target_proc->alloc,
3332 					t->buffer, user_offset,
3333 					user_buffer + user_offset,
3334 					copy_size))) {
3335 			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3336 					proc->pid, thread->pid);
3337 			return_error = BR_FAILED_REPLY;
3338 			return_error_param = -EFAULT;
3339 			return_error_line = __LINE__;
3340 			goto err_copy_data_failed;
3341 		}
3342 		object_size = binder_get_object(target_proc, user_buffer,
3343 				t->buffer, object_offset, &object);
3344 		if (object_size == 0 || object_offset < off_min) {
3345 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3346 					  proc->pid, thread->pid,
3347 					  (u64)object_offset,
3348 					  (u64)off_min,
3349 					  (u64)t->buffer->data_size);
3350 			return_error = BR_FAILED_REPLY;
3351 			return_error_param = -EINVAL;
3352 			return_error_line = __LINE__;
3353 			goto err_bad_offset;
3354 		}
3355 		/*
3356 		 * Set offset to the next buffer fragment to be
3357 		 * copied
3358 		 */
3359 		user_offset = object_offset + object_size;
3360 
3361 		hdr = &object.hdr;
3362 		off_min = object_offset + object_size;
3363 		switch (hdr->type) {
3364 		case BINDER_TYPE_BINDER:
3365 		case BINDER_TYPE_WEAK_BINDER: {
3366 			struct flat_binder_object *fp;
3367 
3368 			fp = to_flat_binder_object(hdr);
3369 			ret = binder_translate_binder(fp, t, thread);
3370 
3371 			if (ret < 0 ||
3372 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3373 							t->buffer,
3374 							object_offset,
3375 							fp, sizeof(*fp))) {
3376 				binder_txn_error("%d:%d translate binder failed\n",
3377 					thread->pid, proc->pid);
3378 				return_error = BR_FAILED_REPLY;
3379 				return_error_param = ret;
3380 				return_error_line = __LINE__;
3381 				goto err_translate_failed;
3382 			}
3383 		} break;
3384 		case BINDER_TYPE_HANDLE:
3385 		case BINDER_TYPE_WEAK_HANDLE: {
3386 			struct flat_binder_object *fp;
3387 
3388 			fp = to_flat_binder_object(hdr);
3389 			ret = binder_translate_handle(fp, t, thread);
3390 			if (ret < 0 ||
3391 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3392 							t->buffer,
3393 							object_offset,
3394 							fp, sizeof(*fp))) {
3395 				binder_txn_error("%d:%d translate handle failed\n",
3396 					thread->pid, proc->pid);
3397 				return_error = BR_FAILED_REPLY;
3398 				return_error_param = ret;
3399 				return_error_line = __LINE__;
3400 				goto err_translate_failed;
3401 			}
3402 		} break;
3403 
3404 		case BINDER_TYPE_FD: {
3405 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3406 			binder_size_t fd_offset = object_offset +
3407 				(uintptr_t)&fp->fd - (uintptr_t)fp;
3408 			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3409 						      thread, in_reply_to);
3410 
3411 			fp->pad_binder = 0;
3412 			if (ret < 0 ||
3413 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3414 							t->buffer,
3415 							object_offset,
3416 							fp, sizeof(*fp))) {
3417 				binder_txn_error("%d:%d translate fd failed\n",
3418 					thread->pid, proc->pid);
3419 				return_error = BR_FAILED_REPLY;
3420 				return_error_param = ret;
3421 				return_error_line = __LINE__;
3422 				goto err_translate_failed;
3423 			}
3424 		} break;
3425 		case BINDER_TYPE_FDA: {
3426 			struct binder_object ptr_object;
3427 			binder_size_t parent_offset;
3428 			struct binder_object user_object;
3429 			size_t user_parent_size;
3430 			struct binder_fd_array_object *fda =
3431 				to_binder_fd_array_object(hdr);
3432 			size_t num_valid = (buffer_offset - off_start_offset) /
3433 						sizeof(binder_size_t);
3434 			struct binder_buffer_object *parent =
3435 				binder_validate_ptr(target_proc, t->buffer,
3436 						    &ptr_object, fda->parent,
3437 						    off_start_offset,
3438 						    &parent_offset,
3439 						    num_valid);
3440 			if (!parent) {
3441 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3442 						  proc->pid, thread->pid);
3443 				return_error = BR_FAILED_REPLY;
3444 				return_error_param = -EINVAL;
3445 				return_error_line = __LINE__;
3446 				goto err_bad_parent;
3447 			}
3448 			if (!binder_validate_fixup(target_proc, t->buffer,
3449 						   off_start_offset,
3450 						   parent_offset,
3451 						   fda->parent_offset,
3452 						   last_fixup_obj_off,
3453 						   last_fixup_min_off)) {
3454 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3455 						  proc->pid, thread->pid);
3456 				return_error = BR_FAILED_REPLY;
3457 				return_error_param = -EINVAL;
3458 				return_error_line = __LINE__;
3459 				goto err_bad_parent;
3460 			}
3461 			/*
3462 			 * We need to read the user version of the parent
3463 			 * object to get the original user offset
3464 			 */
3465 			user_parent_size =
3466 				binder_get_object(proc, user_buffer, t->buffer,
3467 						  parent_offset, &user_object);
3468 			if (user_parent_size != sizeof(user_object.bbo)) {
3469 				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3470 						  proc->pid, thread->pid,
3471 						  user_parent_size,
3472 						  sizeof(user_object.bbo));
3473 				return_error = BR_FAILED_REPLY;
3474 				return_error_param = -EINVAL;
3475 				return_error_line = __LINE__;
3476 				goto err_bad_parent;
3477 			}
3478 			ret = binder_translate_fd_array(&pf_head, fda,
3479 							user_buffer, parent,
3480 							&user_object.bbo, t,
3481 							thread, in_reply_to);
3482 			if (!ret)
3483 				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3484 								  t->buffer,
3485 								  object_offset,
3486 								  fda, sizeof(*fda));
3487 			if (ret) {
3488 				binder_txn_error("%d:%d translate fd array failed\n",
3489 					thread->pid, proc->pid);
3490 				return_error = BR_FAILED_REPLY;
3491 				return_error_param = ret > 0 ? -EINVAL : ret;
3492 				return_error_line = __LINE__;
3493 				goto err_translate_failed;
3494 			}
3495 			last_fixup_obj_off = parent_offset;
3496 			last_fixup_min_off =
3497 				fda->parent_offset + sizeof(u32) * fda->num_fds;
3498 		} break;
3499 		case BINDER_TYPE_PTR: {
3500 			struct binder_buffer_object *bp =
3501 				to_binder_buffer_object(hdr);
3502 			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3503 			size_t num_valid;
3504 
3505 			if (bp->length > buf_left) {
3506 				binder_user_error("%d:%d got transaction with too large buffer\n",
3507 						  proc->pid, thread->pid);
3508 				return_error = BR_FAILED_REPLY;
3509 				return_error_param = -EINVAL;
3510 				return_error_line = __LINE__;
3511 				goto err_bad_offset;
3512 			}
3513 			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3514 				(const void __user *)(uintptr_t)bp->buffer,
3515 				bp->length);
3516 			if (ret) {
3517 				binder_txn_error("%d:%d deferred copy failed\n",
3518 					thread->pid, proc->pid);
3519 				return_error = BR_FAILED_REPLY;
3520 				return_error_param = ret;
3521 				return_error_line = __LINE__;
3522 				goto err_translate_failed;
3523 			}
3524 			/* Fixup buffer pointer to target proc address space */
3525 			bp->buffer = (uintptr_t)
3526 				t->buffer->user_data + sg_buf_offset;
3527 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3528 
3529 			num_valid = (buffer_offset - off_start_offset) /
3530 					sizeof(binder_size_t);
3531 			ret = binder_fixup_parent(&pf_head, t,
3532 						  thread, bp,
3533 						  off_start_offset,
3534 						  num_valid,
3535 						  last_fixup_obj_off,
3536 						  last_fixup_min_off);
3537 			if (ret < 0 ||
3538 			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3539 							t->buffer,
3540 							object_offset,
3541 							bp, sizeof(*bp))) {
3542 				binder_txn_error("%d:%d failed to fixup parent\n",
3543 					thread->pid, proc->pid);
3544 				return_error = BR_FAILED_REPLY;
3545 				return_error_param = ret;
3546 				return_error_line = __LINE__;
3547 				goto err_translate_failed;
3548 			}
3549 			last_fixup_obj_off = object_offset;
3550 			last_fixup_min_off = 0;
3551 		} break;
3552 		default:
3553 			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3554 				proc->pid, thread->pid, hdr->type);
3555 			return_error = BR_FAILED_REPLY;
3556 			return_error_param = -EINVAL;
3557 			return_error_line = __LINE__;
3558 			goto err_bad_object_type;
3559 		}
3560 	}
3561 	/* Done processing objects, copy the rest of the buffer */
3562 	if (binder_alloc_copy_user_to_buffer(
3563 				&target_proc->alloc,
3564 				t->buffer, user_offset,
3565 				user_buffer + user_offset,
3566 				tr->data_size - user_offset)) {
3567 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3568 				proc->pid, thread->pid);
3569 		return_error = BR_FAILED_REPLY;
3570 		return_error_param = -EFAULT;
3571 		return_error_line = __LINE__;
3572 		goto err_copy_data_failed;
3573 	}
3574 
3575 	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3576 					    &sgc_head, &pf_head);
3577 	if (ret) {
3578 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3579 				  proc->pid, thread->pid);
3580 		return_error = BR_FAILED_REPLY;
3581 		return_error_param = ret;
3582 		return_error_line = __LINE__;
3583 		goto err_copy_data_failed;
3584 	}
3585 	if (t->buffer->oneway_spam_suspect)
3586 		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3587 	else
3588 		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3589 	t->work.type = BINDER_WORK_TRANSACTION;
3590 
3591 	if (reply) {
3592 		binder_enqueue_thread_work(thread, tcomplete);
3593 		binder_inner_proc_lock(target_proc);
3594 		if (target_thread->is_dead) {
3595 			return_error = BR_DEAD_REPLY;
3596 			binder_inner_proc_unlock(target_proc);
3597 			goto err_dead_proc_or_thread;
3598 		}
3599 		BUG_ON(t->buffer->async_transaction != 0);
3600 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3601 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3602 		target_proc->outstanding_txns++;
3603 		binder_inner_proc_unlock(target_proc);
3604 		wake_up_interruptible_sync(&target_thread->wait);
3605 		binder_free_transaction(in_reply_to);
3606 	} else if (!(t->flags & TF_ONE_WAY)) {
3607 		BUG_ON(t->buffer->async_transaction != 0);
3608 		binder_inner_proc_lock(proc);
3609 		/*
3610 		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3611 		 * userspace immediately; this allows the target process to
3612 		 * immediately start processing this transaction, reducing
3613 		 * latency. We will then return the TRANSACTION_COMPLETE when
3614 		 * the target replies (or there is an error).
3615 		 */
3616 		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3617 		t->need_reply = 1;
3618 		t->from_parent = thread->transaction_stack;
3619 		thread->transaction_stack = t;
3620 		binder_inner_proc_unlock(proc);
3621 		return_error = binder_proc_transaction(t,
3622 				target_proc, target_thread);
3623 		if (return_error) {
3624 			binder_inner_proc_lock(proc);
3625 			binder_pop_transaction_ilocked(thread, t);
3626 			binder_inner_proc_unlock(proc);
3627 			goto err_dead_proc_or_thread;
3628 		}
3629 	} else {
3630 		BUG_ON(target_node == NULL);
3631 		BUG_ON(t->buffer->async_transaction != 1);
3632 		return_error = binder_proc_transaction(t, target_proc, NULL);
3633 		/*
3634 		 * Let the caller know when async transaction reaches a frozen
3635 		 * process and is put in a pending queue, waiting for the target
3636 		 * process to be unfrozen.
3637 		 */
3638 		if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3639 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3640 		binder_enqueue_thread_work(thread, tcomplete);
3641 		if (return_error &&
3642 		    return_error != BR_TRANSACTION_PENDING_FROZEN)
3643 			goto err_dead_proc_or_thread;
3644 	}
3645 	if (target_thread)
3646 		binder_thread_dec_tmpref(target_thread);
3647 	binder_proc_dec_tmpref(target_proc);
3648 	if (target_node)
3649 		binder_dec_node_tmpref(target_node);
3650 	/*
3651 	 * write barrier to synchronize with initialization
3652 	 * of log entry
3653 	 */
3654 	smp_wmb();
3655 	WRITE_ONCE(e->debug_id_done, t_debug_id);
3656 	return;
3657 
3658 err_dead_proc_or_thread:
3659 	binder_txn_error("%d:%d dead process or thread\n",
3660 		thread->pid, proc->pid);
3661 	return_error_line = __LINE__;
3662 	binder_dequeue_work(proc, tcomplete);
3663 err_translate_failed:
3664 err_bad_object_type:
3665 err_bad_offset:
3666 err_bad_parent:
3667 err_copy_data_failed:
3668 	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3669 	binder_free_txn_fixups(t);
3670 	trace_binder_transaction_failed_buffer_release(t->buffer);
3671 	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3672 					  buffer_offset, true);
3673 	if (target_node)
3674 		binder_dec_node_tmpref(target_node);
3675 	target_node = NULL;
3676 	t->buffer->transaction = NULL;
3677 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3678 err_binder_alloc_buf_failed:
3679 err_bad_extra_size:
3680 	if (secctx)
3681 		security_release_secctx(secctx, secctx_sz);
3682 err_get_secctx_failed:
3683 	kfree(tcomplete);
3684 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3685 err_alloc_tcomplete_failed:
3686 	if (trace_binder_txn_latency_free_enabled())
3687 		binder_txn_latency_free(t);
3688 	kfree(t);
3689 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3690 err_alloc_t_failed:
3691 err_bad_todo_list:
3692 err_bad_call_stack:
3693 err_empty_call_stack:
3694 err_dead_binder:
3695 err_invalid_target_handle:
3696 	if (target_node) {
3697 		binder_dec_node(target_node, 1, 0);
3698 		binder_dec_node_tmpref(target_node);
3699 	}
3700 
3701 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3702 		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3703 		     proc->pid, thread->pid, reply ? "reply" :
3704 		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
3705 		     target_proc ? target_proc->pid : 0,
3706 		     target_thread ? target_thread->pid : 0,
3707 		     t_debug_id, return_error, return_error_param,
3708 		     (u64)tr->data_size, (u64)tr->offsets_size,
3709 		     return_error_line);
3710 
3711 	if (target_thread)
3712 		binder_thread_dec_tmpref(target_thread);
3713 	if (target_proc)
3714 		binder_proc_dec_tmpref(target_proc);
3715 
3716 	{
3717 		struct binder_transaction_log_entry *fe;
3718 
3719 		e->return_error = return_error;
3720 		e->return_error_param = return_error_param;
3721 		e->return_error_line = return_error_line;
3722 		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3723 		*fe = *e;
3724 		/*
3725 		 * write barrier to synchronize with initialization
3726 		 * of log entry
3727 		 */
3728 		smp_wmb();
3729 		WRITE_ONCE(e->debug_id_done, t_debug_id);
3730 		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3731 	}
3732 
3733 	BUG_ON(thread->return_error.cmd != BR_OK);
3734 	if (in_reply_to) {
3735 		binder_set_txn_from_error(in_reply_to, t_debug_id,
3736 				return_error, return_error_param);
3737 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3738 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3739 		binder_send_failed_reply(in_reply_to, return_error);
3740 	} else {
3741 		binder_inner_proc_lock(proc);
3742 		binder_set_extended_error(&thread->ee, t_debug_id,
3743 				return_error, return_error_param);
3744 		binder_inner_proc_unlock(proc);
3745 		thread->return_error.cmd = return_error;
3746 		binder_enqueue_thread_work(thread, &thread->return_error.work);
3747 	}
3748 }
3749 
3750 /**
3751  * binder_free_buf() - free the specified buffer
3752  * @proc:	binder proc that owns buffer
3753  * @buffer:	buffer to be freed
3754  * @is_failure:	failed to send transaction
3755  *
3756  * If buffer for an async transaction, enqueue the next async
3757  * transaction from the node.
3758  *
3759  * Cleanup buffer and free it.
3760  */
3761 static void
3762 binder_free_buf(struct binder_proc *proc,
3763 		struct binder_thread *thread,
3764 		struct binder_buffer *buffer, bool is_failure)
3765 {
3766 	binder_inner_proc_lock(proc);
3767 	if (buffer->transaction) {
3768 		buffer->transaction->buffer = NULL;
3769 		buffer->transaction = NULL;
3770 	}
3771 	binder_inner_proc_unlock(proc);
3772 	if (buffer->async_transaction && buffer->target_node) {
3773 		struct binder_node *buf_node;
3774 		struct binder_work *w;
3775 
3776 		buf_node = buffer->target_node;
3777 		binder_node_inner_lock(buf_node);
3778 		BUG_ON(!buf_node->has_async_transaction);
3779 		BUG_ON(buf_node->proc != proc);
3780 		w = binder_dequeue_work_head_ilocked(
3781 				&buf_node->async_todo);
3782 		if (!w) {
3783 			buf_node->has_async_transaction = false;
3784 		} else {
3785 			binder_enqueue_work_ilocked(
3786 					w, &proc->todo);
3787 			binder_wakeup_proc_ilocked(proc);
3788 		}
3789 		binder_node_inner_unlock(buf_node);
3790 	}
3791 	trace_binder_transaction_buffer_release(buffer);
3792 	binder_release_entire_buffer(proc, thread, buffer, is_failure);
3793 	binder_alloc_free_buf(&proc->alloc, buffer);
3794 }
3795 
3796 static int binder_thread_write(struct binder_proc *proc,
3797 			struct binder_thread *thread,
3798 			binder_uintptr_t binder_buffer, size_t size,
3799 			binder_size_t *consumed)
3800 {
3801 	uint32_t cmd;
3802 	struct binder_context *context = proc->context;
3803 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3804 	void __user *ptr = buffer + *consumed;
3805 	void __user *end = buffer + size;
3806 
3807 	while (ptr < end && thread->return_error.cmd == BR_OK) {
3808 		int ret;
3809 
3810 		if (get_user(cmd, (uint32_t __user *)ptr))
3811 			return -EFAULT;
3812 		ptr += sizeof(uint32_t);
3813 		trace_binder_command(cmd);
3814 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3815 			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3816 			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3817 			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3818 		}
3819 		switch (cmd) {
3820 		case BC_INCREFS:
3821 		case BC_ACQUIRE:
3822 		case BC_RELEASE:
3823 		case BC_DECREFS: {
3824 			uint32_t target;
3825 			const char *debug_string;
3826 			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3827 			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3828 			struct binder_ref_data rdata;
3829 
3830 			if (get_user(target, (uint32_t __user *)ptr))
3831 				return -EFAULT;
3832 
3833 			ptr += sizeof(uint32_t);
3834 			ret = -1;
3835 			if (increment && !target) {
3836 				struct binder_node *ctx_mgr_node;
3837 
3838 				mutex_lock(&context->context_mgr_node_lock);
3839 				ctx_mgr_node = context->binder_context_mgr_node;
3840 				if (ctx_mgr_node) {
3841 					if (ctx_mgr_node->proc == proc) {
3842 						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3843 								  proc->pid, thread->pid);
3844 						mutex_unlock(&context->context_mgr_node_lock);
3845 						return -EINVAL;
3846 					}
3847 					ret = binder_inc_ref_for_node(
3848 							proc, ctx_mgr_node,
3849 							strong, NULL, &rdata);
3850 				}
3851 				mutex_unlock(&context->context_mgr_node_lock);
3852 			}
3853 			if (ret)
3854 				ret = binder_update_ref_for_handle(
3855 						proc, target, increment, strong,
3856 						&rdata);
3857 			if (!ret && rdata.desc != target) {
3858 				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3859 					proc->pid, thread->pid,
3860 					target, rdata.desc);
3861 			}
3862 			switch (cmd) {
3863 			case BC_INCREFS:
3864 				debug_string = "IncRefs";
3865 				break;
3866 			case BC_ACQUIRE:
3867 				debug_string = "Acquire";
3868 				break;
3869 			case BC_RELEASE:
3870 				debug_string = "Release";
3871 				break;
3872 			case BC_DECREFS:
3873 			default:
3874 				debug_string = "DecRefs";
3875 				break;
3876 			}
3877 			if (ret) {
3878 				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3879 					proc->pid, thread->pid, debug_string,
3880 					strong, target, ret);
3881 				break;
3882 			}
3883 			binder_debug(BINDER_DEBUG_USER_REFS,
3884 				     "%d:%d %s ref %d desc %d s %d w %d\n",
3885 				     proc->pid, thread->pid, debug_string,
3886 				     rdata.debug_id, rdata.desc, rdata.strong,
3887 				     rdata.weak);
3888 			break;
3889 		}
3890 		case BC_INCREFS_DONE:
3891 		case BC_ACQUIRE_DONE: {
3892 			binder_uintptr_t node_ptr;
3893 			binder_uintptr_t cookie;
3894 			struct binder_node *node;
3895 			bool free_node;
3896 
3897 			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3898 				return -EFAULT;
3899 			ptr += sizeof(binder_uintptr_t);
3900 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3901 				return -EFAULT;
3902 			ptr += sizeof(binder_uintptr_t);
3903 			node = binder_get_node(proc, node_ptr);
3904 			if (node == NULL) {
3905 				binder_user_error("%d:%d %s u%016llx no match\n",
3906 					proc->pid, thread->pid,
3907 					cmd == BC_INCREFS_DONE ?
3908 					"BC_INCREFS_DONE" :
3909 					"BC_ACQUIRE_DONE",
3910 					(u64)node_ptr);
3911 				break;
3912 			}
3913 			if (cookie != node->cookie) {
3914 				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3915 					proc->pid, thread->pid,
3916 					cmd == BC_INCREFS_DONE ?
3917 					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3918 					(u64)node_ptr, node->debug_id,
3919 					(u64)cookie, (u64)node->cookie);
3920 				binder_put_node(node);
3921 				break;
3922 			}
3923 			binder_node_inner_lock(node);
3924 			if (cmd == BC_ACQUIRE_DONE) {
3925 				if (node->pending_strong_ref == 0) {
3926 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3927 						proc->pid, thread->pid,
3928 						node->debug_id);
3929 					binder_node_inner_unlock(node);
3930 					binder_put_node(node);
3931 					break;
3932 				}
3933 				node->pending_strong_ref = 0;
3934 			} else {
3935 				if (node->pending_weak_ref == 0) {
3936 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3937 						proc->pid, thread->pid,
3938 						node->debug_id);
3939 					binder_node_inner_unlock(node);
3940 					binder_put_node(node);
3941 					break;
3942 				}
3943 				node->pending_weak_ref = 0;
3944 			}
3945 			free_node = binder_dec_node_nilocked(node,
3946 					cmd == BC_ACQUIRE_DONE, 0);
3947 			WARN_ON(free_node);
3948 			binder_debug(BINDER_DEBUG_USER_REFS,
3949 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3950 				     proc->pid, thread->pid,
3951 				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3952 				     node->debug_id, node->local_strong_refs,
3953 				     node->local_weak_refs, node->tmp_refs);
3954 			binder_node_inner_unlock(node);
3955 			binder_put_node(node);
3956 			break;
3957 		}
3958 		case BC_ATTEMPT_ACQUIRE:
3959 			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3960 			return -EINVAL;
3961 		case BC_ACQUIRE_RESULT:
3962 			pr_err("BC_ACQUIRE_RESULT not supported\n");
3963 			return -EINVAL;
3964 
3965 		case BC_FREE_BUFFER: {
3966 			binder_uintptr_t data_ptr;
3967 			struct binder_buffer *buffer;
3968 
3969 			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3970 				return -EFAULT;
3971 			ptr += sizeof(binder_uintptr_t);
3972 
3973 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3974 							      data_ptr);
3975 			if (IS_ERR_OR_NULL(buffer)) {
3976 				if (PTR_ERR(buffer) == -EPERM) {
3977 					binder_user_error(
3978 						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3979 						proc->pid, thread->pid,
3980 						(u64)data_ptr);
3981 				} else {
3982 					binder_user_error(
3983 						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3984 						proc->pid, thread->pid,
3985 						(u64)data_ptr);
3986 				}
3987 				break;
3988 			}
3989 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3990 				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3991 				     proc->pid, thread->pid, (u64)data_ptr,
3992 				     buffer->debug_id,
3993 				     buffer->transaction ? "active" : "finished");
3994 			binder_free_buf(proc, thread, buffer, false);
3995 			break;
3996 		}
3997 
3998 		case BC_TRANSACTION_SG:
3999 		case BC_REPLY_SG: {
4000 			struct binder_transaction_data_sg tr;
4001 
4002 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4003 				return -EFAULT;
4004 			ptr += sizeof(tr);
4005 			binder_transaction(proc, thread, &tr.transaction_data,
4006 					   cmd == BC_REPLY_SG, tr.buffers_size);
4007 			break;
4008 		}
4009 		case BC_TRANSACTION:
4010 		case BC_REPLY: {
4011 			struct binder_transaction_data tr;
4012 
4013 			if (copy_from_user(&tr, ptr, sizeof(tr)))
4014 				return -EFAULT;
4015 			ptr += sizeof(tr);
4016 			binder_transaction(proc, thread, &tr,
4017 					   cmd == BC_REPLY, 0);
4018 			break;
4019 		}
4020 
4021 		case BC_REGISTER_LOOPER:
4022 			binder_debug(BINDER_DEBUG_THREADS,
4023 				     "%d:%d BC_REGISTER_LOOPER\n",
4024 				     proc->pid, thread->pid);
4025 			binder_inner_proc_lock(proc);
4026 			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4027 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4028 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4029 					proc->pid, thread->pid);
4030 			} else if (proc->requested_threads == 0) {
4031 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4032 				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4033 					proc->pid, thread->pid);
4034 			} else {
4035 				proc->requested_threads--;
4036 				proc->requested_threads_started++;
4037 			}
4038 			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4039 			binder_inner_proc_unlock(proc);
4040 			break;
4041 		case BC_ENTER_LOOPER:
4042 			binder_debug(BINDER_DEBUG_THREADS,
4043 				     "%d:%d BC_ENTER_LOOPER\n",
4044 				     proc->pid, thread->pid);
4045 			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4046 				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4047 				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4048 					proc->pid, thread->pid);
4049 			}
4050 			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4051 			break;
4052 		case BC_EXIT_LOOPER:
4053 			binder_debug(BINDER_DEBUG_THREADS,
4054 				     "%d:%d BC_EXIT_LOOPER\n",
4055 				     proc->pid, thread->pid);
4056 			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4057 			break;
4058 
4059 		case BC_REQUEST_DEATH_NOTIFICATION:
4060 		case BC_CLEAR_DEATH_NOTIFICATION: {
4061 			uint32_t target;
4062 			binder_uintptr_t cookie;
4063 			struct binder_ref *ref;
4064 			struct binder_ref_death *death = NULL;
4065 
4066 			if (get_user(target, (uint32_t __user *)ptr))
4067 				return -EFAULT;
4068 			ptr += sizeof(uint32_t);
4069 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4070 				return -EFAULT;
4071 			ptr += sizeof(binder_uintptr_t);
4072 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4073 				/*
4074 				 * Allocate memory for death notification
4075 				 * before taking lock
4076 				 */
4077 				death = kzalloc(sizeof(*death), GFP_KERNEL);
4078 				if (death == NULL) {
4079 					WARN_ON(thread->return_error.cmd !=
4080 						BR_OK);
4081 					thread->return_error.cmd = BR_ERROR;
4082 					binder_enqueue_thread_work(
4083 						thread,
4084 						&thread->return_error.work);
4085 					binder_debug(
4086 						BINDER_DEBUG_FAILED_TRANSACTION,
4087 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4088 						proc->pid, thread->pid);
4089 					break;
4090 				}
4091 			}
4092 			binder_proc_lock(proc);
4093 			ref = binder_get_ref_olocked(proc, target, false);
4094 			if (ref == NULL) {
4095 				binder_user_error("%d:%d %s invalid ref %d\n",
4096 					proc->pid, thread->pid,
4097 					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4098 					"BC_REQUEST_DEATH_NOTIFICATION" :
4099 					"BC_CLEAR_DEATH_NOTIFICATION",
4100 					target);
4101 				binder_proc_unlock(proc);
4102 				kfree(death);
4103 				break;
4104 			}
4105 
4106 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4107 				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4108 				     proc->pid, thread->pid,
4109 				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4110 				     "BC_REQUEST_DEATH_NOTIFICATION" :
4111 				     "BC_CLEAR_DEATH_NOTIFICATION",
4112 				     (u64)cookie, ref->data.debug_id,
4113 				     ref->data.desc, ref->data.strong,
4114 				     ref->data.weak, ref->node->debug_id);
4115 
4116 			binder_node_lock(ref->node);
4117 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4118 				if (ref->death) {
4119 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4120 						proc->pid, thread->pid);
4121 					binder_node_unlock(ref->node);
4122 					binder_proc_unlock(proc);
4123 					kfree(death);
4124 					break;
4125 				}
4126 				binder_stats_created(BINDER_STAT_DEATH);
4127 				INIT_LIST_HEAD(&death->work.entry);
4128 				death->cookie = cookie;
4129 				ref->death = death;
4130 				if (ref->node->proc == NULL) {
4131 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4132 
4133 					binder_inner_proc_lock(proc);
4134 					binder_enqueue_work_ilocked(
4135 						&ref->death->work, &proc->todo);
4136 					binder_wakeup_proc_ilocked(proc);
4137 					binder_inner_proc_unlock(proc);
4138 				}
4139 			} else {
4140 				if (ref->death == NULL) {
4141 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4142 						proc->pid, thread->pid);
4143 					binder_node_unlock(ref->node);
4144 					binder_proc_unlock(proc);
4145 					break;
4146 				}
4147 				death = ref->death;
4148 				if (death->cookie != cookie) {
4149 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4150 						proc->pid, thread->pid,
4151 						(u64)death->cookie,
4152 						(u64)cookie);
4153 					binder_node_unlock(ref->node);
4154 					binder_proc_unlock(proc);
4155 					break;
4156 				}
4157 				ref->death = NULL;
4158 				binder_inner_proc_lock(proc);
4159 				if (list_empty(&death->work.entry)) {
4160 					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4161 					if (thread->looper &
4162 					    (BINDER_LOOPER_STATE_REGISTERED |
4163 					     BINDER_LOOPER_STATE_ENTERED))
4164 						binder_enqueue_thread_work_ilocked(
4165 								thread,
4166 								&death->work);
4167 					else {
4168 						binder_enqueue_work_ilocked(
4169 								&death->work,
4170 								&proc->todo);
4171 						binder_wakeup_proc_ilocked(
4172 								proc);
4173 					}
4174 				} else {
4175 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4176 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4177 				}
4178 				binder_inner_proc_unlock(proc);
4179 			}
4180 			binder_node_unlock(ref->node);
4181 			binder_proc_unlock(proc);
4182 		} break;
4183 		case BC_DEAD_BINDER_DONE: {
4184 			struct binder_work *w;
4185 			binder_uintptr_t cookie;
4186 			struct binder_ref_death *death = NULL;
4187 
4188 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4189 				return -EFAULT;
4190 
4191 			ptr += sizeof(cookie);
4192 			binder_inner_proc_lock(proc);
4193 			list_for_each_entry(w, &proc->delivered_death,
4194 					    entry) {
4195 				struct binder_ref_death *tmp_death =
4196 					container_of(w,
4197 						     struct binder_ref_death,
4198 						     work);
4199 
4200 				if (tmp_death->cookie == cookie) {
4201 					death = tmp_death;
4202 					break;
4203 				}
4204 			}
4205 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4206 				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4207 				     proc->pid, thread->pid, (u64)cookie,
4208 				     death);
4209 			if (death == NULL) {
4210 				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4211 					proc->pid, thread->pid, (u64)cookie);
4212 				binder_inner_proc_unlock(proc);
4213 				break;
4214 			}
4215 			binder_dequeue_work_ilocked(&death->work);
4216 			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4217 				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4218 				if (thread->looper &
4219 					(BINDER_LOOPER_STATE_REGISTERED |
4220 					 BINDER_LOOPER_STATE_ENTERED))
4221 					binder_enqueue_thread_work_ilocked(
4222 						thread, &death->work);
4223 				else {
4224 					binder_enqueue_work_ilocked(
4225 							&death->work,
4226 							&proc->todo);
4227 					binder_wakeup_proc_ilocked(proc);
4228 				}
4229 			}
4230 			binder_inner_proc_unlock(proc);
4231 		} break;
4232 
4233 		default:
4234 			pr_err("%d:%d unknown command %u\n",
4235 			       proc->pid, thread->pid, cmd);
4236 			return -EINVAL;
4237 		}
4238 		*consumed = ptr - buffer;
4239 	}
4240 	return 0;
4241 }
4242 
4243 static void binder_stat_br(struct binder_proc *proc,
4244 			   struct binder_thread *thread, uint32_t cmd)
4245 {
4246 	trace_binder_return(cmd);
4247 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4248 		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4249 		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4250 		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4251 	}
4252 }
4253 
4254 static int binder_put_node_cmd(struct binder_proc *proc,
4255 			       struct binder_thread *thread,
4256 			       void __user **ptrp,
4257 			       binder_uintptr_t node_ptr,
4258 			       binder_uintptr_t node_cookie,
4259 			       int node_debug_id,
4260 			       uint32_t cmd, const char *cmd_name)
4261 {
4262 	void __user *ptr = *ptrp;
4263 
4264 	if (put_user(cmd, (uint32_t __user *)ptr))
4265 		return -EFAULT;
4266 	ptr += sizeof(uint32_t);
4267 
4268 	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4269 		return -EFAULT;
4270 	ptr += sizeof(binder_uintptr_t);
4271 
4272 	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4273 		return -EFAULT;
4274 	ptr += sizeof(binder_uintptr_t);
4275 
4276 	binder_stat_br(proc, thread, cmd);
4277 	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4278 		     proc->pid, thread->pid, cmd_name, node_debug_id,
4279 		     (u64)node_ptr, (u64)node_cookie);
4280 
4281 	*ptrp = ptr;
4282 	return 0;
4283 }
4284 
4285 static int binder_wait_for_work(struct binder_thread *thread,
4286 				bool do_proc_work)
4287 {
4288 	DEFINE_WAIT(wait);
4289 	struct binder_proc *proc = thread->proc;
4290 	int ret = 0;
4291 
4292 	binder_inner_proc_lock(proc);
4293 	for (;;) {
4294 		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4295 		if (binder_has_work_ilocked(thread, do_proc_work))
4296 			break;
4297 		if (do_proc_work)
4298 			list_add(&thread->waiting_thread_node,
4299 				 &proc->waiting_threads);
4300 		binder_inner_proc_unlock(proc);
4301 		schedule();
4302 		binder_inner_proc_lock(proc);
4303 		list_del_init(&thread->waiting_thread_node);
4304 		if (signal_pending(current)) {
4305 			ret = -EINTR;
4306 			break;
4307 		}
4308 	}
4309 	finish_wait(&thread->wait, &wait);
4310 	binder_inner_proc_unlock(proc);
4311 
4312 	return ret;
4313 }
4314 
4315 /**
4316  * binder_apply_fd_fixups() - finish fd translation
4317  * @proc:         binder_proc associated @t->buffer
4318  * @t:	binder transaction with list of fd fixups
4319  *
4320  * Now that we are in the context of the transaction target
4321  * process, we can allocate and install fds. Process the
4322  * list of fds to translate and fixup the buffer with the
4323  * new fds first and only then install the files.
4324  *
4325  * If we fail to allocate an fd, skip the install and release
4326  * any fds that have already been allocated.
4327  */
4328 static int binder_apply_fd_fixups(struct binder_proc *proc,
4329 				  struct binder_transaction *t)
4330 {
4331 	struct binder_txn_fd_fixup *fixup, *tmp;
4332 	int ret = 0;
4333 
4334 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4335 		int fd = get_unused_fd_flags(O_CLOEXEC);
4336 
4337 		if (fd < 0) {
4338 			binder_debug(BINDER_DEBUG_TRANSACTION,
4339 				     "failed fd fixup txn %d fd %d\n",
4340 				     t->debug_id, fd);
4341 			ret = -ENOMEM;
4342 			goto err;
4343 		}
4344 		binder_debug(BINDER_DEBUG_TRANSACTION,
4345 			     "fd fixup txn %d fd %d\n",
4346 			     t->debug_id, fd);
4347 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4348 		fixup->target_fd = fd;
4349 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4350 						fixup->offset, &fd,
4351 						sizeof(u32))) {
4352 			ret = -EINVAL;
4353 			goto err;
4354 		}
4355 	}
4356 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4357 		fd_install(fixup->target_fd, fixup->file);
4358 		list_del(&fixup->fixup_entry);
4359 		kfree(fixup);
4360 	}
4361 
4362 	return ret;
4363 
4364 err:
4365 	binder_free_txn_fixups(t);
4366 	return ret;
4367 }
4368 
4369 static int binder_thread_read(struct binder_proc *proc,
4370 			      struct binder_thread *thread,
4371 			      binder_uintptr_t binder_buffer, size_t size,
4372 			      binder_size_t *consumed, int non_block)
4373 {
4374 	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4375 	void __user *ptr = buffer + *consumed;
4376 	void __user *end = buffer + size;
4377 
4378 	int ret = 0;
4379 	int wait_for_proc_work;
4380 
4381 	if (*consumed == 0) {
4382 		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4383 			return -EFAULT;
4384 		ptr += sizeof(uint32_t);
4385 	}
4386 
4387 retry:
4388 	binder_inner_proc_lock(proc);
4389 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4390 	binder_inner_proc_unlock(proc);
4391 
4392 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4393 
4394 	trace_binder_wait_for_work(wait_for_proc_work,
4395 				   !!thread->transaction_stack,
4396 				   !binder_worklist_empty(proc, &thread->todo));
4397 	if (wait_for_proc_work) {
4398 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4399 					BINDER_LOOPER_STATE_ENTERED))) {
4400 			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4401 				proc->pid, thread->pid, thread->looper);
4402 			wait_event_interruptible(binder_user_error_wait,
4403 						 binder_stop_on_user_error < 2);
4404 		}
4405 		binder_set_nice(proc->default_priority);
4406 	}
4407 
4408 	if (non_block) {
4409 		if (!binder_has_work(thread, wait_for_proc_work))
4410 			ret = -EAGAIN;
4411 	} else {
4412 		ret = binder_wait_for_work(thread, wait_for_proc_work);
4413 	}
4414 
4415 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4416 
4417 	if (ret)
4418 		return ret;
4419 
4420 	while (1) {
4421 		uint32_t cmd;
4422 		struct binder_transaction_data_secctx tr;
4423 		struct binder_transaction_data *trd = &tr.transaction_data;
4424 		struct binder_work *w = NULL;
4425 		struct list_head *list = NULL;
4426 		struct binder_transaction *t = NULL;
4427 		struct binder_thread *t_from;
4428 		size_t trsize = sizeof(*trd);
4429 
4430 		binder_inner_proc_lock(proc);
4431 		if (!binder_worklist_empty_ilocked(&thread->todo))
4432 			list = &thread->todo;
4433 		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4434 			   wait_for_proc_work)
4435 			list = &proc->todo;
4436 		else {
4437 			binder_inner_proc_unlock(proc);
4438 
4439 			/* no data added */
4440 			if (ptr - buffer == 4 && !thread->looper_need_return)
4441 				goto retry;
4442 			break;
4443 		}
4444 
4445 		if (end - ptr < sizeof(tr) + 4) {
4446 			binder_inner_proc_unlock(proc);
4447 			break;
4448 		}
4449 		w = binder_dequeue_work_head_ilocked(list);
4450 		if (binder_worklist_empty_ilocked(&thread->todo))
4451 			thread->process_todo = false;
4452 
4453 		switch (w->type) {
4454 		case BINDER_WORK_TRANSACTION: {
4455 			binder_inner_proc_unlock(proc);
4456 			t = container_of(w, struct binder_transaction, work);
4457 		} break;
4458 		case BINDER_WORK_RETURN_ERROR: {
4459 			struct binder_error *e = container_of(
4460 					w, struct binder_error, work);
4461 
4462 			WARN_ON(e->cmd == BR_OK);
4463 			binder_inner_proc_unlock(proc);
4464 			if (put_user(e->cmd, (uint32_t __user *)ptr))
4465 				return -EFAULT;
4466 			cmd = e->cmd;
4467 			e->cmd = BR_OK;
4468 			ptr += sizeof(uint32_t);
4469 
4470 			binder_stat_br(proc, thread, cmd);
4471 		} break;
4472 		case BINDER_WORK_TRANSACTION_COMPLETE:
4473 		case BINDER_WORK_TRANSACTION_PENDING:
4474 		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4475 			if (proc->oneway_spam_detection_enabled &&
4476 				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4477 				cmd = BR_ONEWAY_SPAM_SUSPECT;
4478 			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4479 				cmd = BR_TRANSACTION_PENDING_FROZEN;
4480 			else
4481 				cmd = BR_TRANSACTION_COMPLETE;
4482 			binder_inner_proc_unlock(proc);
4483 			kfree(w);
4484 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4485 			if (put_user(cmd, (uint32_t __user *)ptr))
4486 				return -EFAULT;
4487 			ptr += sizeof(uint32_t);
4488 
4489 			binder_stat_br(proc, thread, cmd);
4490 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4491 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4492 				     proc->pid, thread->pid);
4493 		} break;
4494 		case BINDER_WORK_NODE: {
4495 			struct binder_node *node = container_of(w, struct binder_node, work);
4496 			int strong, weak;
4497 			binder_uintptr_t node_ptr = node->ptr;
4498 			binder_uintptr_t node_cookie = node->cookie;
4499 			int node_debug_id = node->debug_id;
4500 			int has_weak_ref;
4501 			int has_strong_ref;
4502 			void __user *orig_ptr = ptr;
4503 
4504 			BUG_ON(proc != node->proc);
4505 			strong = node->internal_strong_refs ||
4506 					node->local_strong_refs;
4507 			weak = !hlist_empty(&node->refs) ||
4508 					node->local_weak_refs ||
4509 					node->tmp_refs || strong;
4510 			has_strong_ref = node->has_strong_ref;
4511 			has_weak_ref = node->has_weak_ref;
4512 
4513 			if (weak && !has_weak_ref) {
4514 				node->has_weak_ref = 1;
4515 				node->pending_weak_ref = 1;
4516 				node->local_weak_refs++;
4517 			}
4518 			if (strong && !has_strong_ref) {
4519 				node->has_strong_ref = 1;
4520 				node->pending_strong_ref = 1;
4521 				node->local_strong_refs++;
4522 			}
4523 			if (!strong && has_strong_ref)
4524 				node->has_strong_ref = 0;
4525 			if (!weak && has_weak_ref)
4526 				node->has_weak_ref = 0;
4527 			if (!weak && !strong) {
4528 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4529 					     "%d:%d node %d u%016llx c%016llx deleted\n",
4530 					     proc->pid, thread->pid,
4531 					     node_debug_id,
4532 					     (u64)node_ptr,
4533 					     (u64)node_cookie);
4534 				rb_erase(&node->rb_node, &proc->nodes);
4535 				binder_inner_proc_unlock(proc);
4536 				binder_node_lock(node);
4537 				/*
4538 				 * Acquire the node lock before freeing the
4539 				 * node to serialize with other threads that
4540 				 * may have been holding the node lock while
4541 				 * decrementing this node (avoids race where
4542 				 * this thread frees while the other thread
4543 				 * is unlocking the node after the final
4544 				 * decrement)
4545 				 */
4546 				binder_node_unlock(node);
4547 				binder_free_node(node);
4548 			} else
4549 				binder_inner_proc_unlock(proc);
4550 
4551 			if (weak && !has_weak_ref)
4552 				ret = binder_put_node_cmd(
4553 						proc, thread, &ptr, node_ptr,
4554 						node_cookie, node_debug_id,
4555 						BR_INCREFS, "BR_INCREFS");
4556 			if (!ret && strong && !has_strong_ref)
4557 				ret = binder_put_node_cmd(
4558 						proc, thread, &ptr, node_ptr,
4559 						node_cookie, node_debug_id,
4560 						BR_ACQUIRE, "BR_ACQUIRE");
4561 			if (!ret && !strong && has_strong_ref)
4562 				ret = binder_put_node_cmd(
4563 						proc, thread, &ptr, node_ptr,
4564 						node_cookie, node_debug_id,
4565 						BR_RELEASE, "BR_RELEASE");
4566 			if (!ret && !weak && has_weak_ref)
4567 				ret = binder_put_node_cmd(
4568 						proc, thread, &ptr, node_ptr,
4569 						node_cookie, node_debug_id,
4570 						BR_DECREFS, "BR_DECREFS");
4571 			if (orig_ptr == ptr)
4572 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4573 					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4574 					     proc->pid, thread->pid,
4575 					     node_debug_id,
4576 					     (u64)node_ptr,
4577 					     (u64)node_cookie);
4578 			if (ret)
4579 				return ret;
4580 		} break;
4581 		case BINDER_WORK_DEAD_BINDER:
4582 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4583 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4584 			struct binder_ref_death *death;
4585 			uint32_t cmd;
4586 			binder_uintptr_t cookie;
4587 
4588 			death = container_of(w, struct binder_ref_death, work);
4589 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4590 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4591 			else
4592 				cmd = BR_DEAD_BINDER;
4593 			cookie = death->cookie;
4594 
4595 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4596 				     "%d:%d %s %016llx\n",
4597 				      proc->pid, thread->pid,
4598 				      cmd == BR_DEAD_BINDER ?
4599 				      "BR_DEAD_BINDER" :
4600 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4601 				      (u64)cookie);
4602 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4603 				binder_inner_proc_unlock(proc);
4604 				kfree(death);
4605 				binder_stats_deleted(BINDER_STAT_DEATH);
4606 			} else {
4607 				binder_enqueue_work_ilocked(
4608 						w, &proc->delivered_death);
4609 				binder_inner_proc_unlock(proc);
4610 			}
4611 			if (put_user(cmd, (uint32_t __user *)ptr))
4612 				return -EFAULT;
4613 			ptr += sizeof(uint32_t);
4614 			if (put_user(cookie,
4615 				     (binder_uintptr_t __user *)ptr))
4616 				return -EFAULT;
4617 			ptr += sizeof(binder_uintptr_t);
4618 			binder_stat_br(proc, thread, cmd);
4619 			if (cmd == BR_DEAD_BINDER)
4620 				goto done; /* DEAD_BINDER notifications can cause transactions */
4621 		} break;
4622 		default:
4623 			binder_inner_proc_unlock(proc);
4624 			pr_err("%d:%d: bad work type %d\n",
4625 			       proc->pid, thread->pid, w->type);
4626 			break;
4627 		}
4628 
4629 		if (!t)
4630 			continue;
4631 
4632 		BUG_ON(t->buffer == NULL);
4633 		if (t->buffer->target_node) {
4634 			struct binder_node *target_node = t->buffer->target_node;
4635 
4636 			trd->target.ptr = target_node->ptr;
4637 			trd->cookie =  target_node->cookie;
4638 			t->saved_priority = task_nice(current);
4639 			if (t->priority < target_node->min_priority &&
4640 			    !(t->flags & TF_ONE_WAY))
4641 				binder_set_nice(t->priority);
4642 			else if (!(t->flags & TF_ONE_WAY) ||
4643 				 t->saved_priority > target_node->min_priority)
4644 				binder_set_nice(target_node->min_priority);
4645 			cmd = BR_TRANSACTION;
4646 		} else {
4647 			trd->target.ptr = 0;
4648 			trd->cookie = 0;
4649 			cmd = BR_REPLY;
4650 		}
4651 		trd->code = t->code;
4652 		trd->flags = t->flags;
4653 		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4654 
4655 		t_from = binder_get_txn_from(t);
4656 		if (t_from) {
4657 			struct task_struct *sender = t_from->proc->tsk;
4658 
4659 			trd->sender_pid =
4660 				task_tgid_nr_ns(sender,
4661 						task_active_pid_ns(current));
4662 		} else {
4663 			trd->sender_pid = 0;
4664 		}
4665 
4666 		ret = binder_apply_fd_fixups(proc, t);
4667 		if (ret) {
4668 			struct binder_buffer *buffer = t->buffer;
4669 			bool oneway = !!(t->flags & TF_ONE_WAY);
4670 			int tid = t->debug_id;
4671 
4672 			if (t_from)
4673 				binder_thread_dec_tmpref(t_from);
4674 			buffer->transaction = NULL;
4675 			binder_cleanup_transaction(t, "fd fixups failed",
4676 						   BR_FAILED_REPLY);
4677 			binder_free_buf(proc, thread, buffer, true);
4678 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4679 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4680 				     proc->pid, thread->pid,
4681 				     oneway ? "async " :
4682 					(cmd == BR_REPLY ? "reply " : ""),
4683 				     tid, BR_FAILED_REPLY, ret, __LINE__);
4684 			if (cmd == BR_REPLY) {
4685 				cmd = BR_FAILED_REPLY;
4686 				if (put_user(cmd, (uint32_t __user *)ptr))
4687 					return -EFAULT;
4688 				ptr += sizeof(uint32_t);
4689 				binder_stat_br(proc, thread, cmd);
4690 				break;
4691 			}
4692 			continue;
4693 		}
4694 		trd->data_size = t->buffer->data_size;
4695 		trd->offsets_size = t->buffer->offsets_size;
4696 		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4697 		trd->data.ptr.offsets = trd->data.ptr.buffer +
4698 					ALIGN(t->buffer->data_size,
4699 					    sizeof(void *));
4700 
4701 		tr.secctx = t->security_ctx;
4702 		if (t->security_ctx) {
4703 			cmd = BR_TRANSACTION_SEC_CTX;
4704 			trsize = sizeof(tr);
4705 		}
4706 		if (put_user(cmd, (uint32_t __user *)ptr)) {
4707 			if (t_from)
4708 				binder_thread_dec_tmpref(t_from);
4709 
4710 			binder_cleanup_transaction(t, "put_user failed",
4711 						   BR_FAILED_REPLY);
4712 
4713 			return -EFAULT;
4714 		}
4715 		ptr += sizeof(uint32_t);
4716 		if (copy_to_user(ptr, &tr, trsize)) {
4717 			if (t_from)
4718 				binder_thread_dec_tmpref(t_from);
4719 
4720 			binder_cleanup_transaction(t, "copy_to_user failed",
4721 						   BR_FAILED_REPLY);
4722 
4723 			return -EFAULT;
4724 		}
4725 		ptr += trsize;
4726 
4727 		trace_binder_transaction_received(t);
4728 		binder_stat_br(proc, thread, cmd);
4729 		binder_debug(BINDER_DEBUG_TRANSACTION,
4730 			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4731 			     proc->pid, thread->pid,
4732 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4733 				(cmd == BR_TRANSACTION_SEC_CTX) ?
4734 				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4735 			     t->debug_id, t_from ? t_from->proc->pid : 0,
4736 			     t_from ? t_from->pid : 0, cmd,
4737 			     t->buffer->data_size, t->buffer->offsets_size,
4738 			     (u64)trd->data.ptr.buffer,
4739 			     (u64)trd->data.ptr.offsets);
4740 
4741 		if (t_from)
4742 			binder_thread_dec_tmpref(t_from);
4743 		t->buffer->allow_user_free = 1;
4744 		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4745 			binder_inner_proc_lock(thread->proc);
4746 			t->to_parent = thread->transaction_stack;
4747 			t->to_thread = thread;
4748 			thread->transaction_stack = t;
4749 			binder_inner_proc_unlock(thread->proc);
4750 		} else {
4751 			binder_free_transaction(t);
4752 		}
4753 		break;
4754 	}
4755 
4756 done:
4757 
4758 	*consumed = ptr - buffer;
4759 	binder_inner_proc_lock(proc);
4760 	if (proc->requested_threads == 0 &&
4761 	    list_empty(&thread->proc->waiting_threads) &&
4762 	    proc->requested_threads_started < proc->max_threads &&
4763 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4764 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4765 	     /*spawn a new thread if we leave this out */) {
4766 		proc->requested_threads++;
4767 		binder_inner_proc_unlock(proc);
4768 		binder_debug(BINDER_DEBUG_THREADS,
4769 			     "%d:%d BR_SPAWN_LOOPER\n",
4770 			     proc->pid, thread->pid);
4771 		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4772 			return -EFAULT;
4773 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4774 	} else
4775 		binder_inner_proc_unlock(proc);
4776 	return 0;
4777 }
4778 
4779 static void binder_release_work(struct binder_proc *proc,
4780 				struct list_head *list)
4781 {
4782 	struct binder_work *w;
4783 	enum binder_work_type wtype;
4784 
4785 	while (1) {
4786 		binder_inner_proc_lock(proc);
4787 		w = binder_dequeue_work_head_ilocked(list);
4788 		wtype = w ? w->type : 0;
4789 		binder_inner_proc_unlock(proc);
4790 		if (!w)
4791 			return;
4792 
4793 		switch (wtype) {
4794 		case BINDER_WORK_TRANSACTION: {
4795 			struct binder_transaction *t;
4796 
4797 			t = container_of(w, struct binder_transaction, work);
4798 
4799 			binder_cleanup_transaction(t, "process died.",
4800 						   BR_DEAD_REPLY);
4801 		} break;
4802 		case BINDER_WORK_RETURN_ERROR: {
4803 			struct binder_error *e = container_of(
4804 					w, struct binder_error, work);
4805 
4806 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4807 				"undelivered TRANSACTION_ERROR: %u\n",
4808 				e->cmd);
4809 		} break;
4810 		case BINDER_WORK_TRANSACTION_COMPLETE: {
4811 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4812 				"undelivered TRANSACTION_COMPLETE\n");
4813 			kfree(w);
4814 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4815 		} break;
4816 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4817 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4818 			struct binder_ref_death *death;
4819 
4820 			death = container_of(w, struct binder_ref_death, work);
4821 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4822 				"undelivered death notification, %016llx\n",
4823 				(u64)death->cookie);
4824 			kfree(death);
4825 			binder_stats_deleted(BINDER_STAT_DEATH);
4826 		} break;
4827 		case BINDER_WORK_NODE:
4828 			break;
4829 		default:
4830 			pr_err("unexpected work type, %d, not freed\n",
4831 			       wtype);
4832 			break;
4833 		}
4834 	}
4835 
4836 }
4837 
4838 static struct binder_thread *binder_get_thread_ilocked(
4839 		struct binder_proc *proc, struct binder_thread *new_thread)
4840 {
4841 	struct binder_thread *thread = NULL;
4842 	struct rb_node *parent = NULL;
4843 	struct rb_node **p = &proc->threads.rb_node;
4844 
4845 	while (*p) {
4846 		parent = *p;
4847 		thread = rb_entry(parent, struct binder_thread, rb_node);
4848 
4849 		if (current->pid < thread->pid)
4850 			p = &(*p)->rb_left;
4851 		else if (current->pid > thread->pid)
4852 			p = &(*p)->rb_right;
4853 		else
4854 			return thread;
4855 	}
4856 	if (!new_thread)
4857 		return NULL;
4858 	thread = new_thread;
4859 	binder_stats_created(BINDER_STAT_THREAD);
4860 	thread->proc = proc;
4861 	thread->pid = current->pid;
4862 	atomic_set(&thread->tmp_ref, 0);
4863 	init_waitqueue_head(&thread->wait);
4864 	INIT_LIST_HEAD(&thread->todo);
4865 	rb_link_node(&thread->rb_node, parent, p);
4866 	rb_insert_color(&thread->rb_node, &proc->threads);
4867 	thread->looper_need_return = true;
4868 	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4869 	thread->return_error.cmd = BR_OK;
4870 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4871 	thread->reply_error.cmd = BR_OK;
4872 	thread->ee.command = BR_OK;
4873 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4874 	return thread;
4875 }
4876 
4877 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4878 {
4879 	struct binder_thread *thread;
4880 	struct binder_thread *new_thread;
4881 
4882 	binder_inner_proc_lock(proc);
4883 	thread = binder_get_thread_ilocked(proc, NULL);
4884 	binder_inner_proc_unlock(proc);
4885 	if (!thread) {
4886 		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4887 		if (new_thread == NULL)
4888 			return NULL;
4889 		binder_inner_proc_lock(proc);
4890 		thread = binder_get_thread_ilocked(proc, new_thread);
4891 		binder_inner_proc_unlock(proc);
4892 		if (thread != new_thread)
4893 			kfree(new_thread);
4894 	}
4895 	return thread;
4896 }
4897 
4898 static void binder_free_proc(struct binder_proc *proc)
4899 {
4900 	struct binder_device *device;
4901 
4902 	BUG_ON(!list_empty(&proc->todo));
4903 	BUG_ON(!list_empty(&proc->delivered_death));
4904 	if (proc->outstanding_txns)
4905 		pr_warn("%s: Unexpected outstanding_txns %d\n",
4906 			__func__, proc->outstanding_txns);
4907 	device = container_of(proc->context, struct binder_device, context);
4908 	if (refcount_dec_and_test(&device->ref)) {
4909 		kfree(proc->context->name);
4910 		kfree(device);
4911 	}
4912 	binder_alloc_deferred_release(&proc->alloc);
4913 	put_task_struct(proc->tsk);
4914 	put_cred(proc->cred);
4915 	binder_stats_deleted(BINDER_STAT_PROC);
4916 	kfree(proc);
4917 }
4918 
4919 static void binder_free_thread(struct binder_thread *thread)
4920 {
4921 	BUG_ON(!list_empty(&thread->todo));
4922 	binder_stats_deleted(BINDER_STAT_THREAD);
4923 	binder_proc_dec_tmpref(thread->proc);
4924 	kfree(thread);
4925 }
4926 
4927 static int binder_thread_release(struct binder_proc *proc,
4928 				 struct binder_thread *thread)
4929 {
4930 	struct binder_transaction *t;
4931 	struct binder_transaction *send_reply = NULL;
4932 	int active_transactions = 0;
4933 	struct binder_transaction *last_t = NULL;
4934 
4935 	binder_inner_proc_lock(thread->proc);
4936 	/*
4937 	 * take a ref on the proc so it survives
4938 	 * after we remove this thread from proc->threads.
4939 	 * The corresponding dec is when we actually
4940 	 * free the thread in binder_free_thread()
4941 	 */
4942 	proc->tmp_ref++;
4943 	/*
4944 	 * take a ref on this thread to ensure it
4945 	 * survives while we are releasing it
4946 	 */
4947 	atomic_inc(&thread->tmp_ref);
4948 	rb_erase(&thread->rb_node, &proc->threads);
4949 	t = thread->transaction_stack;
4950 	if (t) {
4951 		spin_lock(&t->lock);
4952 		if (t->to_thread == thread)
4953 			send_reply = t;
4954 	} else {
4955 		__acquire(&t->lock);
4956 	}
4957 	thread->is_dead = true;
4958 
4959 	while (t) {
4960 		last_t = t;
4961 		active_transactions++;
4962 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4963 			     "release %d:%d transaction %d %s, still active\n",
4964 			      proc->pid, thread->pid,
4965 			     t->debug_id,
4966 			     (t->to_thread == thread) ? "in" : "out");
4967 
4968 		if (t->to_thread == thread) {
4969 			thread->proc->outstanding_txns--;
4970 			t->to_proc = NULL;
4971 			t->to_thread = NULL;
4972 			if (t->buffer) {
4973 				t->buffer->transaction = NULL;
4974 				t->buffer = NULL;
4975 			}
4976 			t = t->to_parent;
4977 		} else if (t->from == thread) {
4978 			t->from = NULL;
4979 			t = t->from_parent;
4980 		} else
4981 			BUG();
4982 		spin_unlock(&last_t->lock);
4983 		if (t)
4984 			spin_lock(&t->lock);
4985 		else
4986 			__acquire(&t->lock);
4987 	}
4988 	/* annotation for sparse, lock not acquired in last iteration above */
4989 	__release(&t->lock);
4990 
4991 	/*
4992 	 * If this thread used poll, make sure we remove the waitqueue from any
4993 	 * poll data structures holding it.
4994 	 */
4995 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4996 		wake_up_pollfree(&thread->wait);
4997 
4998 	binder_inner_proc_unlock(thread->proc);
4999 
5000 	/*
5001 	 * This is needed to avoid races between wake_up_pollfree() above and
5002 	 * someone else removing the last entry from the queue for other reasons
5003 	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5004 	 * descriptor being closed).  Such other users hold an RCU read lock, so
5005 	 * we can be sure they're done after we call synchronize_rcu().
5006 	 */
5007 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5008 		synchronize_rcu();
5009 
5010 	if (send_reply)
5011 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5012 	binder_release_work(proc, &thread->todo);
5013 	binder_thread_dec_tmpref(thread);
5014 	return active_transactions;
5015 }
5016 
5017 static __poll_t binder_poll(struct file *filp,
5018 				struct poll_table_struct *wait)
5019 {
5020 	struct binder_proc *proc = filp->private_data;
5021 	struct binder_thread *thread = NULL;
5022 	bool wait_for_proc_work;
5023 
5024 	thread = binder_get_thread(proc);
5025 	if (!thread)
5026 		return POLLERR;
5027 
5028 	binder_inner_proc_lock(thread->proc);
5029 	thread->looper |= BINDER_LOOPER_STATE_POLL;
5030 	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5031 
5032 	binder_inner_proc_unlock(thread->proc);
5033 
5034 	poll_wait(filp, &thread->wait, wait);
5035 
5036 	if (binder_has_work(thread, wait_for_proc_work))
5037 		return EPOLLIN;
5038 
5039 	return 0;
5040 }
5041 
5042 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5043 				struct binder_thread *thread)
5044 {
5045 	int ret = 0;
5046 	struct binder_proc *proc = filp->private_data;
5047 	void __user *ubuf = (void __user *)arg;
5048 	struct binder_write_read bwr;
5049 
5050 	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5051 		ret = -EFAULT;
5052 		goto out;
5053 	}
5054 	binder_debug(BINDER_DEBUG_READ_WRITE,
5055 		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5056 		     proc->pid, thread->pid,
5057 		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5058 		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5059 
5060 	if (bwr.write_size > 0) {
5061 		ret = binder_thread_write(proc, thread,
5062 					  bwr.write_buffer,
5063 					  bwr.write_size,
5064 					  &bwr.write_consumed);
5065 		trace_binder_write_done(ret);
5066 		if (ret < 0) {
5067 			bwr.read_consumed = 0;
5068 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5069 				ret = -EFAULT;
5070 			goto out;
5071 		}
5072 	}
5073 	if (bwr.read_size > 0) {
5074 		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5075 					 bwr.read_size,
5076 					 &bwr.read_consumed,
5077 					 filp->f_flags & O_NONBLOCK);
5078 		trace_binder_read_done(ret);
5079 		binder_inner_proc_lock(proc);
5080 		if (!binder_worklist_empty_ilocked(&proc->todo))
5081 			binder_wakeup_proc_ilocked(proc);
5082 		binder_inner_proc_unlock(proc);
5083 		if (ret < 0) {
5084 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5085 				ret = -EFAULT;
5086 			goto out;
5087 		}
5088 	}
5089 	binder_debug(BINDER_DEBUG_READ_WRITE,
5090 		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5091 		     proc->pid, thread->pid,
5092 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5093 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5094 	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5095 		ret = -EFAULT;
5096 		goto out;
5097 	}
5098 out:
5099 	return ret;
5100 }
5101 
5102 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5103 				    struct flat_binder_object *fbo)
5104 {
5105 	int ret = 0;
5106 	struct binder_proc *proc = filp->private_data;
5107 	struct binder_context *context = proc->context;
5108 	struct binder_node *new_node;
5109 	kuid_t curr_euid = current_euid();
5110 
5111 	mutex_lock(&context->context_mgr_node_lock);
5112 	if (context->binder_context_mgr_node) {
5113 		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5114 		ret = -EBUSY;
5115 		goto out;
5116 	}
5117 	ret = security_binder_set_context_mgr(proc->cred);
5118 	if (ret < 0)
5119 		goto out;
5120 	if (uid_valid(context->binder_context_mgr_uid)) {
5121 		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5122 			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5123 			       from_kuid(&init_user_ns, curr_euid),
5124 			       from_kuid(&init_user_ns,
5125 					 context->binder_context_mgr_uid));
5126 			ret = -EPERM;
5127 			goto out;
5128 		}
5129 	} else {
5130 		context->binder_context_mgr_uid = curr_euid;
5131 	}
5132 	new_node = binder_new_node(proc, fbo);
5133 	if (!new_node) {
5134 		ret = -ENOMEM;
5135 		goto out;
5136 	}
5137 	binder_node_lock(new_node);
5138 	new_node->local_weak_refs++;
5139 	new_node->local_strong_refs++;
5140 	new_node->has_strong_ref = 1;
5141 	new_node->has_weak_ref = 1;
5142 	context->binder_context_mgr_node = new_node;
5143 	binder_node_unlock(new_node);
5144 	binder_put_node(new_node);
5145 out:
5146 	mutex_unlock(&context->context_mgr_node_lock);
5147 	return ret;
5148 }
5149 
5150 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5151 		struct binder_node_info_for_ref *info)
5152 {
5153 	struct binder_node *node;
5154 	struct binder_context *context = proc->context;
5155 	__u32 handle = info->handle;
5156 
5157 	if (info->strong_count || info->weak_count || info->reserved1 ||
5158 	    info->reserved2 || info->reserved3) {
5159 		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5160 				  proc->pid);
5161 		return -EINVAL;
5162 	}
5163 
5164 	/* This ioctl may only be used by the context manager */
5165 	mutex_lock(&context->context_mgr_node_lock);
5166 	if (!context->binder_context_mgr_node ||
5167 		context->binder_context_mgr_node->proc != proc) {
5168 		mutex_unlock(&context->context_mgr_node_lock);
5169 		return -EPERM;
5170 	}
5171 	mutex_unlock(&context->context_mgr_node_lock);
5172 
5173 	node = binder_get_node_from_ref(proc, handle, true, NULL);
5174 	if (!node)
5175 		return -EINVAL;
5176 
5177 	info->strong_count = node->local_strong_refs +
5178 		node->internal_strong_refs;
5179 	info->weak_count = node->local_weak_refs;
5180 
5181 	binder_put_node(node);
5182 
5183 	return 0;
5184 }
5185 
5186 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5187 				struct binder_node_debug_info *info)
5188 {
5189 	struct rb_node *n;
5190 	binder_uintptr_t ptr = info->ptr;
5191 
5192 	memset(info, 0, sizeof(*info));
5193 
5194 	binder_inner_proc_lock(proc);
5195 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5196 		struct binder_node *node = rb_entry(n, struct binder_node,
5197 						    rb_node);
5198 		if (node->ptr > ptr) {
5199 			info->ptr = node->ptr;
5200 			info->cookie = node->cookie;
5201 			info->has_strong_ref = node->has_strong_ref;
5202 			info->has_weak_ref = node->has_weak_ref;
5203 			break;
5204 		}
5205 	}
5206 	binder_inner_proc_unlock(proc);
5207 
5208 	return 0;
5209 }
5210 
5211 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5212 {
5213 	struct rb_node *n;
5214 	struct binder_thread *thread;
5215 
5216 	if (proc->outstanding_txns > 0)
5217 		return true;
5218 
5219 	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5220 		thread = rb_entry(n, struct binder_thread, rb_node);
5221 		if (thread->transaction_stack)
5222 			return true;
5223 	}
5224 	return false;
5225 }
5226 
5227 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5228 			       struct binder_proc *target_proc)
5229 {
5230 	int ret = 0;
5231 
5232 	if (!info->enable) {
5233 		binder_inner_proc_lock(target_proc);
5234 		target_proc->sync_recv = false;
5235 		target_proc->async_recv = false;
5236 		target_proc->is_frozen = false;
5237 		binder_inner_proc_unlock(target_proc);
5238 		return 0;
5239 	}
5240 
5241 	/*
5242 	 * Freezing the target. Prevent new transactions by
5243 	 * setting frozen state. If timeout specified, wait
5244 	 * for transactions to drain.
5245 	 */
5246 	binder_inner_proc_lock(target_proc);
5247 	target_proc->sync_recv = false;
5248 	target_proc->async_recv = false;
5249 	target_proc->is_frozen = true;
5250 	binder_inner_proc_unlock(target_proc);
5251 
5252 	if (info->timeout_ms > 0)
5253 		ret = wait_event_interruptible_timeout(
5254 			target_proc->freeze_wait,
5255 			(!target_proc->outstanding_txns),
5256 			msecs_to_jiffies(info->timeout_ms));
5257 
5258 	/* Check pending transactions that wait for reply */
5259 	if (ret >= 0) {
5260 		binder_inner_proc_lock(target_proc);
5261 		if (binder_txns_pending_ilocked(target_proc))
5262 			ret = -EAGAIN;
5263 		binder_inner_proc_unlock(target_proc);
5264 	}
5265 
5266 	if (ret < 0) {
5267 		binder_inner_proc_lock(target_proc);
5268 		target_proc->is_frozen = false;
5269 		binder_inner_proc_unlock(target_proc);
5270 	}
5271 
5272 	return ret;
5273 }
5274 
5275 static int binder_ioctl_get_freezer_info(
5276 				struct binder_frozen_status_info *info)
5277 {
5278 	struct binder_proc *target_proc;
5279 	bool found = false;
5280 	__u32 txns_pending;
5281 
5282 	info->sync_recv = 0;
5283 	info->async_recv = 0;
5284 
5285 	mutex_lock(&binder_procs_lock);
5286 	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5287 		if (target_proc->pid == info->pid) {
5288 			found = true;
5289 			binder_inner_proc_lock(target_proc);
5290 			txns_pending = binder_txns_pending_ilocked(target_proc);
5291 			info->sync_recv |= target_proc->sync_recv |
5292 					(txns_pending << 1);
5293 			info->async_recv |= target_proc->async_recv;
5294 			binder_inner_proc_unlock(target_proc);
5295 		}
5296 	}
5297 	mutex_unlock(&binder_procs_lock);
5298 
5299 	if (!found)
5300 		return -EINVAL;
5301 
5302 	return 0;
5303 }
5304 
5305 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5306 					   void __user *ubuf)
5307 {
5308 	struct binder_extended_error ee;
5309 
5310 	binder_inner_proc_lock(thread->proc);
5311 	ee = thread->ee;
5312 	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5313 	binder_inner_proc_unlock(thread->proc);
5314 
5315 	if (copy_to_user(ubuf, &ee, sizeof(ee)))
5316 		return -EFAULT;
5317 
5318 	return 0;
5319 }
5320 
5321 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5322 {
5323 	int ret;
5324 	struct binder_proc *proc = filp->private_data;
5325 	struct binder_thread *thread;
5326 	void __user *ubuf = (void __user *)arg;
5327 
5328 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5329 			proc->pid, current->pid, cmd, arg);*/
5330 
5331 	binder_selftest_alloc(&proc->alloc);
5332 
5333 	trace_binder_ioctl(cmd, arg);
5334 
5335 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5336 	if (ret)
5337 		goto err_unlocked;
5338 
5339 	thread = binder_get_thread(proc);
5340 	if (thread == NULL) {
5341 		ret = -ENOMEM;
5342 		goto err;
5343 	}
5344 
5345 	switch (cmd) {
5346 	case BINDER_WRITE_READ:
5347 		ret = binder_ioctl_write_read(filp, arg, thread);
5348 		if (ret)
5349 			goto err;
5350 		break;
5351 	case BINDER_SET_MAX_THREADS: {
5352 		int max_threads;
5353 
5354 		if (copy_from_user(&max_threads, ubuf,
5355 				   sizeof(max_threads))) {
5356 			ret = -EINVAL;
5357 			goto err;
5358 		}
5359 		binder_inner_proc_lock(proc);
5360 		proc->max_threads = max_threads;
5361 		binder_inner_proc_unlock(proc);
5362 		break;
5363 	}
5364 	case BINDER_SET_CONTEXT_MGR_EXT: {
5365 		struct flat_binder_object fbo;
5366 
5367 		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5368 			ret = -EINVAL;
5369 			goto err;
5370 		}
5371 		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5372 		if (ret)
5373 			goto err;
5374 		break;
5375 	}
5376 	case BINDER_SET_CONTEXT_MGR:
5377 		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5378 		if (ret)
5379 			goto err;
5380 		break;
5381 	case BINDER_THREAD_EXIT:
5382 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5383 			     proc->pid, thread->pid);
5384 		binder_thread_release(proc, thread);
5385 		thread = NULL;
5386 		break;
5387 	case BINDER_VERSION: {
5388 		struct binder_version __user *ver = ubuf;
5389 
5390 		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5391 			     &ver->protocol_version)) {
5392 			ret = -EINVAL;
5393 			goto err;
5394 		}
5395 		break;
5396 	}
5397 	case BINDER_GET_NODE_INFO_FOR_REF: {
5398 		struct binder_node_info_for_ref info;
5399 
5400 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5401 			ret = -EFAULT;
5402 			goto err;
5403 		}
5404 
5405 		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5406 		if (ret < 0)
5407 			goto err;
5408 
5409 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5410 			ret = -EFAULT;
5411 			goto err;
5412 		}
5413 
5414 		break;
5415 	}
5416 	case BINDER_GET_NODE_DEBUG_INFO: {
5417 		struct binder_node_debug_info info;
5418 
5419 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5420 			ret = -EFAULT;
5421 			goto err;
5422 		}
5423 
5424 		ret = binder_ioctl_get_node_debug_info(proc, &info);
5425 		if (ret < 0)
5426 			goto err;
5427 
5428 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5429 			ret = -EFAULT;
5430 			goto err;
5431 		}
5432 		break;
5433 	}
5434 	case BINDER_FREEZE: {
5435 		struct binder_freeze_info info;
5436 		struct binder_proc **target_procs = NULL, *target_proc;
5437 		int target_procs_count = 0, i = 0;
5438 
5439 		ret = 0;
5440 
5441 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5442 			ret = -EFAULT;
5443 			goto err;
5444 		}
5445 
5446 		mutex_lock(&binder_procs_lock);
5447 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5448 			if (target_proc->pid == info.pid)
5449 				target_procs_count++;
5450 		}
5451 
5452 		if (target_procs_count == 0) {
5453 			mutex_unlock(&binder_procs_lock);
5454 			ret = -EINVAL;
5455 			goto err;
5456 		}
5457 
5458 		target_procs = kcalloc(target_procs_count,
5459 				       sizeof(struct binder_proc *),
5460 				       GFP_KERNEL);
5461 
5462 		if (!target_procs) {
5463 			mutex_unlock(&binder_procs_lock);
5464 			ret = -ENOMEM;
5465 			goto err;
5466 		}
5467 
5468 		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5469 			if (target_proc->pid != info.pid)
5470 				continue;
5471 
5472 			binder_inner_proc_lock(target_proc);
5473 			target_proc->tmp_ref++;
5474 			binder_inner_proc_unlock(target_proc);
5475 
5476 			target_procs[i++] = target_proc;
5477 		}
5478 		mutex_unlock(&binder_procs_lock);
5479 
5480 		for (i = 0; i < target_procs_count; i++) {
5481 			if (ret >= 0)
5482 				ret = binder_ioctl_freeze(&info,
5483 							  target_procs[i]);
5484 
5485 			binder_proc_dec_tmpref(target_procs[i]);
5486 		}
5487 
5488 		kfree(target_procs);
5489 
5490 		if (ret < 0)
5491 			goto err;
5492 		break;
5493 	}
5494 	case BINDER_GET_FROZEN_INFO: {
5495 		struct binder_frozen_status_info info;
5496 
5497 		if (copy_from_user(&info, ubuf, sizeof(info))) {
5498 			ret = -EFAULT;
5499 			goto err;
5500 		}
5501 
5502 		ret = binder_ioctl_get_freezer_info(&info);
5503 		if (ret < 0)
5504 			goto err;
5505 
5506 		if (copy_to_user(ubuf, &info, sizeof(info))) {
5507 			ret = -EFAULT;
5508 			goto err;
5509 		}
5510 		break;
5511 	}
5512 	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5513 		uint32_t enable;
5514 
5515 		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5516 			ret = -EFAULT;
5517 			goto err;
5518 		}
5519 		binder_inner_proc_lock(proc);
5520 		proc->oneway_spam_detection_enabled = (bool)enable;
5521 		binder_inner_proc_unlock(proc);
5522 		break;
5523 	}
5524 	case BINDER_GET_EXTENDED_ERROR:
5525 		ret = binder_ioctl_get_extended_error(thread, ubuf);
5526 		if (ret < 0)
5527 			goto err;
5528 		break;
5529 	default:
5530 		ret = -EINVAL;
5531 		goto err;
5532 	}
5533 	ret = 0;
5534 err:
5535 	if (thread)
5536 		thread->looper_need_return = false;
5537 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5538 	if (ret && ret != -EINTR)
5539 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5540 err_unlocked:
5541 	trace_binder_ioctl_done(ret);
5542 	return ret;
5543 }
5544 
5545 static void binder_vma_open(struct vm_area_struct *vma)
5546 {
5547 	struct binder_proc *proc = vma->vm_private_data;
5548 
5549 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5550 		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5551 		     proc->pid, vma->vm_start, vma->vm_end,
5552 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5553 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5554 }
5555 
5556 static void binder_vma_close(struct vm_area_struct *vma)
5557 {
5558 	struct binder_proc *proc = vma->vm_private_data;
5559 
5560 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5561 		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5562 		     proc->pid, vma->vm_start, vma->vm_end,
5563 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5564 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5565 	binder_alloc_vma_close(&proc->alloc);
5566 }
5567 
5568 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5569 {
5570 	return VM_FAULT_SIGBUS;
5571 }
5572 
5573 static const struct vm_operations_struct binder_vm_ops = {
5574 	.open = binder_vma_open,
5575 	.close = binder_vma_close,
5576 	.fault = binder_vm_fault,
5577 };
5578 
5579 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5580 {
5581 	struct binder_proc *proc = filp->private_data;
5582 
5583 	if (proc->tsk != current->group_leader)
5584 		return -EINVAL;
5585 
5586 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5587 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5588 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5589 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5590 		     (unsigned long)pgprot_val(vma->vm_page_prot));
5591 
5592 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5593 		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5594 		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5595 		return -EPERM;
5596 	}
5597 	vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5598 
5599 	vma->vm_ops = &binder_vm_ops;
5600 	vma->vm_private_data = proc;
5601 
5602 	return binder_alloc_mmap_handler(&proc->alloc, vma);
5603 }
5604 
5605 static int binder_open(struct inode *nodp, struct file *filp)
5606 {
5607 	struct binder_proc *proc, *itr;
5608 	struct binder_device *binder_dev;
5609 	struct binderfs_info *info;
5610 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5611 	bool existing_pid = false;
5612 
5613 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5614 		     current->group_leader->pid, current->pid);
5615 
5616 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5617 	if (proc == NULL)
5618 		return -ENOMEM;
5619 	spin_lock_init(&proc->inner_lock);
5620 	spin_lock_init(&proc->outer_lock);
5621 	get_task_struct(current->group_leader);
5622 	proc->tsk = current->group_leader;
5623 	proc->cred = get_cred(filp->f_cred);
5624 	INIT_LIST_HEAD(&proc->todo);
5625 	init_waitqueue_head(&proc->freeze_wait);
5626 	proc->default_priority = task_nice(current);
5627 	/* binderfs stashes devices in i_private */
5628 	if (is_binderfs_device(nodp)) {
5629 		binder_dev = nodp->i_private;
5630 		info = nodp->i_sb->s_fs_info;
5631 		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5632 	} else {
5633 		binder_dev = container_of(filp->private_data,
5634 					  struct binder_device, miscdev);
5635 	}
5636 	refcount_inc(&binder_dev->ref);
5637 	proc->context = &binder_dev->context;
5638 	binder_alloc_init(&proc->alloc);
5639 
5640 	binder_stats_created(BINDER_STAT_PROC);
5641 	proc->pid = current->group_leader->pid;
5642 	INIT_LIST_HEAD(&proc->delivered_death);
5643 	INIT_LIST_HEAD(&proc->waiting_threads);
5644 	filp->private_data = proc;
5645 
5646 	mutex_lock(&binder_procs_lock);
5647 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5648 		if (itr->pid == proc->pid) {
5649 			existing_pid = true;
5650 			break;
5651 		}
5652 	}
5653 	hlist_add_head(&proc->proc_node, &binder_procs);
5654 	mutex_unlock(&binder_procs_lock);
5655 
5656 	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5657 		char strbuf[11];
5658 
5659 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5660 		/*
5661 		 * proc debug entries are shared between contexts.
5662 		 * Only create for the first PID to avoid debugfs log spamming
5663 		 * The printing code will anyway print all contexts for a given
5664 		 * PID so this is not a problem.
5665 		 */
5666 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5667 			binder_debugfs_dir_entry_proc,
5668 			(void *)(unsigned long)proc->pid,
5669 			&proc_fops);
5670 	}
5671 
5672 	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5673 		char strbuf[11];
5674 		struct dentry *binderfs_entry;
5675 
5676 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5677 		/*
5678 		 * Similar to debugfs, the process specific log file is shared
5679 		 * between contexts. Only create for the first PID.
5680 		 * This is ok since same as debugfs, the log file will contain
5681 		 * information on all contexts of a given PID.
5682 		 */
5683 		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5684 			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5685 		if (!IS_ERR(binderfs_entry)) {
5686 			proc->binderfs_entry = binderfs_entry;
5687 		} else {
5688 			int error;
5689 
5690 			error = PTR_ERR(binderfs_entry);
5691 			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5692 				strbuf, error);
5693 		}
5694 	}
5695 
5696 	return 0;
5697 }
5698 
5699 static int binder_flush(struct file *filp, fl_owner_t id)
5700 {
5701 	struct binder_proc *proc = filp->private_data;
5702 
5703 	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5704 
5705 	return 0;
5706 }
5707 
5708 static void binder_deferred_flush(struct binder_proc *proc)
5709 {
5710 	struct rb_node *n;
5711 	int wake_count = 0;
5712 
5713 	binder_inner_proc_lock(proc);
5714 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5715 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5716 
5717 		thread->looper_need_return = true;
5718 		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5719 			wake_up_interruptible(&thread->wait);
5720 			wake_count++;
5721 		}
5722 	}
5723 	binder_inner_proc_unlock(proc);
5724 
5725 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5726 		     "binder_flush: %d woke %d threads\n", proc->pid,
5727 		     wake_count);
5728 }
5729 
5730 static int binder_release(struct inode *nodp, struct file *filp)
5731 {
5732 	struct binder_proc *proc = filp->private_data;
5733 
5734 	debugfs_remove(proc->debugfs_entry);
5735 
5736 	if (proc->binderfs_entry) {
5737 		binderfs_remove_file(proc->binderfs_entry);
5738 		proc->binderfs_entry = NULL;
5739 	}
5740 
5741 	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5742 
5743 	return 0;
5744 }
5745 
5746 static int binder_node_release(struct binder_node *node, int refs)
5747 {
5748 	struct binder_ref *ref;
5749 	int death = 0;
5750 	struct binder_proc *proc = node->proc;
5751 
5752 	binder_release_work(proc, &node->async_todo);
5753 
5754 	binder_node_lock(node);
5755 	binder_inner_proc_lock(proc);
5756 	binder_dequeue_work_ilocked(&node->work);
5757 	/*
5758 	 * The caller must have taken a temporary ref on the node,
5759 	 */
5760 	BUG_ON(!node->tmp_refs);
5761 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5762 		binder_inner_proc_unlock(proc);
5763 		binder_node_unlock(node);
5764 		binder_free_node(node);
5765 
5766 		return refs;
5767 	}
5768 
5769 	node->proc = NULL;
5770 	node->local_strong_refs = 0;
5771 	node->local_weak_refs = 0;
5772 	binder_inner_proc_unlock(proc);
5773 
5774 	spin_lock(&binder_dead_nodes_lock);
5775 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5776 	spin_unlock(&binder_dead_nodes_lock);
5777 
5778 	hlist_for_each_entry(ref, &node->refs, node_entry) {
5779 		refs++;
5780 		/*
5781 		 * Need the node lock to synchronize
5782 		 * with new notification requests and the
5783 		 * inner lock to synchronize with queued
5784 		 * death notifications.
5785 		 */
5786 		binder_inner_proc_lock(ref->proc);
5787 		if (!ref->death) {
5788 			binder_inner_proc_unlock(ref->proc);
5789 			continue;
5790 		}
5791 
5792 		death++;
5793 
5794 		BUG_ON(!list_empty(&ref->death->work.entry));
5795 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5796 		binder_enqueue_work_ilocked(&ref->death->work,
5797 					    &ref->proc->todo);
5798 		binder_wakeup_proc_ilocked(ref->proc);
5799 		binder_inner_proc_unlock(ref->proc);
5800 	}
5801 
5802 	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5803 		     "node %d now dead, refs %d, death %d\n",
5804 		     node->debug_id, refs, death);
5805 	binder_node_unlock(node);
5806 	binder_put_node(node);
5807 
5808 	return refs;
5809 }
5810 
5811 static void binder_deferred_release(struct binder_proc *proc)
5812 {
5813 	struct binder_context *context = proc->context;
5814 	struct rb_node *n;
5815 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5816 
5817 	mutex_lock(&binder_procs_lock);
5818 	hlist_del(&proc->proc_node);
5819 	mutex_unlock(&binder_procs_lock);
5820 
5821 	mutex_lock(&context->context_mgr_node_lock);
5822 	if (context->binder_context_mgr_node &&
5823 	    context->binder_context_mgr_node->proc == proc) {
5824 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5825 			     "%s: %d context_mgr_node gone\n",
5826 			     __func__, proc->pid);
5827 		context->binder_context_mgr_node = NULL;
5828 	}
5829 	mutex_unlock(&context->context_mgr_node_lock);
5830 	binder_inner_proc_lock(proc);
5831 	/*
5832 	 * Make sure proc stays alive after we
5833 	 * remove all the threads
5834 	 */
5835 	proc->tmp_ref++;
5836 
5837 	proc->is_dead = true;
5838 	proc->is_frozen = false;
5839 	proc->sync_recv = false;
5840 	proc->async_recv = false;
5841 	threads = 0;
5842 	active_transactions = 0;
5843 	while ((n = rb_first(&proc->threads))) {
5844 		struct binder_thread *thread;
5845 
5846 		thread = rb_entry(n, struct binder_thread, rb_node);
5847 		binder_inner_proc_unlock(proc);
5848 		threads++;
5849 		active_transactions += binder_thread_release(proc, thread);
5850 		binder_inner_proc_lock(proc);
5851 	}
5852 
5853 	nodes = 0;
5854 	incoming_refs = 0;
5855 	while ((n = rb_first(&proc->nodes))) {
5856 		struct binder_node *node;
5857 
5858 		node = rb_entry(n, struct binder_node, rb_node);
5859 		nodes++;
5860 		/*
5861 		 * take a temporary ref on the node before
5862 		 * calling binder_node_release() which will either
5863 		 * kfree() the node or call binder_put_node()
5864 		 */
5865 		binder_inc_node_tmpref_ilocked(node);
5866 		rb_erase(&node->rb_node, &proc->nodes);
5867 		binder_inner_proc_unlock(proc);
5868 		incoming_refs = binder_node_release(node, incoming_refs);
5869 		binder_inner_proc_lock(proc);
5870 	}
5871 	binder_inner_proc_unlock(proc);
5872 
5873 	outgoing_refs = 0;
5874 	binder_proc_lock(proc);
5875 	while ((n = rb_first(&proc->refs_by_desc))) {
5876 		struct binder_ref *ref;
5877 
5878 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5879 		outgoing_refs++;
5880 		binder_cleanup_ref_olocked(ref);
5881 		binder_proc_unlock(proc);
5882 		binder_free_ref(ref);
5883 		binder_proc_lock(proc);
5884 	}
5885 	binder_proc_unlock(proc);
5886 
5887 	binder_release_work(proc, &proc->todo);
5888 	binder_release_work(proc, &proc->delivered_death);
5889 
5890 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5891 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5892 		     __func__, proc->pid, threads, nodes, incoming_refs,
5893 		     outgoing_refs, active_transactions);
5894 
5895 	binder_proc_dec_tmpref(proc);
5896 }
5897 
5898 static void binder_deferred_func(struct work_struct *work)
5899 {
5900 	struct binder_proc *proc;
5901 
5902 	int defer;
5903 
5904 	do {
5905 		mutex_lock(&binder_deferred_lock);
5906 		if (!hlist_empty(&binder_deferred_list)) {
5907 			proc = hlist_entry(binder_deferred_list.first,
5908 					struct binder_proc, deferred_work_node);
5909 			hlist_del_init(&proc->deferred_work_node);
5910 			defer = proc->deferred_work;
5911 			proc->deferred_work = 0;
5912 		} else {
5913 			proc = NULL;
5914 			defer = 0;
5915 		}
5916 		mutex_unlock(&binder_deferred_lock);
5917 
5918 		if (defer & BINDER_DEFERRED_FLUSH)
5919 			binder_deferred_flush(proc);
5920 
5921 		if (defer & BINDER_DEFERRED_RELEASE)
5922 			binder_deferred_release(proc); /* frees proc */
5923 	} while (proc);
5924 }
5925 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5926 
5927 static void
5928 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5929 {
5930 	mutex_lock(&binder_deferred_lock);
5931 	proc->deferred_work |= defer;
5932 	if (hlist_unhashed(&proc->deferred_work_node)) {
5933 		hlist_add_head(&proc->deferred_work_node,
5934 				&binder_deferred_list);
5935 		schedule_work(&binder_deferred_work);
5936 	}
5937 	mutex_unlock(&binder_deferred_lock);
5938 }
5939 
5940 static void print_binder_transaction_ilocked(struct seq_file *m,
5941 					     struct binder_proc *proc,
5942 					     const char *prefix,
5943 					     struct binder_transaction *t)
5944 {
5945 	struct binder_proc *to_proc;
5946 	struct binder_buffer *buffer = t->buffer;
5947 
5948 	spin_lock(&t->lock);
5949 	to_proc = t->to_proc;
5950 	seq_printf(m,
5951 		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5952 		   prefix, t->debug_id, t,
5953 		   t->from ? t->from->proc->pid : 0,
5954 		   t->from ? t->from->pid : 0,
5955 		   to_proc ? to_proc->pid : 0,
5956 		   t->to_thread ? t->to_thread->pid : 0,
5957 		   t->code, t->flags, t->priority, t->need_reply);
5958 	spin_unlock(&t->lock);
5959 
5960 	if (proc != to_proc) {
5961 		/*
5962 		 * Can only safely deref buffer if we are holding the
5963 		 * correct proc inner lock for this node
5964 		 */
5965 		seq_puts(m, "\n");
5966 		return;
5967 	}
5968 
5969 	if (buffer == NULL) {
5970 		seq_puts(m, " buffer free\n");
5971 		return;
5972 	}
5973 	if (buffer->target_node)
5974 		seq_printf(m, " node %d", buffer->target_node->debug_id);
5975 	seq_printf(m, " size %zd:%zd data %pK\n",
5976 		   buffer->data_size, buffer->offsets_size,
5977 		   buffer->user_data);
5978 }
5979 
5980 static void print_binder_work_ilocked(struct seq_file *m,
5981 				     struct binder_proc *proc,
5982 				     const char *prefix,
5983 				     const char *transaction_prefix,
5984 				     struct binder_work *w)
5985 {
5986 	struct binder_node *node;
5987 	struct binder_transaction *t;
5988 
5989 	switch (w->type) {
5990 	case BINDER_WORK_TRANSACTION:
5991 		t = container_of(w, struct binder_transaction, work);
5992 		print_binder_transaction_ilocked(
5993 				m, proc, transaction_prefix, t);
5994 		break;
5995 	case BINDER_WORK_RETURN_ERROR: {
5996 		struct binder_error *e = container_of(
5997 				w, struct binder_error, work);
5998 
5999 		seq_printf(m, "%stransaction error: %u\n",
6000 			   prefix, e->cmd);
6001 	} break;
6002 	case BINDER_WORK_TRANSACTION_COMPLETE:
6003 		seq_printf(m, "%stransaction complete\n", prefix);
6004 		break;
6005 	case BINDER_WORK_NODE:
6006 		node = container_of(w, struct binder_node, work);
6007 		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6008 			   prefix, node->debug_id,
6009 			   (u64)node->ptr, (u64)node->cookie);
6010 		break;
6011 	case BINDER_WORK_DEAD_BINDER:
6012 		seq_printf(m, "%shas dead binder\n", prefix);
6013 		break;
6014 	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6015 		seq_printf(m, "%shas cleared dead binder\n", prefix);
6016 		break;
6017 	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6018 		seq_printf(m, "%shas cleared death notification\n", prefix);
6019 		break;
6020 	default:
6021 		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6022 		break;
6023 	}
6024 }
6025 
6026 static void print_binder_thread_ilocked(struct seq_file *m,
6027 					struct binder_thread *thread,
6028 					int print_always)
6029 {
6030 	struct binder_transaction *t;
6031 	struct binder_work *w;
6032 	size_t start_pos = m->count;
6033 	size_t header_pos;
6034 
6035 	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6036 			thread->pid, thread->looper,
6037 			thread->looper_need_return,
6038 			atomic_read(&thread->tmp_ref));
6039 	header_pos = m->count;
6040 	t = thread->transaction_stack;
6041 	while (t) {
6042 		if (t->from == thread) {
6043 			print_binder_transaction_ilocked(m, thread->proc,
6044 					"    outgoing transaction", t);
6045 			t = t->from_parent;
6046 		} else if (t->to_thread == thread) {
6047 			print_binder_transaction_ilocked(m, thread->proc,
6048 						 "    incoming transaction", t);
6049 			t = t->to_parent;
6050 		} else {
6051 			print_binder_transaction_ilocked(m, thread->proc,
6052 					"    bad transaction", t);
6053 			t = NULL;
6054 		}
6055 	}
6056 	list_for_each_entry(w, &thread->todo, entry) {
6057 		print_binder_work_ilocked(m, thread->proc, "    ",
6058 					  "    pending transaction", w);
6059 	}
6060 	if (!print_always && m->count == header_pos)
6061 		m->count = start_pos;
6062 }
6063 
6064 static void print_binder_node_nilocked(struct seq_file *m,
6065 				       struct binder_node *node)
6066 {
6067 	struct binder_ref *ref;
6068 	struct binder_work *w;
6069 	int count;
6070 
6071 	count = 0;
6072 	hlist_for_each_entry(ref, &node->refs, node_entry)
6073 		count++;
6074 
6075 	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6076 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6077 		   node->has_strong_ref, node->has_weak_ref,
6078 		   node->local_strong_refs, node->local_weak_refs,
6079 		   node->internal_strong_refs, count, node->tmp_refs);
6080 	if (count) {
6081 		seq_puts(m, " proc");
6082 		hlist_for_each_entry(ref, &node->refs, node_entry)
6083 			seq_printf(m, " %d", ref->proc->pid);
6084 	}
6085 	seq_puts(m, "\n");
6086 	if (node->proc) {
6087 		list_for_each_entry(w, &node->async_todo, entry)
6088 			print_binder_work_ilocked(m, node->proc, "    ",
6089 					  "    pending async transaction", w);
6090 	}
6091 }
6092 
6093 static void print_binder_ref_olocked(struct seq_file *m,
6094 				     struct binder_ref *ref)
6095 {
6096 	binder_node_lock(ref->node);
6097 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6098 		   ref->data.debug_id, ref->data.desc,
6099 		   ref->node->proc ? "" : "dead ",
6100 		   ref->node->debug_id, ref->data.strong,
6101 		   ref->data.weak, ref->death);
6102 	binder_node_unlock(ref->node);
6103 }
6104 
6105 static void print_binder_proc(struct seq_file *m,
6106 			      struct binder_proc *proc, int print_all)
6107 {
6108 	struct binder_work *w;
6109 	struct rb_node *n;
6110 	size_t start_pos = m->count;
6111 	size_t header_pos;
6112 	struct binder_node *last_node = NULL;
6113 
6114 	seq_printf(m, "proc %d\n", proc->pid);
6115 	seq_printf(m, "context %s\n", proc->context->name);
6116 	header_pos = m->count;
6117 
6118 	binder_inner_proc_lock(proc);
6119 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6120 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6121 						rb_node), print_all);
6122 
6123 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6124 		struct binder_node *node = rb_entry(n, struct binder_node,
6125 						    rb_node);
6126 		if (!print_all && !node->has_async_transaction)
6127 			continue;
6128 
6129 		/*
6130 		 * take a temporary reference on the node so it
6131 		 * survives and isn't removed from the tree
6132 		 * while we print it.
6133 		 */
6134 		binder_inc_node_tmpref_ilocked(node);
6135 		/* Need to drop inner lock to take node lock */
6136 		binder_inner_proc_unlock(proc);
6137 		if (last_node)
6138 			binder_put_node(last_node);
6139 		binder_node_inner_lock(node);
6140 		print_binder_node_nilocked(m, node);
6141 		binder_node_inner_unlock(node);
6142 		last_node = node;
6143 		binder_inner_proc_lock(proc);
6144 	}
6145 	binder_inner_proc_unlock(proc);
6146 	if (last_node)
6147 		binder_put_node(last_node);
6148 
6149 	if (print_all) {
6150 		binder_proc_lock(proc);
6151 		for (n = rb_first(&proc->refs_by_desc);
6152 		     n != NULL;
6153 		     n = rb_next(n))
6154 			print_binder_ref_olocked(m, rb_entry(n,
6155 							    struct binder_ref,
6156 							    rb_node_desc));
6157 		binder_proc_unlock(proc);
6158 	}
6159 	binder_alloc_print_allocated(m, &proc->alloc);
6160 	binder_inner_proc_lock(proc);
6161 	list_for_each_entry(w, &proc->todo, entry)
6162 		print_binder_work_ilocked(m, proc, "  ",
6163 					  "  pending transaction", w);
6164 	list_for_each_entry(w, &proc->delivered_death, entry) {
6165 		seq_puts(m, "  has delivered dead binder\n");
6166 		break;
6167 	}
6168 	binder_inner_proc_unlock(proc);
6169 	if (!print_all && m->count == header_pos)
6170 		m->count = start_pos;
6171 }
6172 
6173 static const char * const binder_return_strings[] = {
6174 	"BR_ERROR",
6175 	"BR_OK",
6176 	"BR_TRANSACTION",
6177 	"BR_REPLY",
6178 	"BR_ACQUIRE_RESULT",
6179 	"BR_DEAD_REPLY",
6180 	"BR_TRANSACTION_COMPLETE",
6181 	"BR_INCREFS",
6182 	"BR_ACQUIRE",
6183 	"BR_RELEASE",
6184 	"BR_DECREFS",
6185 	"BR_ATTEMPT_ACQUIRE",
6186 	"BR_NOOP",
6187 	"BR_SPAWN_LOOPER",
6188 	"BR_FINISHED",
6189 	"BR_DEAD_BINDER",
6190 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6191 	"BR_FAILED_REPLY",
6192 	"BR_FROZEN_REPLY",
6193 	"BR_ONEWAY_SPAM_SUSPECT",
6194 	"BR_TRANSACTION_PENDING_FROZEN"
6195 };
6196 
6197 static const char * const binder_command_strings[] = {
6198 	"BC_TRANSACTION",
6199 	"BC_REPLY",
6200 	"BC_ACQUIRE_RESULT",
6201 	"BC_FREE_BUFFER",
6202 	"BC_INCREFS",
6203 	"BC_ACQUIRE",
6204 	"BC_RELEASE",
6205 	"BC_DECREFS",
6206 	"BC_INCREFS_DONE",
6207 	"BC_ACQUIRE_DONE",
6208 	"BC_ATTEMPT_ACQUIRE",
6209 	"BC_REGISTER_LOOPER",
6210 	"BC_ENTER_LOOPER",
6211 	"BC_EXIT_LOOPER",
6212 	"BC_REQUEST_DEATH_NOTIFICATION",
6213 	"BC_CLEAR_DEATH_NOTIFICATION",
6214 	"BC_DEAD_BINDER_DONE",
6215 	"BC_TRANSACTION_SG",
6216 	"BC_REPLY_SG",
6217 };
6218 
6219 static const char * const binder_objstat_strings[] = {
6220 	"proc",
6221 	"thread",
6222 	"node",
6223 	"ref",
6224 	"death",
6225 	"transaction",
6226 	"transaction_complete"
6227 };
6228 
6229 static void print_binder_stats(struct seq_file *m, const char *prefix,
6230 			       struct binder_stats *stats)
6231 {
6232 	int i;
6233 
6234 	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6235 		     ARRAY_SIZE(binder_command_strings));
6236 	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6237 		int temp = atomic_read(&stats->bc[i]);
6238 
6239 		if (temp)
6240 			seq_printf(m, "%s%s: %d\n", prefix,
6241 				   binder_command_strings[i], temp);
6242 	}
6243 
6244 	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6245 		     ARRAY_SIZE(binder_return_strings));
6246 	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6247 		int temp = atomic_read(&stats->br[i]);
6248 
6249 		if (temp)
6250 			seq_printf(m, "%s%s: %d\n", prefix,
6251 				   binder_return_strings[i], temp);
6252 	}
6253 
6254 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6255 		     ARRAY_SIZE(binder_objstat_strings));
6256 	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6257 		     ARRAY_SIZE(stats->obj_deleted));
6258 	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6259 		int created = atomic_read(&stats->obj_created[i]);
6260 		int deleted = atomic_read(&stats->obj_deleted[i]);
6261 
6262 		if (created || deleted)
6263 			seq_printf(m, "%s%s: active %d total %d\n",
6264 				prefix,
6265 				binder_objstat_strings[i],
6266 				created - deleted,
6267 				created);
6268 	}
6269 }
6270 
6271 static void print_binder_proc_stats(struct seq_file *m,
6272 				    struct binder_proc *proc)
6273 {
6274 	struct binder_work *w;
6275 	struct binder_thread *thread;
6276 	struct rb_node *n;
6277 	int count, strong, weak, ready_threads;
6278 	size_t free_async_space =
6279 		binder_alloc_get_free_async_space(&proc->alloc);
6280 
6281 	seq_printf(m, "proc %d\n", proc->pid);
6282 	seq_printf(m, "context %s\n", proc->context->name);
6283 	count = 0;
6284 	ready_threads = 0;
6285 	binder_inner_proc_lock(proc);
6286 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6287 		count++;
6288 
6289 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6290 		ready_threads++;
6291 
6292 	seq_printf(m, "  threads: %d\n", count);
6293 	seq_printf(m, "  requested threads: %d+%d/%d\n"
6294 			"  ready threads %d\n"
6295 			"  free async space %zd\n", proc->requested_threads,
6296 			proc->requested_threads_started, proc->max_threads,
6297 			ready_threads,
6298 			free_async_space);
6299 	count = 0;
6300 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6301 		count++;
6302 	binder_inner_proc_unlock(proc);
6303 	seq_printf(m, "  nodes: %d\n", count);
6304 	count = 0;
6305 	strong = 0;
6306 	weak = 0;
6307 	binder_proc_lock(proc);
6308 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6309 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6310 						  rb_node_desc);
6311 		count++;
6312 		strong += ref->data.strong;
6313 		weak += ref->data.weak;
6314 	}
6315 	binder_proc_unlock(proc);
6316 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6317 
6318 	count = binder_alloc_get_allocated_count(&proc->alloc);
6319 	seq_printf(m, "  buffers: %d\n", count);
6320 
6321 	binder_alloc_print_pages(m, &proc->alloc);
6322 
6323 	count = 0;
6324 	binder_inner_proc_lock(proc);
6325 	list_for_each_entry(w, &proc->todo, entry) {
6326 		if (w->type == BINDER_WORK_TRANSACTION)
6327 			count++;
6328 	}
6329 	binder_inner_proc_unlock(proc);
6330 	seq_printf(m, "  pending transactions: %d\n", count);
6331 
6332 	print_binder_stats(m, "  ", &proc->stats);
6333 }
6334 
6335 static int state_show(struct seq_file *m, void *unused)
6336 {
6337 	struct binder_proc *proc;
6338 	struct binder_node *node;
6339 	struct binder_node *last_node = NULL;
6340 
6341 	seq_puts(m, "binder state:\n");
6342 
6343 	spin_lock(&binder_dead_nodes_lock);
6344 	if (!hlist_empty(&binder_dead_nodes))
6345 		seq_puts(m, "dead nodes:\n");
6346 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6347 		/*
6348 		 * take a temporary reference on the node so it
6349 		 * survives and isn't removed from the list
6350 		 * while we print it.
6351 		 */
6352 		node->tmp_refs++;
6353 		spin_unlock(&binder_dead_nodes_lock);
6354 		if (last_node)
6355 			binder_put_node(last_node);
6356 		binder_node_lock(node);
6357 		print_binder_node_nilocked(m, node);
6358 		binder_node_unlock(node);
6359 		last_node = node;
6360 		spin_lock(&binder_dead_nodes_lock);
6361 	}
6362 	spin_unlock(&binder_dead_nodes_lock);
6363 	if (last_node)
6364 		binder_put_node(last_node);
6365 
6366 	mutex_lock(&binder_procs_lock);
6367 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6368 		print_binder_proc(m, proc, 1);
6369 	mutex_unlock(&binder_procs_lock);
6370 
6371 	return 0;
6372 }
6373 
6374 static int stats_show(struct seq_file *m, void *unused)
6375 {
6376 	struct binder_proc *proc;
6377 
6378 	seq_puts(m, "binder stats:\n");
6379 
6380 	print_binder_stats(m, "", &binder_stats);
6381 
6382 	mutex_lock(&binder_procs_lock);
6383 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6384 		print_binder_proc_stats(m, proc);
6385 	mutex_unlock(&binder_procs_lock);
6386 
6387 	return 0;
6388 }
6389 
6390 static int transactions_show(struct seq_file *m, void *unused)
6391 {
6392 	struct binder_proc *proc;
6393 
6394 	seq_puts(m, "binder transactions:\n");
6395 	mutex_lock(&binder_procs_lock);
6396 	hlist_for_each_entry(proc, &binder_procs, proc_node)
6397 		print_binder_proc(m, proc, 0);
6398 	mutex_unlock(&binder_procs_lock);
6399 
6400 	return 0;
6401 }
6402 
6403 static int proc_show(struct seq_file *m, void *unused)
6404 {
6405 	struct binder_proc *itr;
6406 	int pid = (unsigned long)m->private;
6407 
6408 	mutex_lock(&binder_procs_lock);
6409 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6410 		if (itr->pid == pid) {
6411 			seq_puts(m, "binder proc state:\n");
6412 			print_binder_proc(m, itr, 1);
6413 		}
6414 	}
6415 	mutex_unlock(&binder_procs_lock);
6416 
6417 	return 0;
6418 }
6419 
6420 static void print_binder_transaction_log_entry(struct seq_file *m,
6421 					struct binder_transaction_log_entry *e)
6422 {
6423 	int debug_id = READ_ONCE(e->debug_id_done);
6424 	/*
6425 	 * read barrier to guarantee debug_id_done read before
6426 	 * we print the log values
6427 	 */
6428 	smp_rmb();
6429 	seq_printf(m,
6430 		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6431 		   e->debug_id, (e->call_type == 2) ? "reply" :
6432 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6433 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6434 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6435 		   e->return_error, e->return_error_param,
6436 		   e->return_error_line);
6437 	/*
6438 	 * read-barrier to guarantee read of debug_id_done after
6439 	 * done printing the fields of the entry
6440 	 */
6441 	smp_rmb();
6442 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6443 			"\n" : " (incomplete)\n");
6444 }
6445 
6446 static int transaction_log_show(struct seq_file *m, void *unused)
6447 {
6448 	struct binder_transaction_log *log = m->private;
6449 	unsigned int log_cur = atomic_read(&log->cur);
6450 	unsigned int count;
6451 	unsigned int cur;
6452 	int i;
6453 
6454 	count = log_cur + 1;
6455 	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6456 		0 : count % ARRAY_SIZE(log->entry);
6457 	if (count > ARRAY_SIZE(log->entry) || log->full)
6458 		count = ARRAY_SIZE(log->entry);
6459 	for (i = 0; i < count; i++) {
6460 		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6461 
6462 		print_binder_transaction_log_entry(m, &log->entry[index]);
6463 	}
6464 	return 0;
6465 }
6466 
6467 const struct file_operations binder_fops = {
6468 	.owner = THIS_MODULE,
6469 	.poll = binder_poll,
6470 	.unlocked_ioctl = binder_ioctl,
6471 	.compat_ioctl = compat_ptr_ioctl,
6472 	.mmap = binder_mmap,
6473 	.open = binder_open,
6474 	.flush = binder_flush,
6475 	.release = binder_release,
6476 };
6477 
6478 DEFINE_SHOW_ATTRIBUTE(state);
6479 DEFINE_SHOW_ATTRIBUTE(stats);
6480 DEFINE_SHOW_ATTRIBUTE(transactions);
6481 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6482 
6483 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6484 	{
6485 		.name = "state",
6486 		.mode = 0444,
6487 		.fops = &state_fops,
6488 		.data = NULL,
6489 	},
6490 	{
6491 		.name = "stats",
6492 		.mode = 0444,
6493 		.fops = &stats_fops,
6494 		.data = NULL,
6495 	},
6496 	{
6497 		.name = "transactions",
6498 		.mode = 0444,
6499 		.fops = &transactions_fops,
6500 		.data = NULL,
6501 	},
6502 	{
6503 		.name = "transaction_log",
6504 		.mode = 0444,
6505 		.fops = &transaction_log_fops,
6506 		.data = &binder_transaction_log,
6507 	},
6508 	{
6509 		.name = "failed_transaction_log",
6510 		.mode = 0444,
6511 		.fops = &transaction_log_fops,
6512 		.data = &binder_transaction_log_failed,
6513 	},
6514 	{} /* terminator */
6515 };
6516 
6517 static int __init init_binder_device(const char *name)
6518 {
6519 	int ret;
6520 	struct binder_device *binder_device;
6521 
6522 	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6523 	if (!binder_device)
6524 		return -ENOMEM;
6525 
6526 	binder_device->miscdev.fops = &binder_fops;
6527 	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6528 	binder_device->miscdev.name = name;
6529 
6530 	refcount_set(&binder_device->ref, 1);
6531 	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6532 	binder_device->context.name = name;
6533 	mutex_init(&binder_device->context.context_mgr_node_lock);
6534 
6535 	ret = misc_register(&binder_device->miscdev);
6536 	if (ret < 0) {
6537 		kfree(binder_device);
6538 		return ret;
6539 	}
6540 
6541 	hlist_add_head(&binder_device->hlist, &binder_devices);
6542 
6543 	return ret;
6544 }
6545 
6546 static int __init binder_init(void)
6547 {
6548 	int ret;
6549 	char *device_name, *device_tmp;
6550 	struct binder_device *device;
6551 	struct hlist_node *tmp;
6552 	char *device_names = NULL;
6553 
6554 	ret = binder_alloc_shrinker_init();
6555 	if (ret)
6556 		return ret;
6557 
6558 	atomic_set(&binder_transaction_log.cur, ~0U);
6559 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6560 
6561 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6562 	if (binder_debugfs_dir_entry_root) {
6563 		const struct binder_debugfs_entry *db_entry;
6564 
6565 		binder_for_each_debugfs_entry(db_entry)
6566 			debugfs_create_file(db_entry->name,
6567 					    db_entry->mode,
6568 					    binder_debugfs_dir_entry_root,
6569 					    db_entry->data,
6570 					    db_entry->fops);
6571 
6572 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6573 						 binder_debugfs_dir_entry_root);
6574 	}
6575 
6576 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6577 	    strcmp(binder_devices_param, "") != 0) {
6578 		/*
6579 		* Copy the module_parameter string, because we don't want to
6580 		* tokenize it in-place.
6581 		 */
6582 		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6583 		if (!device_names) {
6584 			ret = -ENOMEM;
6585 			goto err_alloc_device_names_failed;
6586 		}
6587 
6588 		device_tmp = device_names;
6589 		while ((device_name = strsep(&device_tmp, ","))) {
6590 			ret = init_binder_device(device_name);
6591 			if (ret)
6592 				goto err_init_binder_device_failed;
6593 		}
6594 	}
6595 
6596 	ret = init_binderfs();
6597 	if (ret)
6598 		goto err_init_binder_device_failed;
6599 
6600 	return ret;
6601 
6602 err_init_binder_device_failed:
6603 	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6604 		misc_deregister(&device->miscdev);
6605 		hlist_del(&device->hlist);
6606 		kfree(device);
6607 	}
6608 
6609 	kfree(device_names);
6610 
6611 err_alloc_device_names_failed:
6612 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6613 
6614 	return ret;
6615 }
6616 
6617 device_initcall(binder_init);
6618 
6619 #define CREATE_TRACE_POINTS
6620 #include "binder_trace.h"
6621 
6622 MODULE_LICENSE("GPL v2");
6623