// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
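
/*
 * Illustrative sketch (editorial, not called anywhere): the nesting
 * permitted by the ordering rules above when updating a node owned by
 * a live proc, using only the helpers defined later in this file:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */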

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
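
/*
 * Example (editorial, assuming the default module-parameter sysfs
 * path): debug_mask is writable at runtime, so transaction tracing
 * can be enabled with, e.g.:
 *
 *	echo 0x240 > /sys/module/binder/parameters/debug_mask
 *
 * (0x240 = BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_READ_WRITE)
 */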

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
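
/*
 * Note (editorial): a reader of this lockless ring is expected to pair
 * the smp_wmb() above with an smp_rmb() -- sampling e->debug_id_done
 * before and after copying an entry lets it detect that the entry was
 * overwritten concurrently.
 */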

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
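
/*
 * Sketch of the looper life cycle (editorial, inferred from how these
 * bits are used): REGISTERED marks a thread spawned at the driver's
 * request (BC_REGISTER_LOOPER), ENTERED a thread that joined the pool
 * on its own (BC_ENTER_LOOPER). WAITING is set while a thread blocks
 * waiting for work, and POLL once it has used the poll() interface.
 */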

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
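
/*
 * Usage sketch (editorial; mirrors callers later in this file, e.g.
 * binder_dec_node_nilocked()): code that queues proc-wide work does
 * both steps under the inner lock:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_work_ilocked(&work, &proc->todo);
 *	binder_wakeup_proc_ilocked(proc);
 *	binder_inner_proc_unlock(proc);
 */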

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
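
/*
 * Worked example for the capping above (editorial): rlimit_to_nice()
 * maps a RLIMIT_NICE value v to the nice floor 20 - v, so a soft limit
 * of 20 caps a requested nice of -10 to 0, while a limit of 40 permits
 * the full range down to -20. A limit of 0 yields a floor of 20, which
 * exceeds MAX_NICE (19) and triggers the RLIMIT_NICE error above.
 */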

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
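
	/*
	 * Worked example for the scan below (editorial): it walks
	 * refs_by_desc in ascending desc order to find the lowest
	 * unused descriptor >= the initial value. With existing descs
	 * {1, 2, 5}, desc starts at 1, advances to 2 and then 3, and
	 * the loop breaks at 5, so the new ref gets desc 3.
	 */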
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref was not found or was only
 * weak when a strong ref was required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
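		/*
		 * proc->outer_lock is a spinlock, so drop it across the
		 * GFP_KERNEL allocation (which may sleep) and redo the
		 * lookup afterwards in case another thread raced us.
		 */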
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * tmp_ref is atomic because it can be incremented without
	 * the inner lock held (see binder_get_txn_from()); the
	 * thread is only freed once it is dead and the count has
	 * reached zero.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
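
/*
 * Usage sketch for the pair above (editorial): callers balance
 * binder_get_txn_from() with binder_thread_dec_tmpref() once they are
 * done with the thread:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */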

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
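	/*
	 * Bounds check note (editorial): both clauses below use
	 * unsigned arithmetic, so the second clause guards the first;
	 * if data_size < object_size, the subtraction would wrap to a
	 * huge value and wrongly pass on its own.
	 */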
1655 if (offset <= buffer->data_size - object_size &&
1656 buffer->data_size >= object_size)
1657 return object_size;
1658 else
1659 return 0;
1660 }
1661
1662 /**
1663 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1664 * @proc: binder_proc owning the buffer
1665 * @b: binder_buffer containing the object
1666 * @object: struct binder_object to read into
1667 * @index: index in offset array at which the binder_buffer_object is
1668 * located
1669 * @start_offset: points to the start of the offset array
1670 * @object_offsetp: offset of @object read from @b
1671 * @num_valid: the number of valid offsets in the offset array
1672 *
1673 * Return: If @index is within the valid range of the offset array
1674 * described by @start and @num_valid, and if there's a valid
1675 * binder_buffer_object at the offset found in index @index
1676 * of the offset array, that object is returned. Otherwise,
1677 * %NULL is returned.
1678 * Note that the offset found in index @index itself is not
1679 * verified; this function assumes that @num_valid elements
1680 * from @start_offset were previously verified to have valid offsets.
1681 * If @object_offsetp is non-NULL, then the offset within
1682 * @b is written to it.
1683 */
1684 static struct binder_buffer_object *binder_validate_ptr(
1685 struct binder_proc *proc,
1686 struct binder_buffer *b,
1687 struct binder_object *object,
1688 binder_size_t index,
1689 binder_size_t start_offset,
1690 binder_size_t *object_offsetp,
1691 binder_size_t num_valid)
1692 {
1693 size_t object_size;
1694 binder_size_t object_offset;
1695 unsigned long buffer_offset;
1696
1697 if (index >= num_valid)
1698 return NULL;
1699
1700 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1701 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1702 b, buffer_offset,
1703 sizeof(object_offset)))
1704 return NULL;
1705 object_size = binder_get_object(proc, b, object_offset, object);
1706 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1707 return NULL;
1708 if (object_offsetp)
1709 *object_offsetp = object_offset;
1710
1711 return &object->bbo;
1712 }
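/*
 * Offset arithmetic sketch: for index i, the matching entry of the
 * offsets array lives at
 *
 *	buffer_offset = start_offset + i * sizeof(binder_size_t)
 *
 * and the binder_size_t stored there is itself the offset of the
 * object within @b, which is then validated via binder_get_object().
 */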
1713
1714 /**
1715 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1716 * @proc: binder_proc owning the buffer
1717 * @b: transaction buffer
1718 * @objects_start_offset: offset to start of objects buffer
1719 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1720 * @fixup_offset: start offset in @b to fix up
1721 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1722 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1723 *
1724 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1725 * allowed.
1726 *
1727 * For safety reasons, we only allow fixups inside a buffer to happen
1728 * at increasing offsets; additionally, we only allow fixup on the last
1729 * buffer object that was verified, or one of its parents.
1730 *
1731 * Example of what is allowed:
1732 *
1733 * A
1734 * B (parent = A, offset = 0)
1735 * C (parent = A, offset = 16)
1736 * D (parent = C, offset = 0)
1737 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1738 *
1739 * Examples of what is not allowed:
1740 *
1741 * Decreasing offsets within the same parent:
1742 * A
1743 * C (parent = A, offset = 16)
1744 * B (parent = A, offset = 0) // decreasing offset within A
1745 *
1746 * Referring to a parent that wasn't the last object or any of its parents:
1747 * A
1748 * B (parent = A, offset = 0)
1749 * C (parent = A, offset = 0)
1750 * C (parent = A, offset = 16)
1751 * D (parent = B, offset = 0) // B is not A or any of A's parents
1752 */
1753 static bool binder_validate_fixup(struct binder_proc *proc,
1754 struct binder_buffer *b,
1755 binder_size_t objects_start_offset,
1756 binder_size_t buffer_obj_offset,
1757 binder_size_t fixup_offset,
1758 binder_size_t last_obj_offset,
1759 binder_size_t last_min_offset)
1760 {
1761 if (!last_obj_offset) {
1762 /* No previously-verified object to fix up into */
1763 return false;
1764 }
1765
1766 while (last_obj_offset != buffer_obj_offset) {
1767 unsigned long buffer_offset;
1768 struct binder_object last_object;
1769 struct binder_buffer_object *last_bbo;
1770 size_t object_size = binder_get_object(proc, b, last_obj_offset,
1771 &last_object);
1772 if (object_size != sizeof(*last_bbo))
1773 return false;
1774
1775 last_bbo = &last_object.bbo;
1776 /*
1777 * Safe to retrieve the parent of last_obj, since it
1778 * was already previously verified by the driver.
1779 */
1780 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1781 return false;
1782 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1783 buffer_offset = objects_start_offset +
1784 sizeof(binder_size_t) * last_bbo->parent;
1785 if (binder_alloc_copy_from_buffer(&proc->alloc,
1786 &last_obj_offset,
1787 b, buffer_offset,
1788 sizeof(last_obj_offset)))
1789 return false;
1790 }
1791 return (fixup_offset >= last_min_offset);
1792 }
1793
1794 /**
1795 * struct binder_task_work_cb - for deferred close
1796 *
1797 * @twork: callback_head for task work
1798 * @file: file to close
1799 *
1800 * Structure to pass task work to be handled after
1801 * returning from binder_ioctl() via task_work_add().
1802 */
1803 struct binder_task_work_cb {
1804 struct callback_head twork;
1805 struct file *file;
1806 };
1807
1808 /**
1809 * binder_do_fd_close() - release a file whose close was deferred
1810 * @twork: callback head for task work
1811 *
1812 * It is not safe to call ksys_close() during the binder_ioctl()
1813 * function if there is a chance that binder's own file descriptor
1814 * might be closed. This is to meet the requirements for using
1815 * fdget() (see comments for __fget_light()). Therefore use
1816 * task_work_add() to schedule the close operation once we have
1817 * returned from binder_ioctl(). This function is a callback
1818 * for that mechanism and does the final fput() on the
1819 * file backing the given file descriptor.
1820 */
1821 static void binder_do_fd_close(struct callback_head *twork)
1822 {
1823 struct binder_task_work_cb *twcb = container_of(twork,
1824 struct binder_task_work_cb, twork);
1825
1826 fput(twcb->file);
1827 kfree(twcb);
1828 }
1829
1830 /**
1831 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1832 * @fd: file-descriptor to close
1833 *
1834 * See comments in binder_do_fd_close(). This function is used to schedule
1835 * a file-descriptor to be closed after returning from binder_ioctl().
1836 */
1837 static void binder_deferred_fd_close(int fd)
1838 {
1839 struct binder_task_work_cb *twcb;
1840
1841 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1842 if (!twcb)
1843 return;
1844 init_task_work(&twcb->twork, binder_do_fd_close);
1845 close_fd_get_file(fd, &twcb->file);
1846 if (twcb->file) {
1847 filp_close(twcb->file, current->files);
1848 task_work_add(current, &twcb->twork, TWA_RESUME);
1849 } else {
1850 kfree(twcb);
1851 }
1852 }
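/*
 * Timeline sketch (illustrative): in binder_ioctl() context the fd is
 * detached from the fd table and flushed,
 *
 *	close_fd_get_file(fd, &twcb->file);	// remove fd table entry
 *	filp_close(twcb->file, current->files);	// flush; ref still held
 *
 * while the final fput() is deferred to binder_do_fd_close(), which
 * task_work_add() runs once the task is headed back to user space,
 * i.e. safely after binder_ioctl() has returned.
 */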
1853
1854 static void binder_transaction_buffer_release(struct binder_proc *proc,
1855 struct binder_buffer *buffer,
1856 binder_size_t failed_at,
1857 bool is_failure)
1858 {
1859 int debug_id = buffer->debug_id;
1860 binder_size_t off_start_offset, buffer_offset, off_end_offset;
1861
1862 binder_debug(BINDER_DEBUG_TRANSACTION,
1863 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1864 proc->pid, buffer->debug_id,
1865 buffer->data_size, buffer->offsets_size,
1866 (unsigned long long)failed_at);
1867
1868 if (buffer->target_node)
1869 binder_dec_node(buffer->target_node, 1, 0);
1870
1871 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1872 off_end_offset = is_failure ? failed_at :
1873 off_start_offset + buffer->offsets_size;
1874 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1875 buffer_offset += sizeof(binder_size_t)) {
1876 struct binder_object_header *hdr;
1877 size_t object_size = 0;
1878 struct binder_object object;
1879 binder_size_t object_offset;
1880
1881 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1882 buffer, buffer_offset,
1883 sizeof(object_offset)))
1884 object_size = binder_get_object(proc, buffer,
1885 object_offset, &object);
1886 if (object_size == 0) {
1887 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1888 debug_id, (u64)object_offset, buffer->data_size);
1889 continue;
1890 }
1891 hdr = &object.hdr;
1892 switch (hdr->type) {
1893 case BINDER_TYPE_BINDER:
1894 case BINDER_TYPE_WEAK_BINDER: {
1895 struct flat_binder_object *fp;
1896 struct binder_node *node;
1897
1898 fp = to_flat_binder_object(hdr);
1899 node = binder_get_node(proc, fp->binder);
1900 if (node == NULL) {
1901 pr_err("transaction release %d bad node %016llx\n",
1902 debug_id, (u64)fp->binder);
1903 break;
1904 }
1905 binder_debug(BINDER_DEBUG_TRANSACTION,
1906 " node %d u%016llx\n",
1907 node->debug_id, (u64)node->ptr);
1908 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1909 0);
1910 binder_put_node(node);
1911 } break;
1912 case BINDER_TYPE_HANDLE:
1913 case BINDER_TYPE_WEAK_HANDLE: {
1914 struct flat_binder_object *fp;
1915 struct binder_ref_data rdata;
1916 int ret;
1917
1918 fp = to_flat_binder_object(hdr);
1919 ret = binder_dec_ref_for_handle(proc, fp->handle,
1920 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1921
1922 if (ret) {
1923 pr_err("transaction release %d bad handle %d, ret = %d\n",
1924 debug_id, fp->handle, ret);
1925 break;
1926 }
1927 binder_debug(BINDER_DEBUG_TRANSACTION,
1928 " ref %d desc %d\n",
1929 rdata.debug_id, rdata.desc);
1930 } break;
1931
1932 case BINDER_TYPE_FD: {
1933 /*
1934 * No need to close the file here since user-space
1935 * closes it for successfully delivered
1936 * transactions. For transactions that weren't
1937 * delivered, the new fd was never allocated so
1938 * there is no need to close and the fput on the
1939 * file is done when the transaction is torn
1940 * down.
1941 */
1942 } break;
1943 case BINDER_TYPE_PTR:
1944 /*
1945 * Nothing to do here, this will get cleaned up when the
1946 * transaction buffer gets freed
1947 */
1948 break;
1949 case BINDER_TYPE_FDA: {
1950 struct binder_fd_array_object *fda;
1951 struct binder_buffer_object *parent;
1952 struct binder_object ptr_object;
1953 binder_size_t fda_offset;
1954 size_t fd_index;
1955 binder_size_t fd_buf_size;
1956 binder_size_t num_valid;
1957
1958 if (proc->tsk != current->group_leader) {
1959 /*
1960 * Nothing to do if running in sender context
1961 * The fd fixups have not been applied so no
1962 * fds need to be closed.
1963 */
1964 continue;
1965 }
1966
1967 num_valid = (buffer_offset - off_start_offset) /
1968 sizeof(binder_size_t);
1969 fda = to_binder_fd_array_object(hdr);
1970 parent = binder_validate_ptr(proc, buffer, &ptr_object,
1971 fda->parent,
1972 off_start_offset,
1973 NULL,
1974 num_valid);
1975 if (!parent) {
1976 pr_err("transaction release %d bad parent offset\n",
1977 debug_id);
1978 continue;
1979 }
1980 fd_buf_size = sizeof(u32) * fda->num_fds;
1981 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1982 pr_err("transaction release %d invalid number of fds (%lld)\n",
1983 debug_id, (u64)fda->num_fds);
1984 continue;
1985 }
1986 if (fd_buf_size > parent->length ||
1987 fda->parent_offset > parent->length - fd_buf_size) {
1988 /* No space for all file descriptors here. */
1989 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1990 debug_id, (u64)fda->num_fds);
1991 continue;
1992 }
1993 /*
1994 * the source data for binder_buffer_object is visible
1995 * to user-space and the @buffer element is the user
1996 * pointer to the buffer_object containing the fd_array.
1997 * Convert the address to an offset relative to
1998 * the base of the transaction buffer.
1999 */
2000 fda_offset =
2001 (parent->buffer - (uintptr_t)buffer->user_data) +
2002 fda->parent_offset;
2003 for (fd_index = 0; fd_index < fda->num_fds;
2004 fd_index++) {
2005 u32 fd;
2006 int err;
2007 binder_size_t offset = fda_offset +
2008 fd_index * sizeof(fd);
2009
2010 err = binder_alloc_copy_from_buffer(
2011 &proc->alloc, &fd, buffer,
2012 offset, sizeof(fd));
2013 WARN_ON(err);
2014 if (!err)
2015 binder_deferred_fd_close(fd);
2016 }
2017 } break;
2018 default:
2019 pr_err("transaction release %d bad object type %x\n",
2020 debug_id, hdr->type);
2021 break;
2022 }
2023 }
2024 }
2025
2026 static int binder_translate_binder(struct flat_binder_object *fp,
2027 struct binder_transaction *t,
2028 struct binder_thread *thread)
2029 {
2030 struct binder_node *node;
2031 struct binder_proc *proc = thread->proc;
2032 struct binder_proc *target_proc = t->to_proc;
2033 struct binder_ref_data rdata;
2034 int ret = 0;
2035
2036 node = binder_get_node(proc, fp->binder);
2037 if (!node) {
2038 node = binder_new_node(proc, fp);
2039 if (!node)
2040 return -ENOMEM;
2041 }
2042 if (fp->cookie != node->cookie) {
2043 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2044 proc->pid, thread->pid, (u64)fp->binder,
2045 node->debug_id, (u64)fp->cookie,
2046 (u64)node->cookie);
2047 ret = -EINVAL;
2048 goto done;
2049 }
2050 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2051 ret = -EPERM;
2052 goto done;
2053 }
2054
2055 ret = binder_inc_ref_for_node(target_proc, node,
2056 fp->hdr.type == BINDER_TYPE_BINDER,
2057 &thread->todo, &rdata);
2058 if (ret)
2059 goto done;
2060
2061 if (fp->hdr.type == BINDER_TYPE_BINDER)
2062 fp->hdr.type = BINDER_TYPE_HANDLE;
2063 else
2064 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2065 fp->binder = 0;
2066 fp->handle = rdata.desc;
2067 fp->cookie = 0;
2068
2069 trace_binder_transaction_node_to_ref(t, node, &rdata);
2070 binder_debug(BINDER_DEBUG_TRANSACTION,
2071 " node %d u%016llx -> ref %d desc %d\n",
2072 node->debug_id, (u64)node->ptr,
2073 rdata.debug_id, rdata.desc);
2074 done:
2075 binder_put_node(node);
2076 return ret;
2077 }
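/*
 * Translation sketch (illustrative; local_ptr/local_cookie are
 * hypothetical sender values): an object sent as
 *
 *	fp->hdr.type == BINDER_TYPE_BINDER
 *	fp->binder   == local_ptr, fp->cookie == local_cookie
 *
 * leaves this function rewritten for the target process as
 *
 *	fp->hdr.type == BINDER_TYPE_HANDLE
 *	fp->handle   == rdata.desc, fp->binder == 0, fp->cookie == 0
 *
 * so the receiver only ever sees a handle, never the sender's raw
 * pointers.
 */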
2078
2079 static int binder_translate_handle(struct flat_binder_object *fp,
2080 struct binder_transaction *t,
2081 struct binder_thread *thread)
2082 {
2083 struct binder_proc *proc = thread->proc;
2084 struct binder_proc *target_proc = t->to_proc;
2085 struct binder_node *node;
2086 struct binder_ref_data src_rdata;
2087 int ret = 0;
2088
2089 node = binder_get_node_from_ref(proc, fp->handle,
2090 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2091 if (!node) {
2092 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2093 proc->pid, thread->pid, fp->handle);
2094 return -EINVAL;
2095 }
2096 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2097 ret = -EPERM;
2098 goto done;
2099 }
2100
2101 binder_node_lock(node);
2102 if (node->proc == target_proc) {
2103 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2104 fp->hdr.type = BINDER_TYPE_BINDER;
2105 else
2106 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2107 fp->binder = node->ptr;
2108 fp->cookie = node->cookie;
2109 if (node->proc)
2110 binder_inner_proc_lock(node->proc);
2111 else
2112 __acquire(&node->proc->inner_lock);
2113 binder_inc_node_nilocked(node,
2114 fp->hdr.type == BINDER_TYPE_BINDER,
2115 0, NULL);
2116 if (node->proc)
2117 binder_inner_proc_unlock(node->proc);
2118 else
2119 __release(&node->proc->inner_lock);
2120 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2121 binder_debug(BINDER_DEBUG_TRANSACTION,
2122 " ref %d desc %d -> node %d u%016llx\n",
2123 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2124 (u64)node->ptr);
2125 binder_node_unlock(node);
2126 } else {
2127 struct binder_ref_data dest_rdata;
2128
2129 binder_node_unlock(node);
2130 ret = binder_inc_ref_for_node(target_proc, node,
2131 fp->hdr.type == BINDER_TYPE_HANDLE,
2132 NULL, &dest_rdata);
2133 if (ret)
2134 goto done;
2135
2136 fp->binder = 0;
2137 fp->handle = dest_rdata.desc;
2138 fp->cookie = 0;
2139 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2140 &dest_rdata);
2141 binder_debug(BINDER_DEBUG_TRANSACTION,
2142 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2143 src_rdata.debug_id, src_rdata.desc,
2144 dest_rdata.debug_id, dest_rdata.desc,
2145 node->debug_id);
2146 }
2147 done:
2148 binder_put_node(node);
2149 return ret;
2150 }
2151
2152 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2153 struct binder_transaction *t,
2154 struct binder_thread *thread,
2155 struct binder_transaction *in_reply_to)
2156 {
2157 struct binder_proc *proc = thread->proc;
2158 struct binder_proc *target_proc = t->to_proc;
2159 struct binder_txn_fd_fixup *fixup;
2160 struct file *file;
2161 int ret = 0;
2162 bool target_allows_fd;
2163
2164 if (in_reply_to)
2165 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2166 else
2167 target_allows_fd = t->buffer->target_node->accept_fds;
2168 if (!target_allows_fd) {
2169 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2170 proc->pid, thread->pid,
2171 in_reply_to ? "reply" : "transaction",
2172 fd);
2173 ret = -EPERM;
2174 goto err_fd_not_accepted;
2175 }
2176
2177 file = fget(fd);
2178 if (!file) {
2179 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2180 proc->pid, thread->pid, fd);
2181 ret = -EBADF;
2182 goto err_fget;
2183 }
2184 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2185 if (ret < 0) {
2186 ret = -EPERM;
2187 goto err_security;
2188 }
2189
2190 /*
2191 * Add fixup record for this transaction. The allocation
2192 * of the fd in the target needs to be done from a
2193 * target thread.
2194 */
2195 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2196 if (!fixup) {
2197 ret = -ENOMEM;
2198 goto err_alloc;
2199 }
2200 fixup->file = file;
2201 fixup->offset = fd_offset;
2202 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2203 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2204
2205 return ret;
2206
2207 err_alloc:
2208 err_security:
2209 fput(file);
2210 err_fget:
2211 err_fd_not_accepted:
2212 return ret;
2213 }
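/*
 * Lifecycle sketch (illustrative; the fixup records queued here are
 * consumed later, outside this excerpt): the fd is resolved in the
 * sender's context,
 *
 *	fixup->file = fget(fd);			// here, sender context
 *
 * but the target's fd number is only allocated when a target thread
 * dequeues the transaction, roughly:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);	// later, target context
 *	fd_install(fd, fixup->file);
 */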
2214
2215 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2216 struct binder_buffer_object *parent,
2217 struct binder_transaction *t,
2218 struct binder_thread *thread,
2219 struct binder_transaction *in_reply_to)
2220 {
2221 binder_size_t fdi, fd_buf_size;
2222 binder_size_t fda_offset;
2223 struct binder_proc *proc = thread->proc;
2224 struct binder_proc *target_proc = t->to_proc;
2225
2226 fd_buf_size = sizeof(u32) * fda->num_fds;
2227 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2228 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2229 proc->pid, thread->pid, (u64)fda->num_fds);
2230 return -EINVAL;
2231 }
2232 if (fd_buf_size > parent->length ||
2233 fda->parent_offset > parent->length - fd_buf_size) {
2234 /* No space for all file descriptors here. */
2235 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2236 proc->pid, thread->pid, (u64)fda->num_fds);
2237 return -EINVAL;
2238 }
2239 /*
2240 * the source data for binder_buffer_object is visible
2241 * to user-space and the @buffer element is the user
2242 * pointer to the buffer_object containing the fd_array.
2243 * Convert the address to an offset relative to
2244 * the base of the transaction buffer.
2245 */
2246 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2247 fda->parent_offset;
2248 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2249 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2250 proc->pid, thread->pid);
2251 return -EINVAL;
2252 }
2253 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2254 u32 fd;
2255 int ret;
2256 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2257
2258 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2259 &fd, t->buffer,
2260 offset, sizeof(fd));
2261 if (!ret)
2262 ret = binder_translate_fd(fd, offset, t, thread,
2263 in_reply_to);
2264 if (ret < 0)
2265 return ret;
2266 }
2267 return 0;
2268 }
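/*
 * Worked example of the offset arithmetic above (illustrative
 * numbers): if the parent buffer object was copied to user address
 * t->buffer->user_data + 0x80 (so parent->buffer holds that user
 * pointer) and fda->parent_offset is 0x10, then
 *
 *	fda_offset = (parent->buffer - user_data) + 0x10 = 0x90
 *
 * i.e. the fd array starts 0x90 bytes into the transaction buffer and
 * fd number i is read from fda_offset + i * sizeof(u32).
 */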
2269
2270 static int binder_fixup_parent(struct binder_transaction *t,
2271 struct binder_thread *thread,
2272 struct binder_buffer_object *bp,
2273 binder_size_t off_start_offset,
2274 binder_size_t num_valid,
2275 binder_size_t last_fixup_obj_off,
2276 binder_size_t last_fixup_min_off)
2277 {
2278 struct binder_buffer_object *parent;
2279 struct binder_buffer *b = t->buffer;
2280 struct binder_proc *proc = thread->proc;
2281 struct binder_proc *target_proc = t->to_proc;
2282 struct binder_object object;
2283 binder_size_t buffer_offset;
2284 binder_size_t parent_offset;
2285
2286 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2287 return 0;
2288
2289 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2290 off_start_offset, &parent_offset,
2291 num_valid);
2292 if (!parent) {
2293 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2294 proc->pid, thread->pid);
2295 return -EINVAL;
2296 }
2297
2298 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2299 parent_offset, bp->parent_offset,
2300 last_fixup_obj_off,
2301 last_fixup_min_off)) {
2302 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2303 proc->pid, thread->pid);
2304 return -EINVAL;
2305 }
2306
2307 if (parent->length < sizeof(binder_uintptr_t) ||
2308 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2309 /* No space for a pointer here! */
2310 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2311 proc->pid, thread->pid);
2312 return -EINVAL;
2313 }
2314 buffer_offset = bp->parent_offset +
2315 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2316 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2317 &bp->buffer, sizeof(bp->buffer))) {
2318 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2319 proc->pid, thread->pid);
2320 return -EINVAL;
2321 }
2322
2323 return 0;
2324 }
2325
2326 /**
2327 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2328 * @t: transaction to send
2329 * @proc: process to send the transaction to
2330 * @thread: thread in @proc to send the transaction to (may be NULL)
2331 *
2332 * This function queues a transaction to the specified process. It will try
2333 * to find a thread in the target process to handle the transaction and
2334 * wake it up. If no thread is found, the work is queued to the proc
2335 * waitqueue.
2336 *
2337 * If the @thread parameter is not NULL, the transaction is always queued
2338 * to the waitlist of that specific thread.
2339 *
2340 * Return: 0 if the transaction was successfully queued
2341 * BR_DEAD_REPLY if the target process or thread is dead
2342 * BR_FROZEN_REPLY if the target process or thread is frozen
2343 */
2344 static int binder_proc_transaction(struct binder_transaction *t,
2345 struct binder_proc *proc,
2346 struct binder_thread *thread)
2347 {
2348 struct binder_node *node = t->buffer->target_node;
2349 bool oneway = !!(t->flags & TF_ONE_WAY);
2350 bool pending_async = false;
2351
2352 BUG_ON(!node);
2353 binder_node_lock(node);
2354 if (oneway) {
2355 BUG_ON(thread);
2356 if (node->has_async_transaction)
2357 pending_async = true;
2358 else
2359 node->has_async_transaction = true;
2360 }
2361
2362 binder_inner_proc_lock(proc);
2363 if (proc->is_frozen) {
2364 proc->sync_recv |= !oneway;
2365 proc->async_recv |= oneway;
2366 }
2367
2368 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2369 (thread && thread->is_dead)) {
2370 binder_inner_proc_unlock(proc);
2371 binder_node_unlock(node);
2372 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2373 }
2374
2375 if (!thread && !pending_async)
2376 thread = binder_select_thread_ilocked(proc);
2377
2378 if (thread)
2379 binder_enqueue_thread_work_ilocked(thread, &t->work);
2380 else if (!pending_async)
2381 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2382 else
2383 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2384
2385 if (!pending_async)
2386 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2387
2388 proc->outstanding_txns++;
2389 binder_inner_proc_unlock(proc);
2390 binder_node_unlock(node);
2391
2392 return 0;
2393 }
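/*
 * Queueing summary (sketch of the branches above):
 *
 *	thread != NULL             -> that thread's todo list
 *	!thread && !pending_async  -> a selected waiting thread, else
 *				      proc->todo, then wake the target
 *	pending_async              -> node->async_todo, no wakeup; the
 *				      next async transaction is released
 *				      when the previous buffer is freed
 *				      (see binder_free_buf() below)
 */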
2394
2395 /**
2396 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2397 * @node: struct binder_node for which to get refs
2398 * @procp: returns @node->proc if valid
2399 * @error: if no @procp then returns BR_DEAD_REPLY
2400 *
2401 * User-space normally keeps the node alive when creating a transaction
2402 * since it has a reference to the target. The local strong ref keeps it
2403 * alive if the sending process dies before the target process processes
2404 * the transaction. If the source process is malicious or has a reference
2405 * counting bug, relying on the local strong ref can fail.
2406 *
2407 * Since user-space can cause the local strong ref to go away, we also take
2408 * a tmpref on the node to ensure it survives while we are constructing
2409 * the transaction. We also need a tmpref on the proc while we are
2410 * constructing the transaction, so we take that here as well.
2411 *
2412 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2413 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2414 * target proc has died, @error is set to BR_DEAD_REPLY.
2415 */
2416 static struct binder_node *binder_get_node_refs_for_txn(
2417 struct binder_node *node,
2418 struct binder_proc **procp,
2419 uint32_t *error)
2420 {
2421 struct binder_node *target_node = NULL;
2422
2423 binder_node_inner_lock(node);
2424 if (node->proc) {
2425 target_node = node;
2426 binder_inc_node_nilocked(node, 1, 0, NULL);
2427 binder_inc_node_tmpref_ilocked(node);
2428 node->proc->tmp_ref++;
2429 *procp = node->proc;
2430 } else
2431 *error = BR_DEAD_REPLY;
2432 binder_node_inner_unlock(node);
2433
2434 return target_node;
2435 }
2436
2437 static void binder_transaction(struct binder_proc *proc,
2438 struct binder_thread *thread,
2439 struct binder_transaction_data *tr, int reply,
2440 binder_size_t extra_buffers_size)
2441 {
2442 int ret;
2443 struct binder_transaction *t;
2444 struct binder_work *w;
2445 struct binder_work *tcomplete;
2446 binder_size_t buffer_offset = 0;
2447 binder_size_t off_start_offset, off_end_offset;
2448 binder_size_t off_min;
2449 binder_size_t sg_buf_offset, sg_buf_end_offset;
2450 struct binder_proc *target_proc = NULL;
2451 struct binder_thread *target_thread = NULL;
2452 struct binder_node *target_node = NULL;
2453 struct binder_transaction *in_reply_to = NULL;
2454 struct binder_transaction_log_entry *e;
2455 uint32_t return_error = 0;
2456 uint32_t return_error_param = 0;
2457 uint32_t return_error_line = 0;
2458 binder_size_t last_fixup_obj_off = 0;
2459 binder_size_t last_fixup_min_off = 0;
2460 struct binder_context *context = proc->context;
2461 int t_debug_id = atomic_inc_return(&binder_last_id);
2462 char *secctx = NULL;
2463 u32 secctx_sz = 0;
2464
2465 e = binder_transaction_log_add(&binder_transaction_log);
2466 e->debug_id = t_debug_id;
2467 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2468 e->from_proc = proc->pid;
2469 e->from_thread = thread->pid;
2470 e->target_handle = tr->target.handle;
2471 e->data_size = tr->data_size;
2472 e->offsets_size = tr->offsets_size;
2473 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2474
2475 if (reply) {
2476 binder_inner_proc_lock(proc);
2477 in_reply_to = thread->transaction_stack;
2478 if (in_reply_to == NULL) {
2479 binder_inner_proc_unlock(proc);
2480 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2481 proc->pid, thread->pid);
2482 return_error = BR_FAILED_REPLY;
2483 return_error_param = -EPROTO;
2484 return_error_line = __LINE__;
2485 goto err_empty_call_stack;
2486 }
2487 if (in_reply_to->to_thread != thread) {
2488 spin_lock(&in_reply_to->lock);
2489 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2490 proc->pid, thread->pid, in_reply_to->debug_id,
2491 in_reply_to->to_proc ?
2492 in_reply_to->to_proc->pid : 0,
2493 in_reply_to->to_thread ?
2494 in_reply_to->to_thread->pid : 0);
2495 spin_unlock(&in_reply_to->lock);
2496 binder_inner_proc_unlock(proc);
2497 return_error = BR_FAILED_REPLY;
2498 return_error_param = -EPROTO;
2499 return_error_line = __LINE__;
2500 in_reply_to = NULL;
2501 goto err_bad_call_stack;
2502 }
2503 thread->transaction_stack = in_reply_to->to_parent;
2504 binder_inner_proc_unlock(proc);
2505 binder_set_nice(in_reply_to->saved_priority);
2506 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2507 if (target_thread == NULL) {
2508 /* annotation for sparse */
2509 __release(&target_thread->proc->inner_lock);
2510 return_error = BR_DEAD_REPLY;
2511 return_error_line = __LINE__;
2512 goto err_dead_binder;
2513 }
2514 if (target_thread->transaction_stack != in_reply_to) {
2515 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2516 proc->pid, thread->pid,
2517 target_thread->transaction_stack ?
2518 target_thread->transaction_stack->debug_id : 0,
2519 in_reply_to->debug_id);
2520 binder_inner_proc_unlock(target_thread->proc);
2521 return_error = BR_FAILED_REPLY;
2522 return_error_param = -EPROTO;
2523 return_error_line = __LINE__;
2524 in_reply_to = NULL;
2525 target_thread = NULL;
2526 goto err_dead_binder;
2527 }
2528 target_proc = target_thread->proc;
2529 target_proc->tmp_ref++;
2530 binder_inner_proc_unlock(target_thread->proc);
2531 } else {
2532 if (tr->target.handle) {
2533 struct binder_ref *ref;
2534
2535 /*
2536 * There must already be a strong ref
2537 * on this node. If so, do a strong
2538 * increment on the node to ensure it
2539 * stays alive until the transaction is
2540 * done.
2541 */
2542 binder_proc_lock(proc);
2543 ref = binder_get_ref_olocked(proc, tr->target.handle,
2544 true);
2545 if (ref) {
2546 target_node = binder_get_node_refs_for_txn(
2547 ref->node, &target_proc,
2548 &return_error);
2549 } else {
2550 binder_user_error("%d:%d got transaction to invalid handle\n",
2551 proc->pid, thread->pid);
2552 return_error = BR_FAILED_REPLY;
2553 }
2554 binder_proc_unlock(proc);
2555 } else {
2556 mutex_lock(&context->context_mgr_node_lock);
2557 target_node = context->binder_context_mgr_node;
2558 if (target_node)
2559 target_node = binder_get_node_refs_for_txn(
2560 target_node, &target_proc,
2561 &return_error);
2562 else
2563 return_error = BR_DEAD_REPLY;
2564 mutex_unlock(&context->context_mgr_node_lock);
2565 if (target_node && target_proc->pid == proc->pid) {
2566 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2567 proc->pid, thread->pid);
2568 return_error = BR_FAILED_REPLY;
2569 return_error_param = -EINVAL;
2570 return_error_line = __LINE__;
2571 goto err_invalid_target_handle;
2572 }
2573 }
2574 if (!target_node) {
2575 /*
2576 * return_error is set above
2577 */
2578 return_error_param = -EINVAL;
2579 return_error_line = __LINE__;
2580 goto err_dead_binder;
2581 }
2582 e->to_node = target_node->debug_id;
2583 if (WARN_ON(proc == target_proc)) {
2584 return_error = BR_FAILED_REPLY;
2585 return_error_param = -EINVAL;
2586 return_error_line = __LINE__;
2587 goto err_invalid_target_handle;
2588 }
2589 if (security_binder_transaction(proc->tsk,
2590 target_proc->tsk) < 0) {
2591 return_error = BR_FAILED_REPLY;
2592 return_error_param = -EPERM;
2593 return_error_line = __LINE__;
2594 goto err_invalid_target_handle;
2595 }
2596 binder_inner_proc_lock(proc);
2597
2598 w = list_first_entry_or_null(&thread->todo,
2599 struct binder_work, entry);
2600 if (!(tr->flags & TF_ONE_WAY) && w &&
2601 w->type == BINDER_WORK_TRANSACTION) {
2602 /*
2603 * Do not allow new outgoing transaction from a
2604 * thread that has a transaction at the head of
2605 * its todo list. Only need to check the head
2606 * because binder_select_thread_ilocked picks a
2607 * thread from proc->waiting_threads to enqueue
2608 * the transaction, and nothing is queued to the
2609 * todo list while the thread is on waiting_threads.
2610 */
2611 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2612 proc->pid, thread->pid);
2613 binder_inner_proc_unlock(proc);
2614 return_error = BR_FAILED_REPLY;
2615 return_error_param = -EPROTO;
2616 return_error_line = __LINE__;
2617 goto err_bad_todo_list;
2618 }
2619
2620 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2621 struct binder_transaction *tmp;
2622
2623 tmp = thread->transaction_stack;
2624 if (tmp->to_thread != thread) {
2625 spin_lock(&tmp->lock);
2626 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2627 proc->pid, thread->pid, tmp->debug_id,
2628 tmp->to_proc ? tmp->to_proc->pid : 0,
2629 tmp->to_thread ?
2630 tmp->to_thread->pid : 0);
2631 spin_unlock(&tmp->lock);
2632 binder_inner_proc_unlock(proc);
2633 return_error = BR_FAILED_REPLY;
2634 return_error_param = -EPROTO;
2635 return_error_line = __LINE__;
2636 goto err_bad_call_stack;
2637 }
2638 while (tmp) {
2639 struct binder_thread *from;
2640
2641 spin_lock(&tmp->lock);
2642 from = tmp->from;
2643 if (from && from->proc == target_proc) {
2644 atomic_inc(&from->tmp_ref);
2645 target_thread = from;
2646 spin_unlock(&tmp->lock);
2647 break;
2648 }
2649 spin_unlock(&tmp->lock);
2650 tmp = tmp->from_parent;
2651 }
2652 }
2653 binder_inner_proc_unlock(proc);
2654 }
2655 if (target_thread)
2656 e->to_thread = target_thread->pid;
2657 e->to_proc = target_proc->pid;
2658
2659 /* TODO: reuse incoming transaction for reply */
2660 t = kzalloc(sizeof(*t), GFP_KERNEL);
2661 if (t == NULL) {
2662 return_error = BR_FAILED_REPLY;
2663 return_error_param = -ENOMEM;
2664 return_error_line = __LINE__;
2665 goto err_alloc_t_failed;
2666 }
2667 INIT_LIST_HEAD(&t->fd_fixups);
2668 binder_stats_created(BINDER_STAT_TRANSACTION);
2669 spin_lock_init(&t->lock);
2670
2671 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2672 if (tcomplete == NULL) {
2673 return_error = BR_FAILED_REPLY;
2674 return_error_param = -ENOMEM;
2675 return_error_line = __LINE__;
2676 goto err_alloc_tcomplete_failed;
2677 }
2678 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2679
2680 t->debug_id = t_debug_id;
2681
2682 if (reply)
2683 binder_debug(BINDER_DEBUG_TRANSACTION,
2684 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2685 proc->pid, thread->pid, t->debug_id,
2686 target_proc->pid, target_thread->pid,
2687 (u64)tr->data.ptr.buffer,
2688 (u64)tr->data.ptr.offsets,
2689 (u64)tr->data_size, (u64)tr->offsets_size,
2690 (u64)extra_buffers_size);
2691 else
2692 binder_debug(BINDER_DEBUG_TRANSACTION,
2693 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2694 proc->pid, thread->pid, t->debug_id,
2695 target_proc->pid, target_node->debug_id,
2696 (u64)tr->data.ptr.buffer,
2697 (u64)tr->data.ptr.offsets,
2698 (u64)tr->data_size, (u64)tr->offsets_size,
2699 (u64)extra_buffers_size);
2700
2701 if (!reply && !(tr->flags & TF_ONE_WAY))
2702 t->from = thread;
2703 else
2704 t->from = NULL;
2705 t->sender_euid = task_euid(proc->tsk);
2706 t->to_proc = target_proc;
2707 t->to_thread = target_thread;
2708 t->code = tr->code;
2709 t->flags = tr->flags;
2710 t->priority = task_nice(current);
2711
2712 if (target_node && target_node->txn_security_ctx) {
2713 u32 secid;
2714 size_t added_size;
2715
2716 /*
2717 * Arguably this should be the task's subjective LSM secid but
2718 * we can't reliably access the subjective creds of a task
2719 * other than our own so we must use the objective creds, which
2720 * are safe to access. The downside is that if a task is
2721 * temporarily overriding its creds it will not be reflected
2722 * here; however, it isn't clear that binder would handle that
2723 * case well anyway.
2724 */
2725 security_task_getsecid_obj(proc->tsk, &secid);
2726 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2727 if (ret) {
2728 return_error = BR_FAILED_REPLY;
2729 return_error_param = ret;
2730 return_error_line = __LINE__;
2731 goto err_get_secctx_failed;
2732 }
2733 added_size = ALIGN(secctx_sz, sizeof(u64));
2734 extra_buffers_size += added_size;
2735 if (extra_buffers_size < added_size) {
2736 /* integer overflow of extra_buffers_size */
2737 return_error = BR_FAILED_REPLY;
2738 return_error_param = -EINVAL;
2739 return_error_line = __LINE__;
2740 goto err_bad_extra_size;
2741 }
2742 }
2743
2744 trace_binder_transaction(reply, t, target_node);
2745
2746 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2747 tr->offsets_size, extra_buffers_size,
2748 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2749 if (IS_ERR(t->buffer)) {
2750 /*
2751 * -ESRCH indicates VMA cleared. The target is dying.
2752 */
2753 return_error_param = PTR_ERR(t->buffer);
2754 return_error = return_error_param == -ESRCH ?
2755 BR_DEAD_REPLY : BR_FAILED_REPLY;
2756 return_error_line = __LINE__;
2757 t->buffer = NULL;
2758 goto err_binder_alloc_buf_failed;
2759 }
2760 if (secctx) {
2761 int err;
2762 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2763 ALIGN(tr->offsets_size, sizeof(void *)) +
2764 ALIGN(extra_buffers_size, sizeof(void *)) -
2765 ALIGN(secctx_sz, sizeof(u64));
2766
2767 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2768 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2769 t->buffer, buf_offset,
2770 secctx, secctx_sz);
2771 if (err) {
2772 t->security_ctx = 0;
2773 WARN_ON(1);
2774 }
2775 security_release_secctx(secctx, secctx_sz);
2776 secctx = NULL;
2777 }
2778 t->buffer->debug_id = t->debug_id;
2779 t->buffer->transaction = t;
2780 t->buffer->target_node = target_node;
2781 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2782 trace_binder_transaction_alloc_buf(t->buffer);
2783
2784 if (binder_alloc_copy_user_to_buffer(
2785 &target_proc->alloc,
2786 t->buffer, 0,
2787 (const void __user *)
2788 (uintptr_t)tr->data.ptr.buffer,
2789 tr->data_size)) {
2790 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2791 proc->pid, thread->pid);
2792 return_error = BR_FAILED_REPLY;
2793 return_error_param = -EFAULT;
2794 return_error_line = __LINE__;
2795 goto err_copy_data_failed;
2796 }
2797 if (binder_alloc_copy_user_to_buffer(
2798 &target_proc->alloc,
2799 t->buffer,
2800 ALIGN(tr->data_size, sizeof(void *)),
2801 (const void __user *)
2802 (uintptr_t)tr->data.ptr.offsets,
2803 tr->offsets_size)) {
2804 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2805 proc->pid, thread->pid);
2806 return_error = BR_FAILED_REPLY;
2807 return_error_param = -EFAULT;
2808 return_error_line = __LINE__;
2809 goto err_copy_data_failed;
2810 }
2811 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2812 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2813 proc->pid, thread->pid, (u64)tr->offsets_size);
2814 return_error = BR_FAILED_REPLY;
2815 return_error_param = -EINVAL;
2816 return_error_line = __LINE__;
2817 goto err_bad_offset;
2818 }
2819 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2820 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2821 proc->pid, thread->pid,
2822 (u64)extra_buffers_size);
2823 return_error = BR_FAILED_REPLY;
2824 return_error_param = -EINVAL;
2825 return_error_line = __LINE__;
2826 goto err_bad_offset;
2827 }
2828 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2829 buffer_offset = off_start_offset;
2830 off_end_offset = off_start_offset + tr->offsets_size;
2831 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2832 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2833 ALIGN(secctx_sz, sizeof(u64));
2834 off_min = 0;
2835 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2836 buffer_offset += sizeof(binder_size_t)) {
2837 struct binder_object_header *hdr;
2838 size_t object_size;
2839 struct binder_object object;
2840 binder_size_t object_offset;
2841
2842 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2843 &object_offset,
2844 t->buffer,
2845 buffer_offset,
2846 sizeof(object_offset))) {
2847 return_error = BR_FAILED_REPLY;
2848 return_error_param = -EINVAL;
2849 return_error_line = __LINE__;
2850 goto err_bad_offset;
2851 }
2852 object_size = binder_get_object(target_proc, t->buffer,
2853 object_offset, &object);
2854 if (object_size == 0 || object_offset < off_min) {
2855 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2856 proc->pid, thread->pid,
2857 (u64)object_offset,
2858 (u64)off_min,
2859 (u64)t->buffer->data_size);
2860 return_error = BR_FAILED_REPLY;
2861 return_error_param = -EINVAL;
2862 return_error_line = __LINE__;
2863 goto err_bad_offset;
2864 }
2865
2866 hdr = &object.hdr;
2867 off_min = object_offset + object_size;
2868 switch (hdr->type) {
2869 case BINDER_TYPE_BINDER:
2870 case BINDER_TYPE_WEAK_BINDER: {
2871 struct flat_binder_object *fp;
2872
2873 fp = to_flat_binder_object(hdr);
2874 ret = binder_translate_binder(fp, t, thread);
2875
2876 if (ret < 0 ||
2877 binder_alloc_copy_to_buffer(&target_proc->alloc,
2878 t->buffer,
2879 object_offset,
2880 fp, sizeof(*fp))) {
2881 return_error = BR_FAILED_REPLY;
2882 return_error_param = ret;
2883 return_error_line = __LINE__;
2884 goto err_translate_failed;
2885 }
2886 } break;
2887 case BINDER_TYPE_HANDLE:
2888 case BINDER_TYPE_WEAK_HANDLE: {
2889 struct flat_binder_object *fp;
2890
2891 fp = to_flat_binder_object(hdr);
2892 ret = binder_translate_handle(fp, t, thread);
2893 if (ret < 0 ||
2894 binder_alloc_copy_to_buffer(&target_proc->alloc,
2895 t->buffer,
2896 object_offset,
2897 fp, sizeof(*fp))) {
2898 return_error = BR_FAILED_REPLY;
2899 return_error_param = ret;
2900 return_error_line = __LINE__;
2901 goto err_translate_failed;
2902 }
2903 } break;
2904
2905 case BINDER_TYPE_FD: {
2906 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2907 binder_size_t fd_offset = object_offset +
2908 (uintptr_t)&fp->fd - (uintptr_t)fp;
2909 int ret = binder_translate_fd(fp->fd, fd_offset, t,
2910 thread, in_reply_to);
2911
2912 fp->pad_binder = 0;
2913 if (ret < 0 ||
2914 binder_alloc_copy_to_buffer(&target_proc->alloc,
2915 t->buffer,
2916 object_offset,
2917 fp, sizeof(*fp))) {
2918 return_error = BR_FAILED_REPLY;
2919 return_error_param = ret;
2920 return_error_line = __LINE__;
2921 goto err_translate_failed;
2922 }
2923 } break;
2924 case BINDER_TYPE_FDA: {
2925 struct binder_object ptr_object;
2926 binder_size_t parent_offset;
2927 struct binder_fd_array_object *fda =
2928 to_binder_fd_array_object(hdr);
2929 size_t num_valid = (buffer_offset - off_start_offset) /
2930 sizeof(binder_size_t);
2931 struct binder_buffer_object *parent =
2932 binder_validate_ptr(target_proc, t->buffer,
2933 &ptr_object, fda->parent,
2934 off_start_offset,
2935 &parent_offset,
2936 num_valid);
2937 if (!parent) {
2938 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2939 proc->pid, thread->pid);
2940 return_error = BR_FAILED_REPLY;
2941 return_error_param = -EINVAL;
2942 return_error_line = __LINE__;
2943 goto err_bad_parent;
2944 }
2945 if (!binder_validate_fixup(target_proc, t->buffer,
2946 off_start_offset,
2947 parent_offset,
2948 fda->parent_offset,
2949 last_fixup_obj_off,
2950 last_fixup_min_off)) {
2951 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2952 proc->pid, thread->pid);
2953 return_error = BR_FAILED_REPLY;
2954 return_error_param = -EINVAL;
2955 return_error_line = __LINE__;
2956 goto err_bad_parent;
2957 }
2958 ret = binder_translate_fd_array(fda, parent, t, thread,
2959 in_reply_to);
2960 if (ret < 0) {
2961 return_error = BR_FAILED_REPLY;
2962 return_error_param = ret;
2963 return_error_line = __LINE__;
2964 goto err_translate_failed;
2965 }
2966 last_fixup_obj_off = parent_offset;
2967 last_fixup_min_off =
2968 fda->parent_offset + sizeof(u32) * fda->num_fds;
2969 } break;
2970 case BINDER_TYPE_PTR: {
2971 struct binder_buffer_object *bp =
2972 to_binder_buffer_object(hdr);
2973 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
2974 size_t num_valid;
2975
2976 if (bp->length > buf_left) {
2977 binder_user_error("%d:%d got transaction with too large buffer\n",
2978 proc->pid, thread->pid);
2979 return_error = BR_FAILED_REPLY;
2980 return_error_param = -EINVAL;
2981 return_error_line = __LINE__;
2982 goto err_bad_offset;
2983 }
2984 if (binder_alloc_copy_user_to_buffer(
2985 &target_proc->alloc,
2986 t->buffer,
2987 sg_buf_offset,
2988 (const void __user *)
2989 (uintptr_t)bp->buffer,
2990 bp->length)) {
2991 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2992 proc->pid, thread->pid);
2993 return_error_param = -EFAULT;
2994 return_error = BR_FAILED_REPLY;
2995 return_error_line = __LINE__;
2996 goto err_copy_data_failed;
2997 }
2998 /* Fixup buffer pointer to target proc address space */
2999 bp->buffer = (uintptr_t)
3000 t->buffer->user_data + sg_buf_offset;
3001 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3002
3003 num_valid = (buffer_offset - off_start_offset) /
3004 sizeof(binder_size_t);
3005 ret = binder_fixup_parent(t, thread, bp,
3006 off_start_offset,
3007 num_valid,
3008 last_fixup_obj_off,
3009 last_fixup_min_off);
3010 if (ret < 0 ||
3011 binder_alloc_copy_to_buffer(&target_proc->alloc,
3012 t->buffer,
3013 object_offset,
3014 bp, sizeof(*bp))) {
3015 return_error = BR_FAILED_REPLY;
3016 return_error_param = ret;
3017 return_error_line = __LINE__;
3018 goto err_translate_failed;
3019 }
3020 last_fixup_obj_off = object_offset;
3021 last_fixup_min_off = 0;
3022 } break;
3023 default:
3024 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3025 proc->pid, thread->pid, hdr->type);
3026 return_error = BR_FAILED_REPLY;
3027 return_error_param = -EINVAL;
3028 return_error_line = __LINE__;
3029 goto err_bad_object_type;
3030 }
3031 }
3032 if (t->buffer->oneway_spam_suspect)
3033 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3034 else
3035 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3036 t->work.type = BINDER_WORK_TRANSACTION;
3037
3038 if (reply) {
3039 binder_enqueue_thread_work(thread, tcomplete);
3040 binder_inner_proc_lock(target_proc);
3041 if (target_thread->is_dead || target_proc->is_frozen) {
3042 return_error = target_thread->is_dead ?
3043 BR_DEAD_REPLY : BR_FROZEN_REPLY;
3044 binder_inner_proc_unlock(target_proc);
3045 goto err_dead_proc_or_thread;
3046 }
3047 BUG_ON(t->buffer->async_transaction != 0);
3048 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3049 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3050 target_proc->outstanding_txns++;
3051 binder_inner_proc_unlock(target_proc);
3052 wake_up_interruptible_sync(&target_thread->wait);
3053 binder_free_transaction(in_reply_to);
3054 } else if (!(t->flags & TF_ONE_WAY)) {
3055 BUG_ON(t->buffer->async_transaction != 0);
3056 binder_inner_proc_lock(proc);
3057 /*
3058 * Defer the TRANSACTION_COMPLETE, so we don't return to
3059 * userspace immediately; this allows the target process to
3060 * immediately start processing this transaction, reducing
3061 * latency. We will then return the TRANSACTION_COMPLETE when
3062 * the target replies (or there is an error).
3063 */
3064 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3065 t->need_reply = 1;
3066 t->from_parent = thread->transaction_stack;
3067 thread->transaction_stack = t;
3068 binder_inner_proc_unlock(proc);
3069 return_error = binder_proc_transaction(t,
3070 target_proc, target_thread);
3071 if (return_error) {
3072 binder_inner_proc_lock(proc);
3073 binder_pop_transaction_ilocked(thread, t);
3074 binder_inner_proc_unlock(proc);
3075 goto err_dead_proc_or_thread;
3076 }
3077 } else {
3078 BUG_ON(target_node == NULL);
3079 BUG_ON(t->buffer->async_transaction != 1);
3080 binder_enqueue_thread_work(thread, tcomplete);
3081 return_error = binder_proc_transaction(t, target_proc, NULL);
3082 if (return_error)
3083 goto err_dead_proc_or_thread;
3084 }
3085 if (target_thread)
3086 binder_thread_dec_tmpref(target_thread);
3087 binder_proc_dec_tmpref(target_proc);
3088 if (target_node)
3089 binder_dec_node_tmpref(target_node);
3090 /*
3091 * write barrier to synchronize with initialization
3092 * of log entry
3093 */
3094 smp_wmb();
3095 WRITE_ONCE(e->debug_id_done, t_debug_id);
3096 return;
3097
3098 err_dead_proc_or_thread:
3099 return_error_line = __LINE__;
3100 binder_dequeue_work(proc, tcomplete);
3101 err_translate_failed:
3102 err_bad_object_type:
3103 err_bad_offset:
3104 err_bad_parent:
3105 err_copy_data_failed:
3106 binder_free_txn_fixups(t);
3107 trace_binder_transaction_failed_buffer_release(t->buffer);
3108 binder_transaction_buffer_release(target_proc, t->buffer,
3109 buffer_offset, true);
3110 if (target_node)
3111 binder_dec_node_tmpref(target_node);
3112 target_node = NULL;
3113 t->buffer->transaction = NULL;
3114 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3115 err_binder_alloc_buf_failed:
3116 err_bad_extra_size:
3117 if (secctx)
3118 security_release_secctx(secctx, secctx_sz);
3119 err_get_secctx_failed:
3120 kfree(tcomplete);
3121 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3122 err_alloc_tcomplete_failed:
3123 if (trace_binder_txn_latency_free_enabled())
3124 binder_txn_latency_free(t);
3125 kfree(t);
3126 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3127 err_alloc_t_failed:
3128 err_bad_todo_list:
3129 err_bad_call_stack:
3130 err_empty_call_stack:
3131 err_dead_binder:
3132 err_invalid_target_handle:
3133 if (target_thread)
3134 binder_thread_dec_tmpref(target_thread);
3135 if (target_proc)
3136 binder_proc_dec_tmpref(target_proc);
3137 if (target_node) {
3138 binder_dec_node(target_node, 1, 0);
3139 binder_dec_node_tmpref(target_node);
3140 }
3141
3142 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3143 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3144 proc->pid, thread->pid, return_error, return_error_param,
3145 (u64)tr->data_size, (u64)tr->offsets_size,
3146 return_error_line);
3147
3148 {
3149 struct binder_transaction_log_entry *fe;
3150
3151 e->return_error = return_error;
3152 e->return_error_param = return_error_param;
3153 e->return_error_line = return_error_line;
3154 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3155 *fe = *e;
3156 /*
3157 * write barrier to synchronize with initialization
3158 * of log entry
3159 */
3160 smp_wmb();
3161 WRITE_ONCE(e->debug_id_done, t_debug_id);
3162 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3163 }
3164
3165 BUG_ON(thread->return_error.cmd != BR_OK);
3166 if (in_reply_to) {
3167 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3168 binder_enqueue_thread_work(thread, &thread->return_error.work);
3169 binder_send_failed_reply(in_reply_to, return_error);
3170 } else {
3171 thread->return_error.cmd = return_error;
3172 binder_enqueue_thread_work(thread, &thread->return_error.work);
3173 }
3174 }
3175
3176 /**
3177 * binder_free_buf() - free the specified buffer
3178 * @proc: binder proc that owns buffer
3179 * @buffer: buffer to be freed
3180 *
3181 * If the buffer belongs to an async transaction, enqueue the next
3182 * async transaction from the node.
3183 *
3184 * Clean up the buffer and free it.
3185 */
3186 static void
3187 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3188 {
3189 binder_inner_proc_lock(proc);
3190 if (buffer->transaction) {
3191 buffer->transaction->buffer = NULL;
3192 buffer->transaction = NULL;
3193 }
3194 binder_inner_proc_unlock(proc);
3195 if (buffer->async_transaction && buffer->target_node) {
3196 struct binder_node *buf_node;
3197 struct binder_work *w;
3198
3199 buf_node = buffer->target_node;
3200 binder_node_inner_lock(buf_node);
3201 BUG_ON(!buf_node->has_async_transaction);
3202 BUG_ON(buf_node->proc != proc);
3203 w = binder_dequeue_work_head_ilocked(
3204 &buf_node->async_todo);
3205 if (!w) {
3206 buf_node->has_async_transaction = false;
3207 } else {
3208 binder_enqueue_work_ilocked(
3209 w, &proc->todo);
3210 binder_wakeup_proc_ilocked(proc);
3211 }
3212 binder_node_inner_unlock(buf_node);
3213 }
3214 trace_binder_transaction_buffer_release(buffer);
3215 binder_transaction_buffer_release(proc, buffer, 0, false);
3216 binder_alloc_free_buf(&proc->alloc, buffer);
3217 }
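/*
 * User-space sketch (illustrative, assumes the uapi binder.h): a
 * client returns a consumed buffer with BC_FREE_BUFFER, passing the
 * user pointer it received in BR_TRANSACTION/BR_REPLY:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __packed wr = {
 *		.cmd = BC_FREE_BUFFER,
 *		.ptr = txn.data.ptr.buffer,	// hypothetical txn variable
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wr),
 *		.write_buffer = (binder_uintptr_t)&wr,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */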
3218
3219 static int binder_thread_write(struct binder_proc *proc,
3220 struct binder_thread *thread,
3221 binder_uintptr_t binder_buffer, size_t size,
3222 binder_size_t *consumed)
3223 {
3224 uint32_t cmd;
3225 struct binder_context *context = proc->context;
3226 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3227 void __user *ptr = buffer + *consumed;
3228 void __user *end = buffer + size;
3229
3230 while (ptr < end && thread->return_error.cmd == BR_OK) {
3231 int ret;
3232
3233 if (get_user(cmd, (uint32_t __user *)ptr))
3234 return -EFAULT;
3235 ptr += sizeof(uint32_t);
3236 trace_binder_command(cmd);
3237 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3238 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3239 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3240 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3241 }
3242 switch (cmd) {
3243 case BC_INCREFS:
3244 case BC_ACQUIRE:
3245 case BC_RELEASE:
3246 case BC_DECREFS: {
3247 uint32_t target;
3248 const char *debug_string;
3249 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3250 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3251 struct binder_ref_data rdata;
3252
3253 if (get_user(target, (uint32_t __user *)ptr))
3254 return -EFAULT;
3255
3256 ptr += sizeof(uint32_t);
3257 ret = -1;
3258 if (increment && !target) {
3259 struct binder_node *ctx_mgr_node;
3260
3261 mutex_lock(&context->context_mgr_node_lock);
3262 ctx_mgr_node = context->binder_context_mgr_node;
3263 if (ctx_mgr_node) {
3264 if (ctx_mgr_node->proc == proc) {
3265 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3266 proc->pid, thread->pid);
3267 mutex_unlock(&context->context_mgr_node_lock);
3268 return -EINVAL;
3269 }
3270 ret = binder_inc_ref_for_node(
3271 proc, ctx_mgr_node,
3272 strong, NULL, &rdata);
3273 }
3274 mutex_unlock(&context->context_mgr_node_lock);
3275 }
3276 if (ret)
3277 ret = binder_update_ref_for_handle(
3278 proc, target, increment, strong,
3279 &rdata);
3280 if (!ret && rdata.desc != target) {
3281 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3282 proc->pid, thread->pid,
3283 target, rdata.desc);
3284 }
3285 switch (cmd) {
3286 case BC_INCREFS:
3287 debug_string = "IncRefs";
3288 break;
3289 case BC_ACQUIRE:
3290 debug_string = "Acquire";
3291 break;
3292 case BC_RELEASE:
3293 debug_string = "Release";
3294 break;
3295 case BC_DECREFS:
3296 default:
3297 debug_string = "DecRefs";
3298 break;
3299 }
3300 if (ret) {
3301 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3302 proc->pid, thread->pid, debug_string,
3303 strong, target, ret);
3304 break;
3305 }
3306 binder_debug(BINDER_DEBUG_USER_REFS,
3307 "%d:%d %s ref %d desc %d s %d w %d\n",
3308 proc->pid, thread->pid, debug_string,
3309 rdata.debug_id, rdata.desc, rdata.strong,
3310 rdata.weak);
3311 break;
3312 }
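		/*
		 * Wire format sketch (illustrative): each of these refcount
		 * commands is a u32 command word followed by the u32 handle
		 * it targets, e.g. taking a strong reference on handle 5:
		 *
		 *	uint32_t wr[2] = { BC_ACQUIRE, 5 };
		 *
		 * submitted through BINDER_WRITE_READ like any other BC_
		 * command.
		 */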
3313 case BC_INCREFS_DONE:
3314 case BC_ACQUIRE_DONE: {
3315 binder_uintptr_t node_ptr;
3316 binder_uintptr_t cookie;
3317 struct binder_node *node;
3318 bool free_node;
3319
3320 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3321 return -EFAULT;
3322 ptr += sizeof(binder_uintptr_t);
3323 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3324 return -EFAULT;
3325 ptr += sizeof(binder_uintptr_t);
3326 node = binder_get_node(proc, node_ptr);
3327 if (node == NULL) {
3328 binder_user_error("%d:%d %s u%016llx no match\n",
3329 proc->pid, thread->pid,
3330 cmd == BC_INCREFS_DONE ?
3331 "BC_INCREFS_DONE" :
3332 "BC_ACQUIRE_DONE",
3333 (u64)node_ptr);
3334 break;
3335 }
3336 if (cookie != node->cookie) {
3337 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3338 proc->pid, thread->pid,
3339 cmd == BC_INCREFS_DONE ?
3340 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3341 (u64)node_ptr, node->debug_id,
3342 (u64)cookie, (u64)node->cookie);
3343 binder_put_node(node);
3344 break;
3345 }
3346 binder_node_inner_lock(node);
3347 if (cmd == BC_ACQUIRE_DONE) {
3348 if (node->pending_strong_ref == 0) {
3349 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3350 proc->pid, thread->pid,
3351 node->debug_id);
3352 binder_node_inner_unlock(node);
3353 binder_put_node(node);
3354 break;
3355 }
3356 node->pending_strong_ref = 0;
3357 } else {
3358 if (node->pending_weak_ref == 0) {
3359 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3360 proc->pid, thread->pid,
3361 node->debug_id);
3362 binder_node_inner_unlock(node);
3363 binder_put_node(node);
3364 break;
3365 }
3366 node->pending_weak_ref = 0;
3367 }
3368 free_node = binder_dec_node_nilocked(node,
3369 cmd == BC_ACQUIRE_DONE, 0);
3370 WARN_ON(free_node);
3371 binder_debug(BINDER_DEBUG_USER_REFS,
3372 "%d:%d %s node %d ls %d lw %d tr %d\n",
3373 proc->pid, thread->pid,
3374 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3375 node->debug_id, node->local_strong_refs,
3376 node->local_weak_refs, node->tmp_refs);
3377 binder_node_inner_unlock(node);
3378 binder_put_node(node);
3379 break;
3380 }
3381 case BC_ATTEMPT_ACQUIRE:
3382 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3383 return -EINVAL;
3384 case BC_ACQUIRE_RESULT:
3385 pr_err("BC_ACQUIRE_RESULT not supported\n");
3386 return -EINVAL;
3387
3388 case BC_FREE_BUFFER: {
3389 binder_uintptr_t data_ptr;
3390 struct binder_buffer *buffer;
3391
3392 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3393 return -EFAULT;
3394 ptr += sizeof(binder_uintptr_t);
3395
3396 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3397 data_ptr);
3398 if (IS_ERR_OR_NULL(buffer)) {
3399 if (PTR_ERR(buffer) == -EPERM) {
3400 binder_user_error(
3401 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3402 proc->pid, thread->pid,
3403 (u64)data_ptr);
3404 } else {
3405 binder_user_error(
3406 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3407 proc->pid, thread->pid,
3408 (u64)data_ptr);
3409 }
3410 break;
3411 }
3412 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3413 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3414 proc->pid, thread->pid, (u64)data_ptr,
3415 buffer->debug_id,
3416 buffer->transaction ? "active" : "finished");
3417 binder_free_buf(proc, buffer);
3418 break;
3419 }
3420
3421 case BC_TRANSACTION_SG:
3422 case BC_REPLY_SG: {
3423 struct binder_transaction_data_sg tr;
3424
3425 if (copy_from_user(&tr, ptr, sizeof(tr)))
3426 return -EFAULT;
3427 ptr += sizeof(tr);
3428 binder_transaction(proc, thread, &tr.transaction_data,
3429 cmd == BC_REPLY_SG, tr.buffers_size);
3430 break;
3431 }
3432 case BC_TRANSACTION:
3433 case BC_REPLY: {
3434 struct binder_transaction_data tr;
3435
3436 if (copy_from_user(&tr, ptr, sizeof(tr)))
3437 return -EFAULT;
3438 ptr += sizeof(tr);
3439 binder_transaction(proc, thread, &tr,
3440 cmd == BC_REPLY, 0);
3441 break;
3442 }
3443
3444 case BC_REGISTER_LOOPER:
3445 binder_debug(BINDER_DEBUG_THREADS,
3446 "%d:%d BC_REGISTER_LOOPER\n",
3447 proc->pid, thread->pid);
3448 binder_inner_proc_lock(proc);
3449 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3450 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3451 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3452 proc->pid, thread->pid);
3453 } else if (proc->requested_threads == 0) {
3454 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3455 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3456 proc->pid, thread->pid);
3457 } else {
3458 proc->requested_threads--;
3459 proc->requested_threads_started++;
3460 }
3461 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3462 binder_inner_proc_unlock(proc);
3463 break;
3464 case BC_ENTER_LOOPER:
3465 binder_debug(BINDER_DEBUG_THREADS,
3466 "%d:%d BC_ENTER_LOOPER\n",
3467 proc->pid, thread->pid);
3468 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3469 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3470 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3471 proc->pid, thread->pid);
3472 }
3473 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3474 break;
3475 case BC_EXIT_LOOPER:
3476 binder_debug(BINDER_DEBUG_THREADS,
3477 "%d:%d BC_EXIT_LOOPER\n",
3478 proc->pid, thread->pid);
3479 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3480 break;
3481
3482 case BC_REQUEST_DEATH_NOTIFICATION:
3483 case BC_CLEAR_DEATH_NOTIFICATION: {
3484 uint32_t target;
3485 binder_uintptr_t cookie;
3486 struct binder_ref *ref;
3487 struct binder_ref_death *death = NULL;
3488
3489 if (get_user(target, (uint32_t __user *)ptr))
3490 return -EFAULT;
3491 ptr += sizeof(uint32_t);
3492 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3493 return -EFAULT;
3494 ptr += sizeof(binder_uintptr_t);
3495 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3496 /*
3497 * Allocate memory for death notification
3498 * before taking lock
3499 */
3500 death = kzalloc(sizeof(*death), GFP_KERNEL);
3501 if (death == NULL) {
3502 WARN_ON(thread->return_error.cmd !=
3503 BR_OK);
3504 thread->return_error.cmd = BR_ERROR;
3505 binder_enqueue_thread_work(
3506 thread,
3507 &thread->return_error.work);
3508 binder_debug(
3509 BINDER_DEBUG_FAILED_TRANSACTION,
3510 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3511 proc->pid, thread->pid);
3512 break;
3513 }
3514 }
3515 binder_proc_lock(proc);
3516 ref = binder_get_ref_olocked(proc, target, false);
3517 if (ref == NULL) {
3518 binder_user_error("%d:%d %s invalid ref %d\n",
3519 proc->pid, thread->pid,
3520 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3521 "BC_REQUEST_DEATH_NOTIFICATION" :
3522 "BC_CLEAR_DEATH_NOTIFICATION",
3523 target);
3524 binder_proc_unlock(proc);
3525 kfree(death);
3526 break;
3527 }
3528
3529 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3530 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3531 proc->pid, thread->pid,
3532 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3533 "BC_REQUEST_DEATH_NOTIFICATION" :
3534 "BC_CLEAR_DEATH_NOTIFICATION",
3535 (u64)cookie, ref->data.debug_id,
3536 ref->data.desc, ref->data.strong,
3537 ref->data.weak, ref->node->debug_id);
3538
3539 binder_node_lock(ref->node);
3540 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3541 if (ref->death) {
3542 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3543 proc->pid, thread->pid);
3544 binder_node_unlock(ref->node);
3545 binder_proc_unlock(proc);
3546 kfree(death);
3547 break;
3548 }
3549 binder_stats_created(BINDER_STAT_DEATH);
3550 INIT_LIST_HEAD(&death->work.entry);
3551 death->cookie = cookie;
3552 ref->death = death;
3553 if (ref->node->proc == NULL) {
3554 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3555
3556 binder_inner_proc_lock(proc);
3557 binder_enqueue_work_ilocked(
3558 &ref->death->work, &proc->todo);
3559 binder_wakeup_proc_ilocked(proc);
3560 binder_inner_proc_unlock(proc);
3561 }
3562 } else {
3563 if (ref->death == NULL) {
3564 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3565 proc->pid, thread->pid);
3566 binder_node_unlock(ref->node);
3567 binder_proc_unlock(proc);
3568 break;
3569 }
3570 death = ref->death;
3571 if (death->cookie != cookie) {
3572 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3573 proc->pid, thread->pid,
3574 (u64)death->cookie,
3575 (u64)cookie);
3576 binder_node_unlock(ref->node);
3577 binder_proc_unlock(proc);
3578 break;
3579 }
3580 ref->death = NULL;
3581 binder_inner_proc_lock(proc);
3582 if (list_empty(&death->work.entry)) {
3583 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3584 if (thread->looper &
3585 (BINDER_LOOPER_STATE_REGISTERED |
3586 BINDER_LOOPER_STATE_ENTERED))
3587 binder_enqueue_thread_work_ilocked(
3588 thread,
3589 &death->work);
3590 else {
3591 binder_enqueue_work_ilocked(
3592 &death->work,
3593 &proc->todo);
3594 binder_wakeup_proc_ilocked(
3595 proc);
3596 }
3597 } else {
3598 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3599 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3600 }
3601 binder_inner_proc_unlock(proc);
3602 }
3603 binder_node_unlock(ref->node);
3604 binder_proc_unlock(proc);
3605 } break;
3606 case BC_DEAD_BINDER_DONE: {
3607 struct binder_work *w;
3608 binder_uintptr_t cookie;
3609 struct binder_ref_death *death = NULL;
3610
3611 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3612 return -EFAULT;
3613
3614 ptr += sizeof(cookie);
3615 binder_inner_proc_lock(proc);
3616 list_for_each_entry(w, &proc->delivered_death,
3617 entry) {
3618 struct binder_ref_death *tmp_death =
3619 container_of(w,
3620 struct binder_ref_death,
3621 work);
3622
3623 if (tmp_death->cookie == cookie) {
3624 death = tmp_death;
3625 break;
3626 }
3627 }
3628 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3629 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3630 proc->pid, thread->pid, (u64)cookie,
3631 death);
3632 if (death == NULL) {
3633 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3634 proc->pid, thread->pid, (u64)cookie);
3635 binder_inner_proc_unlock(proc);
3636 break;
3637 }
3638 binder_dequeue_work_ilocked(&death->work);
3639 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3640 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3641 if (thread->looper &
3642 (BINDER_LOOPER_STATE_REGISTERED |
3643 BINDER_LOOPER_STATE_ENTERED))
3644 binder_enqueue_thread_work_ilocked(
3645 thread, &death->work);
3646 else {
3647 binder_enqueue_work_ilocked(
3648 &death->work,
3649 &proc->todo);
3650 binder_wakeup_proc_ilocked(proc);
3651 }
3652 }
3653 binder_inner_proc_unlock(proc);
3654 } break;
3655
3656 default:
3657 pr_err("%d:%d unknown command %d\n",
3658 proc->pid, thread->pid, cmd);
3659 return -EINVAL;
3660 }
3661 *consumed = ptr - buffer;
3662 }
3663 return 0;
3664 }
3665
3666 static void binder_stat_br(struct binder_proc *proc,
3667 struct binder_thread *thread, uint32_t cmd)
3668 {
3669 trace_binder_return(cmd);
3670 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3671 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3672 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3673 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3674 }
3675 }
3676
3677 static int binder_put_node_cmd(struct binder_proc *proc,
3678 struct binder_thread *thread,
3679 void __user **ptrp,
3680 binder_uintptr_t node_ptr,
3681 binder_uintptr_t node_cookie,
3682 int node_debug_id,
3683 uint32_t cmd, const char *cmd_name)
3684 {
3685 void __user *ptr = *ptrp;
3686
3687 if (put_user(cmd, (uint32_t __user *)ptr))
3688 return -EFAULT;
3689 ptr += sizeof(uint32_t);
3690
3691 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3692 return -EFAULT;
3693 ptr += sizeof(binder_uintptr_t);
3694
3695 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3696 return -EFAULT;
3697 ptr += sizeof(binder_uintptr_t);
3698
3699 binder_stat_br(proc, thread, cmd);
3700 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3701 proc->pid, thread->pid, cmd_name, node_debug_id,
3702 (u64)node_ptr, (u64)node_cookie);
3703
3704 *ptrp = ptr;
3705 return 0;
3706 }
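/*
* Each record emitted by binder_put_node_cmd() occupies sizeof(u32) +
* 2 * sizeof(binder_uintptr_t) bytes of the read buffer: the BR_*
* opcode followed by the node's ptr and cookie. A hedged sketch of how
* a userspace reader (with a hypothetical cursor "p") could consume
* one record:
*
*     uint32_t br;
*     binder_uintptr_t ptr, cookie;
*
*     memcpy(&br, p, sizeof(br));         p += sizeof(br);
*     memcpy(&ptr, p, sizeof(ptr));       p += sizeof(ptr);
*     memcpy(&cookie, p, sizeof(cookie)); p += sizeof(cookie);
*/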
3707
3708 static int binder_wait_for_work(struct binder_thread *thread,
3709 bool do_proc_work)
3710 {
3711 DEFINE_WAIT(wait);
3712 struct binder_proc *proc = thread->proc;
3713 int ret = 0;
3714
3715 freezer_do_not_count();
3716 binder_inner_proc_lock(proc);
3717 for (;;) {
3718 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3719 if (binder_has_work_ilocked(thread, do_proc_work))
3720 break;
3721 if (do_proc_work)
3722 list_add(&thread->waiting_thread_node,
3723 &proc->waiting_threads);
3724 binder_inner_proc_unlock(proc);
3725 schedule();
3726 binder_inner_proc_lock(proc);
3727 list_del_init(&thread->waiting_thread_node);
3728 if (signal_pending(current)) {
3729 ret = -EINTR;
3730 break;
3731 }
3732 }
3733 finish_wait(&thread->wait, &wait);
3734 binder_inner_proc_unlock(proc);
3735 freezer_count();
3736
3737 return ret;
3738 }
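/*
* binder_wait_for_work() is the standard open-coded wait-queue loop:
* publish the waiter with prepare_to_wait(), re-check the condition
* under the lock, drop the lock before schedule(), and re-take it on
* wakeup. A condensed sketch of the same pattern, with hypothetical
* "wq", "lock" and "cond" standing in for the binder-specific pieces:
*
*     for (;;) {
*         prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
*         if (cond)
*             break;
*         spin_unlock(&lock);
*         schedule();
*         spin_lock(&lock);
*         if (signal_pending(current))
*             break;        // reported as -EINTR above
*     }
*     finish_wait(&wq, &wait);
*/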
3739
3740 /**
3741 * binder_apply_fd_fixups() - finish fd translation
3742 * @proc: binder_proc associated with @t->buffer
3743 * @t: binder transaction with list of fd fixups
3744 *
3745 * Now that we are in the context of the transaction target
3746 * process, we can allocate and install fds. Process the
3747 * list of fds to translate and fixup the buffer with the
3748 * new fds.
3749 *
3750 * If we fail to allocate an fd, then free the resources by
3751 * fput'ing files that have not been processed and ksys_close'ing
3752 * any fds that have already been allocated.
3753 */
3754 static int binder_apply_fd_fixups(struct binder_proc *proc,
3755 struct binder_transaction *t)
3756 {
3757 struct binder_txn_fd_fixup *fixup, *tmp;
3758 int ret = 0;
3759
3760 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3761 int fd = get_unused_fd_flags(O_CLOEXEC);
3762
3763 if (fd < 0) {
3764 binder_debug(BINDER_DEBUG_TRANSACTION,
3765 "failed fd fixup txn %d fd %d\n",
3766 t->debug_id, fd);
3767 ret = -ENOMEM;
3768 break;
3769 }
3770 binder_debug(BINDER_DEBUG_TRANSACTION,
3771 "fd fixup txn %d fd %d\n",
3772 t->debug_id, fd);
3773 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3774 fd_install(fd, fixup->file);
3775 fixup->file = NULL;
3776 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3777 fixup->offset, &fd,
3778 sizeof(u32))) {
3779 ret = -EINVAL;
3780 break;
3781 }
3782 }
3783 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3784 if (fixup->file) {
3785 fput(fixup->file);
3786 } else if (ret) {
3787 u32 fd;
3788 int err;
3789
3790 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3791 t->buffer,
3792 fixup->offset,
3793 sizeof(fd));
3794 WARN_ON(err);
3795 if (!err)
3796 binder_deferred_fd_close(fd);
3797 }
3798 list_del(&fixup->fixup_entry);
3799 kfree(fixup);
3800 }
3801
3802 return ret;
3803 }
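/*
* Design note: binder_apply_fd_fixups() runs in two passes so that a
* mid-list failure is recoverable. The first pass installs fds and
* clears fixup->file on each success; the second pass fput()s any file
* that was never installed and, if an error occurred, reads the
* already-written fds back out of the buffer so they can be closed via
* binder_deferred_fd_close().
*/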
3804
3805 static int binder_thread_read(struct binder_proc *proc,
3806 struct binder_thread *thread,
3807 binder_uintptr_t binder_buffer, size_t size,
3808 binder_size_t *consumed, int non_block)
3809 {
3810 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3811 void __user *ptr = buffer + *consumed;
3812 void __user *end = buffer + size;
3813
3814 int ret = 0;
3815 int wait_for_proc_work;
3816
3817 if (*consumed == 0) {
3818 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3819 return -EFAULT;
3820 ptr += sizeof(uint32_t);
3821 }
3822
3823 retry:
3824 binder_inner_proc_lock(proc);
3825 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3826 binder_inner_proc_unlock(proc);
3827
3828 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3829
3830 trace_binder_wait_for_work(wait_for_proc_work,
3831 !!thread->transaction_stack,
3832 !binder_worklist_empty(proc, &thread->todo));
3833 if (wait_for_proc_work) {
3834 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3835 BINDER_LOOPER_STATE_ENTERED))) {
3836 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3837 proc->pid, thread->pid, thread->looper);
3838 wait_event_interruptible(binder_user_error_wait,
3839 binder_stop_on_user_error < 2);
3840 }
3841 binder_set_nice(proc->default_priority);
3842 }
3843
3844 if (non_block) {
3845 if (!binder_has_work(thread, wait_for_proc_work))
3846 ret = -EAGAIN;
3847 } else {
3848 ret = binder_wait_for_work(thread, wait_for_proc_work);
3849 }
3850
3851 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3852
3853 if (ret)
3854 return ret;
3855
3856 while (1) {
3857 uint32_t cmd;
3858 struct binder_transaction_data_secctx tr;
3859 struct binder_transaction_data *trd = &tr.transaction_data;
3860 struct binder_work *w = NULL;
3861 struct list_head *list = NULL;
3862 struct binder_transaction *t = NULL;
3863 struct binder_thread *t_from;
3864 size_t trsize = sizeof(*trd);
3865
3866 binder_inner_proc_lock(proc);
3867 if (!binder_worklist_empty_ilocked(&thread->todo))
3868 list = &thread->todo;
3869 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3870 wait_for_proc_work)
3871 list = &proc->todo;
3872 else {
3873 binder_inner_proc_unlock(proc);
3874
3875 /* no data added beyond the initial BR_NOOP */
3876 if (ptr - buffer == 4 && !thread->looper_need_return)
3877 goto retry;
3878 break;
3879 }
3880
3881 if (end - ptr < sizeof(tr) + 4) {
3882 binder_inner_proc_unlock(proc);
3883 break;
3884 }
3885 w = binder_dequeue_work_head_ilocked(list);
3886 if (binder_worklist_empty_ilocked(&thread->todo))
3887 thread->process_todo = false;
3888
3889 switch (w->type) {
3890 case BINDER_WORK_TRANSACTION: {
3891 binder_inner_proc_unlock(proc);
3892 t = container_of(w, struct binder_transaction, work);
3893 } break;
3894 case BINDER_WORK_RETURN_ERROR: {
3895 struct binder_error *e = container_of(
3896 w, struct binder_error, work);
3897
3898 WARN_ON(e->cmd == BR_OK);
3899 binder_inner_proc_unlock(proc);
3900 if (put_user(e->cmd, (uint32_t __user *)ptr))
3901 return -EFAULT;
3902 cmd = e->cmd;
3903 e->cmd = BR_OK;
3904 ptr += sizeof(uint32_t);
3905
3906 binder_stat_br(proc, thread, cmd);
3907 } break;
3908 case BINDER_WORK_TRANSACTION_COMPLETE:
3909 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3910 if (proc->oneway_spam_detection_enabled &&
3911 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3912 cmd = BR_ONEWAY_SPAM_SUSPECT;
3913 else
3914 cmd = BR_TRANSACTION_COMPLETE;
3915 binder_inner_proc_unlock(proc);
3916 kfree(w);
3917 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3918 if (put_user(cmd, (uint32_t __user *)ptr))
3919 return -EFAULT;
3920 ptr += sizeof(uint32_t);
3921
3922 binder_stat_br(proc, thread, cmd);
3923 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3924 "%d:%d BR_TRANSACTION_COMPLETE\n",
3925 proc->pid, thread->pid);
3926 } break;
3927 case BINDER_WORK_NODE: {
3928 struct binder_node *node = container_of(w, struct binder_node, work);
3929 int strong, weak;
3930 binder_uintptr_t node_ptr = node->ptr;
3931 binder_uintptr_t node_cookie = node->cookie;
3932 int node_debug_id = node->debug_id;
3933 int has_weak_ref;
3934 int has_strong_ref;
3935 void __user *orig_ptr = ptr;
3936
3937 BUG_ON(proc != node->proc);
3938 strong = node->internal_strong_refs ||
3939 node->local_strong_refs;
3940 weak = !hlist_empty(&node->refs) ||
3941 node->local_weak_refs ||
3942 node->tmp_refs || strong;
3943 has_strong_ref = node->has_strong_ref;
3944 has_weak_ref = node->has_weak_ref;
3945
3946 if (weak && !has_weak_ref) {
3947 node->has_weak_ref = 1;
3948 node->pending_weak_ref = 1;
3949 node->local_weak_refs++;
3950 }
3951 if (strong && !has_strong_ref) {
3952 node->has_strong_ref = 1;
3953 node->pending_strong_ref = 1;
3954 node->local_strong_refs++;
3955 }
3956 if (!strong && has_strong_ref)
3957 node->has_strong_ref = 0;
3958 if (!weak && has_weak_ref)
3959 node->has_weak_ref = 0;
3960 if (!weak && !strong) {
3961 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3962 "%d:%d node %d u%016llx c%016llx deleted\n",
3963 proc->pid, thread->pid,
3964 node_debug_id,
3965 (u64)node_ptr,
3966 (u64)node_cookie);
3967 rb_erase(&node->rb_node, &proc->nodes);
3968 binder_inner_proc_unlock(proc);
3969 binder_node_lock(node);
3970 /*
3971 * Acquire the node lock before freeing the
3972 * node to serialize with other threads that
3973 * may have been holding the node lock while
3974 * decrementing this node (avoids race where
3975 * this thread frees while the other thread
3976 * is unlocking the node after the final
3977 * decrement)
3978 */
3979 binder_node_unlock(node);
3980 binder_free_node(node);
3981 } else
3982 binder_inner_proc_unlock(proc);
3983
3984 if (weak && !has_weak_ref)
3985 ret = binder_put_node_cmd(
3986 proc, thread, &ptr, node_ptr,
3987 node_cookie, node_debug_id,
3988 BR_INCREFS, "BR_INCREFS");
3989 if (!ret && strong && !has_strong_ref)
3990 ret = binder_put_node_cmd(
3991 proc, thread, &ptr, node_ptr,
3992 node_cookie, node_debug_id,
3993 BR_ACQUIRE, "BR_ACQUIRE");
3994 if (!ret && !strong && has_strong_ref)
3995 ret = binder_put_node_cmd(
3996 proc, thread, &ptr, node_ptr,
3997 node_cookie, node_debug_id,
3998 BR_RELEASE, "BR_RELEASE");
3999 if (!ret && !weak && has_weak_ref)
4000 ret = binder_put_node_cmd(
4001 proc, thread, &ptr, node_ptr,
4002 node_cookie, node_debug_id,
4003 BR_DECREFS, "BR_DECREFS");
4004 if (orig_ptr == ptr)
4005 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4006 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4007 proc->pid, thread->pid,
4008 node_debug_id,
4009 (u64)node_ptr,
4010 (u64)node_cookie);
4011 if (ret)
4012 return ret;
4013 } break;
4014 case BINDER_WORK_DEAD_BINDER:
4015 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4016 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4017 struct binder_ref_death *death;
4018 uint32_t cmd;
4019 binder_uintptr_t cookie;
4020
4021 death = container_of(w, struct binder_ref_death, work);
4022 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4023 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4024 else
4025 cmd = BR_DEAD_BINDER;
4026 cookie = death->cookie;
4027
4028 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4029 "%d:%d %s %016llx\n",
4030 proc->pid, thread->pid,
4031 cmd == BR_DEAD_BINDER ?
4032 "BR_DEAD_BINDER" :
4033 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4034 (u64)cookie);
4035 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4036 binder_inner_proc_unlock(proc);
4037 kfree(death);
4038 binder_stats_deleted(BINDER_STAT_DEATH);
4039 } else {
4040 binder_enqueue_work_ilocked(
4041 w, &proc->delivered_death);
4042 binder_inner_proc_unlock(proc);
4043 }
4044 if (put_user(cmd, (uint32_t __user *)ptr))
4045 return -EFAULT;
4046 ptr += sizeof(uint32_t);
4047 if (put_user(cookie,
4048 (binder_uintptr_t __user *)ptr))
4049 return -EFAULT;
4050 ptr += sizeof(binder_uintptr_t);
4051 binder_stat_br(proc, thread, cmd);
4052 if (cmd == BR_DEAD_BINDER)
4053 goto done; /* DEAD_BINDER notifications can cause transactions */
4054 } break;
4055 default:
4056 binder_inner_proc_unlock(proc);
4057 pr_err("%d:%d: bad work type %d\n",
4058 proc->pid, thread->pid, w->type);
4059 break;
4060 }
4061
4062 if (!t)
4063 continue;
4064
4065 BUG_ON(t->buffer == NULL);
4066 if (t->buffer->target_node) {
4067 struct binder_node *target_node = t->buffer->target_node;
4068
4069 trd->target.ptr = target_node->ptr;
4070 trd->cookie = target_node->cookie;
4071 t->saved_priority = task_nice(current);
4072 if (t->priority < target_node->min_priority &&
4073 !(t->flags & TF_ONE_WAY))
4074 binder_set_nice(t->priority);
4075 else if (!(t->flags & TF_ONE_WAY) ||
4076 t->saved_priority > target_node->min_priority)
4077 binder_set_nice(target_node->min_priority);
4078 cmd = BR_TRANSACTION;
4079 } else {
4080 trd->target.ptr = 0;
4081 trd->cookie = 0;
4082 cmd = BR_REPLY;
4083 }
4084 trd->code = t->code;
4085 trd->flags = t->flags;
4086 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4087
4088 t_from = binder_get_txn_from(t);
4089 if (t_from) {
4090 struct task_struct *sender = t_from->proc->tsk;
4091
4092 trd->sender_pid =
4093 task_tgid_nr_ns(sender,
4094 task_active_pid_ns(current));
4095 } else {
4096 trd->sender_pid = 0;
4097 }
4098
4099 ret = binder_apply_fd_fixups(proc, t);
4100 if (ret) {
4101 struct binder_buffer *buffer = t->buffer;
4102 bool oneway = !!(t->flags & TF_ONE_WAY);
4103 int tid = t->debug_id;
4104
4105 if (t_from)
4106 binder_thread_dec_tmpref(t_from);
4107 buffer->transaction = NULL;
4108 binder_cleanup_transaction(t, "fd fixups failed",
4109 BR_FAILED_REPLY);
4110 binder_free_buf(proc, buffer);
4111 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4112 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4113 proc->pid, thread->pid,
4114 oneway ? "async " :
4115 (cmd == BR_REPLY ? "reply " : ""),
4116 tid, BR_FAILED_REPLY, ret, __LINE__);
4117 if (cmd == BR_REPLY) {
4118 cmd = BR_FAILED_REPLY;
4119 if (put_user(cmd, (uint32_t __user *)ptr))
4120 return -EFAULT;
4121 ptr += sizeof(uint32_t);
4122 binder_stat_br(proc, thread, cmd);
4123 break;
4124 }
4125 continue;
4126 }
4127 trd->data_size = t->buffer->data_size;
4128 trd->offsets_size = t->buffer->offsets_size;
4129 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4130 trd->data.ptr.offsets = trd->data.ptr.buffer +
4131 ALIGN(t->buffer->data_size,
4132 sizeof(void *));
4133
4134 tr.secctx = t->security_ctx;
4135 if (t->security_ctx) {
4136 cmd = BR_TRANSACTION_SEC_CTX;
4137 trsize = sizeof(tr);
4138 }
4139 if (put_user(cmd, (uint32_t __user *)ptr)) {
4140 if (t_from)
4141 binder_thread_dec_tmpref(t_from);
4142
4143 binder_cleanup_transaction(t, "put_user failed",
4144 BR_FAILED_REPLY);
4145
4146 return -EFAULT;
4147 }
4148 ptr += sizeof(uint32_t);
4149 if (copy_to_user(ptr, &tr, trsize)) {
4150 if (t_from)
4151 binder_thread_dec_tmpref(t_from);
4152
4153 binder_cleanup_transaction(t, "copy_to_user failed",
4154 BR_FAILED_REPLY);
4155
4156 return -EFAULT;
4157 }
4158 ptr += trsize;
4159
4160 trace_binder_transaction_received(t);
4161 binder_stat_br(proc, thread, cmd);
4162 binder_debug(BINDER_DEBUG_TRANSACTION,
4163 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4164 proc->pid, thread->pid,
4165 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4166 (cmd == BR_TRANSACTION_SEC_CTX) ?
4167 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4168 t->debug_id, t_from ? t_from->proc->pid : 0,
4169 t_from ? t_from->pid : 0, cmd,
4170 t->buffer->data_size, t->buffer->offsets_size,
4171 (u64)trd->data.ptr.buffer,
4172 (u64)trd->data.ptr.offsets);
4173
4174 if (t_from)
4175 binder_thread_dec_tmpref(t_from);
4176 t->buffer->allow_user_free = 1;
4177 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4178 binder_inner_proc_lock(thread->proc);
4179 t->to_parent = thread->transaction_stack;
4180 t->to_thread = thread;
4181 thread->transaction_stack = t;
4182 binder_inner_proc_unlock(thread->proc);
4183 } else {
4184 binder_free_transaction(t);
4185 }
4186 break;
4187 }
4188
4189 done:
4190
4191 *consumed = ptr - buffer;
4192 binder_inner_proc_lock(proc);
4193 if (proc->requested_threads == 0 &&
4194 list_empty(&thread->proc->waiting_threads) &&
4195 proc->requested_threads_started < proc->max_threads &&
4196 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4197 BINDER_LOOPER_STATE_ENTERED))
4198 /* the user-space code fails to spawn a new thread if we leave this out */) {
4199 proc->requested_threads++;
4200 binder_inner_proc_unlock(proc);
4201 binder_debug(BINDER_DEBUG_THREADS,
4202 "%d:%d BR_SPAWN_LOOPER\n",
4203 proc->pid, thread->pid);
4204 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4205 return -EFAULT;
4206 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4207 } else
4208 binder_inner_proc_unlock(proc);
4209 return 0;
4210 }
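/*
* A hedged sketch (illustrative only) of the read-side framing that
* binder_thread_read() produces for userspace to parse:
*
*     BR_NOOP                                  (only when *consumed == 0)
*     BR_TRANSACTION / BR_TRANSACTION_SEC_CTX / BR_REPLY
*     struct binder_transaction_data[_secctx]
*     ... further u32 commands with their payloads ...
*
* Note that BR_SPAWN_LOOPER is put_user()'d at "buffer", i.e. into the
* leading BR_NOOP slot, rather than appended at "ptr".
*/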
4211
4212 static void binder_release_work(struct binder_proc *proc,
4213 struct list_head *list)
4214 {
4215 struct binder_work *w;
4216 enum binder_work_type wtype;
4217
4218 while (1) {
4219 binder_inner_proc_lock(proc);
4220 w = binder_dequeue_work_head_ilocked(list);
4221 wtype = w ? w->type : 0;
4222 binder_inner_proc_unlock(proc);
4223 if (!w)
4224 return;
4225
4226 switch (wtype) {
4227 case BINDER_WORK_TRANSACTION: {
4228 struct binder_transaction *t;
4229
4230 t = container_of(w, struct binder_transaction, work);
4231
4232 binder_cleanup_transaction(t, "process died.",
4233 BR_DEAD_REPLY);
4234 } break;
4235 case BINDER_WORK_RETURN_ERROR: {
4236 struct binder_error *e = container_of(
4237 w, struct binder_error, work);
4238
4239 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4240 "undelivered TRANSACTION_ERROR: %u\n",
4241 e->cmd);
4242 } break;
4243 case BINDER_WORK_TRANSACTION_COMPLETE: {
4244 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4245 "undelivered TRANSACTION_COMPLETE\n");
4246 kfree(w);
4247 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4248 } break;
4249 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4250 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4251 struct binder_ref_death *death;
4252
4253 death = container_of(w, struct binder_ref_death, work);
4254 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4255 "undelivered death notification, %016llx\n",
4256 (u64)death->cookie);
4257 kfree(death);
4258 binder_stats_deleted(BINDER_STAT_DEATH);
4259 } break;
4260 case BINDER_WORK_NODE:
4261 break;
4262 default:
4263 pr_err("unexpected work type, %d, not freed\n",
4264 wtype);
4265 break;
4266 }
4267 }
4268
4269 }
4270
4271 static struct binder_thread *binder_get_thread_ilocked(
4272 struct binder_proc *proc, struct binder_thread *new_thread)
4273 {
4274 struct binder_thread *thread = NULL;
4275 struct rb_node *parent = NULL;
4276 struct rb_node **p = &proc->threads.rb_node;
4277
4278 while (*p) {
4279 parent = *p;
4280 thread = rb_entry(parent, struct binder_thread, rb_node);
4281
4282 if (current->pid < thread->pid)
4283 p = &(*p)->rb_left;
4284 else if (current->pid > thread->pid)
4285 p = &(*p)->rb_right;
4286 else
4287 return thread;
4288 }
4289 if (!new_thread)
4290 return NULL;
4291 thread = new_thread;
4292 binder_stats_created(BINDER_STAT_THREAD);
4293 thread->proc = proc;
4294 thread->pid = current->pid;
4295 atomic_set(&thread->tmp_ref, 0);
4296 init_waitqueue_head(&thread->wait);
4297 INIT_LIST_HEAD(&thread->todo);
4298 rb_link_node(&thread->rb_node, parent, p);
4299 rb_insert_color(&thread->rb_node, &proc->threads);
4300 thread->looper_need_return = true;
4301 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4302 thread->return_error.cmd = BR_OK;
4303 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4304 thread->reply_error.cmd = BR_OK;
4305 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4306 return thread;
4307 }
4308
4309 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4310 {
4311 struct binder_thread *thread;
4312 struct binder_thread *new_thread;
4313
4314 binder_inner_proc_lock(proc);
4315 thread = binder_get_thread_ilocked(proc, NULL);
4316 binder_inner_proc_unlock(proc);
4317 if (!thread) {
4318 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4319 if (new_thread == NULL)
4320 return NULL;
4321 binder_inner_proc_lock(proc);
4322 thread = binder_get_thread_ilocked(proc, new_thread);
4323 binder_inner_proc_unlock(proc);
4324 if (thread != new_thread)
4325 kfree(new_thread);
4326 }
4327 return thread;
4328 }
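/*
* binder_get_thread() uses the classic alloc-outside-lock pattern:
* look up under the inner lock; on a miss, allocate with GFP_KERNEL
* (which may sleep and so cannot happen under the spinlock), then
* retry the lookup-or-insert under the lock and free the allocation if
* another thread raced in first. A condensed sketch, with hypothetical
* lookup_locked()/insert_locked() helpers standing in for
* binder_get_thread_ilocked():
*
*     obj = lookup_locked(key);           // lock/unlock inside
*     if (!obj) {
*         new = kzalloc(sizeof(*new), GFP_KERNEL);
*         obj = insert_locked(key, new);  // returns existing on race
*         if (obj != new)
*             kfree(new);
*     }
*/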
4329
4330 static void binder_free_proc(struct binder_proc *proc)
4331 {
4332 struct binder_device *device;
4333
4334 BUG_ON(!list_empty(&proc->todo));
4335 BUG_ON(!list_empty(&proc->delivered_death));
4336 if (proc->outstanding_txns)
4337 pr_warn("%s: Unexpected outstanding_txns %d\n",
4338 __func__, proc->outstanding_txns);
4339 device = container_of(proc->context, struct binder_device, context);
4340 if (refcount_dec_and_test(&device->ref)) {
4341 kfree(proc->context->name);
4342 kfree(device);
4343 }
4344 binder_alloc_deferred_release(&proc->alloc);
4345 put_task_struct(proc->tsk);
4346 binder_stats_deleted(BINDER_STAT_PROC);
4347 kfree(proc);
4348 }
4349
4350 static void binder_free_thread(struct binder_thread *thread)
4351 {
4352 BUG_ON(!list_empty(&thread->todo));
4353 binder_stats_deleted(BINDER_STAT_THREAD);
4354 binder_proc_dec_tmpref(thread->proc);
4355 kfree(thread);
4356 }
4357
4358 static int binder_thread_release(struct binder_proc *proc,
4359 struct binder_thread *thread)
4360 {
4361 struct binder_transaction *t;
4362 struct binder_transaction *send_reply = NULL;
4363 int active_transactions = 0;
4364 struct binder_transaction *last_t = NULL;
4365
4366 binder_inner_proc_lock(thread->proc);
4367 /*
4368 * take a ref on the proc so it survives
4369 * after we remove this thread from proc->threads.
4370 * The corresponding dec is when we actually
4371 * free the thread in binder_free_thread()
4372 */
4373 proc->tmp_ref++;
4374 /*
4375 * take a ref on this thread to ensure it
4376 * survives while we are releasing it
4377 */
4378 atomic_inc(&thread->tmp_ref);
4379 rb_erase(&thread->rb_node, &proc->threads);
4380 t = thread->transaction_stack;
4381 if (t) {
4382 spin_lock(&t->lock);
4383 if (t->to_thread == thread)
4384 send_reply = t;
4385 } else {
4386 __acquire(&t->lock);
4387 }
4388 thread->is_dead = true;
4389
4390 while (t) {
4391 last_t = t;
4392 active_transactions++;
4393 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4394 "release %d:%d transaction %d %s, still active\n",
4395 proc->pid, thread->pid,
4396 t->debug_id,
4397 (t->to_thread == thread) ? "in" : "out");
4398
4399 if (t->to_thread == thread) {
4400 thread->proc->outstanding_txns--;
4401 t->to_proc = NULL;
4402 t->to_thread = NULL;
4403 if (t->buffer) {
4404 t->buffer->transaction = NULL;
4405 t->buffer = NULL;
4406 }
4407 t = t->to_parent;
4408 } else if (t->from == thread) {
4409 t->from = NULL;
4410 t = t->from_parent;
4411 } else
4412 BUG();
4413 spin_unlock(&last_t->lock);
4414 if (t)
4415 spin_lock(&t->lock);
4416 else
4417 __acquire(&t->lock);
4418 }
4419 /* annotation for sparse, lock not acquired in last iteration above */
4420 __release(&t->lock);
4421
4422 /*
4423 * If this thread used poll, make sure we remove the waitqueue
4424 * from any epoll data structures holding it with POLLFREE.
4425 * waitqueue_active() is safe to use here because we're holding
4426 * the inner lock.
4427 */
4428 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4429 waitqueue_active(&thread->wait)) {
4430 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4431 }
4432
4433 binder_inner_proc_unlock(thread->proc);
4434
4435 /*
4436 * This is needed to avoid races between wake_up_poll() above and
4437 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4438 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4439 * lock, so we can be sure it's done after calling synchronize_rcu().
4440 */
4441 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4442 synchronize_rcu();
4443
4444 if (send_reply)
4445 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4446 binder_release_work(proc, &thread->todo);
4447 binder_thread_dec_tmpref(thread);
4448 return active_transactions;
4449 }
4450
4451 static __poll_t binder_poll(struct file *filp,
4452 struct poll_table_struct *wait)
4453 {
4454 struct binder_proc *proc = filp->private_data;
4455 struct binder_thread *thread = NULL;
4456 bool wait_for_proc_work;
4457
4458 thread = binder_get_thread(proc);
4459 if (!thread)
4460 return POLLERR;
4461
4462 binder_inner_proc_lock(thread->proc);
4463 thread->looper |= BINDER_LOOPER_STATE_POLL;
4464 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4465
4466 binder_inner_proc_unlock(thread->proc);
4467
4468 poll_wait(filp, &thread->wait, wait);
4469
4470 if (binder_has_work(thread, wait_for_proc_work))
4471 return EPOLLIN;
4472
4473 return 0;
4474 }
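/*
* Illustrative, non-authoritative userspace use of the poll hook,
* assuming binder_fd is an open binder descriptor:
*
*     struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
*
*     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
*         // work is pending; drain it with a BINDER_WRITE_READ
*         // whose read_size is non-zero.
*     }
*/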
4475
4476 static int binder_ioctl_write_read(struct file *filp,
4477 unsigned int cmd, unsigned long arg,
4478 struct binder_thread *thread)
4479 {
4480 int ret = 0;
4481 struct binder_proc *proc = filp->private_data;
4482 unsigned int size = _IOC_SIZE(cmd);
4483 void __user *ubuf = (void __user *)arg;
4484 struct binder_write_read bwr;
4485
4486 if (size != sizeof(struct binder_write_read)) {
4487 ret = -EINVAL;
4488 goto out;
4489 }
4490 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4491 ret = -EFAULT;
4492 goto out;
4493 }
4494 binder_debug(BINDER_DEBUG_READ_WRITE,
4495 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4496 proc->pid, thread->pid,
4497 (u64)bwr.write_size, (u64)bwr.write_buffer,
4498 (u64)bwr.read_size, (u64)bwr.read_buffer);
4499
4500 if (bwr.write_size > 0) {
4501 ret = binder_thread_write(proc, thread,
4502 bwr.write_buffer,
4503 bwr.write_size,
4504 &bwr.write_consumed);
4505 trace_binder_write_done(ret);
4506 if (ret < 0) {
4507 bwr.read_consumed = 0;
4508 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4509 ret = -EFAULT;
4510 goto out;
4511 }
4512 }
4513 if (bwr.read_size > 0) {
4514 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4515 bwr.read_size,
4516 &bwr.read_consumed,
4517 filp->f_flags & O_NONBLOCK);
4518 trace_binder_read_done(ret);
4519 binder_inner_proc_lock(proc);
4520 if (!binder_worklist_empty_ilocked(&proc->todo))
4521 binder_wakeup_proc_ilocked(proc);
4522 binder_inner_proc_unlock(proc);
4523 if (ret < 0) {
4524 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4525 ret = -EFAULT;
4526 goto out;
4527 }
4528 }
4529 binder_debug(BINDER_DEBUG_READ_WRITE,
4530 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4531 proc->pid, thread->pid,
4532 (u64)bwr.write_consumed, (u64)bwr.write_size,
4533 (u64)bwr.read_consumed, (u64)bwr.read_size);
4534 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4535 ret = -EFAULT;
4536 goto out;
4537 }
4538 out:
4539 return ret;
4540 }
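/*
* A minimal sketch of driving binder_ioctl_write_read() from userspace
* (illustrative, error handling omitted, "fd" assumed open on a binder
* device):
*
*     char readbuf[256];
*     struct binder_write_read bwr = {
*         .read_buffer = (binder_uintptr_t)readbuf,
*         .read_size = sizeof(readbuf),
*     };
*
*     ioctl(fd, BINDER_WRITE_READ, &bwr);
*     // bwr.read_consumed reports how much the kernel filled in.
*
* Note the error paths above: a failed write zeroes read_consumed and
* copies bwr back, so callers never interpret stale read results.
*/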
4541
4542 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4543 struct flat_binder_object *fbo)
4544 {
4545 int ret = 0;
4546 struct binder_proc *proc = filp->private_data;
4547 struct binder_context *context = proc->context;
4548 struct binder_node *new_node;
4549 kuid_t curr_euid = current_euid();
4550
4551 mutex_lock(&context->context_mgr_node_lock);
4552 if (context->binder_context_mgr_node) {
4553 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4554 ret = -EBUSY;
4555 goto out;
4556 }
4557 ret = security_binder_set_context_mgr(proc->tsk);
4558 if (ret < 0)
4559 goto out;
4560 if (uid_valid(context->binder_context_mgr_uid)) {
4561 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4562 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4563 from_kuid(&init_user_ns, curr_euid),
4564 from_kuid(&init_user_ns,
4565 context->binder_context_mgr_uid));
4566 ret = -EPERM;
4567 goto out;
4568 }
4569 } else {
4570 context->binder_context_mgr_uid = curr_euid;
4571 }
4572 new_node = binder_new_node(proc, fbo);
4573 if (!new_node) {
4574 ret = -ENOMEM;
4575 goto out;
4576 }
4577 binder_node_lock(new_node);
4578 new_node->local_weak_refs++;
4579 new_node->local_strong_refs++;
4580 new_node->has_strong_ref = 1;
4581 new_node->has_weak_ref = 1;
4582 context->binder_context_mgr_node = new_node;
4583 binder_node_unlock(new_node);
4584 binder_put_node(new_node);
4585 out:
4586 mutex_unlock(&context->context_mgr_node_lock);
4587 return ret;
4588 }
4589
4590 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4591 struct binder_node_info_for_ref *info)
4592 {
4593 struct binder_node *node;
4594 struct binder_context *context = proc->context;
4595 __u32 handle = info->handle;
4596
4597 if (info->strong_count || info->weak_count || info->reserved1 ||
4598 info->reserved2 || info->reserved3) {
4599 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4600 proc->pid);
4601 return -EINVAL;
4602 }
4603
4604 /* This ioctl may only be used by the context manager */
4605 mutex_lock(&context->context_mgr_node_lock);
4606 if (!context->binder_context_mgr_node ||
4607 context->binder_context_mgr_node->proc != proc) {
4608 mutex_unlock(&context->context_mgr_node_lock);
4609 return -EPERM;
4610 }
4611 mutex_unlock(&context->context_mgr_node_lock);
4612
4613 node = binder_get_node_from_ref(proc, handle, true, NULL);
4614 if (!node)
4615 return -EINVAL;
4616
4617 info->strong_count = node->local_strong_refs +
4618 node->internal_strong_refs;
4619 info->weak_count = node->local_weak_refs;
4620
4621 binder_put_node(node);
4622
4623 return 0;
4624 }
4625
4626 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4627 struct binder_node_debug_info *info)
4628 {
4629 struct rb_node *n;
4630 binder_uintptr_t ptr = info->ptr;
4631
4632 memset(info, 0, sizeof(*info));
4633
4634 binder_inner_proc_lock(proc);
4635 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4636 struct binder_node *node = rb_entry(n, struct binder_node,
4637 rb_node);
4638 if (node->ptr > ptr) {
4639 info->ptr = node->ptr;
4640 info->cookie = node->cookie;
4641 info->has_strong_ref = node->has_strong_ref;
4642 info->has_weak_ref = node->has_weak_ref;
4643 break;
4644 }
4645 }
4646 binder_inner_proc_unlock(proc);
4647
4648 return 0;
4649 }
4650
4651 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4652 struct binder_proc *target_proc)
4653 {
4654 int ret = 0;
4655
4656 if (!info->enable) {
4657 binder_inner_proc_lock(target_proc);
4658 target_proc->sync_recv = false;
4659 target_proc->async_recv = false;
4660 target_proc->is_frozen = false;
4661 binder_inner_proc_unlock(target_proc);
4662 return 0;
4663 }
4664
4665 /*
4666 * Freezing the target. Prevent new transactions by
4667 * setting frozen state. If timeout specified, wait
4668 * for transactions to drain.
4669 */
4670 binder_inner_proc_lock(target_proc);
4671 target_proc->sync_recv = false;
4672 target_proc->async_recv = false;
4673 target_proc->is_frozen = true;
4674 binder_inner_proc_unlock(target_proc);
4675
4676 if (info->timeout_ms > 0)
4677 ret = wait_event_interruptible_timeout(
4678 target_proc->freeze_wait,
4679 (!target_proc->outstanding_txns),
4680 msecs_to_jiffies(info->timeout_ms));
4681
4682 if (!ret && target_proc->outstanding_txns)
4683 ret = -EAGAIN;
4684
4685 if (ret < 0) {
4686 binder_inner_proc_lock(target_proc);
4687 target_proc->is_frozen = false;
4688 binder_inner_proc_unlock(target_proc);
4689 }
4690
4691 return ret;
4692 }
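/*
* Hedged usage sketch for the freeze path (illustrative only): a
* manager process freezes a hypothetical target_pid and handles
* -EAGAIN, which here means transactions were still outstanding when
* the timeout expired (the kernel has already cleared is_frozen on
* that error path):
*
*     struct binder_freeze_info info = {
*         .pid = target_pid,
*         .enable = 1,
*         .timeout_ms = 100,
*     };
*
*     if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 &&
*         errno == EAGAIN) {
*         // retry later or report the target as busy
*     }
*/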
4693
4694 static int binder_ioctl_get_freezer_info(
4695 struct binder_frozen_status_info *info)
4696 {
4697 struct binder_proc *target_proc;
4698 bool found = false;
4699
4700 info->sync_recv = 0;
4701 info->async_recv = 0;
4702
4703 mutex_lock(&binder_procs_lock);
4704 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4705 if (target_proc->pid == info->pid) {
4706 found = true;
4707 binder_inner_proc_lock(target_proc);
4708 info->sync_recv |= target_proc->sync_recv;
4709 info->async_recv |= target_proc->async_recv;
4710 binder_inner_proc_unlock(target_proc);
4711 }
4712 }
4713 mutex_unlock(&binder_procs_lock);
4714
4715 if (!found)
4716 return -EINVAL;
4717
4718 return 0;
4719 }
4720
4721 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4722 {
4723 int ret;
4724 struct binder_proc *proc = filp->private_data;
4725 struct binder_thread *thread;
4726 unsigned int size = _IOC_SIZE(cmd);
4727 void __user *ubuf = (void __user *)arg;
4728
4729 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4730 proc->pid, current->pid, cmd, arg);*/
4731
4732 binder_selftest_alloc(&proc->alloc);
4733
4734 trace_binder_ioctl(cmd, arg);
4735
4736 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4737 if (ret)
4738 goto err_unlocked;
4739
4740 thread = binder_get_thread(proc);
4741 if (thread == NULL) {
4742 ret = -ENOMEM;
4743 goto err;
4744 }
4745
4746 switch (cmd) {
4747 case BINDER_WRITE_READ:
4748 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4749 if (ret)
4750 goto err;
4751 break;
4752 case BINDER_SET_MAX_THREADS: {
4753 int max_threads;
4754
4755 if (copy_from_user(&max_threads, ubuf,
4756 sizeof(max_threads))) {
4757 ret = -EINVAL;
4758 goto err;
4759 }
4760 binder_inner_proc_lock(proc);
4761 proc->max_threads = max_threads;
4762 binder_inner_proc_unlock(proc);
4763 break;
4764 }
4765 case BINDER_SET_CONTEXT_MGR_EXT: {
4766 struct flat_binder_object fbo;
4767
4768 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4769 ret = -EINVAL;
4770 goto err;
4771 }
4772 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4773 if (ret)
4774 goto err;
4775 break;
4776 }
4777 case BINDER_SET_CONTEXT_MGR:
4778 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4779 if (ret)
4780 goto err;
4781 break;
4782 case BINDER_THREAD_EXIT:
4783 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4784 proc->pid, thread->pid);
4785 binder_thread_release(proc, thread);
4786 thread = NULL;
4787 break;
4788 case BINDER_VERSION: {
4789 struct binder_version __user *ver = ubuf;
4790
4791 if (size != sizeof(struct binder_version)) {
4792 ret = -EINVAL;
4793 goto err;
4794 }
4795 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4796 &ver->protocol_version)) {
4797 ret = -EINVAL;
4798 goto err;
4799 }
4800 break;
4801 }
4802 case BINDER_GET_NODE_INFO_FOR_REF: {
4803 struct binder_node_info_for_ref info;
4804
4805 if (copy_from_user(&info, ubuf, sizeof(info))) {
4806 ret = -EFAULT;
4807 goto err;
4808 }
4809
4810 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4811 if (ret < 0)
4812 goto err;
4813
4814 if (copy_to_user(ubuf, &info, sizeof(info))) {
4815 ret = -EFAULT;
4816 goto err;
4817 }
4818
4819 break;
4820 }
4821 case BINDER_GET_NODE_DEBUG_INFO: {
4822 struct binder_node_debug_info info;
4823
4824 if (copy_from_user(&info, ubuf, sizeof(info))) {
4825 ret = -EFAULT;
4826 goto err;
4827 }
4828
4829 ret = binder_ioctl_get_node_debug_info(proc, &info);
4830 if (ret < 0)
4831 goto err;
4832
4833 if (copy_to_user(ubuf, &info, sizeof(info))) {
4834 ret = -EFAULT;
4835 goto err;
4836 }
4837 break;
4838 }
4839 case BINDER_FREEZE: {
4840 struct binder_freeze_info info;
4841 struct binder_proc **target_procs = NULL, *target_proc;
4842 int target_procs_count = 0, i = 0;
4843
4844 ret = 0;
4845
4846 if (copy_from_user(&info, ubuf, sizeof(info))) {
4847 ret = -EFAULT;
4848 goto err;
4849 }
4850
4851 mutex_lock(&binder_procs_lock);
4852 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4853 if (target_proc->pid == info.pid)
4854 target_procs_count++;
4855 }
4856
4857 if (target_procs_count == 0) {
4858 mutex_unlock(&binder_procs_lock);
4859 ret = -EINVAL;
4860 goto err;
4861 }
4862
4863 target_procs = kcalloc(target_procs_count,
4864 sizeof(struct binder_proc *),
4865 GFP_KERNEL);
4866
4867 if (!target_procs) {
4868 mutex_unlock(&binder_procs_lock);
4869 ret = -ENOMEM;
4870 goto err;
4871 }
4872
4873 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4874 if (target_proc->pid != info.pid)
4875 continue;
4876
4877 binder_inner_proc_lock(target_proc);
4878 target_proc->tmp_ref++;
4879 binder_inner_proc_unlock(target_proc);
4880
4881 target_procs[i++] = target_proc;
4882 }
4883 mutex_unlock(&binder_procs_lock);
4884
4885 for (i = 0; i < target_procs_count; i++) {
4886 if (ret >= 0)
4887 ret = binder_ioctl_freeze(&info,
4888 target_procs[i]);
4889
4890 binder_proc_dec_tmpref(target_procs[i]);
4891 }
4892
4893 kfree(target_procs);
4894
4895 if (ret < 0)
4896 goto err;
4897 break;
4898 }
4899 case BINDER_GET_FROZEN_INFO: {
4900 struct binder_frozen_status_info info;
4901
4902 if (copy_from_user(&info, ubuf, sizeof(info))) {
4903 ret = -EFAULT;
4904 goto err;
4905 }
4906
4907 ret = binder_ioctl_get_freezer_info(&info);
4908 if (ret < 0)
4909 goto err;
4910
4911 if (copy_to_user(ubuf, &info, sizeof(info))) {
4912 ret = -EFAULT;
4913 goto err;
4914 }
4915 break;
4916 }
4917 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
4918 uint32_t enable;
4919
4920 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
4921 ret = -EINVAL;
4922 goto err;
4923 }
4924 binder_inner_proc_lock(proc);
4925 proc->oneway_spam_detection_enabled = (bool)enable;
4926 binder_inner_proc_unlock(proc);
4927 break;
4928 }
4929 default:
4930 ret = -EINVAL;
4931 goto err;
4932 }
4933 ret = 0;
4934 err:
4935 if (thread)
4936 thread->looper_need_return = false;
4937 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4938 if (ret && ret != -EINTR)
4939 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4940 err_unlocked:
4941 trace_binder_ioctl_done(ret);
4942 return ret;
4943 }
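/*
* Illustrative first-contact sequence with the driver (a sketch, not
* normative): userspace typically validates the protocol version right
* after open() and before any mmap or transaction traffic:
*
*     struct binder_version vers;
*     int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
*
*     if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0 ||
*         vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
*         // version mismatch: refuse to talk to this kernel
*     }
*/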
4944
4945 static void binder_vma_open(struct vm_area_struct *vma)
4946 {
4947 struct binder_proc *proc = vma->vm_private_data;
4948
4949 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4950 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4951 proc->pid, vma->vm_start, vma->vm_end,
4952 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4953 (unsigned long)pgprot_val(vma->vm_page_prot));
4954 }
4955
4956 static void binder_vma_close(struct vm_area_struct *vma)
4957 {
4958 struct binder_proc *proc = vma->vm_private_data;
4959
4960 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4961 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4962 proc->pid, vma->vm_start, vma->vm_end,
4963 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4964 (unsigned long)pgprot_val(vma->vm_page_prot));
4965 binder_alloc_vma_close(&proc->alloc);
4966 }
4967
4968 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4969 {
4970 return VM_FAULT_SIGBUS;
4971 }
4972
4973 static const struct vm_operations_struct binder_vm_ops = {
4974 .open = binder_vma_open,
4975 .close = binder_vma_close,
4976 .fault = binder_vm_fault,
4977 };
4978
4979 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4980 {
4981 struct binder_proc *proc = filp->private_data;
4982
4983 if (proc->tsk != current->group_leader)
4984 return -EINVAL;
4985
4986 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4987 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4988 __func__, proc->pid, vma->vm_start, vma->vm_end,
4989 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4990 (unsigned long)pgprot_val(vma->vm_page_prot));
4991
4992 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4993 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4994 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
4995 return -EPERM;
4996 }
4997 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4998 vma->vm_flags &= ~VM_MAYWRITE;
4999
5000 vma->vm_ops = &binder_vm_ops;
5001 vma->vm_private_data = proc;
5002
5003 return binder_alloc_mmap_handler(&proc->alloc, vma);
5004 }
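/*
* Sketch of the corresponding userspace setup (illustrative): the
* mapping must not be writable, since FORBIDDEN_MMAP_FLAGS rejects
* VM_WRITE and the kernel also clears VM_MAYWRITE so the region cannot
* later be mprotect()ed writable:
*
*     void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
*
* (map_size is a caller-chosen size; the kernel fills the buffers in
* during transactions.)
*/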
5005
5006 static int binder_open(struct inode *nodp, struct file *filp)
5007 {
5008 struct binder_proc *proc, *itr;
5009 struct binder_device *binder_dev;
5010 struct binderfs_info *info;
5011 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5012 bool existing_pid = false;
5013
5014 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5015 current->group_leader->pid, current->pid);
5016
5017 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5018 if (proc == NULL)
5019 return -ENOMEM;
5020 spin_lock_init(&proc->inner_lock);
5021 spin_lock_init(&proc->outer_lock);
5022 get_task_struct(current->group_leader);
5023 proc->tsk = current->group_leader;
5024 INIT_LIST_HEAD(&proc->todo);
5025 init_waitqueue_head(&proc->freeze_wait);
5026 proc->default_priority = task_nice(current);
5027 /* binderfs stashes devices in i_private */
5028 if (is_binderfs_device(nodp)) {
5029 binder_dev = nodp->i_private;
5030 info = nodp->i_sb->s_fs_info;
5031 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5032 } else {
5033 binder_dev = container_of(filp->private_data,
5034 struct binder_device, miscdev);
5035 }
5036 refcount_inc(&binder_dev->ref);
5037 proc->context = &binder_dev->context;
5038 binder_alloc_init(&proc->alloc);
5039
5040 binder_stats_created(BINDER_STAT_PROC);
5041 proc->pid = current->group_leader->pid;
5042 INIT_LIST_HEAD(&proc->delivered_death);
5043 INIT_LIST_HEAD(&proc->waiting_threads);
5044 filp->private_data = proc;
5045
5046 mutex_lock(&binder_procs_lock);
5047 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5048 if (itr->pid == proc->pid) {
5049 existing_pid = true;
5050 break;
5051 }
5052 }
5053 hlist_add_head(&proc->proc_node, &binder_procs);
5054 mutex_unlock(&binder_procs_lock);
5055
5056 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5057 char strbuf[11];
5058
5059 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5060 /*
5061 * proc debug entries are shared between contexts.
5062 * Only create for the first PID to avoid debugfs log spamming.
5063 * The printing code will anyway print all contexts for a given
5064 * PID so this is not a problem.
5065 */
5066 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5067 binder_debugfs_dir_entry_proc,
5068 (void *)(unsigned long)proc->pid,
5069 &proc_fops);
5070 }
5071
5072 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5073 char strbuf[11];
5074 struct dentry *binderfs_entry;
5075
5076 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5077 /*
5078 * Similar to debugfs, the process specific log file is shared
5079 * between contexts. Only create for the first PID.
5080 * This is ok since same as debugfs, the log file will contain
5081 * information on all contexts of a given PID.
5082 */
5083 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5084 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5085 if (!IS_ERR(binderfs_entry)) {
5086 proc->binderfs_entry = binderfs_entry;
5087 } else {
5088 int error;
5089
5090 error = PTR_ERR(binderfs_entry);
5091 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5092 strbuf, error);
5093 }
5094 }
5095
5096 return 0;
5097 }
5098
5099 static int binder_flush(struct file *filp, fl_owner_t id)
5100 {
5101 struct binder_proc *proc = filp->private_data;
5102
5103 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5104
5105 return 0;
5106 }
5107
5108 static void binder_deferred_flush(struct binder_proc *proc)
5109 {
5110 struct rb_node *n;
5111 int wake_count = 0;
5112
5113 binder_inner_proc_lock(proc);
5114 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5115 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5116
5117 thread->looper_need_return = true;
5118 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5119 wake_up_interruptible(&thread->wait);
5120 wake_count++;
5121 }
5122 }
5123 binder_inner_proc_unlock(proc);
5124
5125 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5126 "binder_flush: %d woke %d threads\n", proc->pid,
5127 wake_count);
5128 }
5129
5130 static int binder_release(struct inode *nodp, struct file *filp)
5131 {
5132 struct binder_proc *proc = filp->private_data;
5133
5134 debugfs_remove(proc->debugfs_entry);
5135
5136 if (proc->binderfs_entry) {
5137 binderfs_remove_file(proc->binderfs_entry);
5138 proc->binderfs_entry = NULL;
5139 }
5140
5141 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5142
5143 return 0;
5144 }
5145
5146 static int binder_node_release(struct binder_node *node, int refs)
5147 {
5148 struct binder_ref *ref;
5149 int death = 0;
5150 struct binder_proc *proc = node->proc;
5151
5152 binder_release_work(proc, &node->async_todo);
5153
5154 binder_node_lock(node);
5155 binder_inner_proc_lock(proc);
5156 binder_dequeue_work_ilocked(&node->work);
5157 /*
5158 * The caller must have taken a temporary ref on the node.
5159 */
5160 BUG_ON(!node->tmp_refs);
5161 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5162 binder_inner_proc_unlock(proc);
5163 binder_node_unlock(node);
5164 binder_free_node(node);
5165
5166 return refs;
5167 }
5168
5169 node->proc = NULL;
5170 node->local_strong_refs = 0;
5171 node->local_weak_refs = 0;
5172 binder_inner_proc_unlock(proc);
5173
5174 spin_lock(&binder_dead_nodes_lock);
5175 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5176 spin_unlock(&binder_dead_nodes_lock);
5177
5178 hlist_for_each_entry(ref, &node->refs, node_entry) {
5179 refs++;
5180 /*
5181 * Need the node lock to synchronize
5182 * with new notification requests and the
5183 * inner lock to synchronize with queued
5184 * death notifications.
5185 */
5186 binder_inner_proc_lock(ref->proc);
5187 if (!ref->death) {
5188 binder_inner_proc_unlock(ref->proc);
5189 continue;
5190 }
5191
5192 death++;
5193
5194 BUG_ON(!list_empty(&ref->death->work.entry));
5195 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5196 binder_enqueue_work_ilocked(&ref->death->work,
5197 &ref->proc->todo);
5198 binder_wakeup_proc_ilocked(ref->proc);
5199 binder_inner_proc_unlock(ref->proc);
5200 }
5201
5202 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5203 "node %d now dead, refs %d, death %d\n",
5204 node->debug_id, refs, death);
5205 binder_node_unlock(node);
5206 binder_put_node(node);
5207
5208 return refs;
5209 }
5210
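/*
 * binder_deferred_release() - tear down a dead binder_proc.
 * Runs from the workqueue after the last fd is released: unlinks the
 * proc, drops a context-manager node it may own, then releases
 * threads, nodes, outgoing refs and any queued work before dropping
 * the final temporary reference that frees the proc itself.
 */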
5211 static void binder_deferred_release(struct binder_proc *proc)
5212 {
5213 struct binder_context *context = proc->context;
5214 struct rb_node *n;
5215 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5216
5217 mutex_lock(&binder_procs_lock);
5218 hlist_del(&proc->proc_node);
5219 mutex_unlock(&binder_procs_lock);
5220
5221 mutex_lock(&context->context_mgr_node_lock);
5222 if (context->binder_context_mgr_node &&
5223 context->binder_context_mgr_node->proc == proc) {
5224 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5225 "%s: %d context_mgr_node gone\n",
5226 __func__, proc->pid);
5227 context->binder_context_mgr_node = NULL;
5228 }
5229 mutex_unlock(&context->context_mgr_node_lock);
5230 binder_inner_proc_lock(proc);
5231 /*
5232 * Make sure proc stays alive after we
5233 * remove all the threads
5234 */
5235 proc->tmp_ref++;
5236
5237 proc->is_dead = true;
5238 proc->is_frozen = false;
5239 proc->sync_recv = false;
5240 proc->async_recv = false;
5241 threads = 0;
5242 active_transactions = 0;
5243 while ((n = rb_first(&proc->threads))) {
5244 struct binder_thread *thread;
5245
5246 thread = rb_entry(n, struct binder_thread, rb_node);
5247 binder_inner_proc_unlock(proc);
5248 threads++;
5249 active_transactions += binder_thread_release(proc, thread);
5250 binder_inner_proc_lock(proc);
5251 }
5252
5253 nodes = 0;
5254 incoming_refs = 0;
5255 while ((n = rb_first(&proc->nodes))) {
5256 struct binder_node *node;
5257
5258 node = rb_entry(n, struct binder_node, rb_node);
5259 nodes++;
5260 /*
5261 * take a temporary ref on the node before
5262 * calling binder_node_release() which will either
5263 * kfree() the node or call binder_put_node()
5264 */
5265 binder_inc_node_tmpref_ilocked(node);
5266 rb_erase(&node->rb_node, &proc->nodes);
5267 binder_inner_proc_unlock(proc);
5268 incoming_refs = binder_node_release(node, incoming_refs);
5269 binder_inner_proc_lock(proc);
5270 }
5271 binder_inner_proc_unlock(proc);
5272
5273 outgoing_refs = 0;
5274 binder_proc_lock(proc);
5275 while ((n = rb_first(&proc->refs_by_desc))) {
5276 struct binder_ref *ref;
5277
5278 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5279 outgoing_refs++;
5280 binder_cleanup_ref_olocked(ref);
5281 binder_proc_unlock(proc);
5282 binder_free_ref(ref);
5283 binder_proc_lock(proc);
5284 }
5285 binder_proc_unlock(proc);
5286
5287 binder_release_work(proc, &proc->todo);
5288 binder_release_work(proc, &proc->delivered_death);
5289
5290 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5291 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5292 __func__, proc->pid, threads, nodes, incoming_refs,
5293 outgoing_refs, active_transactions);
5294
5295 binder_proc_dec_tmpref(proc);
5296 }
5297
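/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list.
 * Pops one proc at a time under binder_deferred_lock and services its
 * accumulated BINDER_DEFERRED_* bits; looping until the list is empty
 * lets a single schedule_work() cover any number of queued procs.
 */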
5298 static void binder_deferred_func(struct work_struct *work)
5299 {
5300 struct binder_proc *proc;
5301
5302 int defer;
5303
5304 do {
5305 mutex_lock(&binder_deferred_lock);
5306 if (!hlist_empty(&binder_deferred_list)) {
5307 proc = hlist_entry(binder_deferred_list.first,
5308 struct binder_proc, deferred_work_node);
5309 hlist_del_init(&proc->deferred_work_node);
5310 defer = proc->deferred_work;
5311 proc->deferred_work = 0;
5312 } else {
5313 proc = NULL;
5314 defer = 0;
5315 }
5316 mutex_unlock(&binder_deferred_lock);
5317
5318 if (defer & BINDER_DEFERRED_FLUSH)
5319 binder_deferred_flush(proc);
5320
5321 if (defer & BINDER_DEFERRED_RELEASE)
5322 binder_deferred_release(proc); /* frees proc */
5323 } while (proc);
5324 }
5325 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5326
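/*
 * binder_defer_work() - queue deferred work bits for @proc.
 * Multiple calls before the worker runs simply OR their flags together;
 * binder_flush() and binder_release() above both funnel through here:
 *
 *   binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 *   binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
 */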
5327 static void
5328 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5329 {
5330 mutex_lock(&binder_deferred_lock);
5331 proc->deferred_work |= defer;
5332 if (hlist_unhashed(&proc->deferred_work_node)) {
5333 hlist_add_head(&proc->deferred_work_node,
5334 &binder_deferred_list);
5335 schedule_work(&binder_deferred_work);
5336 }
5337 mutex_unlock(&binder_deferred_lock);
5338 }
5339
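/*
 * print_binder_transaction_ilocked() - dump one transaction line.
 * The transaction buffer is only dereferenced when @proc is the
 * transaction's target, since that is the proc whose inner lock the
 * caller holds (see the to_proc check below).
 */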
5340 static void print_binder_transaction_ilocked(struct seq_file *m,
5341 struct binder_proc *proc,
5342 const char *prefix,
5343 struct binder_transaction *t)
5344 {
5345 struct binder_proc *to_proc;
5346 struct binder_buffer *buffer = t->buffer;
5347
5348 spin_lock(&t->lock);
5349 to_proc = t->to_proc;
5350 seq_printf(m,
5351 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5352 prefix, t->debug_id, t,
5353 t->from ? t->from->proc->pid : 0,
5354 t->from ? t->from->pid : 0,
5355 to_proc ? to_proc->pid : 0,
5356 t->to_thread ? t->to_thread->pid : 0,
5357 t->code, t->flags, t->priority, t->need_reply);
5358 spin_unlock(&t->lock);
5359
5360 if (proc != to_proc) {
5361 /*
5362 * Can only safely deref buffer if we are holding the
5363 * correct proc inner lock for this node
5364 */
5365 seq_puts(m, "\n");
5366 return;
5367 }
5368
5369 if (buffer == NULL) {
5370 seq_puts(m, " buffer free\n");
5371 return;
5372 }
5373 if (buffer->target_node)
5374 seq_printf(m, " node %d", buffer->target_node->debug_id);
5375 seq_printf(m, " size %zd:%zd data %pK\n",
5376 buffer->data_size, buffer->offsets_size,
5377 buffer->user_data);
5378 }
5379
5380 static void print_binder_work_ilocked(struct seq_file *m,
5381 struct binder_proc *proc,
5382 const char *prefix,
5383 const char *transaction_prefix,
5384 struct binder_work *w)
5385 {
5386 struct binder_node *node;
5387 struct binder_transaction *t;
5388
5389 switch (w->type) {
5390 case BINDER_WORK_TRANSACTION:
5391 t = container_of(w, struct binder_transaction, work);
5392 print_binder_transaction_ilocked(
5393 m, proc, transaction_prefix, t);
5394 break;
5395 case BINDER_WORK_RETURN_ERROR: {
5396 struct binder_error *e = container_of(
5397 w, struct binder_error, work);
5398
5399 seq_printf(m, "%stransaction error: %u\n",
5400 prefix, e->cmd);
5401 } break;
5402 case BINDER_WORK_TRANSACTION_COMPLETE:
5403 seq_printf(m, "%stransaction complete\n", prefix);
5404 break;
5405 case BINDER_WORK_NODE:
5406 node = container_of(w, struct binder_node, work);
5407 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5408 prefix, node->debug_id,
5409 (u64)node->ptr, (u64)node->cookie);
5410 break;
5411 case BINDER_WORK_DEAD_BINDER:
5412 seq_printf(m, "%shas dead binder\n", prefix);
5413 break;
5414 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5415 seq_printf(m, "%shas cleared dead binder\n", prefix);
5416 break;
5417 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5418 seq_printf(m, "%shas cleared death notification\n", prefix);
5419 break;
5420 default:
5421 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5422 break;
5423 }
5424 }
5425
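/*
 * print_binder_thread_ilocked() - dump one thread and its work.
 * The start_pos/header_pos bookkeeping lets the header be rolled back
 * (m->count = start_pos) when @print_always is false and the thread
 * turned out to have no transactions or pending work.
 */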
5426 static void print_binder_thread_ilocked(struct seq_file *m,
5427 struct binder_thread *thread,
5428 int print_always)
5429 {
5430 struct binder_transaction *t;
5431 struct binder_work *w;
5432 size_t start_pos = m->count;
5433 size_t header_pos;
5434
5435 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5436 thread->pid, thread->looper,
5437 thread->looper_need_return,
5438 atomic_read(&thread->tmp_ref));
5439 header_pos = m->count;
5440 t = thread->transaction_stack;
5441 while (t) {
5442 if (t->from == thread) {
5443 print_binder_transaction_ilocked(m, thread->proc,
5444 " outgoing transaction", t);
5445 t = t->from_parent;
5446 } else if (t->to_thread == thread) {
5447 print_binder_transaction_ilocked(m, thread->proc,
5448 " incoming transaction", t);
5449 t = t->to_parent;
5450 } else {
5451 print_binder_transaction_ilocked(m, thread->proc,
5452 " bad transaction", t);
5453 t = NULL;
5454 }
5455 }
5456 list_for_each_entry(w, &thread->todo, entry) {
5457 print_binder_work_ilocked(m, thread->proc, " ",
5458 " pending transaction", w);
5459 }
5460 if (!print_always && m->count == header_pos)
5461 m->count = start_pos;
5462 }
5463
5464 static void print_binder_node_nilocked(struct seq_file *m,
5465 struct binder_node *node)
5466 {
5467 struct binder_ref *ref;
5468 struct binder_work *w;
5469 int count;
5470
5471 count = 0;
5472 hlist_for_each_entry(ref, &node->refs, node_entry)
5473 count++;
5474
5475 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5476 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5477 node->has_strong_ref, node->has_weak_ref,
5478 node->local_strong_refs, node->local_weak_refs,
5479 node->internal_strong_refs, count, node->tmp_refs);
5480 if (count) {
5481 seq_puts(m, " proc");
5482 hlist_for_each_entry(ref, &node->refs, node_entry)
5483 seq_printf(m, " %d", ref->proc->pid);
5484 }
5485 seq_puts(m, "\n");
5486 if (node->proc) {
5487 list_for_each_entry(w, &node->async_todo, entry)
5488 print_binder_work_ilocked(m, node->proc, " ",
5489 " pending async transaction", w);
5490 }
5491 }
5492
5493 static void print_binder_ref_olocked(struct seq_file *m,
5494 struct binder_ref *ref)
5495 {
5496 binder_node_lock(ref->node);
5497 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5498 ref->data.debug_id, ref->data.desc,
5499 ref->node->proc ? "" : "dead ",
5500 ref->node->debug_id, ref->data.strong,
5501 ref->data.weak, ref->death);
5502 binder_node_unlock(ref->node);
5503 }
5504
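/*
 * print_binder_proc() - dump the state of one proc.
 * node->lock ranks above proc->inner_lock in the locking order, so each
 * node gets a temporary reference and the inner lock is dropped around
 * print_binder_node_nilocked(); last_node defers the binder_put_node()
 * until after the inner lock has been retaken.
 */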
5505 static void print_binder_proc(struct seq_file *m,
5506 struct binder_proc *proc, int print_all)
5507 {
5508 struct binder_work *w;
5509 struct rb_node *n;
5510 size_t start_pos = m->count;
5511 size_t header_pos;
5512 struct binder_node *last_node = NULL;
5513
5514 seq_printf(m, "proc %d\n", proc->pid);
5515 seq_printf(m, "context %s\n", proc->context->name);
5516 header_pos = m->count;
5517
5518 binder_inner_proc_lock(proc);
5519 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5520 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5521 rb_node), print_all);
5522
5523 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5524 struct binder_node *node = rb_entry(n, struct binder_node,
5525 rb_node);
5526 if (!print_all && !node->has_async_transaction)
5527 continue;
5528
5529 /*
5530 * take a temporary reference on the node so it
5531 * survives and isn't removed from the tree
5532 * while we print it.
5533 */
5534 binder_inc_node_tmpref_ilocked(node);
5535 /* Need to drop inner lock to take node lock */
5536 binder_inner_proc_unlock(proc);
5537 if (last_node)
5538 binder_put_node(last_node);
5539 binder_node_inner_lock(node);
5540 print_binder_node_nilocked(m, node);
5541 binder_node_inner_unlock(node);
5542 last_node = node;
5543 binder_inner_proc_lock(proc);
5544 }
5545 binder_inner_proc_unlock(proc);
5546 if (last_node)
5547 binder_put_node(last_node);
5548
5549 if (print_all) {
5550 binder_proc_lock(proc);
5551 for (n = rb_first(&proc->refs_by_desc);
5552 n != NULL;
5553 n = rb_next(n))
5554 print_binder_ref_olocked(m, rb_entry(n,
5555 struct binder_ref,
5556 rb_node_desc));
5557 binder_proc_unlock(proc);
5558 }
5559 binder_alloc_print_allocated(m, &proc->alloc);
5560 binder_inner_proc_lock(proc);
5561 list_for_each_entry(w, &proc->todo, entry)
5562 print_binder_work_ilocked(m, proc, " ",
5563 " pending transaction", w);
5564 list_for_each_entry(w, &proc->delivered_death, entry) {
5565 seq_puts(m, " has delivered dead binder\n");
5566 break;
5567 }
5568 binder_inner_proc_unlock(proc);
5569 if (!print_all && m->count == header_pos)
5570 m->count = start_pos;
5571 }
5572
5573 static const char * const binder_return_strings[] = {
5574 "BR_ERROR",
5575 "BR_OK",
5576 "BR_TRANSACTION",
5577 "BR_REPLY",
5578 "BR_ACQUIRE_RESULT",
5579 "BR_DEAD_REPLY",
5580 "BR_TRANSACTION_COMPLETE",
5581 "BR_INCREFS",
5582 "BR_ACQUIRE",
5583 "BR_RELEASE",
5584 "BR_DECREFS",
5585 "BR_ATTEMPT_ACQUIRE",
5586 "BR_NOOP",
5587 "BR_SPAWN_LOOPER",
5588 "BR_FINISHED",
5589 "BR_DEAD_BINDER",
5590 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5591 "BR_FAILED_REPLY",
5592 "BR_FROZEN_REPLY",
5593 "BR_ONEWAY_SPAM_SUSPECT",
5594 };
5595
5596 static const char * const binder_command_strings[] = {
5597 "BC_TRANSACTION",
5598 "BC_REPLY",
5599 "BC_ACQUIRE_RESULT",
5600 "BC_FREE_BUFFER",
5601 "BC_INCREFS",
5602 "BC_ACQUIRE",
5603 "BC_RELEASE",
5604 "BC_DECREFS",
5605 "BC_INCREFS_DONE",
5606 "BC_ACQUIRE_DONE",
5607 "BC_ATTEMPT_ACQUIRE",
5608 "BC_REGISTER_LOOPER",
5609 "BC_ENTER_LOOPER",
5610 "BC_EXIT_LOOPER",
5611 "BC_REQUEST_DEATH_NOTIFICATION",
5612 "BC_CLEAR_DEATH_NOTIFICATION",
5613 "BC_DEAD_BINDER_DONE",
5614 "BC_TRANSACTION_SG",
5615 "BC_REPLY_SG",
5616 };
5617
5618 static const char * const binder_objstat_strings[] = {
5619 "proc",
5620 "thread",
5621 "node",
5622 "ref",
5623 "death",
5624 "transaction",
5625 "transaction_complete"
5626 };
5627
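/*
 * print_binder_stats() - dump BC/BR command counters and object stats.
 * The BUILD_BUG_ON()s keep the counter arrays and the string tables
 * above in lockstep: adding a command without its name (or vice versa)
 * fails the build rather than printing garbage.
 */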
5628 static void print_binder_stats(struct seq_file *m, const char *prefix,
5629 struct binder_stats *stats)
5630 {
5631 int i;
5632
5633 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5634 ARRAY_SIZE(binder_command_strings));
5635 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5636 int temp = atomic_read(&stats->bc[i]);
5637
5638 if (temp)
5639 seq_printf(m, "%s%s: %d\n", prefix,
5640 binder_command_strings[i], temp);
5641 }
5642
5643 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5644 ARRAY_SIZE(binder_return_strings));
5645 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5646 int temp = atomic_read(&stats->br[i]);
5647
5648 if (temp)
5649 seq_printf(m, "%s%s: %d\n", prefix,
5650 binder_return_strings[i], temp);
5651 }
5652
5653 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5654 ARRAY_SIZE(binder_objstat_strings));
5655 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5656 ARRAY_SIZE(stats->obj_deleted));
5657 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5658 int created = atomic_read(&stats->obj_created[i]);
5659 int deleted = atomic_read(&stats->obj_deleted[i]);
5660
5661 if (created || deleted)
5662 seq_printf(m, "%s%s: active %d total %d\n",
5663 prefix,
5664 binder_objstat_strings[i],
5665 created - deleted,
5666 created);
5667 }
5668 }
5669
5670 static void print_binder_proc_stats(struct seq_file *m,
5671 struct binder_proc *proc)
5672 {
5673 struct binder_work *w;
5674 struct binder_thread *thread;
5675 struct rb_node *n;
5676 int count, strong, weak, ready_threads;
5677 size_t free_async_space =
5678 binder_alloc_get_free_async_space(&proc->alloc);
5679
5680 seq_printf(m, "proc %d\n", proc->pid);
5681 seq_printf(m, "context %s\n", proc->context->name);
5682 count = 0;
5683 ready_threads = 0;
5684 binder_inner_proc_lock(proc);
5685 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5686 count++;
5687
5688 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5689 ready_threads++;
5690
5691 seq_printf(m, " threads: %d\n", count);
5692 seq_printf(m, " requested threads: %d+%d/%d\n"
5693 " ready threads %d\n"
5694 " free async space %zd\n", proc->requested_threads,
5695 proc->requested_threads_started, proc->max_threads,
5696 ready_threads,
5697 free_async_space);
5698 count = 0;
5699 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5700 count++;
5701 binder_inner_proc_unlock(proc);
5702 seq_printf(m, " nodes: %d\n", count);
5703 count = 0;
5704 strong = 0;
5705 weak = 0;
5706 binder_proc_lock(proc);
5707 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5708 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5709 rb_node_desc);
5710 count++;
5711 strong += ref->data.strong;
5712 weak += ref->data.weak;
5713 }
5714 binder_proc_unlock(proc);
5715 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5716
5717 count = binder_alloc_get_allocated_count(&proc->alloc);
5718 seq_printf(m, " buffers: %d\n", count);
5719
5720 binder_alloc_print_pages(m, &proc->alloc);
5721
5722 count = 0;
5723 binder_inner_proc_lock(proc);
5724 list_for_each_entry(w, &proc->todo, entry) {
5725 if (w->type == BINDER_WORK_TRANSACTION)
5726 count++;
5727 }
5728 binder_inner_proc_unlock(proc);
5729 seq_printf(m, " pending transactions: %d\n", count);
5730
5731 print_binder_stats(m, " ", &proc->stats);
5732 }
5733
5734
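/*
 * binder_state_show() - seq_file backend for the "state" debugfs file.
 * Dead nodes are walked first; the same tmpref/last_node dance as in
 * print_binder_proc() keeps each node alive while the dead-nodes
 * spinlock is dropped for printing.
 */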
5735 int binder_state_show(struct seq_file *m, void *unused)
5736 {
5737 struct binder_proc *proc;
5738 struct binder_node *node;
5739 struct binder_node *last_node = NULL;
5740
5741 seq_puts(m, "binder state:\n");
5742
5743 spin_lock(&binder_dead_nodes_lock);
5744 if (!hlist_empty(&binder_dead_nodes))
5745 seq_puts(m, "dead nodes:\n");
5746 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5747 /*
5748 * take a temporary reference on the node so it
5749 * survives and isn't removed from the list
5750 * while we print it.
5751 */
5752 node->tmp_refs++;
5753 spin_unlock(&binder_dead_nodes_lock);
5754 if (last_node)
5755 binder_put_node(last_node);
5756 binder_node_lock(node);
5757 print_binder_node_nilocked(m, node);
5758 binder_node_unlock(node);
5759 last_node = node;
5760 spin_lock(&binder_dead_nodes_lock);
5761 }
5762 spin_unlock(&binder_dead_nodes_lock);
5763 if (last_node)
5764 binder_put_node(last_node);
5765
5766 mutex_lock(&binder_procs_lock);
5767 hlist_for_each_entry(proc, &binder_procs, proc_node)
5768 print_binder_proc(m, proc, 1);
5769 mutex_unlock(&binder_procs_lock);
5770
5771 return 0;
5772 }
5773
5774 int binder_stats_show(struct seq_file *m, void *unused)
5775 {
5776 struct binder_proc *proc;
5777
5778 seq_puts(m, "binder stats:\n");
5779
5780 print_binder_stats(m, "", &binder_stats);
5781
5782 mutex_lock(&binder_procs_lock);
5783 hlist_for_each_entry(proc, &binder_procs, proc_node)
5784 print_binder_proc_stats(m, proc);
5785 mutex_unlock(&binder_procs_lock);
5786
5787 return 0;
5788 }
5789
5790 int binder_transactions_show(struct seq_file *m, void *unused)
5791 {
5792 struct binder_proc *proc;
5793
5794 seq_puts(m, "binder transactions:\n");
5795 mutex_lock(&binder_procs_lock);
5796 hlist_for_each_entry(proc, &binder_procs, proc_node)
5797 print_binder_proc(m, proc, 0);
5798 mutex_unlock(&binder_procs_lock);
5799
5800 return 0;
5801 }
5802
5803 static int proc_show(struct seq_file *m, void *unused)
5804 {
5805 struct binder_proc *itr;
5806 int pid = (unsigned long)m->private;
5807
5808 mutex_lock(&binder_procs_lock);
5809 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5810 if (itr->pid == pid) {
5811 seq_puts(m, "binder proc state:\n");
5812 print_binder_proc(m, itr, 1);
5813 }
5814 }
5815 mutex_unlock(&binder_procs_lock);
5816
5817 return 0;
5818 }
5819
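/*
 * print_binder_transaction_log_entry() - print one (lockless) log entry.
 * The writer bumps e->debug_id_done only after filling in the entry, so
 * the two smp_rmb()s around the field reads let us detect a concurrent
 * update: if debug_id_done changed (or is still 0), the line is tagged
 * " (incomplete)" instead of being trusted.
 */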
5820 static void print_binder_transaction_log_entry(struct seq_file *m,
5821 struct binder_transaction_log_entry *e)
5822 {
5823 int debug_id = READ_ONCE(e->debug_id_done);
5824 /*
5825 * read barrier to guarantee debug_id_done is read before
5826 * we print the log values
5827 */
5828 smp_rmb();
5829 seq_printf(m,
5830 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5831 e->debug_id, (e->call_type == 2) ? "reply" :
5832 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5833 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5834 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5835 e->return_error, e->return_error_param,
5836 e->return_error_line);
5837 /*
5838 * read barrier to guarantee the read of debug_id_done happens
5839 * after we are done printing the fields of the entry
5840 */
5841 smp_rmb();
5842 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5843 "\n" : " (incomplete)\n");
5844 }
5845
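/*
 * binder_transaction_log_show() - dump the transaction ring buffer.
 * log->cur is a free-running index; once the log has wrapped
 * (log->full), printing starts at the oldest slot,
 * (cur + 1) % ARRAY_SIZE(log->entry), and covers all
 * ARRAY_SIZE(log->entry) entries in order.
 */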
5846 int binder_transaction_log_show(struct seq_file *m, void *unused)
5847 {
5848 struct binder_transaction_log *log = m->private;
5849 unsigned int log_cur = atomic_read(&log->cur);
5850 unsigned int count;
5851 unsigned int cur;
5852 int i;
5853
5854 count = log_cur + 1;
5855 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5856 0 : count % ARRAY_SIZE(log->entry);
5857 if (count > ARRAY_SIZE(log->entry) || log->full)
5858 count = ARRAY_SIZE(log->entry);
5859 for (i = 0; i < count; i++) {
5860 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5861
5862 print_binder_transaction_log_entry(m, &log->entry[index]);
5863 }
5864 return 0;
5865 }
5866
5867 const struct file_operations binder_fops = {
5868 .owner = THIS_MODULE,
5869 .poll = binder_poll,
5870 .unlocked_ioctl = binder_ioctl,
5871 .compat_ioctl = compat_ptr_ioctl,
5872 .mmap = binder_mmap,
5873 .open = binder_open,
5874 .flush = binder_flush,
5875 .release = binder_release,
5876 };
5877
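/*
 * init_binder_device() - register one /dev/<name> misc device.
 * Each device carries its own binder_context, so devices such as
 * "binder" and "vndbinder" (a typical binder_devices_param value;
 * the exact list is a config assumption) get independent context
 * managers.
 */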
5878 static int __init init_binder_device(const char *name)
5879 {
5880 int ret;
5881 struct binder_device *binder_device;
5882
5883 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5884 if (!binder_device)
5885 return -ENOMEM;
5886
5887 binder_device->miscdev.fops = &binder_fops;
5888 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5889 binder_device->miscdev.name = name;
5890
5891 refcount_set(&binder_device->ref, 1);
5892 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5893 binder_device->context.name = name;
5894 mutex_init(&binder_device->context.context_mgr_node_lock);
5895
5896 ret = misc_register(&binder_device->miscdev);
5897 if (ret < 0) {
5898 kfree(binder_device);
5899 return ret;
5900 }
5901
5902 hlist_add_head(&binder_device->hlist, &binder_devices);
5903
5904 return ret;
5905 }
5906
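/*
 * binder_init() - module init: set up the allocator shrinker, the
 * debugfs tree, any devices named in binder_devices_param (skipped
 * when binderfs is enabled), and finally binderfs itself. The error
 * paths unwind the devices registered so far and the debugfs tree.
 */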
5907 static int __init binder_init(void)
5908 {
5909 int ret;
5910 char *device_name, *device_tmp;
5911 struct binder_device *device;
5912 struct hlist_node *tmp;
5913 char *device_names = NULL;
5914
5915 ret = binder_alloc_shrinker_init();
5916 if (ret)
5917 return ret;
5918
5919 atomic_set(&binder_transaction_log.cur, ~0U);
5920 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5921
5922 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5923 if (binder_debugfs_dir_entry_root)
5924 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5925 binder_debugfs_dir_entry_root);
5926
5927 if (binder_debugfs_dir_entry_root) {
5928 debugfs_create_file("state",
5929 0444,
5930 binder_debugfs_dir_entry_root,
5931 NULL,
5932 &binder_state_fops);
5933 debugfs_create_file("stats",
5934 0444,
5935 binder_debugfs_dir_entry_root,
5936 NULL,
5937 &binder_stats_fops);
5938 debugfs_create_file("transactions",
5939 0444,
5940 binder_debugfs_dir_entry_root,
5941 NULL,
5942 &binder_transactions_fops);
5943 debugfs_create_file("transaction_log",
5944 0444,
5945 binder_debugfs_dir_entry_root,
5946 &binder_transaction_log,
5947 &binder_transaction_log_fops);
5948 debugfs_create_file("failed_transaction_log",
5949 0444,
5950 binder_debugfs_dir_entry_root,
5951 &binder_transaction_log_failed,
5952 &binder_transaction_log_fops);
5953 }
5954
5955 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
5956 strcmp(binder_devices_param, "") != 0) {
5957 /*
5958 * Copy the module_parameter string, because we don't want to
5959 * tokenize it in-place.
5960 */
5961 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5962 if (!device_names) {
5963 ret = -ENOMEM;
5964 goto err_alloc_device_names_failed;
5965 }
5966
5967 device_tmp = device_names;
5968 while ((device_name = strsep(&device_tmp, ","))) {
5969 ret = init_binder_device(device_name);
5970 if (ret)
5971 goto err_init_binder_device_failed;
5972 }
5973 }
5974
5975 ret = init_binderfs();
5976 if (ret)
5977 goto err_init_binder_device_failed;
5978
5979 return ret;
5980
5981 err_init_binder_device_failed:
5982 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5983 misc_deregister(&device->miscdev);
5984 hlist_del(&device->hlist);
5985 kfree(device);
5986 }
5987
5988 kfree(device_names);
5989
5990 err_alloc_device_names_failed:
5991 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5992
5993 return ret;
5994 }
5995
5996 device_initcall(binder_init);
5997
5998 #define CREATE_TRACE_POINTS
5999 #include "binder_trace.h"
6000
6001 MODULE_LICENSE("GPL v2");
6002