Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r--  drivers/android/binder.c  248
1 file changed, 162 insertions, 86 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 5fc2c8ee61b1..c463ca4a8fff 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -79,6 +79,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -3261,20 +3263,16 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
@@ -4223,20 +4221,21 @@ static int binder_thread_write(struct binder_proc *proc,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
} else {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
- proc->pid, thread->pid, (u64)data_ptr,
+ "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid,
+ (unsigned long)data_ptr - proc->alloc.vm_start,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
@@ -5053,16 +5052,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
- t->buffer->data_size, t->buffer->offsets_size,
- (u64)trd->data.ptr.buffer,
- (u64)trd->data.ptr.offsets);
+ t->buffer->data_size, t->buffer->offsets_size);
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -5244,6 +5241,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -6377,10 +6375,10 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
static void print_binder_work_ilocked(struct seq_file *m,
- struct binder_proc *proc,
- const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w, bool hash_ptrs)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -6403,9 +6401,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
- prefix, node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
+ if (hash_ptrs)
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id,
+ (void *)(long)node->ptr,
+ (void *)(long)node->cookie);
+ else
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -6430,7 +6434,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
- int print_always)
+ bool print_always, bool hash_ptrs)
{
struct binder_transaction *t;
struct binder_work *w;
@@ -6460,14 +6464,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work_ilocked(m, thread->proc, " ",
- " pending transaction", w);
+ " pending transaction",
+ w, hash_ptrs);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
- struct binder_node *node)
+ struct binder_node *node,
+ bool hash_ptrs)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -6475,8 +6481,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
count = hlist_count_nodes(&node->refs);
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
- node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ if (hash_ptrs)
+ seq_printf(m, " node %d: u%p c%p", node->debug_id,
+ (void *)(long)node->ptr, (void *)(long)node->cookie);
+ else
+ seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
@@ -6489,7 +6500,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
if (node->proc) {
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, node->proc, " ",
- " pending async transaction", w);
+ " pending async transaction",
+ w, hash_ptrs);
}
}
@@ -6505,8 +6517,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
binder_node_unlock(ref->node);
}
-static void print_binder_proc(struct seq_file *m,
- struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m: struct seq_file for output via seq_printf()
+ * @proc: struct binder_proc whose inner_proc_lock we hold (if any)
+ * @node: struct binder_node to print fields of
+ * @prev_node: struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @proc->nodes or the dead nodes list.
+ * Caller must hold either @proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return: pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_node *prev_node, bool hash_ptrs)
+{
+ /*
+ * Take a temporary reference on the node so that it isn't freed while
+ * we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /*
+ * Live nodes need to drop the inner proc lock and dead nodes need to
+ * drop the binder_dead_nodes_lock before trying to take the node lock.
+ */
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ if (prev_node)
+ binder_put_node(prev_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node, hash_ptrs);
+ binder_node_inner_unlock(node);
+ if (proc)
+ binder_inner_proc_lock(proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+ bool print_all, bool hash_ptrs)
{
struct binder_work *w;
struct rb_node *n;
@@ -6519,31 +6577,19 @@ static void print_binder_proc(struct seq_file *m,
header_pos = m->count;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
- rb_node), print_all);
+ rb_node), print_all, hash_ptrs);
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the tree
- * while we print it.
- */
- binder_inc_node_tmpref_ilocked(node);
- /* Need to drop inner lock to take node lock */
- binder_inner_proc_unlock(proc);
- if (last_node)
- binder_put_node(last_node);
- binder_node_inner_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_inner_unlock(node);
- last_node = node;
- binder_inner_proc_lock(proc);
+ last_node = print_next_binder_node_ilocked(m, proc, node,
+ last_node,
+ hash_ptrs);
}
binder_inner_proc_unlock(proc);
if (last_node)
@@ -6551,19 +6597,18 @@ static void print_binder_proc(struct seq_file *m,
if (print_all) {
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc);
- n != NULL;
- n = rb_next(n))
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
print_binder_ref_olocked(m, rb_entry(n,
- struct binder_ref,
- rb_node_desc));
+ struct binder_ref,
+ rb_node_desc));
binder_proc_unlock(proc);
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work_ilocked(m, proc, " ",
- " pending transaction", w);
+ " pending transaction", w,
+ hash_ptrs);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
@@ -6696,7 +6741,7 @@ static void print_binder_proc_stats(struct seq_file *m,
count = 0;
ready_threads = 0;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
count++;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6710,7 +6755,7 @@ static void print_binder_proc_stats(struct seq_file *m,
ready_threads,
free_async_space);
count = 0;
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n))
count++;
binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
@@ -6718,7 +6763,7 @@ static void print_binder_proc_stats(struct seq_file *m,
strong = 0;
weak = 0;
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
@@ -6745,7 +6790,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6756,31 +6801,40 @@ static int state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the list
- * while we print it.
- */
- node->tmp_refs++;
- spin_unlock(&binder_dead_nodes_lock);
- if (last_node)
- binder_put_node(last_node);
- binder_node_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_unlock(node);
- last_node = node;
- spin_lock(&binder_dead_nodes_lock);
- }
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+ last_node = print_next_binder_node_ilocked(m, NULL, node,
+ last_node,
+ hash_ptrs);
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
+ print_binder_proc(m, proc, true, hash_ptrs);
+ mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+ struct binder_proc *proc;
+
+ seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, false, hash_ptrs);
mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, false);
+ return 0;
+}
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, true);
return 0;
}
@@ -6802,14 +6856,13 @@ static int stats_show(struct seq_file *m, void *unused)
static int transactions_show(struct seq_file *m, void *unused)
{
- struct binder_proc *proc;
-
- seq_puts(m, "binder transactions:\n");
- mutex_lock(&binder_procs_lock);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- mutex_unlock(&binder_procs_lock);
+ print_binder_transactions(m, false);
+ return 0;
+}
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_transactions(m, true);
return 0;
}
@@ -6822,7 +6875,7 @@ static int proc_show(struct seq_file *m, void *unused)
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
+ print_binder_proc(m, itr, true, false);
}
}
mutex_unlock(&binder_procs_lock);
@@ -6889,8 +6942,10 @@ const struct file_operations binder_fops = {
};
DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6901,6 +6956,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "state_hashed",
+ .mode = 0444,
+ .fops = &state_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "stats",
.mode = 0444,
.fops = &stats_fops,
@@ -6913,6 +6974,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "transactions_hashed",
+ .mode = 0444,
+ .fops = &transactions_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "transaction_log",
.mode = 0444,
.fops = &transaction_log_fops,
@@ -6929,7 +6996,16 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
void binder_add_device(struct binder_device *device)
{
+ spin_lock(&binder_devices_lock);
hlist_add_head(&device->hlist, &binder_devices);
+ spin_unlock(&binder_devices_lock);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ spin_lock(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+ spin_unlock(&binder_devices_lock);
}
static int __init init_binder_device(const char *name)
@@ -6956,7 +7032,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -7018,7 +7094,7 @@ static int __init binder_init(void)
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}