author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-27 16:51:51 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-27 16:51:51 -0800
commit     13845bdc869f136f92ad3d40ea09b867bb4ce467 (patch)
tree       a2f503e60579cee485c2d0de06ad4bcb8abb7612 /drivers/android
parent     125ca745467d4f87ae58e671a4a5714e024d2908 (diff)
parent     970b9757cb44c315b5c3da6b1b35a1ffb07cca5a (diff)
Merge tag 'char-misc-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull Char/Misc/IIO driver updates from Greg KH:
 "Here is the "big" set of char/misc/iio and other smaller driver
  subsystem updates for 6.14-rc1. Loads of different things in here
  this development cycle, highlights are:

   - ntsync "driver" to handle Windows locking types enabling Wine to
     work much better on many workloads (i.e. games). The driver
     framework was in 6.13, but now it's enabled and fully working
     properly. Should make many SteamOS users happy. Even comes with
     tests!

   - Large IIO driver updates and bugfixes

   - FPGA driver updates

   - Coresight driver updates

   - MHI driver updates

   - PPS driver updates

   - const bin_attribute reworking for many drivers

   - binder driver updates

   - smaller driver updates and fixes

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (311 commits)
  ntsync: Fix reference leaks in the remaining create ioctls.
  spmi: hisi-spmi-controller: Drop duplicated OF node assignment in spmi_controller_probe()
  spmi: Set fwnode for spmi devices
  ntsync: fix a file reference leak in drivers/misc/ntsync.c
  scripts/tags.sh: Don't tag usages of DECLARE_BITMAP
  dt-bindings: interconnect: qcom,msm8998-bwmon: Add SM8750 CPU BWMONs
  dt-bindings: interconnect: OSM L3: Document sm8650 OSM L3 compatible
  dt-bindings: interconnect: qcom-bwmon: Document QCS615 bwmon compatibles
  interconnect: sm8750: Add missing const to static qcom_icc_desc
  memstick: core: fix kernel-doc notation
  intel_th: core: fix kernel-doc warnings
  binder: log transaction code on failure
  iio: dac: ad3552r-hs: clear reset status flag
  iio: dac: ad3552r-common: fix ad3541/2r ranges
  iio: chemical: bme680: Fix uninitialized variable in __bme680_read_raw()
  misc: fastrpc: Fix copy buffer page size
  misc: fastrpc: Fix registered buffer page address
  misc: fastrpc: Deregister device nodes properly in error scenarios
  nvmem: core: improve range check for nvmem_cell_write()
  nvmem: qcom-spmi-sdam: Set size in struct nvmem_config
  ...
Diffstat (limited to 'drivers/android')
-rw-r--r--  drivers/android/binder.c                   13
-rw-r--r--  drivers/android/binder_alloc.c            366
-rw-r--r--  drivers/android/binder_alloc.h             45
-rw-r--r--  drivers/android/binder_alloc_selftest.c    18
-rw-r--r--  drivers/android/binder_internal.h          11
-rw-r--r--  drivers/android/binder_trace.h              2
-rw-r--r--  drivers/android/binderfs.c                  2
7 files changed, 288 insertions, 169 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a4b98e95ab85..76052006bd87 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1971,7 +1971,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -3800,13 +3800,13 @@ err_invalid_target_handle:
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -6373,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ proc->alloc.vm_start - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6927,6 +6927,11 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
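
The new binder_add_device() helper above publishes a device on the global
binder_devices hlist, which is how binderfs-created nodes (see the binderfs.c
hunk at the end of this patch) become visible alongside the built-in devices.
A minimal, self-contained sketch of the same hlist idiom, using illustrative
demo_* names rather than the real binder structs:

    #include <linux/list.h>

    struct demo_device {
            const char *name;
            struct hlist_node hlist;        /* linkage into the global list */
    };

    static HLIST_HEAD(demo_devices);

    /* Mirrors binder_add_device(): an O(1) push onto the list head. */
    static void demo_add_device(struct demo_device *device)
    {
            hlist_add_head(&device->hlist, &demo_devices);
    }

hlist_add_head() is constant time and, as the new kernel-doc in
binder_internal.h warns, the list itself is unlocked, so callers are expected
to serialize additions themselves.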
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a738e7745865..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault-in just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,12 +1115,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
@@ -1058,39 +1137,50 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
void *cb_arg)
__must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
spin_unlock(&lru->lock);
if (vma) {
@@ -1101,17 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
@@ -1145,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
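
The most delicate part of the binder_alloc.c changes is the lock-free
publication of installed pages: binder_install_single_page() fully initializes
a page (including the shrinker metadata stashed in page->private) before
binder_set_installed_page() releases the pointer into alloc->pages[index], and
every reader goes through the acquire in binder_get_installed_page(). A
reduced sketch of that release/acquire pairing, with simplified fields that
are not the real binder layout:

    #include <linux/mm.h>

    struct demo_alloc {
            struct page **pages;    /* slots are NULL until published */
    };

    /* Writer: runs only after the page and its private data are set up. */
    static void demo_publish_page(struct demo_alloc *alloc,
                                  unsigned long index, struct page *page)
    {
            /* Pairs with the acquire below. */
            smp_store_release(&alloc->pages[index], page);
    }

    /* Reader: a non-NULL result implies the initialization is visible. */
    static struct page *demo_lookup_page(struct demo_alloc *alloc,
                                         unsigned long index)
    {
            return smp_load_acquire(&alloc->pages[index]);
    }

This pairing is also why -EBUSY from vm_insert_page() is benign in the hunk
above: a racing thread already installed the pte, so the loser frees its own
page, looks up the winner's page via get_user_pages_remote() with
FOLL_NOFAULT, and publishes that one instead.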
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c02c8ebcb466..feecd7414241 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -59,34 +59,43 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index: offset in @alloc->pages[] into the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped: whether the vm area is mapped, each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,18 +105,18 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
@@ -153,9 +162,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return free_async_space;
}
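
With struct binder_lru_page gone, the per-page shrinker state (LRU linkage,
owning alloc, page index) now travels with the page itself via page->private,
and page_to_lru() above recovers the list head from any installed page. A
small sketch of the page_private() idiom under the same assumptions (demo_*
names are illustrative only):

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Same shape as binder_shrinker_mdata, reduced for illustration. */
    struct demo_mdata {
            struct list_head lru;
            unsigned long page_index;
    };

    /* Attach metadata to a fresh page, as binder_page_alloc() does. */
    static int demo_attach_mdata(struct page *page, unsigned long index)
    {
            struct demo_mdata *mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);

            if (!mdata)
                    return -ENOMEM;
            mdata->page_index = index;
            INIT_LIST_HEAD(&mdata->lru);
            set_page_private(page, (unsigned long)mdata);
            return 0;
    }

    /* Recover the LRU linkage from the page, mirroring page_to_lru(). */
    static struct list_head *demo_page_to_lru(struct page *page)
    {
            return &((struct demo_mdata *)page_private(page))->lru;
    }

Carrying the back-pointer on the page is what lets alloc->pages shrink from an
array of binder_lru_page structs to a plain array of struct page *, and lets
the shrinker find its metadata from the list_lru item alone.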
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 81442fe20a69..c88735c54848 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,11 +104,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
+ alloc->pages[page_index] ?
"lru" : "free", page_index);
return false;
}
@@ -148,10 +148,10 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
* if binder shrinker ran during binder_alloc_free_buf
* calls above.
*/
- if (list_empty(&alloc->pages[i].lru)) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
pr_err_size_seq(sizes, seq);
pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ alloc->pages[i] ? "alloc" : "free", i);
binder_selftest_failures++;
}
}
@@ -168,9 +168,9 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
}
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
+ if (alloc->pages[i]) {
pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
+ list_empty(page_to_lru(alloc->pages[i])) ?
"alloc" : "lru", i);
binder_selftest_failures++;
}
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->mapped)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index f8d6be682f23..e4eb8357989c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -25,8 +25,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -582,4 +581,12 @@ struct binder_object {
};
};
+/**
+ * Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ *
+ * Not reentrant as the list is not protected by any locks
+ */
+void binder_add_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..16de1b9e72f7 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index ad1fa7abc323..bc6bae76ccaf 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -207,6 +207,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
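
Finally, both binder_page_insert() and the shrinker callback
binder_alloc_free_page() in the binder_alloc.c diff follow the same two-step
locking strategy: try the per-VMA lock first and fall back to mmap_lock only
when that fails. A condensed sketch of the idiom (assumes CONFIG_PER_VMA_LOCK
for lock_vma_under_rcu(); demo_op() is a hypothetical stand-in for
vm_insert_page() or the unmap work):

    #include <linux/mm.h>

    /* Returns 0 on success, -ESRCH if no VMA covers addr. */
    static int demo_with_vma(struct mm_struct *mm, unsigned long addr,
                             int (*demo_op)(struct vm_area_struct *vma,
                                            unsigned long addr))
    {
            struct vm_area_struct *vma;
            int ret = -ESRCH;

            /* Fast path: per-VMA read lock, no mmap_lock contention. */
            vma = lock_vma_under_rcu(mm, addr);
            if (vma) {
                    ret = demo_op(vma, addr);
                    vma_end_read(vma);
                    return ret;
            }

            /* Slow path: whole-mm read lock, then a plain VMA lookup. */
            mmap_read_lock(mm);
            vma = vma_lookup(mm, addr);
            if (vma)
                    ret = demo_op(vma, addr);
            mmap_read_unlock(mm);

            return ret;
    }

Because a binder_alloc can only ever be mapped once, checking
binder_alloc_is_mapped() under either lock is enough to know the VMA found at
vm_start still belongs to this mapping, which is what replaced the old
alloc->vma pointer comparison.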