Diffstat (limited to 'drivers/android/binder_alloc.c'):
 drivers/android/binder_alloc.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 54cee2b31c8e..813894ba5da6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	}
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return 0;
 
@@ -304,7 +304,7 @@ err_page_ptr_cleared:
 err_no_vma:
 	if (mm) {
 		mmap_write_unlock(mm);
-		mmput(mm);
+		mmput_async(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
 }
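
Both hunks release the mm reference with mmput_async() after dropping the write lock. Unlike mmput(), which runs the full address-space teardown synchronously when it drops the last reference, mmput_async() defers that final __mmput() to a workqueue, so the heavyweight teardown never executes in this path. A minimal sketch of the pin/work/unpin pattern the two call sites follow (the helper name here is invented for illustration):

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/sched/mm.h>

	static int work_on_remote_mm(struct mm_struct *mm)
	{
		if (!mmget_not_zero(mm))	/* owner may already be exiting */
			return -ESRCH;

		mmap_write_lock(mm);
		/* ... install or tear down pages under the lock ... */
		mmap_write_unlock(mm);

		mmput_async(mm);	/* a final put runs from a workqueue, not here */
		return 0;
	}
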
@@ -344,8 +344,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 			continue;
 		if (!buffer->async_transaction)
 			continue;
-		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
-			+ sizeof(struct binder_buffer);
+		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
 		num_buffers++;
 	}
 
@@ -407,17 +406,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 				   alloc->pid, extra_buffers_size);
 		return ERR_PTR(-EINVAL);
 	}
-	if (is_async &&
-	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
+	if (is_async && alloc->free_async_space < size) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
 				   alloc->pid, size);
 		return ERR_PTR(-ENOSPC);
 	}
 
-	/* Pad 0-size buffers so they get assigned unique addresses */
-	size = max(size, sizeof(void *));
-
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
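
Two things change here. The 0-size padding is hoisted above the async space check, so the size that is checked is the same size that is later debited from free_async_space; previously a 0-sized async buffer passed the check at size 0 but was debited at sizeof(void *) plus metadata. And the sizeof(struct binder_buffer) term disappears from the check and, in the hunks below, from the debit, the credit, and the debug walk together, keeping the accounting self-consistent; the metadata struct is kzalloc'ed from the kernel heap, not carved out of the mapped buffer region. A self-contained userspace sketch of the drift the reordering prevents (free_async_space and alloc_async are illustrative names, not binder's code):

	#include <stdio.h>

	#define PTR_SIZE ((long)sizeof(void *))

	static long free_async_space;

	/* Returns 0 on success, -1 for "no async space left". */
	static int alloc_async(long size, int pad_before_check)
	{
		if (pad_before_check && size < PTR_SIZE)
			size = PTR_SIZE;	/* new order: check the padded size */
		if (free_async_space < size)
			return -1;
		if (!pad_before_check && size < PTR_SIZE)
			size = PTR_SIZE;	/* old order: pad after the check */
		free_async_space -= size;	/* the debit always sees the padded size */
		return 0;
	}

	int main(void)
	{
		free_async_space = PTR_SIZE;	/* room for exactly one padded buffer */
		alloc_async(0, 0);
		alloc_async(0, 0);		/* old order: passes the check again */
		printf("old order: %ld\n", free_async_space);	/* negative: overdrawn */

		free_async_space = PTR_SIZE;
		alloc_async(0, 1);
		printf("new order: %d\n", alloc_async(0, 1));	/* -1: correctly refused */
		return 0;
	}

With the old ordering the second allocation is approved against space the first one already consumed; with the new ordering the check and the debit see the same padded size.
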
@@ -519,7 +518,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	buffer->pid = pid;
 	buffer->oneway_spam_suspect = false;
 	if (is_async) {
-		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		alloc->free_async_space -= size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 				   "%d: binder_alloc_buf size %zd async free %zd\n",
 				   alloc->pid, size, alloc->free_async_space);
@@ -557,7 +556,7 @@ err_alloc_buf_struct_failed:
  * is the sum of the three given sizes (each rounded up to
  * pointer-sized boundary)
  *
- * Return: The allocated buffer or %NULL if error
+ * Return: The allocated buffer or %ERR_PTR(-errno) if error
  */
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t data_size,
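
Since binder_alloc_new_buf() has returned ERR_PTR values rather than NULL for some time (the ERR_PTR returns are visible in the hunks above), the kernel-doc here is only catching up with reality. Callers are expected to test with the ERR_PTR helpers, roughly like this (call-site shape assumed, names taken from the declaration above):

	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
				      extra_buffers_size, is_async, pid);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);	/* e.g. -EINVAL or -ENOSPC from the hunks above */
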
@@ -657,8 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
-		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
-
+		alloc->free_async_space += buffer_size;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 				   "%d: binder_free_buf size %zd async free %zd\n",
 				   alloc->pid, size, alloc->free_async_space);
@@ -706,7 +704,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	/*
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
-	 * binder_alloc_free_buf_locked(). However, that could
+	 * binder_free_buf_locked(). However, that could
 	 * increase contention for the alloc mutex if clear_on_free
 	 * is used frequently for large buffers. The mutex is not
 	 * needed for correctness here.
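
The renamed reference now matches the actual helper, binder_free_buf_locked(). The comment records a deliberate ordering: the potentially expensive clear-on-free memset happens before alloc->mutex is taken, and only the free-list bookkeeping runs under the lock. The surrounding function presumably has roughly this shape (a sketch reconstructed from the comment's description, not part of the patch):

	void binder_alloc_free_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
	{
		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);	/* may memset many pages */
			buffer->clear_on_free = false;
		}
		mutex_lock(&alloc->mutex);
		binder_free_buf_locked(alloc, buffer);
		mutex_unlock(&alloc->mutex);
	}
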
@@ -1005,7 +1003,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	vma = binder_alloc_get_vma(alloc);
+	vma = vma_lookup(mm, page_addr);
+	if (vma && vma != binder_alloc_get_vma(alloc))
+		goto err_invalid_vma;
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -1031,6 +1031,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;
 
+err_invalid_vma:
+	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
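
The last two hunks close a race in the shrinker callback: after mmap_read_trylock() succeeds, the page's address is re-validated with vma_lookup(). If some VMA covers page_addr but it is not the binder mapping, the address range has been unmapped and reused, so freeing the page as a binder page would be a use-after-free; the new err_invalid_vma label unwinds in reverse order (read-unlock first, then the async mmput). The guard reduces to this pattern (illustrative helper, not from the patch):

	#include <linux/mm.h>

	/*
	 * True if addr may still be treated as ours: either nothing is mapped
	 * there, or the mapping is the one we installed. The caller must hold
	 * mmap_read_lock(mm).
	 */
	static bool addr_still_ours(struct mm_struct *mm, unsigned long addr,
				    struct vm_area_struct *ours)
	{
		struct vm_area_struct *vma = vma_lookup(mm, addr);

		return !vma || vma == ours;
	}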