Diffstat (limited to 'drivers/android/binder')
-rw-r--r--  drivers/android/binder/page_range.rs        | 84
-rw-r--r--  drivers/android/binder/process.rs           |  3
-rw-r--r--  drivers/android/binder/range_alloc/array.rs | 35
-rw-r--r--  drivers/android/binder/range_alloc/mod.rs   |  4
-rw-r--r--  drivers/android/binder/range_alloc/tree.rs  | 18
-rw-r--r--  drivers/android/binder/thread.rs            | 17
6 files changed, 116 insertions, 45 deletions
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c..9dfc154e5dd4 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
    _pin: PhantomPinned,
}
+// We do not define any ops. For now, used only to check identity of vmas.
+static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
+
+// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
+// check its vm_ops and private data before using it.
+fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
+    // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
+    let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
+    if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
+        return None;
+    }
+
+    // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
+    let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
+    // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
+    // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
+    // VMA associated with it, so there can't be any false positives due to pointer reuse here.
+    if !ptr::eq(vm_private_data, owner.cast()) {
+        return None;
+    }
+
+    vma.as_mixedmap_vma()
+}
+
struct Inner {
    /// Array of pages.
    ///
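
Note: the hunk above tags every binder vma with a sentinel vm_operations_struct plus a
vm_private_data pointer back to the owning ShrinkablePageRange, and check_vma() refuses any vma
whose two pointers do not both match. A minimal userspace Rust sketch of that pointer-identity
check follows; VmOps, FakeVma and Owner are illustrative stand-ins, not the driver's VmaRef or
ShrinkablePageRange types.

    use std::ptr;

    // Stand-in for vm_operations_struct: only its *address* is ever used.
    struct VmOps {
        _ops_table_placeholder: usize,
    }
    static SENTINEL_OPS: VmOps = VmOps {
        _ops_table_placeholder: 0,
    };

    // Stand-in for a vma carrying an ops pointer and a private-data pointer.
    struct FakeVma {
        vm_ops: *const VmOps,
        vm_private_data: *const (),
    }

    struct Owner {
        _pages: u32,
    }

    // Mirrors check_vma(): both pointers must match before the vma is trusted.
    fn check_vma(vma: &FakeVma, owner: *const Owner) -> bool {
        ptr::eq(vma.vm_ops, &SENTINEL_OPS) && ptr::eq(vma.vm_private_data, owner.cast())
    }

    fn main() {
        let owner = Owner { _pages: 0 };
        let tagged = FakeVma {
            vm_ops: &SENTINEL_OPS,
            vm_private_data: ptr::from_ref(&owner).cast(),
        };
        let foreign = FakeVma {
            vm_ops: ptr::null(),
            vm_private_data: ptr::from_ref(&owner).cast(),
        };
        assert!(check_vma(&tagged, &owner));
        assert!(!check_vma(&foreign, &owner)); // wrong vm_ops: do not touch it
    }

Only the addresses are compared; the owner pointer itself is never dereferenced during the check,
which is why holding a raw pointer is enough.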
@@ -308,6 +332,18 @@ impl ShrinkablePageRange {
        inner.size = num_pages;
        inner.vma_addr = vma.start();
+        // This pointer is only used for comparison - it's not dereferenced.
+        //
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_private_data`.
+        unsafe {
+            (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
+        };
+
+        // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+        // `vm_ops`.
+        unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
+
        Ok(num_pages)
    }
@@ -399,22 +435,25 @@ impl ShrinkablePageRange {
        //
        // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
        // workqueue.
-        MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
-            .mmap_read_lock()
-            .vma_lookup(vma_addr)
-            .ok_or(ESRCH)?
-            .as_mixedmap_vma()
-            .ok_or(ESRCH)?
-            .vm_insert_page(user_page_addr, &new_page)
-            .inspect_err(|err| {
-                pr_warn!(
-                    "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
-                    user_page_addr,
-                    vma_addr,
-                    i,
-                    err
-                )
-            })?;
+        let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
+        {
+            let vma_read;
+            let mmap_read;
+            let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
+                vma_read = ret;
+                check_vma(&vma_read, self)
+            } else {
+                mmap_read = mm.mmap_read_lock();
+                mmap_read
+                    .vma_lookup(vma_addr)
+                    .and_then(|vma| check_vma(vma, self))
+            };
+
+            match vma {
+                Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
+                None => return Err(ESRCH),
+            }
+        }
        let inner = self.lock.lock();
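
Note: the replacement above first tries the per-vma lock (lock_vma_under_rcu) and only falls back
to the mmap read lock when that fails. Because the two paths produce guards of different types,
the guards are declared as uninitialized outer bindings so that whichever one gets taken outlives
the vma reference derived from it. A userspace sketch of that shape, with hypothetical FastGuard
and SlowGuard types standing in for the two kinds of lock guard:

    struct FastGuard(String);
    struct SlowGuard(String);

    fn try_fast() -> Option<FastGuard> {
        None // pretend the per-vma lock could not be taken
    }

    fn take_slow() -> SlowGuard {
        SlowGuard("value behind the slow lock".to_string())
    }

    fn lookup() -> Option<String> {
        let fast_guard;
        let slow_guard;
        // Whichever branch runs initializes exactly one outer binding, so the
        // `&str` borrowed out of it is still backed by a live guard afterwards.
        let value: Option<&str> = if let Some(g) = try_fast() {
            fast_guard = g;
            Some(fast_guard.0.as_str())
        } else {
            slow_guard = take_slow();
            Some(slow_guard.0.as_str())
        };
        value.map(|s| s.to_string())
    }

    fn main() {
        assert_eq!(lookup().as_deref(), Some("value behind the slow lock"));
    }

As the surrounding comment explains, the mm reference is still wrapped with into_mmput_async so
that the final mm cleanup is queued to a workqueue instead of running under these locks.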
@@ -667,12 +706,15 @@ unsafe extern "C" fn rust_shrink_free_page(
    let mmap_read;
    let mm_mutex;
    let vma_addr;
+    let range_ptr;
    {
        // CAST: The `list_head` field is first in `PageInfo`.
        let info = item as *mut PageInfo;
        // SAFETY: The `range` field of `PageInfo` is immutable.
-        let range = unsafe { &*((*info).range) };
+        range_ptr = unsafe { (*info).range };
+        // SAFETY: The `range` outlives its `PageInfo` values.
+        let range = unsafe { &*range_ptr };
        mm = match range.mm.mmget_not_zero() {
            Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +759,11 @@ unsafe extern "C" fn rust_shrink_free_page(
    // SAFETY: The lru lock is locked when this method is called.
    unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
-    if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
-        let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
-        vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+    if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
+        if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
+            let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+            vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+        }
    }
    drop(mmap_read);
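
Note: the shrinker callback now also keeps the raw pointer (range_ptr) outside the block and later
passes it to check_vma(), where it is only compared against the vma's vm_private_data; as the
comments above argue, the ShrinkablePageRange outlives the vma, so the comparison cannot be a
false positive. A small userspace sketch of keeping a raw pointer purely for a later identity
comparison (Range here is an illustrative stand-in):

    use std::ptr;

    struct Range(u32);

    fn main() {
        let owner = Range(1);
        let other = Range(2);

        let range_ptr: *const Range;
        {
            // Borrow briefly to read what we need, then keep only the address.
            let range: &Range = &owner;
            println!("shrinking pages of range {}", range.0);
            range_ptr = ptr::from_ref(range);
        }

        // Later: identity comparison only, no dereference.
        assert!(ptr::eq(range_ptr, &owner));
        assert!(!ptr::eq(range_ptr, &other));
    }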
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 41de5593197c..f06498129aa9 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -1295,7 +1295,8 @@ impl Process {
    }
    pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
-        if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+        let death = self.inner.lock().pull_delivered_death(cookie);
+        if let Some(death) = death {
            death.set_notification_done(thread);
        }
    }
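
Note: pulling the lock() call out of the `if let` scrutinee matters because the temporary guard
returned by self.inner.lock() lives for the whole `if let`, so set_notification_done() would
otherwise run with the process inner lock still held; binding the result to `death` first drops
the guard before the body runs. A userspace sketch with std::sync::Mutex (Inner, Process, pending
and notify are illustrative names, not the driver's types):

    use std::sync::Mutex;

    struct Inner {
        pending: Option<u64>,
    }

    struct Process {
        inner: Mutex<Inner>,
    }

    impl Process {
        fn handle_done(&self, cookie: u64) {
            // Take the value out while the lock is held, then release the guard...
            let death = self.inner.lock().unwrap().pending.take();
            // ...so the body below runs without the lock and may re-lock freely.
            if let Some(death) = death.filter(|c| *c == cookie) {
                self.notify(death);
            }
        }

        fn notify(&self, cookie: u64) {
            // Re-acquires the same lock; fine because handle_done dropped its guard.
            let _inner = self.inner.lock().unwrap();
            println!("notification done for cookie {cookie}");
        }
    }

    fn main() {
        let p = Process {
            inner: Mutex::new(Inner { pending: Some(7) }),
        };
        p.handle_done(7);
    }

In the sketch, writing `if let Some(c) = self.inner.lock().unwrap().pending.take() { self.notify(c) }`
inside handle_done would deadlock or panic, since std's Mutex is not reentrant and notify()
re-locks it while the scrutinee's guard is still alive.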
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
index 07e1dec2ce63..ada1d1b4302e 100644
--- a/drivers/android/binder/range_alloc/array.rs
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -118,7 +118,7 @@ impl<T> ArrayRangeAllocator<T> {
        size: usize,
        is_oneway: bool,
        pid: Pid,
-    ) -> Result<usize> {
+    ) -> Result<(usize, bool)> {
        // Compute new value of free_oneway_space, which is set only on success.
        let new_oneway_space = if is_oneway {
            match self.free_oneway_space.checked_sub(size) {
@@ -146,7 +146,38 @@ impl<T> ArrayRangeAllocator<T> {
            .ok()
            .unwrap();
-        Ok(insert_at_offset)
+        // Start detecting spammers once we have less than 20%
+        // of async space left (which is less than 10% of total
+        // buffer size).
+        //
+        // (This will short-circuit, so `low_oneway_space` is
+        // only called when necessary.)
+        let oneway_spam_detected =
+            is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+        Ok((insert_at_offset, oneway_spam_detected))
+    }
+
+    /// Find the amount and size of buffers allocated by the current caller.
+    ///
+    /// The idea is that once we cross the threshold, whoever is responsible
+    /// for the low async space is likely to try to send another async transaction,
+    /// and at some point we'll catch them in the act. This is more efficient
+    /// than keeping a map per pid.
+    fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+        let mut total_alloc_size = 0;
+        let mut num_buffers = 0;
+
+        // Warn if this pid has more than 50 transactions, or more than 50% of
+        // async space (which is 25% of total buffer size). Oneway spam is only
+        // detected when the threshold is exceeded.
+        for range in &self.ranges {
+            if range.state.is_oneway() && range.state.pid() == calling_pid {
+                total_alloc_size += range.size;
+                num_buffers += 1;
+            }
+        }
+        num_buffers > 50 || total_alloc_size > self.size / 4
    }
    pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
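
Note: the thresholds above in concrete numbers: oneway (async) space is half of the mapping,
detection only arms once the remaining async space drops below size / 10 (20% of the async half),
and low_oneway_space() then flags a pid that holds more than 50 oneway buffers or more than
size / 4 (half of the async space). A standalone sketch with made-up numbers; State and
OnewayRange are illustrative, not the driver's allocator types:

    struct OnewayRange {
        size: usize,
        pid: u32,
    }

    struct State {
        size: usize,              // total size of the mmap'd buffer
        free_oneway_space: usize, // starts out at size / 2
        oneway_ranges: Vec<OnewayRange>,
    }

    impl State {
        // Mirrors low_oneway_space(): more than 50 buffers, or more than 25% of
        // the total buffer (i.e. half the async space), held by one pid.
        fn low_oneway_space(&self, pid: u32) -> bool {
            let (total, count) = self
                .oneway_ranges
                .iter()
                .filter(|r| r.pid == pid)
                .fold((0usize, 0usize), |(t, n), r| (t + r.size, n + 1));
            count > 50 || total > self.size / 4
        }

        // Mirrors the check in reserve_new(): only scan once the free async
        // space is below 10% of the total buffer (20% of the async half).
        fn oneway_spam_detected(&self, new_oneway_space: usize, pid: u32) -> bool {
            new_oneway_space < self.size / 10 && self.low_oneway_space(pid)
        }
    }

    fn main() {
        // 1 MiB buffer: async space starts at 512 KiB; detection arms below
        // 104857 free bytes, and pid 42 trips it by holding over 262144 bytes.
        let s = State {
            size: 1 << 20,
            free_oneway_space: 90_000,
            oneway_ranges: vec![OnewayRange { size: 300_000, pid: 42 }],
        };
        assert!(s.oneway_spam_detected(s.free_oneway_space, 42));
        assert!(!s.oneway_spam_detected(s.free_oneway_space, 7));
    }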
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
index 2301e2bc1a1f..1f4734468ff1 100644
--- a/drivers/android/binder/range_alloc/mod.rs
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -188,11 +188,11 @@ impl<T> RangeAllocator<T> {
                self.reserve_new(args)
            }
            Impl::Array(array) => {
-                let offset =
+                let (offset, oneway_spam_detected) =
                    array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
                Ok(ReserveNew::Success(ReserveNewSuccess {
                    offset,
-                    oneway_spam_detected: false,
+                    oneway_spam_detected,
                    _empty_array_alloc: args.empty_array_alloc,
                    _new_tree_alloc: args.new_tree_alloc,
                    _tree_alloc: args.tree_alloc,
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
index 838fdd2b47ea..48796fcdb362 100644
--- a/drivers/android/binder/range_alloc/tree.rs
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -164,15 +164,6 @@ impl<T> TreeRangeAllocator<T> {
            self.free_oneway_space
        };
-        // Start detecting spammers once we have less than 20%
-        // of async space left (which is less than 10% of total
-        // buffer size).
-        //
-        // (This will short-circut, so `low_oneway_space` is
-        // only called when necessary.)
-        let oneway_spam_detected =
-            is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
-
        let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
            None => {
                pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@@ -203,6 +194,15 @@ impl<T> TreeRangeAllocator<T> {
            self.free_tree.insert(free_tree_node);
        }
+        // Start detecting spammers once we have less than 20%
+        // of async space left (which is less than 10% of total
+        // buffer size).
+        //
+        // (This will short-circuit, so `low_oneway_space` is
+        // only called when necessary.)
+        let oneway_spam_detected =
+            is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
        Ok((found_off, oneway_spam_detected))
    }
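
Note: the detection block itself is unchanged; it only moves below find_best_match() so the O(n)
low_oneway_space() scan is never reached on the ENOSPC error path, and inside the expression `&&`
already guarantees the scan only runs once the cheaper checks have passed. A tiny sketch of that
short-circuit guarantee:

    use std::cell::Cell;

    fn main() {
        let scans = Cell::new(0);
        let expensive_scan = || {
            scans.set(scans.get() + 1);
            true
        };

        let is_oneway = false;
        let low_space = true;
        let _detected = is_oneway && low_space && expensive_scan();
        assert_eq!(scans.get(), 0); // never evaluated: `is_oneway` already failed

        let is_oneway = true;
        let _detected = is_oneway && low_space && expensive_scan();
        assert_eq!(scans.get(), 1); // evaluated exactly once
    }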
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 0b62d24b2118..c004214b1662 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -1015,12 +1015,9 @@ impl Thread {
        // Copy offsets if there are any.
        if offsets_size > 0 {
-            {
-                let mut reader =
-                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
-                        .reader();
-                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
-            }
+            let mut offsets_reader =
+                UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+                    .reader();
            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + offsets_size;
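
Note: instead of copying the whole offsets array into the allocation up front and then re-reading
each offset back out of it, the code now keeps a single UserSliceReader and, in the next hunk,
reads each offset once, writes it into the allocation, and validates that same local value. A
userspace sketch of that read-once, store, then validate-the-stored-copy flow; UntrustedReader and
copy_and_validate_offsets are illustrative stand-ins, not the driver's types:

    struct UntrustedReader<'a> {
        data: &'a [u8],
        pos: usize,
    }

    impl<'a> UntrustedReader<'a> {
        fn read_u64(&mut self) -> Option<u64> {
            let bytes = self.data.get(self.pos..self.pos + 8)?;
            self.pos += 8;
            Some(u64::from_le_bytes(bytes.try_into().ok()?))
        }
    }

    fn copy_and_validate_offsets(
        reader: &mut UntrustedReader<'_>,
        dest: &mut Vec<u64>,
        data_len: u64,
    ) -> Result<(), &'static str> {
        let mut end_of_previous = 0u64;
        while let Some(offset) = reader.read_u64() {
            dest.push(offset); // store first...
            // ...then validate exactly the value that was stored, so validation
            // and later use always see the same number.
            if offset < end_of_previous || offset % 4 != 0 || offset >= data_len {
                return Err("invalid offset");
            }
            end_of_previous = offset + 8; // hypothetical object size for the sketch
        }
        Ok(())
    }

    fn main() {
        let raw: Vec<u8> = [0u64, 16, 32]
            .iter()
            .flat_map(|v| v.to_le_bytes())
            .collect();
        let mut reader = UntrustedReader { data: &raw, pos: 0 };
        let mut dest = Vec::new();
        assert!(copy_and_validate_offsets(&mut reader, &mut dest, 64).is_ok());
        assert_eq!(dest, vec![0, 16, 32]);
    }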
@@ -1041,11 +1038,9 @@ impl Thread {
                .step_by(size_of::<u64>())
                .enumerate()
            {
-                let offset: usize = view
-                    .alloc
-                    .read::<u64>(index_offset)?
-                    .try_into()
-                    .map_err(|_| EINVAL)?;
+                let offset = offsets_reader.read::<u64>()?;
+                view.alloc.write(index_offset, &offset)?;
+                let offset: usize = offset.try_into().map_err(|_| EINVAL)?;
                if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
                    pr_warn!("Got transaction with invalid offset.");