author    Antti Hatala <ahatala@nvidia.com>    2010-05-20 03:48:41 -0700
committer Antti Hatala <ahatala@nvidia.com>    2010-05-20 04:25:55 -0700
commit    4bc4768e06afa53392678197d59b6394cdab5e1c
tree      f7043e35fe3becf3617aa9cd4b51e093f8661ea9
parent    0c7b09c2f248c69d7970bdbd211c4733c3e2b129
nvmap: fix unpin/pin race condition with RECLAIM_UNPINNED_VM
_nvmap_handle_unpin needs to acquire the mru_vma_lock before decrementing the pin count, to ensure that a decrement to zero and the insertion on the MRU VMA list appear atomic with respect to a second client calling _nvmap_handle_pin on the same handle. Otherwise, the two clients race and the pin operation may trigger a BUG because the handle has a valid IOVMM area but is not located on any MRU VMA list.

Also, clean up an additional allocation-inside-spinlock issue: release the MRU VMA lock before calling tegra_iovmm_create_vm, and reacquire it after the call returns.

Change-Id: If3a32e493a9222eac56a0980c10c0d4281389e7e
Reviewed-on: http://git-master/r/1448
Tested-by: Antti Hatala <ahatala@nvidia.com>
Reviewed-by: Antti Hatala <ahatala@nvidia.com>
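The ordering matters because _nvmap_get_vm assumes that any handle with pin == 0 and a live IOVMM area is on an MRU list. A minimal sketch of the fixed unpin path (simplified to the relevant fields, config ifdefs omitted; not the verbatim kernel code):

	spin_lock(&nvmap_mru_vma_lock);
	if (!atomic_dec_return(&h->pin)) {
		/* the decrement to zero and the MRU insertion are now
		 * atomic w.r.t. a concurrent _nvmap_handle_pin, which
		 * takes the same lock before walking the MRU lists */
		_nvmap_insert_mru_vma(h);
	}
	spin_unlock(&nvmap_mru_vma_lock);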
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/nvmap.c  16
1 file changed, 11 insertions, 5 deletions
diff --git a/drivers/char/nvmap.c b/drivers/char/nvmap.c
index cdcb9f63cd7e..969c2106ca64 100644
--- a/drivers/char/nvmap.c
+++ b/drivers/char/nvmap.c
@@ -910,12 +910,11 @@ static struct nvmap_handle *_nvmap_validate_get(unsigned long handle, bool su)
#endif
}
+/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
{
#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- spin_lock(&nvmap_mru_vma_lock);
list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length));
- spin_unlock(&nvmap_mru_vma_lock);
#endif
}
@@ -943,9 +942,8 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
struct tegra_iovmm_area *vm = NULL;
unsigned int i, idx;
- spin_lock(&nvmap_mru_vma_lock);
-
if (h->pgalloc.area) {
+ spin_lock(&nvmap_mru_vma_lock);
BUG_ON(list_empty(&h->pgalloc.mru_list));
list_del(&h->pgalloc.mru_list);
INIT_LIST_HEAD(&h->pgalloc.mru_list);
@@ -958,7 +956,6 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
if (vm) {
INIT_LIST_HEAD(&h->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
return vm;
}
/* attempt to re-use the most recently unpinned IOVMM area in the
@@ -966,6 +963,7 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
* evict handles (starting from the current bin) until an allocation
* succeeds or no more areas can be evicted */
+ spin_lock(&nvmap_mru_vma_lock);
mru = _nvmap_list(h->size);
if (!list_empty(mru))
evict = list_first_entry(mru, struct nvmap_handle,
@@ -993,11 +991,13 @@ static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
BUG_ON(!evict->pgalloc.area);
list_del(&evict->pgalloc.mru_list);
INIT_LIST_HEAD(&evict->pgalloc.mru_list);
+ spin_unlock(&nvmap_mru_vma_lock);
tegra_iovmm_free_vm(evict->pgalloc.area);
evict->pgalloc.area = NULL;
vm = tegra_iovmm_create_vm(nvmap_vm_client,
NULL, h->size,
_nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
+ spin_lock(&nvmap_mru_vma_lock);
}
}
@@ -1447,6 +1447,9 @@ static int _nvmap_handle_unpin(struct nvmap_handle *h)
}
BUG_ON(!h->alloc);
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_lock(&nvmap_mru_vma_lock);
+#endif
if (!atomic_dec_return(&h->pin)) {
if (h->heap_pgalloc && h->pgalloc.area) {
/* if a secure handle is clean (i.e., mapped into
@@ -1459,6 +1462,9 @@ static int _nvmap_handle_unpin(struct nvmap_handle *h)
ret=1;
}
}
+#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
+ spin_unlock(&nvmap_mru_vma_lock);
+#endif
_nvmap_handle_put(h);
return ret;
}
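The second change follows the standard drop/reacquire pattern: a spinlock cannot be held across tegra_iovmm_create_vm, which may sleep while allocating the new IOVMM area. A sketch of the pattern as used in the eviction loop above (simplified, error handling omitted):

	spin_lock(&nvmap_mru_vma_lock);
	/* unlink the victim from its MRU list while still protected */
	list_del(&evict->pgalloc.mru_list);
	INIT_LIST_HEAD(&evict->pgalloc.mru_list);
	spin_unlock(&nvmap_mru_vma_lock);

	tegra_iovmm_free_vm(evict->pgalloc.area);
	evict->pgalloc.area = NULL;

	/* may sleep; must not be called with any spinlock held */
	vm = tegra_iovmm_create_vm(nvmap_vm_client, NULL, h->size,
				   _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));

	/* retake the lock before touching the MRU lists again */
	spin_lock(&nvmap_mru_vma_lock);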