Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_pp.c')
 drivers/video/tegra/nvmap/nvmap_pp.c | 39 ++++++++++++++-------------------------
 1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 4748155871a8..b2878f0acb46 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -32,7 +32,7 @@
#include "nvmap_priv.h"
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
-#define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
+#define PENDING_PAGES_SIZE 128
#define MIN_AVAILABLE_MB 128
static bool enable_pp = 1;
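
Before this change, the pending_pages batch scaled with the page size: SZ_1M / PAGE_SIZE is 256 entries on a 4 KiB-page kernel, so the fixed value of 128 halves the array there and decouples it from PAGE_SIZE. A minimal userspace sketch of that arithmetic (SZ_1M and the 4 KiB PAGE_SIZE are assumed here, matching their usual kernel values):

	/* Illustrative only, not kernel code: compares the old page-size-
	 * dependent batch size with the new fixed one. */
	#include <stdio.h>

	#define SZ_1M     (1024 * 1024)	/* as in include/linux/sizes.h */
	#define PAGE_SIZE 4096			/* assuming 4 KiB pages */

	int main(void)
	{
		printf("old PENDING_PAGES_SIZE: %d\n", SZ_1M / PAGE_SIZE);	/* 256 */
		printf("new PENDING_PAGES_SIZE: %d\n", 128);			/* fixed */
		return 0;
	}
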
@@ -41,7 +41,6 @@ static int pool_size;
static struct task_struct *background_allocator;
static struct page *pending_pages[PENDING_PAGES_SIZE];
static atomic_t bg_pages_to_fill;
-static atomic_t pp_dirty;
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
@@ -57,21 +56,6 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
#define pp_hit_add(pool, nr) __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr) __pp_dbg_var_add(&(pool)->misses, nr)
-static void pp_clean_cache(void)
-{
- if (atomic_read(&pp_dirty)) {
- /*
- * Make sure any data in the caches is cleaned out before
- * passing these pages to userspace. Otherwise, it can lead to
- * corruption in pages that get mapped as something other than
- * WB in userspace, and to leaked kernel data.
- */
- inner_clean_cache_all();
- outer_clean_all();
- atomic_set(&pp_dirty, 0);
- }
-}
-
/*
* Allocate n pages one by one. Not the most efficient allocation scheme ever;
* however, it will make it easier later on to handle single or small number of
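
The deleted pp_clean_cache() implemented a flag-and-flush scheme: background fills marked the pool dirty, and the next consumer cleaned the inner and outer caches once for all pending pages. Reduced to portable C11 atomics, the pattern looks roughly like this (clean_all_caches() is a hypothetical stand-in for inner_clean_cache_all() plus outer_clean_all()):

	#include <stdatomic.h>

	static atomic_int pp_dirty;

	/* Hypothetical stand-in for inner_clean_cache_all()/outer_clean_all(). */
	static void clean_all_caches(void) { }

	/* Producer side: a fill marks the pool dirty and defers the flush. */
	static void mark_pool_dirty(void)
	{
		atomic_store(&pp_dirty, 1);
	}

	/* Consumer side: flush at most once since the last fill. */
	static void pp_clean_cache_sketch(void)
	{
		if (atomic_load(&pp_dirty)) {
			clean_all_caches();
			atomic_store(&pp_dirty, 0);
		}
	}
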
@@ -134,7 +118,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
}
nvmap_page_pool_lock(pool);
- atomic_set(&pp_dirty, 1);
i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
nvmap_page_pool_unlock(pool);
pages -= nr;
@@ -142,10 +125,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
for (; i < nr; i++)
__free_page(pending_pages[i]);
- /* clean cache in the background so that allocations immediately
- * after fill don't suffer the cache clean overhead.
- */
- pp_clean_cache();
}
/*
@@ -232,7 +211,6 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
BUG_ON(pool->count == 0);
- pp_clean_cache();
page = pool->page_array[pool->alloc];
pool->page_array[pool->alloc] = NULL;
nvmap_pp_alloc_inc(pool);
@@ -266,8 +244,6 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
if (!enable_pp || !pool->page_array)
return 0;
- pp_clean_cache();
-
real_nr = min_t(u32, nr, pool->count);
while (real_nr--) {
@@ -359,6 +335,19 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
return ind;
}
+bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
+{
+ bool ret = false;
+
+ if (pool) {
+ nvmap_page_pool_lock(pool);
+ ret = nvmap_page_pool_fill_locked(pool, page);
+ nvmap_page_pool_unlock(pool);
+ }
+
+ return ret;
+}
+
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
return pool->count;
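
The new nvmap_page_pool_fill() gives callers a locked single-page variant of the fill path. A hypothetical caller sketch, assuming (as the bool return suggests) that nvmap_page_pool_fill_locked() reports whether the page was absorbed; example_return_page() itself is not part of this commit:

	#include "nvmap_priv.h"

	/* Hand one page back to the pool; free it ourselves when the pool
	 * rejects it (pool disabled, pool full, or pool == NULL). */
	static void example_return_page(struct nvmap_page_pool *pool,
					struct page *page)
	{
		if (!nvmap_page_pool_fill(pool, page))
			__free_page(page);
	}
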