author    Krishna Reddy <vdumpa@nvidia.com>   2014-12-16 15:51:03 -0800
committer Winnie Hsu <whsu@nvidia.com>        2015-05-12 13:27:22 -0700
commit    7eb081d6d596e5e76239130acea69ebd36b4582c (patch)
tree      a77117e0bb78e958e48614dce8863f326c656d86 /drivers/video/tegra
parent    40ac5130e5a7554b9422664e30cca40ba906ae83 (diff)
Revert "video: tegra: nvmap: clean cache during page allocations into page pool"
This reverts commit b1d8c6c9415df111e4af1425a3d84b25c00a9c06.

Change-Id: Ide7e78780722bdd30426089f38155c7cabf28934
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/664669
GVS: Gerrit_Virtual_Submit
Reviewed-on: http://git-master/r/736423
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Tested-by: Alex Waterman <alexw@nvidia.com>
Diffstat (limited to 'drivers/video/tegra')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c |  5
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_pp.c     | 39
2 files changed, 16 insertions(+), 28 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 4281dafa578e..d166ffca499c 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -220,11 +220,10 @@ static int handle_page_alloc(struct nvmap_client *client,
* FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
* to use the flush cache version.
*/
- if (page_index < nr_page)
#ifdef ARM64
- nvmap_clean_cache(&pages[page_index], nr_page - page_index);
+ nvmap_clean_cache(pages, nr_page);
#else
- nvmap_flush_cache(&pages[page_index], nr_page - page_index);
+ nvmap_flush_cache(pages, nr_page);
#endif
h->size = size;
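The hunk above restores unconditional cache maintenance over the whole pages[] array once allocation succeeds: a clean-only pass on ARM64 and a full flush on ARMv7, which (per the FIXME) has no clean-only per-page helper here. A minimal sketch of that choice, using the nvmap_clean_cache()/nvmap_flush_cache() calls from the diff; the wrapper name is illustrative, not driver code:

/* Illustrative wrapper, assuming the two nvmap helpers shown in the diff. */
static void handle_cache_maint(struct page **pages, u32 nr_page)
{
#ifdef ARM64
	/* ARM64: clean (write back) dirty lines so non-WB mappings see the data. */
	nvmap_clean_cache(pages, nr_page);
#else
	/* ARMv7: no clean-only per-page helper, so clean + invalidate instead. */
	nvmap_flush_cache(pages, nr_page);
#endif
}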
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 4748155871a8..b2878f0acb46 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -32,7 +32,7 @@
#include "nvmap_priv.h"
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
-#define PENDING_PAGES_SIZE (SZ_1M / PAGE_SIZE)
+#define PENDING_PAGES_SIZE 128
#define MIN_AVAILABLE_MB 128
static bool enable_pp = 1;
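For context on the PENDING_PAGES_SIZE change above: with the usual 4 KiB PAGE_SIZE, the reverted expression SZ_1M / PAGE_SIZE evaluates to 256 entries, so restoring the literal 128 halves the pending-pages scratch array to 512 KiB worth of pages per fill batch. A small stand-alone check of that arithmetic; the macros below are local stand-ins assuming 4 KiB pages, not the kernel's own:

#define LOCAL_SZ_1M   0x00100000u
#define LOCAL_PAGE_SZ 4096u	/* assumption: 4 KiB pages */

_Static_assert(LOCAL_SZ_1M / LOCAL_PAGE_SZ == 256,
	       "reverted expression gave 256 pending pages");
_Static_assert(128u * LOCAL_PAGE_SZ == 512u * 1024u,
	       "restored literal 128 covers 512 KiB per batch");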
@@ -41,7 +41,6 @@ static int pool_size;
static struct task_struct *background_allocator;
static struct page *pending_pages[PENDING_PAGES_SIZE];
static atomic_t bg_pages_to_fill;
-static atomic_t pp_dirty;
#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
@@ -57,21 +56,6 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
#define pp_hit_add(pool, nr) __pp_dbg_var_add(&(pool)->hits, nr)
#define pp_miss_add(pool, nr) __pp_dbg_var_add(&(pool)->misses, nr)
-static void pp_clean_cache(void)
-{
- if (atomic_read(&pp_dirty)) {
- /*
- * Make sure any data in the caches is cleaned out before
- * passing these pages to userspace. otherwise, It can lead to
- * corruption in pages that get mapped as something
- * other than WB in userspace and leaked kernel data.
- */
- inner_clean_cache_all();
- outer_clean_all();
- atomic_set(&pp_dirty, 0);
- }
-}
-
/*
* Allocate n pages one by one. Not the most efficient allocation scheme ever;
* however, it will make it easier later on to handle single or small number of
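The hunk above drops the deferred cache-clean scheme: pp_dirty was set whenever the pool was filled, and pp_clean_cache() performed one full inner + outer cache clean before pooled pages could reach userspace, so non-write-back user mappings never observed stale or leaked kernel data. A generic, self-contained sketch of that dirty-flag/lazy-clean pattern, with illustrative names and C11 atomics rather than the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pool_dirty;

/* Producer side: adding pages makes the pool's cache state untrusted. */
static void mark_pool_dirty(void)
{
	atomic_store(&pool_dirty, true);
}

/* Consumer side: pay for the expensive full clean at most once per dirtying. */
static void clean_if_dirty(void (*clean_all_caches)(void))
{
	if (atomic_exchange(&pool_dirty, false))
		clean_all_caches();
}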
@@ -134,7 +118,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
}
nvmap_page_pool_lock(pool);
- atomic_set(&pp_dirty, 1);
i = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, nr);
nvmap_page_pool_unlock(pool);
pages -= nr;
@@ -142,10 +125,6 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
for (; i < nr; i++)
__free_page(pending_pages[i]);
- /* clean cache in the background so that allocations immediately
- * after fill don't suffer the cache clean overhead.
- */
- pp_clean_cache();
}
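With the clean removed, the background fill step reduces to: fill as many of the pending pages into the pool as fit while holding the pool lock, then return the leftovers to the page allocator. A sketch of that step using the functions visible in the diff; the wrapper name and omitted error handling are illustrative:

static void fill_one_batch(struct nvmap_page_pool *pool,
			   struct page **pending, int nr)
{
	int used;

	nvmap_page_pool_lock(pool);
	used = __nvmap_page_pool_fill_lots_locked(pool, pending, nr);
	nvmap_page_pool_unlock(pool);

	/* Pages the pool had no room for go straight back to the allocator. */
	while (used < nr)
		__free_page(pending[used++]);
}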
/*
@@ -232,7 +211,6 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
BUG_ON(pool->count == 0);
- pp_clean_cache();
page = pool->page_array[pool->alloc];
pool->page_array[pool->alloc] = NULL;
nvmap_pp_alloc_inc(pool);
@@ -266,8 +244,6 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
if (!enable_pp || !pool->page_array)
return 0;
- pp_clean_cache();
-
real_nr = min_t(u32, nr, pool->count);
while (real_nr--) {
@@ -359,6 +335,19 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
return ind;
}
+bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
+{
+ bool ret = false;
+
+ if (pool) {
+ nvmap_page_pool_lock(pool);
+ ret = nvmap_page_pool_fill_locked(pool, page);
+ nvmap_page_pool_unlock(pool);
+ }
+
+ return ret;
+}
+
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
return pool->count;
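The final hunk restores nvmap_page_pool_fill(), a wrapper that takes the pool lock around a single-page fill and tolerates a NULL pool. A hedged usage sketch for a hypothetical free path (release_one_page() is illustrative, not part of the driver): try to return the page to the pool, and fall back to __free_page() if the pool declines it.

static void release_one_page(struct nvmap_page_pool *pool, struct page *page)
{
	/* Prefer recycling through the pool; free only if it was not accepted. */
	if (!nvmap_page_pool_fill(pool, page))
		__free_page(page);
}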