diff options
author | Alex Waterman <alexw@nvidia.com> | 2013-11-25 16:00:53 -0800 |
---|---|---|
committer | Krishna Reddy <vdumpa@nvidia.com> | 2014-03-03 09:41:31 -0800 |
commit | 6722ff6f99cdd3de4e3bcc6c4cac5cebbcb5f4b9 (patch) | |
tree | e6e8bf2ed09fe80fc2ce719855dc3fcf2d017da5 /drivers | |
parent | 137f159e552f1f3014dfbedc74c53d487bb9b99a (diff) |
video: tegra: nvmap: Merge page pools
Instead of maintaining separate page pools for each type of
memory (UC, WC, IWB, WB), maintain only one page pool. On
future ARM cores, CPA (change page attributes) is not necessary,
since different mappings are acceptable for the same physical
page.
Remove CPA support in NvMap.
User-space mappings that are not WB are configured at the time
the page is mapped into user space.
Bug 1371433
Bug 1392833
Change-Id: I153664fc96107245fe03a31d9f9213915c4398c2
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/335254
Reviewed-by: Automatic_Commit_Validation_User
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap_dev.c | 19 | ||||
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap_handle.c | 50 | ||||
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap_mm.c | 39 | ||||
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap_pp.c | 160 | ||||
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap_priv.h | 22 |
5 files changed, 59 insertions, 231 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c index f6c9c76c1c70..7d610e5a808f 100644 --- a/drivers/video/tegra/nvmap/nvmap_dev.c +++ b/drivers/video/tegra/nvmap/nvmap_dev.c @@ -38,6 +38,7 @@ #include <linux/resource.h> #include <linux/security.h> #include <linux/stat.h> +#include <linux/kthread.h> #include <asm/cputype.h> @@ -1248,8 +1249,9 @@ static int nvmap_probe(struct platform_device *pdev) mutex_init(&dev->iovmm_master.pin_lock); #ifdef CONFIG_NVMAP_PAGE_POOLS - for (i = 0; i < NVMAP_NUM_POOLS; i++) - nvmap_page_pool_init(&dev->iovmm_master.pools[i], i); + e = nvmap_page_pool_init(dev); + if (e) + goto fail; #endif dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE, NULL); @@ -1371,16 +1373,9 @@ static int nvmap_probe(struct platform_device *pdev) debugfs_create_file("procrank", S_IRUGO, iovmm_root, dev, &debug_iovmm_procrank_fops); #ifdef CONFIG_NVMAP_PAGE_POOLS - for (i = 0; i < NVMAP_NUM_POOLS; i++) { - char name[40]; - char *memtype_string[] = {"uc", "wc", - "iwb", "wb"}; - sprintf(name, "%s_page_pool_available_pages", - memtype_string[i]); - debugfs_create_u32(name, S_IRUGO, - iovmm_root, - &dev->iovmm_master.pools[i].npages); - } + debugfs_create_u32("page_pool_available_pages", + S_IRUGO, iovmm_root, + &dev->iovmm_master.pool.npages); #endif } #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c index 44a317f30d7b..8a92326e9916 100644 --- a/drivers/video/tegra/nvmap/nvmap_handle.c +++ b/drivers/video/tegra/nvmap/nvmap_handle.c @@ -72,7 +72,6 @@ static inline void altfree(void *ptr, size_t len) void _nvmap_handle_free(struct nvmap_handle *h) { - int err; struct nvmap_share *share = nvmap_get_share_from_dev(h->dev); unsigned int i, nr_page, page_index = 0; #ifdef CONFIG_NVMAP_PAGE_POOLS @@ -101,31 +100,18 @@ void _nvmap_handle_free(struct nvmap_handle *h) BUG_ON(!h->pgalloc.pages); #ifdef CONFIG_NVMAP_PAGE_POOLS - if 
(h->flags < NVMAP_NUM_POOLS) - pool = &share->pools[h->flags]; + pool = &share->pool; while (page_index < nr_page) { if (!nvmap_page_pool_release(pool, h->pgalloc.pages[page_index])) break; + page_index++; } -#endif - if (page_index == nr_page) - goto skip_attr_restore; - - /* Restore page attributes. */ - if (h->flags == NVMAP_HANDLE_WRITE_COMBINE || - h->flags == NVMAP_HANDLE_UNCACHEABLE || - h->flags == NVMAP_HANDLE_INNER_CACHEABLE) { - /* This op should never fail. */ - err = nvmap_set_pages_array_wb(&h->pgalloc.pages[page_index], - nr_page - page_index); - BUG_ON(err); - } +#endif -skip_attr_restore: for (i = page_index; i < nr_page; i++) __free_page(h->pgalloc.pages[i]); @@ -198,10 +184,7 @@ static int handle_page_alloc(struct nvmap_client *client, } else { #ifdef CONFIG_NVMAP_PAGE_POOLS - if (h->flags < NVMAP_NUM_POOLS) - pool = &share->pools[h->flags]; - else - BUG(); + pool = &share->pool; for (i = 0; i < nr_page; i++) { /* Get pages from pool, if available. */ @@ -236,24 +219,17 @@ static int handle_page_alloc(struct nvmap_client *client, } } - if (nr_page == page_index) - goto skip_attr_change; - - /* Update the pages mapping in kernel page table. */ - if (h->flags == NVMAP_HANDLE_WRITE_COMBINE) - err = nvmap_set_pages_array_wc(&pages[page_index], - nr_page - page_index); - else if (h->flags == NVMAP_HANDLE_UNCACHEABLE) - err = nvmap_set_pages_array_uc(&pages[page_index], - nr_page - page_index); - else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) - err = nvmap_set_pages_array_iwb(&pages[page_index], - nr_page - page_index); + /* + * Make sure any data in the caches is flushed out before + * passing these pages to userspace. otherwise, It can lead to + * corruption in pages that get mapped as something other than WB in + * userspace and leaked kernel data structures. 
+ */ + nvmap_flush_cache(pages, nr_page); if (err) goto fail; -skip_attr_change: if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) nvmap_free_pte(nvmap_dev, pte); h->size = size; @@ -264,10 +240,6 @@ skip_attr_change: fail: if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) nvmap_free_pte(nvmap_dev, pte); - if (i) { - err = nvmap_set_pages_array_wb(pages, i); - BUG_ON(err); - } while (i--) __free_page(pages[i]); altfree(pages, nr_page * sizeof(*pages)); diff --git a/drivers/video/tegra/nvmap/nvmap_mm.c b/drivers/video/tegra/nvmap/nvmap_mm.c index 5d09979ec9a9..235ba4af6154 100644 --- a/drivers/video/tegra/nvmap/nvmap_mm.c +++ b/drivers/video/tegra/nvmap/nvmap_mm.c @@ -72,42 +72,3 @@ void nvmap_flush_cache(struct page **pages, int numpages) #endif } } - -int nvmap_set_pages_array_uc(struct page **pages, int addrinarray) -{ -#ifdef CONFIG_NVMAP_CPA - return set_pages_array_uc(pages, addrinarray); -#else - nvmap_flush_cache(pages, addrinarray); - return 0; -#endif -} - -int nvmap_set_pages_array_wc(struct page **pages, int addrinarray) -{ -#ifdef CONFIG_NVMAP_CPA - return set_pages_array_wc(pages, addrinarray); -#else - nvmap_flush_cache(pages, addrinarray); - return 0; -#endif -} - -int nvmap_set_pages_array_iwb(struct page **pages, int addrinarray) -{ -#ifdef CONFIG_NVMAP_CPA - return set_pages_array_iwb(pages, addrinarray); -#else - nvmap_flush_cache(pages, addrinarray); - return 0; -#endif -} - -int nvmap_set_pages_array_wb(struct page **pages, int addrinarray) -{ -#ifdef CONFIG_NVMAP_CPA - return set_pages_array_wb(pages, addrinarray); -#else - return 0; -#endif -} diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c index ad6d9a901235..f37bab1cc164 100644 --- a/drivers/video/tegra/nvmap/nvmap_pp.c +++ b/drivers/video/tegra/nvmap/nvmap_pp.c @@ -3,7 +3,7 @@ * * Manage page pools to speed up page allocation. * - * Copyright (c) 2009-2013, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2009-2014, NVIDIA CORPORATION. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -29,14 +29,7 @@ #define NVMAP_TEST_PAGE_POOL_SHRINKER 1 static bool enable_pp = 1; -static int pool_size[NVMAP_NUM_POOLS]; - -static char *s_memtype_str[] = { - "uc", - "wc", - "iwb", - "wb", -}; +static int pool_size; static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool) { @@ -107,37 +100,27 @@ static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool) static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free) { - int err; int i = nr_free; - int idx = 0; struct page *page; if (!nr_free) return nr_free; + nvmap_page_pool_lock(pool); while (i) { page = nvmap_page_pool_alloc_locked(pool); if (!page) break; - pool->shrink_array[idx++] = page; + __free_page(page); i--; } - - if (idx) { - /* This op should never fail. */ - err = nvmap_set_pages_array_wb(pool->shrink_array, idx); - BUG_ON(err); - } - - while (idx--) - __free_page(pool->shrink_array[idx]); nvmap_page_pool_unlock(pool); + return i; } ulong nvmap_page_pool_get_unused_pages(void) { - unsigned int i; int total = 0; struct nvmap_share *share; @@ -148,8 +131,7 @@ ulong nvmap_page_pool_get_unused_pages(void) if (!share) return 0; - for (i = 0; i < NVMAP_NUM_POOLS; i++) - total += nvmap_page_pool_get_available_count(&share->pools[i]); + total = nvmap_page_pool_get_available_count(&share->pool); return total; } @@ -159,7 +141,6 @@ static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size) int available_pages; int pages_to_release = 0; struct page **page_array = NULL; - struct page **shrink_array = NULL; if (size == pool->max_pages) return; @@ -175,30 +156,25 @@ repeat: if (size == 0) { vfree(pool->page_array); - vfree(pool->shrink_array); - pool->page_array = pool->shrink_array = NULL; + pool->page_array = NULL; goto out; } page_array = vzalloc(sizeof(struct page *) * size); - 
shrink_array = vzalloc(sizeof(struct page *) * size); - if (!page_array || !shrink_array) + if (!page_array) goto fail; memcpy(page_array, pool->page_array, pool->npages * sizeof(struct page *)); vfree(pool->page_array); - vfree(pool->shrink_array); pool->page_array = page_array; - pool->shrink_array = shrink_array; out: - pr_debug("%s pool resized to %d from %d pages", - s_memtype_str[pool->flags], size, pool->max_pages); + pr_debug("page pool resized to %d from %d pages", size, + pool->max_pages); pool->max_pages = size; goto exit; fail: vfree(page_array); - vfree(shrink_array); pr_err("failed"); exit: nvmap_page_pool_unlock(pool); @@ -207,11 +183,7 @@ exit: static int nvmap_page_pool_shrink(struct shrinker *shrinker, struct shrink_control *sc) { - unsigned int i; - unsigned int pool_offset; - struct nvmap_page_pool *pool; int shrink_pages = sc->nr_to_scan; - static atomic_t start_pool = ATOMIC_INIT(-1); struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); if (!shrink_pages) @@ -219,12 +191,7 @@ static int nvmap_page_pool_shrink(struct shrinker *shrinker, pr_debug("sh_pages=%d", shrink_pages); - for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) { - pool_offset = atomic_add_return(1, &start_pool) % - NVMAP_NUM_POOLS; - pool = &share->pools[pool_offset]; - shrink_pages = nvmap_page_pool_free(pool, shrink_pages); - } + shrink_pages = nvmap_page_pool_free(&share->pool, shrink_pages); out: return nvmap_page_pool_get_unused_pages(); } @@ -312,105 +279,57 @@ static struct kernel_param_ops enable_pp_ops = { module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644); -#define POOL_SIZE_SET(m, i) \ -static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \ -{ \ - struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \ - param_set_int(arg, kp); \ - nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \ - return 0; \ +static int pool_size_set(const char *arg, const struct kernel_param *kp) +{ + struct nvmap_share 
*share = nvmap_get_share_from_dev(nvmap_dev); + param_set_int(arg, kp); + nvmap_page_pool_resize(&share->pool, pool_size); + return 0; } -#define POOL_SIZE_GET(m) \ -static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \ -{ \ - return param_get_int(buff, kp); \ +static int pool_size_get(char *buff, const struct kernel_param *kp) +{ + return param_get_int(buff, kp); } -#define POOL_SIZE_OPS(m) \ -static struct kernel_param_ops pool_size_##m##_ops = { \ - .get = pool_size_##m##_get, \ - .set = pool_size_##m##_set, \ +static struct kernel_param_ops pool_size_ops = { + .get = pool_size_get, + .set = pool_size_set, }; -#define POOL_SIZE_MOUDLE_PARAM_CB(m, i) \ -module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644) - -POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE); -POOL_SIZE_GET(uc); -POOL_SIZE_OPS(uc); -POOL_SIZE_MOUDLE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE); - -POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE); -POOL_SIZE_GET(wc); -POOL_SIZE_OPS(wc); -POOL_SIZE_MOUDLE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE); +module_param_cb(pool_size, &pool_size_ops, &pool_size, 0644); -POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE); -POOL_SIZE_GET(iwb); -POOL_SIZE_OPS(iwb); -POOL_SIZE_MOUDLE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE); - -POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE); -POOL_SIZE_GET(wb); -POOL_SIZE_OPS(wb); -POOL_SIZE_MOUDLE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE); - -int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags) +int nvmap_page_pool_init(struct nvmap_device *dev) { static int reg = 1; struct sysinfo info; + struct nvmap_page_pool *pool = &dev->iovmm_master.pool; #ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP int i; - int err; struct page *page; int pages_to_fill; int highmem_pages = 0; - typedef int (*set_pages_array) (struct page **pages, int addrinarray); - set_pages_array s_cpa[] = { - nvmap_set_pages_array_uc, - nvmap_set_pages_array_wc, - nvmap_set_pages_array_iwb, - nvmap_set_pages_array_wb - }; #endif - 
BUG_ON(flags >= NVMAP_NUM_POOLS); memset(pool, 0x0, sizeof(*pool)); mutex_init(&pool->lock); - pool->flags = flags; - - /* No default pool for cached memory. */ - if (flags == NVMAP_HANDLE_CACHEABLE) - return 0; - -#if !defined(CONFIG_OUTER_CACHE) - /* If outer cache is not enabled or don't exist, cacheable and - * inner cacheable memory are same. For cacheable memory, there - * is no need of page pool as there is no need to flush cache and - * change page attributes. - */ - if (flags == NVMAP_HANDLE_INNER_CACHEABLE) - return 0; -#endif si_meminfo(&info); - if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE) + if (!CONFIG_NVMAP_PAGE_POOL_SIZE) /* Use 3/8th of total ram for page pools. * 1/8th for uc, 1/8th for wc and 1/8th for iwb. */ - pool->max_pages = info.totalram >> 3; + pool->max_pages = (info.totalram >> 3) * 3; else pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE; if (pool->max_pages <= 0 || pool->max_pages >= info.totalram) goto fail; - pool_size[flags] = pool->max_pages; - pr_info("nvmap %s page pool size=%d pages\n", - s_memtype_str[flags], pool->max_pages); + + pool_size = pool->max_pages; + pr_info("nvmap page pool size: %d pages\n", pool->max_pages); pool->page_array = vzalloc(sizeof(struct page *) * pool->max_pages); - pool->shrink_array = vzalloc(sizeof(struct page *) * pool->max_pages); - if (!pool->page_array || !pool->shrink_array) + if (!pool->page_array) goto fail; if (reg) { @@ -427,30 +346,25 @@ int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags) for (i = 0; i < pages_to_fill; i++) { page = alloc_page(GFP_NVMAP); if (!page) - goto do_cpa; + goto done; if (!nvmap_page_pool_release_locked(pool, page)) { __free_page(page); - goto do_cpa; + goto done; } if (PageHighMem(page)) highmem_pages++; } si_meminfo(&info); - pr_info("nvmap pool = %s, highmem=%d, pool_size=%d," + pr_info("highmem=%d, pool_size=%d," "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n", - s_memtype_str[flags], highmem_pages, pool->max_pages, + 
highmem_pages, pool->max_pages, info.totalram, info.freeram, info.totalhigh, info.freehigh); -do_cpa: - if (pool->npages) { - err = (*s_cpa[flags])(pool->page_array, pool->npages); - BUG_ON(err); - } +done: nvmap_page_pool_unlock(pool); #endif return 0; fail: pool->max_pages = 0; - vfree(pool->shrink_array); vfree(pool->page_array); return -ENOMEM; } diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h index ac65f7e4289b..45be8645acca 100644 --- a/drivers/video/tegra/nvmap/nvmap_priv.h +++ b/drivers/video/tegra/nvmap/nvmap_priv.h @@ -175,14 +175,12 @@ struct nvmap_handle_ref { struct nvmap_page_pool { struct mutex lock; - int npages; - struct page **page_array; - struct page **shrink_array; + int npages; /* Number of zeroed pages. */ + struct page **page_array; /* For zeroed pages. */ int max_pages; - int flags; }; -int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags); +int nvmap_page_pool_init(struct nvmap_device *dev); struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool); bool nvmap_page_pool_release(struct nvmap_page_pool *pool, struct page *page); #endif @@ -192,15 +190,7 @@ struct nvmap_share { wait_queue_head_t pin_wait; struct mutex pin_lock; #ifdef CONFIG_NVMAP_PAGE_POOLS - union { - struct nvmap_page_pool pools[NVMAP_NUM_POOLS]; - struct { - struct nvmap_page_pool uc_pool; - struct nvmap_page_pool wc_pool; - struct nvmap_page_pool iwb_pool; - struct nvmap_page_pool wb_pool; - }; - }; + struct nvmap_page_pool pool; #endif }; @@ -431,10 +421,6 @@ extern void __flush_dcache_all(void *arg); void inner_flush_cache_all(void); void inner_clean_cache_all(void); -int nvmap_set_pages_array_uc(struct page **pages, int addrinarray); -int nvmap_set_pages_array_wc(struct page **pages, int addrinarray); -int nvmap_set_pages_array_iwb(struct page **pages, int addrinarray); -int nvmap_set_pages_array_wb(struct page **pages, int addrinarray); void nvmap_flush_cache(struct page **pages, int numpages); /* 
Internal API to support dmabuf */ |