author    Alex Waterman <alexw@nvidia.com>    2013-11-27 15:34:19 -0800
committer Krishna Reddy <vdumpa@nvidia.com>   2014-03-12 10:08:23 -0700
commit    af38c644126f547c1f10bbf71027883e1ea960e5 (patch)
tree      dcd7dcf4b6d59b0834ec3b59ed8fa8eec617c9a2 /drivers/video/tegra/nvmap/nvmap_pp.c
parent    c86af9d01a10eaf645b84790eafc5533c95a5022 (diff)
video: tegra: nvmap: Page pool rework
In order to better facilitate zeroing pages in the background, this patch reworks the page pool mechanics to use a circular buffer of page pointers. The buffer works by keeping track of an alloc offset and a fill offset that move linearly through the buffer; if these indexes exceed the length of the pool, they wrap. By inspecting the values of these indexes and the underlying page pointer, it is possible to check whether the pool is full, empty, or somewhere in between.

Two new page pool functions have been added as well:

  __nvmap_page_pool_fill_lots()
  __nvmap_page_pool_alloc_lots()

These functions avoid some of the overhead of acquiring and releasing the pool lock for every single page, which the normal alloc/fill functions incur:

  nvmap_page_pool_fill()
  nvmap_page_pool_alloc()

Also, the debugging and sanity checks in this code are now guarded by a config option so that, in general, the overhead of sanity checking is avoided. If issues ever arise, the checks can be enabled for a small performance cost.

Bug 1371433
Bug 1392833

Change-Id: I1921459f7951237adfb36c8526872554e7311c8d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/336379
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Tested-by: Krishna Reddy <vdumpa@nvidia.com>
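The wrap-around helpers and empty/full tests used throughout the diff below (pp_empty(), pp_full(), nvmap_pp_alloc_inc(), nvmap_pp_fill_inc()) are defined in a header that is not part of this diff. The following is a minimal sketch of how they could look, inferred from the way the patch uses them; the struct layout is abridged (the debug counters are omitted) and the helper bodies are assumptions, not the patch's actual definitions.

    #include <linux/mutex.h>
    #include <linux/mm_types.h>

    /* Abridged pool state, inferred from this diff. */
    struct nvmap_page_pool {
    	struct mutex lock;
    	u32 alloc;		/* index of the next page to hand out */
    	u32 fill;		/* index of the next free slot to fill */
    	u32 count;		/* pages currently held in the pool */
    	u32 length;		/* capacity of page_array */
    	struct page **page_array;
    };

    /* Both offsets move linearly and wrap at the pool length. */
    static inline void nvmap_pp_alloc_inc(struct nvmap_page_pool *pool)
    {
    	if (++pool->alloc >= pool->length)
    		pool->alloc = 0;
    }

    static inline void nvmap_pp_fill_inc(struct nvmap_page_pool *pool)
    {
    	if (++pool->fill >= pool->length)
    		pool->fill = 0;
    }

    /*
    * When the two offsets meet, the pool is either empty or full; the
    * page pointer under the offset disambiguates, because the alloc
    * paths NULL each slot as its page is taken out.
    */
    static inline bool pp_empty(struct nvmap_page_pool *pool)
    {
    	return pool->fill == pool->alloc && !pool->page_array[pool->alloc];
    }

    static inline bool pp_full(struct nvmap_page_pool *pool)
    {
    	return pool->fill == pool->alloc && pool->page_array[pool->fill];
    }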
Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_pp.c')
-rw-r--r--    drivers/video/tegra/nvmap/nvmap_pp.c    215
1 file changed, 169 insertions, 46 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index f37bab1cc164..559214e990f5 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -20,6 +20,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
@@ -31,6 +33,20 @@
static bool enable_pp = 1;
static int pool_size;
+#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
+static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
+{
+ *dbg_var += nr;
+}
+#else
+#define __pp_dbg_var_add(dbg_var, nr)
+#endif
+
+#define pp_alloc_add(pool, nr) __pp_dbg_var_add(&(pool)->allocs, nr)
+#define pp_fill_add(pool, nr) __pp_dbg_var_add(&(pool)->fills, nr)
+#define pp_hit_add(pool, nr) __pp_dbg_var_add(&(pool)->hits, nr)
+#define pp_miss_add(pool, nr) __pp_dbg_var_add(&(pool)->misses, nr)
+
static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
mutex_lock(&pool->lock);
@@ -41,19 +57,75 @@ static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
mutex_unlock(&pool->lock);
}
+/*
+ * This removes a page from the page pool.
+ */
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
{
- struct page *page = NULL;
+ struct page *page;
+
+ if (pp_empty(pool)) {
+ pp_miss_add(pool, 1);
+ return NULL;
+ }
- if (pool->npages > 0) {
- page = pool->page_array[--pool->npages];
- pool->page_array[pool->npages] = NULL;
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
+ BUG_ON(pool->count == 0);
+
+ page = pool->page_array[pool->alloc];
+ pool->page_array[pool->alloc] = NULL;
+ nvmap_pp_alloc_inc(pool);
+ pool->count--;
+
+ /* Sanity check. */
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
atomic_dec(&page->_count);
BUG_ON(atomic_read(&page->_count) != 1);
}
+
+ pp_alloc_add(pool, 1);
+ pp_hit_add(pool, 1);
+
return page;
}
+/*
+ * Alloc a bunch of pages from the page pool. This will alloc as many as it can
+ * and return the number of pages allocated. Pages are placed into the passed
+ * array in a linear fashion starting from index 0.
+ *
+ * You must lock the page pool before using this.
+ */
+int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
+ struct page **pages, u32 nr)
+{
+ u32 real_nr;
+ u32 ind = 0;
+
+ real_nr = min(nr, pool->count);
+
+ while (real_nr--) {
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
+ BUG_ON(pp_empty(pool));
+ BUG_ON(!pool->page_array[pool->alloc]);
+ }
+ pages[ind++] = pool->page_array[pool->alloc];
+ pool->page_array[pool->alloc] = NULL;
+ nvmap_pp_alloc_inc(pool);
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
+ atomic_dec(&pages[ind - 1]->_count);
+ BUG_ON(atomic_read(&pages[ind - 1]->_count) != 1);
+ }
+ }
+
+ pool->count -= ind;
+ pp_alloc_add(pool, ind);
+ pp_hit_add(pool, ind);
+ pp_miss_add(pool, nr - ind);
+
+ return ind;
+}
+
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
{
struct page *page = NULL;
@@ -66,36 +138,82 @@ struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
return page;
}
-static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
- struct page *page)
+/*
+ * This adds a page to the pool. Returns true iff the passed page is added.
+ * That means if the pool is full this operation will fail.
+ */
+static bool nvmap_page_pool_fill_locked(struct nvmap_page_pool *pool,
+ struct page *page)
{
- int ret = false;
+ if (pp_full(pool))
+ return false;
- if (enable_pp && pool->npages < pool->max_pages) {
+ /* Sanity check. */
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
atomic_inc(&page->_count);
BUG_ON(atomic_read(&page->_count) != 2);
- BUG_ON(pool->page_array[pool->npages] != NULL);
- pool->page_array[pool->npages++] = page;
- ret = true;
+ BUG_ON(pool->count > pool->length);
+ BUG_ON(pool->page_array[pool->fill] != NULL);
}
- return ret;
+
+ pool->page_array[pool->fill] = page;
+ nvmap_pp_fill_inc(pool);
+ pool->count++;
+ pp_fill_add(pool, 1);
+
+ return true;
}
-bool nvmap_page_pool_release(struct nvmap_page_pool *pool, struct page *page)
+/*
+ * Fill a bunch of pages into the page pool. This will fill as many as it can
+ * and return the number of pages filled. Pages are used from the start of the
+ * passed page pointer array in a linear fashion.
+ *
+ * You must lock the page pool before using this.
+ */
+int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
+ struct page **pages, u32 nr)
{
- int ret = false;
+ u32 real_nr;
+ u32 ind = 0;
+
+ real_nr = min(pool->length - pool->count, nr);
+ if (real_nr == 0)
+ return 0;
+
+ while (real_nr--) {
+ if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
+ BUG_ON(pp_full(pool));
+ BUG_ON(pool->page_array[pool->fill]);
+ atomic_inc(&pages[ind]->_count);
+ BUG_ON(atomic_read(&pages[ind]->_count) != 2);
+ }
+ pool->page_array[pool->fill] = pages[ind++];
+ nvmap_pp_fill_inc(pool);
+ }
+
+ pool->count += ind;
+ pp_fill_add(pool, ind);
+
+ return ind;
+}
+
+bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
+{
+ bool ret = false;
if (pool) {
nvmap_page_pool_lock(pool);
- ret = nvmap_page_pool_release_locked(pool, page);
+ ret = nvmap_page_pool_fill_locked(pool, page);
nvmap_page_pool_unlock(pool);
}
+
return ret;
}
static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
- return pool->npages;
+ return pool->count;
}
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
@@ -138,23 +256,15 @@ ulong nvmap_page_pool_get_unused_pages(void)
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
- int available_pages;
- int pages_to_release = 0;
+ int ind;
struct page **page_array = NULL;
- if (size == pool->max_pages)
+ if (size == pool->length)
return;
-repeat:
- nvmap_page_pool_free(pool, pages_to_release);
- nvmap_page_pool_lock(pool);
- available_pages = nvmap_page_pool_get_available_count(pool);
- if (available_pages > size) {
- nvmap_page_pool_unlock(pool);
- pages_to_release = available_pages - size;
- goto repeat;
- }
+ nvmap_page_pool_lock(pool);
if (size == 0) {
+ /* TODO: fix this! */
vfree(pool->page_array);
pool->page_array = NULL;
goto out;
@@ -164,18 +274,32 @@ repeat:
if (!page_array)
goto fail;
- memcpy(page_array, pool->page_array,
- pool->npages * sizeof(struct page *));
- vfree(pool->page_array);
- pool->page_array = page_array;
+ /*
+ * Reuse what pages we can.
+ */
+ ind = __nvmap_page_pool_alloc_lots_locked(pool, page_array, size);
+
+ /*
+ * And free anything that might be left over.
+ */
+ while (!pp_empty(pool))
+ __free_page(nvmap_page_pool_alloc_locked(pool));
+
+ swap(page_array, pool->page_array);
+ pool->alloc = 0;
+ pool->fill = (ind == size ? 0 : ind);
+ pool->count = ind;
+ pool->length = size;
+
+ vfree(page_array);
+
out:
- pr_debug("page pool resized to %d from %d pages", size,
- pool->max_pages);
- pool->max_pages = size;
+ pr_debug("page pool resized to %d from %d pages\n", size, pool->length);
+ pool->length = size;
goto exit;
fail:
vfree(page_array);
- pr_err("failed");
+ pr_err("page pool resize failed\n");
exit:
nvmap_page_pool_unlock(pool);
}
@@ -319,16 +443,15 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
/* Use 3/8th of total ram for page pools.
* 1/8th for uc, 1/8th for wc and 1/8th for iwb.
*/
- pool->max_pages = (info.totalram >> 3) * 3;
+ pool->length = (info.totalram >> 3) * 3;
else
- pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;
+ pool->length = CONFIG_NVMAP_PAGE_POOL_SIZE;
- if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
+ if (pool->length <= 0 || pool->length >= info.totalram)
goto fail;
- pool_size = pool->max_pages;
- pr_info("nvmap page pool size: %d pages\n", pool->max_pages);
- pool->page_array = vzalloc(sizeof(struct page *) * pool->max_pages);
+ pr_info("nvmap page pool size: %d pages\n", pool->length);
+ pool->page_array = vzalloc(sizeof(struct page *) * pool->length);
if (!pool->page_array)
goto fail;
@@ -340,14 +463,14 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
PAGE_SIZE;
- pages_to_fill = pages_to_fill ? : pool->max_pages;
+ pages_to_fill = pages_to_fill ? : pool->length;
nvmap_page_pool_lock(pool);
for (i = 0; i < pages_to_fill; i++) {
page = alloc_page(GFP_NVMAP);
if (!page)
goto done;
- if (!nvmap_page_pool_release_locked(pool, page)) {
+ if (!nvmap_page_pool_fill_locked(pool, page)) {
__free_page(page);
goto done;
}
@@ -357,14 +480,14 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
si_meminfo(&info);
pr_info("highmem=%d, pool_size=%d,"
"totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
- highmem_pages, pool->max_pages,
+ highmem_pages, pool->length,
info.totalram, info.freeram, info.totalhigh, info.freehigh);
done:
nvmap_page_pool_unlock(pool);
#endif
return 0;
fail:
- pool->max_pages = 0;
+ pool->length = 0;
vfree(pool->page_array);
return -ENOMEM;
}
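
For context, here is a hypothetical caller-side sketch (not part of this commit) of why the batched _lots variant matters: the pool lock is taken once per batch rather than once per page, and any shortfall is made up from the kernel page allocator. The wrapper name and fallback policy are illustrative assumptions; only the pool functions and GFP_NVMAP come from the patch itself.

    /*
    * Hypothetical wrapper, assuming access to the lock helpers and the
    * _lots_locked allocator defined in nvmap_pp.c above.
    */
    static int nvmap_alloc_pages_batch(struct nvmap_page_pool *pool,
    				   struct page **pages, u32 nr)
    {
    	u32 got;

    	/* One lock round-trip covers the whole batch. */
    	nvmap_page_pool_lock(pool);
    	got = __nvmap_page_pool_alloc_lots_locked(pool, pages, nr);
    	nvmap_page_pool_unlock(pool);

    	/* Make up the shortfall from the kernel page allocator. */
    	while (got < nr) {
    		pages[got] = alloc_page(GFP_NVMAP);
    		if (!pages[got])
    			goto fail;
    		got++;
    	}
    	return 0;

    fail:
    	/* Undo on failure: free everything gathered so far. */
    	while (got--)
    		__free_page(pages[got]);
    	return -ENOMEM;
    }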