| field | value | date |
|---|---|---|
| author | Colin Cross <ccross@android.com> | 2014-08-11 16:09:51 -0700 |
| committer | Winnie Hsu <whsu@nvidia.com> | 2015-05-12 13:28:18 -0700 |
| commit | c81dc1143c8c6d642adfd527ba43d16355ffd373 (patch) | |
| tree | 1eae786401b2b0c5f13bf8686f55389aac6c4716 /drivers/video | |
| parent | 4a3a5bb3cb51bcd55d0695fcb4d6d28b4b2f2461 (diff) | |
nvmap: page pool: fix background thread
Fix a race condition in the background allocator: wake_up_process()
could be called just before set_current_state() changed the state to
TASK_INTERRUPTIBLE, so the wakeup was lost and the thread stayed
asleep with work pending. Use a waitqueue instead, which re-checks
the wakeup condition and therefore cannot miss a wakeup.
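
To make the lost-wakeup window concrete, here is a minimal sketch of
the racy pattern and of the waitqueue fix. This is an illustration, not
the nvmap code; the `example_*` names are hypothetical.

```c
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);	/* hypothetical */
static bool example_work_pending;		/* hypothetical condition */

/*
 * The racy shape this patch removes: the waker can slip in between the
 * sleeper's last condition check and its set_current_state() call, so
 * the wakeup hits a TASK_RUNNING thread and is lost:
 *
 *   sleeper                                  waker
 *   -------                                  -----
 *   sees no pending work
 *                                            example_work_pending = true;
 *                                            wake_up_process(task);  <-- lost
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   schedule();                              <-- sleeps with work pending
 */

static void example_waker(void)
{
	example_work_pending = true;
	wake_up_interruptible(&example_wait);
}

static int example_sleeper(void)
{
	/*
	 * Safe: wait_event_interruptible() puts the task on the queue
	 * before testing the condition, and re-tests it after every
	 * wakeup, so a wakeup issued at any point is never missed.
	 */
	return wait_event_interruptible(example_wait, example_work_pending);
}
```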
Also make the background allocator better behaved: mark it freezable
so it doesn't compete with suspend, and set it to SCHED_IDLE so it
only runs when no other thread wants the CPU.
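
Both niceties are a single call each at thread start-up; the diff below
shows them applied to nvmap_background_zero_allocator(). A hedged,
self-contained sketch of the same pattern (hypothetical `example_*`
names):

```c
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);	/* hypothetical */
static bool example_work_pending;		/* hypothetical condition */

static int example_background_thread(void *arg)
{
	/* SCHED_IDLE ignores the priority value; it must be 0. */
	struct sched_param param = { .sched_priority = 0 };

	set_freezable();	/* let the freezer handle us across suspend */
	sched_setscheduler(current, SCHED_IDLE, &param);

	while (!kthread_should_stop()) {
		/* ... perform background work ... */

		/*
		 * Sleeps like wait_event_interruptible(), but also
		 * cooperates with the freezer so suspend is not held up
		 * by this thread.
		 */
		wait_event_freezable(example_wait,
				     example_work_pending ||
				     kthread_should_stop());
	}
	return 0;
}
```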
Change-Id: If95da005bb1fc4c9b5e802d40730803a57057fe1
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/664673
GVS: Gerrit_Virtual_Submit
Reviewed-on: http://git-master/r/736427
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Tested-by: Alex Waterman <alexw@nvidia.com>
Diffstat (limited to 'drivers/video')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/video/tegra/nvmap/nvmap_pp.c | 63 |
| -rw-r--r-- | drivers/video/tegra/nvmap/nvmap_priv.h | 2 |

2 files changed, 27 insertions, 38 deletions
```diff
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 1c2e1f715aff..c00d9f20edd4 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -28,6 +28,8 @@
 #include <linux/shrinker.h>
 #include <linux/kthread.h>
 #include <linux/debugfs.h>
+#include <linux/freezer.h>
+#include <linux/highmem.h>
 
 #include "nvmap_priv.h"
 
@@ -39,6 +41,7 @@ static bool enable_pp = 1;
 static int pool_size;
 
 static struct task_struct *background_allocator;
+static DECLARE_WAIT_QUEUE_HEAD(nvmap_bg_wait);
 static struct page *pending_pages[PENDING_PAGES_SIZE];
 static atomic_t bg_pages_to_fill;
 
@@ -74,6 +77,17 @@ static inline struct page *get_page_list_page(struct nvmap_page_pool *pool)
 	return page;
 }
 
+static inline bool nvmap_bg_should_run(struct nvmap_page_pool *pool)
+{
+	bool ret;
+
+	mutex_lock(&pool->lock);
+	ret = (pool->to_zero > 0 || atomic_read(&bg_pages_to_fill));
+	mutex_unlock(&pool->lock);
+
+	return ret;
+}
+
 /*
  * Allocate n pages one by one. Not the most efficient allocation scheme ever;
  * however, it will make it easier later on to handle single or small number of
@@ -155,17 +169,20 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
  */
 static int nvmap_background_zero_allocator(void *arg)
 {
+	struct nvmap_page_pool *pool = &nvmap_dev->pool;
+	struct sched_param param = { .sched_priority = 0 };
+
 	pr_info("PP alloc thread starting.\n");
 
-	while (1) {
-		if (kthread_should_stop())
-			break;
+	set_freezable();
+	sched_setscheduler(current, SCHED_IDLE, &param);
 
-		nvmap_pp_do_background_fill(&nvmap_dev->pool);
+	while (!kthread_should_stop()) {
+		nvmap_pp_do_background_fill(pool);
 
-		/* Pending work is done - go to sleep. */
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
+		wait_event_freezable(nvmap_bg_wait,
+				nvmap_bg_should_run(pool) ||
+				kthread_should_stop());
 	}
 
 	return 0;
@@ -206,7 +223,7 @@ static inline void nvmap_pp_wake_up_allocator(void)
 	/* Let the background thread know how much memory to fill. */
 	atomic_set(&bg_pages_to_fill, min(tmp, (int)(pool->max - pool->count)));
 
-	wake_up_process(background_allocator);
+	wake_up_interruptible(&nvmap_bg_wait);
 }
 
 /*
@@ -617,12 +634,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
 	unsigned long totalram_mb;
 	struct sysinfo info;
 	struct nvmap_page_pool *pool = &dev->pool;
-#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
-	int i;
-	struct page *page;
-	int pages_to_fill;
-	int highmem_pages = 0;
-#endif
 
 	memset(pool, 0x0, sizeof(*pool));
 	mutex_init(&pool->lock);
@@ -655,30 +666,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
 	if (IS_ERR_OR_NULL(background_allocator))
 		goto fail;
 
-#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
-	pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
-			PAGE_SIZE;
-	pages_to_fill = pages_to_fill ? : pool->count;
-
-	for (i = 0; i < pages_to_fill; i++) {
-		page = alloc_page(GFP_NVMAP);
-		if (!page)
-			goto done;
-		if (!nvmap_page_pool_fill_locked(pool, page)) {
-			__free_page(page);
-			goto done;
-		}
-		if (PageHighMem(page))
-			highmem_pages++;
-	}
-
-	si_meminfo(&info);
-	pr_info("highmem=%d, pool_size=%d,"
-		"totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
-		highmem_pages, pool->count,
-		info.totalram, info.freeram, info.totalhigh, info.freehigh);
-done:
-#endif
 	return 0;
 fail:
 	nvmap_page_pool_fini(dev);
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index 27cb3290a599..8b715b42faac 100644
--- a/drivers/video/tegra/nvmap/nvmap_priv.h
+++ b/drivers/video/tegra/nvmap/nvmap_priv.h
@@ -173,7 +173,9 @@ struct nvmap_page_pool {
 	struct mutex lock;
 	u32 count;  /* Number of pages in the page list. */
 	u32 max;    /* Max length of the page list. */
+	int to_zero; /* Number of pages on the zero list */
 	struct list_head page_list;
+	struct list_head zero_list;
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
 	u64 allocs;
```