diff options
author | Krishna Reddy <vdumpa@nvidia.com> | 2012-01-10 16:33:33 -0800 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2013-09-14 01:36:00 -0700 |
commit | 015f410e3252be38f6a01a8644f3835756debbc8 (patch) | |
tree | 9e5bf0cfb508b9c8e8e76a3384c3503997a1e230 /lib | |
parent | ca0b08bd6d222955ea8f4e69aec8e18f18a02402 (diff) |
lib: genalloc: Add API to allocate at specified addr.
Add an API to allocate memory starting at a caller-specified address.
Change-Id: I188e5430220c050026c6a3e17a586012d9a9fa04
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/74468
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Stephen Warren <swarren@nvidia.com>
Rebase-Id: R93cbd801fbaea15cd4b0b579826b659d220618d7
Diffstat (limited to 'lib')
-rw-r--r-- | lib/genalloc.c | 23 |
1 file changed, 20 insertions, 3 deletions
diff --git a/lib/genalloc.c b/lib/genalloc.c index b35cfa9bc3d4..30d3d8e24af9 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -254,21 +254,24 @@ void gen_pool_destroy(struct gen_pool *pool) EXPORT_SYMBOL(gen_pool_destroy); /** - * gen_pool_alloc - allocate special memory from the pool + * gen_pool_alloc_addr - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool + * @alloc_addr: if non-zero, allocate starting at alloc_addr. * * Allocate the requested number of bytes from the specified pool. * Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. */ -unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) +unsigned long gen_pool_alloc_addr(struct gen_pool *pool, size_t size, + unsigned long alloc_addr) { struct gen_pool_chunk *chunk; unsigned long addr = 0; int order = pool->min_alloc_order; int nbits, start_bit = 0, end_bit, remain; + int alloc_bit_needed = 0; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -277,6 +280,9 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) if (size == 0) return 0; + if (alloc_addr & (1 << order) - 1) + return 0; + nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { @@ -284,9 +290,20 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) continue; end_bit = (chunk->end_addr - chunk->start_addr) >> order; + if (alloc_addr) { + if (alloc_addr < chunk->start_addr || + alloc_addr >= chunk->end_addr) + continue; + if (alloc_addr + size > chunk->end_addr) + return 0; + alloc_bit_needed = start_bit = + (alloc_addr - chunk->start_addr) >> order; + } retry: start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, pool->data); + if (alloc_addr && alloc_bit_needed != start_bit) + return 0; if (start_bit >= end_bit) continue; remain = 
bitmap_set_ll(chunk->bits, start_bit, nbits); @@ -305,7 +322,7 @@ retry: rcu_read_unlock(); return addr; } -EXPORT_SYMBOL(gen_pool_alloc); +EXPORT_SYMBOL(gen_pool_alloc_addr); /** * gen_pool_free - free allocated special memory back to the pool |