summary refs log tree commit diff
path: root/drivers/video
diff options
context:
space:
mode:
author	Gary King <gking@nvidia.com>	2010-10-12 17:40:18 -0700
committer	Rebecca Schultz Zavin <rebecca@android.com>	2010-10-19 12:42:21 -0700
commit	78d98c3245a12433d2016ffc1e00a34fae79d822 (patch)
tree	69427a26b0cb39f32dd120375c2a87497418c2c3 /drivers/video
parent	aba71d0453b73cbea8297d092be5bb3ecb3fd311 (diff)
video: tegra: nvmap: eliminate arm_attrib_allocator dependency
remove the dependency that nvmap has on the arm_attrib_allocator and the
lowmem in PTEs change by adding a private page allocator utility function
and calling vm_map_ram unconditionally for all sysmem handles.

also, add Kconfig variables to allow platforms to disallow the SYSMEM
heap, and to optionally restrict the SYSMEM and IOVMM heaps to just
HIGHMEM.

Change-Id: I3dab1c7323f54a8ab3994dc672b27fd79a9057d7
Signed-off-by: Gary King <gking@nvidia.com>
Diffstat (limited to 'drivers/video')
-rw-r--r--	drivers/video/tegra/Kconfig	26
-rw-r--r--	drivers/video/tegra/nvmap/nvmap.c	10
-rw-r--r--	drivers/video/tegra/nvmap/nvmap_handle.c	51
3 files changed, 70 insertions(+), 17 deletions(-)
diff --git a/drivers/video/tegra/Kconfig b/drivers/video/tegra/Kconfig
index f9192c6d68b8..2b8160877688 100644
--- a/drivers/video/tegra/Kconfig
+++ b/drivers/video/tegra/Kconfig
@@ -28,22 +28,38 @@ config FB_TEGRA
Framebuffer device support for the Tegra display controller.
config TEGRA_NVMAP
- bool "Tegra GPU memory management driver"
- select ARM_ATTRIB_ALLOCATOR
+ bool "Tegra GPU memory management driver (nvmap)"
default y
help
Say Y here to include the memory management driver for the Tegra
GPU, multimedia and display subsystems
config NVMAP_RECLAIM_UNPINNED_VM
- bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
+ bool "Virtualize IOVMM memory in nvmap"
depends on TEGRA_NVMAP && TEGRA_IOVMM
default y
help
- Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
- it has been unpinned, and re-use it for other objects. This can
+ Say Y here to enable nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other handles. This can
allow a larger virtual I/O VM space than would normally be
supported by the hardware, at a slight cost in performance.
+config NVMAP_ALLOW_SYSMEM
+ bool "Allow physical system memory to be used by nvmap"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ Say Y here to allow nvmap to use physical system memory (i.e.,
+ shared with the operating system but not translated through
+ an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+ bool "Use only HIGHMEM for nvmap"
+ depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM
+ default n
+ help
+ Say Y here to restrict nvmap system memory allocations (both
+ physical system memory and IOVMM) to just HIGHMEM pages.
+
endif
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index 506aef8408a9..865681f2e221 100644
--- a/drivers/video/tegra/nvmap/nvmap.c
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -623,10 +623,7 @@ void *nvmap_mmap(struct nvmap_handle_ref *ref)
prot = nvmap_pgprot(h, pgprot_kernel);
- if (h->heap_pgalloc && h->pgalloc.contig &&
- !PageHighMem(h->pgalloc.pages[0]))
- return page_address(h->pgalloc.pages[0]);
- else if (h->heap_pgalloc)
+ if (h->heap_pgalloc)
return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
-1, prot);
@@ -687,10 +684,9 @@ void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
h = ref->handle;
- if (h->heap_pgalloc && (!h->pgalloc.contig ||
- PageHighMem(h->pgalloc.pages[0]))) {
+ if (h->heap_pgalloc) {
vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
- } else if (!h->heap_pgalloc) {
+ } else {
struct vm_struct *vm;
addr -= (h->carveout->base & ~PAGE_MASK);
vm = remove_vm_area(addr);
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 21cbf9c4d85d..725a132ff6fd 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -28,7 +28,8 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/attrib_alloc.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
#include <asm/pgtable.h>
#include <mach/iovmm.h>
@@ -38,7 +39,11 @@
#include "nvmap_mru.h"
#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
* the kernel (i.e., not a carveout handle) includes its array of pages. to
* preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
@@ -90,7 +95,7 @@ void _nvmap_handle_free(struct nvmap_handle *h)
tegra_iovmm_free_vm(h->pgalloc.area);
for (i = 0; i < nr_page; i++)
- arm_attrib_free_page(h->pgalloc.pages[i]);
+ __free_page(h->pgalloc.pages[i]);
altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
@@ -99,6 +104,36 @@ out:
nvmap_client_put(client);
}
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+ struct page *page, *p, *e;
+ unsigned int order;
+ unsigned long base;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ e = page + (1 << order);
+ for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+ __free_page(p);
+
+ e = page + (size >> PAGE_SHIFT);
+ for (p = page; p < e; p++)
+ __flush_dcache_page(page_mapping(p), p);
+
+ base = page_to_phys(page);
+ outer_flush_range(base, base + size);
+ return page;
+}
+
static int handle_page_alloc(struct nvmap_client *client,
struct nvmap_handle *h, bool contiguous)
{
@@ -120,14 +155,16 @@ static int handle_page_alloc(struct nvmap_client *client,
h->pgalloc.area = NULL;
if (contiguous) {
struct page *page;
- page = arm_attrib_alloc_pages_exact(GFP_NVMAP, size, prot);
+ page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+ if (!page)
+ goto fail;
for (i = 0; i < nr_page; i++)
pages[i] = nth_page(page, i);
} else {
for (i = 0; i < nr_page; i++) {
- pages[i] = arm_attrib_alloc_page(GFP_NVMAP, prot);
+ pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
if (!pages[i])
goto fail;
}
@@ -151,7 +188,7 @@ static int handle_page_alloc(struct nvmap_client *client,
fail:
while (i--)
- arm_attrib_free_page(pages[i]);
+ __free_page(pages[i]);
altfree(pages, nr_page * sizeof(*pages));
return -ENOMEM;
}
@@ -210,7 +247,9 @@ static void alloc_handle(struct nvmap_client *client, size_t align,
* sub-page splinters */
static const unsigned int heap_policy_small[] = {
NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
NVMAP_HEAP_SYSMEM,
+#endif
NVMAP_HEAP_CARVEOUT_MASK,
NVMAP_HEAP_IOVMM,
0,
@@ -220,7 +259,9 @@ static const unsigned int heap_policy_large[] = {
NVMAP_HEAP_CARVEOUT_IRAM,
NVMAP_HEAP_IOVMM,
NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
NVMAP_HEAP_SYSMEM,
+#endif
0,
};