Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c     3
-rw-r--r--  mm/swap.c    39
-rw-r--r--  mm/vmalloc.c  8
3 files changed, 25 insertions, 25 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 21ba06035700..5870bcbd33cf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
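
Note: kmem_find_general_cachep() is only a thin wrapper around __find_general_cachep(), which walks a size-ordered table of general caches and returns the first one large enough for the request. A minimal userspace sketch of that lookup follows; the table contents and names are invented for illustration and this is not the kernel code:

/* Sketch only -- not kernel code.  Table sizes and names are invented. */
#include <stddef.h>
#include <stdio.h>

struct cache_sizes {
	size_t cs_size;
	const char *cs_name;		/* stand-in for the kmem_cache pointer */
};

static const struct cache_sizes malloc_sizes_demo[] = {
	{ 32, "size-32" }, { 64, "size-64" }, { 128, "size-128" },
	{ 256, "size-256" }, { 0, NULL },	/* zero size terminates the table */
};

/* Return the first (smallest) general cache that can hold size bytes. */
static const char *find_general_cachep_demo(size_t size)
{
	const struct cache_sizes *csizep = malloc_sizes_demo;

	while (csizep->cs_size && size > csizep->cs_size)
		csizep++;
	return csizep->cs_name;		/* NULL: too large for any general cache */
}

int main(void)
{
	printf("%s\n", find_general_cachep_demo(100));	/* prints "size-128" */
	return 0;
}
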
diff --git a/mm/swap.c b/mm/swap.c
index 600235e43704..2e0e871f542f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -223,26 +242,6 @@ int lru_add_drain_all(void)
 #endif
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
-/*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
  * free it.
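
Note: making __page_cache_release() static means its definition has to be visible before its callers in swap.c, which is why the body moves up rather than just changing type. The function itself follows a common shape: a lockless flag test, then the zone LRU lock with interrupts disabled, a re-check, unlink, unlock, free. A rough userspace analogue of that shape is sketched below; the struct, the lock and the list are all made up, and a mutex stands in for spin_lock_irqsave():

/* Sketch only -- a userspace analogue, not kernel code. */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct fake_page {
	struct fake_page *prev, *next;	/* stand-in for the LRU list links */
	int on_lru;			/* stand-in for PageLRU() */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_page lru = { &lru, &lru, 0 };	/* circular list head */

static void lru_add(struct fake_page *page)
{
	pthread_mutex_lock(&lru_lock);
	page->next = lru.next;
	page->prev = &lru;
	lru.next->prev = page;
	lru.next = page;
	page->on_lru = 1;
	pthread_mutex_unlock(&lru_lock);
}

static void page_release(struct fake_page *page)
{
	if (page->on_lru) {			/* cheap lockless check */
		pthread_mutex_lock(&lru_lock);	/* kernel: spin_lock_irqsave() */
		assert(page->on_lru);		/* kernel: VM_BUG_ON(!PageLRU(page)) */
		page->on_lru = 0;		/* kernel: __ClearPageLRU() */
		page->prev->next = page->next;	/* kernel: del_page_from_lru() */
		page->next->prev = page->prev;
		pthread_mutex_unlock(&lru_lock);
	}
	free(page);				/* kernel: free_hot_page() */
}

int main(void)
{
	struct fake_page *p = calloc(1, sizeof(*p));

	lru_add(p);
	page_release(p);
	return 0;
}
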
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2ba28..9aad8b0cc6ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
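
Note: the forward declaration added near the top of vmalloc.c is needed because __vmalloc_node() is now static but is still called from code that appears earlier in the file than its definition; without an earlier declaration the later static definition would conflict with the compiler's implicit extern assumption at the first call site. A stand-alone sketch of the same situation, with invented names:

/* Sketch only -- not kernel code.  Function names are invented. */
#include <stdio.h>

/* Forward declaration: the static definition only appears further down. */
static int helper(int x);

int public_entry(int x)
{
	/* Without the declaration above, the compiler has not seen helper()
	 * at this call, and the later static definition would clash with the
	 * implicitly declared extern function. */
	return helper(x) + 1;
}

static int helper(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", public_entry(20));	/* prints 41 */
	return 0;
}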