Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  19
-rw-r--r--  mm/slab.c     10
-rw-r--r--  mm/slob.c      2
-rw-r--r--  mm/swap.c     32
-rw-r--r--  mm/vmscan.c   25
5 files changed, 53 insertions, 35 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ceb3ebb3c399..67f29516662a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -107,7 +107,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
set_page_count(page, 1);
page[1].mapping = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
- clear_highpage(&page[i]);
+ clear_user_highpage(&page[i], addr);
return page;
}
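
The switch from clear_highpage() to clear_user_highpage() matters on
architectures with virtually indexed caches: the extra faulting-address
argument lets the architecture clear the page through a mapping of the
right cache color. As a rough sketch, the generic fallback in
include/linux/highmem.h of this era looks like this (architectures with
aliasing caches override clear_user_page() to make use of vaddr):

    static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
    {
        void *addr = kmap_atomic(page, KM_USER0);
        clear_user_page(addr, vaddr, page);     /* vaddr selects the cache color */
        kunmap_atomic(addr, KM_USER0);
    }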
@@ -391,12 +391,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
if (!new_page) {
page_cache_release(old_page);
-
- /* Logically this is OOM, not a SIGBUS, but an OOM
- * could cause the kernel to go killing other
- * processes which won't help the hugepage situation
- * at all (?) */
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_OOM;
}
spin_unlock(&mm->page_table_lock);
@@ -444,15 +439,7 @@ retry:
page = alloc_huge_page(vma, address);
if (!page) {
hugetlb_put_quota(mapping);
- /*
- * No huge pages available. So this is an OOM
- * condition but we do not want to trigger the OOM
- * killer, so we return VM_FAULT_SIGBUS.
- *
- * A program using hugepages may fault with Bus Error
- * because no huge pages are available in the cpuset, per
- * memory policy or because all are in use!
- */
+ ret = VM_FAULT_OOM;
goto out;
}
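
Both hugetlb hunks above replace VM_FAULT_SIGBUS with VM_FAULT_OOM when
no huge page can be allocated, so huge-page exhaustion is now reported
to the architecture fault handler as an ordinary out-of-memory condition
rather than delivered to the task as a bus error. A condensed sketch of
the dispatch in a typical fault handler of this era (abridged, not a
verbatim quote of any one architecture):

    switch (handle_mm_fault(mm, vma, address, write)) {
    case VM_FAULT_MINOR:
        tsk->min_flt++;
        break;
    case VM_FAULT_MAJOR:
        tsk->maj_flt++;
        break;
    case VM_FAULT_SIGBUS:
        goto do_sigbus;         /* deliver SIGBUS to the task */
    case VM_FAULT_OOM:
        goto out_of_memory;     /* may invoke the OOM killer */
    default:
        BUG();
    }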
diff --git a/mm/slab.c b/mm/slab.c
index d66c2b0d9715..add05d808a4a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1717,6 +1717,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG();
}
+ /*
+ * Prevent CPUs from coming and going.
+ * lock_cpu_hotplug() nests outside cache_chain_mutex
+ */
+ lock_cpu_hotplug();
+
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
@@ -1918,8 +1924,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->dtor = dtor;
cachep->name = name;
- /* Don't let CPUs to come and go */
- lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
@@ -1978,12 +1982,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- unlock_cpu_hotplug();
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
mutex_unlock(&cache_chain_mutex);
+ unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
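
The slab.c change is purely a lock-ordering fix: lock_cpu_hotplug() was
previously taken after cache_chain_mutex was already held and released
before it, while the CPU-hotplug notifier in the same file takes
cache_chain_mutex with the hotplug lock already held. Hoisting the
hotplug lock to the top of kmem_cache_create() makes both paths agree.
A sketch of the ordering the patch establishes (names as in mm/slab.c):

    lock_cpu_hotplug();                 /* outer: CPUs cannot come or go */
    mutex_lock(&cache_chain_mutex);     /* inner: protects cache_chain */
    /* ... set up cachep and link it into cache_chain ... */
    mutex_unlock(&cache_chain_mutex);
    unlock_cpu_hotplug();               /* release in strict reverse order */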
diff --git a/mm/slob.c b/mm/slob.c
index 1c240c4b71d9..a1f42bdc0245 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(slab_reclaim_pages);
#ifdef CONFIG_SMP
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
{
int i;
struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
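
The SLOB variant of __alloc_percpu() drops its unused align argument to
match the updated prototype; per-CPU objects simply get kmalloc()'s
natural alignment. Assuming the include/linux/percpu.h wrapper of the
same series, the caller-facing macro reduces to roughly:

    #define alloc_percpu(type) \
        ((type *)(__alloc_percpu(sizeof(type))))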
diff --git a/mm/swap.c b/mm/swap.c
index bc2442a7b0ee..76247424dea1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,19 +34,22 @@
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
{
- if (unlikely(PageCompound(page))) {
- page = (struct page *)page_private(page);
- if (put_page_testzero(page)) {
- void (*dtor)(struct page *page);
+ page = (struct page *)page_private(page);
+ if (put_page_testzero(page)) {
+ void (*dtor)(struct page *page);
- dtor = (void (*)(struct page *))page[1].mapping;
- (*dtor)(page);
- }
- return;
+ dtor = (void (*)(struct page *))page[1].mapping;
+ (*dtor)(page);
}
- if (put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+ if (unlikely(PageCompound(page)))
+ put_compound_page(page);
+ else if (put_page_testzero(page))
__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
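
Splitting the compound-page path out of put_page() gives it a name that
release_pages() below can reuse. The conventions put_compound_page()
relies on: page_private of every sub-page points back at the head page,
and the destructor lives in page[1].mapping, exactly as alloc_huge_page()
arranges above with free_huge_page. A sketch of the setup side
(setup_compound() is a hypothetical helper, not kernel code):

    static void setup_compound(struct page *head, int order,
                               void (*dtor)(struct page *))
    {
        int i;

        /* every sub-page, including the head, points at the head */
        for (i = 0; i < (1 << order); i++)
            set_page_private(head + i, (unsigned long)head);
        /* put_compound_page() fetches the destructor from here */
        head[1].mapping = (void *)dtor;
    }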
@@ -244,6 +247,15 @@ void release_pages(struct page **pages, int nr, int cold)
struct page *page = pages[i];
struct zone *pagezone;
+ if (unlikely(PageCompound(page))) {
+ if (zone) {
+ spin_unlock_irq(&zone->lru_lock);
+ zone = NULL;
+ }
+ put_compound_page(page);
+ continue;
+ }
+
if (!put_page_testzero(page))
continue;
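
release_pages() must let go of zone->lru_lock, an IRQ-disabling
spinlock, before calling put_compound_page(): compound pages are not on
the LRU, and their destructor (free_huge_page() here) is presumably not
safe to run in that context. The loop shape after this hunk, abridged
(LRU handling and pagevec batching omitted):

    for (i = 0; i < nr; i++) {
        struct page *page = pages[i];

        if (unlikely(PageCompound(page))) {
            if (zone) {
                spin_unlock_irq(&zone->lru_lock);   /* dtor not under lru_lock */
                zone = NULL;
            }
            put_compound_page(page);
            continue;
        }
        if (!put_page_testzero(page))
            continue;
        /* ... ordinary page: batch it for freeing under the zone lock ... */
    }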
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a610804cd06..5db32fdfaf39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -632,7 +632,7 @@ static int swap_page(struct page *page)
struct address_space *mapping = page_mapping(page);
if (page_mapped(page) && mapping)
- if (try_to_unmap(page, 0) != SWAP_SUCCESS)
+ if (try_to_unmap(page, 1) != SWAP_SUCCESS)
goto unlock_retry;
if (PageDirty(page)) {
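
Passing 1 instead of 0 as the second argument (ignore_refs in this
era's try_to_unmap()) makes migration unmap a page even when its ptes
were recently referenced; otherwise a hot page could never be moved.
The check it bypasses, roughly as it appears in try_to_unmap_one() in
mm/rmap.c of this era:

    if ((vma->vm_flags & VM_LOCKED) ||
        (ptep_clear_flush_young(vma, address, pte) && !ignore_refs))
        goto out_unmap;     /* a referenced pte no longer aborts the unmap */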
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(migrate_page);
* pages are swapped out.
*
* The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because to has become empty
* or no retryable pages exist anymore.
*
* Return: Number of pages not migrated when "to" ran empty.
@@ -928,12 +928,21 @@ redo:
goto unlock_both;
if (mapping->a_ops->migratepage) {
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
rc = mapping->a_ops->migratepage(newpage, page);
goto unlock_both;
}
/*
- * Trigger writeout if page is dirty
+ * Default handling if a filesystem does not provide
+ * a migration function. We can only migrate clean
+ * pages so try to write out any dirty pages first.
*/
if (PageDirty(page)) {
switch (pageout(page, mapping)) {
@@ -949,9 +958,10 @@ redo:
; /* try to migrate the page below */
}
}
+
/*
- * If we have no buffer or can release the buffer
- * then do a simple migration.
+ * Buffers are managed in a filesystem specific way.
+ * We must have no buffers or drop them.
*/
if (!page_has_buffers(page) ||
try_to_release_page(page, GFP_KERNEL)) {
@@ -966,6 +976,11 @@ redo:
* swap them out.
*/
if (pass > 4) {
+ /*
+ * Persistently unable to drop buffers..... As a
+ * measure of last resort we fall back to
+ * swap_page().
+ */
unlock_page(newpage);
newpage = NULL;
rc = swap_page(page);
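
Taken together, the comments added to migrate_pages() document a
fallback ladder. A condensed sketch (not the verbatim control flow;
locking, retry and error paths omitted):

    if (mapping->a_ops->migratepage)                /* common case, incl. swap */
        rc = mapping->a_ops->migratepage(newpage, page);
    else if (PageDirty(page))
        pageout(page, mapping);                     /* write out, retry later */
    else if (!page_has_buffers(page) ||
             try_to_release_page(page, GFP_KERNEL))
        rc = migrate_page(newpage, page);           /* generic clean-page copy */
    else if (pass > 4)
        rc = swap_page(page);                       /* last resort: swap it out */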