author     Len Brown <len.brown@intel.com>   2009-01-09 03:38:15 -0500
committer  Len Brown <len.brown@intel.com>   2009-01-09 03:38:15 -0500
commit     3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (patch)
tree       82b60e24ce5457e2e21e00bfa3b2f171d7eb97ce /kernel
parent     d0302bc62af7983040ea1cd1b5dd2b36e1a6a509 (diff)
parent     ada9cfdd158abb8169873dc8e5ae39b1ec6ffa8c (diff)
Merge branch 'suspend' into release
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/power/disk.c       6
-rw-r--r--   kernel/power/snapshot.c   370
-rw-r--r--   kernel/power/swsusp.c     122
3 files changed, 324 insertions, 174 deletions
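
In substance, this merge pulls two pieces of work from the 'suspend' branch: a rework of the hibernation memory bitmaps in snapshot.c, which replaces the per-zone bitmap lists with a single extent-based list of bitmap blocks (so overlapping memory zones are handled correctly) and tightens error reporting in the snapshot read/write paths, and a new mechanism in swsusp.c for saving and restoring platform NVS memory across hibernation. The small disk.c change reorders hibernation_snapshot() so that the platform begin() hook runs before memory is freed.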
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index c9d74083746f..096fe4899ea4 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -259,12 +259,12 @@ int hibernation_snapshot(int platform_mode)
{
int error, ftrace_save;
- /* Free memory before shutting down devices. */
- error = swsusp_shrink_memory();
+ error = platform_begin(platform_mode);
if (error)
return error;
- error = platform_begin(platform_mode);
+ /* Free memory before shutting down devices. */
+ error = swsusp_shrink_memory();
if (error)
goto Close;
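
For reference, after this reordering the top of hibernation_snapshot() calls the platform begin() hook first; a failed platform_begin() returns immediately (there is nothing to undo yet), while a failed memory shrink unwinds through the Close: label. Roughly, with the middle of the function elided:

int hibernation_snapshot(int platform_mode)
{
	int error, ftrace_save;

	error = platform_begin(platform_mode);
	if (error)
		return error;

	/* Free memory before shutting down devices. */
	error = swsusp_shrink_memory();
	if (error)
		goto Close;

	/* ... suspend the console, freeze devices, create the image ... */

 Close:
	platform_end(platform_mode);
	return error;
}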
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5d2ab836e998..f5fc2d7680f2 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -25,6 +25,7 @@
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
+#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
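
The newly included <linux/list.h> supplies the kernel's intrusive doubly linked list that the rest of this patch converts to: each node embeds a struct list_head (the 'hook' member introduced below) and is reached through container_of()-based iterators. A minimal usage sketch, matching the loops added further down:

struct bm_block *bb;

/* visit every block linked into bm->blocks through its embedded hook */
list_for_each_entry(bb, &bm->blocks, hook)
	pr_debug("block covers pfns %lu-%lu\n", bb->start_pfn, bb->end_pfn);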
@@ -192,12 +193,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
return ret;
}
-static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
-{
- free_list_of_pages(ca->chain, clear_page_nosave);
- memset(ca, 0, sizeof(struct chain_allocator));
-}
-
/**
* Data types related to memory bitmaps.
*
@@ -233,7 +228,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3)
struct bm_block {
- struct bm_block *next; /* next element of the list */
+ struct list_head hook; /* hook into a list of bitmap blocks */
unsigned long start_pfn; /* pfn represented by the first bit */
unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
unsigned long *data; /* bitmap representing pages */
@@ -244,24 +239,15 @@ static inline unsigned long bm_block_bits(struct bm_block *bb)
return bb->end_pfn - bb->start_pfn;
}
-struct zone_bitmap {
- struct zone_bitmap *next; /* next element of the list */
- unsigned long start_pfn; /* minimal pfn in this zone */
- unsigned long end_pfn; /* maximal pfn in this zone plus 1 */
- struct bm_block *bm_blocks; /* list of bitmap blocks */
- struct bm_block *cur_block; /* recently used bitmap block */
-};
-
/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
- struct zone_bitmap *zone_bm;
struct bm_block *block;
int bit;
};
struct memory_bitmap {
- struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */
+ struct list_head blocks; /* list of bitmap blocks */
struct linked_page *p_list; /* list of pages used to store zone
* bitmap objects and bitmap block
* objects
@@ -273,11 +259,7 @@ struct memory_bitmap {
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
- struct zone_bitmap *zone_bm;
-
- zone_bm = bm->zone_bm_list;
- bm->cur.zone_bm = zone_bm;
- bm->cur.block = zone_bm->bm_blocks;
+ bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
bm->cur.bit = 0;
}
@@ -285,151 +267,184 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
* create_bm_block_list - create a list of block bitmap objects
+ * @pages - number of pages to track
+ * @list - list to put the allocated blocks into
+ * @ca - chain allocator to be used for allocating memory
*/
-
-static inline struct bm_block *
-create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
+static int create_bm_block_list(unsigned long pages,
+ struct list_head *list,
+ struct chain_allocator *ca)
{
- struct bm_block *bblist = NULL;
+ unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
while (nr_blocks-- > 0) {
struct bm_block *bb;
bb = chain_alloc(ca, sizeof(struct bm_block));
if (!bb)
- return NULL;
-
- bb->next = bblist;
- bblist = bb;
+ return -ENOMEM;
+ list_add(&bb->hook, list);
}
- return bblist;
+
+ return 0;
}
+struct mem_extent {
+ struct list_head hook;
+ unsigned long start;
+ unsigned long end;
+};
+
/**
- * create_zone_bm_list - create a list of zone bitmap objects
+ * free_mem_extents - free a list of memory extents
+ * @list - list of extents to empty
*/
+static void free_mem_extents(struct list_head *list)
+{
+ struct mem_extent *ext, *aux;
-static inline struct zone_bitmap *
-create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
+ list_for_each_entry_safe(ext, aux, list, hook) {
+ list_del(&ext->hook);
+ kfree(ext);
+ }
+}
+
+/**
+ * create_mem_extents - create a list of memory extents representing
+ * contiguous ranges of PFNs
+ * @list - list to put the extents into
+ * @gfp_mask - mask to use for memory allocations
+ */
+static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
- struct zone_bitmap *zbmlist = NULL;
+ struct zone *zone;
- while (nr_zones-- > 0) {
- struct zone_bitmap *zbm;
+ INIT_LIST_HEAD(list);
- zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
- if (!zbm)
- return NULL;
+ for_each_zone(zone) {
+ unsigned long zone_start, zone_end;
+ struct mem_extent *ext, *cur, *aux;
+
+ if (!populated_zone(zone))
+ continue;
- zbm->next = zbmlist;
- zbmlist = zbm;
+ zone_start = zone->zone_start_pfn;
+ zone_end = zone->zone_start_pfn + zone->spanned_pages;
+
+ list_for_each_entry(ext, list, hook)
+ if (zone_start <= ext->end)
+ break;
+
+ if (&ext->hook == list || zone_end < ext->start) {
+ /* New extent is necessary */
+ struct mem_extent *new_ext;
+
+ new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
+ if (!new_ext) {
+ free_mem_extents(list);
+ return -ENOMEM;
+ }
+ new_ext->start = zone_start;
+ new_ext->end = zone_end;
+ list_add_tail(&new_ext->hook, &ext->hook);
+ continue;
+ }
+
+ /* Merge this zone's range of PFNs with the existing one */
+ if (zone_start < ext->start)
+ ext->start = zone_start;
+ if (zone_end > ext->end)
+ ext->end = zone_end;
+
+ /* More merging may be possible */
+ cur = ext;
+ list_for_each_entry_safe_continue(cur, aux, list, hook) {
+ if (zone_end < cur->start)
+ break;
+ if (zone_end < cur->end)
+ ext->end = cur->end;
+ list_del(&cur->hook);
+ kfree(cur);
+ }
}
- return zbmlist;
+
+ return 0;
}
/**
* memory_bm_create - allocate memory for a memory bitmap
*/
-
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
struct chain_allocator ca;
- struct zone *zone;
- struct zone_bitmap *zone_bm;
- struct bm_block *bb;
- unsigned int nr;
+ struct list_head mem_extents;
+ struct mem_extent *ext;
+ int error;
chain_init(&ca, gfp_mask, safe_needed);
+ INIT_LIST_HEAD(&bm->blocks);
- /* Compute the number of zones */
- nr = 0;
- for_each_zone(zone)
- if (populated_zone(zone))
- nr++;
-
- /* Allocate the list of zones bitmap objects */
- zone_bm = create_zone_bm_list(nr, &ca);
- bm->zone_bm_list = zone_bm;
- if (!zone_bm) {
- chain_free(&ca, PG_UNSAFE_CLEAR);
- return -ENOMEM;
- }
-
- /* Initialize the zone bitmap objects */
- for_each_zone(zone) {
- unsigned long pfn;
+ error = create_mem_extents(&mem_extents, gfp_mask);
+ if (error)
+ return error;
- if (!populated_zone(zone))
- continue;
+ list_for_each_entry(ext, &mem_extents, hook) {
+ struct bm_block *bb;
+ unsigned long pfn = ext->start;
+ unsigned long pages = ext->end - ext->start;
- zone_bm->start_pfn = zone->zone_start_pfn;
- zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- /* Allocate the list of bitmap block objects */
- nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
- bb = create_bm_block_list(nr, &ca);
- zone_bm->bm_blocks = bb;
- zone_bm->cur_block = bb;
- if (!bb)
- goto Free;
+ bb = list_entry(bm->blocks.prev, struct bm_block, hook);
- nr = zone->spanned_pages;
- pfn = zone->zone_start_pfn;
- /* Initialize the bitmap block objects */
- while (bb) {
- unsigned long *ptr;
+ error = create_bm_block_list(pages, bm->blocks.prev, &ca);
+ if (error)
+ goto Error;
- ptr = get_image_page(gfp_mask, safe_needed);
- bb->data = ptr;
- if (!ptr)
- goto Free;
+ list_for_each_entry_continue(bb, &bm->blocks, hook) {
+ bb->data = get_image_page(gfp_mask, safe_needed);
+ if (!bb->data) {
+ error = -ENOMEM;
+ goto Error;
+ }
bb->start_pfn = pfn;
- if (nr >= BM_BITS_PER_BLOCK) {
+ if (pages >= BM_BITS_PER_BLOCK) {
pfn += BM_BITS_PER_BLOCK;
- nr -= BM_BITS_PER_BLOCK;
+ pages -= BM_BITS_PER_BLOCK;
} else {
/* This is executed only once in the loop */
- pfn += nr;
+ pfn += pages;
}
bb->end_pfn = pfn;
- bb = bb->next;
}
- zone_bm = zone_bm->next;
}
+
bm->p_list = ca.chain;
memory_bm_position_reset(bm);
- return 0;
+ Exit:
+ free_mem_extents(&mem_extents);
+ return error;
- Free:
+ Error:
bm->p_list = ca.chain;
memory_bm_free(bm, PG_UNSAFE_CLEAR);
- return -ENOMEM;
+ goto Exit;
}
/**
* memory_bm_free - free memory occupied by the memory bitmap @bm
*/
-
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
- struct zone_bitmap *zone_bm;
+ struct bm_block *bb;
- /* Free the list of bit blocks for each zone_bitmap object */
- zone_bm = bm->zone_bm_list;
- while (zone_bm) {
- struct bm_block *bb;
+ list_for_each_entry(bb, &bm->blocks, hook)
+ if (bb->data)
+ free_image_page(bb->data, clear_nosave_free);
- bb = zone_bm->bm_blocks;
- while (bb) {
- if (bb->data)
- free_image_page(bb->data, clear_nosave_free);
- bb = bb->next;
- }
- zone_bm = zone_bm->next;
- }
free_list_of_pages(bm->p_list, clear_nosave_free);
- bm->zone_bm_list = NULL;
+
+ INIT_LIST_HEAD(&bm->blocks);
}
/**
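
The create_mem_extents() logic above keeps the extent list sorted by start pfn and free of overlaps, merging each zone's range into its neighbours as it goes. As a worked illustration, here is a minimal user-space sketch that maintains the same invariant (plain pointers instead of list_head, hypothetical pfn values, allocation failure handling omitted):

#include <stdio.h>
#include <stdlib.h>

struct extent {
	unsigned long start, end;	/* [start, end), in pfns */
	struct extent *next;
};

/*
 * Insert [start, end) keeping the list sorted by start and free of
 * overlaps -- the invariant create_mem_extents() maintains above.
 */
static struct extent *add_range(struct extent *head,
				unsigned long start, unsigned long end)
{
	struct extent **pp = &head;
	struct extent *cur;

	/* skip extents that end before the new range starts */
	while (*pp && (*pp)->end < start)
		pp = &(*pp)->next;

	if (!*pp || end < (*pp)->start) {
		/* no overlap: a new extent is necessary */
		cur = malloc(sizeof(*cur));
		cur->start = start;
		cur->end = end;
		cur->next = *pp;
		*pp = cur;
		return head;
	}

	/* merge with the first extent the new range touches... */
	cur = *pp;
	if (start < cur->start)
		cur->start = start;
	if (end > cur->end)
		cur->end = end;

	/* ...then absorb any later extents it now reaches */
	while (cur->next && cur->next->start <= cur->end) {
		struct extent *victim = cur->next;

		if (victim->end > cur->end)
			cur->end = victim->end;
		cur->next = victim->next;
		free(victim);
	}
	return head;
}

int main(void)
{
	struct extent *list = NULL, *e;

	/* hypothetical zone pfn ranges, deliberately out of order */
	list = add_range(list, 4096, 8192);
	list = add_range(list, 0, 1024);
	list = add_range(list, 900, 5000);	/* bridges the other two */

	for (e = list; e; e = e->next)
		printf("extent [%lu, %lu)\n", e->start, e->end);
	return 0;	/* prints: extent [0, 8192) */
}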
@@ -437,38 +452,33 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
* to a given pfn. The cur.block member of @bm is updated.
*/
-
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
void **addr, unsigned int *bit_nr)
{
- struct zone_bitmap *zone_bm;
struct bm_block *bb;
- /* Check if the pfn is from the current zone */
- zone_bm = bm->cur.zone_bm;
- if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
- zone_bm = bm->zone_bm_list;
- /* We don't assume that the zones are sorted by pfns */
- while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
- zone_bm = zone_bm->next;
-
- if (!zone_bm)
- return -EFAULT;
- }
- bm->cur.zone_bm = zone_bm;
- }
- /* Check if the pfn corresponds to the current bitmap block */
- bb = zone_bm->cur_block;
+ /*
+ * Check if the pfn corresponds to the current bitmap block and find
+ * the block where it fits if this is not the case.
+ */
+ bb = bm->cur.block;
if (pfn < bb->start_pfn)
- bb = zone_bm->bm_blocks;
+ list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
+ if (pfn >= bb->start_pfn)
+ break;
- while (pfn >= bb->end_pfn) {
- bb = bb->next;
+ if (pfn >= bb->end_pfn)
+ list_for_each_entry_continue(bb, &bm->blocks, hook)
+ if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
+ break;
- BUG_ON(!bb);
- }
- zone_bm->cur_block = bb;
+ if (&bb->hook == &bm->blocks)
+ return -EFAULT;
+
+ /* The block has been found */
+ bm->cur.block = bb;
pfn -= bb->start_pfn;
+ bm->cur.bit = pfn + 1;
*bit_nr = pfn;
*addr = bb->data;
return 0;
@@ -519,6 +529,14 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
return test_bit(bit, addr);
}
+static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
+{
+ void *addr;
+ unsigned int bit;
+
+ return !memory_bm_find_bit(bm, pfn, &addr, &bit);
+}
+
/**
* memory_bm_next_pfn - find the pfn that corresponds to the next set bit
* in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
@@ -530,29 +548,21 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
- struct zone_bitmap *zone_bm;
struct bm_block *bb;
int bit;
+ bb = bm->cur.block;
do {
- bb = bm->cur.block;
- do {
- bit = bm->cur.bit;
- bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
- if (bit < bm_block_bits(bb))
- goto Return_pfn;
-
- bb = bb->next;
- bm->cur.block = bb;
- bm->cur.bit = 0;
- } while (bb);
- zone_bm = bm->cur.zone_bm->next;
- if (zone_bm) {
- bm->cur.zone_bm = zone_bm;
- bm->cur.block = zone_bm->bm_blocks;
- bm->cur.bit = 0;
- }
- } while (zone_bm);
+ bit = bm->cur.bit;
+ bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
+ if (bit < bm_block_bits(bb))
+ goto Return_pfn;
+
+ bb = list_entry(bb->hook.next, struct bm_block, hook);
+ bm->cur.block = bb;
+ bm->cur.bit = 0;
+ } while (&bb->hook != &bm->blocks);
+
memory_bm_position_reset(bm);
return BM_END_OF_MAP;
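
With the zone level gone, memory_bm_next_pfn() above simply advances through the flat block list until it wraps back to the list head. Callers scan a bitmap with the usual reset-then-iterate pattern, roughly:

unsigned long pfn;

memory_bm_position_reset(bm);
for (pfn = memory_bm_next_pfn(bm);
     pfn != BM_END_OF_MAP;
     pfn = memory_bm_next_pfn(bm)) {
	/* act on the page at pfn, e.g. via pfn_to_page(pfn) */
}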
@@ -808,8 +818,7 @@ static unsigned int count_free_highmem_pages(void)
* We should save the page if it isn't Nosave or NosaveFree, or Reserved,
* and it isn't a part of a free chunk of pages.
*/
-
-static struct page *saveable_highmem_page(unsigned long pfn)
+static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
@@ -817,6 +826,8 @@ static struct page *saveable_highmem_page(unsigned long pfn)
return NULL;
page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ return NULL;
BUG_ON(!PageHighMem(page));
@@ -846,13 +857,16 @@ unsigned int count_highmem_pages(void)
mark_free_pages(zone);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
- if (saveable_highmem_page(pfn))
+ if (saveable_highmem_page(zone, pfn))
n++;
}
return n;
}
#else
-static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
+{
+ return NULL;
+}
#endif /* CONFIG_HIGHMEM */
/**
@@ -863,8 +877,7 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
* of pages statically defined as 'unsaveable', and it isn't a part of
* a free chunk of pages.
*/
-
-static struct page *saveable_page(unsigned long pfn)
+static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
@@ -872,6 +885,8 @@ static struct page *saveable_page(unsigned long pfn)
return NULL;
page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ return NULL;
BUG_ON(PageHighMem(page));
@@ -903,7 +918,7 @@ unsigned int count_data_pages(void)
mark_free_pages(zone);
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
- if(saveable_page(pfn))
+ if (saveable_page(zone, pfn))
n++;
}
return n;
@@ -944,7 +959,7 @@ static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
return is_highmem(zone) ?
- saveable_highmem_page(pfn) : saveable_page(pfn);
+ saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
@@ -966,7 +981,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
* data modified by kmap_atomic()
*/
safe_copy_page(buffer, s_page);
- dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+ dst = kmap_atomic(d_page, KM_USER0);
memcpy(dst, buffer, PAGE_SIZE);
kunmap_atomic(dst, KM_USER0);
} else {
@@ -975,7 +990,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
}
}
#else
-#define page_is_saveable(zone, pfn) saveable_page(pfn)
+#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
@@ -1459,9 +1474,7 @@ load_header(struct swsusp_info *info)
* unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
* the corresponding bit in the memory bitmap @bm
*/
-
-static inline void
-unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
int j;
@@ -1469,8 +1482,13 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
- memory_bm_set_bit(bm, buf[j]);
+ if (memory_bm_pfn_present(bm, buf[j]))
+ memory_bm_set_bit(bm, buf[j]);
+ else
+ return -EFAULT;
}
+
+ return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
@@ -1608,7 +1626,7 @@ get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
if (!pbe) {
swsusp_free();
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pbe->orig_page = page;
if (safe_highmem_pages > 0) {
@@ -1677,7 +1695,7 @@ prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
- return NULL;
+ return ERR_PTR(-EINVAL);
}
static inline void copy_last_highmem_page(void) {}
@@ -1788,8 +1806,13 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
struct pbe *pbe;
- struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
+ struct page *page;
+ unsigned long pfn = memory_bm_next_pfn(bm);
+ if (pfn == BM_END_OF_MAP)
+ return ERR_PTR(-EFAULT);
+
+ page = pfn_to_page(pfn);
if (PageHighMem(page))
return get_highmem_page_buffer(page, ca);
@@ -1805,7 +1828,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
pbe = chain_alloc(ca, sizeof(struct pbe));
if (!pbe) {
swsusp_free();
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
pbe->address = safe_pages_list;
@@ -1868,7 +1891,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
return error;
} else if (handle->prev <= nr_meta_pages) {
- unpack_orig_pfns(buffer, &copy_bm);
+ error = unpack_orig_pfns(buffer, &copy_bm);
+ if (error)
+ return error;
+
if (handle->prev == nr_meta_pages) {
error = prepare_image(&orig_bm, &copy_bm);
if (error)
@@ -1879,12 +1905,14 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
handle->sync_read = 0;
- if (!handle->buffer)
- return -ENOMEM;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
}
} else {
copy_last_highmem_page();
handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
if (handle->buffer != buffer)
handle->sync_read = 0;
}
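
A theme of the snapshot.c error-path changes above is switching get_buffer() and get_highmem_page_buffer() from returning bare NULL on failure to the kernel's ERR_PTR convention, so callers can propagate a specific errno instead of assuming -ENOMEM. The general pattern, from <linux/err.h>:

void *buf;

buf = get_buffer(&orig_bm, &ca);
if (IS_ERR(buf))
	return PTR_ERR(buf);	/* -ENOMEM, -EFAULT or -EINVAL here */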
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 023ff2a31d89..a92c91451559 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -262,3 +262,125 @@ int swsusp_shrink_memory(void)
return 0;
}
+
+/*
+ * Platforms, like ACPI, may want us to save some memory used by them during
+ * hibernation and to restore the contents of this memory during the subsequent
+ * resume. The code below implements a mechanism allowing us to do that.
+ */
+
+struct nvs_page {
+ unsigned long phys_start;
+ unsigned int size;
+ void *kaddr;
+ void *data;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ * hibernate_nvs_register - register platform NVS memory region to save
+ * @start - physical address of the region
+ * @size - size of the region
+ *
+ * The NVS region need not be page-aligned (both ends) and we arrange
+ * things so that the data from page-aligned addresses in this region will
+ * be copied into separate RAM pages.
+ */
+int hibernate_nvs_register(unsigned long start, unsigned long size)
+{
+ struct nvs_page *entry, *next;
+
+ while (size > 0) {
+ unsigned int nr_bytes;
+
+ entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+ if (!entry)
+ goto Error;
+
+ list_add_tail(&entry->node, &nvs_list);
+ entry->phys_start = start;
+ nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+ entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+ start += entry->size;
+ size -= entry->size;
+ }
+ return 0;
+
+ Error:
+ list_for_each_entry_safe(entry, next, &nvs_list, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * hibernate_nvs_free - free data pages allocated for saving NVS regions
+ */
+void hibernate_nvs_free(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ free_page((unsigned long)entry->data);
+ entry->data = NULL;
+ if (entry->kaddr) {
+ iounmap(entry->kaddr);
+ entry->kaddr = NULL;
+ }
+ }
+}
+
+/**
+ * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int hibernate_nvs_alloc(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node) {
+ entry->data = (void *)__get_free_page(GFP_KERNEL);
+ if (!entry->data) {
+ hibernate_nvs_free();
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * hibernate_nvs_save - save NVS memory regions
+ */
+void hibernate_nvs_save(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ entry->kaddr = ioremap(entry->phys_start, entry->size);
+ memcpy(entry->data, entry->kaddr, entry->size);
+ }
+}
+
+/**
+ * hibernate_nvs_restore - restore NVS memory regions
+ *
+ * This function is going to be called with interrupts disabled, so it
+ * cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void hibernate_nvs_restore(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data)
+ memcpy(entry->kaddr, entry->data, entry->size);
+}