-rw-r--r--  drivers/block/zram/zram_drv.c   11
-rw-r--r--  include/linux/zpool.h            4
-rw-r--r--  include/linux/zsmalloc.h         3
-rw-r--r--  mm/zpool.c                       8
-rw-r--r--  mm/zsmalloc.c                   20
-rw-r--r--  mm/zswap.c                       2
6 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fda7d8624889..0ba18277ed7b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1694,7 +1694,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1761,7 +1761,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle);
@@ -1981,10 +1981,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
* We are holding per-CPU stream mutex and entry lock so better
* avoid direct reclaim. Allocation error is not fatal since
* we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
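All three zram call sites follow the same pattern: the preferred node comes from the page whose data is being compressed, so the compressed copy ends up on that page's node. A minimal sketch of the calling convention (the helper name is hypothetical; GFP flags as in the hunks above):

    /* Allocate space for compressed data on the same NUMA node as
     * the source page, using the zs_malloc() signature this patch
     * introduces.
     */
    static unsigned long zram_alloc_near(struct zs_pool *pool,
                                         size_t comp_len, struct page *page)
    {
        return zs_malloc(pool, comp_len,
                         GFP_NOIO | __GFP_NOWARN |
                         __GFP_HIGHMEM | __GFP_MOVABLE,
                         page_to_nid(page));
    }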
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 52f30e526607..369ef068fad8 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -22,7 +22,7 @@ const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
+ unsigned long *handle, const int nid);
void zpool_free(struct zpool *pool, unsigned long handle);
@@ -64,7 +64,7 @@ struct zpool_driver {
void (*destroy)(void *pool);
int (*malloc)(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
+ unsigned long *handle, const int nid);
void (*free)(void *pool, unsigned long handle);
void *(*obj_read_begin)(void *pool, unsigned long handle,
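Because the ->malloc() hook gains a parameter, every backend that registers a zpool_driver has to be converted in the same patch, even a backend that ignores the hint. A sketch of a conforming callback (the example_* names are hypothetical; the real zsmalloc conversion, zs_zpool_malloc(), appears in the mm/zsmalloc.c hunks below):

    static int example_malloc(void *pool, size_t size, gfp_t gfp,
                              unsigned long *handle, const int nid)
    {
        /* Forward the node hint; a non-NUMA-aware backend may ignore @nid. */
        *handle = zs_malloc(pool, size, gfp, nid);
        return IS_ERR_VALUE(*handle) ? PTR_ERR((void *)*handle) : 0;
    }

    static struct zpool_driver example_driver = {
        .type   = "example",
        .malloc = example_malloc,
        /* remaining callbacks elided */
    };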
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index c26baf9fb331..13e9cc5490f7 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -26,7 +26,8 @@ struct zs_pool;
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
-unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags,
+ const int nid);
void zs_free(struct zs_pool *pool, unsigned long obj);
size_t zs_huge_class_size(struct zs_pool *pool);
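A caller with no placement preference does not have to compute a node id: passing NUMA_NO_NODE is the neutral choice, since alloc_pages_node() (used further down in mm/zsmalloc.c) maps NUMA_NO_NODE to the local memory node. A hypothetical no-preference caller:

    unsigned long handle;

    handle = zs_malloc(pool, size, GFP_KERNEL | __GFP_NOWARN, NUMA_NO_NODE);
    if (IS_ERR_VALUE(handle))
        return PTR_ERR((void *)handle);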
diff --git a/mm/zpool.c b/mm/zpool.c
index 6d6d88930932..0a71d03369f1 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -226,20 +226,22 @@ const char *zpool_get_type(struct zpool *zpool)
* @size: The amount of memory to allocate.
* @gfp: The GFP flags to use when allocating memory.
* @handle: Pointer to the handle to set
+ * @nid: The preferred node id.
*
* This allocates the requested amount of memory from the pool.
* The gfp flags will be used when allocating memory, if the
* implementation supports it. The provided @handle will be
- * set to the allocated object handle.
+ * set to the allocated object handle. The allocation will
+ * prefer the NUMA node specified by @nid.
*
* Implementations must guarantee this to be thread-safe.
*
* Returns: 0 on success, negative value on error.
*/
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
- unsigned long *handle)
+ unsigned long *handle, const int nid)
{
- return zpool->driver->malloc(zpool->pool, size, gfp, handle);
+ return zpool->driver->malloc(zpool->pool, size, gfp, handle, nid);
}
/**
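From the caller's side the conversion is mechanical: one extra argument. A sketch in the style of the zswap user at the end of this diff (zpool, dlen and page assumed from that hunk):

    unsigned long handle;
    gfp_t gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
    int err = zpool_malloc(zpool, dlen, gfp, &handle, page_to_nid(page));

    if (err)
        return err;     /* @handle is only valid on success */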
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d14a7e317ac8..513b08c7c941 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -243,9 +243,9 @@ static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}
-static inline struct zpdesc *alloc_zpdesc(gfp_t gfp)
+static inline struct zpdesc *alloc_zpdesc(gfp_t gfp, const int nid)
{
- struct page *page = alloc_page(gfp);
+ struct page *page = alloc_pages_node(nid, gfp, 0);
return page_zpdesc(page);
}
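Two properties of alloc_pages_node() are worth noting here: it treats NUMA_NO_NODE as 'allocate on the local node', and @nid is a preference rather than a guarantee, since without __GFP_THISNODE the page allocator may fall back to other nodes under memory pressure. Unlike plain alloc_page(), it also bypasses the task's NUMA mempolicy. A side-by-side sketch (assuming no explicit node preference):

    struct page *old_way = alloc_page(gfp);                        /* mempolicy-driven placement */
    struct page *new_way = alloc_pages_node(NUMA_NO_NODE, gfp, 0); /* local node, may fall back */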
@@ -462,9 +462,9 @@ static void zs_zpool_destroy(void *pool)
}
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
+ unsigned long *handle, const int nid)
{
- *handle = zs_malloc(pool, size, gfp);
+ *handle = zs_malloc(pool, size, gfp, nid);
if (IS_ERR_VALUE(*handle))
return PTR_ERR((void *)*handle);
@@ -1043,8 +1043,8 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
* Allocate a zspage for the given size class
*/
static struct zspage *alloc_zspage(struct zs_pool *pool,
- struct size_class *class,
- gfp_t gfp)
+ struct size_class *class,
+ gfp_t gfp, const int nid)
{
int i;
struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
@@ -1061,7 +1061,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
for (i = 0; i < class->pages_per_zspage; i++) {
struct zpdesc *zpdesc;
- zpdesc = alloc_zpdesc(gfp);
+ zpdesc = alloc_zpdesc(gfp, nid);
if (!zpdesc) {
while (--i >= 0) {
zpdesc_dec_zone_page_state(zpdescs[i]);
@@ -1336,12 +1336,14 @@ static unsigned long obj_malloc(struct zs_pool *pool,
* @pool: pool to allocate from
* @size: size of block to allocate
* @gfp: gfp flags when allocating object
+ * @nid: The preferred node id to allocate a new zspage (if needed)
*
* On success, handle to the allocated object is returned,
* otherwise an ERR_PTR().
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
+ const int nid)
{
unsigned long handle;
struct size_class *class;
@@ -1376,7 +1378,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
spin_unlock(&class->lock);
- zspage = alloc_zspage(pool, class, gfp);
+ zspage = alloc_zspage(pool, class, gfp, nid);
if (!zspage) {
cache_free_handle(pool, handle);
return (unsigned long)ERR_PTR(-ENOMEM);
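zs_malloc() reports failure by encoding an ERR_PTR value in the returned unsigned long handle, which is why every caller in this series tests IS_ERR_VALUE() instead of comparing against zero. The unwrap idiom, as used in the zram hunks above:

    unsigned long handle = zs_malloc(pool, size, gfp, nid);

    if (IS_ERR_VALUE(handle))
        return PTR_ERR((void *)handle);  /* e.g. -ENOMEM */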
diff --git a/mm/zswap.c b/mm/zswap.c
index 204fb59da33c..455e9425c5f5 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -981,7 +981,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
zpool = pool->zpool;
gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
- alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
+ alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle, page_to_nid(page));
if (alloc_ret)
goto unlock;
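Taken together, the hunks thread the source page's node id from the top of the stack down to the page allocator. The resulting call chain, condensed from the changes above:

    /* zswap_compress()   -> zpool_malloc(..., page_to_nid(page))
     * zs_zpool_malloc()  -> zs_malloc(..., nid)
     * zs_malloc()        -> alloc_zspage(..., nid)   (only when a new zspage is needed)
     * alloc_zspage()     -> alloc_zpdesc(gfp, nid)
     * alloc_zpdesc()     -> alloc_pages_node(nid, gfp, 0)
     */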