Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cpuset.c    7
-rw-r--r--  kernel/power/swap.c      10
2 files changed, 9 insertions, 8 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index c43efef7df71..832179236529 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -458,9 +458,8 @@ static void guarantee_active_cpus(struct task_struct *tsk,
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
-        while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
+        while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
                 cs = parent_cs(cs);
-        nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
 }
 
 /**
@@ -2622,13 +2621,13 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
         cpuset_for_each_descendant_pre(cp, pos_css, cs) {
                 struct cpuset *parent = parent_cs(cp);
 
-                nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
+                bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
 
                 /*
                  * If it becomes empty, inherit the effective mask of the
                  * parent, which is guaranteed to have some MEMs.
                  */
-                if (is_in_v2_mode() && nodes_empty(*new_mems))
+                if (is_in_v2_mode() && !has_mems)
                         *new_mems = parent->effective_mems;
 
                 /* Skip the whole subtree if the nodemask remains the same. */
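Both cpuset hunks rely on nodes_and() reporting whether the intersection it just computed is non-empty, which is what lets the callers drop the separate nodes_intersects() and nodes_empty() checks. A minimal stand-alone sketch of that pattern in plain C, assuming an AND helper that returns true when the result has bits set (as the patch implies for nodes_and()); the toy_* names are illustrative stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy stand-ins for nodemask_t and nodes_and(): the AND helper writes the
 * intersection into *dst and reports whether the result is non-empty.
 */
typedef unsigned long toy_nodemask_t;

static bool toy_nodes_and(toy_nodemask_t *dst, toy_nodemask_t a, toy_nodemask_t b)
{
        *dst = a & b;
        return *dst != 0;
}

int main(void)
{
        toy_nodemask_t effective = 0x0c;        /* nodes 2-3 */
        toy_nodemask_t online    = 0x03;        /* nodes 0-1 */
        toy_nodemask_t result;

        /*
         * One call both computes the intersection and says whether the
         * caller has to fall back (e.g. walk up to the parent cpuset).
         */
        if (!toy_nodes_and(&result, effective, online))
                printf("empty intersection, fall back to the parent's mask\n");
        else
                printf("usable mask: %#lx\n", result);

        return 0;
}

Folding the emptiness test into the AND keeps the fallback decision (walk up to the parent, or inherit the parent's effective mask) right next to the computation that produced it, instead of scanning the mask a second time.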
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 7e462957c9bf..c4eb284b8e72 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -174,10 +174,10 @@ sector_t alloc_swapdev_block(int swap)
          * Allocate a swap page and register that it has been allocated, so that
          * it can be freed in case of an error.
          */
-        offset = swp_offset(get_swap_page_of_type(swap));
+        offset = swp_offset(swap_alloc_hibernation_slot(swap));
         if (offset) {
                 if (swsusp_extents_insert(offset))
-                        swap_free(swp_entry(swap, offset));
+                        swap_free_hibernation_slot(swp_entry(swap, offset));
                 else
                         return swapdev_block(swap, offset);
         }
@@ -186,6 +186,7 @@ sector_t alloc_swapdev_block(int swap)
  */
 void free_all_swap_pages(int swap)
 {
+        unsigned long offset;
         struct rb_node *node;
 
         /*
@@ -197,8 +198,9 @@ void free_all_swap_pages(int swap)
 
                 ext = rb_entry(node, struct swsusp_extent, node);
                 rb_erase(node, &swsusp_extents);
-                swap_free_nr(swp_entry(swap, ext->start),
-                             ext->end - ext->start + 1);
+
+                for (offset = ext->start; offset <= ext->end; offset++)
+                        swap_free_hibernation_slot(swp_entry(swap, offset));
 
                 kfree(ext);
         }
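The swap.c side keeps the allocate-then-register shape of alloc_swapdev_block() and swaps in per-slot hibernation helpers, while free_all_swap_pages() now releases each offset of an extent individually rather than through one batched swap_free_nr() call. A rough userspace sketch of that allocate/record/rollback flow and of the per-slot extent walk; the toy_* helpers below are invented stand-ins for the swap calls in the diff, not real kernel or libc APIs:

#include <stdio.h>

/*
 * Toy stand-ins for the hibernation swap helpers touched by the diff;
 * names and return conventions are only mirrored for illustration.
 */
static unsigned long toy_alloc_slot(void)
{
        return 42;                      /* 0 would mean "allocation failed" */
}

static int toy_record_slot(unsigned long offset)
{
        printf("recorded slot %lu\n", offset);
        return 0;                       /* non-zero would mean "could not record" */
}

static void toy_free_slot(unsigned long offset)
{
        printf("released slot %lu\n", offset);
}

/*
 * Allocate a slot, record it so it can be freed later, and roll the
 * allocation back if recording fails -- the same shape as
 * alloc_swapdev_block() in the hunk above.
 */
static unsigned long toy_alloc_block(void)
{
        unsigned long offset = toy_alloc_slot();

        if (offset) {
                if (toy_record_slot(offset))
                        toy_free_slot(offset);  /* roll back on bookkeeping failure */
                else
                        return offset;          /* success: caller owns the slot */
        }
        return 0;
}

/*
 * Release a recorded [start, end] extent one slot at a time, as the new
 * loop in free_all_swap_pages() does instead of a single batched free.
 */
static void toy_free_extent(unsigned long start, unsigned long end)
{
        unsigned long offset;

        for (offset = start; offset <= end; offset++)
                toy_free_slot(offset);
}

int main(void)
{
        unsigned long slot = toy_alloc_block();

        if (slot)
                toy_free_extent(slot, slot + 2);
        return 0;
}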