// SPDX-License-Identifier: GPL-2.0-only
/*
* Common interface for implementing a memory balloon, including support
* for migration of pages inflated in a memory balloon.
*
* Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon.h>

/*
 * Lock protecting the balloon_dev_info of all devices. We don't really
 * expect more than one device.
 */
static DEFINE_SPINLOCK(balloon_pages_lock);

/**
 * balloon_page_insert - insert a page into the balloon's page list and set
 * page->private accordingly
 * @balloon: pointer to the balloon device
 * @page: page to be inserted as a 'balloon page'
 *
 * Caller must ensure the balloon_pages_lock is held.
 */
static void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
lockdep_assert_held(&balloon_pages_lock);
__SetPageOffline(page);
if (IS_ENABLED(CONFIG_BALLOON_MIGRATION)) {
SetPageMovableOps(page);
set_page_private(page, (unsigned long)balloon);
}
list_add(&page->lru, &balloon->pages);
}

/**
* balloon_page_finalize - prepare a balloon page that was removed from the
* balloon list for release to the page allocator
* @page: page to be released to the page allocator
*
* Caller must ensure the balloon_pages_lock is held.
*/
static void balloon_page_finalize(struct page *page)
{
lockdep_assert_held(&balloon_pages_lock);
if (IS_ENABLED(CONFIG_BALLOON_MIGRATION))
set_page_private(page, 0);
/* PageOffline is sticky until the page is freed to the buddy. */
}

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
struct page *page)
{
balloon_page_insert(b_dev_info, page);
if (b_dev_info->adjust_managed_page_count)
adjust_managed_page_count(page, -1);
__count_vm_event(BALLOON_INFLATE);
inc_node_page_state(page, NR_BALLOON_PAGES);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 * list.
 * @b_dev_info: balloon device descriptor where the pages will be inserted
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
struct list_head *pages)
{
struct page *page, *tmp;
unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&balloon_pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 * returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 * @pages: pointer to the list of pages that will be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to the
 * compaction of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
struct list_head *pages, size_t n_req_pages)
{
struct page *page, *tmp;
unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&balloon_pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
if (n_pages == n_req_pages)
break;
list_del(&page->lru);
if (b_dev_info->adjust_managed_page_count)
adjust_managed_page_count(page, 1);
balloon_page_finalize(page);
__count_vm_event(BALLOON_DEFLATE);
list_add(&page->lru, pages);
dec_node_page_state(page, NR_BALLOON_PAGES);
n_pages++;
}
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
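
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): how
 * a driver might use the batch APIs above together with balloon_page_alloc()
 * (defined below). The my_inflate()/my_deflate() names and the host
 * notification steps are assumptions; real drivers add their own protocol
 * handling and error paths.
 *
 *	static size_t my_inflate(struct balloon_dev_info *b_dev_info,
 *				 size_t nr_pages)
 *	{
 *		LIST_HEAD(pages);
 *		size_t i;
 *
 *		for (i = 0; i < nr_pages; i++) {
 *			struct page *page = balloon_page_alloc();
 *
 *			if (!page)
 *				break;
 *			list_add(&page->lru, &pages);
 *		}
 *		... tell the host about the pages (driver-specific) ...
 *		return balloon_page_list_enqueue(b_dev_info, &pages);
 *	}
 *
 *	static size_t my_deflate(struct balloon_dev_info *b_dev_info,
 *				 size_t nr_pages)
 *	{
 *		struct page *page, *tmp;
 *		LIST_HEAD(pages);
 *		size_t n_pages;
 *
 *		n_pages = balloon_page_list_dequeue(b_dev_info, &pages,
 *						    nr_pages);
 *		... tell the host the pages are coming back (driver-specific) ...
 *		list_for_each_entry_safe(page, tmp, &pages, lru) {
 *			list_del(&page->lru);
 *			__free_page(page);
 *		}
 *		return n_pages;
 *	}
 */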

/**
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 * page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system.
 *
 * Return: struct page for the allocated page or NULL on allocation failure.
 */
struct page *balloon_page_alloc(void)
{
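	/*
	 * Avoid dipping into emergency reserves (__GFP_NOMEMALLOC), retrying
	 * hard (__GFP_NORETRY) or warning on failure (__GFP_NOWARN):
	 * inflation is best-effort and callers must handle allocation
	 * failure. With migration support, allocate from movable areas
	 * (GFP_HIGHUSER_MOVABLE) so compaction can migrate inflated pages.
	 */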
gfp_t gfp_flags = __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
if (IS_ENABLED(CONFIG_BALLOON_MIGRATION))
gfp_flags |= GFP_HIGHUSER_MOVABLE;
else
gfp_flags |= GFP_HIGHUSER;
return alloc_page(gfp_flags);
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/**
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing the page from the guest system.
 *
 * Drivers must not enqueue pages while page->lru is still in use, and must
 * not use page->lru until the page has been dequeued again.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&balloon_pages_lock, flags);
balloon_page_enqueue_one(b_dev_info, page);
spin_unlock_irqrestore(&balloon_pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
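
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): the
 * single-page inflate step, pairing balloon_page_alloc() with
 * balloon_page_enqueue(). Host notification is driver-specific and elided.
 *
 *	struct page *page = balloon_page_alloc();
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... tell the host about the page (driver-specific) ...
 *	balloon_page_enqueue(b_dev_info, page);
 */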

/**
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 * its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued
 * page before definitively releasing it back to the guest system.
 *
 * Caller must perform its own accounting to ensure that this function is
 * called only if some pages are actually enqueued.
 *
 * Note that this function may fail to dequeue a page even if pages are
 * enqueued - since the page list can be temporarily empty due to the
 * compaction of isolated pages.
 *
 * TODO: remove the caller accounting requirements, and allow caller to wait
 * until all pages can be dequeued.
 *
 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
unsigned long flags;
LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);
if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * has gone wrong and some balloon pages were lost. BUG() here,
		 * otherwise the balloon driver may get stuck in an infinite
		 * loop while attempting to release all its pages.
		 */
spin_lock_irqsave(&balloon_pages_lock, flags);
if (unlikely(list_empty(&b_dev_info->pages) &&
!b_dev_info->isolated_pages))
BUG();
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return NULL;
}
return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
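
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): the
 * matching single-page deflate step. A NULL return does not necessarily mean
 * the balloon is empty - the page list can be briefly empty while pages are
 * isolated for compaction - so a driver would typically retry rather than
 * treat NULL as fatal. Host notification is driver-specific and elided.
 *
 *	struct page *page = balloon_page_dequeue(b_dev_info);
 *
 *	if (!page)
 *		return -EAGAIN;
 *	... tell the host the page is coming back (driver-specific) ...
 *	__free_page(page);
 */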

#ifdef CONFIG_BALLOON_MIGRATION
static struct balloon_dev_info *balloon_page_device(struct page *page)
{
return (struct balloon_dev_info *)page_private(page);
}

static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
struct balloon_dev_info *b_dev_info;
	unsigned long flags;

	spin_lock_irqsave(&balloon_pages_lock, flags);
b_dev_info = balloon_page_device(page);
if (!b_dev_info) {
/*
* The page already got deflated and removed from the
* balloon list.
*/
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return false;
}
list_del(&page->lru);
b_dev_info->isolated_pages++;
spin_unlock_irqrestore(&balloon_pages_lock, flags);
return true;
}

static void balloon_page_putback(struct page *page)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	/*
* When we isolated the page, the page was still inflated in a balloon
* device. As isolated balloon pages cannot get deflated, we still have
* a balloon device here.
*/
if (WARN_ON_ONCE(!b_dev_info))
return;
spin_lock_irqsave(&balloon_pages_lock, flags);
list_add(&page->lru, &b_dev_info->pages);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&balloon_pages_lock, flags);
}

static int balloon_page_migrate(struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
	int rc;

	/*
* When we isolated the page, the page was still inflated in a balloon
* device. As isolated balloon pages cannot get deflated, we still have
* a balloon device here.
*/
if (WARN_ON_ONCE(!b_dev_info))
return -EAGAIN;
rc = b_dev_info->migratepage(b_dev_info, newpage, page, mode);
if (rc < 0 && rc != -ENOENT)
return rc;
spin_lock_irqsave(&balloon_pages_lock, flags);
if (!rc) {
/* Insert the new page into the balloon list. */
get_page(newpage);
balloon_page_insert(b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
if (b_dev_info->adjust_managed_page_count &&
page_zone(page) != page_zone(newpage)) {
/*
* When we migrate a page to a different zone we
* have to fixup the count of both involved zones.
*/
adjust_managed_page_count(page, 1);
adjust_managed_page_count(newpage, -1);
}
} else {
/* Old page was deflated but new page not inflated. */
__count_vm_event(BALLOON_DEFLATE);
if (b_dev_info->adjust_managed_page_count)
adjust_managed_page_count(page, 1);
}
b_dev_info->isolated_pages--;
/* Free the now-deflated page we isolated in balloon_page_isolate(). */
balloon_page_finalize(page);
spin_unlock_irqrestore(&balloon_pages_lock, flags);
put_page(page);
return 0;
}
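
/*
 * Sketch of the migratepage callback contract assumed by
 * balloon_page_migrate() above; the my_tell_host_*() helpers are hypothetical
 * stand-ins for a driver's host protocol. The callback returns 0 once
 * @newpage is inflated and @page is deflated, -ENOENT if @page was deflated
 * but @newpage could not be inflated (the balloon shrinks by one page), or
 * another negative errno to fail the migration and have the page put back.
 * List manipulation, page references and statistics are all handled by the
 * core above, not by the driver.
 *
 *	static int my_migratepage(struct balloon_dev_info *b_dev_info,
 *				  struct page *newpage, struct page *page,
 *				  enum migrate_mode mode)
 *	{
 *		int rc = my_tell_host_inflate(newpage);
 *
 *		if (rc)
 *			return rc;
 *		my_tell_host_deflate(page);
 *		return 0;
 *	}
 */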

static const struct movable_operations balloon_mops = {
.migrate_page = balloon_page_migrate,
.isolate_page = balloon_page_isolate,
.putback_page = balloon_page_putback,
};
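
/*
 * balloon_mops is registered once for all PGTY_offline pages; an individual
 * page only participates in migration after balloon_page_insert() has
 * additionally marked it with SetPageMovableOps().
 */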

static int __init balloon_init(void)
{
return set_movable_ops(&balloon_mops, PGTY_offline);
}
core_initcall(balloon_init);
#endif /* CONFIG_BALLOON_MIGRATION */