#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns LRU_FILE for a page-cache page backed by a regular filesystem,
 * or 0 for anonymous, tmpfs, or otherwise ram/swap backed pages.
 * Used by the LRU-list manipulation functions to pick the right list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	/* Swap-backed pages (anon, tmpfs, ...) live on the anon LRUs. */
	return PageSwapBacked(page) ? 0 : LRU_FILE;
}
/*
 * Link @page onto the head of the LRU list @l for @zone and bump the
 * matching per-zone page-state counter (NR_LRU_BASE + l).  Caller is
 * expected to hold the appropriate zone LRU lock.
 */
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_add(&page->lru, &zone->lru[l].list);
__inc_zone_state(zone, NR_LRU_BASE + l);
}
/*
 * Unlink @page from the LRU list @l of @zone and decrement the matching
 * per-zone page-state counter.  Mirror of add_page_to_lru_list(); the
 * caller must know which list the page is currently on.
 */
static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_del(&page->lru);
__dec_zone_state(zone, NR_LRU_BASE + l);
}
/* Place @page on @zone's inactive anon LRU. */
static inline void add_page_to_inactive_anon_list(struct zone *zone,
						  struct page *page)
{
	add_page_to_lru_list(zone, page, LRU_INACTIVE_ANON);
}
/* Place @page on @zone's active anon LRU. */
static inline void add_page_to_active_anon_list(struct zone *zone,
						struct page *page)
{
	add_page_to_lru_list(zone, page, LRU_ACTIVE_ANON);
}
/* Place @page on @zone's inactive file LRU. */
static inline void add_page_to_inactive_file_list(struct zone *zone,
						  struct page *page)
{
	add_page_to_lru_list(zone, page, LRU_INACTIVE_FILE);
}
/* Place @page on @zone's active file LRU. */
static inline void add_page_to_active_file_list(struct zone *zone,
						struct page *page)
{
	add_page_to_lru_list(zone, page, LRU_ACTIVE_FILE);
}
/* Remove @page from @zone's inactive anon LRU. */
static inline void del_page_from_inactive_anon_list(struct zone *zone,
						    struct page *page)
{
	del_page_from_lru_list(zone, page, LRU_INACTIVE_ANON);
}
/* Remove @page from @zone's active anon LRU. */
static inline void del_page_from_active_anon_list(struct zone *zone,
						  struct page *page)
{
	del_page_from_lru_list(zone, page, LRU_ACTIVE_ANON);
}
/* Remove @page from @zone's inactive file LRU. */
static inline void del_page_from_inactive_file_list(struct zone *zone,
						    struct page *page)
{
	del_page_from_lru_list(zone, page, LRU_INACTIVE_FILE);
}
static inline void
del_page_from_active_file_list(struct zone *zone, struct page *page)
{
del_page_from_lru_list(zone, page, LRU_INACTIVE_FILE);
}
/*
 * Remove @page from whichever LRU list it is currently on, clearing the
 * page flags that encode that list membership and decrementing the
 * matching per-zone counter.  Unlike del_page_from_lru_list(), the
 * caller does not need to know which list the page is on: the list is
 * reconstructed here from PageUnevictable/PageActive/page_is_file_cache,
 * in the same way page_lru() computes it.
 */
static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
enum lru_list l = LRU_BASE;
list_del(&page->lru);
if (PageUnevictable(page)) {
/* Unevictable pages have their own list; clear the marker flag. */
__ClearPageUnevictable(page);
l = LRU_UNEVICTABLE;
} else {
if (PageActive(page)) {
/* Clear before computing l so the flag state matches the list. */
__ClearPageActive(page);
l += LRU_ACTIVE;
}
/* Adds LRU_FILE for file-backed pages, 0 for swap-backed. */
l += page_is_file_cache(page);
}
__dec_zone_state(zone, NR_LRU_BASE + l);
}
/**
* page_lru - which LRU list should a page be on?
* @page: the page to test
*
* Returns the LRU list a page should be on, as an index
* into the array of LRU lists.
*/
static inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru = LRU_BASE;
if (PageUnevictable(page))
lru = LRU_UNEVICTABLE;
else {
if (PageActive(page))
lru += LRU_ACTIVE;
lru += page_is_file_cache(page);
}
return lru;
}
/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @zone: zone to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static inline int inactive_anon_is_low(struct zone *zone)
{
	unsigned long nr_active = zone_page_state(zone, NR_ACTIVE_ANON);
	unsigned long nr_inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	/* Inactive list is "low" when scaled below the active list. */
	return nr_inactive * zone->inactive_ratio < nr_active;
}
#endif