Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  |  43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c2542..b4d109e389b8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
 
 #endif /* !__GENERATING_BOUNDS.H */
 
@@ -306,14 +300,13 @@ struct zone {
 	 */
 	unsigned long		min_unmapped_pages;
 	unsigned long		min_slab_pages;
-	struct per_cpu_pageset	*pageset[NR_CPUS];
-#else
-	struct per_cpu_pageset	pageset[NR_CPUS];
 #endif
+	struct per_cpu_pageset __percpu *pageset;
 	/*
 	 * free areas of different sizes
 	 */
 	spinlock_t		lock;
+	int			all_unreclaimable; /* All pages pinned */
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
@@ -328,6 +321,15 @@ struct zone {
 	unsigned long		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int		compact_considered;
+	unsigned int		compact_defer_shift;
+#endif
 
 	ZONE_PADDING(_pad1_)
 
@@ -349,7 +351,7 @@ struct zone {
 	 * prev_priority holds the scanning priority for this zone. It is
 	 * defined as the scanning priority at which we achieved our reclaim
 	 * target at the previous try_to_free_pages() or balance_pgdat()
-	 * invokation.
+	 * invocation.
 	 *
 	 * We use prev_priority as a measure of how much stress page reclaim is
 	 * under - it drives the swappiness decision: whether to unmap mapped
@@ -425,7 +427,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
-	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
 } zone_flags_t;
@@ -445,11 +446,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
-	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
@@ -620,7 +616,9 @@ typedef struct pglist_data {
 	struct page_cgroup *node_page_cgroup;
 #endif
 #endif
+#ifndef CONFIG_NO_BOOTMEM
 	struct bootmem_data *bdata;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
@@ -652,9 +650,10 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
+extern struct mutex zonelists_mutex;
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
-void build_all_zonelists(void);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -672,6 +671,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
 #endif
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+int local_memory_node(int node_id);
+#else
+static inline int local_memory_node(int node_id) { return node_id; };
+#endif
+
 #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #endif
@@ -983,7 +988,7 @@ struct mem_section {
 #endif
 
 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
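
The pageset change above replaces the per-zone NR_CPUS arrays and the zone_pcp() accessor macro with a single dynamically allocated __percpu pointer. A minimal usage sketch with the generic per-cpu API from <linux/percpu.h>; the allocation site itself lives outside this header and is not part of the diff:

	/* One per_cpu_pageset per CPU, allocated once per zone. */
	zone->pageset = alloc_percpu(struct per_cpu_pageset);

	/* per_cpu_ptr() takes over from the removed zone_pcp(zone, cpu) macro. */
	struct per_cpu_pageset *pset = per_cpu_ptr(zone->pageset, cpu);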
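Similarly, the all-unreclaimable state moves from an atomic bit in zone->flags (the removed ZONE_ALL_UNRECLAIMABLE flag and zone_is_all_unreclaimable() helper) to a plain int in struct zone, so a reclaim-side test would change roughly like this (illustrative caller, not part of the diff):

	/* before: atomic flag test */
	if (zone_is_all_unreclaimable(zone))
		continue;

	/* after: plain field, serialized by zone->lock where needed */
	if (zone->all_unreclaimable)
		continue;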
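The comment on the new compaction fields describes an exponential backoff: after a failed compaction, 1<<compact_defer_shift subsequent requests are skipped, with compact_considered counting the attempts since the last failure. A sketch of the test those two fields imply; the helper name compaction_deferred() and the clamping detail are assumptions, not part of this header:

	/* Returns nonzero while compaction should still be skipped. */
	static inline int compaction_deferred(struct zone *zone)
	{
		unsigned long defer_limit = 1UL << zone->compact_defer_shift;

		/* Clamp the counter so it cannot wrap on long failure streaks. */
		if (++zone->compact_considered > defer_limit)
			zone->compact_considered = defer_limit;

		return zone->compact_considered < defer_limit;
	}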
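build_all_zonelists() also gains a void *data argument, and an exported zonelists_mutex appears alongside it, suggesting that zonelist rebuilds (e.g. at memory hotplug time) now serialize on the mutex. A hedged caller-side sketch, assuming NULL is the plain-rebuild argument:

	mutex_lock(&zonelists_mutex);
	build_all_zonelists(NULL);
	mutex_unlock(&zonelists_mutex);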
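Finally, the NR_SECTION_ROOTS change matters whenever NR_MEM_SECTIONS is not a multiple of SECTIONS_PER_ROOT: plain integer division truncates, leaving the trailing sections without a root (or yielding zero roots outright). DIV_ROUND_UP, defined in <linux/kernel.h>, rounds the quotient up:

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/*
	 * With hypothetical values NR_MEM_SECTIONS = 24, SECTIONS_PER_ROOT = 16:
	 *   24 / 16              == 1  -> the last 8 sections have no root
	 *   DIV_ROUND_UP(24, 16) == 2  -> all 24 sections are covered
	 */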