From f0b791a34cb3cffd2bbc3ca4365c9b719fa2c9f3 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 23 Jan 2014 15:52:49 -0800 Subject: mm: print more details for bad_page() bad_page() is cool in that it prints out a bunch of data about the page. But, I can never remember which page flags are good and which are bad, or whether ->index or ->mapping is required to be NULL. This patch allows bad/dump_page() callers to specify a string about why they are dumping the page and adds explanation strings to a number of places. It also adds a 'bad_flags' argument to bad_page(), which it then dumps out separately from the flags which are actually set. This way, the messages will show specifically why the page was bad, *specifically* which flags it is complaining about, if it was a page flag combination which was the problem. [akpm@linux-foundation.org: switch to pr_alert] Signed-off-by: Dave Hansen Reviewed-by: Christoph Lameter Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index a512dd836931..03bbcb84d96e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2029,7 +2029,9 @@ extern void shake_page(struct page *p, int access); extern atomic_long_t num_poisoned_pages; extern int soft_offline_page(struct page *page, int flags); -extern void dump_page(struct page *page); +extern void dump_page(struct page *page, char *reason); +extern void dump_page_badflags(struct page *page, char *reason, + unsigned long badflags); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) extern void clear_huge_page(struct page *page, -- cgit v1.2.3 From 309381feaee564281c3d9e90fbca8963bb7428ad Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 23 Jan 2014 15:52:54 -0800 Subject: mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers. I've recently noticed based on the requests to add a small piece of code that dumps the page to various VM_BUG_ON sites that the page dump is quite useful to people debugging issues in mm. This patch adds a VM_BUG_ON_PAGE(cond, page) which beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON. [akpm@linux-foundation.org: fix up includes] Signed-off-by: Sasha Levin Cc: "Kirill A. 
Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 1 + include/linux/hugetlb.h | 3 ++- include/linux/hugetlb_cgroup.h | 5 +++-- include/linux/mm.h | 29 +++++++++++++---------------- include/linux/mmdebug.h | 9 +++++++++ include/linux/page-flags.h | 10 +++++----- include/linux/pagemap.h | 10 +++++----- include/linux/percpu.h | 1 + 8 files changed, 39 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 9b4dd491f7e8..0437439bc047 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H +#include #include #include #include diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d01cc972a1d9..8c43cc469d78 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -2,6 +2,7 @@ #define _LINUX_HUGETLB_H #include +#include #include #include #include @@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, static inline struct hstate *page_hstate(struct page *page) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); return size_to_hstate(PAGE_SIZE << compound_order(page)); } diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index ce8217f7b5c2..787bba3bf552 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -15,6 +15,7 @@ #ifndef _LINUX_HUGETLB_CGROUP_H #define _LINUX_HUGETLB_CGROUP_H +#include #include struct hugetlb_cgroup; @@ -28,7 +29,7 @@ struct hugetlb_cgroup; static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; @@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) static inline int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; diff --git a/include/linux/mm.h b/include/linux/mm.h index 03bbcb84d96e..d9992fc128ca 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -5,6 +5,7 @@ #ifdef __KERNEL__ +#include #include #include #include @@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page) */ static inline int put_page_testzero(struct page *page) { - VM_BUG_ON(atomic_read(&page->_count) == 0); + VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); return atomic_dec_and_test(&page->_count); } @@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) static inline void compound_lock(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(PageSlab(page)); + VM_BUG_ON_PAGE(PageSlab(page), page); bit_spin_lock(PG_compound_lock, &page->flags); #endif } @@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page) static inline void compound_unlock(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(PageSlab(page)); + VM_BUG_ON_PAGE(PageSlab(page), page); bit_spin_unlock(PG_compound_lock, &page->flags); #endif } @@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page) */ static inline bool compound_tail_refcounted(struct page *page) { - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); return __compound_tail_refcounted(page); } @@ -456,9 +457,9 @@ static inline void 
get_huge_page_tail(struct page *page) /* * __split_huge_page_refcount() cannot run from under us. */ - VM_BUG_ON(!PageTail(page)); - VM_BUG_ON(page_mapcount(page) < 0); - VM_BUG_ON(atomic_read(&page->_count) != 0); + VM_BUG_ON_PAGE(!PageTail(page), page); + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); + VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); if (compound_tail_refcounted(page->first_page)) atomic_inc(&page->_mapcount); } @@ -474,7 +475,7 @@ static inline void get_page(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_count. */ - VM_BUG_ON(atomic_read(&page->_count) <= 0); + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); atomic_inc(&page->_count); } @@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page) static inline void __SetPageBuddy(struct page *page) { - VM_BUG_ON(atomic_read(&page->_mapcount) != -1); + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); } static inline void __ClearPageBuddy(struct page *page) { - VM_BUG_ON(!PageBuddy(page)); + VM_BUG_ON_PAGE(!PageBuddy(page), page); atomic_set(&page->_mapcount, -1); } @@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page) * slab code uses page->slab_cache and page->first_page (for tail * pages), which share storage with page->ptl. */ - VM_BUG_ON(*(unsigned long *)&page->ptl); + VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); if (!ptlock_alloc(page)) return false; spin_lock_init(ptlock_ptr(page)); @@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page) static inline void pgtable_pmd_page_dtor(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(page->pmd_huge_pte); + VM_BUG_ON_PAGE(page->pmd_huge_pte, page); #endif ptlock_free(page); } @@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access); extern atomic_long_t num_poisoned_pages; extern int soft_offline_page(struct page *page, int flags); -extern void dump_page(struct page *page, char *reason); -extern void dump_page_badflags(struct page *page, char *reason, - unsigned long badflags); - #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) extern void clear_huge_page(struct page *page, unsigned long addr, diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 580bd587d916..5042c036dda9 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -1,10 +1,19 @@ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 +struct page; + +extern void dump_page(struct page *page, char *reason); +extern void dump_page_badflags(struct page *page, char *reason, + unsigned long badflags); + #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) +#define VM_BUG_ON_PAGE(cond, page) \ + do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 98ada58f9942..e464b4e987e8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page) */ static inline int PageTransHuge(struct page *page) { - VM_BUG_ON(PageTail(page)); + VM_BUG_ON_PAGE(PageTail(page), page); return PageHead(page); } @@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page) */ static inline int PageSlabPfmemalloc(struct 
page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); return PageActive(page); } static inline void SetPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); SetPageActive(page); } static inline void __ClearPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); __ClearPageActive(page); } static inline void ClearPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); ClearPageActive(page); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e3dea75a078b..1710d1b060ba 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page) * disabling preempt, and hence no need for the "speculative get" that * SMP requires. */ - VM_BUG_ON(page_count(page) == 0); + VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_inc(&page->_count); #else @@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page) return 0; } #endif - VM_BUG_ON(PageTail(page)); + VM_BUG_ON_PAGE(PageTail(page), page); return 1; } @@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count) # ifdef CONFIG_PREEMPT_COUNT VM_BUG_ON(!in_atomic()); # endif - VM_BUG_ON(page_count(page) == 0); + VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_add(count, &page->_count); #else if (unlikely(!atomic_add_unless(&page->_count, count, 0))) return 0; #endif - VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); return 1; } @@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count) static inline void page_unfreeze_refs(struct page *page, int count) { - VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); atomic_set(&page->_count, count); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9e4761caa80c..e3817d2441b6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -1,6 +1,7 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H +#include #include #include #include -- cgit v1.2.3 From 363a044f739b0f07a8c063b838c5528d10720e02 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:56 -0800 Subject: memcg, slab: kmem_cache_create_memcg(): fix memleak on fail path We do not free the cache's memcg_params if __kmem_cache_create fails. Fix this. Plus, rename memcg_register_cache() to memcg_alloc_cache_params(), because it actually does not register the cache anywhere, but simply initialize kmem_cache::memcg_params. 
[akpm@linux-foundation.org: fix build] Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b3e7a667e03c..284daff507fb 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -497,8 +497,9 @@ void __memcg_kmem_commit_charge(struct page *page, void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); +int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache); +void memcg_free_cache_params(struct kmem_cache *s); void memcg_release_cache(struct kmem_cache *cachep); void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); @@ -640,13 +641,16 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int -memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache) +static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) { return 0; } +static inline void memcg_free_cache_params(struct kmem_cache *s) +{ +} + static inline void memcg_release_cache(struct kmem_cache *cachep) { } -- cgit v1.2.3 From 1aa13254259bdef0bca723849ab3bab308d2f0c3 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:58 -0800 Subject: memcg, slab: clean up memcg cache initialization/destruction Currently, we have rather a messy function set relating to per-memcg kmem cache initialization/destruction. Per-memcg caches are created in memcg_create_kmem_cache(). This function calls kmem_cache_create_memcg() to allocate and initialize a kmem cache and then "registers" the new cache in the memcg_params::memcg_caches array of the parent cache. During its work-flow, kmem_cache_create_memcg() executes the following memcg-related functions: - memcg_alloc_cache_params(), to initialize memcg_params of the newly created cache; - memcg_cache_list_add(), to add the new cache to the memcg_slab_caches list. On the other hand, kmem_cache_destroy() called on a cache destruction only calls memcg_release_cache(), which does all the work: it cleans the reference to the cache in its parent's memcg_params::memcg_caches, removes the cache from the memcg_slab_caches list, and frees memcg_params. Such an inconsistency between destruction and initialization paths make the code difficult to read, so let's clean this up a bit. This patch moves all the code relating to registration of per-memcg caches (adding to memcg list, setting the pointer to a cache from its parent) to the newly created memcg_register_cache() and memcg_unregister_cache() functions making the initialization and destruction paths look symmetrical. 
Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 284daff507fb..abd0113b6620 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -500,8 +500,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, struct kmem_cache *root_cache); void memcg_free_cache_params(struct kmem_cache *s); -void memcg_release_cache(struct kmem_cache *cachep); -void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); +void memcg_register_cache(struct kmem_cache *s); +void memcg_unregister_cache(struct kmem_cache *s); int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); @@ -651,12 +651,11 @@ static inline void memcg_free_cache_params(struct kmem_cache *s) { } -static inline void memcg_release_cache(struct kmem_cache *cachep) +static inline void memcg_register_cache(struct kmem_cache *s) { } -static inline void memcg_cache_list_add(struct mem_cgroup *memcg, - struct kmem_cache *s) +static inline void memcg_unregister_cache(struct kmem_cache *s) { } -- cgit v1.2.3 From f8570263ee16eb1d5038b8e20d7db3a68bbb2b49 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:06 -0800 Subject: memcg, slab: RCU protect memcg_params for root caches We relocate root cache's memcg_params whenever we need to grow the memcg_caches array to accommodate all kmem-active memory cgroups. Currently on relocation we free the old version immediately, which can lead to use-after-free, because the memcg_caches array is accessed lock-free (see cache_from_memcg_idx()). This patch fixes this by making memcg_params RCU-protected for root caches. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 1e2f4fe12773..a060142aa5f5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * * Both the root cache and the child caches will have it. For the root cache, * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. + * information about the currently limited memcgs in the system. To allow the + * array to be accessed without taking any locks, on relocation we free the old + * version only after a grace period. * * Child caches will hold extra metadata needed for its operation. 
Fields are: * @@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) struct memcg_cache_params { bool is_root_cache; union { - struct kmem_cache *memcg_caches[0]; + struct { + struct rcu_head rcu_head; + struct kmem_cache *memcg_caches[0]; + }; struct { struct mem_cgroup *memcg; struct list_head list; -- cgit v1.2.3 From 5e270e254885893f8c82ab9b91caa648af3690df Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Thu, 23 Jan 2014 15:53:11 -0800 Subject: mm: free memblock.memory in free_all_bootmem When calling free_all_bootmem() the free areas under memblock's control are released to the buddy allocator. Additionally the reserved list is freed if it was reallocated by memblock. The same should apply for the memory list. Signed-off-by: Philipp Hachtmann Reviewed-by: Tejun Heo Cc: Joonsoo Kim Cc: Johannes Weiner Cc: Tang Chen Cc: Toshi Kani Cc: Jianguo Wu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cd0274bebd4c..1ef66360f0b0 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -61,6 +61,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); +phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); -- cgit v1.2.3 From 54a43d54988a3731d644fdeb7a1d6f46b4ac64c7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 23 Jan 2014 15:53:13 -0800 Subject: numa: add a sysctl for numa_balancing Add a working sysctl to enable/disable automatic numa memory balancing at runtime. This allows us to track down performance problems with this feature and is generally a good idea. This was possible earlier through debugfs, but only with special debugging options set. Also fix the boot message. [akpm@linux-foundation.org: s/sched_numa_balancing/sysctl_numa_balancing/] Signed-off-by: Andi Kleen Acked-by: Mel Gorman Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched/sysctl.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 31e0193cb0c5..b13cf430764f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -99,4 +99,8 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +extern int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + #endif /* _SCHED_SYSCTL_H */ -- cgit v1.2.3 From b30afea019a4548ee77b73e83f03104e0e3a0556 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 23 Jan 2014 15:53:18 -0800 Subject: include/linux/genalloc.h: spinlock_t needs spinlock_types.h Compiling a C file which includes genalloc.h but without spinlock_types.h being included before, we will see the compile error below. include/linux/genalloc.h:54:2: error: unknown type name `spinlock_t' Include spinlock_types.h from genalloc.h to fix the problem. 
Signed-off-by: Shawn Guo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1eda33d7cb10..1c2fdaa2ffc3 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -30,6 +30,8 @@ #ifndef __GENALLOC_H__ #define __GENALLOC_H__ +#include + struct device; struct device_node; -- cgit v1.2.3 From 77719536dc00f8fd8f5abe6dadbde5331c37f996 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 23 Jan 2014 15:53:59 -0800 Subject: conditionally define U32_MAX The symbol U32_MAX is defined in several spots. Change these definitions to be conditional. This is in preparation for the next patch, which centralizes the definition in . Signed-off-by: Alex Elder Cc: Sage Weil Cc: David Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ceph/decode.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 0442c3d800f0..27fe66a279b1 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -10,6 +10,7 @@ /* This seemed to be the easiest place to define these */ +#ifndef U32_MAX #define U8_MAX ((u8)(~0U)) #define U16_MAX ((u16)(~0U)) #define U32_MAX ((u32)(~0U)) @@ -24,6 +25,7 @@ #define S16_MIN ((s16)(-S16_MAX - 1)) #define S32_MIN ((s32)(-S32_MAX - 1)) #define S64_MIN ((s64)(-S64_MAX - 1LL)) +#endif /* !U32_MAX */ /* * in all cases, -- cgit v1.2.3 From 89a0714106aac7309c7dfa0f004b39e1e89d2942 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 23 Jan 2014 15:54:00 -0800 Subject: kernel.h: define u8, s8, u32, etc. limits Create constants that define the maximum and minimum values representable by the kernel types u8, s8, u16, s16, and so on. Signed-off-by: Alex Elder Cc: Sage Weil Cc: David Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2aa3d4b000e6..f74bb581ab64 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -29,6 +29,19 @@ #define ULLONG_MAX (~0ULL) #define SIZE_MAX (~(size_t)0) +#define U8_MAX ((u8)~0U) +#define S8_MAX ((s8)(U8_MAX>>1)) +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define U16_MAX ((u16)~0U) +#define S16_MAX ((s16)(U16_MAX>>1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define U32_MAX ((u32)~0U) +#define S32_MAX ((s32)(U32_MAX>>1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define U64_MAX ((u64)~0ULL) +#define S64_MAX ((s64)(U64_MAX>>1)) +#define S64_MIN ((s64)(-S64_MAX - 1)) + #define STACK_MAGIC 0xdeadbeef #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) -- cgit v1.2.3 From 04f9b74e4d96d349de12fdd4e6626af4a9f75e09 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 23 Jan 2014 15:54:01 -0800 Subject: remove extra definitions of U32_MAX Now that the definition is centralized in , the definitions of U32_MAX (and related) elsewhere in the kernel can be removed. Signed-off-by: Alex Elder Acked-by: Sage Weil Acked-by: David S. 
Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ceph/decode.h | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 27fe66a279b1..a6ef9cc267ec 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -8,25 +8,6 @@ #include -/* This seemed to be the easiest place to define these */ - -#ifndef U32_MAX -#define U8_MAX ((u8)(~0U)) -#define U16_MAX ((u16)(~0U)) -#define U32_MAX ((u32)(~0U)) -#define U64_MAX ((u64)(~0ULL)) - -#define S8_MAX ((s8)(U8_MAX >> 1)) -#define S16_MAX ((s16)(U16_MAX >> 1)) -#define S32_MAX ((s32)(U32_MAX >> 1)) -#define S64_MAX ((s64)(U64_MAX >> 1LL)) - -#define S8_MIN ((s8)(-S8_MAX - 1)) -#define S16_MIN ((s16)(-S16_MAX - 1)) -#define S32_MIN ((s32)(-S32_MAX - 1)) -#define S64_MIN ((s64)(-S64_MAX - 1LL)) -#endif /* !U32_MAX */ - /* * in all cases, * void **p pointer to position pointer -- cgit v1.2.3 From 00b2c76a6abbe082bb5afb89ee49ec325e9cd4d2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 23 Jan 2014 15:54:02 -0800 Subject: include/linux/of.h: make for_each_child_of_node() reference its args when CONFIG_OF=n Make for_each_child_of_node() reference its args when CONFIG_OF=n to avoid warnings like: drivers/leds/leds-pwm.c:88:22: warning: unused variable 'node' [-Wunused-variable] struct device_node *node = pdev->dev.of_node; ^ Signed-off-by: David Howells Cc: Grant Likely Cc: Rob Herring Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/of.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index 276c546980d8..70c64ba17fa5 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -377,8 +377,13 @@ static inline bool of_have_populated_dt(void) return false; } +/* Kill an unused variable warning on a device_node pointer */ +static inline void __of_use_dn(const struct device_node *np) +{ +} + #define for_each_child_of_node(parent, child) \ - while (0) + while (__of_use_dn(parent), __of_use_dn(child), 0) #define for_each_available_child_of_node(parent, child) \ while (0) -- cgit v1.2.3 From aace05097a0fd467230e39acb148be0fdaa90068 Mon Sep 17 00:00:00 2001 From: "Du, Changbin" Date: Thu, 23 Jan 2014 15:54:12 -0800 Subject: lib/parser.c: add match_wildcard() function match_wildcard function is a simple implementation of wildcard matching algorithm. It only supports two usual wildcardes: '*' - matches zero or more characters '?' - matches one character This algorithm is safe since it is non-recursive. 
Signed-off-by: Du, Changbin Cc: Jason Baron Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/parser.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/parser.h b/include/linux/parser.h index ea2281e726f6..39d5b7955b23 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -29,5 +29,6 @@ int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); +bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); -- cgit v1.2.3 From c28aa1f0a847c36daa4280b611e2b54bad75c576 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 23 Jan 2014 15:54:16 -0800 Subject: printk/cache: mark printk_once test variable __read_mostly Add #include to define __read_mostly. Convert cache.h to use uapi/linux/kernel.h instead of linux/kernel.h to avoid recursive #includes. Convert the ALIGN macro to __ALIGN_KERNEL. printk_once only sets the bool variable tested once so mark it __read_mostly. Neaten the alignment so it matches the rest of the pr__once #defines too. Signed-off-by: Joe Perches Reviewed-by: James Hogan Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cache.h | 4 ++-- include/linux/printk.h | 19 ++++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cache.h b/include/linux/cache.h index 4c570653ab84..17e7e82d2aa7 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -1,11 +1,11 @@ #ifndef __LINUX_CACHE_H #define __LINUX_CACHE_H -#include +#include #include #ifndef L1_CACHE_ALIGN -#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES) +#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) #endif #ifndef SMP_CACHE_BYTES diff --git a/include/linux/printk.h b/include/linux/printk.h index 694925837a16..cc6f74d65167 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -5,6 +5,7 @@ #include #include #include +#include extern const char linux_banner[]; extern const char linux_proc_banner[]; @@ -253,17 +254,17 @@ extern asmlinkage void dump_stack(void) __cold; */ #ifdef CONFIG_PRINTK -#define printk_once(fmt, ...) \ -({ \ - static bool __print_once; \ - \ - if (!__print_once) { \ - __print_once = true; \ - printk(fmt, ##__VA_ARGS__); \ - } \ +#define printk_once(fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk(fmt, ##__VA_ARGS__); \ + } \ }) #else -#define printk_once(fmt, ...) \ +#define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif -- cgit v1.2.3 From 0fa9aa20c33d76e98f44ff1de6e128e39a7738ca Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 23 Jan 2014 15:54:54 -0800 Subject: fs/ramfs/file-nommu.c: make ramfs_nommu_get_unmapped_area() and ramfs_nommu_mmap() static Since commit 853ac43ab194 ("shmem: unify regular and tiny shmem"), ramfs_nommu_get_unmapped_area() and ramfs_nommu_mmap() are not directly referenced outside of file-nommu.c. Thus make them static. 
Signed-off-by: Axel Lin Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ramfs.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 753207c8ce20..ecc730977a5a 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -14,13 +14,6 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) } #else extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); -extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags); - -extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); #endif extern const struct file_operations ramfs_file_operations; -- cgit v1.2.3 From 7288e1187ba935996232246916418c64bb88da30 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jan 2014 15:55:32 -0800 Subject: coredump: kill MMF_DUMPABLE and MMF_DUMP_SECURELY Nobody actually needs MMF_DUMPABLE/MMF_DUMP_SECURELY, they are only used to enforce the encoding of SUID_DUMP_* enum in mm->flags & MMF_DUMPABLE_MASK. Now that set_dumpable() updates both bits atomically we can kill them and simply store the value "as is" in 2 lower bits. Signed-off-by: Oleg Nesterov Acked-by: Kees Cook Cc: Alex Kelly Cc: "Eric W. Biederman" Cc: Josh Triplett Cc: Petr Matousek Cc: Vasily Kulikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 485234d2fd42..124430ba569b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -400,10 +400,8 @@ extern int get_dumpable(struct mm_struct *mm); #define SUID_DUMP_ROOT 2 /* Dump as root */ /* mm flags */ -/* dumpable bits */ -#define MMF_DUMPABLE 0 /* core dump is permitted */ -#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ +/* for SUID_DUMP_* above */ #define MMF_DUMPABLE_BITS 2 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) -- cgit v1.2.3 From 942be3875a1931c379bbc37053829dd6847e0f3f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jan 2014 15:55:34 -0800 Subject: coredump: make __get_dumpable/get_dumpable inline, kill fs/coredump.h 1. Remove fs/coredump.h. It is not clear why do we need it, it only declares __get_dumpable(), signal.c includes it for no reason. 2. Now that get_dumpable() and __get_dumpable() are really trivial make them inline in linux/sched.h. Signed-off-by: Oleg Nesterov Acked-by: Kees Cook Cc: Alex Kelly Cc: "Eric W. 
Biederman" Cc: Josh Triplett Cc: Petr Matousek Cc: Vasily Kulikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 124430ba569b..cf9e414dbb9e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -391,10 +391,6 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} #endif - -extern void set_dumpable(struct mm_struct *mm, int value); -extern int get_dumpable(struct mm_struct *mm); - #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ #define SUID_DUMP_USER 1 /* Dump as user of process */ #define SUID_DUMP_ROOT 2 /* Dump as root */ @@ -405,6 +401,23 @@ extern int get_dumpable(struct mm_struct *mm); #define MMF_DUMPABLE_BITS 2 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) +extern void set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. + */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + /* coredump filter bits */ #define MMF_DUMP_ANON_PRIVATE 2 #define MMF_DUMP_ANON_SHARED 3 -- cgit v1.2.3 From 74e37200de8e9c4e09b70c21c3f13c2071e77457 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jan 2014 15:55:35 -0800 Subject: proc: cleanup/simplify get_task_state/task_state_array get_task_state() and task_state_array[] look confusing and suboptimal, it is not clear what it can actually report to user-space and task_state_array[] blows .data for no reason. 1. state = (tsk->state & TASK_REPORT) | tsk->exit_state is not clear. TASK_REPORT is self-documenting but it is not clear what ->exit_state can add. Move the potential exit_state's (EXIT_ZOMBIE and EXIT_DEAD) into TASK_REPORT and use it to calculate the final result. 2. With the change above it is obvious that task_state_array[] has the unused entries just to make BUILD_BUG_ON() happy. Change this BUILD_BUG_ON() to use TASK_REPORT rather than TASK_STATE_MAX and shrink task_state_array[]. 3. Turn the "while (state)" loop into fls(state). 
Signed-off-by: Oleg Nesterov Cc: Peter Zijlstra Cc: David Laight Cc: Geert Uytterhoeven Cc: Ingo Molnar Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index cf9e414dbb9e..33e4e9e1f621 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -229,7 +229,7 @@ extern char ___assert_task_state[1 - 2*!!( /* get_task_state() */ #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED) + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -- cgit v1.2.3 From ff252c1fc537b0c9e40f62da0a9d11bf0737b7db Mon Sep 17 00:00:00 2001 From: DaeSeok Youn Date: Thu, 23 Jan 2014 15:55:46 -0800 Subject: kernel/fork.c: make dup_mm() static dup_mm() is used only in kernel/fork.c Signed-off-by: Daeseok Youn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 33e4e9e1f621..66a17ad55bcb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2295,8 +2295,6 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); /* Remove the current tasks stale references to the old mm_struct */ extern void mm_release(struct task_struct *, struct mm_struct *); -/* Allocate a new mm structure and copy contents from tsk->mm */ -extern struct mm_struct *dup_mm(struct task_struct *tsk); extern int copy_thread(unsigned long, unsigned long, unsigned long, struct task_struct *); -- cgit v1.2.3 From 98611e4e6a2b4a03fd2d4750cce8e4455a995c8d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 23 Jan 2014 15:55:52 -0800 Subject: exec: kill task_struct->did_exec We can kill either task->did_exec or PF_FORKNOEXEC, they are mutually exclusive. The patch kills ->did_exec because it has a single user. Signed-off-by: Oleg Nesterov Acked-by: KOSAKI Motohiro Cc: Al Viro Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 66a17ad55bcb..68a0e84463a0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1239,7 +1239,6 @@ struct task_struct { /* Used for emulating ABI behavior of previous Linux versions */ unsigned int personality; - unsigned did_exec:1; unsigned in_execve:1; /* Tell the LSMs that the process is doing an * execve */ unsigned in_iowait:1; -- cgit v1.2.3 From 7984754b99b6c89054edc405e9d9d35810a91d36 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 23 Jan 2014 15:55:59 -0800 Subject: kexec: add sysctl to disable kexec_load For general-purpose (i.e. distro) kernel builds it makes sense to build with CONFIG_KEXEC to allow end users to choose what kind of things they want to do with kexec. However, in the face of trying to lock down a system with such a kernel, there needs to be a way to disable kexec_load (much like module loading can be disabled). Without this, it is too easy for the root user to modify kernel memory even when CONFIG_STRICT_DEVMEM and modules_disabled are set. 
With this change, it is still possible to load an image for use later, then disable kexec_load so the image (or lack of image) can't be altered. The intention is for using this in environments where "perfect" enforcement is hard. Without a verified boot, along with verified modules, and along with verified kexec, this is trying to give a system a better chance to defend itself (or at least grow the window of discoverability) against attack in the face of a privilege escalation. In my mind, I consider several boot scenarios: 1) Verified boot of read-only verified root fs loading fd-based verification of kexec images. 2) Secure boot of writable root fs loading signed kexec images. 3) Regular boot loading kexec (e.g. kcrash) image early and locking it. 4) Regular boot with no control of kexec image at all. 1 and 2 don't exist yet, but will soon once the verified kexec series has landed. 4 is the state of things now. The gap between 2 and 4 is too large, so this change creates scenario 3, a middle-ground above 4 when 2 and 1 are not possible for a system. Signed-off-by: Kees Cook Acked-by: Rik van Riel Cc: Vivek Goyal Cc: Eric Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kexec.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 5fd33dc1fe3a..6d4066cdb5b5 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -170,6 +170,7 @@ unsigned long paddr_vmcoreinfo_note(void); extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; +extern int kexec_load_disabled; #ifndef kexec_flush_icache_page #define kexec_flush_icache_page(page) -- cgit v1.2.3 From 3089a4c8d3abc7e2ab105d1d39d415110d1566d6 Mon Sep 17 00:00:00 2001 From: Evgeny Boger Date: Thu, 23 Jan 2014 15:56:18 -0800 Subject: drivers/w1/masters/w1-gpio.c: add strong pullup emulation Strong pullup is emulated by driving pin logic high after write command when using tri-state push-pull GPIO. Signed-off-by: Evgeny Boger Cc: Greg KH Acked-by: David Fries Acked-by: Evgeniy Polyakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/w1-gpio.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index 065e3ae79ab0..d58594a32324 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h @@ -20,6 +20,7 @@ struct w1_gpio_platform_data { unsigned int is_open_drain:1; void (*enable_external_pullup)(int enable); unsigned int ext_pullup_enable_pin; + unsigned int pullup_duration; }; #endif /* _LINUX_W1_GPIO_H */ -- cgit v1.2.3