author    | Al Viro <viro@zeniv.linux.org.uk> | 2016-05-11 00:00:29 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2016-05-11 00:00:29 -0400
commit    | e4d35be584be88a3db3fa5635a97c62a2ec5aafe (patch)
tree      | fc22a7fb65697306edd71411959ccee6df60c64d /include/linux/pagemap.h
parent    | 99d825822eade8d827a1817357cbf3f889a552d6 (diff)
parent    | 38b78a5f18584db6fa7441e0f4531b283b0e6725 (diff)
Merge branch 'ovl-fixes' into for-linus
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r-- | include/linux/pagemap.h | 32
1 file changed, 8 insertions, 24 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 				(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
 		return page->index << compound_order(page);
 
 	if (likely(!PageTransTail(page)))
-		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		return page->index;
 
 	/*
 	 * We don't initialize ->index for tail pages: calculate based on
 	 * head page
 	 */
-	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = compound_head(page)->index;
 	pgoff += page - compound_head(page);
 	return pgoff;
 }
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
  */
 static inline loff_t page_offset(struct page *page)
 {
-	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 		return linear_hugepage_index(vma, address);
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
-	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables. Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-	       PAGE_CACHE_SHIFT;
+	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+	       PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
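Every hunk above relies on the same identity: since `PAGE_CACHE_SHIFT` was defined as `PAGE_SHIFT` (and `PAGE_CACHE_SIZE` as `PAGE_SIZE`), every `x << (PAGE_CACHE_SHIFT - PAGE_SHIFT)` is a no-op and rounding by `PAGE_CACHE_SIZE` is ordinary page rounding. The sketch below is a minimal userspace illustration of that arithmetic, not kernel code; the 4 KiB page size, the 100 KiB `i_size`, and the page index 42 are assumed stand-in values chosen only for the demonstration.

```c
/* Illustration only: userspace stand-ins for the kernel constants. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT       12                 /* assumed 4 KiB pages */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PAGE_CACHE_SHIFT PAGE_SHIFT         /* the removed alias */
#define PAGE_CACHE_SIZE  PAGE_SIZE          /* the removed alias */

/* Old-style dir_pages(): round i_size up to whole page-cache pages. */
static unsigned long dir_pages_old(long long i_size)
{
	return (unsigned long)(i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

/* New-style dir_pages(): identical arithmetic, spelled with PAGE_*. */
static unsigned long dir_pages_new(long long i_size)
{
	return (unsigned long)(i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	long long i_size = 100 * 1024;   /* hypothetical 100 KiB directory */
	unsigned long index = 42;        /* hypothetical page index */

	/* The shift the diff deletes is a no-op when both shifts are equal. */
	assert((index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)) == index);

	/* Rounding up by PAGE_CACHE_SIZE equals rounding up by PAGE_SIZE. */
	assert(dir_pages_old(i_size) == dir_pages_new(i_size));
	printf("dir_pages(100 KiB) = %lu\n", dir_pages_new(i_size));
	return 0;
}
```

The same reasoning covers the removed `page_cache_get()`/`page_cache_release()` wrappers, which, as the deleted defines show, were already plain aliases for `get_page()`/`put_page()`.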