author     Jiri Kosina <jkosina@suse.cz>	2011-06-10 14:46:48 +0200
committer  Jiri Kosina <jkosina@suse.cz>	2011-06-10 14:46:57 +0200
commit     5be5758c114b18260c6fd4c8373bf89e39b0fe82 (patch)
tree       54390f904df6ff11e570f764c444356cf2709fda /include/asm-generic
parent     71f66a6580c4e42df377bebbcca5c72661a40700 (diff)
parent     7f45e5cd1718ed769295033ca214032848a0097d (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply patches against new
code I have in queue.
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/audit_change_attr.h |   4
-rw-r--r--  include/asm-generic/audit_dir_write.h   |  14
-rw-r--r--  include/asm-generic/audit_read.h        |   5
-rw-r--r--  include/asm-generic/audit_write.h       |   2
-rw-r--r--  include/asm-generic/bitops/find.h       |   4
-rw-r--r--  include/asm-generic/bitops/le.h         |   7
-rw-r--r--  include/asm-generic/bug.h               |   3
-rw-r--r--  include/asm-generic/cacheflush.h        |   5
-rw-r--r--  include/asm-generic/gpio.h              |  10
-rw-r--r--  include/asm-generic/pgtable.h           |  12
-rw-r--r--  include/asm-generic/ptrace.h            |  74
-rw-r--r--  include/asm-generic/resource.h          |   2
-rw-r--r--  include/asm-generic/tlb.h               | 156
-rw-r--r--  include/asm-generic/unistd.h            | 227
-rw-r--r--  include/asm-generic/vmlinux.lds.h       |  61
15 files changed, 371 insertions, 215 deletions
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index bcbab3e4a3be..89b73e5d0fd0 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -1,4 +1,6 @@
+#ifdef __NR_chmod
 __NR_chmod,
+#endif
 __NR_fchmod,
 #ifdef __NR_chown
 __NR_chown,
@@ -20,7 +22,9 @@ __NR_chown32,
 __NR_fchown32,
 __NR_lchown32,
 #endif
+#ifdef __NR_link
 __NR_link,
+#endif
 #ifdef __NR_linkat
 __NR_linkat,
 #endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 6621bd82cbe8..7b61db4fe72b 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -1,13 +1,27 @@
+#ifdef __NR_rename
 __NR_rename,
+#endif
+#ifdef __NR_mkdir
 __NR_mkdir,
+#endif
+#ifdef __NR_rmdir
 __NR_rmdir,
+#endif
 #ifdef __NR_creat
 __NR_creat,
 #endif
+#ifdef __NR_link
 __NR_link,
+#endif
+#ifdef __NR_unlink
 __NR_unlink,
+#endif
+#ifdef __NR_symlink
 __NR_symlink,
+#endif
+#ifdef __NR_mknod
 __NR_mknod,
+#endif
 #ifdef __NR_mkdirat
 __NR_mkdirat,
 __NR_mknodat,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 0e87464d9847..3b249cb857dc 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -1,4 +1,6 @@
+#ifdef __NR_readlink
 __NR_readlink,
+#endif
 __NR_quotactl,
 __NR_listxattr,
 __NR_llistxattr,
@@ -6,3 +8,6 @@ __NR_flistxattr,
 __NR_getxattr,
 __NR_lgetxattr,
 __NR_fgetxattr,
+#ifdef __NR_readlinkat
+__NR_readlinkat,
+#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index c5f1c2c920e2..e7020c57b13b 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -4,7 +4,9 @@ __NR_acct,
 __NR_swapon,
 #endif
 __NR_quotactl,
+#ifdef __NR_truncate
 __NR_truncate,
+#endif
 #ifdef __NR_truncate64
 __NR_truncate64,
 #endif
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 110fa700f853..71c778033f57 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_GENERIC_BITOPS_FIND_H_
 #define _ASM_GENERIC_BITOPS_FIND_H_
 
+#ifndef find_next_bit
 /**
  * find_next_bit - find the next set bit in a memory region
  * @addr: The address to base the search on
@@ -9,7 +10,9 @@
  */
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
		size, unsigned long offset);
+#endif
 
+#ifndef find_next_zero_bit
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
  * @addr: The address to base the search on
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
  */
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long
		size, unsigned long offset);
+#endif
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b1b5dc..f95c663a6a41 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
 
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
+#ifndef find_next_zero_bit_le
 extern unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit_le
 extern unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset);
+#endif
 
+#ifndef find_first_zero_bit_le
 #define find_first_zero_bit_le(addr, size) \
	find_next_zero_bit_le((addr), (size), 0)
+#endif
 #else
 #error "Please fix <asm/byteorder.h>"
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index e5a3f5880001..dfb0ec666c94 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -162,9 +162,6 @@ extern void warn_slowpath_null(const char *file, const int line);
	unlikely(__ret_warn_once);		\
 })
 
-#define WARN_ON_RATELIMIT(condition, state)	\
-		WARN_ON((condition) && __ratelimit(state))
-
 /*
  * WARN_ON_SMP() is for cases that the warning is either
  * meaningless for !SMP or may even cause failures.
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 57b5c3c82e86..87bc536ccde3 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -24,7 +24,10 @@
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ff5c66080c8c..fcdcb5d5c995 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -35,9 +35,9 @@
  * platform data and other tables.
  */
 
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
 {
-	return ((unsigned)number) < ARCH_NR_GPIOS;
+	return number >= 0 && number < ARCH_NR_GPIOS;
 }
 
 struct device;
@@ -193,8 +193,8 @@ struct gpio {
 };
 
 extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(struct gpio *array, size_t num);
-extern void gpio_free_array(struct gpio *array, size_t num);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
 
 #ifdef CONFIG_GPIO_SYSFS
 
@@ -212,7 +212,7 @@ extern void gpio_unexport(unsigned gpio);
 
 #else	/* !CONFIG_GPIOLIB */
 
-static inline int gpio_is_valid(int number)
+static inline bool gpio_is_valid(int number)
 {
	/* only non-negative numbers are valid */
	return number >= 0;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b4bfe338ea0e..e9b8e5926bef 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -184,22 +184,18 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
-#define page_test_dirty(page)			(0)
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(pfn, mapped)	(0)
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define page_clear_dirty(page, mapped)		do { } while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
 #define pte_maybe_dirty(pte)		pte_dirty(pte)
 #else
 #define pte_maybe_dirty(pte)		(1)
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
+#define page_test_and_clear_young(pfn) (0)
 #endif
 
 #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 000000000000..82e674f6b337
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
+/*
+ * Common low level (register) ptrace helpers
+ *
+ * Copyright 2004-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_GENERIC_PTRACE_H__
+#define __ASM_GENERIC_PTRACE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Helpers for working with the instruction pointer */
+#ifndef GET_IP
+#define GET_IP(regs) ((regs)->pc)
+#endif
+#ifndef SET_IP
+#define SET_IP(regs, val) (GET_IP(regs) = (val))
+#endif
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+	return GET_IP(regs);
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+	SET_IP(regs, val);
+}
+
+#ifndef profile_pc
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+/* Helpers for working with the user stack pointer */
+#ifndef GET_USP
+#define GET_USP(regs) ((regs)->usp)
+#endif
+#ifndef SET_USP
+#define SET_USP(regs, val) (GET_USP(regs) = (val))
+#endif
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return GET_USP(regs);
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+                                          unsigned long val)
+{
+	SET_USP(regs, val);
+}
+
+/* Helpers for working with the frame pointer */
+#ifndef GET_FP
+#define GET_FP(regs) ((regs)->fp)
+#endif
+#ifndef SET_FP
+#define SET_FP(regs, val) (GET_FP(regs) = (val))
+#endif
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+	return GET_FP(regs);
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+                                     unsigned long val)
+{
+	SET_FP(regs, val);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 587566f95f6c..61fa862fe08d 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -78,7 +78,7 @@
	[RLIMIT_CORE]		= {              0, RLIM_INFINITY },	\
	[RLIMIT_RSS]		= { RLIM_INFINITY, RLIM_INFINITY },	\
	[RLIMIT_NPROC]		= {              0,              0 },	\
-	[RLIMIT_NOFILE]		= {       INR_OPEN,       INR_OPEN },	\
+	[RLIMIT_NOFILE]		= {   INR_OPEN_CUR,   INR_OPEN_MAX },	\
	[RLIMIT_MEMLOCK]	= {    MLOCK_LIMIT,    MLOCK_LIMIT },	\
	[RLIMIT_AS]		= { RLIM_INFINITY, RLIM_INFINITY },	\
	[RLIMIT_LOCKS]		= { RLIM_INFINITY, RLIM_INFINITY },	\
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259f..e58fa777fa09 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
  * Copyright 2001 Red Hat, Inc.
  * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
  *
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -17,97 +19,111 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore needs some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means, this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage, this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
  */
-#ifdef CONFIG_SMP
-  #ifdef ARCH_FREE_PTR_NR
-    #define FREE_PTR_NR   ARCH_FREE_PTR_NR
-  #else
-    #define FREE_PTE_NR	506
-  #endif
-  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
-#else
-  #define FREE_PTE_NR	1
-  #define tlb_fast_mode(tlb) 1
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
 #endif
 
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
  */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		nr;	/* set to ~0U means fast mode */
-	unsigned int		need_flush;/* Really unmapped some ptes? */
-	unsigned int		fullmm; /* non-zero means full mm flush */
-	struct page *		pages[FREE_PTE_NR];
+#define MMU_GATHER_BUNDLE	8
+
+struct mmu_gather_batch {
+	struct mmu_gather_batch	*next;
+	unsigned int		nr;
+	unsigned int		max;
+	struct page		*pages[0];
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+#define MAX_GATHER_BATCH	\
+	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
  */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
+struct mmu_gather {
+	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+#endif
+	unsigned int		need_flush : 1,	/* Did free PTEs */
+				fast_mode  : 1; /* No batching   */
 
-	/* Use fast mode if only one CPU is online */
-	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+	unsigned int		fullmm;
 
-	tlb->fullmm = full_mm_flush;
+	struct mmu_gather_batch *active;
+	struct mmu_gather_batch	local;
+	struct page		*__pages[MMU_GATHER_BUNDLE];
+};
 
-	return tlb;
-}
+#define HAVE_GENERIC_MMU_GATHER
 
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-	}
+#ifdef CONFIG_SMP
+	return tlb->fast_mode;
+#else
+	/*
+	 * For UP we don't need to worry about TLB flush
+	 * and page free order so much..
+	 */
+	return 1;
+#endif
 }
 
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(tlb, start, end);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs.
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
  */
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	tlb->need_flush = 1;
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return;
-	}
-	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		tlb_flush_mmu(tlb, 0, 0);
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /**
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 07c40d5149de..4f76959397fa 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -24,16 +24,24 @@
 #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
 #endif
 
+#ifdef __SYSCALL_COMPAT
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
+#else
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
+#endif
+
 #define __NR_io_setup 0
-__SYSCALL(__NR_io_setup, sys_io_setup)
+__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
 #define __NR_io_destroy 1
 __SYSCALL(__NR_io_destroy, sys_io_destroy)
 #define __NR_io_submit 2
-__SYSCALL(__NR_io_submit, sys_io_submit)
+__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
 #define __NR_io_cancel 3
 __SYSCALL(__NR_io_cancel, sys_io_cancel)
 #define __NR_io_getevents 4
-__SYSCALL(__NR_io_getevents, sys_io_getevents)
+__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
 
 /* fs/xattr.c */
 #define __NR_setxattr 5
@@ -67,7 +75,7 @@ __SYSCALL(__NR_getcwd, sys_getcwd)
 
 /* fs/cookies.c */
 #define __NR_lookup_dcookie 18
-__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
+__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
 
 /* fs/eventfd.c */
 #define __NR_eventfd2 19
@@ -79,7 +87,7 @@ __SYSCALL(__NR_epoll_create1, sys_epoll_create1)
 #define __NR_epoll_ctl 21
 __SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
 #define __NR_epoll_pwait 22
-__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
+__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
 
 /* fs/fcntl.c */
 #define __NR_dup 23
@@ -87,7 +95,7 @@ __SYSCALL(__NR_dup, sys_dup)
 #define __NR_dup3 24
 __SYSCALL(__NR_dup3, sys_dup3)
 #define __NR3264_fcntl 25
-__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl)
+__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
 
 /* fs/inotify_user.c */
 #define __NR_inotify_init1 26
@@ -99,7 +107,7 @@ __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
 
 /* fs/ioctl.c */
 #define __NR_ioctl 29
-__SYSCALL(__NR_ioctl, sys_ioctl)
+__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
 
 /* fs/ioprio.c */
 #define __NR_ioprio_set 30
@@ -129,26 +137,30 @@ __SYSCALL(__NR_renameat, sys_renameat)
 #define __NR_umount2 39
 __SYSCALL(__NR_umount2, sys_umount)
 #define __NR_mount 40
-__SYSCALL(__NR_mount, sys_mount)
+__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
 #define __NR_pivot_root 41
 __SYSCALL(__NR_pivot_root, sys_pivot_root)
 
 /* fs/nfsctl.c */
 #define __NR_nfsservctl 42
-__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
+__SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl)
 
 /* fs/open.c */
 #define __NR3264_statfs 43
-__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs)
+__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
+	       compat_sys_statfs64)
 #define __NR3264_fstatfs 44
-__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs)
+__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
+	       compat_sys_fstatfs64)
 #define __NR3264_truncate 45
-__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate)
+__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
+	       compat_sys_truncate64)
 #define __NR3264_ftruncate 46
-__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate)
+__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
+	       compat_sys_ftruncate64)
 
 #define __NR_fallocate 47
-__SYSCALL(__NR_fallocate, sys_fallocate)
+__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
 #define __NR_faccessat 48
 __SYSCALL(__NR_faccessat, sys_faccessat)
 #define __NR_chdir 49
@@ -166,7 +178,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
 #define __NR_fchown 55
 __SYSCALL(__NR_fchown, sys_fchown)
 #define __NR_openat 56
-__SYSCALL(__NR_openat, sys_openat)
+__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
 #define __NR_close 57
 __SYSCALL(__NR_close, sys_close)
 #define __NR_vhangup 58
@@ -182,7 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
 
 /* fs/readdir.c */
 #define __NR_getdents64 61
-__SYSCALL(__NR_getdents64, sys_getdents64)
+__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
 
 /* fs/read_write.c */
 #define __NR3264_lseek 62
@@ -192,17 +204,17 @@ __SYSCALL(__NR_read, sys_read)
 #define __NR_write 64
 __SYSCALL(__NR_write, sys_write)
 #define __NR_readv 65
-__SYSCALL(__NR_readv, sys_readv)
+__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
 #define __NR_writev 66
-__SYSCALL(__NR_writev, sys_writev)
+__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
 #define __NR_pread64 67
-__SYSCALL(__NR_pread64, sys_pread64)
+__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
 #define __NR_pwrite64 68
-__SYSCALL(__NR_pwrite64, sys_pwrite64)
+__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
 #define __NR_preadv 69
-__SYSCALL(__NR_preadv, sys_preadv)
+__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
 #define __NR_pwritev 70
-__SYSCALL(__NR_pwritev, sys_pwritev)
+__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
 
 /* fs/sendfile.c */
 #define __NR3264_sendfile 71
@@ -210,17 +222,17 @@ __SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
 
 /* fs/select.c */
 #define __NR_pselect6 72
-__SYSCALL(__NR_pselect6, sys_pselect6)
+__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
 #define __NR_ppoll 73
-__SYSCALL(__NR_ppoll, sys_ppoll)
+__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
 
 /* fs/signalfd.c */
 #define __NR_signalfd4 74
-__SYSCALL(__NR_signalfd4, sys_signalfd4)
+__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
 
 /* fs/splice.c */
 #define __NR_vmsplice 75
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
+__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
 #define __NR_splice 76
 __SYSCALL(__NR_splice, sys_splice)
 #define __NR_tee 77
@@ -243,23 +255,27 @@ __SYSCALL(__NR_fsync, sys_fsync)
 __SYSCALL(__NR_fdatasync, sys_fdatasync)
 #ifdef __ARCH_WANT_SYNC_FILE_RANGE2
 #define __NR_sync_file_range2 84
-__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2)
+__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
+	  compat_sys_sync_file_range2)
 #else
 #define __NR_sync_file_range 84
-__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
+	  compat_sys_sync_file_range)
 #endif
 
 /* fs/timerfd.c */
 #define __NR_timerfd_create 85
 __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
 #define __NR_timerfd_settime 86
-__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
+__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
+	  compat_sys_timerfd_settime)
 #define __NR_timerfd_gettime 87
-__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
+__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
+	  compat_sys_timerfd_gettime)
 
 /* fs/utimes.c */
 #define __NR_utimensat 88
-__SYSCALL(__NR_utimensat, sys_utimensat)
+__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
 
 /* kernel/acct.c */
 #define __NR_acct 89
@@ -281,7 +297,7 @@ __SYSCALL(__NR_exit, sys_exit)
 #define __NR_exit_group 94
 __SYSCALL(__NR_exit_group, sys_exit_group)
 #define __NR_waitid 95
-__SYSCALL(__NR_waitid, sys_waitid)
+__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
 
 /* kernel/fork.c */
 #define __NR_set_tid_address 96
@@ -291,25 +307,27 @@ __SYSCALL(__NR_unshare, sys_unshare)
 
 /* kernel/futex.c */
 #define __NR_futex 98
-__SYSCALL(__NR_futex, sys_futex)
+__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
 #define __NR_set_robust_list 99
-__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
+	  compat_sys_set_robust_list)
 #define __NR_get_robust_list 100
-__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
+__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
+	  compat_sys_get_robust_list)
 
 /* kernel/hrtimer.c */
 #define __NR_nanosleep 101
-__SYSCALL(__NR_nanosleep, sys_nanosleep)
+__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
 
 /* kernel/itimer.c */
 #define __NR_getitimer 102
-__SYSCALL(__NR_getitimer, sys_getitimer)
+__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
 #define __NR_setitimer 103
-__SYSCALL(__NR_setitimer, sys_setitimer)
+__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
 
 /* kernel/kexec.c */
 #define __NR_kexec_load 104
-__SYSCALL(__NR_kexec_load, sys_kexec_load)
+__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
 
 /* kernel/module.c */
 #define __NR_init_module 105
@@ -319,23 +337,24 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
 
 /* kernel/posix-timers.c */
 #define __NR_timer_create 107
-__SYSCALL(__NR_timer_create, sys_timer_create)
+__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
 #define __NR_timer_gettime 108
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
+__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
 #define __NR_timer_getoverrun 109
 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
 #define __NR_timer_settime 110
-__SYSCALL(__NR_timer_settime, sys_timer_settime)
+__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
 #define __NR_timer_delete 111
 __SYSCALL(__NR_timer_delete, sys_timer_delete)
 #define __NR_clock_settime 112
-__SYSCALL(__NR_clock_settime, sys_clock_settime)
+__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
 #define __NR_clock_gettime 113
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
+__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
 #define __NR_clock_getres 114
-__SYSCALL(__NR_clock_getres, sys_clock_getres)
+__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
 #define __NR_clock_nanosleep 115
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
+__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
+	  compat_sys_clock_nanosleep)
 
 /* kernel/printk.c */
 #define __NR_syslog 116
@@ -355,9 +374,11 @@ __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
 #define __NR_sched_getparam 121
 __SYSCALL(__NR_sched_getparam, sys_sched_getparam)
 #define __NR_sched_setaffinity 122
-__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
+__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
+	  compat_sys_sched_setaffinity)
 #define __NR_sched_getaffinity 123
-__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
+__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
+	  compat_sys_sched_getaffinity)
 #define __NR_sched_yield 124
 __SYSCALL(__NR_sched_yield, sys_sched_yield)
 #define __NR_sched_get_priority_max 125
@@ -365,7 +386,8 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
 #define __NR_sched_get_priority_min 126
 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
 #define __NR_sched_rr_get_interval 127
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
+__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
+	  compat_sys_sched_rr_get_interval)
 
 /* kernel/signal.c */
 #define __NR_restart_syscall 128
@@ -377,21 +399,23 @@ __SYSCALL(__NR_tkill, sys_tkill)
 #define __NR_tgkill 131
 __SYSCALL(__NR_tgkill, sys_tgkill)
 #define __NR_sigaltstack 132
-__SYSCALL(__NR_sigaltstack, sys_sigaltstack)
+__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
 #define __NR_rt_sigsuspend 133
-__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
 #define __NR_rt_sigaction 134
-__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */
+__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
 #define __NR_rt_sigprocmask 135
 __SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
 #define __NR_rt_sigpending 136
 __SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
 #define __NR_rt_sigtimedwait 137
-__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
+__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
+	  compat_sys_rt_sigtimedwait)
 #define __NR_rt_sigqueueinfo 138
-__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
+__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
+	  compat_sys_rt_sigqueueinfo)
 #define __NR_rt_sigreturn 139
-__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */
+__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
 
 /* kernel/sys.c */
 #define __NR_setpriority 140
@@ -421,7 +445,7 @@ __SYSCALL(__NR_setfsuid, sys_setfsuid)
 #define __NR_setfsgid 152
 __SYSCALL(__NR_setfsgid, sys_setfsgid)
 #define __NR_times 153
-__SYSCALL(__NR_times, sys_times)
+__SC_COMP(__NR_times, sys_times, compat_sys_times)
 #define __NR_setpgid 154
 __SYSCALL(__NR_setpgid, sys_setpgid)
 #define __NR_getpgid 155
@@ -441,11 +465,11 @@ __SYSCALL(__NR_sethostname, sys_sethostname)
 #define __NR_setdomainname 162
 __SYSCALL(__NR_setdomainname, sys_setdomainname)
 #define __NR_getrlimit 163
-__SYSCALL(__NR_getrlimit, sys_getrlimit)
+__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
 #define __NR_setrlimit 164
-__SYSCALL(__NR_setrlimit, sys_setrlimit)
+__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
 #define __NR_getrusage 165
-__SYSCALL(__NR_getrusage, sys_getrusage)
+__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
 #define __NR_umask 166
 __SYSCALL(__NR_umask, sys_umask)
 #define __NR_prctl 167
@@ -455,11 +479,11 @@ __SYSCALL(__NR_getcpu, sys_getcpu)
 
 /* kernel/time.c */
 #define __NR_gettimeofday 169
-__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
+__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
 #define __NR_settimeofday 170
-__SYSCALL(__NR_settimeofday, sys_settimeofday)
+__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
 #define __NR_adjtimex 171
-__SYSCALL(__NR_adjtimex, sys_adjtimex)
+__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
 
 /* kernel/timer.c */
 #define __NR_getpid 172
@@ -477,39 +501,40 @@ __SYSCALL(__NR_getegid, sys_getegid)
 #define __NR_gettid 178
 __SYSCALL(__NR_gettid, sys_gettid)
 #define __NR_sysinfo 179
-__SYSCALL(__NR_sysinfo, sys_sysinfo)
+__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
 
 /* ipc/mqueue.c */
 #define __NR_mq_open 180
-__SYSCALL(__NR_mq_open, sys_mq_open)
+__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
 #define __NR_mq_unlink 181
 __SYSCALL(__NR_mq_unlink, sys_mq_unlink)
 #define __NR_mq_timedsend 182
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
+__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
 #define __NR_mq_timedreceive 183
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
+__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
+	  compat_sys_mq_timedreceive)
 #define __NR_mq_notify 184
-__SYSCALL(__NR_mq_notify, sys_mq_notify)
+__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
 #define __NR_mq_getsetattr 185
-__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
+__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
 
 /* ipc/msg.c */
 #define __NR_msgget 186
 __SYSCALL(__NR_msgget, sys_msgget)
 #define __NR_msgctl 187
-__SYSCALL(__NR_msgctl, sys_msgctl)
+__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
 #define __NR_msgrcv 188
-__SYSCALL(__NR_msgrcv, sys_msgrcv)
+__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
 #define __NR_msgsnd 189
-__SYSCALL(__NR_msgsnd, sys_msgsnd)
+__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
 
 /* ipc/sem.c */
 #define __NR_semget 190
 __SYSCALL(__NR_semget, sys_semget)
 #define __NR_semctl 191
-__SYSCALL(__NR_semctl, sys_semctl)
+__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
 #define __NR_semtimedop 192
-__SYSCALL(__NR_semtimedop, sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
 
@@ -517,9 +542,9 @@ __SYSCALL(__NR_semop, sys_semop)
 #define __NR_shmget 194
 __SYSCALL(__NR_shmget, sys_shmget)
 #define __NR_shmctl 195
-__SYSCALL(__NR_shmctl, sys_shmctl)
+__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
 #define __NR_shmat 196
-__SYSCALL(__NR_shmat, sys_shmat)
+__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
 #define __NR_shmdt 197
 __SYSCALL(__NR_shmdt, sys_shmdt)
 
@@ -543,21 +568,21 @@ __SYSCALL(__NR_getpeername, sys_getpeername)
 #define __NR_sendto 206
 __SYSCALL(__NR_sendto, sys_sendto)
 #define __NR_recvfrom 207
-__SYSCALL(__NR_recvfrom, sys_recvfrom)
+__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
 #define __NR_setsockopt 208
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
 #define __NR_getsockopt 209
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
 #define __NR_shutdown 210
 __SYSCALL(__NR_shutdown, sys_shutdown)
 #define __NR_sendmsg 211
-__SYSCALL(__NR_sendmsg, sys_sendmsg)
+__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
 #define __NR_recvmsg 212
-__SYSCALL(__NR_recvmsg, sys_recvmsg)
+__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
 
 /* mm/filemap.c */
 #define __NR_readahead 213
-__SYSCALL(__NR_readahead, sys_readahead)
+__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
 
 /* mm/nommu.c, also with MMU */
 #define __NR_brk 214
@@ -573,19 +598,19 @@ __SYSCALL(__NR_add_key, sys_add_key)
 #define __NR_request_key 218
 __SYSCALL(__NR_request_key, sys_request_key)
 #define __NR_keyctl 219
-__SYSCALL(__NR_keyctl, sys_keyctl)
+__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
 
 /* arch/example/kernel/sys_example.c */
 #define __NR_clone 220
-__SYSCALL(__NR_clone, sys_clone)	/* .long sys_clone_wrapper */
+__SYSCALL(__NR_clone, sys_clone)
 #define __NR_execve 221
-__SYSCALL(__NR_execve, sys_execve)	/* .long sys_execve_wrapper */
+__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
 
 #define __NR3264_mmap 222
 __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
 /* mm/fadvise.c */
 #define __NR3264_fadvise64 223
-__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64)
+__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
 
 /* mm/, CONFIG_MMU only */
 #ifndef __ARCH_NOMMU
@@ -612,25 +637,26 @@ __SYSCALL(__NR_madvise, sys_madvise)
 #define __NR_remap_file_pages 234
 __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
 #define __NR_mbind 235
-__SYSCALL(__NR_mbind, sys_mbind)
+__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
 #define __NR_get_mempolicy 236
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
+__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
 #define __NR_set_mempolicy 237
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
+__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
 #define __NR_migrate_pages 238
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
+__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
 #define __NR_move_pages 239
-__SYSCALL(__NR_move_pages, sys_move_pages)
+__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
 #endif
 
 #define __NR_rt_tgsigqueueinfo 240
-__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
+__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
+	  compat_sys_rt_tgsigqueueinfo)
 #define __NR_perf_event_open 241
 __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 #define __NR_accept4 242
 __SYSCALL(__NR_accept4, sys_accept4)
 #define __NR_recvmmsg 243
-__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
+__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
 
 /*
  * Architectures may provide up to 16 syscalls of their own
@@ -639,24 +665,29 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 #define __NR_arch_specific_syscall 244
 
 #define __NR_wait4 260
-__SYSCALL(__NR_wait4, sys_wait4)
+__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
 #define __NR_prlimit64 261
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
 #define __NR_fanotify_init 262
 __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 #define __NR_fanotify_mark 263
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
-#define __NR_name_to_handle_at		264
+#define __NR_name_to_handle_at 264
 __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
-#define __NR_open_by_handle_at		265
-__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#define __NR_open_by_handle_at 265
+__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
+	  compat_sys_open_by_handle_at)
 #define __NR_clock_adjtime 266
-__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
+__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
 #define __NR_syncfs 267
 __SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_sendmmsg 269
+__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
 
 #undef __NR_syscalls
-#define __NR_syscalls 268
+#define __NR_syscalls 270
 
 /*
  * All syscalls below here should go away really,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 077c00d94f6e..db22d136ad08 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
- *	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
+ *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
@@ -682,6 +682,28 @@
 }
 
 /**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline)						\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
+	*(.data..percpu..first)						\
+	. = ALIGN(PAGE_SIZE);						\
+	*(.data..percpu..page_aligned)					\
+	. = ALIGN(cacheline);						\
+	*(.data..percpu..readmostly)					\
+	. = ALIGN(cacheline);						\
+	*(.data..percpu)						\
+	*(.data..percpu..shared_aligned)				\
+	VMLINUX_SYMBOL(__per_cpu_end) = .;
+
+/**
  * PERCPU_VADDR - define output section for percpu area
  * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
@@ -703,52 +725,33 @@
  *
  * Note that this macros defines __per_cpu_load as an absolute symbol.
  * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU().
+ * address, use PERCPU_SECTION.
  */
 #define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {			\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data..percpu..first)					\
-		. = ALIGN(PAGE_SIZE);					\
-		*(.data..percpu..page_aligned)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu..readmostly)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu)					\
-		*(.data..percpu..shared_aligned)			\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
- * PERCPU - define output section for percpu area, simple version
+ * PERCPU_SECTION - define output section for percpu area, simple version
  * @cacheline: cacheline size
- * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area.  This macro
- * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
+ * Align to PAGE_SIZE and outputs output section for percpu area.  This
+ * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
  * except that __per_cpu_load is defined as a relative symbol against
  * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(cacheline, align)					\
-	. = ALIGN(align);						\
+#define PERCPU_SECTION(cacheline)					\
+	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data..percpu..first)					\
-		. = ALIGN(PAGE_SIZE);					\
-		*(.data..percpu..page_aligned)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu..readmostly)				\
-		. = ALIGN(cacheline);					\
-		*(.data..percpu)					\
-		*(.data..percpu..shared_aligned)			\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+		PERCPU_INPUT(cacheline)					\
	}
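
For readers wiring an architecture up to the new <asm-generic/ptrace.h> above, the following is a minimal sketch, not part of this merge: the pt_regs layout and the __EXAMPLE_ASM_PTRACE_H__/"example" names are hypothetical. The generic header only requires that struct pt_regs be defined before it is included, and that any accessor whose default register name (pc, usp, fp) does not exist on this architecture be overridden first; the #ifndef guards in the generic header then leave the override in place.

/* Hypothetical arch/example/include/asm/ptrace.h */
#ifndef __EXAMPLE_ASM_PTRACE_H__
#define __EXAMPLE_ASM_PTRACE_H__

#ifndef __ASSEMBLY__

struct pt_regs {
	unsigned long pc;	/* matches the default GET_IP(regs) -> (regs)->pc */
	unsigned long sp;	/* this arch calls its user stack pointer "sp", not "usp" */
	unsigned long fp;	/* matches the default GET_FP(regs) -> (regs)->fp */
};

/* Override only the accessor whose default field name does not exist here. */
#define GET_USP(regs)		((regs)->sp)
#define SET_USP(regs, val)	(GET_USP(regs) = (val))

#include <asm-generic/ptrace.h>

#endif /* __ASSEMBLY__ */

#endif /* __EXAMPLE_ASM_PTRACE_H__ */

With that in place, common code can use instruction_pointer(), user_stack_pointer() and frame_pointer() on this architecture without a hand-written copy of each helper.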