author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-27 21:17:55 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-27 21:17:55 -0800
commit    54c0a4b46150db1571d955d598cd342c9f1d9657 (patch)
tree      fb5968daa68092779e7db3eb1ccd96829783dfc3
parent    1b17366d695c8ab03f98d0155357e97a427e1dce (diff)
parent    c2218e26c0d03c368fff825a6f15b7bb3418dbde (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge misc updates from Andrew Morton:
- a few hotfixes
- dynamic-debug updates
- ipc updates
- various other sweepings off the factory floor
* akpm: (31 commits)
firmware/google: drop 'select EFI' to avoid recursive dependency
compat: fix sys_fanotify_mark
checkpatch.pl: check for function declarations without arguments
mm/migrate.c: fix setting of cpupid on page migration twice against normal page
softirq: use const char * const for softirq_to_name, whitespace neatening
softirq: convert printks to pr_<level>
softirq: use ffs() in __do_softirq()
kernel/kexec.c: use vscnprintf() instead of vsnprintf() in vmcoreinfo_append_str()
splice: fix unexpected size truncation
ipc: fix compat msgrcv with negative msgtyp
ipc,msg: document barriers
ipc: delete seq_max field in struct ipc_ids
ipc: simplify sysvipc_proc_open() return
ipc: remove useless return statement
ipc: remove braces for single statements
ipc: standardize code comments
ipc: whitespace cleanup
ipc: change kern_ipc_perm.deleted type to bool
ipc: introduce ipc_valid_object() helper to sort out IPC_RMID races
ipc/sem.c: avoid overflow of semop undo (semadj) value
...
33 files changed, 448 insertions, 416 deletions
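
The first hunks below (arch/arm/kernel/setup.c and include/linux/bootmem.h) form one hotfix: the ARM "System RAM" resource allocation is switched from memblock_virt_alloc() to a new lowmem-bounded variant. Condensed from the bootmem.h hunk, the memblock-backed version of the helper is:

	#ifndef ARCH_LOW_ADDRESS_LIMIT
	#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
	#endif

	static inline void * __init memblock_virt_alloc_low(
				phys_addr_t size, phys_addr_t align)
	{
		/* like memblock_virt_alloc(), but capped below the
		 * architecture's low-address limit */
		return memblock_virt_alloc_try_nid(size, align,
						   BOOTMEM_LOW_LIMIT,
						   ARCH_LOW_ADDRESS_LIMIT,
						   NUMA_NO_NODE);
	}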
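
The fanotify compat fix is about which 32-bit argument carries the high half of the 64-bit mark mask. After the fs/notify/fanotify/fanotify_user.c hunk below, the halves are recombined as:

	return sys_fanotify_mark(fanotify_fd, flags,
	#ifdef __BIG_ENDIAN
				 ((__u64)mask0 << 32) | mask1,	/* mask0 is the high word */
	#else
				 ((__u64)mask1 << 32) | mask0,	/* mask1 is the high word */
	#endif
				 dfd, pathname);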
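
"ipc: fix compat msgrcv with negative msgtyp" touches the old-style compat ipc() multiplexer, where the message type arrives in an unsigned 32-bit argument; the ipc/compat.c hunk below adds a cast so a negative type survives the conversion to long:

	/* fifth is a u32 syscall argument; casting through s32 makes a
	 * negative msgtyp sign-extend into do_msgrcv()'s long parameter */
	return do_msgrcv(first, uptr, second, (s32)fifth, third,
			 compat_do_msg_fill);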
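
Much of the ipc churn below replaces open-coded checks of kern_ipc_perm.deleted with the new ipc_valid_object() helper, and the deleted field itself becomes a bool. The helper and a typical caller, condensed from the ipc/util.h and ipc/msg.c hunks:

	/* ipc/util.h */
	static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
	{
		return !perm->deleted;	/* false once ipc_rmid() has freed the id */
	}

	/* typical caller, with kern_ipc_perm.lock held */
	ipc_lock_object(&msq->q_perm);

	/* raced with RMID? */
	if (!ipc_valid_object(&msq->q_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}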
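
The kexec change swaps vsnprintf() for vscnprintf() in vmcoreinfo_append_str(): vsnprintf() returns the length the output would have had, which can exceed sizeof(buf), while vscnprintf() returns what was actually stored, so the value later clamped with min() and used as the copy length can never overrun buf. From the kernel/kexec.c hunk below:

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);	/* bytes actually stored in buf */
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);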
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1e8b030dbefd..b0df9761de6d 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) kernel_data.end = virt_to_phys(_end - 1); for_each_memblock(memory, region) { - res = memblock_virt_alloc(sizeof(*res), 0); + res = memblock_virt_alloc_low(sizeof(*res), 0); res->name = "System RAM"; res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c index 9be0d5d02a9a..f2f6323c8d64 100644 --- a/arch/unicore32/kernel/early_printk.c +++ b/arch/unicore32/kernel/early_printk.c @@ -35,17 +35,11 @@ static struct console early_ocd_console = { static int __init setup_early_printk(char *buf) { - int keep_early; - if (!buf || early_console) return 0; - if (strstr(buf, "keep")) - keep_early = 1; - early_console = &early_ocd_console; - - if (keep_early) + if (strstr(buf, "keep")) early_console->flags &= ~CON_BOOT; else early_console->flags |= CON_BOOT; diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 2f59cce3b38a..f97fbe3abb67 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -51,9 +51,9 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; -static inline phys_addr_t get_max_low_mapped(void) +static inline phys_addr_t get_max_mapped(void) { - return (phys_addr_t)max_low_pfn_mapped << PAGE_SHIFT; + return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; } bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c9675594d7ca..06853e670354 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p) setup_real_mode(); - memblock_set_current_limit(get_max_low_mapped()); + memblock_set_current_limit(get_max_mapped()); dma_contiguous_reserve(0); /* diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 2f21b0bfe653..29c8cdda82a1 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -12,8 +12,7 @@ menu "Google Firmware Drivers" config GOOGLE_SMI tristate "SMI interface for Google platforms" - depends on ACPI && DMI - select EFI + depends on ACPI && DMI && EFI select EFI_VARS help Say Y here if you want to enable SMI callbacks for Google diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 57d7c083cb4b..1fd66abe5740 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -886,9 +886,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark, { return sys_fanotify_mark(fanotify_fd, flags, #ifdef __BIG_ENDIAN - ((__u64)mask1 << 32) | mask0, -#else ((__u64)mask0 << 32) | mask1, +#else + ((__u64)mask1 << 32) | mask0, #endif dfd, pathname); } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 4f791f6d27d0..41513a4e98e4 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -948,7 +948,7 @@ leave: ocfs2_free_dir_lookup_result(&orphan_insert); ocfs2_free_dir_lookup_result(&lookup); - if (status && (status != -ENOTEMPTY)) + if (status && (status != -ENOTEMPTY) && (status != -ENOENT)) mlog_errno(status); return status; diff --git a/include/linux/bootmem.h 
b/include/linux/bootmem.h index 2fae55def608..b388223bd4a9 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -175,6 +175,27 @@ static inline void * __init memblock_virt_alloc_nopanic( NUMA_NO_NODE); } +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + BOOTMEM_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} + static inline void * __init memblock_virt_alloc_from_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) { @@ -238,6 +259,22 @@ static inline void * __init memblock_virt_alloc_nopanic( return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); } +static inline void * __init memblock_virt_alloc_low( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_low_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT); +} + static inline void * __init memblock_virt_alloc_from_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) { diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index db43b58a3355..0053adde0ed9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -360,7 +360,7 @@ enum /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. */ -extern char *softirq_to_name[NR_SOFTIRQS]; +extern const char * const softirq_to_name[NR_SOFTIRQS]; /* softirq mask and active fields moved to irq_cpustat_t in * asm/hardirq.h to get better cache usage. 
KAO diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 8d861b2651f7..9d84942ae2e5 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -11,7 +11,7 @@ struct kern_ipc_perm { spinlock_t lock; - int deleted; + bool deleted; int id; key_t key; kuid_t uid; diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index f6c82de12541..e7831d203737 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -21,7 +21,6 @@ struct user_namespace; struct ipc_ids { int in_use; unsigned short seq; - unsigned short seq_max; struct rw_semaphore rwsem; struct idr ipcs_idr; int next_id; diff --git a/include/linux/msg.h b/include/linux/msg.h index e21f9d44307f..f3f302f9c197 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -9,7 +9,7 @@ struct msg_msg { struct list_head m_list; long m_type; size_t m_ts; /* message text size */ - struct msg_msgseg* next; + struct msg_msgseg *next; void *security; /* the actual message follows immediately */ }; diff --git a/include/linux/shm.h b/include/linux/shm.h index 429c1995d756..1e2cd2e6b540 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -9,7 +9,7 @@ struct shmid_kernel /* private to the kernel */ { struct kern_ipc_perm shm_perm; - struct file * shm_file; + struct file *shm_file; unsigned long shm_nattch; unsigned long shm_segsz; time_t shm_atim; diff --git a/include/linux/splice.h b/include/linux/splice.h index 74575cbf2d6f..0e43906d2fda 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -24,7 +24,8 @@ * Passed to the actors */ struct splice_desc { - unsigned int len, total_len; /* current and remaining length */ + size_t total_len; /* remaining length */ + unsigned int len; /* current length */ unsigned int flags; /* splice flags */ /* * actor() private data diff --git a/init/main.c b/init/main.c index 3f1f4e4b61f7..2fd9cef70ee8 100644 --- a/init/main.c +++ b/init/main.c @@ -92,8 +92,6 @@ static int kernel_init(void *); extern void init_IRQ(void); extern void fork_init(unsigned long); -extern void mca_init(void); -extern void sbus_init(void); extern void radix_tree_init(void); #ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } diff --git a/ipc/compat.c b/ipc/compat.c index 892f6585dd60..f486b0096a67 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -197,7 +197,7 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p, static inline int get_compat_semid64_ds(struct semid64_ds *s64, struct compat_semid64_ds __user *up64) { - if (!access_ok (VERIFY_READ, up64, sizeof(*up64))) + if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) return -EFAULT; return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); } @@ -205,7 +205,7 @@ static inline int get_compat_semid64_ds(struct semid64_ds *s64, static inline int get_compat_semid_ds(struct semid64_ds *s, struct compat_semid_ds __user *up) { - if (!access_ok (VERIFY_READ, up, sizeof(*up))) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm); } @@ -215,7 +215,7 @@ static inline int put_compat_semid64_ds(struct semid64_ds *s64, { int err; - if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64))) + if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) return -EFAULT; err = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); err |= __put_user(s64->sem_otime, &up64->sem_otime); @@ -229,7 +229,7 @@ static inline int put_compat_semid_ds(struct semid64_ds *s, { int err; - if (!access_ok (VERIFY_WRITE, up, sizeof(*up))) + if 
(!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; err = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm); err |= __put_user(s->sem_otime, &up->sem_otime); @@ -288,11 +288,11 @@ static long do_compat_semctl(int first, int second, int third, u32 pad) break; case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_semid64_ds(&s64, compat_ptr(pad)); - } else { + else err = get_compat_semid_ds(&s64, compat_ptr(pad)); - } + up64 = compat_alloc_user_space(sizeof(s64)); if (copy_to_user(up64, &s64, sizeof(s64))) err = -EFAULT; @@ -376,12 +376,12 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second, struct compat_ipc_kludge ipck; if (!uptr) return -EINVAL; - if (copy_from_user (&ipck, uptr, sizeof(ipck))) + if (copy_from_user(&ipck, uptr, sizeof(ipck))) return -EFAULT; uptr = compat_ptr(ipck.msgp); fifth = ipck.msgtyp; } - return do_msgrcv(first, uptr, second, fifth, third, + return do_msgrcv(first, uptr, second, (s32)fifth, third, compat_do_msg_fill); } case MSGGET: @@ -515,11 +515,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr) break; case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_msqid64(&m64, uptr); - } else { + else err = get_compat_msqid(&m64, uptr); - } + if (err) break; p = compat_alloc_user_space(sizeof(m64)); @@ -702,11 +702,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr) case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_shmid64_ds(&s64, uptr); - } else { + else err = get_compat_shmid_ds(&s64, uptr); - } + if (err) break; p = compat_alloc_user_space(sizeof(s64)); diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c index 380ea4fe08e7..63d7c6de335b 100644 --- a/ipc/compat_mq.c +++ b/ipc/compat_mq.c @@ -64,7 +64,7 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, return sys_mq_open(u_name, oflag, mode, p); } -static int compat_prepare_timeout(struct timespec __user * *p, +static int compat_prepare_timeout(struct timespec __user **p, const struct compat_timespec __user *u) { struct timespec ts; diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index b0e99deb6d05..17028648cfeb 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -164,21 +164,21 @@ static struct ctl_table ipc_kern_table[] = { { .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, - .maxlen = sizeof (init_ipc_ns.shm_ctlmax), + .maxlen = sizeof(init_ipc_ns.shm_ctlmax), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmall", .data = &init_ipc_ns.shm_ctlall, - .maxlen = sizeof (init_ipc_ns.shm_ctlall), + .maxlen = sizeof(init_ipc_ns.shm_ctlall), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, }, { .procname = "shmmni", .data = &init_ipc_ns.shm_ctlmni, - .maxlen = sizeof (init_ipc_ns.shm_ctlmni), + .maxlen = sizeof(init_ipc_ns.shm_ctlmni), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, @@ -194,7 +194,7 @@ static struct ctl_table ipc_kern_table[] = { { .procname = "msgmax", .data = &init_ipc_ns.msg_ctlmax, - .maxlen = sizeof (init_ipc_ns.msg_ctlmax), + .maxlen = sizeof(init_ipc_ns.msg_ctlmax), .mode = 0644, .proc_handler = proc_ipc_dointvec_minmax, .extra1 = &zero, @@ -203,7 +203,7 @@ static struct ctl_table ipc_kern_table[] = { { .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, - .maxlen = sizeof (init_ipc_ns.msg_ctlmni), + .maxlen = sizeof(init_ipc_ns.msg_ctlmni), .mode = 0644, .proc_handler = proc_ipc_callback_dointvec_minmax, .extra1 = &zero, @@ -212,7 +212,7 @@ static struct ctl_table ipc_kern_table[] = 
{ { .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, - .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), + .maxlen = sizeof(init_ipc_ns.msg_ctlmnb), .mode = 0644, .proc_handler = proc_ipc_dointvec_minmax, .extra1 = &zero, @@ -221,7 +221,7 @@ static struct ctl_table ipc_kern_table[] = { { .procname = "sem", .data = &init_ipc_ns.sem_ctls, - .maxlen = 4*sizeof (int), + .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_ipc_dointvec, }, diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 95827ce2f3c7..ccf1f9fd263a 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -6,7 +6,7 @@ * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: - * Manfred Spraul (manfred@colorfullife.com) + * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * @@ -73,7 +73,7 @@ struct mqueue_inode_info { struct mq_attr attr; struct sigevent notify; - struct pid* notify_owner; + struct pid *notify_owner; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; @@ -92,7 +92,7 @@ static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; -static struct ctl_table_header * mq_sysctl_table; +static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { @@ -466,13 +466,13 @@ out_unlock: static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = dentry->d_inode; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; dir->i_size -= DIRENT_SIZE; - drop_nlink(inode); - dput(dentry); - return 0; + drop_nlink(inode); + dput(dentry); + return 0; } /* @@ -622,7 +622,7 @@ static struct ext_wait_queue *wq_get_first_waiter( static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -1303,11 +1303,11 @@ retry: out_fput: fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { + else if (nc) dev_kfree_skb(nc); - } + return ret; } diff --git a/ipc/msg.c b/ipc/msg.c index 558aa91186b6..245db1140ad6 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res) struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { - msr->r_msg = NULL; + msr->r_msg = NULL; /* initialize expunge ordering */ wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before setting r_msg as + * the receiving end depends on it: either spinning on a nil, + * or dealing with -EAGAIN cases. See lockless receive part 1 + * and 2 in do_msgrcv(). 
+ */ smp_mb(); msr->r_msg = ERR_PTR(res); } @@ -318,7 +324,7 @@ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -363,7 +369,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -375,9 +381,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->msg_perm.uid = tbuf_old.msg_perm.uid; - out->msg_perm.gid = tbuf_old.msg_perm.gid; - out->msg_perm.mode = tbuf_old.msg_perm.mode; + out->msg_perm.uid = tbuf_old.msg_perm.uid; + out->msg_perm.gid = tbuf_old.msg_perm.gid; + out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; @@ -606,13 +612,13 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) static int testmsg(struct msg_msg *msg, long type, int mode) { - switch(mode) + switch (mode) { case SEARCH_ANY: case SEARCH_NUMBER: return 1; case SEARCH_LESSEQUAL: - if (msg->m_type <=type) + if (msg->m_type <= type) return 1; break; case SEARCH_EQUAL: @@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { + /* initialize pipelined send ordering */ msr->r_msg = NULL; wake_up_process(msr->r_tsk); - smp_mb(); + smp_mb(); /* see barrier comment below */ msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before + * setting r_msg, as the receiving end depends + * on it. See lockless receive part 1 and 2 in + * do_msgrcv(). + */ smp_mb(); msr->r_msg = msg; @@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) } } } + return 0; } @@ -696,7 +710,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, goto out_unlock0; /* raced with RMID? */ - if (msq->q_perm.deleted) { + if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } @@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, goto out_unlock0; } + /* enqueue the sender and prepare to block */ ss_add(msq, &s); if (!ipc_rcu_getref(msq)) { @@ -731,7 +746,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, ipc_lock_object(&msq->q_perm); ipc_rcu_putref(msq, ipc_rcu_free); - if (msq->q_perm.deleted) { + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } @@ -909,7 +925,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl ipc_lock_object(&msq->q_perm); /* raced with RMID? */ - if (msq->q_perm.deleted) { + if (!ipc_valid_object(&msq->q_perm)) { msg = ERR_PTR(-EIDRM); goto out_unlock0; } @@ -983,7 +999,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. 
*/ - msg = (struct msg_msg*)msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; while (msg == NULL) { cpu_relax(); msg = (struct msg_msg *)msr_d.r_msg; @@ -1004,7 +1020,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl /* Lockless receive, part 4: * Repeat test after acquiring the spinlock. */ - msg = (struct msg_msg*)msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; if (msg != ERR_PTR(-EAGAIN)) goto out_unlock0; diff --git a/ipc/sem.c b/ipc/sem.c index db9d241af133..bee555417312 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -188,7 +188,7 @@ void sem_exit_ns(struct ipc_namespace *ns) } #endif -void __init sem_init (void) +void __init sem_init(void) { sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", @@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma) } /** - * merge_queues - Merge single semop queues into global queue + * merge_queues - merge single semop queues into global queue * @sma: semaphore array * * This function merges all per-semaphore queues into the global queue. @@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, /* ipc_rmid() may have already freed the ID while sem_lock * was spinning: verify that the structure is still valid */ - if (!ipcp->deleted) + if (ipc_valid_object(ipcp)) return container_of(ipcp, struct sem_array, sem_perm); sem_unlock(sma, *locknum); @@ -445,11 +445,11 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) * * call wake_up_process * * set queue.status to the final value. * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the sem array. - * * otherwise it must acquire the spinlock and check what's up. + * * if it's IN_WAKEUP, then it must wait until the value changes + * * if it's not -EINTR, then the operation was completed by + * update_queue. semtimedop can return queue.status without + * performing any operation on the sem array. + * * otherwise it must acquire the spinlock and check what's up. 
* * The two-stage algorithm is necessary to protect against the following * races: @@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) * * Called with sem_ids.rwsem held (as a writer) */ - static int newary(struct ipc_namespace *ns, struct ipc_params *params) { int id; @@ -491,12 +490,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof (*sma) + nsems * sizeof (struct sem); + size = sizeof(*sma) + nsems * sizeof(struct sem); sma = ipc_rcu_alloc(size); - if (!sma) { + if (!sma) return -ENOMEM; - } - memset (sma, 0, size); + + memset(sma, 0, size); sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; @@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/** perform_atomic_semop - Perform (if possible) a semaphore operation +/** + * perform_atomic_semop - Perform (if possible) a semaphore operation * @sma: semaphore array * @sops: array with operations that should be checked - * @nsems: number of sops + * @nsops: number of operations * @un: undo array * @pid: pid that did the change * @@ -595,19 +595,18 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) * Returns 1 if the operation is impossible, the caller must sleep. * Negative values are error codes. */ - static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, int nsops, struct sem_undo *un, int pid) { int result, sem_op; struct sembuf *sop; - struct sem * curr; + struct sem *curr; for (sop = sops; sop < sops + nsops; sop++) { curr = sma->sem_base + sop->sem_num; sem_op = sop->sem_op; result = curr->semval; - + if (!sem_op && result) goto would_block; @@ -616,25 +615,24 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, goto would_block; if (result > SEMVMX) goto out_of_range; + if (sop->sem_flg & SEM_UNDO) { int undo = un->semadj[sop->sem_num] - sem_op; - /* - * Exceeding the undo range is an error. - */ + /* Exceeding the undo range is an error. */ if (undo < (-SEMAEM - 1) || undo > SEMAEM) goto out_of_range; + un->semadj[sop->sem_num] = undo; } + curr->semval = result; } sop--; while (sop >= sops) { sma->sem_base[sop->sem_num].sempid = pid; - if (sop->sem_flg & SEM_UNDO) - un->semadj[sop->sem_num] -= sop->sem_op; sop--; } - + return 0; out_of_range: @@ -650,7 +648,10 @@ would_block: undo: sop--; while (sop >= sops) { - sma->sem_base[sop->sem_num].semval -= sop->sem_op; + sem_op = sop->sem_op; + sma->sem_base[sop->sem_num].semval -= sem_op; + if (sop->sem_flg & SEM_UNDO) + un->semadj[sop->sem_num] += sem_op; sop--; } @@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, } /** - * wake_up_sem_queue_do(pt) - do the actual wake-up + * wake_up_sem_queue_do - do the actual wake-up * @pt: list of tasks to be woken up * * Do the actual wake-up. @@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) } /** - * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks + * wake_const_ops - wake up non-alter tasks * @sma: semaphore array. * @semnum: semaphore that was modified. * @pt: list head for the tasks that must be woken up. 
@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum, } /** - * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks + * do_smart_wakeup_zero - wakeup all wait for zero tasks * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations * @pt: list head of the tasks that must be woken up. * - * do_smart_wakeup_zero() checks all required queue for wait-for-zero - * operations, based on the actual changes that were performed on the - * semaphore array. + * Checks all required queue for wait-for-zero operations, based + * on the actual changes that were performed on the semaphore array. * The function returns 1 if at least one operation was completed successfully. */ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, @@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, /** - * update_queue(sma, semnum): Look for tasks that can be completed. + * update_queue - look for tasks that can be completed. * @sma: semaphore array. * @semnum: semaphore that was modified. * @pt: list head for the tasks that must be woken up. @@ -918,7 +918,7 @@ again: } /** - * set_semotime(sma, sops) - set sem_otime + * set_semotime - set sem_otime * @sma: semaphore array * @sops: operations that modified the array, may be NULL * @@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops) } /** - * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue + * do_smart_update - optimized update_queue * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations @@ -998,21 +998,21 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop * The counts we return here are a rough approximation, but still * warrant that semncnt+semzcnt>0 if the task is on the pending queue. 
*/ -static int count_semncnt (struct sem_array * sma, ushort semnum) +static int count_semncnt(struct sem_array *sma, ushort semnum) { int semncnt; - struct sem_queue * q; + struct sem_queue *q; semncnt = 0; list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { - struct sembuf * sops = q->sops; + struct sembuf *sops = q->sops; BUG_ON(sops->sem_num != semnum); if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) semncnt++; } list_for_each_entry(q, &sma->pending_alter, list) { - struct sembuf * sops = q->sops; + struct sembuf *sops = q->sops; int nsops = q->nsops; int i; for (i = 0; i < nsops; i++) @@ -1024,21 +1024,21 @@ static int count_semncnt (struct sem_array * sma, ushort semnum) return semncnt; } -static int count_semzcnt (struct sem_array * sma, ushort semnum) +static int count_semzcnt(struct sem_array *sma, ushort semnum) { int semzcnt; - struct sem_queue * q; + struct sem_queue *q; semzcnt = 0; list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) { - struct sembuf * sops = q->sops; + struct sembuf *sops = q->sops; BUG_ON(sops->sem_num != semnum); if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) semzcnt++; } list_for_each_entry(q, &sma->pending_const, list) { - struct sembuf * sops = q->sops; + struct sembuf *sops = q->sops; int nsops = q->nsops; int i; for (i = 0; i < nsops; i++) @@ -1108,7 +1108,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -1151,7 +1151,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int err; struct sem_array *sma; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: { @@ -1162,7 +1162,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, if (err) return err; - memset(&seminfo,0,sizeof(seminfo)); + memset(&seminfo, 0, sizeof(seminfo)); seminfo.semmni = ns->sc_semmni; seminfo.semmns = ns->sc_semmns; seminfo.semmsl = ns->sc_semmsl; @@ -1183,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, up_read(&sem_ids(ns).rwsem); if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) return -EFAULT; - return (max_id < 0) ? 0: max_id; + return (max_id < 0) ? 
0 : max_id; } case IPC_STAT: case SEM_STAT: @@ -1239,7 +1239,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, { struct sem_undo *un; struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err; struct list_head tasks; int val; @@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, sem_lock(sma, NULL, -1); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { sem_unlock(sma, -1); rcu_read_unlock(); return -EIDRM; @@ -1309,10 +1309,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int cmd, void __user *p) { struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err, nsems; ushort fast_sem_io[SEMMSL_FAST]; - ushort* sem_io = fast_sem_io; + ushort *sem_io = fast_sem_io; struct list_head tasks; INIT_LIST_HEAD(&tasks); @@ -1342,11 +1342,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; sem_lock(sma, NULL, -1); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; goto out_unlock; } - if(nsems > SEMMSL_FAST) { + if (nsems > SEMMSL_FAST) { if (!ipc_rcu_getref(sma)) { err = -EIDRM; goto out_unlock; @@ -1354,14 +1354,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, sem_unlock(sma, -1); rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { + if (sem_io == NULL) { ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; goto out_unlock; } @@ -1371,7 +1371,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, sem_unlock(sma, -1); rcu_read_unlock(); err = 0; - if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) + if (copy_to_user(array, sem_io, nsems*sizeof(ushort))) err = -EFAULT; goto out_free; } @@ -1386,15 +1386,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } rcu_read_unlock(); - if(nsems > SEMMSL_FAST) { + if (nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { + if (sem_io == NULL) { ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } } - if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { + if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { ipc_rcu_putref(sma, ipc_rcu_free); err = -EFAULT; goto out_free; @@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; goto out_unlock; } @@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, goto out_rcu_wakeup; sem_lock(sma, NULL, -1); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; goto out_unlock; } @@ -1449,10 +1449,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, err = curr->sempid; goto out_unlock; case GETNCNT: - err = count_semncnt(sma,semnum); + err = count_semncnt(sma, semnum); goto out_unlock; case GETZCNT: - err = count_semzcnt(sma,semnum); + err = count_semzcnt(sma, semnum); goto out_unlock; } @@ -1462,7 +1462,7 @@ out_rcu_wakeup: rcu_read_unlock(); wake_up_sem_queue_do(&tasks); out_free: - if(sem_io != fast_sem_io) + if (sem_io != fast_sem_io) ipc_free(sem_io, sizeof(ushort)*nsems); return err; } @@ -1470,7 +1470,7 @@ out_free: static inline unsigned long 
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -1479,7 +1479,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { struct semid_ds tbuf_old; - if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->sem_perm.uid = tbuf_old.sem_perm.uid; @@ -1506,7 +1506,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, struct semid64_ds semid64; struct kern_ipc_perm *ipcp; - if(cmd == IPC_SET) { + if (cmd == IPC_SET) { if (copy_semid_from_user(&semid64, p, version)) return -EFAULT; } @@ -1566,7 +1566,7 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: case IPC_STAT: @@ -1634,7 +1634,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { struct sem_undo *un; - assert_spin_locked(&ulp->lock); + assert_spin_locked(&ulp->lock); un = __lookup_undo(ulp, semid); if (un) { @@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) } /** - * find_alloc_undo - Lookup (and if not present create) undo array + * find_alloc_undo - lookup (and if not present create) undo array * @ns: namespace * @semid: semaphore array id * @@ -1670,7 +1670,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); spin_unlock(&ulp->lock); - if (likely(un!=NULL)) + if (likely(un != NULL)) goto out; /* no undo structure around - allocate one. */ @@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) /* step 3: Acquire the lock on semaphore array */ rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { sem_unlock(sma, -1); rcu_read_unlock(); kfree(new); @@ -1735,7 +1735,7 @@ out: /** - * get_queue_result - Retrieve the result code from sem_queue + * get_queue_result - retrieve the result code from sem_queue * @q: Pointer to queue structure * * Retrieve the return code from the pending queue. 
If IN_WAKEUP is found in @@ -1765,7 +1765,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, int error = -EINVAL; struct sem_array *sma; struct sembuf fast_sops[SEMOPM_FAST]; - struct sembuf* sops = fast_sops, *sop; + struct sembuf *sops = fast_sops, *sop; struct sem_undo *un; int undos = 0, alter = 0, max, locknum; struct sem_queue queue; @@ -1779,13 +1779,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, return -EINVAL; if (nsops > ns->sc_semopm) return -E2BIG; - if(nsops > SEMOPM_FAST) { - sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); - if(sops==NULL) + if (nsops > SEMOPM_FAST) { + sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL); + if (sops == NULL) return -ENOMEM; } - if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { - error=-EFAULT; + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { + error = -EFAULT; goto out_free; } if (timeout) { @@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, error = -EIDRM; locknum = sem_lock(sma, sops, nsops); - if (sma->sem_perm.deleted) + /* + * We eventually might perform the following check in a lockless + * fashion, considering ipc_valid_object() locking constraints. + * If nsops == 1 and there is no contention for sem_perm.lock, then + * only a per-semaphore lock is held and it's OK to proceed with the + * check below. More details on the fine grained locking scheme + * entangled here and why it's RMID race safe on comments at sem_lock() + */ + if (!ipc_valid_object(&sma->sem_perm)) goto out_unlock_free; /* * semid identifiers are not unique - find_alloc_undo may have @@ -1959,10 +1967,8 @@ sleep_again: * If queue.status != -EINTR we are woken up by another process. * Leave without unlink_queue(), but with sem_unlock(). */ - - if (error != -EINTR) { + if (error != -EINTR) goto out_unlock_free; - } /* * If an interrupt occurred we have to clean up the queue @@ -1984,7 +1990,7 @@ out_rcu_wakeup: rcu_read_unlock(); wake_up_sem_queue_do(&tasks); out_free: - if(sops != fast_sops) + if (sops != fast_sops) kfree(sops); return error; } @@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk) sem_lock(sma, NULL, -1); /* exit_sem raced with IPC_RMID, nothing to do */ - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { sem_unlock(sma, -1); rcu_read_unlock(); continue; @@ -2093,7 +2099,7 @@ void exit_sem(struct task_struct *tsk) /* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { - struct sem * semaphore = &sma->sem_base[i]; + struct sem *semaphore = &sma->sem_base[i]; if (un->semadj[i]) { semaphore->semval += un->semadj[i]; /* @@ -2107,7 +2113,7 @@ void exit_sem(struct task_struct *tsk) * Linux caps the semaphore value, both at 0 * and at SEMVMX. 
* - * Manfred <manfred@colorfullife.com> + * Manfred <manfred@colorfullife.com> */ if (semaphore->semval < 0) semaphore->semval = 0; diff --git a/ipc/shm.c b/ipc/shm.c index 7a51443a51d6..76459616a7fa 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -67,7 +67,7 @@ static const struct vm_operations_struct shm_vm_ops; static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); -static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); +static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif @@ -91,7 +91,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); - if (shp->shm_nattch){ + if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; @@ -116,7 +116,7 @@ static int __init ipc_ns_init(void) pure_initcall(ipc_ns_init); -void __init shm_init (void) +void __init shm_init(void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 @@ -248,7 +248,7 @@ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) */ static void shm_close(struct vm_area_struct *vma) { - struct file * file = vma->vm_file; + struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; @@ -379,7 +379,7 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, } #endif -static int shm_mmap(struct file * file, struct vm_area_struct * vma) +static int shm_mmap(struct file *file, struct vm_area_struct *vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; @@ -477,7 +477,6 @@ static const struct vm_operations_struct shm_vm_ops = { * * Called with shm_ids.rwsem held as a writer. */ - static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; @@ -486,7 +485,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct file * file; + struct file *file; char name[13]; int id; vm_flags_t acctflag = 0; @@ -512,7 +511,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) return error; } - sprintf (name, "SYSV%08x", key); + sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; @@ -533,7 +532,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even - * if it's asked for. + * if it's asked for. 
*/ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) @@ -628,7 +627,7 @@ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -655,7 +654,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -680,14 +679,14 @@ copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; - if(in->shmmax > INT_MAX) + if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; @@ -846,14 +845,14 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid, shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; - if(copy_shminfo_to_user (buf, &shminfo, version)) + if (copy_shminfo_to_user(buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); - if(err<0) + if (err < 0) err = 0; goto out; } @@ -864,7 +863,7 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid, memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; - shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); + shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; @@ -975,6 +974,13 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) goto out_unlock1; ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + err = -EIDRM; + goto out_unlock0; + } + if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); if (!uid_eq(euid, shp->shm_perm.uid) && @@ -989,13 +995,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) } shm_file = shp->shm_file; - - /* check if shm_destroy() is tearing down shp */ - if (shm_file == NULL) { - err = -EIDRM; - goto out_unlock0; - } - if (is_file_hugepages(shm_file)) goto out_unlock0; @@ -1047,7 +1046,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, struct shmid_kernel *shp; unsigned long addr; unsigned long size; - struct file * file; + struct file *file; int err; unsigned long flags; unsigned long prot; @@ -1116,7 +1115,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ - if (shp->shm_file == NULL) { + if (!ipc_valid_object(&shp->shm_perm)) { ipc_unlock_object(&shp->shm_perm); err = -EIDRM; goto out_unlock; diff --git a/ipc/util.c b/ipc/util.c index 3ae17a4ace5b..e1b4c6db8aa0 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -110,15 +110,15 @@ static struct notifier_block ipc_memory_nb = { }; /** - * ipc_init - initialise IPC subsystem + * ipc_init - initialise ipc subsystem * - * The various system5 IPC resources (semaphores, messages and 
shared - * memory) are initialised - * A callback routine is registered into the memory hotplug notifier - * chain: since msgmni scales to lowmem this callback routine will be - * called upon successful memory add / remove to recompute msmgni. + * The various sysv ipc resources (semaphores, messages and shared + * memory) are initialised. + * + * A callback routine is registered into the memory hotplug notifier + * chain: since msgmni scales to lowmem this callback routine will be + * called upon successful memory add / remove to recompute msmgni. */ - static int __init ipc_init(void) { sem_init(); @@ -131,39 +131,29 @@ static int __init ipc_init(void) __initcall(ipc_init); /** - * ipc_init_ids - initialise IPC identifiers - * @ids: Identifier set + * ipc_init_ids - initialise ipc identifiers + * @ids: ipc identifier set * - * Set up the sequence range to use for the ipc identifier range (limited - * below IPCMNI) then initialise the ids idr. + * Set up the sequence range to use for the ipc identifier range (limited + * below IPCMNI) then initialise the ids idr. */ - void ipc_init_ids(struct ipc_ids *ids) { - init_rwsem(&ids->rwsem); - ids->in_use = 0; ids->seq = 0; ids->next_id = -1; - { - int seq_limit = INT_MAX/SEQ_MULTIPLIER; - if (seq_limit > USHRT_MAX) - ids->seq_max = USHRT_MAX; - else - ids->seq_max = seq_limit; - } - + init_rwsem(&ids->rwsem); idr_init(&ids->ipcs_idr); } #ifdef CONFIG_PROC_FS static const struct file_operations sysvipc_proc_fops; /** - * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface. - * @path: Path in procfs - * @header: Banner to be printed at the beginning of the file. - * @ids: ipc id table to iterate. - * @show: show routine. + * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface. + * @path: Path in procfs + * @header: Banner to be printed at the beginning of the file. + * @ids: ipc id table to iterate. + * @show: show routine. */ void __init ipc_init_proc_interface(const char *path, const char *header, int ids, int (*show)(struct seq_file *, void *)) @@ -184,23 +174,21 @@ void __init ipc_init_proc_interface(const char *path, const char *header, NULL, /* parent dir */ &sysvipc_proc_fops, iface); - if (!pde) { + if (!pde) kfree(iface); - } } #endif /** - * ipc_findkey - find a key in an ipc identifier set - * @ids: Identifier set - * @key: The key to find + * ipc_findkey - find a key in an ipc identifier set + * @ids: ipc identifier set + * @key: key to find * - * Requires ipc_ids.rwsem locked. - * Returns the LOCKED pointer to the ipc structure if found or NULL - * if not. - * If key is found ipc points to the owning ipc structure + * Returns the locked pointer to the ipc structure if found or NULL + * otherwise. If key is found ipc points to the owning ipc structure + * + * Called with ipc_ids.rwsem held. */ - static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) { struct kern_ipc_perm *ipc; @@ -227,12 +215,11 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) } /** - * ipc_get_maxid - get the last assigned id - * @ids: IPC identifier set + * ipc_get_maxid - get the last assigned id + * @ids: ipc identifier set * - * Called with ipc_ids.rwsem held. + * Called with ipc_ids.rwsem held. 
*/ - int ipc_get_maxid(struct ipc_ids *ids) { struct kern_ipc_perm *ipc; @@ -258,19 +245,19 @@ int ipc_get_maxid(struct ipc_ids *ids) } /** - * ipc_addid - add an IPC identifier - * @ids: IPC identifier set - * @new: new IPC permission set - * @size: limit for the number of used ids + * ipc_addid - add an ipc identifier + * @ids: ipc identifier set + * @new: new ipc permission set + * @size: limit for the number of used ids * - * Add an entry 'new' to the IPC ids idr. The permissions object is - * initialised and the first free entry is set up and the id assigned - * is returned. The 'new' entry is returned in a locked state on success. - * On failure the entry is not locked and a negative err-code is returned. + * Add an entry 'new' to the ipc ids idr. The permissions object is + * initialised and the first free entry is set up and the id assigned + * is returned. The 'new' entry is returned in a locked state on success. + * On failure the entry is not locked and a negative err-code is returned. * - * Called with writer ipc_ids.rwsem held. + * Called with writer ipc_ids.rwsem held. */ -int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) +int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) { kuid_t euid; kgid_t egid; @@ -286,7 +273,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) idr_preload(GFP_KERNEL); spin_lock_init(&new->lock); - new->deleted = 0; + new->deleted = false; rcu_read_lock(); spin_lock(&new->lock); @@ -308,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) if (next_id < 0) { new->seq = ids->seq++; - if (ids->seq > ids->seq_max) + if (ids->seq > IPCID_SEQ_MAX) ids->seq = 0; } else { new->seq = ipcid_to_seqx(next_id); @@ -320,14 +307,14 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) } /** - * ipcget_new - create a new ipc object - * @ns: namespace - * @ids: IPC identifer set - * @ops: the actual creation routine to call - * @params: its parameters - * - * This routine is called by sys_msgget, sys_semget() and sys_shmget() - * when the key is IPC_PRIVATE. + * ipcget_new - create a new ipc object + * @ns: ipc namespace + * @ids: ipc identifer set + * @ops: the actual creation routine to call + * @params: its parameters + * + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is IPC_PRIVATE. */ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, struct ipc_ops *ops, struct ipc_params *params) @@ -341,19 +328,19 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, } /** - * ipc_check_perms - check security and permissions for an IPC - * @ns: IPC namespace - * @ipcp: ipc permission set - * @ops: the actual security routine to call - * @params: its parameters + * ipc_check_perms - check security and permissions for an ipc object + * @ns: ipc namespace + * @ipcp: ipc permission set + * @ops: the actual security routine to call + * @params: its parameters * - * This routine is called by sys_msgget(), sys_semget() and sys_shmget() - * when the key is not IPC_PRIVATE and that key already exists in the - * ids IDR. + * This routine is called by sys_msgget(), sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE and that key already exists in the + * ds IDR. * - * On success, the IPC id is returned. + * On success, the ipc id is returned. * - * It is called with ipc_ids.rwsem and ipcp->lock held. + * It is called with ipc_ids.rwsem and ipcp->lock held. 
*/ static int ipc_check_perms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, @@ -374,18 +361,18 @@ static int ipc_check_perms(struct ipc_namespace *ns, } /** - * ipcget_public - get an ipc object or create a new one - * @ns: namespace - * @ids: IPC identifer set - * @ops: the actual creation routine to call - * @params: its parameters - * - * This routine is called by sys_msgget, sys_semget() and sys_shmget() - * when the key is not IPC_PRIVATE. - * It adds a new entry if the key is not found and does some permission - * / security checkings if the key is found. - * - * On success, the ipc id is returned. + * ipcget_public - get an ipc object or create a new one + * @ns: ipc namespace + * @ids: ipc identifer set + * @ops: the actual creation routine to call + * @params: its parameters + * + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE. + * It adds a new entry if the key is not found and does some permission + * / security checkings if the key is found. + * + * On success, the ipc id is returned. */ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, struct ipc_ops *ops, struct ipc_params *params) @@ -431,39 +418,33 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, /** - * ipc_rmid - remove an IPC identifier - * @ids: IPC identifier set - * @ipcp: ipc perm structure containing the identifier to remove + * ipc_rmid - remove an ipc identifier + * @ids: ipc identifier set + * @ipcp: ipc perm structure containing the identifier to remove * - * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held - * before this function is called, and remain locked on the exit. + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held + * before this function is called, and remain locked on the exit. */ - void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { int lid = ipcid_to_idx(ipcp->id); idr_remove(&ids->ipcs_idr, lid); - ids->in_use--; - - ipcp->deleted = 1; - - return; + ipcp->deleted = true; } /** - * ipc_alloc - allocate ipc space - * @size: size desired + * ipc_alloc - allocate ipc space + * @size: size desired * - * Allocate memory from the appropriate pools and return a pointer to it. - * NULL is returned if the allocation fails + * Allocate memory from the appropriate pools and return a pointer to it. + * NULL is returned if the allocation fails */ - void *ipc_alloc(int size) { void *out; - if(size > PAGE_SIZE) + if (size > PAGE_SIZE) out = vmalloc(size); else out = kmalloc(size, GFP_KERNEL); @@ -471,28 +452,27 @@ void *ipc_alloc(int size) } /** - * ipc_free - free ipc space - * @ptr: pointer returned by ipc_alloc - * @size: size of block + * ipc_free - free ipc space + * @ptr: pointer returned by ipc_alloc + * @size: size of block * - * Free a block created with ipc_alloc(). The caller must know the size - * used in the allocation call. + * Free a block created with ipc_alloc(). The caller must know the size + * used in the allocation call. */ - -void ipc_free(void* ptr, int size) +void ipc_free(void *ptr, int size) { - if(size > PAGE_SIZE) + if (size > PAGE_SIZE) vfree(ptr); else kfree(ptr); } /** - * ipc_rcu_alloc - allocate ipc and rcu space - * @size: size desired + * ipc_rcu_alloc - allocate ipc and rcu space + * @size: size desired * - * Allocate memory for the rcu header structure + the object. - * Returns the pointer to the object or NULL upon failure. + * Allocate memory for the rcu header structure + the object. 
+ * Returns the pointer to the object or NULL upon failure. */ void *ipc_rcu_alloc(int size) { @@ -534,17 +514,16 @@ void ipc_rcu_free(struct rcu_head *head) } /** - * ipcperms - check IPC permissions - * @ns: IPC namespace - * @ipcp: IPC permission set - * @flag: desired permission set. + * ipcperms - check ipc permissions + * @ns: ipc namespace + * @ipcp: ipc permission set + * @flag: desired permission set * - * Check user, group, other permissions for access - * to ipc resources. return 0 if allowed + * Check user, group, other permissions for access + * to ipc resources. return 0 if allowed * - * @flag will most probably be 0 or S_...UGO from <linux/stat.h> + * @flag will most probably be 0 or S_...UGO from <linux/stat.h> */ - int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) { kuid_t euid = current_euid(); @@ -572,16 +551,14 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) */ /** - * kernel_to_ipc64_perm - convert kernel ipc permissions to user - * @in: kernel permissions - * @out: new style IPC permissions + * kernel_to_ipc64_perm - convert kernel ipc permissions to user + * @in: kernel permissions + * @out: new style ipc permissions * - * Turn the kernel object @in into a set of permissions descriptions - * for returning to userspace (@out). + * Turn the kernel object @in into a set of permissions descriptions + * for returning to userspace (@out). */ - - -void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) +void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out) { out->key = in->key; out->uid = from_kuid_munged(current_user_ns(), in->uid); @@ -593,15 +570,14 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) } /** - * ipc64_perm_to_ipc_perm - convert new ipc permissions to old - * @in: new style IPC permissions - * @out: old style IPC permissions + * ipc64_perm_to_ipc_perm - convert new ipc permissions to old + * @in: new style ipc permissions + * @out: old style ipc permissions * - * Turn the new style permissions object @in into a compatibility - * object and store it into the @out pointer. + * Turn the new style permissions object @in into a compatibility + * object and store it into the @out pointer. */ - -void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) +void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) { out->key = in->key; SET_UID(out->uid, in->uid); @@ -635,8 +611,8 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) } /** - * ipc_lock - Lock an ipc structure without rwsem held - * @ids: IPC identifier set + * ipc_lock - lock an ipc structure without rwsem held + * @ids: ipc identifier set * @id: ipc id to look for * * Look for an id in the ipc ids idr and lock the associated ipc object. @@ -657,7 +633,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) /* ipc_rmid() may have already freed the ID while ipc_lock * was spinning: here verify that the structure is still valid */ - if (!out->deleted) + if (ipc_valid_object(out)) return out; spin_unlock(&out->lock); @@ -693,11 +669,11 @@ out: /** * ipcget - Common sys_*get() code - * @ns : namsepace - * @ids : IPC identifier set - * @ops : operations to be called on ipc object creation, permission checks - * and further checks - * @params : the parameters needed by the previous operations. 
@@ -711,7 +687,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 }

 /**
- * ipc_update_perm - update the permissions of an IPC.
+ * ipc_update_perm - update the permissions of an ipc object
  * @in: the permission given as input.
  * @out: the permission of the ipc to set.
  */
@@ -732,7 +708,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)

 /**
  * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
- * @ns: the ipc namespace
+ * @ns: ipc namespace
  * @ids: the table of ids where to look for the ipc
  * @id: the id of the ipc to retrieve
  * @cmd: the cmd to check
@@ -779,15 +755,14 @@ err:

 /**
- * ipc_parse_version - IPC call version
- * @cmd: pointer to command
+ * ipc_parse_version - ipc call version
+ * @cmd: pointer to command
  *
- * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
- * The @cmd value is turned from an encoding command and version into
- * just the command code.
+ * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
+ * The @cmd value is turned from an encoding command and version into
+ * just the command code.
  */
-
-int ipc_parse_version (int *cmd)
+int ipc_parse_version(int *cmd)
 {
 	if (*cmd & IPC_64) {
 		*cmd ^= IPC_64;
@@ -824,7 +799,7 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
 	if (total >= ids->in_use)
 		return NULL;

-	for ( ; pos < IPCMNI; pos++) {
+	for (; pos < IPCMNI; pos++) {
 		ipc = idr_find(&ids->ipcs_idr, pos);
 		if (ipc != NULL) {
 			*new_pos = pos + 1;
@@ -927,8 +902,10 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
 		goto out;

 	ret = seq_open(file, &sysvipc_proc_seqops);
-	if (ret)
-		goto out_kfree;
+	if (ret) {
+		kfree(iter);
+		goto out;
+	}

 	seq = file->private_data;
 	seq->private = iter;
@@ -937,9 +914,6 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
 	iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
 out:
 	return ret;
-out_kfree:
-	kfree(iter);
-	goto out;
 }

 static int sysvipc_proc_release(struct inode *inode, struct file *file)
diff --git a/ipc/util.h b/ipc/util.h
index 59d78aa94987..9c47d6f6c7b4 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -15,9 +15,9 @@
 #define SEQ_MULTIPLIER	(IPCMNI)

-void sem_init (void);
-void msg_init (void);
-void shm_init (void);
+void sem_init(void);
+void msg_init(void);
+void shm_init(void);

 struct ipc_namespace;
@@ -100,6 +100,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
+#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX)

 /* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
@@ -116,8 +117,8 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
 /* for rare, potentially huge allocations.
  * both function can sleep
  */
-void* ipc_alloc(int size);
-void ipc_free(void* ptr, int size);
+void *ipc_alloc(int size);
+void ipc_free(void *ptr, int size);

 /*
  * For allocation that need to be freed by RCU.
@@ -125,7 +126,7 @@ void ipc_free(void* ptr, int size);
  * getref increases the refcount, the putref call that reduces the recount
  * to 0 schedules the rcu destruction. Caller must guarantee locking.
  */
-void* ipc_rcu_alloc(int size);
+void *ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
 void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
 void ipc_rcu_free(struct rcu_head *head);
@@ -144,7 +145,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
 /* On IA-64, we always use the "64-bit version" of the IPC structures.  */
 # define ipc_parse_version(cmd)	IPC_64
 #else
-int ipc_parse_version (int *cmd);
+int ipc_parse_version(int *cmd);
 #endif

 extern void free_msg(struct msg_msg *msg);
@@ -185,6 +186,19 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
 	rcu_read_unlock();
 }

+/*
+ * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
+ * where the respective ipc_ids.rwsem is not being held down.
+ * Checks whether the ipc object is still around or if it's gone already, as
+ * ipc_rmid() may have already freed the ID while the ipc lock was spinning.
+ * Needs to be called with kern_ipc_perm.lock held -- exception made for one
+ * checkpoint case at sys_semtimedop() as noted in code commentary.
+ */
+static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
+{
+	return !perm->deleted;
+}
+
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 			struct ipc_ops *ops, struct ipc_params *params);
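Editorial aside: the value of the new ipc_valid_object() helper is the locking idiom it documents, also visible in the ipc_lock() hunk above: take the per-object lock, then re-check a "deleted" flag, because the remover may have marked the object dead while this CPU was spinning on the lock. The following is a minimal userspace sketch of that same idiom using a pthread mutex; all names here (struct obj, obj_valid, obj_read) are invented for illustration and are not kernel APIs.

/* Hypothetical userspace analogue of the ipc_valid_object() idiom.
 * Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	bool deleted;		/* set by the remover, like kern_ipc_perm.deleted */
	int payload;
};

static inline bool obj_valid(const struct obj *o)
{
	return !o->deleted;	/* mirrors ipc_valid_object() */
}

/* Return the payload, or -1 if the object was removed under us. */
static int obj_read(struct obj *o)
{
	int ret = -1;

	pthread_mutex_lock(&o->lock);
	if (obj_valid(o))	/* re-validate only after the lock is held */
		ret = o->payload;
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, false, 42 };

	printf("%d\n", obj_read(&o));	/* prints 42 */
	o.deleted = true;		/* the "IPC_RMID" happened */
	printf("%d\n", obj_read(&o));	/* prints -1 */
	return 0;
}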
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac738781d356..60bafbed06ab 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
 	size_t r;

 	va_start(args, fmt);
-	r = vsnprintf(buf, sizeof(buf), fmt, args);
+	r = vscnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);

 	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
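Editorial aside: the kexec hunk swaps vsnprintf() for vscnprintf() because vsnprintf() returns the length that *would* have been written, which can exceed the buffer and then poison the size accounting that follows; the kernel's vscnprintf() returns the number of characters actually stored. vscnprintf() does not exist in userspace libc, so the sketch below emulates it with an invented helper (scnprintf_emul) purely to show the difference in return values.

/* Illustration only: snprintf() reports the would-be length,
 * a scnprintf-style helper reports what actually fits. */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf_emul(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (i < 0)
		return 0;
	return (size_t)i < size ? i : (int)(size - 1);	/* chars actually stored */
}

int main(void)
{
	char buf[8];
	int would_be = snprintf(buf, sizeof(buf), "PAGESIZE=%d\n", 4096);
	int stored = scnprintf_emul(buf, sizeof(buf), "PAGESIZE=%d\n", 4096);

	printf("snprintf returned %d, scnprintf-style returned %d, buf=\"%s\"\n",
	       would_be, stored, buf);
	return 0;
}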
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8a1e6e104892..850967068aaf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -8,6 +8,8 @@
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp

 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
@@ -136,7 +138,6 @@ void _local_bh_enable(void)
 	WARN_ON_ONCE(in_irq());
 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
-
 EXPORT_SYMBOL(_local_bh_enable);

 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
-	 */
+	 */
 	preempt_count_sub(cnt - 1);

 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
 	struct softirq_action *h;
 	bool in_hardirq;
 	__u32 pending;
+	int softirq_bit;
 	int cpu;

 	/*
@@ -253,30 +255,30 @@ restart:

 	h = softirq_vec;

-	do {
-		if (pending & 1) {
-			unsigned int vec_nr = h - softirq_vec;
-			int prev_count = preempt_count();
-
-			kstat_incr_softirqs_this_cpu(vec_nr);
-
-			trace_softirq_entry(vec_nr);
-			h->action(h);
-			trace_softirq_exit(vec_nr);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %u %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", vec_nr,
-				       softirq_to_name[vec_nr], h->action,
-				       prev_count, preempt_count());
-				preempt_count_set(prev_count);
-			}
+	while ((softirq_bit = ffs(pending))) {
+		unsigned int vec_nr;
+		int prev_count;

-			rcu_bh_qs(cpu);
+		h += softirq_bit - 1;
+
+		vec_nr = h - softirq_vec;
+		prev_count = preempt_count();
+
+		kstat_incr_softirqs_this_cpu(vec_nr);
+
+		trace_softirq_entry(vec_nr);
+		h->action(h);
+		trace_softirq_exit(vec_nr);
+		if (unlikely(prev_count != preempt_count())) {
+			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+			       vec_nr, softirq_to_name[vec_nr], h->action,
+			       prev_count, preempt_count());
+			preempt_count_set(prev_count);
 		}
+		rcu_bh_qs(cpu);
 		h++;
-		pending >>= 1;
-	} while (pending);
+		pending >>= softirq_bit;
+	}

 	local_irq_disable();
@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 /*
  * Tasklets
  */
-struct tasklet_head
-{
+struct tasklet_head {
 	struct tasklet_struct *head;
 	struct tasklet_struct **tail;
 };
@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_schedule);

 void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule);

 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);

 static void tasklet_action(struct softirq_action *a)
@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+							&t->state))
 					BUG();
 				t->func(t->data);
 				tasklet_unlock(t);
@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+							&t->state))
 					BUG();
 				t->func(t->data);
 				tasklet_unlock(t);
@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
 	}
 }
-
 void tasklet_init(struct tasklet_struct *t,
 		  void (*func)(unsigned long), unsigned long data)
 {
@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
 	t->func = func;
 	t->data = data;
 }
-
 EXPORT_SYMBOL(tasklet_init);

 void tasklet_kill(struct tasklet_struct *t)
 {
 	if (in_interrupt())
-		printk("Attempt to kill tasklet from interrupt\n");
+		pr_notice("Attempt to kill tasklet from interrupt\n");

 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
 		do {
@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
-
 EXPORT_SYMBOL(tasklet_kill);

 /*
@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */

-static int cpu_callback(struct notifier_block *nfb,
-			unsigned long action,
-			void *hcpu)
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
 {
 	switch (action) {
 #ifdef CONFIG_HOTPLUG_CPU
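Editorial aside: the rewritten __do_softirq() loop above replaces the bit-at-a-time "shift and test" scan with ffs(), which jumps directly to the next pending vector and shifts the whole run away in one step. A minimal userspace sketch of the same scan is shown below; the vector names are reused for flavor only and the program is not kernel code.

/* Minimal sketch of the ffs()-based scan: visit each set bit of a
 * pending mask without testing the cleared bits in between. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static const char * const name[] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

int main(void)
{
	unsigned int pending = (1u << 1) | (1u << 3) | (1u << 9); /* TIMER, NET_RX, RCU */
	unsigned int vec_nr = 0;
	int bit;

	while ((bit = ffs(pending))) {
		vec_nr += bit - 1;	/* mirrors "h += softirq_bit - 1" */
		printf("run softirq %u (%s)\n", vec_nr, name[vec_nr]);
		vec_nr++;		/* mirrors "h++" */
		pending >>= bit;	/* mirrors "pending >>= softirq_bit" */
	}
	return 0;
}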
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 600ac57e2777..7288e38e1757 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -268,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
  */
 static inline int parse_lineno(const char *str, unsigned int *val)
 {
-	char *end = NULL;
 	BUG_ON(str == NULL);
 	if (*str == '\0') {
 		*val = 0;
 		return 0;
 	}
-	*val = simple_strtoul(str, &end, 10);
-	if (end == NULL || end == str || *end != '\0') {
+	if (kstrtouint(str, 10, val) < 0) {
 		pr_err("bad line-number: %s\n", str);
 		return -EINVAL;
 	}
@@ -348,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
 			}
 			if (last)
 				*last++ = '\0';
-			if (parse_lineno(first, &query->first_lineno) < 0) {
-				pr_err("line-number is <0\n");
+			if (parse_lineno(first, &query->first_lineno) < 0)
 				return -EINVAL;
-			}
+
 			if (last) {
 				/* range <first>-<last> */
-				if (parse_lineno(last, &query->last_lineno)
-				    < query->first_lineno) {
+				if (parse_lineno(last, &query->last_lineno) < 0)
+					return -EINVAL;
+
+				if (query->last_lineno < query->first_lineno) {
 					pr_err("last-line:%d < 1st-line:%d\n",
 						query->last_lineno,
 						query->first_lineno);
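Editorial aside: the dynamic_debug change makes parse_lineno() rely on kstrtouint(), which rejects trailing junk and overflow in a single call instead of hand-checking the strtoul end pointer, and it fixes the range check so a parse error is no longer confused with a "last < first" error. The sketch below is a rough userspace counterpart (parse_lineno_demo is an invented name) under the same "whole string must be a number" rule.

/* Rough userspace counterpart of the stricter parse_lineno(). */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_lineno_demo(const char *str, unsigned int *val)
{
	char *end;
	unsigned long tmp;

	if (*str == '\0') {	/* empty means "no line filter", as in the original */
		*val = 0;
		return 0;
	}
	errno = 0;
	tmp = strtoul(str, &end, 10);
	if (errno || end == str || *end != '\0' || tmp > UINT_MAX) {
		fprintf(stderr, "bad line-number: %s\n", str);
		return -EINVAL;
	}
	*val = (unsigned int)tmp;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("\"1234\" -> %d\n", parse_lineno_demo("1234", &v));	/* 0, v == 1234 */
	printf("\"12x4\" -> %d\n", parse_lineno_demo("12x4", &v));	/* -EINVAL */
	return 0;
}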
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2e1c102759ce..b604b831f4d1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -172,7 +172,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = memblock_virt_alloc_nopanic(
+	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
 						PAGE_ALIGN(io_tlb_overflow),
 						PAGE_SIZE);
 	if (!v_overflow_buffer)
@@ -220,7 +220,7 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

 	/* Get IO TLB memory from the low pages */
-	vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
diff --git a/mm/memblock.c b/mm/memblock.c
index 9c0aeef19440..87d21a6ff63c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 	if (!align)
 		align = SMP_CACHE_BYTES;

-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 	found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
 	if (found && !memblock_reserve(found, size))
 		return found;
@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal(
 	if (!align)
 		align = SMP_CACHE_BYTES;

-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 again:
 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
 					    nid);
diff --git a/mm/migrate.c b/mm/migrate.c
index 734704f6f29b..482a33d89134 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					  __GFP_NOMEMALLOC | __GFP_NORETRY |
 					  __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
-	if (newpage)
-		page_cpupid_xchg_last(newpage, page_cpupid_last(page));

 	return newpage;
 }
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 857a6434e3a5..4074caf9936b 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void)

 	return 0;
 }
-pure_initcall(mm_sysfs_init);
+postcore_initcall(mm_sysfs_init);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e4f0db2a3eae..0fdf96803c5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }

 /*
- * Walk a vmap address to the physical pfn it maps to.
+ * Walk a vmap address to the struct page it maps.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
-	unsigned long pfn = 0;
+	struct page *page = NULL;
 	pgd_t *pgd = pgd_offset_k(addr);

 	/*
@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 				ptep = pte_offset_map(pmd, addr);
 				pte = *ptep;
 				if (pte_present(pte))
-					pfn = pte_pfn(pte);
+					page = pte_page(pte);
 				pte_unmap(ptep);
 			}
 		}
 	}
-	return pfn;
+	return page;
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
+EXPORT_SYMBOL(vmalloc_to_page);

 /*
- * Map a vmalloc()-space virtual address to the struct page.
+ * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
-	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
+	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
-EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_to_pfn);

 /*** Global kva allocator ***/
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 1dbd6d1cd1b5..0ea2a1e24ade 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2665,6 +2665,15 @@ sub process {
 				 $herecurr);
 		}

+# check for function declarations without arguments like "int foo()"
+		if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
+			if (ERROR("FUNCTION_WITHOUT_ARGS",
+				  "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
+			}
+		}
+
 # check for uses of DEFINE_PCI_DEVICE_TABLE
 		if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
 			if (WARN("DEFINE_PCI_DEVICE_TABLE",
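Editorial aside: the new checkpatch FUNCTION_WITHOUT_ARGS error above flags declarations such as "int foo()". The reason is sketched in C below: before C23, an empty parameter list is an old-style declaration that disables argument checking, so calls with bogus arguments typically compile without diagnostics, whereas "(void)" makes them a hard error. The names foo/bar are invented for illustration.

/* Why "int foo()" is flagged: the empty parameter list leaves the
 * arguments unchecked (pre-C23); "(void)" does not. */
#include <stdio.h>

int foo() { return 1; }		/* unspecified parameters - now a checkpatch error */
int bar(void) { return 2; }	/* explicitly takes no arguments */

int main(void)
{
	printf("%d\n", foo(42, "junk"));	/* accepted silently by most compilers */
	/* printf("%d\n", bar(42)); */		/* would fail: too many arguments */
	printf("%d\n", bar());
	return 0;
}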