From cbf6b1ba7ae12b3f7cb6b0d060b88d44649f9eda Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Tue, 12 Jan 2010 19:01:11 +0900
Subject: sh: Always provide thread_info allocators.

Presently the thread_info allocators are special-cased, depending on
THREAD_SHIFT < PAGE_SHIFT. This provides a sensible definition for them
regardless of configuration, in preparation for extended CPU state.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/process.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 arch/sh/kernel/process.c

(limited to 'arch/sh/kernel/process.c')

diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 000000000000..b2bda83baeef
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,47 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+#else
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+	gfp_t mask = GFP_KERNEL;
+#endif
+	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
--
cgit v1.2.3


From a3705799e2cc5fb69d88ad6a7f317a8f5597f18d Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Tue, 12 Jan 2010 19:10:06 +0900
Subject: sh: Use SLAB_PANIC for thread_info slab cache.

Presently this has a BUG_ON() for failure cases, as powerpc does.
Switch this over to SLAB_PANIC instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/process.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/sh/kernel/process.c')

diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index b2bda83baeef..077e06e1a889 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -26,8 +26,7 @@ void free_thread_info(struct thread_info *ti)
 void thread_info_cache_init(void)
 {
 	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
+					      THREAD_SIZE, SLAB_PANIC, NULL);
 }
 #else
 struct thread_info *alloc_thread_info(struct task_struct *tsk)
--
cgit v1.2.3
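
A minimal standalone sketch of the idiom change in the patch above; the
names ("example_cache", the 64-byte sizing, the init functions) are
hypothetical and not from the patch. With SLAB_PANIC, kmem_cache_create()
panics internally if the cache cannot be created, so the caller-side
BUG_ON() becomes redundant for boot-time caches that must not fail:

	#include <linux/bug.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	static struct kmem_cache *example_cache;

	/* Before: open-coded failure check at init time. */
	static void __init example_cache_init_checked(void)
	{
		example_cache = kmem_cache_create("example", 64, 64, 0, NULL);
		BUG_ON(example_cache == NULL);
	}

	/* After: the slab core panics on failure; no caller-side check. */
	static void __init example_cache_init_panic(void)
	{
		example_cache = kmem_cache_create("example", 64, 64,
						  SLAB_PANIC, NULL);
	}

Since a boot-time allocation failure here is unrecoverable either way,
delegating the panic to the slab core trims a line per call site.
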
From 0ea820cf9bf58f735ed40ec67947159c4f170012 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Wed, 13 Jan 2010 12:51:40 +0900
Subject: sh: Move over to dynamically allocated FPU context.

This follows the x86 xstate changes and implements a task_xstate slab
cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.

This also tidies up and consolidates some of the SH-2A/SH-4 FPU
fragmentation. Now fpu state restorers are commonly defined, with the
init_fpu()/fpu_init() mess reworked to follow the x86 convention.
The fpu_init() register initialization has been replaced by xstate
setup followed by writing out to hardware via the standard restore path.

As init_fpu() now performs a slab allocation, a secondary, lighter-weight
restorer is also introduced for the context switch.

In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently
uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/process.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

(limited to 'arch/sh/kernel/process.c')

diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 077e06e1a889..81add9b9ea6e 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -2,6 +2,32 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+
+	if (src->thread.xstate) {
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	}
+
+	return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+	if (tsk->thread.xstate) {
+		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+		tsk->thread.xstate = NULL;
+	}
+}
+
 #if THREAD_SHIFT < PAGE_SHIFT
 static struct kmem_cache *thread_info_cache;
 
@@ -20,6 +46,7 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
 
 void free_thread_info(struct thread_info *ti)
 {
+	free_thread_xstate(ti->task);
 	kmem_cache_free(thread_info_cache, ti);
 }
 
@@ -41,6 +68,33 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
 
 void free_thread_info(struct thread_info *ti)
 {
+	free_thread_xstate(ti->task);
 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
 #endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+void arch_task_cache_init(void)
+{
+	if (!xstate_size)
+		return;
+
+	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+					       __alignof__(union thread_xstate),
+					       SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP	1
+#else
+# define HAVE_SOFTFP	0
+#endif
+
+void init_thread_xstate(void)
+{
+	if (boot_cpu_data.flags & CPU_HAS_FPU)
+		xstate_size = sizeof(struct sh_fpu_hard_struct);
+	else if (HAVE_SOFTFP)
+		xstate_size = sizeof(struct sh_fpu_soft_struct);
+	else
+		xstate_size = 0;
+}
--
cgit v1.2.3
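
A minimal userspace sketch of the copy-on-fork pattern that
arch_dup_task_struct() implements in the patch above; "struct task",
"dup_task", and the fixed xstate_size value here are hypothetical
stand-ins, not kernel API. The extended-state buffer is duplicated only
when the parent actually has one, so tasks that never touch the FPU
never pay for the allocation:

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	struct task {
		void *xstate;	/* NULL until the task first uses the FPU */
	};

	/* Stand-in for the size probed once at boot by init_thread_xstate(). */
	static size_t xstate_size = 128;

	static int dup_task(struct task *dst, const struct task *src)
	{
		*dst = *src;

		/* Copy the extended state only if the parent has any. */
		if (src->xstate) {
			dst->xstate = malloc(xstate_size);
			if (!dst->xstate)
				return -ENOMEM;
			memcpy(dst->xstate, src->xstate, xstate_size);
		}
		return 0;
	}

	static void free_task_xstate(struct task *tsk)
	{
		free(tsk->xstate);
		tsk->xstate = NULL;
	}

Sizing the buffer once at boot, as init_thread_xstate() does, lets a
single slab cache serve hard-FP, soft-FP, and FPU-less configurations
without any per-task branching on the state format.
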