author		Chang S. Bae <chang.seok.bae@intel.com>	2018-09-18 16:08:53 -0700
committer	Ingo Molnar <mingo@kernel.org>		2018-10-08 10:41:08 +0200
commit		b1378a561fd16afdd96ef0bc912b1bcd2b85a68e (patch)
tree		9501db7c1545546c2a6fa1eb558ec1ecca1e11f8 /arch/x86/kernel/process_64.c
parent		07e1d88adaaeab247b300926f78cc3f950dbeda3 (diff)
x86/fsgsbase/64: Introduce FS/GS base helper functions
Introduce FS/GS base access functionality via <asm/fsgsbase.h>, not yet
used by anything directly.

Factor out task_seg_base() from x86/ptrace.c and rename it to
x86_fsgsbase_read_task() to make it part of the new helpers.

This will allow us to enhance FSGSBASE support and eventually enable the
FSGSBASE instructions.

An "inactive" GS base refers to a base saved at kernel entry that belongs
to an inactive, non-running/stopped user task. (The typical ptrace model.)

Here are the new functions:

	x86_fsbase_read_task()
	x86_gsbase_read_task()
	x86_fsbase_write_task()
	x86_gsbase_write_task()
	x86_fsbase_read_cpu()
	x86_fsbase_write_cpu()
	x86_gsbase_read_cpu_inactive()
	x86_gsbase_write_cpu_inactive()

As an advantage of the unified namespace, we can now see all FS/GS base
API use in the kernel via the following 'git grep' pattern:

	$ git grep x86_.*sbase

[ mingo: Wrote new changelog. ]

Based-on-code-from: Andy Lutomirski <luto@kernel.org>
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Markus T Metzger <markus.t.metzger@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1537312139-5580-3-git-send-email-chang.seok.bae@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
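For illustration, here is a minimal sketch of how a consumer might use the
new read helpers. Only the x86_*_read_task() calls come from this patch;
dump_task_bases() and its surroundings are hypothetical:

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <asm/fsgsbase.h>

	/* Hypothetical debug helper: report another task's FS/GS bases. */
	static void dump_task_bases(struct task_struct *task)
	{
		unsigned long fsbase = x86_fsbase_read_task(task);
		unsigned long gsbase = x86_gsbase_read_task(task);

		pr_info("%s/%d: fsbase=%#lx gsbase=%#lx\n",
			task->comm, task->pid, fsbase, gsbase);
	}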
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	124
1 file changed, 124 insertions, 0 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ea5ea850348d..2a53ff8d1baf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -54,6 +54,7 @@
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
+#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
@@ -286,6 +287,129 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+				     unsigned short selector)
+{
+	unsigned short idx = selector >> 3;
+	unsigned long base;
+
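+	/* Bit 2 of the selector is the Table Indicator: 0 = GDT, 1 = LDT. */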
+	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
+		if (unlikely(idx >= GDT_ENTRIES))
+			return 0;
+
+		/*
+		 * There are no user segments in the GDT with nonzero bases
+		 * other than the TLS segments.
+		 */
+		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+			return 0;
+
+		idx -= GDT_ENTRY_TLS_MIN;
+		base = get_desc_base(&task->thread.tls_array[idx]);
+	} else {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+		struct ldt_struct *ldt;
+
+		/*
+		 * If performance here mattered, we could protect the LDT
+		 * with RCU.  This is a slow path, though, so we can just
+		 * take the mutex.
+		 */
+		mutex_lock(&task->mm->context.lock);
+		ldt = task->mm->context.ldt;
+		if (unlikely(!ldt || idx >= ldt->nr_entries))
+			base = 0;
+		else
+			base = get_desc_base(ldt->entries + idx);
+		mutex_unlock(&task->mm->context.lock);
+#else
+		base = 0;
+#endif
+	}
+
+	return base;
+}
+
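+/*
+ * The base MSRs are written with WRMSR here; per the changelog above, the
+ * FSGSBASE instructions (WRFSBASE/WRGSBASE) may replace this eventually.
+ */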
+void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+	/*
+	 * Set the selector to 0 to mark the segment base as overwritten;
+	 * the context-switch code checks for a zero selector to skip the
+	 * segment load.
+	 */
+	loadseg(FS, 0);
+	wrmsrl(MSR_FS_BASE, fsbase);
+}
+
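+/*
+ * The "inactive" GS base lives in MSR_KERNEL_GS_BASE while the task runs
+ * in the kernel: SWAPGS exchanges it with the active GS base on kernel
+ * entry/exit, so this is where a user task's GS base is written.
+ */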
+void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+	/* Set the selector to 0 for the same reason as %fs above. */
+	loadseg(GS, 0);
+	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
+
+unsigned long x86_fsbase_read_task(struct task_struct *task)
+{
+	unsigned long fsbase;
+
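+	/*
+	 * Three cases: the current task reads the live MSR; a zero fsindex
+	 * means the saved thread.fsbase is authoritative; otherwise the
+	 * base must be looked up in the descriptor table that the selector
+	 * references.
+	 */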
+	if (task == current)
+		fsbase = x86_fsbase_read_cpu();
+	else if (task->thread.fsindex == 0)
+		fsbase = task->thread.fsbase;
+	else
+		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
+
+	return fsbase;
+}
+
+unsigned long x86_gsbase_read_task(struct task_struct *task)
+{
+	unsigned long gsbase;
+
+	if (task == current)
+		gsbase = x86_gsbase_read_cpu_inactive();
+	else if (task->thread.gsindex == 0)
+		gsbase = task->thread.gsbase;
+	else
+		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
+
+	return gsbase;
+}
+
+int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+{
+	/*
+	 * Not strictly needed for %fs, but do it for symmetry
+	 * with %gs.
+	 */
+	if (unlikely(fsbase >= TASK_SIZE_MAX))
+		return -EPERM;
+
+	preempt_disable();
+	task->thread.fsbase = fsbase;
+	if (task == current)
+		x86_fsbase_write_cpu(fsbase);
+	task->thread.fsindex = 0;
+	preempt_enable();
+
+	return 0;
+}
+
+int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+{
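+	/*
+	 * Bases at or above TASK_SIZE_MAX are rejected: they are either
+	 * non-canonical (the WRMSR would #GP) or kernel addresses that
+	 * must never be installed for a user task.
+	 */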
+	if (unlikely(gsbase >= TASK_SIZE_MAX))
+		return -EPERM;
+
+	preempt_disable();
+	task->thread.gsbase = gsbase;
+	if (task == current)
+		x86_gsbase_write_cpu_inactive(gsbase);
+	task->thread.gsindex = 0;
+	preempt_enable();
+
+	return 0;
+}
+
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{