Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/boot/compressed/aslr.c         2
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c    2
-rw-r--r--  arch/x86/include/asm/msr.h             11
-rw-r--r--  arch/x86/include/asm/pvclock.h          2
-rw-r--r--  arch/x86/include/asm/stackprotector.h   2
-rw-r--r--  arch/x86/include/asm/tsc.h              2
-rw-r--r--  arch/x86/kernel/apb_timer.c             8
-rw-r--r--  arch/x86/kernel/apic/apic.c             8
-rw-r--r--  arch/x86/kernel/cpu/amd.c               4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c        4
-rw-r--r--  arch/x86/kernel/espfix_64.c             2
-rw-r--r--  arch/x86/kernel/hpet.c                  4
-rw-r--r--  arch/x86/kernel/trace_clock.c           2
-rw-r--r--  arch/x86/kernel/tsc.c                   4
-rw-r--r--  arch/x86/kvm/lapic.c                    4
-rw-r--r--  arch/x86/kvm/svm.c                      4
-rw-r--r--  arch/x86/kvm/vmx.c                      4
-rw-r--r--  arch/x86/kvm/x86.c                     12
-rw-r--r--  arch/x86/lib/delay.c                    8
19 files changed, 49 insertions, 40 deletions
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index ea33236190b1..6a9b96b4624d 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC");
- raw = native_read_tsc();
+ raw = rdtsc();
random ^= raw;
use_i8254 = false;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 972b488ac16a..0340d93c18ca 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
* but no one has ever seen it happen.
*/
rdtsc_barrier();
- ret = (cycle_t)native_read_tsc();
+ ret = (cycle_t)rdtsc();
last = gtod->cycle_last;
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c89ed6ceed02..ff0c120dafe5 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer. The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect. The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
{
DECLARE_ARGS(val, low, high);
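
Note: because rdtsc() supplies no ordering beyond the compiler-level ordering of "asm volatile", call sites that need an ordered read pair it with rdtsc_barrier(), as vread_tsc(), trace_clock_x86_tsc() and delay_tsc() do elsewhere in this patch. A minimal sketch of that pattern (the wrapper name is hypothetical, not part of the patch):

	static u64 read_tsc_ordered(void)
	{
		/* keep the RDTSC from being speculated ahead of earlier loads */
		rdtsc_barrier();
		/* plain 64-bit TSC read; provides no ordering of its own */
		return rdtsc();
	}
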
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 2bd69d62c623..5c490db62e32 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
static __always_inline
u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
{
- u64 delta = native_read_tsc() - src->tsc_timestamp;
+ u64 delta = rdtsc() - src->tsc_timestamp;
return pvclock_scale_delta(delta, src->tsc_to_system_mul,
src->tsc_shift);
}
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index bc5fa2af112e..58505f01962f 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
* on during the bootup the random pool has true entropy too.
*/
get_random_bytes(&canary, sizeof(canary));
- tsc = native_read_tsc();
+ tsc = rdtsc();
canary += tsc + (tsc << 32UL);
current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b4883902948b..3df7675debcf 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
return 0;
#endif
- return native_read_tsc();
+ return rdtsc();
}
extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 25efa534c4e4..222a57076039 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
/* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt);
- start = native_read_tsc();
+ start = rdtsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
*/
do {
rep_nop();
- now = native_read_tsc();
+ now = rdtsc();
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
old = dw_apb_clocksource_read(clocksource_apbt);
old += loop;
- t1 = native_read_tsc();
+ t1 = rdtsc();
do {
new = dw_apb_clocksource_read(clocksource_apbt);
} while (new < old);
- t2 = native_read_tsc();
+ t2 = rdtsc();
shift = 5;
if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 51af1ed1ae2e..0d71cd9b4a50 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
- tsc = native_read_tsc();
+ tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
unsigned long pm = acpi_pm_read_early();
if (cpu_has_tsc)
- tsc = native_read_tsc();
+ tsc = rdtsc();
switch (lapic_cal_loops++) {
case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (cpu_has_tsc)
- tsc = native_read_tsc();
+ tsc = rdtsc();
if (disable_apic) {
disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
}
if (queued) {
if (cpu_has_tsc && cpu_khz) {
- ntsc = native_read_tsc();
+ ntsc = rdtsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a69710db6112..51ad2af84a72 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -125,10 +125,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
n = K6_BUG_LOOP;
f_vide = vide;
- d = native_read_tsc();
+ d = rdtsc();
while (n--)
f_vide();
- d2 = native_read_tsc();
+ d2 = rdtsc();
d = d2-d;
if (d > 20*K6_BUG_LOOP)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a5283d2d0094..96cceccd11b4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- m->tsc = native_read_tsc();
+ m->tsc = rdtsc();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
{
unsigned long *cpu_tsc = (unsigned long *)data;
- cpu_tsc[smp_processor_id()] = native_read_tsc();
+ cpu_tsc[smp_processor_id()] = rdtsc();
}
static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 334a2a9c034d..67315cd0132c 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
*/
if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */
- rand = native_read_tsc();
+ rand = rdtsc();
rand *= 0xc345c6b72fd16123UL;
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cc390fe69b71..f75c5908c7a6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
/* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER);
- start = native_read_tsc();
+ start = rdtsc();
/*
* We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
*/
do {
rep_nop();
- now = native_read_tsc();
+ now = rdtsc();
} while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index bd8f4d41bd56..67efb8c96fc4 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
u64 ret;
rdtsc_barrier();
- ret = native_read_tsc();
+ ret = rdtsc();
return ret;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e66f5dcaeb63..21d6e04e3e82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
data = cyc2ns_write_begin(cpu);
- tsc_now = native_read_tsc();
+ tsc_now = rdtsc();
ns_now = cycles_2_ns(tsc_now);
/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
}
/* read the Time Stamp Counter: */
- tsc_now = native_read_tsc();
+ tsc_now = rdtsc();
/* return the value in ns */
return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2f0ade48614f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
tsc_deadline = apic->lapic_timer.expired_tscdeadline;
apic->lapic_timer.expired_tscdeadline = 0;
- guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+ guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
@@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
local_irq_save(flags);
now = apic->lapic_timer.timer.base->get_time();
- guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+ guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a6..8dfbad7a2c44 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1080,7 +1080,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;
- tsc = svm_scale_tsc(vcpu, native_read_tsc());
+ tsc = svm_scale_tsc(vcpu, rdtsc());
return target_tsc - tsc;
}
@@ -3079,7 +3079,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_IA32_TSC: {
msr_info->data = svm->vmcb->control.tsc_offset +
- svm_scale_tsc(vcpu, native_read_tsc());
+ svm_scale_tsc(vcpu, rdtsc());
break;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4fa1ccad7beb..10d69a6df14f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
{
u64 host_tsc, tsc_offset;
- host_tsc = native_read_tsc();
+ host_tsc = rdtsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
- return target_tsc - native_read_tsc();
+ return target_tsc - rdtsc();
}
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f771058cfb5c..dfa97139282d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1455,7 +1455,7 @@ static cycle_t read_tsc(void)
* but no one has ever seen it happen.
*/
rdtsc_barrier();
- ret = (cycle_t)native_read_tsc();
+ ret = (cycle_t)rdtsc();
last = pvclock_gtod_data.clock.cycle_last;
@@ -1646,7 +1646,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
return 1;
}
if (!use_master_clock) {
- host_tsc = native_read_tsc();
+ host_tsc = rdtsc();
kernel_ns = get_kernel_ns();
}
@@ -2810,7 +2810,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
- native_read_tsc() - vcpu->arch.last_host_tsc;
+ rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
@@ -2838,7 +2838,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
- vcpu->arch.last_host_tsc = native_read_tsc();
+ vcpu->arch.last_host_tsc = rdtsc();
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6623,7 +6623,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
- native_read_tsc());
+ rdtsc());
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
@@ -7437,7 +7437,7 @@ int kvm_arch_hardware_enable(void)
if (ret != 0)
return ret;
- local_tsc = native_read_tsc();
+ local_tsc = rdtsc();
stable = !check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 35115f3786a9..f24bc59ab0a0 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
preempt_disable();
cpu = smp_processor_id();
rdtsc_barrier();
- bclock = native_read_tsc();
+ bclock = rdtsc();
for (;;) {
rdtsc_barrier();
- now = native_read_tsc();
+ now = rdtsc();
if ((now - bclock) >= loops)
break;
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
loops -= (now - bclock);
cpu = smp_processor_id();
rdtsc_barrier();
- bclock = native_read_tsc();
+ bclock = rdtsc();
}
}
preempt_enable();
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
- *timer_val = native_read_tsc();
+ *timer_val = rdtsc();
return 0;
}
return -1;