if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC");
- rdtscll(raw);
+ raw = native_read_tsc();
random ^= raw;
use_i8254 = false;
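Context for the hunk above: in the KASLR boot stub the TSC is just one entropy source XORed into the pool, and the use_i8254 flag shows the i8254 PIT fallback is skipped once a TSC read succeeds. A minimal sketch of that mixing pattern (hedged; the full function also folds in RDRAND output and boot state):

	unsigned long random = 0;
	bool use_i8254 = true;

	if (has_cpuflag(X86_FEATURE_TSC)) {
		/* XOR the raw counter in; the low bits carry most of the entropy */
		random ^= (unsigned long)native_read_tsc();
		use_i8254 = false;
	}
	/* ...if use_i8254 is still set, read the PIT instead... */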
#define rdtscl(low) \
((low) = (u32)native_read_tsc())
-#define rdtscll(val) \
- ((val) = native_read_tsc())
-
#define rdtscp(low, high, aux) \
do { \
	unsigned long long _val = native_read_tscp(&(aux)); \
	(low) = (u32)_val; \
	(high) = (u32)(_val >> 32); \
} while (0)
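The rdtscll() macro can go because native_read_tsc() already returns the full 64-bit counter, so every caller converted in this patch becomes a plain assignment. For reference, a standalone approximation of such a return-value read (the in-tree helper uses the DECLARE_ARGS()/EAX_EDX machinery rather than this open-coded form):

	static inline unsigned long long read_tsc_sketch(void)
	{
		unsigned int lo, hi;

		/* RDTSC places the 64-bit counter in EDX:EAX */
		asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
		return ((unsigned long long)hi << 32) | lo;
	}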
static inline cycles_t get_cycles(void)
{
- unsigned long long ret = 0;
-
#ifndef CONFIG_X86_TSC
if (!cpu_has_tsc)
return 0;
#endif
- rdtscll(ret);
- return ret;
+ return native_read_tsc();
}
extern void tsc_init(void);
/* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt);
- rdtscll(start);
+ start = native_read_tsc();
	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 4 MHz == 50ms
	 */
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
{
u64 tsc;
- rdtscll(tsc);
+ tsc = native_read_tsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
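Note on the deadline hunk: TSC-deadline mode takes an absolute TSC value, not a countdown, so the handler reads the current counter and adds the scaled delta. Illustrative arithmetic, assuming the clockevent is registered at tsc_khz / TSC_DIVISOR so one event tick equals TSC_DIVISOR raw cycles:

	u64 now = native_read_tsc();
	/* delta event ticks -> delta * TSC_DIVISOR TSC cycles from now */
	wrmsrl(MSR_IA32_TSC_DEADLINE, now + (u64)delta * TSC_DIVISOR);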
unsigned long pm = acpi_pm_read_early();
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
switch (lapic_cal_loops++) {
case 0:
long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (cpu_has_tsc)
- rdtscll(tsc);
+ tsc = native_read_tsc();
	if (disable_apic) {
		disable_ioapic_support();
		return;
	}
if (queued) {
if (cpu_has_tsc && cpu_khz) {
- rdtscll(ntsc);
+ ntsc = native_read_tsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
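Why cpu_khz << 10 in the hunk above: kHz times 1024 approximates one second's worth of TSC cycles (1024 is close enough to 1000 here), so max_loops becomes roughly "one second minus the cycles already spent since tsc was sampled". Illustrative numbers:

	/* e.g. cpu_khz == 2000000 (2 GHz): 2000000 << 10 ~= 2.05e9 cycles,
	 * i.e. just over one second of TSC time */
	long long budget = ((long long)cpu_khz << 10) - (long long)(ntsc - tsc);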
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- rdtscll(m->tsc);
+ m->tsc = native_read_tsc();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
{
unsigned long *cpu_tsc = (unsigned long *)data;
- rdtscll(cpu_tsc[smp_processor_id()]);
+ cpu_tsc[smp_processor_id()] = native_read_tsc();
}
static int mce_apei_read_done;
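collect_tscs() is shaped as an on_each_cpu() callback: every CPU writes its own TSC into the slot indexed by its CPU number. A hedged sketch of the calling side (the MCE read path uses this pattern to order log entries against per-CPU timestamps):

	unsigned long *cpu_tsc;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(unsigned long), GFP_KERNEL);
	if (cpu_tsc) {
		on_each_cpu(collect_tscs, cpu_tsc, 1);	/* 1: wait for all CPUs */
		/* ...compare each record's tsc against cpu_tsc[]... */
		kfree(cpu_tsc);
	}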
*/
if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */
- rdtscll(rand);
+ rand = native_read_tsc();
rand *= 0xc345c6b72fd16123UL;
}
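The multiply above is a multiplicative hash: the TSC's low bits change quickly while its high bits barely move, and multiplying by a large odd constant smears that entropy across the whole word. A sketch of the combined fallback step (64-bit only; the constant does not fit in 32 bits):

	/* illustrative: TSC fallback plus the diffusion multiply */
	static inline unsigned long mix_tsc_entropy(void)
	{
		return (unsigned long)native_read_tsc() * 0xc345c6b72fd16123UL;
	}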
/* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER);
- rdtscll(start);
+ start = native_read_tsc();
	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 4 MHz == 50ms
	 */
do {
rep_nop();
- rdtscll(now);
+ now = native_read_tsc();
} while ((now - start) < 200000UL);
if (t1 == hpet_readl(HPET_COUNTER)) {
u64 ret;
rdtsc_barrier();
- rdtscll(ret);
+ ret = native_read_tsc();
return ret;
}
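The barrier/read pair above is the usual ordered-TSC idiom: rdtsc_barrier() (LFENCE or MFENCE, picked per CPU vendor) stops the CPU from speculating the RDTSC ahead of earlier loads, which matters for a trace clock that must not appear to run backwards relative to traced events. As a standalone helper (sketch):

	static inline u64 read_tsc_ordered(void)
	{
		rdtsc_barrier();	/* order against earlier loads */
		return native_read_tsc();
	}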
data = cyc2ns_write_begin(cpu);
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
ns_now = cycles_2_ns(tsc_now);
/*
}
/* read the Time Stamp Counter: */
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
/* return the value in ns */
return cycles_2_ns(tsc_now);
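cycles_2_ns() is the standard fixed-point mult/shift conversion: precompute a multiplier from the TSC frequency once, then every call is a multiply and a shift. Self-contained sketch with hypothetical names (the in-tree version keeps this data per CPU, updated under cyc2ns_write_begin() as in the earlier hunk):

	/* with mult = (1000000ULL << shift) / cpu_khz, the result is
	 * cycles * 1000000 / cpu_khz, i.e. nanoseconds */
	static inline unsigned long long cycles_to_ns(unsigned long long cycles,
						      unsigned long long mult,
						      unsigned int shift)
	{
		return (cycles * mult) >> shift;
	}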
{
u64 host_tsc, tsc_offset;
- rdtscll(host_tsc);
+ host_tsc = native_read_tsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
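guest_read_tsc() works because hardware adds the VMCS TSC_OFFSET field to every guest RDTSC, so the guest view is always host_tsc + offset. Setting a guest TSC therefore reduces to storing a difference; a hedged sketch, where target is a hypothetical desired guest value:

	u64 offset = target - native_read_tsc();
	vmcs_write64(TSC_OFFSET, offset);	/* guest now reads ~target */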
int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
- rdtscll(*timer_val);
+ *timer_val = native_read_tsc();
return 0;
}
return -1;
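read_current_timer() is the hook behind calibrate_delay_direct(): it only succeeds while the TSC-backed delay loop is active, and the -1 return lets callers fall back to generic calibration. Caller-side pattern, hedged (do_work() is a hypothetical workload):

	unsigned long t0, t1;

	if (read_current_timer(&t0) == 0) {
		do_work();
		read_current_timer(&t1);
		pr_debug("took %lu TSC cycles\n", t1 - t0);
	}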
/* check result for the last window */
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
/* calculate pkg cstate vs tsc ratio */
if (!msr_last || !tsc_last)
u64 val64;
msr_now = pkg_state_counter();
- rdtscll(tsc_now);
+ tsc_now = native_read_tsc();
jiffies_now = jiffies;
/* calculate pkg cstate vs tsc ratio */
printk(KERN_DEBUG "start--> \n");
then = read_pmtmr();
- rdtscll(then_tsc);
+ then_tsc = native_read_tsc();
for (i=0;i<20;i++) {
mdelay(100);
now = read_pmtmr();
- rdtscll(now_tsc);
+ now_tsc = native_read_tsc();
diff = (now - then) & 0xFFFFFF;
diff_tsc = now_tsc - then_tsc;
printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
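For interpreting this test's output: the ACPI PM timer ticks at a fixed 3.579545 MHz and is only 24 bits wide, hence the & 0xFFFFFF wrap handling; each 100 ms window should print diff_pmtmr near 357954 ticks, and diff_tsc/diff_pmtmr approximates the TSC frequency divided by 3579545. An illustrative conversion for one window:

	/* estimate the TSC frequency from one sample window (sketch) */
	u64 tsc_hz = div64_u64(diff_tsc * 3579545ULL, diff);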