}
#ifndef CONFIG_SMP
-void __init tsc_store_and_check_tsc_adjust(void)
+bool __init tsc_store_and_check_tsc_adjust(void)
{
struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
s64 bootval;
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
- return;
+ return false;
rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
cur->adjusted = bootval;
cur->nextcheck = jiffies + HZ;
pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
+ return false;
}
#else /* !CONFIG_SMP */
/*
* Store and check the TSC ADJUST MSR if available
*/
-void tsc_store_and_check_tsc_adjust(void)
+bool tsc_store_and_check_tsc_adjust(void)
{
struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
unsigned int refcpu, cpu = smp_processor_id();
s64 bootval;
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
- return;
+ return false;
rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
/*
 * Check whether this CPU is the first in a package to come up. In
 * that case the boot value is taken as reference:
 */
refcpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
if (refcpu >= nr_cpu_ids) {
cur->adjusted = bootval;
pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
- return;
+ return false;
}
ref = per_cpu_ptr(&tsc_adjust, refcpu);
/*
 * The TSC_ADJUST values in a package must be the same. If the boot
 * value of this CPU differs, force it to the already stored
 * adjustment value:
 */
if (bootval != ref->adjusted) {
cur->adjusted = ref->adjusted;
wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
}
+ /*
+ * We have the TSCs forced to be in sync on this package. Skip sync
+ * test:
+ */
+ return true;
}
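For readers who want to poke at the package logic in isolation, here is a minimal userspace model of the decision tree above. It is a sketch only: read_tsc_adjust(), write_tsc_adjust() and first_online_sibling() are hypothetical stand-ins for rdmsrl()/wrmsrl() on MSR_IA32_TSC_ADJUST and for cpumask_any_but(topology_core_cpumask(cpu), cpu); nothing here is kernel code.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define NR_CPUS 8

  struct tsc_adjust_model {
          int64_t bootval;        /* TSC_ADJUST value found at boot */
          int64_t adjusted;       /* value the package has agreed on */
  };

  static struct tsc_adjust_model adj[NR_CPUS];
  static int64_t msr[NR_CPUS];    /* fake per-CPU MSR_IA32_TSC_ADJUST */

  static int64_t read_tsc_adjust(int cpu)             { return msr[cpu]; }
  static void    write_tsc_adjust(int cpu, int64_t v) { msr[cpu] = v; }

  /* Hypothetical topology: CPUs come online in order, two per package;
   * returns the already-online sibling or -1 for the first in a package. */
  static int first_online_sibling(int cpu) { return (cpu & 1) ? cpu - 1 : -1; }

  /* Returns true when the TSC sync test can be skipped for this CPU. */
  static bool store_and_check_tsc_adjust(int cpu)
  {
          int64_t bootval = read_tsc_adjust(cpu);
          int ref = first_online_sibling(cpu);

          adj[cpu].bootval = bootval;

          if (ref < 0) {
                  /* First CPU in the package: boot value is the reference */
                  adj[cpu].adjusted = bootval;
                  return false;   /* still has to run the sync test */
          }

          /* Force the package-wide value; TSCs are in sync by construction */
          if (bootval != adj[ref].adjusted) {
                  adj[cpu].adjusted = adj[ref].adjusted;
                  write_tsc_adjust(cpu, adj[ref].adjusted);
          }
          return true;            /* sync test can be skipped */
  }

  int main(void)
  {
          msr[3] = 42;    /* pretend firmware scribbled on CPU3's TSC_ADJUST */
          for (int cpu = 0; cpu < 4; cpu++)
                  printf("CPU%d: skip=%d adjusted=%lld\n", cpu,
                         (int)store_and_check_tsc_adjust(cpu),
                         (long long)adj[cpu].adjusted);
          return 0;
  }

Running it shows CPU3's bogus boot value being forced back to its sibling's adjusted value, which is exactly why the subsequent warp test is redundant for that CPU.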
/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
+static atomic_t skip_test;
/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
atomic_set(&stop_count, 0);
/*
- * Wait for the target to arrive:
+ * Wait for the target to start or to skip the test:
*/
- while (atomic_read(&start_count) != cpus-1)
+ while (atomic_read(&start_count) != cpus - 1) {
+ if (atomic_read(&skip_test) > 0) {
+ atomic_set(&skip_test, 0);
+ return;
+ }
cpu_relax();
+ }
+
/*
 * Trigger the target to continue into the measurement too:
 */
atomic_inc(&start_count);
if (unsynchronized_tsc() || tsc_clocksource_reliable)
return;
- /* Store and check the TSC ADJUST MSR */
- tsc_store_and_check_tsc_adjust();
+ /*
+ * Store, verify and sanitize the TSC adjust register. If
+ * successful skip the test.
+ */
+ if (tsc_store_and_check_tsc_adjust()) {
+ atomic_inc(&skip_test);
+ return;
+ }
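The start-or-skip handshake between the source and target sides can likewise be modeled with two POSIX threads and C11 atomics. A minimal sketch: tsc_adjust_forced_sync() is a hypothetical stand-in for tsc_store_and_check_tsc_adjust(), and only the counter names mirror the patch.

  #include <pthread.h>
  #include <sched.h>
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_int start_count;
  static atomic_int skip_test;
  static const int cpus = 2;

  /* Hypothetical stand-in for tsc_store_and_check_tsc_adjust(); flip the
   * return value to false to exercise the measurement path instead. */
  static bool tsc_adjust_forced_sync(void) { return true; }

  /* Source side: wait for the target to start or to skip the test. */
  static void *source(void *unused)
  {
          while (atomic_load(&start_count) != cpus - 1) {
                  if (atomic_load(&skip_test) > 0) {
                          atomic_store(&skip_test, 0); /* reset for next CPU */
                          puts("source: target skipped, no sync test needed");
                          return NULL;
                  }
                  sched_yield();  /* userspace stand-in for cpu_relax() */
          }
          puts("source: target arrived, run the warp measurement");
          return NULL;
  }

  /* Target side: either announce the skip or register for the test. */
  static void *target(void *unused)
  {
          if (tsc_adjust_forced_sync()) {
                  atomic_fetch_add(&skip_test, 1);
                  return NULL;
          }
          atomic_fetch_add(&start_count, 1);
          return NULL;
  }

  int main(void)
  {
          pthread_t s, t;

          pthread_create(&s, NULL, source, NULL);
          pthread_create(&t, NULL, target, NULL);
          pthread_join(s, NULL);
          pthread_join(t, NULL);
          return 0;
  }

Resetting skip_test in the source mirrors the patch's atomic_set(), leaving the counter clean for the next upcoming CPU.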
/*
* Register this CPU's participation and wait for the