/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27dd7d207dSJung-uk Kim */ 28dd7d207dSJung-uk Kim 29dd7d207dSJung-uk Kim #include <sys/cdefs.h> 30dd7d207dSJung-uk Kim __FBSDID("$FreeBSD$"); 31dd7d207dSJung-uk Kim 32dd7d207dSJung-uk Kim #include "opt_clock.h" 33dd7d207dSJung-uk Kim 34dd7d207dSJung-uk Kim #include <sys/param.h> 35dd7d207dSJung-uk Kim #include <sys/bus.h> 36dd7d207dSJung-uk Kim #include <sys/cpu.h> 375da5812bSJung-uk Kim #include <sys/limits.h> 38dd7d207dSJung-uk Kim #include <sys/malloc.h> 39dd7d207dSJung-uk Kim #include <sys/systm.h> 40dd7d207dSJung-uk Kim #include <sys/sysctl.h> 41dd7d207dSJung-uk Kim #include <sys/time.h> 42dd7d207dSJung-uk Kim #include <sys/timetc.h> 43dd7d207dSJung-uk Kim #include <sys/kernel.h> 44dd7d207dSJung-uk Kim #include <sys/power.h> 45dd7d207dSJung-uk Kim #include <sys/smp.h> 46aea81038SKonstantin Belousov #include <sys/vdso.h> 47dd7d207dSJung-uk Kim #include <machine/clock.h> 48dd7d207dSJung-uk Kim #include <machine/cputypes.h> 49dd7d207dSJung-uk Kim #include <machine/md_var.h> 50dd7d207dSJung-uk Kim #include <machine/specialreg.h> 5101e1933dSJohn Baldwin #include <x86/vmware.h> 5216808549SKonstantin Belousov #include <dev/acpica/acpi_hpet.h> 53dd7d207dSJung-uk Kim 54dd7d207dSJung-uk Kim #include "cpufreq_if.h" 55dd7d207dSJung-uk Kim 56dd7d207dSJung-uk Kim uint64_t tsc_freq; 57dd7d207dSJung-uk Kim int tsc_is_invariant; 58155094d7SJung-uk Kim int tsc_perf_stat; 59155094d7SJung-uk Kim 60dd7d207dSJung-uk Kim static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag; 61dd7d207dSJung-uk Kim 62dd7d207dSJung-uk Kim SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN, 63dd7d207dSJung-uk Kim &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant"); 64dd7d207dSJung-uk Kim 65dd7d207dSJung-uk Kim #ifdef SMP 661472b87fSNeel Natu int smp_tsc; 67dd7d207dSJung-uk Kim SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0, 68dd7d207dSJung-uk Kim "Indicates whether the TSC is safe to use in SMP mode"); 69b2c63698SAlexander 
Motin 70b2c63698SAlexander Motin int smp_tsc_adjust = 0; 71b2c63698SAlexander Motin SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN, 72b2c63698SAlexander Motin &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP"); 73dd7d207dSJung-uk Kim #endif 74dd7d207dSJung-uk Kim 75e7f1427dSKonstantin Belousov static int tsc_shift = 1; 76e7f1427dSKonstantin Belousov SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN, 77e7f1427dSKonstantin Belousov &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency"); 78e7f1427dSKonstantin Belousov 7979422085SJung-uk Kim static int tsc_disabled; 8079422085SJung-uk Kim SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0, 8179422085SJung-uk Kim "Disable x86 Time Stamp Counter"); 8279422085SJung-uk Kim 83a4e4127fSJung-uk Kim static int tsc_skip_calibration; 84a4e4127fSJung-uk Kim SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN, 85a4e4127fSJung-uk Kim &tsc_skip_calibration, 0, "Disable TSC frequency calibration"); 86a4e4127fSJung-uk Kim 87dd7d207dSJung-uk Kim static void tsc_freq_changed(void *arg, const struct cf_level *level, 88dd7d207dSJung-uk Kim int status); 89dd7d207dSJung-uk Kim static void tsc_freq_changing(void *arg, const struct cf_level *level, 90dd7d207dSJung-uk Kim int *status); 91dd7d207dSJung-uk Kim static unsigned tsc_get_timecount(struct timecounter *tc); 92814124c3SKonstantin Belousov static inline unsigned tsc_get_timecount_low(struct timecounter *tc); 93814124c3SKonstantin Belousov static unsigned tsc_get_timecount_lfence(struct timecounter *tc); 94814124c3SKonstantin Belousov static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc); 95814124c3SKonstantin Belousov static unsigned tsc_get_timecount_mfence(struct timecounter *tc); 96814124c3SKonstantin Belousov static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc); 97dd7d207dSJung-uk Kim static void tsc_levels_changed(void *arg, int unit); 
9816808549SKonstantin Belousov static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, 9916808549SKonstantin Belousov struct timecounter *tc); 10016808549SKonstantin Belousov #ifdef COMPAT_FREEBSD32 10116808549SKonstantin Belousov static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 10216808549SKonstantin Belousov struct timecounter *tc); 10316808549SKonstantin Belousov #endif 104dd7d207dSJung-uk Kim 105dd7d207dSJung-uk Kim static struct timecounter tsc_timecounter = { 10616808549SKonstantin Belousov .tc_get_timecount = tsc_get_timecount, 10716808549SKonstantin Belousov .tc_counter_mask = ~0u, 10816808549SKonstantin Belousov .tc_name = "TSC", 10916808549SKonstantin Belousov .tc_quality = 800, /* adjusted in code */ 11016808549SKonstantin Belousov .tc_fill_vdso_timehands = x86_tsc_vdso_timehands, 11116808549SKonstantin Belousov #ifdef COMPAT_FREEBSD32 11216808549SKonstantin Belousov .tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32, 11316808549SKonstantin Belousov #endif 114dd7d207dSJung-uk Kim }; 115dd7d207dSJung-uk Kim 11601e1933dSJohn Baldwin static void 1175da5812bSJung-uk Kim tsc_freq_vmware(void) 1185da5812bSJung-uk Kim { 1195da5812bSJung-uk Kim u_int regs[4]; 1205da5812bSJung-uk Kim 1215da5812bSJung-uk Kim if (hv_high >= 0x40000010) { 1225da5812bSJung-uk Kim do_cpuid(0x40000010, regs); 1235da5812bSJung-uk Kim tsc_freq = regs[0] * 1000; 1245da5812bSJung-uk Kim } else { 1255da5812bSJung-uk Kim vmware_hvcall(VMW_HVCMD_GETHZ, regs); 1265da5812bSJung-uk Kim if (regs[1] != UINT_MAX) 1275da5812bSJung-uk Kim tsc_freq = regs[0] | ((uint64_t)regs[1] << 32); 1285da5812bSJung-uk Kim } 1295da5812bSJung-uk Kim tsc_is_invariant = 1; 1305da5812bSJung-uk Kim } 1315da5812bSJung-uk Kim 132506a906cSKonstantin Belousov /* 133506a906cSKonstantin Belousov * Calculate TSC frequency using information from the CPUID leaf 0x15 134506a906cSKonstantin Belousov * 'Time Stamp Counter and Nominal Core Crystal Clock'. 
It should be 135506a906cSKonstantin Belousov * an improvement over the parsing of the CPU model name in 136506a906cSKonstantin Belousov * tsc_freq_intel(), when available. 137506a906cSKonstantin Belousov */ 138506a906cSKonstantin Belousov static bool 139506a906cSKonstantin Belousov tsc_freq_cpuid(void) 140506a906cSKonstantin Belousov { 141506a906cSKonstantin Belousov u_int regs[4]; 142506a906cSKonstantin Belousov 143506a906cSKonstantin Belousov if (cpu_high < 0x15) 144506a906cSKonstantin Belousov return (false); 145506a906cSKonstantin Belousov do_cpuid(0x15, regs); 146506a906cSKonstantin Belousov if (regs[0] == 0 || regs[1] == 0 || regs[2] == 0) 147506a906cSKonstantin Belousov return (false); 148506a906cSKonstantin Belousov tsc_freq = (uint64_t)regs[2] * regs[1] / regs[0]; 149506a906cSKonstantin Belousov return (true); 150506a906cSKonstantin Belousov } 151506a906cSKonstantin Belousov 152a4e4127fSJung-uk Kim static void 153a4e4127fSJung-uk Kim tsc_freq_intel(void) 154dd7d207dSJung-uk Kim { 155a4e4127fSJung-uk Kim char brand[48]; 156a4e4127fSJung-uk Kim u_int regs[4]; 157a4e4127fSJung-uk Kim uint64_t freq; 158a4e4127fSJung-uk Kim char *p; 159a4e4127fSJung-uk Kim u_int i; 160dd7d207dSJung-uk Kim 161a4e4127fSJung-uk Kim /* 162a4e4127fSJung-uk Kim * Intel Processor Identification and the CPUID Instruction 163a4e4127fSJung-uk Kim * Application Note 485. 
164a4e4127fSJung-uk Kim * http://www.intel.com/assets/pdf/appnote/241618.pdf 165a4e4127fSJung-uk Kim */ 166a4e4127fSJung-uk Kim if (cpu_exthigh >= 0x80000004) { 167a4e4127fSJung-uk Kim p = brand; 168a4e4127fSJung-uk Kim for (i = 0x80000002; i < 0x80000005; i++) { 169a4e4127fSJung-uk Kim do_cpuid(i, regs); 170a4e4127fSJung-uk Kim memcpy(p, regs, sizeof(regs)); 171a4e4127fSJung-uk Kim p += sizeof(regs); 172a4e4127fSJung-uk Kim } 173a4e4127fSJung-uk Kim p = NULL; 174a4e4127fSJung-uk Kim for (i = 0; i < sizeof(brand) - 1; i++) 175a4e4127fSJung-uk Kim if (brand[i] == 'H' && brand[i + 1] == 'z') 176a4e4127fSJung-uk Kim p = brand + i; 177a4e4127fSJung-uk Kim if (p != NULL) { 178a4e4127fSJung-uk Kim p -= 5; 179a4e4127fSJung-uk Kim switch (p[4]) { 180a4e4127fSJung-uk Kim case 'M': 181a4e4127fSJung-uk Kim i = 1; 182a4e4127fSJung-uk Kim break; 183a4e4127fSJung-uk Kim case 'G': 184a4e4127fSJung-uk Kim i = 1000; 185a4e4127fSJung-uk Kim break; 186a4e4127fSJung-uk Kim case 'T': 187a4e4127fSJung-uk Kim i = 1000000; 188a4e4127fSJung-uk Kim break; 189a4e4127fSJung-uk Kim default: 190dd7d207dSJung-uk Kim return; 191a4e4127fSJung-uk Kim } 192a4e4127fSJung-uk Kim #define C2D(c) ((c) - '0') 193a4e4127fSJung-uk Kim if (p[1] == '.') { 194a4e4127fSJung-uk Kim freq = C2D(p[0]) * 1000; 195a4e4127fSJung-uk Kim freq += C2D(p[2]) * 100; 196a4e4127fSJung-uk Kim freq += C2D(p[3]) * 10; 197a4e4127fSJung-uk Kim freq *= i * 1000; 198a4e4127fSJung-uk Kim } else { 199a4e4127fSJung-uk Kim freq = C2D(p[0]) * 1000; 200a4e4127fSJung-uk Kim freq += C2D(p[1]) * 100; 201a4e4127fSJung-uk Kim freq += C2D(p[2]) * 10; 202a4e4127fSJung-uk Kim freq += C2D(p[3]); 203a4e4127fSJung-uk Kim freq *= i * 1000000; 204a4e4127fSJung-uk Kim } 205a4e4127fSJung-uk Kim #undef C2D 206a4e4127fSJung-uk Kim tsc_freq = freq; 207a4e4127fSJung-uk Kim } 208a4e4127fSJung-uk Kim } 209a4e4127fSJung-uk Kim } 210dd7d207dSJung-uk Kim 211a4e4127fSJung-uk Kim static void 212a4e4127fSJung-uk Kim probe_tsc_freq(void) 213a4e4127fSJung-uk Kim { 
214155094d7SJung-uk Kim u_int regs[4]; 215a4e4127fSJung-uk Kim uint64_t tsc1, tsc2; 216dd7d207dSJung-uk Kim 2175da5812bSJung-uk Kim if (cpu_high >= 6) { 2185da5812bSJung-uk Kim do_cpuid(6, regs); 2195da5812bSJung-uk Kim if ((regs[2] & CPUID_PERF_STAT) != 0) { 2205da5812bSJung-uk Kim /* 2215da5812bSJung-uk Kim * XXX Some emulators expose host CPUID without actual 2225da5812bSJung-uk Kim * support for these MSRs. We must test whether they 2235da5812bSJung-uk Kim * really work. 2245da5812bSJung-uk Kim */ 2255da5812bSJung-uk Kim wrmsr(MSR_MPERF, 0); 2265da5812bSJung-uk Kim wrmsr(MSR_APERF, 0); 2275da5812bSJung-uk Kim DELAY(10); 2285da5812bSJung-uk Kim if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) 2295da5812bSJung-uk Kim tsc_perf_stat = 1; 2305da5812bSJung-uk Kim } 2315da5812bSJung-uk Kim } 2325da5812bSJung-uk Kim 23301e1933dSJohn Baldwin if (vm_guest == VM_GUEST_VMWARE) { 23401e1933dSJohn Baldwin tsc_freq_vmware(); 2355da5812bSJung-uk Kim return; 23601e1933dSJohn Baldwin } 2375da5812bSJung-uk Kim 238dd7d207dSJung-uk Kim switch (cpu_vendor_id) { 239dd7d207dSJung-uk Kim case CPU_VENDOR_AMD: 240a106a27cSJung-uk Kim if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 241a106a27cSJung-uk Kim (vm_guest == VM_GUEST_NO && 242a106a27cSJung-uk Kim CPUID_TO_FAMILY(cpu_id) >= 0x10)) 243dd7d207dSJung-uk Kim tsc_is_invariant = 1; 244814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 245814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 246814124c3SKonstantin Belousov tsc_get_timecount_mfence; 247814124c3SKonstantin Belousov } 248dd7d207dSJung-uk Kim break; 249dd7d207dSJung-uk Kim case CPU_VENDOR_INTEL: 250a106a27cSJung-uk Kim if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 251a106a27cSJung-uk Kim (vm_guest == VM_GUEST_NO && 252a106a27cSJung-uk Kim ((CPUID_TO_FAMILY(cpu_id) == 0x6 && 253dd7d207dSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0xe) || 254dd7d207dSJung-uk Kim (CPUID_TO_FAMILY(cpu_id) == 0xf && 255a106a27cSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0x3)))) 
256dd7d207dSJung-uk Kim tsc_is_invariant = 1; 257814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 258814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 259814124c3SKonstantin Belousov tsc_get_timecount_lfence; 260814124c3SKonstantin Belousov } 261dd7d207dSJung-uk Kim break; 262dd7d207dSJung-uk Kim case CPU_VENDOR_CENTAUR: 263a106a27cSJung-uk Kim if (vm_guest == VM_GUEST_NO && 264a106a27cSJung-uk Kim CPUID_TO_FAMILY(cpu_id) == 0x6 && 265dd7d207dSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0xf && 266dd7d207dSJung-uk Kim (rdmsr(0x1203) & 0x100000000ULL) == 0) 267dd7d207dSJung-uk Kim tsc_is_invariant = 1; 268814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 269814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 270814124c3SKonstantin Belousov tsc_get_timecount_lfence; 271814124c3SKonstantin Belousov } 272dd7d207dSJung-uk Kim break; 273dd7d207dSJung-uk Kim } 274dd7d207dSJung-uk Kim 275a4e4127fSJung-uk Kim if (tsc_skip_calibration) { 276506a906cSKonstantin Belousov if (tsc_freq_cpuid()) 277506a906cSKonstantin Belousov ; 278506a906cSKonstantin Belousov else if (cpu_vendor_id == CPU_VENDOR_INTEL) 279a4e4127fSJung-uk Kim tsc_freq_intel(); 280506a906cSKonstantin Belousov } else { 281a4e4127fSJung-uk Kim if (bootverbose) 282a4e4127fSJung-uk Kim printf("Calibrating TSC clock ... 
"); 283a4e4127fSJung-uk Kim tsc1 = rdtsc(); 284a4e4127fSJung-uk Kim DELAY(1000000); 285a4e4127fSJung-uk Kim tsc2 = rdtsc(); 286a4e4127fSJung-uk Kim tsc_freq = tsc2 - tsc1; 287506a906cSKonstantin Belousov } 288a4e4127fSJung-uk Kim if (bootverbose) 289a4e4127fSJung-uk Kim printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq); 290a4e4127fSJung-uk Kim } 291a4e4127fSJung-uk Kim 292a4e4127fSJung-uk Kim void 293a4e4127fSJung-uk Kim init_TSC(void) 294a4e4127fSJung-uk Kim { 295a4e4127fSJung-uk Kim 296a4e4127fSJung-uk Kim if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 297a4e4127fSJung-uk Kim return; 298a4e4127fSJung-uk Kim 299fe760cfaSJohn Baldwin #ifdef __i386__ 300fe760cfaSJohn Baldwin /* The TSC is known to be broken on certain CPUs. */ 301fe760cfaSJohn Baldwin switch (cpu_vendor_id) { 302fe760cfaSJohn Baldwin case CPU_VENDOR_AMD: 303fe760cfaSJohn Baldwin switch (cpu_id & 0xFF0) { 304fe760cfaSJohn Baldwin case 0x500: 305fe760cfaSJohn Baldwin /* K5 Model 0 */ 306fe760cfaSJohn Baldwin return; 307fe760cfaSJohn Baldwin } 308fe760cfaSJohn Baldwin break; 309fe760cfaSJohn Baldwin case CPU_VENDOR_CENTAUR: 310fe760cfaSJohn Baldwin switch (cpu_id & 0xff0) { 311fe760cfaSJohn Baldwin case 0x540: 312fe760cfaSJohn Baldwin /* 313fe760cfaSJohn Baldwin * http://www.centtech.com/c6_data_sheet.pdf 314fe760cfaSJohn Baldwin * 315fe760cfaSJohn Baldwin * I-12 RDTSC may return incoherent values in EDX:EAX 316fe760cfaSJohn Baldwin * I-13 RDTSC hangs when certain event counters are used 317fe760cfaSJohn Baldwin */ 318fe760cfaSJohn Baldwin return; 319fe760cfaSJohn Baldwin } 320fe760cfaSJohn Baldwin break; 321fe760cfaSJohn Baldwin case CPU_VENDOR_NSC: 322fe760cfaSJohn Baldwin switch (cpu_id & 0xff0) { 323fe760cfaSJohn Baldwin case 0x540: 324fe760cfaSJohn Baldwin if ((cpu_id & CPUID_STEPPING) == 0) 325fe760cfaSJohn Baldwin return; 326fe760cfaSJohn Baldwin break; 327fe760cfaSJohn Baldwin } 328fe760cfaSJohn Baldwin break; 329fe760cfaSJohn Baldwin } 330fe760cfaSJohn Baldwin #endif 331fe760cfaSJohn 
Baldwin 332a4e4127fSJung-uk Kim probe_tsc_freq(); 333a4e4127fSJung-uk Kim 334dd7d207dSJung-uk Kim /* 335dd7d207dSJung-uk Kim * Inform CPU accounting about our boot-time clock rate. This will 336dd7d207dSJung-uk Kim * be updated if someone loads a cpufreq driver after boot that 337dd7d207dSJung-uk Kim * discovers a new max frequency. 338dd7d207dSJung-uk Kim */ 339a4e4127fSJung-uk Kim if (tsc_freq != 0) 3405ac44f72SJung-uk Kim set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant); 341dd7d207dSJung-uk Kim 342dd7d207dSJung-uk Kim if (tsc_is_invariant) 343dd7d207dSJung-uk Kim return; 344dd7d207dSJung-uk Kim 345dd7d207dSJung-uk Kim /* Register to find out about changes in CPU frequency. */ 346dd7d207dSJung-uk Kim tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change, 347dd7d207dSJung-uk Kim tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST); 348dd7d207dSJung-uk Kim tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change, 349dd7d207dSJung-uk Kim tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST); 350dd7d207dSJung-uk Kim tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed, 351dd7d207dSJung-uk Kim tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY); 352dd7d207dSJung-uk Kim } 353dd7d207dSJung-uk Kim 35465e7d70bSJung-uk Kim #ifdef SMP 35565e7d70bSJung-uk Kim 356814124c3SKonstantin Belousov /* 357814124c3SKonstantin Belousov * RDTSC is not a serializing instruction, and does not drain 358814124c3SKonstantin Belousov * instruction stream, so we need to drain the stream before executing 359814124c3SKonstantin Belousov * it. It could be fixed by use of RDTSCP, except the instruction is 360814124c3SKonstantin Belousov * not available everywhere. 361814124c3SKonstantin Belousov * 362814124c3SKonstantin Belousov * Use CPUID for draining in the boot-time SMP constistency test. 
The 363814124c3SKonstantin Belousov * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel 364814124c3SKonstantin Belousov * and VIA) when SSE2 is present, and nothing on older machines which 365814124c3SKonstantin Belousov * also do not issue RDTSC prematurely. There, testing for SSE2 and 366e1a18e46SKonstantin Belousov * vendor is too cumbersome, and we learn about TSC presence from CPUID. 367814124c3SKonstantin Belousov * 368814124c3SKonstantin Belousov * Do not use do_cpuid(), since we do not need CPUID results, which 369814124c3SKonstantin Belousov * have to be written into memory with do_cpuid(). 370814124c3SKonstantin Belousov */ 37165e7d70bSJung-uk Kim #define TSC_READ(x) \ 37265e7d70bSJung-uk Kim static void \ 37365e7d70bSJung-uk Kim tsc_read_##x(void *arg) \ 37465e7d70bSJung-uk Kim { \ 3757bfcb3bbSJim Harris uint64_t *tsc = arg; \ 37665e7d70bSJung-uk Kim u_int cpu = PCPU_GET(cpuid); \ 37765e7d70bSJung-uk Kim \ 378814124c3SKonstantin Belousov __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \ 3797bfcb3bbSJim Harris tsc[cpu * 3 + x] = rdtsc(); \ 38065e7d70bSJung-uk Kim } 38165e7d70bSJung-uk Kim TSC_READ(0) 38265e7d70bSJung-uk Kim TSC_READ(1) 38365e7d70bSJung-uk Kim TSC_READ(2) 38465e7d70bSJung-uk Kim #undef TSC_READ 38565e7d70bSJung-uk Kim 38665e7d70bSJung-uk Kim #define N 1000 38765e7d70bSJung-uk Kim 38865e7d70bSJung-uk Kim static void 38965e7d70bSJung-uk Kim comp_smp_tsc(void *arg) 39065e7d70bSJung-uk Kim { 3917bfcb3bbSJim Harris uint64_t *tsc; 3927bfcb3bbSJim Harris int64_t d1, d2; 39365e7d70bSJung-uk Kim u_int cpu = PCPU_GET(cpuid); 39465e7d70bSJung-uk Kim u_int i, j, size; 39565e7d70bSJung-uk Kim 39665e7d70bSJung-uk Kim size = (mp_maxid + 1) * 3; 39765e7d70bSJung-uk Kim for (i = 0, tsc = arg; i < N; i++, tsc += size) 39865e7d70bSJung-uk Kim CPU_FOREACH(j) { 39965e7d70bSJung-uk Kim if (j == cpu) 40065e7d70bSJung-uk Kim continue; 40165e7d70bSJung-uk Kim d1 = tsc[cpu * 3 + 1] - tsc[j * 3]; 40265e7d70bSJung-uk Kim d2 = tsc[cpu * 3 
+ 2] - tsc[j * 3 + 1]; 40365e7d70bSJung-uk Kim if (d1 <= 0 || d2 <= 0) { 40465e7d70bSJung-uk Kim smp_tsc = 0; 40565e7d70bSJung-uk Kim return; 40665e7d70bSJung-uk Kim } 40765e7d70bSJung-uk Kim } 40865e7d70bSJung-uk Kim } 40965e7d70bSJung-uk Kim 410b2c63698SAlexander Motin static void 411b2c63698SAlexander Motin adj_smp_tsc(void *arg) 412b2c63698SAlexander Motin { 413b2c63698SAlexander Motin uint64_t *tsc; 414b2c63698SAlexander Motin int64_t d, min, max; 415b2c63698SAlexander Motin u_int cpu = PCPU_GET(cpuid); 416b2c63698SAlexander Motin u_int first, i, size; 417b2c63698SAlexander Motin 418b2c63698SAlexander Motin first = CPU_FIRST(); 419b2c63698SAlexander Motin if (cpu == first) 420b2c63698SAlexander Motin return; 421b2c63698SAlexander Motin min = INT64_MIN; 422b2c63698SAlexander Motin max = INT64_MAX; 423b2c63698SAlexander Motin size = (mp_maxid + 1) * 3; 424b2c63698SAlexander Motin for (i = 0, tsc = arg; i < N; i++, tsc += size) { 425b2c63698SAlexander Motin d = tsc[first * 3] - tsc[cpu * 3 + 1]; 426b2c63698SAlexander Motin if (d > min) 427b2c63698SAlexander Motin min = d; 428b2c63698SAlexander Motin d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2]; 429b2c63698SAlexander Motin if (d > min) 430b2c63698SAlexander Motin min = d; 431b2c63698SAlexander Motin d = tsc[first * 3 + 1] - tsc[cpu * 3]; 432b2c63698SAlexander Motin if (d < max) 433b2c63698SAlexander Motin max = d; 434b2c63698SAlexander Motin d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1]; 435b2c63698SAlexander Motin if (d < max) 436b2c63698SAlexander Motin max = d; 437b2c63698SAlexander Motin } 438b2c63698SAlexander Motin if (min > max) 439b2c63698SAlexander Motin return; 440b2c63698SAlexander Motin d = min / 2 + max / 2; 441b2c63698SAlexander Motin __asm __volatile ( 442b2c63698SAlexander Motin "movl $0x10, %%ecx\n\t" 443b2c63698SAlexander Motin "rdmsr\n\t" 444b2c63698SAlexander Motin "addl %%edi, %%eax\n\t" 445b2c63698SAlexander Motin "adcl %%esi, %%edx\n\t" 446b2c63698SAlexander Motin "wrmsr\n" 447b2c63698SAlexander 
Motin : /* No output */ 448b2c63698SAlexander Motin : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32)) 449b2c63698SAlexander Motin : "ax", "cx", "dx", "cc" 450b2c63698SAlexander Motin ); 451b2c63698SAlexander Motin } 452b2c63698SAlexander Motin 45365e7d70bSJung-uk Kim static int 454279be68bSAndriy Gapon test_tsc(int adj_max_count) 45565e7d70bSJung-uk Kim { 4567bfcb3bbSJim Harris uint64_t *data, *tsc; 457b2c63698SAlexander Motin u_int i, size, adj; 45865e7d70bSJung-uk Kim 459e7f1427dSKonstantin Belousov if ((!smp_tsc && !tsc_is_invariant) || vm_guest) 46065e7d70bSJung-uk Kim return (-100); 46165e7d70bSJung-uk Kim size = (mp_maxid + 1) * 3; 46265e7d70bSJung-uk Kim data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK); 463b2c63698SAlexander Motin adj = 0; 464b2c63698SAlexander Motin retry: 46565e7d70bSJung-uk Kim for (i = 0, tsc = data; i < N; i++, tsc += size) 46665e7d70bSJung-uk Kim smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc); 46765e7d70bSJung-uk Kim smp_tsc = 1; /* XXX */ 46867d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc, 46967d955aaSPatrick Kelsey smp_no_rendezvous_barrier, data); 470279be68bSAndriy Gapon if (!smp_tsc && adj < adj_max_count) { 471b2c63698SAlexander Motin adj++; 47267d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc, 47367d955aaSPatrick Kelsey smp_no_rendezvous_barrier, data); 474b2c63698SAlexander Motin goto retry; 475b2c63698SAlexander Motin } 47665e7d70bSJung-uk Kim free(data, M_TEMP); 47765e7d70bSJung-uk Kim if (bootverbose) 478b2c63698SAlexander Motin printf("SMP: %sed TSC synchronization test%s\n", 479b2c63698SAlexander Motin smp_tsc ? "pass" : "fail", 480b2c63698SAlexander Motin adj > 0 ? 
" after adjustment" : ""); 48126e6537aSJung-uk Kim if (smp_tsc && tsc_is_invariant) { 48226e6537aSJung-uk Kim switch (cpu_vendor_id) { 48326e6537aSJung-uk Kim case CPU_VENDOR_AMD: 48426e6537aSJung-uk Kim /* 48526e6537aSJung-uk Kim * Starting with Family 15h processors, TSC clock 48626e6537aSJung-uk Kim * source is in the north bridge. Check whether 48726e6537aSJung-uk Kim * we have a single-socket/multi-core platform. 48826e6537aSJung-uk Kim * XXX Need more work for complex cases. 48926e6537aSJung-uk Kim */ 49026e6537aSJung-uk Kim if (CPUID_TO_FAMILY(cpu_id) < 0x15 || 49126e6537aSJung-uk Kim (amd_feature2 & AMDID2_CMP) == 0 || 49226e6537aSJung-uk Kim smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1) 49326e6537aSJung-uk Kim break; 49426e6537aSJung-uk Kim return (1000); 49526e6537aSJung-uk Kim case CPU_VENDOR_INTEL: 49626e6537aSJung-uk Kim /* 49726e6537aSJung-uk Kim * XXX Assume Intel platforms have synchronized TSCs. 49826e6537aSJung-uk Kim */ 49926e6537aSJung-uk Kim return (1000); 50026e6537aSJung-uk Kim } 50126e6537aSJung-uk Kim return (800); 50226e6537aSJung-uk Kim } 50326e6537aSJung-uk Kim return (-100); 50465e7d70bSJung-uk Kim } 50565e7d70bSJung-uk Kim 50665e7d70bSJung-uk Kim #undef N 50765e7d70bSJung-uk Kim 50865e7d70bSJung-uk Kim #endif /* SMP */ 50965e7d70bSJung-uk Kim 51065e7d70bSJung-uk Kim static void 511dd7d207dSJung-uk Kim init_TSC_tc(void) 512dd7d207dSJung-uk Kim { 51395f2f098SJung-uk Kim uint64_t max_freq; 51495f2f098SJung-uk Kim int shift; 515dd7d207dSJung-uk Kim 51638b8542cSJung-uk Kim if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 517dd7d207dSJung-uk Kim return; 518dd7d207dSJung-uk Kim 519dd7d207dSJung-uk Kim /* 52095f2f098SJung-uk Kim * Limit timecounter frequency to fit in an int and prevent it from 52195f2f098SJung-uk Kim * overflowing too fast. 52295f2f098SJung-uk Kim */ 52395f2f098SJung-uk Kim max_freq = UINT_MAX; 52495f2f098SJung-uk Kim 52595f2f098SJung-uk Kim /* 526dd7d207dSJung-uk Kim * We can not use the TSC if we support APM. 
Precise timekeeping 527dd7d207dSJung-uk Kim * on an APM'ed machine is at best a fools pursuit, since 528dd7d207dSJung-uk Kim * any and all of the time spent in various SMM code can't 529dd7d207dSJung-uk Kim * be reliably accounted for. Reading the RTC is your only 530dd7d207dSJung-uk Kim * source of reliable time info. The i8254 loses too, of course, 531dd7d207dSJung-uk Kim * but we need to have some kind of time... 532dd7d207dSJung-uk Kim * We don't know at this point whether APM is going to be used 533dd7d207dSJung-uk Kim * or not, nor when it might be activated. Play it safe. 534dd7d207dSJung-uk Kim */ 535dd7d207dSJung-uk Kim if (power_pm_get_type() == POWER_PM_TYPE_APM) { 536dd7d207dSJung-uk Kim tsc_timecounter.tc_quality = -1000; 537dd7d207dSJung-uk Kim if (bootverbose) 538dd7d207dSJung-uk Kim printf("TSC timecounter disabled: APM enabled.\n"); 53965e7d70bSJung-uk Kim goto init; 540dd7d207dSJung-uk Kim } 541dd7d207dSJung-uk Kim 542a49399a9SJung-uk Kim /* 54392597e06SJohn Baldwin * Intel CPUs without a C-state invariant TSC can stop the TSC 544d1411416SJohn Baldwin * in either C2 or C3. Disable use of C2 and C3 while using 545d1411416SJohn Baldwin * the TSC as the timecounter. The timecounter can be changed 546d1411416SJohn Baldwin * to enable C2 and C3. 547d1411416SJohn Baldwin * 548d1411416SJohn Baldwin * Note that the TSC is used as the cputicker for computing 549d1411416SJohn Baldwin * thread runtime regardless of the timecounter setting, so 550d1411416SJohn Baldwin * using an alternate timecounter and enabling C2 or C3 can 551d1411416SJohn Baldwin * result incorrect runtimes for kernel idle threads (but not 552d1411416SJohn Baldwin * for any non-idle threads). 
553a49399a9SJung-uk Kim */ 5548cd59625SKonstantin Belousov if (cpu_vendor_id == CPU_VENDOR_INTEL && 555a49399a9SJung-uk Kim (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) { 55692597e06SJohn Baldwin tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP; 557a49399a9SJung-uk Kim if (bootverbose) 558d1411416SJohn Baldwin printf("TSC timecounter disables C2 and C3.\n"); 559a49399a9SJung-uk Kim } 560a49399a9SJung-uk Kim 561dd7d207dSJung-uk Kim /* 562e7f1427dSKonstantin Belousov * We can not use the TSC in SMP mode unless the TSCs on all CPUs 563e7f1427dSKonstantin Belousov * are synchronized. If the user is sure that the system has 564e7f1427dSKonstantin Belousov * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a 565e7f1427dSKonstantin Belousov * non-zero value. The TSC seems unreliable in virtualized SMP 5665cf8ac1bSMike Silbersack * environments, so it is set to a negative quality in those cases. 567dd7d207dSJung-uk Kim */ 568ba79ab82SAndriy Gapon #ifdef SMP 569e7f1427dSKonstantin Belousov if (mp_ncpus > 1) 570279be68bSAndriy Gapon tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust); 571ba79ab82SAndriy Gapon else 572ba79ab82SAndriy Gapon #endif /* SMP */ 573ba79ab82SAndriy Gapon if (tsc_is_invariant) 57426e6537aSJung-uk Kim tsc_timecounter.tc_quality = 1000; 575e7f1427dSKonstantin Belousov max_freq >>= tsc_shift; 57626e6537aSJung-uk Kim 57765e7d70bSJung-uk Kim init: 578e7f1427dSKonstantin Belousov for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++) 57995f2f098SJung-uk Kim ; 580e7f1427dSKonstantin Belousov if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) { 581814124c3SKonstantin Belousov if (cpu_vendor_id == CPU_VENDOR_AMD) { 582e7f1427dSKonstantin Belousov tsc_timecounter.tc_get_timecount = shift > 0 ? 583e7f1427dSKonstantin Belousov tsc_get_timecount_low_mfence : 584e7f1427dSKonstantin Belousov tsc_get_timecount_mfence; 585814124c3SKonstantin Belousov } else { 586e7f1427dSKonstantin Belousov tsc_timecounter.tc_get_timecount = shift > 0 ? 
		    /*
		     * NOTE(review): this is the tail of init_TSC_tc(); the
		     * function's opening (and the selection logic that chose
		     * between the fence variants) is in the preceding chunk.
		     */
		    tsc_get_timecount_low_lfence :
		    tsc_get_timecount_lfence;
		}
	} else {
		/* No fence needed: pick plain or bit-discarding reader. */
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		/* Low bits are shifted out; advertise that in the tc name. */
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		/*
		 * Register the timecounter only when the TSC frequency is
		 * known; the shift is stashed in tc_priv for the readers.
		 */
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * Re-check TSC quality after a resume.  On SMP the TSCs may have lost
 * synchronization across the suspend, so re-run the synchronization test
 * and record any quality change.  No-op for UP or if the TSC was already
 * judged unusable at boot.
 */
void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		/*
		 * levels[0] is the highest level; total_set.freq is in MHz,
		 * so scale to Hz before feeding the cputicker.
		 */
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	/* Release store pairs with the acquire load in the sysctl handler. */
	atomic_store_rel_64(&tsc_freq, freq);
	/* Keep the timecounter frequency consistent with the tc_priv shift. */
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

/*
 * sysctl machdep.tsc_freq: report the current TSC frequency, and on write
 * override both tsc_freq and the (shifted) timecounter frequency.  Returns
 * EOPNOTSUPP when the frequency was never determined.
 */
static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		/* New value supplied: publish it with release semantics. */
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

/* Plain reader: low 32 bits of the TSC, no fencing, no shift. */
static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

/*
 * Reader for "TSC-low": read the 64-bit TSC and shift out the low
 * tc_priv bits (SHRD pulls the needed high bits down from %edx).
 */
static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

/*
 * Fenced variants: issue LFENCE/MFENCE before RDTSC so the TSC read is
 * ordered with respect to preceding memory operations.  NOTE(review):
 * which fence is required presumably depends on the CPU vendor; the
 * selection happens in init_TSC_tc() outside this chunk — confirm there.
 */
static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

/*
 * Fill the vdso timehands so userspace can read the TSC directly:
 * export the algorithm tag and the low-bit shift; the HPET index is
 * set to an invalid sentinel since it is unused for the TSC algo.
 * Returns 1 to indicate the timecounter is usable from userspace.
 */
static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
/* 32-bit compat flavor of x86_tsc_vdso_timehands(); same contract. */
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif