xref: /freebsd/sys/x86/x86/pvclock.c (revision 0957b409)
1 /*-
2  * Copyright (c) 2009 Adrian Chadd
3  * Copyright (c) 2012 Spectra Logic Corporation
4  * Copyright (c) 2014 Bryan Venteicher
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/proc.h>
35 
36 #include <machine/cpufunc.h>
37 #include <machine/cpu.h>
38 #include <machine/atomic.h>
39 #include <machine/pvclock.h>
40 
41 /*
42  * Last time; this guarantees a monotonically increasing clock for when
43  * a stable TSC is not provided.
44  */
45 static volatile uint64_t pvclock_last_cycles;
46 
47 void
48 pvclock_resume(void)
49 {
50 
51 	atomic_store_rel_64(&pvclock_last_cycles, 0);
52 }
53 
54 uint64_t
55 pvclock_get_last_cycles(void)
56 {
57 
58 	return (atomic_load_acq_64(&pvclock_last_cycles));
59 }
60 
61 uint64_t
62 pvclock_tsc_freq(struct pvclock_vcpu_time_info *ti)
63 {
64 	uint64_t freq;
65 
66 	freq = (1000000000ULL << 32) / ti->tsc_to_system_mul;
67 
68 	if (ti->tsc_shift < 0)
69 		freq <<= -ti->tsc_shift;
70 	else
71 		freq >>= ti->tsc_shift;
72 
73 	return (freq);
74 }
75 
76 /*
77  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
78  * yielding a 64-bit result.
79  */
/*
 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
 * yielding a 64-bit result: product = (delta << shift) * mul_frac >> 32,
 * where mul_frac is a 32.32 fixed-point fraction.  The multiply is done
 * in inline assembly so the full 96-bit intermediate product is kept
 * before discarding the low 32 bits.
 */
static inline uint64_t
pvclock_scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t product;

	/* Apply the binary pre-shift before the fixed-point multiply. */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#if defined(__i386__)
	{
		uint32_t tmp1, tmp2;

		/**
		 * For i386, the formula looks like:
		 *
		 *   lower = (mul_frac * (delta & UINT_MAX)) >> 32
		 *   upper = mul_frac * (delta >> 32)
		 *   product = lower + upper
		 */
		__asm__ (
			"mul  %5       ; "
			"mov  %4,%%eax ; "
			"mov  %%edx,%4 ; "
			"mul  %5       ; "
			"xor  %5,%5    ; "
			"add  %4,%%eax ; "
			"adc  %5,%%edx ; "
			: "=A" (product), "=r" (tmp1), "=r" (tmp2)
			: "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
			  "2" (mul_frac) );
	}
#elif defined(__amd64__)
	{
		unsigned long tmp;

		/*
		 * 64x64->128 multiply, then shift the 128-bit result right
		 * by 32 via shrd to recover the 64-bit quotient.
		 */
		__asm__ (
			"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
			: [lo]"=a" (product), [hi]"=d" (tmp)
			: "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
	}
#else
#error "pvclock: unsupported x86 architecture?"
#endif

	return (product);
}
128 
129 static uint64_t
130 pvclock_get_nsec_offset(struct pvclock_vcpu_time_info *ti)
131 {
132 	uint64_t delta;
133 
134 	delta = rdtsc() - ti->tsc_timestamp;
135 
136 	return (pvclock_scale_delta(delta, ti->tsc_to_system_mul,
137 	    ti->tsc_shift));
138 }
139 
/*
 * Take a consistent snapshot of the hypervisor-published time info,
 * returning the current system time (ns) in *cycles and the pvclock
 * flags in *flags.
 *
 * The version field acts as a sequence counter: an odd value indicates
 * an update is in progress, and a changed value indicates the fields
 * were modified while we were reading them; retry in either case.  The
 * rmb() calls keep the data reads ordered between the two version
 * reads.
 */
static void
pvclock_read_time_info(struct pvclock_vcpu_time_info *ti,
    uint64_t *cycles, uint8_t *flags)
{
	uint32_t version;

	do {
		version = ti->version;
		rmb();
		*cycles = ti->system_time + pvclock_get_nsec_offset(ti);
		*flags = ti->flags;
		rmb();
	} while ((ti->version & 1) != 0 || ti->version != version);
}
154 
/*
 * Take a consistent snapshot of the hypervisor-published wall clock,
 * returning its seconds and nanoseconds components.
 *
 * Same sequence-counter protocol as pvclock_read_time_info(): retry if
 * the version is odd (update in progress) or changed underneath us,
 * with rmb() ordering the data reads between the version reads.
 */
static void
pvclock_read_wall_clock(struct pvclock_wall_clock *wc, uint32_t *sec,
    uint32_t *nsec)
{
	uint32_t version;

	do {
		version = wc->version;
		rmb();
		*sec = wc->sec;
		*nsec = wc->nsec;
		rmb();
	} while ((wc->version & 1) != 0 || wc->version != version);
}
169 
170 uint64_t
171 pvclock_get_timecount(struct pvclock_vcpu_time_info *ti)
172 {
173 	uint64_t now, last;
174 	uint8_t flags;
175 
176 	pvclock_read_time_info(ti, &now, &flags);
177 
178 	if (flags & PVCLOCK_FLAG_TSC_STABLE)
179 		return (now);
180 
181 	/*
182 	 * Enforce a monotonically increasing clock time across all VCPUs.
183 	 * If our time is too old, use the last time and return. Otherwise,
184 	 * try to update the last time.
185 	 */
186 	do {
187 		last = atomic_load_acq_64(&pvclock_last_cycles);
188 		if (last > now)
189 			return (last);
190 	} while (!atomic_cmpset_64(&pvclock_last_cycles, last, now));
191 
192 	return (now);
193 }
194 
195 void
196 pvclock_get_wallclock(struct pvclock_wall_clock *wc, struct timespec *ts)
197 {
198 	uint32_t sec, nsec;
199 
200 	pvclock_read_wall_clock(wc, &sec, &nsec);
201 	ts->tv_sec = sec;
202 	ts->tv_nsec = nsec;
203 }
204