/*-
 * Copyright (c) 2016 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systimer.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/msi_machdep.h>

#include <dev/virtual/hyperv/hyperv_busdma.h>
#include <dev/virtual/hyperv/hyperv_machdep.h>
#include <dev/virtual/hyperv/hyperv_reg.h>
#include <dev/virtual/hyperv/hyperv_var.h>

struct hyperv_reftsc_ctx {
	struct hyperv_reftsc	*tsc_ref;
	struct hyperv_dma	tsc_ref_dma;
};

static void		hyperv_tsc_cputimer_construct(struct cputimer *,
			    sysclock_t);
static sysclock_t	hyperv_tsc_cputimer_count_mfence(void);
static sysclock_t	hyperv_tsc_cputimer_count_lfence(void);

static struct hyperv_reftsc_ctx	hyperv_ref_tsc;
static hyperv_tc64_t	hyperv_tc64_saved;

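/*
 * Hyper-V reference TSC backed cputimer and cpucounter.  The count
 * callbacks are left NULL here and are filled in by hyperv_md_init(),
 * which selects the lfence or mfence variant based on the CPU vendor.
 */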
static struct cputimer	hyperv_tsc_cputimer = {
	.next		= SLIST_ENTRY_INITIALIZER,
	.name		= "Hyper-V-TSC",
	.pri		= CPUTIMER_PRI_VMM_HI,
	.type		= CPUTIMER_VMM1,
	.count		= NULL,	/* determined later */
	.fromhz		= cputimer_default_fromhz,
	.fromus		= cputimer_default_fromus,
	.construct	= hyperv_tsc_cputimer_construct,
	.destruct	= cputimer_default_destruct,
	.freq		= HYPERV_TIMER_FREQ
};

static struct cpucounter hyperv_tsc_cpucounter = {
	.freq		= HYPERV_TIMER_FREQ,
	.count		= NULL, /* determined later */
	.flags		= CPUCOUNTER_FLAG_MPSYNC,
	.prio		= CPUCOUNTER_PRIO_VMM_HI,
	.type		= CPUCOUNTER_VMM1
};

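/*
 * Execute a Hyper-V hypercall through the hypercall page at hc_addr.
 * Per the x86_64 hypercall calling convention, the control input is
 * passed in RCX, the input parameter GPA in RDX and the output
 * parameter GPA in R8; the hypervisor returns the status in RAX.
 */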
uint64_t
hypercall_md(volatile void *hc_addr, uint64_t in_val,
    uint64_t in_paddr, uint64_t out_paddr)
{
	uint64_t status;

	__asm__ __volatile__ ("mov %0, %%r8" : : "r" (out_paddr): "r8");
	__asm__ __volatile__ ("call *%3" : "=a" (status) :
	    "c" (in_val), "d" (in_paddr), "m" (hc_addr));
	return (status);
}

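/*
 * Extract the interrupt vector from the MSI data word; on x86 the
 * vector occupies the low bits covered by MSI_X86_DATA_INTVEC.
 */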
int
hyperv_msi2vector(uint64_t msi_addr __unused, uint32_t msi_data)
{
	return (msi_data & MSI_X86_DATA_INTVEC);
}

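/*
 * Read the partition reference time from the reference TSC page using
 * a seqlock style protocol: capture tsc_seq, read tsc_scale/tsc_ofs and
 * the TSC, then retry if the sequence changed in between.  The time is
 * ((tsc * tsc_scale) >> 64) + tsc_ofs.  A sequence of 0 means the
 * reference TSC page is invalid, in which case the reference count MSR
 * is used instead.
 */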
#define HYPERV_TSC(fence)					\
static uint64_t							\
hyperv_tsc_##fence(void)					\
{								\
	struct hyperv_reftsc *tsc_ref = hyperv_ref_tsc.tsc_ref;	\
	uint32_t seq;						\
								\
	while ((seq = tsc_ref->tsc_seq) != 0) {			\
		uint64_t disc, ret, tsc;			\
		uint64_t scale;					\
		int64_t ofs;					\
								\
		cpu_ccfence();					\
		scale = tsc_ref->tsc_scale;			\
		ofs = tsc_ref->tsc_ofs;				\
								\
		cpu_##fence();					\
		tsc = rdtsc();					\
								\
		/* ret = ((tsc * scale) >> 64) + ofs */		\
		__asm__ __volatile__ ("mulq %3" :		\
		    "=d" (ret), "=a" (disc) :			\
		    "a" (tsc), "r" (scale));			\
		ret += ofs;					\
								\
		cpu_ccfence();					\
		if (tsc_ref->tsc_seq == seq)			\
			return (ret);				\
								\
		/* Sequence changed; re-sync. */		\
	}							\
	/* Fallback to the generic rdmsr. */			\
	return (rdmsr(MSR_HV_TIME_REF_COUNT));			\
}								\
struct __hack

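/*
 * Two readers are generated: hyperv_tsc_lfence() for Intel CPUs, where
 * LFENCE is sufficient to order RDTSC, and hyperv_tsc_mfence() for AMD
 * CPUs, where MFENCE is used instead.
 */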
HYPERV_TSC(lfence);
HYPERV_TSC(mfence);

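/*
 * cputimer count methods: add the cputimer base to the reference TSC
 * reading so the timer continues from the sysclock value that was
 * current when it was installed (see the construct method below).
 */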
static sysclock_t
hyperv_tsc_cputimer_count_lfence(void)
{
	uint64_t val;

	val = hyperv_tsc_lfence();
	return (val + hyperv_tsc_cputimer.base);
}

static sysclock_t
hyperv_tsc_cputimer_count_mfence(void)
{
	uint64_t val;

	val = hyperv_tsc_mfence();
	return (val + hyperv_tsc_cputimer.base);
}

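/*
 * Pick a base so that the first reading of this timer matches the
 * sysclock value of the timer being replaced.
 */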
static void
hyperv_tsc_cputimer_construct(struct cputimer *timer, sysclock_t oldclock)
{
	timer->base = 0;
	timer->base = oldclock - timer->count();
}

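/*
 * Machine dependent initialization: if the partition reference counter
 * and the reference TSC page are both available (and the CPU has SSE2
 * for lfence/mfence), allocate a reference TSC page, enable it through
 * MSR_HV_REFERENCE_TSC, and register the Hyper-V cputimer, cpucounter
 * and 64-bit time counter backed by it.
 */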
void
hyperv_md_init(void)
{
	hyperv_tc64_t tc64 = NULL;
	uint64_t val, orig;

	if ((hyperv_features &
	     (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC)) !=
	    (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC) ||
	    (cpu_feature & CPUID_SSE2) == 0)	/* SSE2 for mfence/lfence */
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		hyperv_tsc_cputimer.count = hyperv_tsc_cputimer_count_mfence;
		tc64 = hyperv_tsc_mfence;
		break;

	case CPU_VENDOR_INTEL:
		hyperv_tsc_cputimer.count = hyperv_tsc_cputimer_count_lfence;
		tc64 = hyperv_tsc_lfence;
		break;

	default:
		/* Unsupported CPU vendor. */
		return;
	}
	KASSERT(tc64 != NULL, ("tc64 is not set"));
	hyperv_tsc_cpucounter.count = tc64;

	hyperv_ref_tsc.tsc_ref = hyperv_dmamem_alloc(NULL, PAGE_SIZE, 0,
	    sizeof(struct hyperv_reftsc), &hyperv_ref_tsc.tsc_ref_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (hyperv_ref_tsc.tsc_ref == NULL) {
		kprintf("hyperv: reftsc page allocation failed\n");
		return;
	}

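	/*
	 * Enable the reference TSC page: MSR_HV_REFERENCE_TSC holds the
	 * enable bit, reserved bits that must be preserved, and the page
	 * frame number of the reference TSC page.
	 */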
	orig = rdmsr(MSR_HV_REFERENCE_TSC);
	val = MSR_HV_REFTSC_ENABLE | (orig & MSR_HV_REFTSC_RSVD_MASK) |
	    ((hyperv_ref_tsc.tsc_ref_dma.hv_paddr >> PAGE_SHIFT) <<
	     MSR_HV_REFTSC_PGSHIFT);
	wrmsr(MSR_HV_REFERENCE_TSC, val);

	/* Register Hyper-V reference TSC cputimers. */
	cputimer_register(&hyperv_tsc_cputimer);
	cputimer_select(&hyperv_tsc_cputimer, 0);
	cpucounter_register(&hyperv_tsc_cpucounter);
	hyperv_tc64_saved = hyperv_tc64;
	hyperv_tc64 = tc64;
}

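/*
 * Undo hyperv_md_init(): deregister the cputimer, restore the previous
 * hyperv_tc64 handler, disable the reference TSC page and free it.
 */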
void
hyperv_md_uninit(void)
{
	if (hyperv_ref_tsc.tsc_ref != NULL) {
		uint64_t val;

		/* Deregister Hyper-V reference TSC cputimer. */
		cputimer_deregister(&hyperv_tsc_cputimer);
		/* Revert tc64 change. */
		hyperv_tc64 = hyperv_tc64_saved;

		val = rdmsr(MSR_HV_REFERENCE_TSC);
		wrmsr(MSR_HV_REFERENCE_TSC, val & MSR_HV_REFTSC_RSVD_MASK);

		hyperv_dmamem_free(&hyperv_ref_tsc.tsc_ref_dma,
		    hyperv_ref_tsc.tsc_ref);
		hyperv_ref_tsc.tsc_ref = NULL;
	}
}