xref: /freebsd/sys/dev/cpuctl/cpuctl.c (revision d6b92ffa)
/*-
 * Copyright (c) 2006-2008 Stanislav Sedov <stas@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/pmckern.h>
#include <sys/cpuctl.h>

#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

static d_open_t cpuctl_open;
static d_ioctl_t cpuctl_ioctl;

#define	CPUCTL_VERSION 1

#ifdef CPUCTL_DEBUG
# define	DPRINTF(format,...) printf(format, __VA_ARGS__);
#else
# define	DPRINTF(...)
#endif

#define	UCODE_SIZE_MAX	(4 * 1024 * 1024)

static int cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd,
    struct thread *td);
static int cpuctl_do_cpuid(int cpu, cpuctl_cpuid_args_t *data,
    struct thread *td);
static int cpuctl_do_cpuid_count(int cpu, cpuctl_cpuid_count_args_t *data,
    struct thread *td);
static int cpuctl_do_update(int cpu, cpuctl_update_args_t *data,
    struct thread *td);
static int update_intel(int cpu, cpuctl_update_args_t *args,
    struct thread *td);
static int update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td);
static int update_via(int cpu, cpuctl_update_args_t *args,
    struct thread *td);

static struct cdev **cpuctl_devs;
static MALLOC_DEFINE(M_CPUCTL, "cpuctl", "CPUCTL buffer");

static struct cdevsw cpuctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       cpuctl_open,
        .d_ioctl =      cpuctl_ioctl,
        .d_name =       "cpuctl",
};
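
/*
 * One cpuctl%d character device node is created for each enabled CPU at
 * module load time (see cpuctl_modevent() below); dev2unit() on that node
 * recovers the CPU number a request targets.  The ioctl commands and
 * argument structures handled here come from the included <sys/cpuctl.h>.
 */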

/*
 * Check whether the specified cpu is enabled.
 */
static int
cpu_enabled(int cpu)
{

	return (pmc_cpu_is_disabled(cpu) == 0);
}

/*
 * Check if the current thread is bound to a specific cpu.
 */
static int
cpu_sched_is_bound(struct thread *td)
{
	int ret;

	thread_lock(td);
	ret = sched_is_bound(td);
	thread_unlock(td);
	return (ret);
}

/*
 * Switch to target cpu to run.
 */
static void
set_cpu(int cpu, struct thread *td)
{

	KASSERT(cpu >= 0 && cpu <= mp_maxid && cpu_enabled(cpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	thread_lock(td);
	sched_bind(td, cpu);
	thread_unlock(td);
	KASSERT(td->td_oncpu == cpu,
	    ("[cpuctl,%d]: cannot bind to target cpu %d on cpu %d", __LINE__,
	    cpu, td->td_oncpu));
}

static void
restore_cpu(int oldcpu, int is_bound, struct thread *td)
{

	KASSERT(oldcpu >= 0 && oldcpu <= mp_maxid && cpu_enabled(oldcpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
	thread_lock(td);
	if (is_bound == 0)
		sched_unbind(td);
	else
		sched_bind(td, oldcpu);
	thread_unlock(td);
}
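
/*
 * The per-CPU operations below all follow the same pattern built from the
 * helpers above: remember td->td_oncpu and whether the thread was already
 * bound (cpu_sched_is_bound()), bind to the target CPU with set_cpu(),
 * run the CPUID or MSR access there, and then restore_cpu() either unbinds
 * the thread or rebinds it to the CPU it was pinned to before.
 */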

int
cpuctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	int cpu, ret;

	cpu = dev2unit(dev);
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
		DPRINTF("[cpuctl,%d]: bad cpu number %d\n", __LINE__, cpu);
		return (ENXIO);
	}
	/* Require write flag for "write" requests. */
	if ((cmd == CPUCTL_MSRCBIT || cmd == CPUCTL_MSRSBIT ||
	    cmd == CPUCTL_UPDATE || cmd == CPUCTL_WRMSR) &&
	    (flags & FWRITE) == 0)
		return (EPERM);
	switch (cmd) {
	case CPUCTL_RDMSR:
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_MSRSBIT:
	case CPUCTL_MSRCBIT:
	case CPUCTL_WRMSR:
		ret = priv_check(td, PRIV_CPUCTL_WRMSR);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_CPUID:
		ret = cpuctl_do_cpuid(cpu, (cpuctl_cpuid_args_t *)data, td);
		break;
	case CPUCTL_UPDATE:
		ret = priv_check(td, PRIV_CPUCTL_UPDATE);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_update(cpu, (cpuctl_update_args_t *)data, td);
		break;
	case CPUCTL_CPUID_COUNT:
		ret = cpuctl_do_cpuid_count(cpu,
		    (cpuctl_cpuid_count_args_t *)data, td);
		break;
	default:
		ret = EINVAL;
		break;
	}
fail:
	return (ret);
}
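
/*
 * Illustrative sketch (not part of the driver): a userland consumer opens
 * the per-CPU node and issues the ioctls dispatched above, e.g. to read an
 * MSR on CPU 0 (assuming <fcntl.h>, <stdio.h>, <sys/ioctl.h> and
 * <sys/cpuctl.h> are included, and msr_number is the register of interest):
 *
 *	cpuctl_msr_args_t a = { .msr = msr_number };
 *	int fd = open("/dev/cpuctl0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, CPUCTL_RDMSR, &a) == 0)
 *		printf("%#jx\n", (uintmax_t)a.data);
 *
 * Write-type requests (CPUCTL_WRMSR, CPUCTL_MSRSBIT, CPUCTL_MSRCBIT and
 * CPUCTL_UPDATE) additionally need the descriptor opened for writing and
 * must pass the priv_check() calls above.
 */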

/*
 * Actually perform cpuid operation.
 */
static int
cpuctl_do_cpuid_count(int cpu, cpuctl_cpuid_count_args_t *data,
    struct thread *td)
{
	int is_bound = 0;
	int oldcpu;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

	/* Explicitly clear cpuid data to avoid returning stale info. */
	bzero(data->data, sizeof(data->data));
	DPRINTF("[cpuctl,%d]: retrieving cpuid lev %#0x type %#0x for %d cpu\n",
	    __LINE__, data->level, data->level_type, cpu);
#ifdef __i386__
	if (cpu_id == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	cpuid_count(data->level, data->level_type, data->data);
	restore_cpu(oldcpu, is_bound, td);
	return (0);
}

static int
cpuctl_do_cpuid(int cpu, cpuctl_cpuid_args_t *data, struct thread *td)
{
	cpuctl_cpuid_count_args_t cdata;
	int error;

	cdata.level = data->level;
	/* Override the level type. */
	cdata.level_type = 0;
	error = cpuctl_do_cpuid_count(cpu, &cdata, td);
	bcopy(cdata.data, data->data, sizeof(data->data)); /* Ignore error */
	return (error);
}

/*
 * Actually perform MSR operations.
 */
static int
cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd, struct thread *td)
{
	uint64_t reg;
	int is_bound = 0;
	int oldcpu;
	int ret;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	DPRINTF("[cpuctl,%d]: operating on MSR %#0x for %d cpu\n", __LINE__,
	    data->msr, cpu);
#ifdef __i386__
	if ((cpu_feature & CPUID_MSR) == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	if (cmd == CPUCTL_RDMSR) {
		data->data = 0;
		ret = rdmsr_safe(data->msr, &data->data);
	} else if (cmd == CPUCTL_WRMSR) {
		ret = wrmsr_safe(data->msr, data->data);
	} else if (cmd == CPUCTL_MSRSBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg | data->data);
		critical_exit();
	} else if (cmd == CPUCTL_MSRCBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg & ~data->data);
		critical_exit();
	} else
		panic("[cpuctl,%d]: unknown operation requested: %lu",
		    __LINE__, cmd);
	restore_cpu(oldcpu, is_bound, td);
	return (ret);
}
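
/*
 * Note on the bit operations above: data->data is treated as a plain bit
 * mask.  CPUCTL_MSRSBIT ORs the mask into the current MSR value and
 * CPUCTL_MSRCBIT clears the bits in the mask, each as a read-modify-write
 * performed inside a critical section.  For example, a mask of (1ULL << 11)
 * would set (or clear) only bit 11 of the selected MSR.
 */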

/*
 * Actually perform microcode update.
 */
static int
cpuctl_do_update(int cpu, cpuctl_update_args_t *data, struct thread *td)
{
	cpuctl_cpuid_args_t args = {
		.level = 0,
	};
	char vendor[13];
	int ret;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	DPRINTF("[cpuctl,%d]: updating microcode on cpu %d\n", __LINE__, cpu);

	ret = cpuctl_do_cpuid(cpu, &args, td);
	if (ret != 0)
		return (ret);
	((uint32_t *)vendor)[0] = args.data[1];
	((uint32_t *)vendor)[1] = args.data[3];
	((uint32_t *)vendor)[2] = args.data[2];
	vendor[12] = '\0';
	if (strncmp(vendor, INTEL_VENDOR_ID, sizeof(INTEL_VENDOR_ID)) == 0)
		ret = update_intel(cpu, data, td);
	else if (strncmp(vendor, AMD_VENDOR_ID, sizeof(AMD_VENDOR_ID)) == 0)
		ret = update_amd(cpu, data, td);
	else if (strncmp(vendor, CENTAUR_VENDOR_ID, sizeof(CENTAUR_VENDOR_ID))
	    == 0)
		ret = update_via(cpu, data, td);
	else
		ret = ENXIO;
	return (ret);
}

static int
update_intel(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	void *ptr;
	uint64_t rev0, rev1;
	uint32_t tmp[4];
	int is_bound;
	int oldcpu;
	int ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 16 byte alignment is required.  Rely on the fact that
	 * malloc(9) returns memory aligned to at least the size of
	 * the allocation.
	 */
	ptr = malloc(args->size + 16, M_CPUCTL, M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto fail;
	}
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	critical_enter();
	rdmsr_safe(MSR_BIOS_SIGN, &rev0); /* Get current microcode revision. */

	/*
	 * Perform update.
	 */
	wrmsr_safe(MSR_BIOS_UPDT_TRIG, (uintptr_t)(ptr));
	wrmsr_safe(MSR_BIOS_SIGN, 0);

	/*
	 * Serialize instruction flow.
	 */
	do_cpuid(0, tmp);
	critical_exit();
	rdmsr_safe(MSR_BIOS_SIGN, &rev1); /* Get new microcode revision. */
	restore_cpu(oldcpu, is_bound, td);
	if (rev1 > rev0)
		ret = 0;
	else
		ret = EEXIST;
fail:
	free(ptr, M_CPUCTL);
	return (ret);
}

/*
 * NB: MSR 0xc0010020, MSR_K8_UCODE_UPDATE, is not documented by AMD.
 * Coreboot, illumos and Linux source code was used to understand
 * its workings.
 */
static void
amd_ucode_wrmsr(void *ucode_ptr)
{
	uint32_t tmp[4];

	wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
	do_cpuid(0, tmp);
}

static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	void *ptr;
	int ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 16 byte alignment is required.  Rely on the fact that
	 * malloc(9) returns memory aligned to at least the size of
	 * the allocation.
	 */
	ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto fail;
	}
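	/*
	 * Unlike the Intel and VIA paths, the image is applied on every CPU
	 * at once: smp_rendezvous() runs amd_ucode_wrmsr() on all CPUs with
	 * the same kernel copy of the update data.
	 */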
	smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
	ret = 0;
fail:
	free(ptr, M_CPUCTL);
	return (ret);
}

static int
update_via(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	void *ptr;
	uint64_t rev0, rev1, res;
	uint32_t tmp[4];
	int is_bound;
	int oldcpu;
	int ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 4 byte alignment required.
	 */
	ptr = malloc(args->size, M_CPUCTL, M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto fail;
	}
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	critical_enter();
	rdmsr_safe(MSR_BIOS_SIGN, &rev0); /* Get current microcode revision. */

	/*
	 * Perform update.
	 */
	wrmsr_safe(MSR_BIOS_UPDT_TRIG, (uintptr_t)(ptr));
	do_cpuid(1, tmp);

	/*
	 * The result is in the low byte of MSR FCR5:
	 * 0x00: No update has been attempted since RESET.
	 * 0x01: The last attempted update was successful.
	 * 0x02: The last attempted update was unsuccessful due to a bad
	 *       environment. No update was loaded and any preexisting
	 *       patches are still active.
	 * 0x03: The last attempted update was not applicable to this processor.
	 *       No update was loaded and any preexisting patches are still
	 *       active.
	 * 0x04: The last attempted update was not successful due to an invalid
	 *       update data block. No update was loaded and any preexisting
	 *       patches are still active.
	 */
	rdmsr_safe(0x1205, &res);
	res &= 0xff;
	critical_exit();
	rdmsr_safe(MSR_BIOS_SIGN, &rev1); /* Get new microcode revision. */
	restore_cpu(oldcpu, is_bound, td);

	DPRINTF("[cpuctl,%d]: rev0=%x rev1=%x res=%x\n", __LINE__,
	    (unsigned)(rev0 >> 32), (unsigned)(rev1 >> 32), (unsigned)res);

	if (res != 0x01)
		ret = EINVAL;
	else
		ret = 0;
fail:
	free(ptr, M_CPUCTL);
	return (ret);
}

int
cpuctl_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
{
	int ret = 0;
	int cpu;

	cpu = dev2unit(dev);
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
		DPRINTF("[cpuctl,%d]: incorrect cpu number %d\n", __LINE__,
		    cpu);
		return (ENXIO);
	}
	if (flags & FWRITE)
		ret = securelevel_gt(td->td_ucred, 0);
	return (ret);
}

static int
cpuctl_modevent(module_t mod __unused, int type, void *data __unused)
{
	int cpu;

	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("cpuctl: access to MSR registers/cpuid info.\n");
		cpuctl_devs = malloc(sizeof(*cpuctl_devs) * (mp_maxid + 1), M_CPUCTL,
		    M_WAITOK | M_ZERO);
		CPU_FOREACH(cpu)
			if (cpu_enabled(cpu))
				cpuctl_devs[cpu] = make_dev(&cpuctl_cdevsw, cpu,
				    UID_ROOT, GID_KMEM, 0640, "cpuctl%d", cpu);
		break;
	case MOD_UNLOAD:
		CPU_FOREACH(cpu) {
			if (cpuctl_devs[cpu] != NULL)
				destroy_dev(cpuctl_devs[cpu]);
		}
		free(cpuctl_devs, M_CPUCTL);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

DEV_MODULE(cpuctl, cpuctl_modevent, NULL);
MODULE_VERSION(cpuctl, CPUCTL_VERSION);