xref: /linux/arch/x86/xen/smp_hvm.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/smp.h>
3 
4 #include <xen/events.h>
5 
6 #include "xen-ops.h"
7 #include "smp.h"
8 
9 
/*
 * Prepare the boot CPU for SMP bring-up when running as a Xen HVM guest.
 * Runs the native preparation first, then layers the Xen-specific pieces
 * (vcpu_info registration and PV spinlock setup) on top.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	/* Boot-CPU preparation must execute on CPU 0 itself. */
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated. Hence we need to set this up
	 * before the core kernel is being patched. Otherwise we will have
	 * only modules patched but not core code.
	 */
	xen_init_spinlocks();
}
29 
30 static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
31 {
32 	int cpu;
33 
34 	native_smp_prepare_cpus(max_cpus);
35 	WARN_ON(xen_smp_intr_init(0));
36 
37 	xen_init_lock_cpu(0);
38 
39 	for_each_possible_cpu(cpu) {
40 		if (cpu == 0)
41 			continue;
42 
43 		/* Set default vcpu_id to make sure that we don't use cpu-0's */
44 		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
45 	}
46 }
47 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Tear down the Xen per-CPU state of an offlined CPU, but only after the
 * common hotplug code reports the CPU as fully dead.
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu))
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* CPU hotplug is compiled out, so a CPU dying here is a hard bug. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
63 
64 void __init xen_hvm_smp_init(void)
65 {
66 	if (!xen_have_vector_callback)
67 		return;
68 
69 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
70 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
71 	smp_ops.cpu_die = xen_hvm_cpu_die;
72 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
73 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
74 	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
75 	smp_ops.smp_cpus_done = xen_smp_cpus_done;
76 }
77