/* SPDX-License-Identifier: GPL-2.0 */
#ifndef XEN_OPS_H
#define XEN_OPS_H

#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/irqreturn.h>
#include <linux/linkage.h>

#include <xen/interface/xenpmu.h>
#include <xen/xen-ops.h>

#include <asm/page.h>

#include <trace/events/xen.h>

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_failsafe_callback[];

void xen_entry_SYSENTER_compat(void);
#ifdef CONFIG_X86_64
void xen_entry_SYSCALL_64(void);
void xen_entry_SYSCALL_compat(void);
#endif

extern void *xen_initial_gdt;

struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);

DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);

extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;

void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void __init xen_reserve_special_pages(void);
void __init xen_pt_check_e820(void);

void xen_mm_pin_all(void);
void xen_mm_unpin_all(void);
#ifdef CONFIG_X86_64
void __init xen_relocate_p2m(void);
#endif
void __init xen_do_remap_nonram(void);
void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
				 unsigned long size);

void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component);
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
void __init xen_inv_extra_mem(void);
void __init xen_remap_memory(void);
phys_addr_t __init xen_find_free_area(phys_addr_t size);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
void xen_banner(void);
void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);

void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);

void __init xen_build_dynamic_phys_to_machine(void);
void __init xen_vmalloc_p2m_tree(void);

void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
void xen_init_time_ops(void);
void xen_hvm_init_time_ops(void);

bool xen_vcpu_stolen(int vcpu);

void xen_vcpu_setup(int cpu);
void xen_vcpu_info_reset(int cpu);
void xen_setup_vcpu_info_placement(void);

#ifdef CONFIG_SMP
void xen_smp_init(void);
void __init xen_hvm_smp_init(void);

extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
static inline void xen_hvm_smp_init(void) {}
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init xen_init_spinlocks(void);
void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
#else
static inline void xen_init_spinlocks(void)
{
}
static inline void xen_init_lock_cpu(int cpu)
{
}
static inline void xen_uninit_lock_cpu(int cpu)
{
}
#endif

struct dom0_vga_console_info;

#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
			 struct screen_info *);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
				       size_t size, struct screen_info *si)
{
}
#endif

void xen_add_preferred_consoles(void);

void __init xen_init_apic(void);

#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(struct boot_params *boot_params);
#else
static inline void __init xen_efi_init(struct boot_params *boot_params)
{
}
#endif

__visible void xen_irq_enable_direct(void);
__visible void xen_irq_disable_direct(void);
__visible unsigned long xen_save_fl_direct(void);

__visible unsigned long xen_read_cr2(void);
__visible unsigned long xen_read_cr2_direct(void);

/* These are not functions, and cannot be called normally */
__visible void xen_iret(void);

extern int xen_panic_handler_init(void);

int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
		    int (*cpu_dead_cb)(unsigned int));

void xen_pin_vcpu(int cpu);

void xen_emergency_restart(void);
void xen_force_evtchn_callback(void);

#ifdef CONFIG_XEN_PV
void xen_pv_pre_suspend(void);
void xen_pv_post_suspend(int suspend_cancelled);
void xen_start_kernel(struct start_info *si);
#else
static inline void xen_pv_pre_suspend(void) {}
static inline void xen_pv_post_suspend(int suspend_cancelled) {}
#endif

#ifdef CONFIG_XEN_PVHVM
void xen_hvm_post_suspend(int suspend_cancelled);
#else
static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
#endif

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
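/*
 * Rough illustration of the ratio, assuming the usual 64-byte
 * struct page and 4 KiB pages, so the page structures for a region
 * cost about 1/64 of its size and (for extra memory) are allocated
 * from base memory:
 *
 *	extra = 10 * base  ->  page structs use ~10/64, i.e. ~16% of base
 *	extra = 64 * base  ->  page structs would consume all of base
 *
 * Capping at 10x therefore keeps the struct page overhead for the
 * extra region to a modest fraction of the base allocation.
 */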

void xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns);

struct dentry * __init xen_init_debugfs(void);

enum pt_level {
	PT_PGD,
	PT_P4D,
	PT_PUD,
	PT_PMD,
	PT_PTE
};

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
unsigned long xen_read_cr2_direct(void);
void xen_init_mmu_ops(void);
void xen_hvm_init_mmu_ops(void);

/* Multicalls */
struct multicall_space
{
	struct multicall_entry *mc;
	void *args;
};

/* Allocate room for a multicall and its args */
struct multicall_space __xen_mc_entry(size_t args);

DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* Call to start a batch of multiple __xen_mc_entry()s.  Must be
   paired with xen_mc_issue() */
static inline void xen_mc_batch(void)
{
	unsigned long flags;

	/* need to disable interrupts until this entry is complete */
	local_irq_save(flags);
	trace_xen_mc_batch(xen_get_lazy_mode());
	__this_cpu_write(xen_mc_irq_flags, flags);
}

static inline struct multicall_space xen_mc_entry(size_t args)
{
	xen_mc_batch();
	return __xen_mc_entry(args);
}

/* Flush all pending multicalls */
void xen_mc_flush(void);

/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
	trace_xen_mc_issue(mode);

	if ((xen_get_lazy_mode() & mode) == 0)
		xen_mc_flush();

	/* restore flags saved in xen_mc_batch */
	local_irq_restore(this_cpu_read(xen_mc_irq_flags));
}
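
/*
 * Typical usage, sketched from callers such as the PV MMU code (the
 * TLB-flush op and the XEN_LAZY_MMU mode below are illustrative, not
 * part of this API):
 *
 *	struct multicall_space mcs;
 *	struct mmuext_op *op;
 *
 *	mcs = xen_mc_entry(sizeof(*op));
 *	op = mcs.args;
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(XEN_LAZY_MMU);
 *
 * xen_mc_entry() implies xen_mc_batch(), so interrupts stay disabled
 * until the matching xen_mc_issue(), which flushes the batch unless
 * the given lazy mode is currently active.
 */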

/* Set up a callback to be called when the current batch is flushed */
void xen_mc_callback(void (*fn)(void *), void *data);

/*
 * Try to extend the arguments of the previous multicall command.  The
 * previous command's op must match the op passed in; if it does, the
 * argument space allocated to that multicall entry is extended by
 * arg_size bytes.
 *
 * On success the returned multicall_space has mc pointing to the
 * command and args pointing to the newly allocated space; on failure
 * mc is NULL.
 */
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
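
/*
 * Sketch of the intended pattern, modelled on the PV MMU helpers that
 * batch mmu_update requests (illustrative only): if the previous
 * entry's op matches, bump its count argument and append the new
 * request, otherwise start a fresh entry.  Here u is a
 * struct mmu_update * and update points at the request being added:
 *
 *	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 *	if (mcs.mc != NULL) {
 *		mcs.mc->args[1]++;
 *	} else {
 *		mcs = __xen_mc_entry(sizeof(*u));
 *		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
 *	}
 *	u = mcs.args;
 *	*u = *update;
 */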

/* Do percpu data initialization for multicalls. */
void mc_percpu_init(unsigned int cpu);

extern bool is_xen_pmu;

irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
#ifdef CONFIG_XEN_HAVE_VPMU
void xen_pmu_init(int cpu);
void xen_pmu_finish(int cpu);
#else
static inline void xen_pmu_init(int cpu) {}
static inline void xen_pmu_finish(int cpu) {}
#endif
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
int pmu_apic_update(uint32_t reg);
unsigned long long xen_read_pmc(int counter);

#ifdef CONFIG_SMP

void asm_cpu_bringup_and_idle(void);
asmlinkage void cpu_bringup_and_idle(void);

extern void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector);
extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector);
extern void xen_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);

extern int xen_smp_intr_init(unsigned int cpu);
extern void xen_smp_intr_free(unsigned int cpu);
int xen_smp_intr_init_pv(unsigned int cpu);
void xen_smp_intr_free_pv(unsigned int cpu);

void xen_smp_count_cpus(void);
void xen_smp_cpus_done(unsigned int max_cpus);

void xen_smp_send_reschedule(int cpu);
void xen_smp_send_call_function_ipi(const struct cpumask *mask);
void xen_smp_send_call_function_single_ipi(int cpu);

void __noreturn xen_cpu_bringup_again(unsigned long stack);

struct xen_common_irq {
	int irq;
	char *name;
};
#else /* CONFIG_SMP */

static inline int xen_smp_intr_init(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free(unsigned int cpu) {}

static inline int xen_smp_intr_init_pv(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
static inline void xen_smp_count_cpus(void) { }
#endif /* CONFIG_SMP */

#endif /* XEN_OPS_H */