xref: /linux/arch/x86/entry/vdso/vma.c (revision b757959f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

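/*
 * Re-include <asm/vvar.h> with EMIT_VVAR() redefined so that every vvar
 * declared there also emits a "<name>_offset" constant giving its position
 * inside the vvar page.  Roughly (illustrative only; the real offsets are
 * defined in asm/vvar.h):
 *
 *	EMIT_VVAR(_vdso_data, off)  =>  const size_t _vdso_data_offset = off;
 *
 * arch_get_vdso_data() below uses that constant to turn a vvar page pointer
 * into the vdso_data pointer inside it.
 */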
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

DEFINE_VVAR(struct vdso_data, _vdso_data);
DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data);

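/*
 * Bitmask (one bit per VDSO_CLOCKMODE_*) of vDSO clock modes that have been
 * put to use; vvar_fault() checks it via vclock_was_used() before exposing
 * the pvclock/hvclock pages, and the BUILD_BUG_ON() in init_vdso_image()
 * keeps the mask within a 32-bit unsigned int.
 */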
unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

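/*
 * Boot-time setup of a vDSO image: patch the alternative instructions
 * embedded in the blob so the mapped code matches the running CPU.  The
 * image is required to be a whole number of pages.
 */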
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

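/*
 * Fault handler for the [vdso] mapping: back the faulting offset with the
 * corresponding page of the kernel-resident vDSO blob.
 */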
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

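/*
 * mremap() callback for the [vdso] mapping: keep mm->context.vdso pointing
 * at the new location and, for the 32-bit vDSO, repair a return address
 * that still refers to the old mapping (see vdso_fix_landing() above).
 */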
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

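/*
 * Fault handler for the [vvar] mapping.  The vvar area sits at negative
 * offsets relative to the vDSO text (image->sym_vvar_start is negative);
 * each known symbol offset is backed by its matching physical page: the
 * vvar data page, the pvclock page, the Hyper-V TSC page, or the time
 * namespace page.  Anything else gets SIGBUS.
 */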
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page as well.  The timens page only
			 * holds clock offsets relative to VVAR, so the VVAR
			 * page will be faulted shortly by the vDSO code
			 * anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
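/*
 * Resulting layout (sketch): the vvar area is placed first and the vDSO
 * text follows it, so with addr returned by get_unmapped_area():
 *
 *	addr                           text_start = addr - sym_vvar_start
 *	| [vvar], -sym_vvar_start bytes | [vdso] text, image->size bytes |
 *
 * (sym_vvar_start is negative, hence the subtraction.)
 */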
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

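/*
 * Map @image at @addr unless a vDSO or vvar mapping is already installed in
 * the current mm, in which case return -EEXIST.  This is the path used, for
 * instance, by the ARCH_MAP_VDSO_* arch_prctl() requests.
 */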
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check whether we have already mapped a vdso blob and fail if so,
	 * to keep userspace from abusing install_special_mapping(), which
	 * may not do accounting and rlimits right.
	 * We could search for a VMA near context.vdso, but this is a
	 * slowpath, so let's explicitly check all VMAs to be completely
	 * sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

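/*
 * Called from the ELF loader during exec: map the vDSO variant matching the
 * binary's ABI (64-bit, x32 or ia32), honouring the vdso64_enabled and
 * vdso32_enabled knobs.
 */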
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, 0);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso(&vdso_image_x32, 0);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

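/*
 * Report whether the current ia32 syscall was issued from one of the 32-bit
 * vDSO sigreturn landing pads; callers such as syscall user dispatch use
 * this to let sigreturn through untouched.
 */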
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
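/*
 * "vdso=0" on the kernel command line disables mapping of the 64-bit vDSO;
 * any non-zero value leaves it enabled (the default).
 */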
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */