xref: /linux/arch/x86/entry/vdso/vma.c (revision dd093fb0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

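/*
 * Resolve the vdso_data area within a given vvar page, using the
 * _vdso_data_offset constant emitted by EMIT_VVAR() above.
 */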
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

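/*
 * Boot-time setup for a vDSO image: check that the image size is
 * page-aligned and that all clock modes fit into the 32-bit
 * vclocks_used mask, then apply instruction alternatives to the text.
 */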
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

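/*
 * Fault handler for the [vdso] mapping: hand back the page of the
 * kernel-side image blob that corresponds to the faulting offset.
 */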
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

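/*
 * If the 32-bit vDSO is being moved while a task sits at the int80
 * landing pad, rewrite the saved IP so it points into the new mapping.
 */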
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

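/*
 * mremap() callback for the [vdso] mapping: record the new location in
 * mm->context and fix up any in-flight int80 landing address.
 */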
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * a non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

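/*
 * Fault handler for the [vvar] mapping: install PFN mappings for the
 * vvar, pvclock, hvclock and timens pages on demand, based on the
 * symbol offsets recorded in the vDSO image.
 */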
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page only
			 * carries clock offsets relative to VVAR, so the
			 * vDSO code will fault the VVAR page in shortly
			 * anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

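/* Map the 64-bit or x32 vDSO at a randomized address above the stack. */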
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

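/*
 * Map the vDSO at a caller-chosen address, refusing if a vdso or vvar
 * mapping already exists in the mm (used by the ARCH_MAP_VDSO_* prctls).
 */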
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check if we have already mapped the vdso blob - fail in that
	 * case to keep userspace from abusing install_special_mapping(),
	 * which may not do accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
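/*
 * Called by the ELF loader when a new binary is executed: map the
 * 64-bit vDSO (if enabled) into the new process at a randomized address.
 */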
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

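/*
 * Report whether regs->ip points at one of the sigreturn landing pads
 * in the 32-bit vDSO, i.e. whether the current syscall is a vDSO-based
 * sigreturn.
 */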
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
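/* Parse the "vdso=" boot parameter, which toggles the 64-bit vDSO. */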
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */