/*-
 * Copyright (c) 2015 EMC Corporation
 * Copyright (c) 2005 Antoine Brodin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <machine/pcb.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <x86/stack.h>

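/*
 * Accessors for the PCB-saved frame pointer and for the trapframe flags,
 * frame pointer and program counter, hiding the register-name differences
 * between i386 and amd64.
 */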
#ifdef __i386__
#define	PCB_FP(pcb)	((pcb)->pcb_ebp)
#define	TF_FLAGS(tf)	((tf)->tf_eflags)
#define	TF_FP(tf)	((tf)->tf_ebp)
#define	TF_PC(tf)	((tf)->tf_eip)

typedef struct i386_frame *x86_frame_t;
#else
#define	PCB_FP(pcb)	((pcb)->pcb_rbp)
#define	TF_FLAGS(tf)	((tf)->tf_rflags)
#define	TF_FP(tf)	((tf)->tf_rbp)
#define	TF_PC(tf)	((tf)->tf_rip)

typedef struct amd64_frame *x86_frame_t;
#endif

#ifdef SMP
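/*
 * Handshake state used to capture the stack of a thread running on another
 * CPU: the requester publishes the destination stack buffer, and the IPI
 * handler records which thread it actually interrupted.  intr_lock
 * serializes concurrent requesters.
 */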
static struct stack *stack_intr_stack;
static struct thread *stack_intr_td;
static struct mtx intr_lock;
MTX_SYSINIT(intr_lock, &intr_lock, "stack intr", MTX_DEF);
#endif

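/*
 * Walk the frame-pointer chain rooted at "fp", recording each kernel return
 * address in "st".  The walk stops when a frame falls outside the thread's
 * kernel stack, a return address leaves the kernel, the next frame does not
 * lie at a higher address, or the stack buffer is full.
 */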
static void __nosanitizeaddress __nosanitizememory
stack_capture(struct thread *td, struct stack *st, register_t fp)
{
	x86_frame_t frame;
	vm_offset_t callpc;

	stack_zero(st);
	frame = (x86_frame_t)fp;
	while (1) {
		if (!kstack_contains(td, (vm_offset_t)frame, sizeof(*frame)))
			break;
		callpc = frame->f_retaddr;
		if (!INKERNEL(callpc))
			break;
		if (stack_put(st, callpc) == -1)
			break;
		if (frame->f_frame <= frame)
			break;
		frame = frame->f_frame;
	}
}

#ifdef SMP
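/*
 * IPI_TRACE handler: runs on the interrupted CPU and captures the stack of
 * whatever thread was executing there, starting from the frame pointer saved
 * in the interrupt trapframe.  Storing the thread pointer last, with release
 * semantics, signals the requester that the capture is complete.
 */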
void
stack_capture_intr(void)
{
	struct thread *td;

	td = curthread;
	stack_capture(td, stack_intr_stack, TF_FP(td->td_intr_frame));
	atomic_store_rel_ptr((void *)&stack_intr_td, (uintptr_t)td);
}
#endif

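/*
 * Capture the kernel call stack of an arbitrary thread.  A thread that is
 * not running is traced directly from the frame pointer saved in its PCB.
 * A thread running on another CPU is traced by sending that CPU an IPI and
 * letting it capture its own stack; the request is retried until the IPI
 * interrupts the intended thread or the thread stops running.  Returns 0 on
 * success or EBUSY if no usable trace could be obtained.
 */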
int
stack_save_td(struct stack *st, struct thread *td)
{
	int cpuid, error;
	bool done;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SWAPPED(td),
	    ("stack_save_td: thread %p is swapped", td));
	if (TD_IS_RUNNING(td) && td != curthread)
		PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);

	if (td == curthread) {
		stack_save(st);
		return (0);
	}

	for (done = false, error = 0; !done;) {
		if (!TD_IS_RUNNING(td)) {
			/*
			 * The thread will not start running so long as we hold
			 * its lock.
			 */
			stack_capture(td, st, PCB_FP(td->td_pcb));
			error = 0;
			break;
		}

#ifdef SMP
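		/*
		 * The thread is running on another CPU.  Drop its thread lock
		 * while we ask that CPU to capture the stack on our behalf;
		 * the proc lock (asserted above) keeps the thread from
		 * exiting in the meantime.
		 */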
		thread_unlock(td);
		cpuid = atomic_load_int(&td->td_oncpu);
		if (cpuid == NOCPU) {
			cpu_spinwait();
		} else {
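			/*
			 * Publish the destination stack buffer and send an
			 * IPI to the CPU the thread was last seen running on.
			 * The handler captures the stack there and then sets
			 * stack_intr_td to the thread it interrupted.
			 */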
			mtx_lock(&intr_lock);
			stack_intr_td = NULL;
			stack_intr_stack = st;
			ipi_cpu(cpuid, IPI_TRACE);
			while (atomic_load_acq_ptr((void *)&stack_intr_td) ==
			    (uintptr_t)NULL)
				cpu_spinwait();
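			/*
			 * If we interrupted the thread we were after, the
			 * capture is done; an empty trace means its frame
			 * pointer could not be followed, so report EBUSY.
			 * Otherwise the thread was switched out before the
			 * IPI arrived and we retry.
			 */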
			if (stack_intr_td == td) {
				done = true;
				error = st->depth > 0 ? 0 : EBUSY;
			}
			stack_intr_td = NULL;
			mtx_unlock(&intr_lock);
		}
		thread_lock(td);
#else
		(void)cpuid;
		KASSERT(0, ("%s: multiple running threads", __func__));
#endif
	}

	return (error);
}

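/*
 * Capture the current thread's own kernel stack, starting from the caller of
 * stack_save().
 */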
void
stack_save(struct stack *st)
{
	register_t fp;

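	/*
	 * Read our own frame pointer; the first return address recorded will
	 * be the one in stack_save()'s caller.
	 */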
#ifdef __i386__
	__asm __volatile("movl %%ebp,%0" : "=g" (fp));
#else
	__asm __volatile("movq %%rbp,%0" : "=g" (fp));
#endif
	stack_capture(curthread, st, fp);
}
177