/*
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/asmacros.h,v 1.32 2006/10/28 06:04:29 bde Exp $
 */

#ifndef _CPU_ASMACROS_H_
#define _CPU_ASMACROS_H_

#include <sys/cdefs.h>
#include <machine/specialreg.h>

/* XXX too much duplication in various asm*.h's. */

/*
 * CNAME is used to manage the relationship between symbol names in C
 * and the equivalent assembly language names.  CNAME is given a name as
 * it would be used in a C program.  It expands to the equivalent assembly
 * language name.
 */
#define CNAME(csym)		csym

#define ALIGN_DATA	.p2align 3	/* 8 byte alignment, zero filled */
#ifdef GPROF
#define ALIGN_TEXT	.p2align 4,0x90	/* 16-byte alignment, nop filled */
#else
#define ALIGN_TEXT	.p2align 4,0x90	/* 16-byte alignment, nop filled */
#endif
#define SUPERALIGN_TEXT	.p2align 4,0x90	/* 16-byte alignment, nop filled */

#define GEN_ENTRY(name)		ALIGN_TEXT; .globl CNAME(name); \
				.type CNAME(name),@function; CNAME(name):
#define NON_GPROF_ENTRY(name)	GEN_ENTRY(name)
#define NON_GPROF_RET		.byte 0xc3	/* opcode for `ret' */

#define	END(name)		.size name, . - name

#ifdef GPROF
/*
 * __mcount is like [.]mcount except that it doesn't require its caller to
 * set up a frame pointer.  It must be called before pushing anything onto
 * the stack.  gcc should eventually generate code to call __mcount in most
 * cases.  This would make -pg in combination with -fomit-frame-pointer
 * useful.  gcc has a configuration variable PROFILE_BEFORE_PROLOGUE to
 * allow profiling before setting up the frame pointer, but this is
 * inadequate for good handling of special cases, e.g., -fpic works best
 * with profiling after the prologue.
 *
 * [.]mexitcount is a new function to support non-statistical profiling if an
 * accurate clock is available.  For C sources, calls to it are generated
 * by the FreeBSD extension `-mprofiler-epilogue' to gcc.  It is best to
 * call [.]mexitcount at the end of a function like the MEXITCOUNT macro does,
 * but gcc currently generates calls to it at the start of the epilogue to
 * avoid problems with -fpic.
 *
 * [.]mcount and __mcount may clobber the call-used registers and %eflags.
 * [.]mexitcount may clobber %ecx and %eflags.
 *
 * Cross-jumping makes non-statistical profiling timing more complicated.
 * It is handled in many cases by calling [.]mexitcount before jumping.  It
 * is handled for conditional jumps using CROSSJUMP() and CROSSJUMPTARGET().
 * It is handled for some fault-handling jumps by not sharing the exit
 * routine.
 *
 * ALTENTRY() must be before a corresponding ENTRY() so that it can jump to
 * the main entry point.  Note that alt entries are counted twice.  They
 * have to be counted as ordinary entries for gprof to get the call times
 * right for the ordinary entries.
 *
 * High local labels are used in macros to avoid clashes with local labels
 * in functions.
 *
 * Ordinary `ret' is used instead of a macro `RET' because there are a lot
 * of `ret's.  0xc3 is the opcode for `ret' (`#define ret ... ret' can't
 * be used because this file is sometimes preprocessed in traditional mode).
 * `ret' clobbers eflags but this doesn't matter.
 */
#define ALTENTRY(name)		GEN_ENTRY(name) ; MCOUNT ; MEXITCOUNT ; jmp 9f
#define	CROSSJUMP(jtrue, label, jfalse) \
	jfalse 8f; MEXITCOUNT; jmp __CONCAT(to,label); 8:
#define CROSSJUMPTARGET(label) \
	ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
#define ENTRY(name)		GEN_ENTRY(name) ; 9: ; MCOUNT
#define FAKE_MCOUNT(caller)	pushq caller ; call __mcount ; popq %rcx
#define MCOUNT			call __mcount
#define MCOUNT_LABEL(name)	GEN_ENTRY(name) ; nop ; ALIGN_TEXT
#ifdef GUPROF
#define MEXITCOUNT		call .mexitcount
#define ret			MEXITCOUNT ; NON_GPROF_RET
#else
#define MEXITCOUNT
#endif

#else /* !GPROF */
/*
 * ALTENTRY() has to align because it is before a corresponding ENTRY().
 * ENTRY() has to align too because there may be no ALTENTRY() before it.
 * If there is a previous ALTENTRY() then the alignment code for ENTRY()
 * is empty.
 */
#define ALTENTRY(name)		GEN_ENTRY(name)
#define	CROSSJUMP(jtrue, label, jfalse)	jtrue label
#define	CROSSJUMPTARGET(label)
#define ENTRY(name)		GEN_ENTRY(name)
#define FAKE_MCOUNT(caller)
#define MCOUNT
#define MCOUNT_LABEL(name)
#define MEXITCOUNT
#endif /* GPROF */
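
/*
 * Illustrative usage (a sketch only, not something defined or required by
 * this header): an assembly source that includes asmacros.h would
 * typically declare a function with ENTRY()/END().  The function name
 * below is hypothetical.
 *
 *	ENTRY(hypothetical_return_zero)
 *		xorl	%eax,%eax	# return (0)
 *		ret
 *	END(hypothetical_return_zero)
 *
 * With GPROF defined, ENTRY() also emits the MCOUNT call so the profiler
 * sees the entry; without GPROF it reduces to a plain GEN_ENTRY().
 */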

#ifdef LOCORE
/*
 * Convenience macro for declaring interrupt entry points.
 */
#define	IDTVEC(name)	ALIGN_TEXT; .globl __CONCAT(X,name); \
			.type __CONCAT(X,name),@function; __CONCAT(X,name):
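
/*
 * For example (illustrative only), IDTVEC(div) expands to an aligned,
 * global, function-typed label Xdiv, which the IDT setup code can then
 * install as the entry point for that vector.  The vector name is whatever
 * the machine-dependent code chooses; "div" is just an example.
 */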

/*
 * stack frame macro support - supports mmu isolation, swapgs, and
 * stack frame pushing and popping.
 */

/*
 * Kernel pmap isolation to work around the massive Intel mmu bug
 * that allows kernel memory to be sussed out due to speculative memory
 * reads and instruction execution creating timing differences that can
 * be detected by userland.  e.g. force a speculative read, speculatively
 * execute a cmp/branch sequence, detect the timing.  Iterate cmp $values
 * to suss out the content of speculatively read kernel memory.
 *
 * We do this by creating a trampoline area for all user->kernel and
 * kernel->user transitions.  The trampoline area allows us to limit
 * the reach into the kernel map, in the isolated version of the user
 * pmap, to JUST the trampoline area (for all cpus), the tss, and the
 * vector area.
 *
 * It is very important that these transitions not access any memory
 * outside of the trampoline page while the isolated user process pmap
 * is active in %cr3.
 *
 * The trampoline does not add much overhead when pmap isolation is
 * disabled, so we just run with it regardless.  Of course, when pmap
 * isolation is enabled, the %cr3 loads add 150-250ns to every system
 * call and, without PCID, also smash the TLB.
 *
 * KMMUENTER -	Executed by the trampoline when a user->kernel transition
 *		is detected.  The stack pointer points into the pcpu
 *		trampoline space and is available for register save/restore.
 *		Other registers have not yet been saved.  %gs points at
 *		the kernel pcpu structure.
 *
 *		Caller has already determined that a transition is in
 *		progress and has already issued the swapgs.  hwtf indicates
 *		how much hardware has already pushed.
 *
 * KMMUEXIT  -	Executed when a kernel->user transition is made.  The stack
 *		pointer points into the pcpu trampoline space and we are
 *		almost ready to iretq.  %gs still points at the kernel pcpu
 *		structure.
 *
 *		Caller has already determined that a transition is in
 *		progress.  hwtf indicates how much hardware has already
 *		pushed.
 */
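
/*
 * Rough sketch of the overall flow (illustrative only; the actual entry
 * points, labels, and surrounding code live in the machine-dependent
 * assembly, not in this header):
 *
 *	user mode
 *	-> hardware pushes the iretq frame onto the per-cpu trampoline stack
 *	-> trampoline: swapgs ; KMMUENTER_*	(kernel %cr3, copy the
 *	   hardware frame onto the real kernel stack)
 *	-> normal trap/interrupt/syscall processing on the kernel stack
 *	-> KMMUEXIT* (copy the hardware frame back to the trampoline,
 *	   reload the isolated %cr3) ; swapgs ; iretq
 *	-> user mode
 */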

/*
 * KMMUENTER_CORE - Handles ISOMMU, IBRS, and IBPB.  Caller has already
 *		    saved %rcx and %rdx.  We have to deal with %rax.
 *
 *		    XXX If IBPB is not supported, try to clear the
 *		    call/return hw cache with a long chained call sequence?
 *
 *	IBRS2 note - We are leaving IBRS on full-time.  However, Intel
 *	believes it is not safe unless the MSR is poked on each user->kernel
 *	transition, so poke the MSR for both IBRS1 and IBRS2.
 */
#define KMMUENTER_CORE							\
	testq	$PCB_ISOMMU,PCPU(trampoline)+TR_PCB_FLAGS ;		\
	je	40f ;							\
	movq	PCPU(trampoline)+TR_PCB_CR3,%rcx ;			\
	movq	%rcx,%cr3 ;						\
40:	testq	$PCB_IBRS1|PCB_IBRS2|PCB_IBPB,PCPU(trampoline)+TR_PCB_GFLAGS ;\
	je	43f ;							\
	movq	%rax, PCPU(trampoline)+TR_RAX ;				\
	testq	$PCB_IBRS1|PCB_IBRS2,PCPU(trampoline)+TR_PCB_GFLAGS ;	\
	je	41f ;							\
	movl	$MSR_SPEC_CTRL,%ecx ;					\
	movl	$MSR_IBRS_ENABLE,%eax ;					\
	xorl	%edx,%edx ;						\
	wrmsr ;								\
41:	testq	$PCB_IBPB,PCPU(trampoline)+TR_PCB_GFLAGS ;		\
	je	42f ;							\
	movl	$MSR_PRED_CMD,%ecx ;					\
	movl	$MSR_IBPB_BARRIER,%eax ;				\
	xorl	%edx,%edx ;						\
	wrmsr ;								\
42:	movq	PCPU(trampoline)+TR_RAX, %rax ;				\
43:									\


/*
 * Enter with trampoline, hardware pushed up to %rip
 */
#define KMMUENTER_TFRIP							\
	subq	$TR_RIP, %rsp ;						\
	movq	%rcx, TR_RCX(%rsp) ;					\
	movq	%rdx, TR_RDX(%rsp) ;					\
	KMMUENTER_CORE ;						\
	movq	%rsp, %rcx ;		/* trampoline rsp */		\
	movq	PCPU(trampoline)+TR_PCB_RSP,%rsp ; /* kstack rsp */	\
	movq	TR_SS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RSP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RFLAGS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_CS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RIP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RDX(%rcx), %rdx ;					\
	movq	TR_RCX(%rcx), %rcx

/*
 * Enter with trampoline, hardware pushed up to ERR
 */
#define KMMUENTER_TFERR							\
	subq	$TR_ERR, %rsp ;						\
	movq	%rcx, TR_RCX(%rsp) ;					\
	movq	%rdx, TR_RDX(%rsp) ;					\
	KMMUENTER_CORE ;						\
	movq	%rsp, %rcx ;		/* trampoline rsp */		\
	movq	PCPU(trampoline)+TR_PCB_RSP,%rsp ; /* kstack rsp */	\
	movq	TR_SS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RSP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RFLAGS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_CS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RIP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_ERR(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RDX(%rcx), %rdx ;					\
	movq	TR_RCX(%rcx), %rcx

/*
 * Enter with trampoline, hardware pushed up to ERR and
 * we need to save %cr2 early (before potentially reloading %cr3).
 */
#define KMMUENTER_TFERR_SAVECR2						\
	subq	$TR_ERR, %rsp ;						\
	movq	%rcx, TR_RCX(%rsp) ;					\
	movq	%rdx, TR_RDX(%rsp) ;					\
	movq	%cr2, %rcx ;						\
	movq	%rcx, PCPU(trampoline)+TR_CR2 ;				\
	KMMUENTER_CORE ;						\
	movq	%rsp, %rcx ;		/* trampoline rsp */		\
	movq	PCPU(trampoline)+TR_PCB_RSP,%rsp ; /* kstack rsp */	\
	movq	TR_SS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RSP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RFLAGS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_CS(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RIP(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_ERR(%rcx), %rdx ;					\
	pushq	%rdx ;							\
	movq	TR_RDX(%rcx), %rdx ;					\
	movq	TR_RCX(%rcx), %rcx

/*
 * Set %cr3 if necessary on syscall entry.  No registers may be
 * disturbed.
 *
 * NOTE: TR_CR2 is used by the caller to save %rsp, so we cannot use it here.
 */
#define KMMUENTER_SYSCALL						\
	movq	%rcx, PCPU(trampoline)+TR_RCX ;				\
	movq	%rdx, PCPU(trampoline)+TR_RDX ;				\
	KMMUENTER_CORE ;						\
	movq	PCPU(trampoline)+TR_RDX, %rdx ;				\
	movq	PCPU(trampoline)+TR_RCX, %rcx

/*
 * KMMUEXIT_CORE handles IBRS and IBPB, but not ISOMMU.
 *
 * We don't re-execute the IBPB barrier on exit at the moment.
 */
#define KMMUEXIT_CORE							\
	testq	$PCB_IBRS1,PCPU(trampoline)+TR_PCB_GFLAGS ;		\
	je	41f ;							\
	movq	%rax, PCPU(trampoline)+TR_RAX ;				\
	movq	%rcx, PCPU(trampoline)+TR_RCX ;				\
	movq	%rdx, PCPU(trampoline)+TR_RDX ;				\
	movl	$MSR_SPEC_CTRL,%ecx ;					\
	movl	$MSR_IBRS_DISABLE,%eax ;				\
	xorl	%edx,%edx ;						\
	wrmsr ;								\
	movq	PCPU(trampoline)+TR_RDX, %rdx ;				\
	movq	PCPU(trampoline)+TR_RCX, %rcx ;				\
	movq	PCPU(trampoline)+TR_RAX, %rax ;				\
41:

/*
 * We are positioned at the base of the trapframe.  Advance %rsp past the
 * register save area to the hardware portion of the frame and handle MMU
 * isolation.  MMU isolation requires us to copy the hardware frame to the
 * trampoline area before setting %cr3 to the isolated map.  We then set
 * the %rsp for iretq to TR_RIP in the trampoline area (after restoring
 * the register we saved in TR_ERR).
 */
#define KMMUEXIT							\
	addq	$TF_RIP,%rsp ;						\
	KMMUEXIT_CORE ;							\
	testq	$PCB_ISOMMU,PCPU(trampoline)+TR_PCB_FLAGS ;		\
	je	40f ;							\
	movq	%rcx, PCPU(trampoline)+TR_ERR ;	/* save in TR_ERR */	\
	popq	%rcx ;				/* copy %rip */		\
	movq	%rcx, PCPU(trampoline)+TR_RIP ;				\
	popq	%rcx ;				/* copy %cs */		\
	movq	%rcx, PCPU(trampoline)+TR_CS ;				\
	popq	%rcx ;				/* copy %rflags */	\
	movq	%rcx, PCPU(trampoline)+TR_RFLAGS ;			\
	popq	%rcx ;				/* copy %rsp */		\
	movq	%rcx, PCPU(trampoline)+TR_RSP ;				\
	popq	%rcx ;				/* copy %ss */		\
	movq	%rcx, PCPU(trampoline)+TR_SS ;				\
	movq	%gs:0,%rcx ;						\
	addq	$GD_TRAMPOLINE+TR_ERR,%rcx ;				\
	movq	%rcx,%rsp ;						\
	movq	PCPU(trampoline)+TR_PCB_CR3_ISO,%rcx ;			\
	movq	%rcx,%cr3 ;						\
	popq	%rcx ;		/* positioned at TR_RIP after this */	\
40:									\

/*
 * Warning: the user stack pointer has already been loaded into %rsp at
 * this point.  We still have the kernel %gs.
 *
 * Caller will sysexit; we do not have to copy anything to the
 * trampoline area.
 */
#define KMMUEXIT_SYSCALL						\
	KMMUEXIT_CORE ;							\
	testq	$PCB_ISOMMU,PCPU(trampoline)+TR_PCB_FLAGS ;		\
	je	40f ;							\
	movq	%rcx, PCPU(trampoline)+TR_RCX ;				\
	movq	PCPU(trampoline)+TR_PCB_CR3_ISO,%rcx ;			\
	movq	%rcx,%cr3 ;						\
	movq	PCPU(trampoline)+TR_RCX, %rcx ;				\
40:

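
/*
 * Rough sketch of how the syscall variants are meant to be used by the
 * machine-dependent fast-syscall entry code (illustrative only; the real
 * entry point, its label, and the per-cpu kstack field are not defined
 * here):
 *
 *	swapgs					# kernel %gs
 *	movq	%rsp,PCPU(trampoline)+TR_CR2	# stash user %rsp (see the
 *						# NOTE above KMMUENTER_SYSCALL)
 *	KMMUENTER_SYSCALL			# kernel %cr3, IBRS/IBPB
 *	movq	<kernel stack>,%rsp		# switch to the kernel stack
 *	...build the trapframe, dispatch the system call...
 *	...restore user registers and the user %rsp...
 *	KMMUEXIT_SYSCALL			# isolated %cr3 for the return
 *	swapgs					# back to user %gs
 *	sysretq
 */
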
/*
 * Macros to create and destroy a trap frame.  rsp has already been shifted
 * to the base of the trapframe in the thread structure.
 */
#define PUSH_FRAME_REGS							\
	movq	%rdi,TF_RDI(%rsp) ;					\
	movq	%rsi,TF_RSI(%rsp) ;					\
	movq	%rdx,TF_RDX(%rsp) ;					\
	movq	%rcx,TF_RCX(%rsp) ;					\
	movq	%r8,TF_R8(%rsp) ;					\
	movq	%r9,TF_R9(%rsp) ;					\
	movq	%rax,TF_RAX(%rsp) ;					\
	movq	%rbx,TF_RBX(%rsp) ;					\
	movq	%rbp,TF_RBP(%rsp) ;					\
	movq	%r10,TF_R10(%rsp) ;					\
	movq	%r11,TF_R11(%rsp) ;					\
	movq	%r12,TF_R12(%rsp) ;					\
	movq	%r13,TF_R13(%rsp) ;					\
	movq	%r14,TF_R14(%rsp) ;					\
	movq	%r15,TF_R15(%rsp)

/*
 * PUSH_FRAME is the first thing executed upon interrupt entry.  We are
 * responsible for swapgs execution and the KMMUENTER dispatch.
 */
#define PUSH_FRAME_TFRIP						\
	testb	$SEL_RPL_MASK,TF_CS-TF_RIP(%rsp) ; /* from userland? */	\
	jz	1f ;							\
	swapgs ;		/* from userland */			\
	KMMUENTER_TFRIP ;	/* from userland */			\
1:									\
	subq	$TF_RIP,%rsp ;						\
	PUSH_FRAME_REGS

#define PUSH_FRAME_TFERR						\
	testb	$SEL_RPL_MASK,TF_CS-TF_ERR(%rsp) ; /* from userland? */	\
	jz	1f ;							\
	swapgs ;		/* from userland */			\
	KMMUENTER_TFERR ;	/* from userland */			\
1:									\
	subq	$TF_ERR,%rsp ;						\
	PUSH_FRAME_REGS

#define PUSH_FRAME_TFERR_SAVECR2					\
	testb	$SEL_RPL_MASK,TF_CS-TF_ERR(%rsp) ;			\
	jz	1f ;							\
	swapgs ;		/* from userland */			\
	KMMUENTER_TFERR_SAVECR2 ;/* from userland */			\
	subq	$TF_ERR,%rsp ;						\
	PUSH_FRAME_REGS ;						\
	movq	PCPU(trampoline)+TR_CR2, %r10 ;				\
	jmp 2f ;							\
1:									\
	subq	$TF_ERR,%rsp ;						\
	PUSH_FRAME_REGS ;						\
	movq	%cr2, %r10 ;						\
2:									\
	movq	%r10, TF_ADDR(%rsp)

/*
 * POP_FRAME is issued just prior to the iretq, or just prior to a
 * jmp doreti_iret.  The terminating instruction must be passed in to
 * the macro as lastinsn.
 */
#define POP_FRAME(lastinsn)						\
	movq	TF_RDI(%rsp),%rdi ;					\
	movq	TF_RSI(%rsp),%rsi ;					\
	movq	TF_RDX(%rsp),%rdx ;					\
	movq	TF_RCX(%rsp),%rcx ;					\
	movq	TF_R8(%rsp),%r8 ;					\
	movq	TF_R9(%rsp),%r9 ;					\
	movq	TF_RAX(%rsp),%rax ;					\
	movq	TF_RBX(%rsp),%rbx ;					\
	movq	TF_RBP(%rsp),%rbp ;					\
	movq	TF_R10(%rsp),%r10 ;					\
	movq	TF_R11(%rsp),%r11 ;					\
	movq	TF_R12(%rsp),%r12 ;					\
	movq	TF_R13(%rsp),%r13 ;					\
	movq	TF_R14(%rsp),%r14 ;					\
	movq	TF_R15(%rsp),%r15 ;					\
	cli ;								\
	testb	$SEL_RPL_MASK,TF_CS(%rsp) ; /* return to user? */	\
	jz	1f ;							\
	KMMUEXIT ;		/* return to user */			\
	swapgs ;		/* return to user */			\
	jmp	2f ;							\
1:									\
	addq	$TF_RIP,%rsp ;	/* setup for iretq */			\
2:									\
	lastinsn

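/*
 * Rough sketch of a trap vector tying these macros together (illustrative
 * only; real vectors, the doreti path, and the C handler are defined in
 * the machine-dependent assembly and C sources, not here):
 *
 *	IDTVEC(hypothetical_fault)		# vector pushes an error code
 *		PUSH_FRAME_TFERR		# swapgs/KMMUENTER + save regs
 *		movq	%rsp,%rdi		# trapframe pointer argument
 *		call	hypothetical_c_handler
 *		POP_FRAME(iretq)		# restore, KMMUEXIT/swapgs, iretq
 */
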
/*
 * Access per-CPU data.
 */
#define	PCPU(member)		%gs:gd_ ## member
#define PCPU_E8(member,idx)	%gs:gd_ ## member(,idx,8)
#define	PCPU_ADDR(member, reg)					\
	movq %gs:PC_PRVSPACE, reg ;				\
	addq $PC_ ## member, reg
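
/*
 * For example (illustrative only), PCPU(cpuid) expands to %gs:gd_cpuid,
 * so assembly code can fetch the current cpu's id with something like:
 *
 *	movl	PCPU(cpuid),%eax
 *
 * provided the globaldata structure actually has a gd_cpuid member of
 * that size (this header only supplies the addressing shorthand).
 */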

#endif /* LOCORE */

#endif /* !_CPU_ASMACROS_H_ */