/*	$NetBSD: i386func.S,v 1.22 2020/05/19 21:40:55 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Functions to provide access to i386-specific instructions.
 *
 * These are _not_ shared with NetBSD/xen.
 */

#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.22 2020/05/19 21:40:55 ad Exp $");

#include <machine/specialreg.h>
#include <machine/segments.h>

#include "assym.h"

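/*
 * For reference, the C prototypes under which the kernel calls the
 * functions below.  These are paraphrased from the x86 cpufunc
 * declarations and shown here as a sketch, not verbatim from any
 * header:
 *
 *	void	invlpg(vaddr_t va);
 *	void	lldt(int sel);
 *	void	ltr(int sel);
 *	void	tlbflush(void);
 *	void	tlbflushg(void);
 *	void	wbinvd(void);
 *	void	lgdt(struct region_descriptor *rdp);
 */
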
ENTRY(invlpg)
	movl	4(%esp), %eax		/* virtual address to invalidate */
	invlpg	(%eax)			/* drop any TLB entry for that page */
	ret
END(invlpg)

ENTRY(lldt)
	movl	4(%esp), %eax
	cmpl	%eax, CPUVAR(CURLDT)	/* is this LDT selector current? */
	jne	1f
	ret				/* yes: nothing to do */
1:
	movl	%eax, CPUVAR(CURLDT)	/* no: record it, then load it */
	lldt	%ax
	ret
END(lldt)

ENTRY(ltr)
	movl	4(%esp), %eax		/* TSS selector */
	ltr	%ax
	ret
END(ltr)

/*
 * Big hammer: flush all TLB entries, including ones from PTEs
 * with the G bit set.  This should only be necessary if TLB
 * shootdown falls far behind.
 *
 * Intel Architecture Software Developer's Manual, Volume 3,
 *	System Programming, section 9.10, "Invalidating the
 * Translation Lookaside Buffers (TLBs)":
 * "The following operations invalidate all TLB entries, irrespective
 * of the setting of the G flag:
 * ...
 * "(P6 family processors only): Writing to control register CR4 to
 * modify the PSE, PGE, or PAE flag."
 *
 * (The alternatives not quoted above are not an option here.)
 *
 * If PGE is not in use, we reload CR3.  Check for the PGE feature
 * first since the i486 does not have CR4.  Note: the feature flag may
 * be present while the actual PGE functionality is not yet enabled.
 */
ENTRY(tlbflushg)
	testl	$CPUID_PGE, _C_LABEL(cpu_feature)
	jz	1f			/* no PGE feature: CR3 reload suffices */
	movl	%cr4, %eax
	testl	$CR4_PGE, %eax
	jz	1f			/* PGE not enabled: CR3 reload suffices */
	movl	%eax, %edx
	andl	$~CR4_PGE, %edx
	movl	%edx, %cr4		/* clear PGE: flushes all TLB entries */
	movl	%eax, %cr4		/* restore the original CR4 value */
	ret
END(tlbflushg)

/*
 * The "jz 1f" branches in tlbflushg above land on the "1:" label
 * below, so tlbflush must stay directly after tlbflushg.
 */
ENTRY(tlbflush)
1:
	movl	%cr3, %eax
	movl	%eax, %cr3		/* reload CR3: flushes non-global entries */
	ret
END(tlbflush)
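
/*
 * For clarity, the tlbflushg() logic as an illustrative C sketch.
 * The rcr4()/lcr4() CR4 accessors are assumed for illustration, not
 * quoted from a header, and the cpu_feature test is paraphrased:
 *
 *	void
 *	tlbflushg(void)
 *	{
 *		u_int cr4;
 *
 *		if (cpu_feature & CPUID_PGE) {
 *			cr4 = rcr4();
 *			if (cr4 & CR4_PGE) {
 *				lcr4(cr4 & ~CR4_PGE);	// drops all entries
 *				lcr4(cr4);		// re-enables PGE
 *				return;
 *			}
 *		}
 *		tlbflush();	// flushes non-global entries only
 *	}
 */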

ENTRY(wbinvd)
	wbinvd				/* write back and invalidate all caches */
	ret
END(wbinvd)

/*
 * void lgdt(struct region_descriptor *rdp);
 *
 * Load a new GDT pointer (and do any necessary cleanup).
 * XXX It's somewhat questionable whether reloading all the segment registers
 * is necessary, since the actual descriptor data is not changed except by
 * process creation and exit, both of which clean up via task switches.  OTOH,
 * this only happens at run time when the GDT is resized.
 */
ENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp), %eax
	lgdt	(%eax)
	/* Flush the prefetch queue. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %gs
	movl	%eax, %ss
	movl	$GSEL(GCPU_SEL, SEL_KPL), %eax
	movl	%eax, %fs
	/* Tail call: x86_flush reloads %cs with a far control transfer. */
	jmp	_C_LABEL(x86_flush)
END(lgdt)
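
/*
 * Typical C-side usage, sketched under the assumption that the caller
 * fills in a region_descriptor with setregion() as done elsewhere in
 * the i386 code; the names below are illustrative, not verbatim:
 *
 *	struct region_descriptor region;
 *
 *	setregion(&region, gdtstore, gdt_size - 1);
 *	lgdt(&region);
 */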