1 /*-
2 * Copyright (c) 2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 #ifdef __FreeBSD__
35 __FBSDID("$FreeBSD: src/sys/compat/ndis/subr_hal.c,v 1.13.2.3 2005/03/31 04:24:35 wpaul Exp $");
36 #endif
37 #ifdef __NetBSD__
38 __KERNEL_RCSID(0, "$NetBSD: subr_hal.c,v 1.8 2012/07/28 00:43:22 matt Exp $");
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/types.h>
43 #include <sys/errno.h>
44
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #ifdef __FreeBSD__
49 #include <sys/mutex.h>
50 #endif
51 #include <sys/proc.h>
52 #include <sys/sched.h>
53 #ifdef __FreeBSD__
54 #include <sys/module.h>
55 #endif
56
57 #include <sys/systm.h>
58 #ifdef __FreeBSD__
59 #include <machine/clock.h>
60 #include <machine/bus_memio.h>
61 #include <machine/bus_pio.h>
62 #endif
63 #include <sys/bus.h>
64
65 #ifdef __FreeBSD__
66 #include <sys/bus.h>
67 #include <sys/rman.h>
68 #endif
69
70 #include <compat/ndis/pe_var.h>
71 #include <compat/ndis/ntoskrnl_var.h>
72 #include <compat/ndis/hal_var.h>
73
74 __stdcall static void KeStallExecutionProcessor(uint32_t);
75 __stdcall static void WRITE_PORT_BUFFER_ULONG(uint32_t *,
76 uint32_t *, uint32_t);
77 __stdcall static void WRITE_PORT_BUFFER_USHORT(uint16_t *,
78 uint16_t *, uint32_t);
79 __stdcall static void WRITE_PORT_BUFFER_UCHAR(uint8_t *,
80 uint8_t *, uint32_t);
81 __stdcall static void WRITE_PORT_ULONG(uint32_t *, uint32_t);
82 __stdcall static void WRITE_PORT_USHORT(uint16_t *, uint16_t);
83 __stdcall static void WRITE_PORT_UCHAR(uint8_t *, uint8_t);
84 __stdcall static uint32_t READ_PORT_ULONG(uint32_t *);
85 __stdcall static uint16_t READ_PORT_USHORT(uint16_t *);
86 __stdcall static uint8_t READ_PORT_UCHAR(uint8_t *);
87 __stdcall static void READ_PORT_BUFFER_ULONG(uint32_t *,
88 uint32_t *, uint32_t);
89 __stdcall static void READ_PORT_BUFFER_USHORT(uint16_t *,
90 uint16_t *, uint32_t);
91 __stdcall static void READ_PORT_BUFFER_UCHAR(uint8_t *,
92 uint8_t *, uint32_t);
93 __stdcall static uint64_t KeQueryPerformanceCounter(uint64_t *);
94 __stdcall static void dummy (void);
95
96 extern struct mtx_pool *ndis_mtxpool;
97
98 #ifdef __NetBSD__
99 int win_irql;
100 #endif
101
102 int
hal_libinit(void)103 hal_libinit(void)
104 {
105 image_patch_table *patch;
106
107 patch = hal_functbl;
108 while (patch->ipt_func != NULL) {
109 windrv_wrap((funcptr)patch->ipt_func,
110 (funcptr *)&patch->ipt_wrap);
111 patch++;
112 }
113
114 return(0);
115 }
116
117 int
hal_libfini(void)118 hal_libfini(void)
119 {
120 image_patch_table *patch;
121
122 patch = hal_functbl;
123 while (patch->ipt_func != NULL) {
124 windrv_unwrap(patch->ipt_wrap);
125 patch++;
126 }
127
128 return(0);
129 }
130
131 __stdcall static void
KeStallExecutionProcessor(uint32_t usecs)132 KeStallExecutionProcessor(uint32_t usecs)
133 {
134 DELAY(usecs);
135 return;
136 }
137
138 __stdcall static void
WRITE_PORT_ULONG(uint32_t * port,uint32_t val)139 WRITE_PORT_ULONG(uint32_t *port, uint32_t val)
140 {
141 bus_space_write_4(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val);
142 return;
143 }
144
145 __stdcall static void
WRITE_PORT_USHORT(uint16_t * port,uint16_t val)146 WRITE_PORT_USHORT(uint16_t *port, uint16_t val)
147 {
148 bus_space_write_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val);
149 return;
150 }
151
152 __stdcall static void
WRITE_PORT_UCHAR(uint8_t * port,uint8_t val)153 WRITE_PORT_UCHAR(uint8_t *port, uint8_t val)
154 {
155 bus_space_write_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val);
156 return;
157 }
158
159 __stdcall static void
WRITE_PORT_BUFFER_ULONG(uint32_t * port,uint32_t * val,uint32_t cnt)160 WRITE_PORT_BUFFER_ULONG(uint32_t *port, uint32_t *val, uint32_t cnt)
161 {
162 bus_space_write_multi_4(NDIS_BUS_SPACE_IO, 0x0,
163 (bus_size_t)port, val, cnt);
164 return;
165 }
166
167 __stdcall static void
WRITE_PORT_BUFFER_USHORT(uint16_t * port,uint16_t * val,uint32_t cnt)168 WRITE_PORT_BUFFER_USHORT(uint16_t *port, uint16_t *val, uint32_t cnt)
169 {
170 bus_space_write_multi_2(NDIS_BUS_SPACE_IO, 0x0,
171 (bus_size_t)port, val, cnt);
172 return;
173 }
174
175 __stdcall static void
WRITE_PORT_BUFFER_UCHAR(uint8_t * port,uint8_t * val,uint32_t cnt)176 WRITE_PORT_BUFFER_UCHAR(uint8_t *port, uint8_t *val, uint32_t cnt)
177 {
178 bus_space_write_multi_1(NDIS_BUS_SPACE_IO, 0x0,
179 (bus_size_t)port, val, cnt);
180 return;
181 }
182
183 __stdcall static uint16_t
READ_PORT_USHORT(uint16_t * port)184 READ_PORT_USHORT(uint16_t *port)
185 {
186 return(bus_space_read_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port));
187 }
188
189 __stdcall static uint32_t
READ_PORT_ULONG(uint32_t * port)190 READ_PORT_ULONG(uint32_t *port)
191 {
192 return(bus_space_read_4(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port));
193 }
194
195 __stdcall static uint8_t
READ_PORT_UCHAR(uint8_t * port)196 READ_PORT_UCHAR(uint8_t *port)
197 {
198 return(bus_space_read_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port));
199 }
200
201 __stdcall static void
READ_PORT_BUFFER_ULONG(uint32_t * port,uint32_t * val,uint32_t cnt)202 READ_PORT_BUFFER_ULONG(uint32_t *port, uint32_t *val, uint32_t cnt)
203 {
204 bus_space_read_multi_4(NDIS_BUS_SPACE_IO, 0x0,
205 (bus_size_t)port, val, cnt);
206 return;
207 }
208
209 __stdcall static void
READ_PORT_BUFFER_USHORT(uint16_t * port,uint16_t * val,uint32_t cnt)210 READ_PORT_BUFFER_USHORT(uint16_t *port, uint16_t *val, uint32_t cnt)
211 {
212 bus_space_read_multi_2(NDIS_BUS_SPACE_IO, 0x0,
213 (bus_size_t)port, val, cnt);
214 return;
215 }
216
217 __stdcall static void
READ_PORT_BUFFER_UCHAR(uint8_t * port,uint8_t * val,uint32_t cnt)218 READ_PORT_BUFFER_UCHAR(uint8_t *port, uint8_t *val, uint32_t cnt)
219 {
220 bus_space_read_multi_1(NDIS_BUS_SPACE_IO, 0x0,
221 (bus_size_t)port, val, cnt);
222 return;
223 }
224
225 /*
226 * The spinlock implementation in Windows differs from that of FreeBSD.
227 * The basic operation of spinlocks involves two steps: 1) spin in a
228 * tight loop while trying to acquire a lock, 2) after obtaining the
229 * lock, disable preemption. (Note that on uniprocessor systems, you're
230 * allowed to skip the first step and just lock out pre-emption, since
231 * it's not possible for you to be in contention with another running
232 * thread.) Later, you release the lock then re-enable preemption.
233 * The difference between Windows and FreeBSD lies in how preemption
234 * is disabled. In FreeBSD, it's done using critical_enter(), which on
235 * the x86 arch translates to a cli instruction. This masks off all
236 * interrupts, and effectively stops the scheduler from ever running
237 * so _nothing_ can execute except the current thread. In Windows,
238 * preemption is disabled by raising the processor IRQL to DISPATCH_LEVEL.
239 * This stops other threads from running, but does _not_ block device
240 * interrupts. This means ISRs can still run, and they can make other
 * threads runnable, but those other threads won't be able to execute
242 * until the current thread lowers the IRQL to something less than
243 * DISPATCH_LEVEL.
244 *
245 * There's another commonly used IRQL in Windows, which is APC_LEVEL.
246 * An APC is an Asynchronous Procedure Call, which differs from a DPC
 * (Deferred Procedure Call) in that a DPC is queued up to run in
248 * another thread, while an APC runs in the thread that scheduled
249 * it (similar to a signal handler in a UNIX process). We don't
250 * actually support the notion of APCs in FreeBSD, so for now, the
251 * only IRQLs we're interested in are DISPATCH_LEVEL and PASSIVE_LEVEL.
252 *
253 * To simulate DISPATCH_LEVEL, we raise the current thread's priority
254 * to PI_REALTIME, which is the highest we can give it. This should,
255 * if I understand things correctly, prevent anything except for an
256 * interrupt thread from preempting us. PASSIVE_LEVEL is basically
257 * everything else.
258 *
259 * Be aware that, at least on the x86 arch, the Windows spinlock
260 * functions are divided up in peculiar ways. The actual spinlock
261 * functions are KfAcquireSpinLock() and KfReleaseSpinLock(), and
262 * they live in HAL.dll. Meanwhile, KeInitializeSpinLock(),
263 * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel()
264 * live in ntoskrnl.exe. Most Windows source code will call
265 * KeAcquireSpinLock() and KeReleaseSpinLock(), but these are just
266 * macros that call KfAcquireSpinLock() and KfReleaseSpinLock().
267 * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel()
 * perform the lock acquisition/release functions without doing the
269 * IRQL manipulation, and are used when one is already running at
270 * DISPATCH_LEVEL. Make sense? Good.
271 *
272 * According to the Microsoft documentation, any thread that calls
273 * KeAcquireSpinLock() must be running at IRQL <= DISPATCH_LEVEL. If
274 * we detect someone trying to acquire a spinlock from DEVICE_LEVEL
275 * or HIGH_LEVEL, we panic.
276 */
277
278 __fastcall uint8_t
KfAcquireSpinLock(REGARGS1 (kspin_lock * lock))279 KfAcquireSpinLock(REGARGS1(kspin_lock *lock))
280 {
281 uint8_t oldirql;
282
283 /* I am so going to hell for this. */
284 if (KeGetCurrentIrql() > DISPATCH_LEVEL)
285 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
286
287 oldirql = KeRaiseIrql(DISPATCH_LEVEL);
288 KeAcquireSpinLockAtDpcLevel(lock);
289
290 return(oldirql);
291 }
292
293 __fastcall void
KfReleaseSpinLock(REGARGS2 (kspin_lock * lock,uint8_t newirql))294 KfReleaseSpinLock(REGARGS2(kspin_lock *lock, uint8_t newirql))
295 {
296 KeReleaseSpinLockFromDpcLevel(lock);
297 KeLowerIrql(newirql);
298
299 return;
300 }
301
302 __stdcall uint8_t
KeGetCurrentIrql(void)303 KeGetCurrentIrql(void)
304 {
305 if (AT_DISPATCH_LEVEL(curthread))
306 return(DISPATCH_LEVEL);
307 return(PASSIVE_LEVEL);
308 }
309
310 __stdcall static uint64_t
KeQueryPerformanceCounter(uint64_t * freq)311 KeQueryPerformanceCounter(uint64_t *freq)
312 {
313 if (freq != NULL)
314 *freq = hz;
315
316 return((uint64_t)ticks);
317 }
318
319
/*
 * Saved interrupt priority level and raised/not-raised state used
 * by KfRaiseIrql()/KfLowerIrql() to emulate IRQL changes on NetBSD.
 */
static int old_ipl;
static int ipl_raised = FALSE;
322
323 __fastcall uint8_t
KfRaiseIrql(REGARGS1 (uint8_t irql))324 KfRaiseIrql(REGARGS1(uint8_t irql))
325 {
326 uint8_t oldirql = 0;
327 //#ifdef __NetBSD__
328 // uint8_t s;
329 //#endif
330
331 if (irql < KeGetCurrentIrql())
332 panic("IRQL_NOT_LESS_THAN");
333
334 if (KeGetCurrentIrql() == DISPATCH_LEVEL)
335 return(DISPATCH_LEVEL);
336 #ifdef __NetBSD__
337 if(irql >= DISPATCH_LEVEL && !ipl_raised) {
338 old_ipl = splsoftclock();
339 ipl_raised = TRUE;
340 oldirql = win_irql;
341 win_irql = irql;
342 }
343 #else /* __FreeBSD__ */
344 mtx_lock_spin(&sched_lock);
345 oldirql = curthread->td_base_pri;
346 sched_prio(curthread, PI_REALTIME);
347 #if __FreeBSD_version < 600000
348 curthread->td_base_pri = PI_REALTIME;
349 #endif
350 mtx_unlock_spin(&sched_lock);
351 #endif /* __FreeBSD__ */
352
353 return(oldirql);
354 }
355
356 __fastcall void
KfLowerIrql(REGARGS1 (uint8_t oldirql))357 KfLowerIrql(REGARGS1(uint8_t oldirql))
358 {
359 //#ifdef __NetBSD__
360 // uint8_t s;
361 //#endif
362
363 if (oldirql == DISPATCH_LEVEL)
364 return;
365
366 #ifdef __FreeBSD__
367 if (KeGetCurrentIrql() != DISPATCH_LEVEL)
368 panic("IRQL_NOT_GREATER_THAN");
369 #else /* __NetBSD__ */
370 if (KeGetCurrentIrql() < oldirql)
371 panic("IRQL_NOT_GREATER_THAN");
372 #endif
373
374 #ifdef __NetBSD__
375 if(oldirql < DISPATCH_LEVEL && ipl_raised) {
376 splx(old_ipl);
377 ipl_raised = FALSE;
378 win_irql = oldirql;
379 }
380 #else
381 mtx_lock_spin(&sched_lock);
382 #if __FreeBSD_version < 600000
383 curthread->td_base_pri = oldirql;
384 #endif
385 sched_prio(curthread, oldirql);
386 mtx_unlock_spin(&sched_lock);
387 #endif /* __NetBSD__ */
388
389 return;
390 }
391
392 __stdcall
dummy(void)393 static void dummy(void)
394 {
395 printf ("hal dummy called...\n");
396 return;
397 }
398
/*
 * HAL export table: maps Windows HAL.dll symbol names to our native
 * implementations.  The PE import patching routine walks this table
 * when binding a driver's imports.
 */
image_patch_table hal_functbl[] = {
	IMPORT_FUNC(KeStallExecutionProcessor),
	IMPORT_FUNC(WRITE_PORT_ULONG),
	IMPORT_FUNC(WRITE_PORT_USHORT),
	IMPORT_FUNC(WRITE_PORT_UCHAR),
	IMPORT_FUNC(WRITE_PORT_BUFFER_ULONG),
	IMPORT_FUNC(WRITE_PORT_BUFFER_USHORT),
	IMPORT_FUNC(WRITE_PORT_BUFFER_UCHAR),
	IMPORT_FUNC(READ_PORT_ULONG),
	IMPORT_FUNC(READ_PORT_USHORT),
	IMPORT_FUNC(READ_PORT_UCHAR),
	IMPORT_FUNC(READ_PORT_BUFFER_ULONG),
	IMPORT_FUNC(READ_PORT_BUFFER_USHORT),
	IMPORT_FUNC(READ_PORT_BUFFER_UCHAR),
	IMPORT_FUNC(KfAcquireSpinLock),
	IMPORT_FUNC(KfReleaseSpinLock),
	IMPORT_FUNC(KeGetCurrentIrql),
	IMPORT_FUNC(KeQueryPerformanceCounter),
	IMPORT_FUNC(KfLowerIrql),
	IMPORT_FUNC(KfRaiseIrql),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL },

	/* End of list. */

	{ NULL, NULL, NULL }
};
433