/*	$NetBSD: subr_ntoskrnl.c,v 1.26 2016/02/08 16:42:04 christos Exp $	*/

/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
#endif
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.26 2016/02/08 16:42:04 christos Exp $");
#endif

#ifdef __FreeBSD__
#include <sys/ctype.h>
#endif
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/atomic.h>
#ifdef __FreeBSD__
#include <machine/clock.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#endif
#include <sys/bus.h>

#ifdef __FreeBSD__
#include <sys/bus.h>
#include <sys/rman.h>
#endif

#ifdef __NetBSD__
#include <uvm/uvm.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pmap.h>
#include <sys/pool.h>
#include <sys/reboot.h> /* for AB_VERBOSE */
#else
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#endif

#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef __NetBSD__
#include <compat/ndis/nbcompat.h>
#endif

#define __regparm __attribute__((regparm(3)))

#ifdef __NetBSD__
/* Turn on DbgPrint() from Windows drivers */
#define boothowto AB_VERBOSE
#endif

__stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *);
__stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
	ndis_ansi_string *, uint8_t);
__stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
__stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
__stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
__stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
__stdcall static void IoReuseIrp(irp *, uint32_t);
__stdcall static void IoFreeIrp(irp *);
__stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
__stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
__stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
__stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
__stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
__stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
__stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
__stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
__stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
__stdcall static int64_t _allmul(int64_t, int64_t);
__stdcall static int64_t _alldiv(int64_t, int64_t);
__stdcall static int64_t _allrem(int64_t, int64_t);
__regparm static int64_t _allshr(int64_t, uint8_t);
__regparm static int64_t _allshl(int64_t, uint8_t);
__stdcall static uint64_t _aullmul(uint64_t, uint64_t);
__stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
__stdcall static uint64_t _aullrem(uint64_t, uint64_t);
__regparm static uint64_t _aullshr(uint64_t, uint8_t);
__regparm static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
__stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
__stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
__fastcall static slist_entry
	*InterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry));
__fastcall static slist_entry *InterlockedPopEntrySList(REGARGS1(slist_header
	*head));
__fastcall static slist_entry
	*ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock);
__fastcall static slist_entry
	*ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
	kspin_lock *lock));
__stdcall static uint16_t
	ExQueryDepthSList(slist_header *);
__fastcall static uint32_t
	InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
__fastcall static uint32_t
	InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
__fastcall static void
	ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
__stdcall static uint32_t MmSizeOfMdl(void *, size_t);
__stdcall static void MmBuildMdlForNonPagedPool(mdl *);
__stdcall static void *MmMapLockedPages(mdl *, uint8_t);
__stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
__stdcall static void MmUnmapLockedPages(void *, mdl *);
__stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
__stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
__stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
	uint16_t *);
__stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
__stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
__stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
__stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
__stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
__stdcall static ndis_status PsTerminateSystemThread(ndis_status);
__stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
__stdcall static void KeInitializeMutex(kmutant *, uint32_t);
__stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
__stdcall static uint32_t KeReadStateMutex(kmutant *);
__stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
__fastcall static void ObfDereferenceObject(REGARGS1(void *object));
__stdcall static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
__stdcall static void DbgBreakPoint(void);
__stdcall static void dummy(void);

#ifdef __FreeBSD__
static struct mtx ntoskrnl_dispatchlock;
#else /* __NetBSD__ */
static kmutex_t ntoskrnl_dispatchlock;
#endif

static kspin_lock ntoskrnl_global;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
#ifdef __FreeBSD__
static uma_zone_t mdl_zone;
#else
static struct pool mdl_pool;
#endif

int
ntoskrnl_libinit(void)
{
	image_patch_table *patch;
#ifdef __FreeBSD__
	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
#else /* __NetBSD__ */
	mutex_init(&ntoskrnl_dispatchlock, MUTEX_DEFAULT, IPL_NET);
#endif
	KeInitializeSpinLock(&ntoskrnl_global);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	TAILQ_INIT(&ntoskrnl_reflist);

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap);
		patch++;
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */
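
	/*
	 * Illustrative arithmetic (a sketch, not part of the code path):
	 * with 4 KB pages and 64-bit pointers, MmSizeOfMdl() below works
	 * out to roughly sizeof(mdl) + 16 * sizeof(vm_offset_t *) for a
	 * 16-page buffer, so MDL_ZONE_SIZE is assumed to be at least that
	 * large; anything bigger falls through to the heap path in
	 * IoAllocateMdl().
	 */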

#ifdef __FreeBSD__
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
#else
	pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
	    IPL_VM);
#endif

	return(0);
}

int
ntoskrnl_libfini(void)
{
	image_patch_table *patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

#ifdef __FreeBSD__
	uma_zdestroy(mdl_zone);
#else
	pool_destroy(&mdl_pool);
#endif
	mtx_destroy(&ntoskrnl_dispatchlock);

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return(memset(buf, ch, size));
}

__stdcall static uint8_t
RtlEqualUnicodeString(ndis_unicode_string *str1, ndis_unicode_string *str2,
    uint8_t caseinsensitive)
{
	int i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

__stdcall static void
RtlCopyUnicodeString(ndis_unicode_string *dest, ndis_unicode_string *src)
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

__stdcall static ndis_status
RtlUnicodeStringToAnsiString(ndis_ansi_string *dest, ndis_unicode_string *src,
    uint8_t allocate)
{
	char *astr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
			return(NDIS_STATUS_FAILURE);
		dest->nas_buf = astr;
		dest->nas_len = dest->nas_maxlen = strlen(astr);
	} else {
		dest->nas_len = src->us_len / 2; /* XXX */
		if (dest->nas_maxlen < dest->nas_len)
			dest->nas_len = dest->nas_maxlen;
		ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
		    &dest->nas_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall static ndis_status
RtlAnsiStringToUnicodeString(ndis_unicode_string *dest, ndis_ansi_string *src,
    uint8_t allocate)
{
	uint16_t *ustr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
			return(NDIS_STATUS_FAILURE);
		dest->us_buf = ustr;
		dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
	} else {
		dest->us_len = src->nas_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
		ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall void *
ExAllocatePoolWithTag(
	uint32_t pooltype,
	size_t len,
	uint32_t tag)
{
	void *buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return(NULL);
	return(buf);
}

__stdcall void
ExFreePool(void *buf)
{
	free(buf, M_DEVBUF);
	return;
}

__stdcall uint32_t
IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
    uint32_t extlen, void **ext)
{
	custom_extension *ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	INSERT_LIST_TAIL((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}

__stdcall void *
IoGetDriverObjectExtension(driver_object *drv, void *clid)
{
	list_entry *e;
	custom_extension *ce;

	printf("in IoGetDriverObjectExtension\n");

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid) {
			printf("found\n");
			return((void *)(ce + 1));
		}
		e = e->nle_flink;
	}
	printf("not found\n");
	return(NULL);
}


__stdcall uint32_t
IoCreateDevice(
	driver_object *drv,
	uint32_t devextlen,
	unicode_string *devname,
	uint32_t devtype,
	uint32_t devchars,
	uint8_t exclusive,
	device_object **newdev)
{
	device_object *dev;

#ifdef NDIS_LKM
	printf("In IoCreateDevice: drv = %x, devextlen = %x\n", drv, devextlen);
#endif

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
#ifdef NDIS_LKM
	printf("dev = %x\n", dev);
#endif
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		memset(dev->do_devext, 0, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAddDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}

__stdcall void
IoDeleteDevice(device_object *dev)
{
	device_object *prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

__stdcall device_object *
IoGetAttachedDevice(device_object *dev)
{
	device_object *d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

__stdcall static irp *
IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
{
	irp *ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

__stdcall static irp *
IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, io_status_block *status)
{
	irp *ip;
	io_stack_location *sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		memcpy(ip->irp_assoc.irp_sysbuf, buf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

__stdcall static irp *
IoBuildDeviceIoControlRequest(
	uint32_t iocode,
	device_object *dobj,
	void *ibuf,
	uint32_t ilen,
	void *obuf,
	uint32_t olen,
	uint8_t isinternal,
	nt_kevent *event,
	io_status_block *status
	)
{
	irp *ip;
	io_stack_location *sl;
	uint32_t buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
			memset((char *)ip->irp_assoc.irp_sysbuf + ilen, 0,
			    buflen - ilen);
		} else
			memset(ip->irp_assoc.irp_sysbuf, 0, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

__stdcall static irp *
IoAllocateIrp(
	uint8_t stsize,
	uint8_t chargequota)
{
	irp *i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

__stdcall static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
	irp *associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(associrp);
}

__stdcall static void
IoFreeIrp(irp *ip)
{
	ExFreePool(ip);
	return;
}

__stdcall static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
	memset((char *)io, 0, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	INIT_LIST_HEAD(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

__stdcall static void
IoReuseIrp(irp *ip, uint32_t status)
{
	uint8_t allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

__stdcall void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return(TRUE);
}

__fastcall uint32_t
IofCallDriver(REGARGS2(device_object *dobj, irp *ip))
{
	driver_object *drvobj;
	io_stack_location *sl;
	uint32_t status;
	driver_dispatch disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

__fastcall void
IofCompleteRequest(REGARGS2(irp *ip, uint8_t prioboost))
{
	uint32_t i;
	uint32_t status;
	device_object *dobj;
	io_stack_location *sl;
	completion_func cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;

		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t masterirpcnt;
		irp *masterirp;
		mdl *m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt = FASTCALL1(InterlockedDecrement,
		    &masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}

	return;
}

__stdcall device_object *
IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
{
	device_object *attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(attached);
}

__stdcall void
IoDetachDevice(device_object *topdev)
{
	device_object *tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}
	mtx_unlock(&ntoskrnl_dispatchlock);
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_wakeup(void *arg)
{
	nt_dispatch_header *obj;
	list_entry *e;
#ifdef __FreeBSD__
	wait_block *w;
	struct thread *td;
#endif

	obj = arg;

	obj->dh_sigstate = TRUE;
	e = obj->dh_waitlisthead.nle_flink;
	while (e != &obj->dh_waitlisthead) {
		/* TODO: is this correct? */
#ifdef __FreeBSD__
		w = (wait_block *)e;
		td = w->wb_kthread;
		ndis_thresume(td->td_proc);
#else
		ndis_thresume(curproc);
#endif
		/*
		 * For synchronization objects, only wake up
		 * the first waiter.
		 */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			break;
		e = e->nle_flink;
	}

	return;
}

static void
ntoskrnl_time(uint64_t *tval)
{
	struct timespec ts;
#ifdef __NetBSD__
	struct timeval tv;
	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, &ts);
#else
	nanotime(&ts);
#endif

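	/*
	 * Note: 11644473600 is the number of seconds between the
	 * Windows epoch (January 1, 1601) and the Unix epoch
	 * (January 1, 1970); Windows timestamps count 100ns
	 * intervals since 1601.
	 */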
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600ULL;

	return;
}

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
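
/*
 * Illustrative sketch of driver-side usage (an assumption, not code
 * from this file): a driver typically blocks on an event object with
 *
 *	nt_kevent ev;
 *	int64_t duetime = -50000000;	(relative timeout: 5 seconds)
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	...
 *	if (KeWaitForSingleObject((nt_dispatch_header *)&ev, 0, 0,
 *	    FALSE, &duetime) == STATUS_TIMEOUT)
 *		(nobody called KeSetEvent() in time)
 *
 * The cast assumes the event embeds its dispatch header first, which
 * is what the wait code below relies on.
 */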

__stdcall uint32_t
KeWaitForSingleObject(
	nt_dispatch_header *obj,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime)
{
#ifdef __FreeBSD__
	struct thread *td = curthread;
#endif
	kmutant *km;
	wait_block w;
	struct timeval tv;
	int error = 0;
	uint64_t curtime;

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	/*
	 * See if the object is a mutex. If so, and we already own
	 * it, then just increment the acquisition count and return.
	 *
	 * For any other kind of object, see if it's already in the
	 * signalled state, and if it is, just return. If the object
	 * is marked as a synchronization event, reset the state to
	 * unsignalled.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
		    km->km_ownerthread == curthread->td_proc) {
#else
		    km->km_ownerthread == curproc) {
#endif
			obj->dh_sigstate = FALSE;
			km->km_acquirecnt++;
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		}
	} else if (obj->dh_sigstate == TRUE) {
		if (obj->dh_type == EVENT_TYPE_SYNC)
			obj->dh_sigstate = FALSE;
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_SUCCESS);
	}

	w.wb_object = obj;
#ifdef __FreeBSD__
	w.wb_kthread = td;
#endif

	INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */
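
	/*
	 * Worked example (illustrative): a relative duetime of -10000000
	 * is 10,000,000 * 100ns = 1 second, so tv becomes { 1, 0 }; an
	 * absolute duetime 5,000,000 units past the current time yields
	 * tv = { 0, 500000 }.
	 */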

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		REMOVE_LIST_ENTRY((&w.wb_waitlist));
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	/*
	 * Mutexes are always synchronization objects, which means
	 * if several threads are waiting to acquire it, only one will
	 * be woken up. If that one is us, and the mutex is up for grabs,
	 * grab it.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL) {
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			km->km_acquirecnt++;
		}
	}

	if (obj->dh_type == EVENT_TYPE_SYNC)
		obj->dh_sigstate = FALSE;
	REMOVE_LIST_ENTRY((&w.wb_waitlist));

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
}

__stdcall static uint32_t
KeWaitForMultipleObjects(
	uint32_t cnt,
	nt_dispatch_header *obj[],
	uint32_t wtype,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime,
	wait_block *wb_array)
{
#ifdef __FreeBSD__
	struct thread *td = curthread;
#endif
	kmutant *km;
	wait_block _wb_array[THREAD_WAIT_OBJECTS];
	wait_block *w;
	struct timeval tv;
	int i, wcnt = 0, widx = 0, error = 0;
	uint64_t curtime;
	struct timespec t1, t2;
#ifdef __NetBSD__
	struct timeval tv1, tv2;
#endif

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);
	if (wb_array == NULL)
		w = &_wb_array[0];
	else
		w = wb_array;

	/* First pass: see if we can satisfy any waits immediately. */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_size == OTYPE_MUTEX) {
			km = (kmutant *)obj[i];
			if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
			    km->km_ownerthread == curthread->td_proc) {
#else
			    km->km_ownerthread == curproc) {
#endif
				obj[i]->dh_sigstate = FALSE;
				km->km_acquirecnt++;
#ifdef __FreeBSD__
				km->km_ownerthread = curthread->td_proc;
#else
				km->km_ownerthread = curproc;
#endif
				if (wtype == WAITTYPE_ANY) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					return (STATUS_WAIT_0 + i);
				}
			}
		} else if (obj[i]->dh_sigstate == TRUE) {
			if (obj[i]->dh_type == EVENT_TYPE_SYNC)
				obj[i]->dh_sigstate = FALSE;
			if (wtype == WAITTYPE_ANY) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				return (STATUS_WAIT_0 + i);
			}
		}
	}

	/*
	 * Second pass: set up wait for anything we can't
	 * satisfy immediately.
	 */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_sigstate == TRUE)
			continue;
		INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
		    (&w[i].wb_waitlist));
#ifdef __FreeBSD__
		w[i].wb_kthread = td;
#endif
		w[i].wb_object = obj[i];
		wcnt++;
	}

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
#ifdef __FreeBSD__
		nanotime(&t1);
#else
		microtime(&tv1);
		TIMEVAL_TO_TIMESPEC(&tv1, &t1);
#endif

		error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));

#ifdef __FreeBSD__
		nanotime(&t2);
#else
		microtime(&tv2);
		TIMEVAL_TO_TIMESPEC(&tv2, &t2);
#endif

		for (i = 0; i < cnt; i++) {
			if (obj[i]->dh_size == OTYPE_MUTEX) {
				km = (kmutant *)obj[i];
				if (km->km_ownerthread == NULL) {
					km->km_ownerthread =
#ifdef __FreeBSD__
					    curthread->td_proc;
#else
					    curproc;
#endif
					km->km_acquirecnt++;
				}
			}
			if (obj[i]->dh_sigstate == TRUE) {
				widx = i;
				if (obj[i]->dh_type == EVENT_TYPE_SYNC)
					obj[i]->dh_sigstate = FALSE;
				REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
				wcnt--;
			}
		}

		if (error || wtype == WAITTYPE_ANY)
			break;

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

	if (wcnt) {
		for (i = 0; i < cnt; i++)
			REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
	}

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	if (wtype == WAITTYPE_ANY && wcnt) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_WAIT_0 + widx);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
}

__stdcall static void
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint16_t
READ_REGISTER_USHORT(uint16_t *reg)
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint32_t
READ_REGISTER_ULONG(uint32_t *reg)
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static uint8_t
READ_REGISTER_UCHAR(uint8_t *reg)
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static int64_t
_allmul(int64_t a, int64_t b)
{
	return (a * b);
}

__stdcall static int64_t
_alldiv(int64_t a, int64_t b)
{
	return (a / b);
}

__stdcall static int64_t
_allrem(int64_t a, int64_t b)
{
	return (a % b);
}

__stdcall static uint64_t
_aullmul(uint64_t a, uint64_t b)
{
	return (a * b);
}

__stdcall static uint64_t
_aulldiv(uint64_t a, uint64_t b)
{
	return (a / b);
}

__stdcall static uint64_t
_aullrem(uint64_t a, uint64_t b)
{
	return (a % b);
}

__regparm static int64_t
_allshl(int64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static uint64_t
_aullshl(uint64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static int64_t
_allshr(int64_t a, uint8_t b)
{
	return (a >> b);
}

__regparm static uint64_t
_aullshr(uint64_t a, uint8_t b)
{
	return (a >> b);
}

static slist_entry *
ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
{
	slist_entry *oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(slist_header *head)
{
	slist_entry *first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}

/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a pretty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */
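
/*
 * For example (mirroring the initialization code below), the default
 * allocator stored in a lookaside list is the wrapper, not the bare
 * function:
 *
 *	lookaside->nll_l.gl_allocfunc =
 *	    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 *
 * so that a Windows driver calling through gl_allocfunc always enters
 * via the thunk created by windrv_wrap().
 */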

static funcptr
ntoskrnl_findwrap(funcptr func)
{
	image_patch_table *patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}

__stdcall static void
ExInitializePagedLookasideList(
	paged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	memset((char *)lookaside, 0, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
{
	void *buf;
	__stdcall void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

__stdcall static void
ExInitializeNPagedLookasideList(
	npaged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	memset((char *)lookaside, 0, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
{
	void *buf;
	__stdcall void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

/*
 * Note: the interlocked slist push and pop routines are
 * declared to be _fastcall in Windows. gcc 3.4 is supposed
 * to have support for this calling convention, however we
 * don't have that version available yet, so we kludge things
 * up using __regparm__(3) and some argument shuffling.
 */

__fastcall static slist_entry *
InterlockedPushEntrySList(REGARGS2(slist_header *head, slist_entry *entry))
{
	slist_entry *oldhead;

	oldhead = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);

	return(oldhead);
}

__fastcall static slist_entry *
InterlockedPopEntrySList(REGARGS1(slist_header *head))
{
	slist_entry *first;

	first = (slist_entry *)FASTCALL2(ExInterlockedPopEntrySList,
	    head, &ntoskrnl_global);

	return(first);
}

__fastcall static slist_entry *
ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
    slist_entry *entry), kspin_lock *lock)
{
	slist_entry *oldhead;
	uint8_t irql;

	KeAcquireSpinLock(lock, &irql);
	oldhead = ntoskrnl_pushsl(head, entry);
	KeReleaseSpinLock(lock, irql);

	return(oldhead);
}

__fastcall static slist_entry *
ExInterlockedPopEntrySList(REGARGS2(slist_header *head, kspin_lock *lock))
{
	slist_entry *first;
	uint8_t irql;

	KeAcquireSpinLock(lock, &irql);
	first = ntoskrnl_popsl(head);
	KeReleaseSpinLock(lock, irql);

	return(first);
}

__stdcall static uint16_t
ExQueryDepthSList(slist_header *head)
{
	uint16_t depth;
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	depth = head->slh_list.slh_depth;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(depth);
}

/*
 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
 * and KefReleaseSpinLockFromDpcLevel() routines appear to be
 * analogous to splnet()/splx() in their use. We can't create a new
 * mutex lock here because there is no complementary KeFreeSpinLock()
 * function. Instead, we grab a mutex from the mutex pool.
 */
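
/*
 * Typical acquire/release pairing, as used throughout this file (see
 * ExQueryDepthSList() above for a live instance):
 *
 *	uint8_t irql;
 *
 *	KeAcquireSpinLock(&ntoskrnl_global, &irql);
 *	... critical section at raised IRQL ...
 *	KeReleaseSpinLock(&ntoskrnl_global, irql);
 */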
__stdcall void
KeInitializeSpinLock(kspin_lock *lock)
{

	__cpu_simple_lock_init((__cpu_simple_lock_t *)lock);
}

#ifdef __i386__
__fastcall void
KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock *lock))
{

	__cpu_simple_lock((__cpu_simple_lock_t *)lock);
}

__fastcall void
KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock *lock))
{

	__cpu_simple_unlock((__cpu_simple_lock_t *)lock);
}

__stdcall uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	oldirql = KeRaiseIrql(DISPATCH_LEVEL);
	KeAcquireSpinLockAtDpcLevel(lock);

	return(oldirql);
}
#else
__stdcall void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_swap_uint((volatile u_int *)lock, 1) == 1)
		/* sit and spin */;

	return;
}

__stdcall void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	*(volatile u_int *)lock = 0;

	return;
}
#endif /* __i386__ */

__fastcall uintptr_t
InterlockedExchange(REGARGS2(volatile uint32_t *dst, uintptr_t val))
{
	uint8_t irql;
	uintptr_t r;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	r = *dst;
	*dst = val;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(r);
}

__fastcall static uint32_t
InterlockedIncrement(REGARGS1(volatile uint32_t *addend))
{
	/* Return the new value atomically; a separate read would race. */
	return(atomic_inc_32_nv(addend));
}

__fastcall static uint32_t
InterlockedDecrement(REGARGS1(volatile uint32_t *addend))
{
	return(atomic_dec_32_nv(addend));
}

__fastcall static void
ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t inc))
{
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	*addend += inc;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return;
}

__stdcall mdl *
IoAllocateMdl(
	void *vaddr,
	uint32_t len,
	uint8_t secondarybuf,
	uint8_t chargequota,
	irp *iopkt)
{
	mdl *m;
	int zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
#ifdef __FreeBSD__
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
#else
		m = pool_get(&mdl_pool, PR_WAITOK);
#endif
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl *last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

__stdcall void
IoFreeMdl(mdl *m)
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
#ifdef __FreeBSD__
		uma_zfree(mdl_zone, m);
#else
		pool_put(&mdl_pool, m);
#endif
	else
		ExFreePool(m);

	return;
}

__stdcall static uint32_t
MmSizeOfMdl(void *vaddr, size_t len)
{
	uint32_t l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}

/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
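
/*
 * Sketch of the result (illustrative, assuming the usual 4 KB pages):
 * for an MDL over a nonpaged buffer starting at kernel VA va, the
 * array filled in below ends up as
 *
 *	pages[0] = va;
 *	pages[1] = va + PAGE_SIZE;
 *	...
 *
 * i.e. virtual addresses, not PFNs.
 */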
__stdcall static void
MmBuildMdlForNonPagedPool(mdl *m)
{
	vm_offset_t *mdl_pages;
	int pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);

	return;
}

__stdcall static void *
MmMapLockedPages(
	mdl *buf,
	uint8_t accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return(MmGetMdlVirtualAddress(buf));
}

__stdcall static void *
MmMapLockedPagesSpecifyCache(
	mdl *buf,
	uint8_t accessmode,
	uint32_t cachetype,
	void *vaddr,
	uint32_t bugcheck,
	uint32_t prio)
{
	return(MmMapLockedPages(buf, accessmode));
}

__stdcall static void
MmUnmapLockedPages(
	void *vaddr,
	mdl *buf)
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
	return;
}

1979 __stdcall static size_t
1980 RtlCompareMemory(const void *s1, const void *s2, size_t len)
1981 {
1982 size_t i, total = 0;
	uint8_t *m1, *m2;

	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);

	for (i = 0; i < len; i++) {
		if (m1[i] == m2[i])
			total++;
	}
	return(total);
}

__stdcall static void
RtlInitAnsiString(ndis_ansi_string *dst, char *src)
{
	ndis_ansi_string *a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->nas_len = a->nas_maxlen = 0;
		a->nas_buf = NULL;
	} else {
		a->nas_buf = src;
		a->nas_len = a->nas_maxlen = strlen(src);
	}

	return;
}

__stdcall static void
RtlInitUnicodeString(ndis_unicode_string *dst, uint16_t *src)
{
	ndis_unicode_string *u;
	int i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while(src[i] != 0)
			i++;
		u->us_buf = src;
		/* NT unicode string lengths are byte counts, not characters. */
		u->us_len = u->us_maxlen = i * 2;
	}

	return;
}

__stdcall ndis_status
RtlUnicodeStringToInteger(ndis_unicode_string *ustr, uint32_t base,
    uint32_t *val)
{
	uint16_t *uchr;
	int len, neg = 0;
	char abuf[64];
	char *astr;

	uchr = ustr->us_buf;
	len = ustr->us_len;
	memset(abuf, 0, sizeof(abuf));

	/*
	 * us_len is a byte count, so each 16-bit code unit we
	 * consume takes 2 off of len.
	 */
	if ((char)((*uchr) & 0xFF) == '-') {
		neg = 1;
		uchr++;
		len -= 2;
	} else if ((char)((*uchr) & 0xFF) == '+') {
		neg = 0;
		uchr++;
		len -= 2;
	}

	/* If no base was given, infer it from a 'b', 'o' or 'x' prefix. */
	if (base == 0) {
		if ((char)((*uchr) & 0xFF) == 'b') {
			base = 2;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'o') {
			base = 8;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'x') {
			base = 16;
			uchr++;
			len -= 2;
		} else
			base = 10;
	}

	astr = abuf;
	if (neg) {
		strcpy(astr, "-");
		astr++;
	}

	ndis_unicode_to_ascii(uchr, len, &astr);
	*val = strtoul(abuf, NULL, base);

	return(NDIS_STATUS_SUCCESS);
}

__stdcall static void
RtlFreeUnicodeString(ndis_unicode_string *ustr)
{
	if (ustr->us_buf == NULL)
		return;
	free(ustr->us_buf, M_DEVBUF);
	ustr->us_buf = NULL;
	return;
}

__stdcall static void
RtlFreeAnsiString(ndis_ansi_string *astr)
{
	if (astr->nas_buf == NULL)
		return;
	free(astr->nas_buf, M_DEVBUF);
	astr->nas_buf = NULL;
	return;
}

static int
atoi(const char *str)
{
#ifdef __FreeBSD__
	return (int)strtol(str, NULL, 10);
#else
	int n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

static long
atol(const char *str)
{
#ifdef __FreeBSD__
	return strtol(str, NULL, 10);
#else
	long n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

/*
 * Stolen from ./netipsec/key.c.
 */

#ifdef __NetBSD__
/*
 * The NetBSD kernel provides no srandom(), so stub it out; rand()
 * below then effectively just returns random().
 */
void srandom(int);
void srandom(int arg) {return;}
#endif

static int
rand(void)
{
	struct timeval tv;

	microtime(&tv);
	srandom(tv.tv_usec);
	return((int)random());
}

static void
srand(unsigned int seed)
{
	srandom(seed);
	return;
}

__stdcall static uint8_t
IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
{
	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return(TRUE);
	return(FALSE);
}

__stdcall static ndis_status
IoGetDeviceProperty(
	device_object *devobj,
	uint32_t regprop,
	uint32_t buflen,
	void *prop,
	uint32_t *reslen)
{
	driver_object *drv;
	uint16_t **name;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		name = prop;
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return(STATUS_INVALID_PARAMETER_2);
	}

	return(STATUS_SUCCESS);
}

__stdcall static void
KeInitializeMutex(
	kmutant *kmutex,
	uint32_t level)
{
	INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = TRUE;
	kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
	kmutex->km_header.dh_size = OTYPE_MUTEX;
	kmutex->km_acquirecnt = 0;
	kmutex->km_ownerthread = NULL;
	return;
}

__stdcall static uint32_t
KeReleaseMutex(
	kmutant *kmutex,
	uint8_t kwait)
{
	mtx_lock(&ntoskrnl_dispatchlock);

#ifdef __FreeBSD__
	if (kmutex->km_ownerthread != curthread->td_proc) {
#else
	if (kmutex->km_ownerthread != curproc) {
#endif
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_MUTANT_NOT_OWNED);
	}
	kmutex->km_acquirecnt--;
	if (kmutex->km_acquirecnt == 0) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_wakeup(&kmutex->km_header);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(kmutex->km_acquirecnt);
}

__stdcall static uint32_t
KeReadStateMutex(kmutant *kmutex)
{
	return(kmutex->km_header.dh_sigstate);
}
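
/*
 * Driver-side sketch (hypothetical): kmutant acquisition goes
 * through KeWaitForSingleObject(), and each successful wait must
 * be balanced by a KeReleaseMutex(). The wait arguments follow the
 * Windows prototype; this emulation ignores most of them.
 *
 *	kmutant km;
 *
 *	KeInitializeMutex(&km, 0);
 *	KeWaitForSingleObject(&km, 0, 0, FALSE, NULL);
 *	-- ... critical section ...
 *	KeReleaseMutex(&km, FALSE);
 */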

__stdcall void
KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
{
	INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	kevent->k_header.dh_type = type;
	kevent->k_header.dh_size = OTYPE_EVENT;
	return;
}

__stdcall uint32_t
KeResetEvent(nt_kevent *kevent)
{
	uint32_t prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(prevstate);
}

__stdcall uint32_t
KeSetEvent(
	nt_kevent *kevent,
	uint32_t increment,
	uint8_t kwait)
{
	uint32_t prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	ntoskrnl_wakeup(&kevent->k_header);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(prevstate);
}

__stdcall void
KeClearEvent(nt_kevent *kevent)
{
	kevent->k_header.dh_sigstate = FALSE;
	return;
}

__stdcall uint32_t
KeReadStateEvent(nt_kevent *kevent)
{
	return(kevent->k_header.dh_sigstate);
}
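
/*
 * Sketch of the event API above from a driver's point of view
 * (hypothetical code): one context waits, another signals.
 *
 *	nt_kevent ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	-- waiter:
 *	KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);
 *	-- signaller (the increment argument is ignored here):
 *	KeSetEvent(&ev, 0, FALSE);
 */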

__stdcall static ndis_status
ObReferenceObjectByHandle(
	ndis_handle handle,
	uint32_t reqaccess,
	void *otype,
	uint8_t accessmode,
	void **object,
	void **handleinfo)
{
	nt_objref *nr;

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return(NDIS_STATUS_FAILURE);

	INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_size = OTYPE_THREAD;
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return(NDIS_STATUS_SUCCESS);
}

__fastcall static void
ObfDereferenceObject(REGARGS1(void *object))
{
	nt_objref *nr;

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);

	return;
}

__stdcall static uint32_t
ZwClose(ndis_handle handle)
{
	return(STATUS_SUCCESS);
}

/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
static void
ntoskrnl_thrfunc(void *arg)
{
	thread_context *thrctx;
	__stdcall uint32_t (*tfunc)(void *);
	void *tctx;
	uint32_t rval;

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */
}

__stdcall static ndis_status
PsCreateSystemThread(
	ndis_handle *handle,
	uint32_t reqaccess,
	void *objattrs,
	ndis_handle phandle,
	void *clientid,
	void *thrfunc,
	void *thrctx)
{
	int error;
	char tname[128];
	thread_context *tc;
	struct proc *p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return(NDIS_STATUS_FAILURE);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	snprintf(tname, sizeof(tname), "windows kthread %d", ntoskrnl_kth);
#ifdef __FreeBSD__
	error = kthread_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, tname);
#else
	/* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
	error = ndis_kthread_create(ntoskrnl_thrfunc, tc, &p, NULL, 0, tname);
#endif
	*handle = p;

	ntoskrnl_kth++;

	return(error);
}

/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
__stdcall static ndis_status
PsTerminateSystemThread(ndis_status status)
{
	struct nt_objref *nr;

	mtx_lock(&ntoskrnl_dispatchlock);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
#ifdef __FreeBSD__
		if (nr->no_obj != curthread->td_proc)
#else
		if (nr->no_obj != curproc)
#endif
			continue;
		ntoskrnl_wakeup(&nr->no_dh);
		break;
	}
	ntoskrnl_kth--;
	mtx_unlock(&ntoskrnl_dispatchlock);

	kthread_exit(0);
	/* NOTREACHED */
}
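
/*
 * The Windows-side idiom the routines above emulate (hypothetical
 * driver code; my_thrfunc/ctx are placeholders and the access/type
 * arguments are ignored by this emulation):
 *
 *	ndis_handle th;
 *	void *thobj;
 *
 *	PsCreateSystemThread(&th, 0, NULL, NULL, NULL, my_thrfunc, ctx);
 *	ObReferenceObjectByHandle(th, 0, NULL, 0, &thobj, NULL);
 *	-- ... thread runs, eventually calls PsTerminateSystemThread() ...
 *	KeWaitForSingleObject(thobj, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(thobj);
 */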

static uint32_t
DbgPrint(char *fmt, ...)
{
	if (bootverbose) {
		/*
		 * XXX: formatting is currently disabled; hook up
		 * va_start(ap, fmt)/vprintf(fmt, ap)/va_end(ap) here
		 * to see driver debug output.
		 */
	}

	return(STATUS_SUCCESS);
}

__stdcall static void
DbgBreakPoint(void)
{
#if defined(__FreeBSD__) && __FreeBSD_version < 502113
	Debugger("DbgBreakPoint(): breakpoint");
#elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
	kdb_enter("DbgBreakPoint(): breakpoint");
#else /* NetBSD case */
	; /* TODO: find a way to enter the debugger without panicking */
#endif
}

static void
ntoskrnl_timercall(void *arg)
{
	ktimer *timer;
	struct timeval tv;

	mtx_lock(&ntoskrnl_dispatchlock);

	timer = arg;

	timer->k_header.dh_inserted = FALSE;

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it afterwards.
	 */
	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
		timer->k_handle = timeout(ntoskrnl_timercall,
		    timer, tvtohz(&tv));
#else /* __NetBSD__ */
		callout_reset(timer->k_handle, tvtohz(&tv),
		    ntoskrnl_timercall, timer);
#endif /* __NetBSD__ */
	}

	if (timer->k_dpc != NULL)
		KeInsertQueueDpc(timer->k_dpc, NULL, NULL);

	ntoskrnl_wakeup(&timer->k_header);
	mtx_unlock(&ntoskrnl_dispatchlock);
}
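
/*
 * Driver-side sketch (hypothetical) of the periodic case handled
 * above: arm a timer whose DPC fires every 100ms until cancelled.
 * A negative duetime is a relative interval in 100ns units, and the
 * period is in milliseconds; my_dpcfunc/my_ctx are placeholders.
 *
 *	KeInitializeTimerEx(&timer, EVENT_TYPE_SYNC);
 *	KeInitializeDpc(&dpc, my_dpcfunc, my_ctx);
 *	KeSetTimerEx(&timer, -1000000, 100, &dpc);
 *	-- ...
 *	KeCancelTimer(&timer);
 */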

__stdcall void
KeInitializeTimer(ktimer *timer)
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);

	return;
}

__stdcall void
KeInitializeTimerEx(ktimer *timer, uint32_t type)
{
	if (timer == NULL)
		return;

	INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	timer->k_header.dh_type = type;
	timer->k_header.dh_size = OTYPE_TIMER;
#ifdef __FreeBSD__
	callout_handle_init(&timer->k_handle);
#else
	callout_init(timer->k_handle, 0);
#endif

	return;
}

/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
 */
static void
ntoskrnl_run_dpc(void *arg)
{
	__stdcall kdpc_func dpcfunc;
	kdpc *dpc;
	uint8_t irql;

	dpc = arg;
	dpcfunc = dpc->k_deferedfunc;
	irql = KeRaiseIrql(DISPATCH_LEVEL);
	MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
	    dpc->k_sysarg1, dpc->k_sysarg2);
	KeLowerIrql(irql);

	return;
}
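
/*
 * Shape of the driver DPC routine the wrapper above dispatches via
 * MSCALL4() (hypothetical example; the name is a placeholder):
 *
 *	__stdcall static void
 *	my_dpcfunc(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
 *	{
 *		-- runs at (emulated) DISPATCH_LEVEL: must not sleep
 *	}
 */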

__stdcall void
KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
{
	if (dpc == NULL)
		return;

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;

	return;
}

__stdcall uint8_t
KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
{
	dpc->k_sysarg1 = sysarg1;
	dpc->k_sysarg2 = sysarg2;

	if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}

__stdcall uint8_t
KeRemoveQueueDpc(kdpc *dpc)
{
	if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}

__stdcall uint8_t
KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
{
	struct timeval tv;
	uint64_t curtime;
	uint8_t pending;

	if (timer == NULL)
		return(FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);
	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	/*
	 * As in Windows, a negative duetime is a relative interval
	 * in 100-nanosecond units, while a non-negative duetime is
	 * an absolute expiration time.
	 */
	if (duetime < 0) {
		tv.tv_sec = -duetime / 10000000;
		tv.tv_usec = (-duetime / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = (duetime - curtime) / 10000000;
			tv.tv_usec = (duetime - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
	timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv));
#else
	callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
#endif
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(pending);
}

__stdcall uint8_t
KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
{
	return (KeSetTimerEx(timer, duetime, 0, dpc));
}

__stdcall uint8_t
KeCancelTimer(ktimer *timer)
{
	uint8_t pending;

	if (timer == NULL)
		return(FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		pending = TRUE;
	} else
		pending = KeRemoveQueueDpc(timer->k_dpc);

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(pending);
}

__stdcall uint8_t
KeReadStateTimer(ktimer *timer)
{
	return(timer->k_header.dh_sigstate);
}

__stdcall static void
dummy(void)
{
	printf("ntoskrnl dummy called...\n");
	return;
}

image_patch_table ntoskrnl_functbl[] = {
	IMPORT_FUNC(RtlCompareMemory),
	IMPORT_FUNC(RtlEqualUnicodeString),
	IMPORT_FUNC(RtlCopyUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToAnsiString),
	IMPORT_FUNC(RtlAnsiStringToUnicodeString),
	IMPORT_FUNC(RtlInitAnsiString),
	IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
	IMPORT_FUNC(RtlInitUnicodeString),
	IMPORT_FUNC(RtlFreeAnsiString),
	IMPORT_FUNC(RtlFreeUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToInteger),
	IMPORT_FUNC_MAP(_snprintf, snprintf),
	IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
	IMPORT_FUNC(DbgPrint),
	IMPORT_FUNC(DbgBreakPoint),
	IMPORT_FUNC(strncmp),
	IMPORT_FUNC(strcmp),
	IMPORT_FUNC(strncpy),
	IMPORT_FUNC(strcpy),
	IMPORT_FUNC(strlen),
	IMPORT_FUNC(memcpy),
	/* XXX: there is no memmove shim yet; it shares ntoskrnl_memset(). */
	IMPORT_FUNC_MAP(memmove, ntoskrnl_memset),
	IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
	IMPORT_FUNC(IoAllocateDriverObjectExtension),
	IMPORT_FUNC(IoGetDriverObjectExtension),
	IMPORT_FUNC(IofCallDriver),
	IMPORT_FUNC(IofCompleteRequest),
	IMPORT_FUNC(IoAcquireCancelSpinLock),
	IMPORT_FUNC(IoReleaseCancelSpinLock),
	IMPORT_FUNC(IoCancelIrp),
	IMPORT_FUNC(IoCreateDevice),
	IMPORT_FUNC(IoDeleteDevice),
	IMPORT_FUNC(IoGetAttachedDevice),
	IMPORT_FUNC(IoAttachDeviceToDeviceStack),
	IMPORT_FUNC(IoDetachDevice),
	IMPORT_FUNC(IoBuildSynchronousFsdRequest),
	IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
	IMPORT_FUNC(IoBuildDeviceIoControlRequest),
	IMPORT_FUNC(IoAllocateIrp),
	IMPORT_FUNC(IoReuseIrp),
	IMPORT_FUNC(IoMakeAssociatedIrp),
	IMPORT_FUNC(IoFreeIrp),
	IMPORT_FUNC(IoInitializeIrp),
	IMPORT_FUNC(KeWaitForSingleObject),
	IMPORT_FUNC(KeWaitForMultipleObjects),
	IMPORT_FUNC(_allmul),
	IMPORT_FUNC(_alldiv),
	IMPORT_FUNC(_allrem),
	IMPORT_FUNC(_allshr),
	IMPORT_FUNC(_allshl),
	IMPORT_FUNC(_aullmul),
	IMPORT_FUNC(_aulldiv),
	IMPORT_FUNC(_aullrem),
	IMPORT_FUNC(_aullshr),
	IMPORT_FUNC(_aullshl),
	IMPORT_FUNC(atoi),
	IMPORT_FUNC(atol),
	IMPORT_FUNC(rand),
	IMPORT_FUNC(srand),
	IMPORT_FUNC(WRITE_REGISTER_USHORT),
	IMPORT_FUNC(READ_REGISTER_USHORT),
	IMPORT_FUNC(WRITE_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_UCHAR),
	IMPORT_FUNC(WRITE_REGISTER_UCHAR),
	IMPORT_FUNC(ExInitializePagedLookasideList),
	IMPORT_FUNC(ExDeletePagedLookasideList),
	IMPORT_FUNC(ExInitializeNPagedLookasideList),
	IMPORT_FUNC(ExDeleteNPagedLookasideList),
	IMPORT_FUNC(InterlockedPopEntrySList),
	IMPORT_FUNC(InterlockedPushEntrySList),
	IMPORT_FUNC(ExQueryDepthSList),
	IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
	IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList),
	IMPORT_FUNC(ExInterlockedPopEntrySList),
	IMPORT_FUNC(ExInterlockedPushEntrySList),
	IMPORT_FUNC(ExAllocatePoolWithTag),
	IMPORT_FUNC(ExFreePool),
#ifdef __i386__
	IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
#endif
	IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
	IMPORT_FUNC(InterlockedIncrement),
	IMPORT_FUNC(InterlockedDecrement),
	IMPORT_FUNC(ExInterlockedAddLargeStatistic),
	IMPORT_FUNC(IoAllocateMdl),
	IMPORT_FUNC(IoFreeMdl),
	IMPORT_FUNC(MmSizeOfMdl),
	IMPORT_FUNC(MmMapLockedPages),
	IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
	IMPORT_FUNC(MmUnmapLockedPages),
	IMPORT_FUNC(MmBuildMdlForNonPagedPool),
	IMPORT_FUNC(KeInitializeSpinLock),
	IMPORT_FUNC(IoIsWdmVersionAvailable),
	IMPORT_FUNC(IoGetDeviceProperty),
	IMPORT_FUNC(KeInitializeMutex),
	IMPORT_FUNC(KeReleaseMutex),
	IMPORT_FUNC(KeReadStateMutex),
	IMPORT_FUNC(KeInitializeEvent),
	IMPORT_FUNC(KeSetEvent),
	IMPORT_FUNC(KeResetEvent),
	IMPORT_FUNC(KeClearEvent),
	IMPORT_FUNC(KeReadStateEvent),
	IMPORT_FUNC(KeInitializeTimer),
	IMPORT_FUNC(KeInitializeTimerEx),
	IMPORT_FUNC(KeSetTimer),
	IMPORT_FUNC(KeSetTimerEx),
	IMPORT_FUNC(KeCancelTimer),
	IMPORT_FUNC(KeReadStateTimer),
	IMPORT_FUNC(KeInitializeDpc),
	IMPORT_FUNC(KeInsertQueueDpc),
	IMPORT_FUNC(KeRemoveQueueDpc),
	IMPORT_FUNC(ObReferenceObjectByHandle),
	IMPORT_FUNC(ObfDereferenceObject),
	IMPORT_FUNC(ZwClose),
	IMPORT_FUNC(PsCreateSystemThread),
	IMPORT_FUNC(PsTerminateSystemThread),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */
	{ NULL, (FUNC)dummy, NULL },

	/* End of list. */
	{ NULL, NULL, NULL }
};
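
/*
 * For illustration: the import patcher walks a table like the one
 * above once per symbol in a driver's import directory. A minimal
 * sketch of that lookup, assuming each entry pairs an import name
 * with its replacement function (field names are illustrative):
 *
 *	for (patch = ntoskrnl_functbl; patch->ipt_name != NULL; patch++)
 *		if (strcmp(patch->ipt_name, name) == 0)
 *			break;
 *	func = patch->ipt_func;	-- NULL name: the catch-all dummy()
 */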