Index: head/sys/compat/ndis/kern_windrv.c =================================================================== --- head/sys/compat/ndis/kern_windrv.c (revision 343297) +++ head/sys/compat/ndis/kern_windrv.c (revision 343298) @@ -1,990 +1,1171 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2005 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #endif +#ifdef __amd64__ +#include +#endif + #include #include #include #include #include #include #include #include +#ifdef __amd64__ +struct fpu_cc_ent { + struct fpu_kern_ctx *ctx; + LIST_ENTRY(fpu_cc_ent) entries; +}; +static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head = + LIST_HEAD_INITIALIZER(fpu_free_head); +static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head = + LIST_HEAD_INITIALIZER(fpu_busy_head); +static struct mtx fpu_free_mtx; +static struct mtx fpu_busy_mtx; +#endif + static struct mtx drvdb_mtx; static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head; static driver_object fake_pci_driver; /* serves both PCI and cardbus */ static driver_object fake_pccard_driver; #ifdef __i386__ static void x86_oldldt(void *); static void x86_newldt(void *); struct tid { void *tid_except_list; /* 0x00 */ uint32_t tid_oldfs; /* 0x04 */ uint32_t tid_selector; /* 0x08 */ struct tid *tid_self; /* 0x0C */ int tid_cpu; /* 0x10 */ }; static struct tid *my_tids; #endif /* __i386__ */ #define DUMMY_REGISTRY_PATH "\\\\some\\bogus\\path" int windrv_libinit(void) { STAILQ_INIT(&drvdb_head); mtx_init(&drvdb_mtx, "Windows driver DB lock", "Windows internal lock", MTX_DEF); +#ifdef __amd64__ + LIST_INIT(&fpu_free_head); + LIST_INIT(&fpu_busy_head); + mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF); + mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF); +#endif + /* * PCI and pccard devices don't need to use IRPs to * interact with their bus drivers (usually), so our * emulated PCI and pccard drivers are just stubs. * USB devices, on the other hand, do all their I/O * by exchanging IRPs with the USB bus driver, so * for that we need to provide emulator dispatcher * routines, which are in a separate module. */ windrv_bus_attach(&fake_pci_driver, "PCI Bus"); windrv_bus_attach(&fake_pccard_driver, "PCCARD Bus"); #ifdef __i386__ /* * In order to properly support SMP machines, we have * to modify the GDT on each CPU, since we never know * on which one we'll end up running. */ my_tids = ExAllocatePoolWithTag(NonPagedPool, sizeof(struct tid) * mp_ncpus, 0); if (my_tids == NULL) panic("failed to allocate thread info blocks"); smp_rendezvous(NULL, x86_newldt, NULL, NULL); #endif return (0); } int windrv_libfini(void) { struct drvdb_ent *d; +#ifdef __amd64__ + struct fpu_cc_ent *ent; +#endif mtx_lock(&drvdb_mtx); while(STAILQ_FIRST(&drvdb_head) != NULL) { d = STAILQ_FIRST(&drvdb_head); STAILQ_REMOVE_HEAD(&drvdb_head, link); free(d, M_DEVBUF); } mtx_unlock(&drvdb_mtx); RtlFreeUnicodeString(&fake_pci_driver.dro_drivername); RtlFreeUnicodeString(&fake_pccard_driver.dro_drivername); mtx_destroy(&drvdb_mtx); #ifdef __i386__ smp_rendezvous(NULL, x86_oldldt, NULL, NULL); ExFreePool(my_tids); #endif +#ifdef __amd64__ + while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) { + LIST_REMOVE(ent, entries); + fpu_kern_free_ctx(ent->ctx); + free(ent, M_DEVBUF); + } + mtx_destroy(&fpu_free_mtx); + + ent = LIST_FIRST(&fpu_busy_head); + KASSERT(ent == NULL, ("busy fpu context list is not empty")); + mtx_destroy(&fpu_busy_mtx); +#endif return (0); } /* * Given the address of a driver image, find its corresponding * driver_object. 
*/ driver_object * windrv_lookup(img, name) vm_offset_t img; char *name; { struct drvdb_ent *d; unicode_string us; ansi_string as; bzero((char *)&us, sizeof(us)); /* Damn unicode. */ if (name != NULL) { RtlInitAnsiString(&as, name); if (RtlAnsiStringToUnicodeString(&us, &as, TRUE)) return (NULL); } mtx_lock(&drvdb_mtx); STAILQ_FOREACH(d, &drvdb_head, link) { if (d->windrv_object->dro_driverstart == (void *)img || (bcmp((char *)d->windrv_object->dro_drivername.us_buf, (char *)us.us_buf, us.us_len) == 0 && us.us_len)) { mtx_unlock(&drvdb_mtx); if (name != NULL) ExFreePool(us.us_buf); return (d->windrv_object); } } mtx_unlock(&drvdb_mtx); if (name != NULL) RtlFreeUnicodeString(&us); return (NULL); } struct drvdb_ent * windrv_match(matchfunc, ctx) matchfuncptr matchfunc; void *ctx; { struct drvdb_ent *d; int match; mtx_lock(&drvdb_mtx); STAILQ_FOREACH(d, &drvdb_head, link) { if (d->windrv_devlist == NULL) continue; match = matchfunc(d->windrv_bustype, d->windrv_devlist, ctx); if (match == TRUE) { mtx_unlock(&drvdb_mtx); return (d); } } mtx_unlock(&drvdb_mtx); return (NULL); } /* * Remove a driver_object from our database and destroy it. Throw * away any custom driver extension info that may have been added. */ int windrv_unload(mod, img, len) module_t mod; vm_offset_t img; int len; { struct drvdb_ent *db, *r = NULL; driver_object *drv; device_object *d, *pdo; device_t dev; list_entry *e; drv = windrv_lookup(img, NULL); /* * When we unload a driver image, we need to force a * detach of any devices that might be using it. We * need the PDOs of all attached devices for this. * Getting at them is a little hard. We basically * have to walk the device lists of all our bus * drivers. */ mtx_lock(&drvdb_mtx); STAILQ_FOREACH(db, &drvdb_head, link) { /* * Fake bus drivers have no devlist info. * If this driver has devlist info, it's * a loaded Windows driver and has no PDOs, * so skip it. */ if (db->windrv_devlist != NULL) continue; pdo = db->windrv_object->dro_devobj; while (pdo != NULL) { d = pdo->do_attacheddev; if (d->do_drvobj != drv) { pdo = pdo->do_nextdev; continue; } dev = pdo->do_devext; pdo = pdo->do_nextdev; mtx_unlock(&drvdb_mtx); device_detach(dev); mtx_lock(&drvdb_mtx); } } STAILQ_FOREACH(db, &drvdb_head, link) { if (db->windrv_object->dro_driverstart == (void *)img) { r = db; STAILQ_REMOVE(&drvdb_head, db, drvdb_ent, link); break; } } mtx_unlock(&drvdb_mtx); if (r == NULL) return (ENOENT); if (drv == NULL) return (ENOENT); /* * Destroy any custom extensions that may have been added. */ drv = r->windrv_object; while (!IsListEmpty(&drv->dro_driverext->dre_usrext)) { e = RemoveHeadList(&drv->dro_driverext->dre_usrext); ExFreePool(e); } /* Free the driver extension */ free(drv->dro_driverext, M_DEVBUF); /* Free the driver name */ RtlFreeUnicodeString(&drv->dro_drivername); /* Free driver object */ free(drv, M_DEVBUF); /* Free our DB handle */ free(r, M_DEVBUF); return (0); } #define WINDRV_LOADED htonl(0x42534F44) #ifdef __amd64__ static void patch_user_shared_data_address(vm_offset_t img, size_t len) { unsigned long i, n, max_addr, *addr; n = len - sizeof(unsigned long); max_addr = KI_USER_SHARED_DATA + sizeof(kuser_shared_data); for (i = 0; i < n; i++) { addr = (unsigned long *)(img + i); if (*addr >= KI_USER_SHARED_DATA && *addr < max_addr) { *addr -= KI_USER_SHARED_DATA; *addr += (unsigned long)&kuser_shared_data; } } } #endif /* * Loader routine for actual Windows driver modules, ultimately * calls the driver's DriverEntry() routine.
*/ int windrv_load(mod, img, len, bustype, devlist, regvals) module_t mod; vm_offset_t img; int len; interface_type bustype; void *devlist; ndis_cfg *regvals; { image_import_descriptor imp_desc; image_optional_header opt_hdr; driver_entry entry; struct drvdb_ent *new; struct driver_object *drv; int status; uint32_t *ptr; ansi_string as; /* * First step: try to relocate and dynalink the executable * driver image. */ ptr = (uint32_t *)(img + 8); if (*ptr == WINDRV_LOADED) goto skipreloc; /* Perform text relocation */ if (pe_relocate(img)) return (ENOEXEC); /* Dynamically link the NDIS.SYS routines -- required. */ if (pe_patch_imports(img, "NDIS", ndis_functbl)) return (ENOEXEC); /* Dynamically link the HAL.dll routines -- optional. */ if (pe_get_import_descriptor(img, &imp_desc, "HAL") == 0) { if (pe_patch_imports(img, "HAL", hal_functbl)) return (ENOEXEC); } /* Dynamically link ntoskrnl.exe -- optional. */ if (pe_get_import_descriptor(img, &imp_desc, "ntoskrnl") == 0) { if (pe_patch_imports(img, "ntoskrnl", ntoskrnl_functbl)) return (ENOEXEC); } #ifdef __amd64__ patch_user_shared_data_address(img, len); #endif /* Dynamically link USBD.SYS -- optional */ if (pe_get_import_descriptor(img, &imp_desc, "USBD") == 0) { if (pe_patch_imports(img, "USBD", usbd_functbl)) return (ENOEXEC); } *ptr = WINDRV_LOADED; skipreloc: /* Next step: find the driver entry point. */ pe_get_optional_header(img, &opt_hdr); entry = (driver_entry)pe_translate_addr(img, opt_hdr.ioh_entryaddr); /* Next step: allocate and store a driver object. */ new = malloc(sizeof(struct drvdb_ent), M_DEVBUF, M_NOWAIT|M_ZERO); if (new == NULL) return (ENOMEM); drv = malloc(sizeof(driver_object), M_DEVBUF, M_NOWAIT|M_ZERO); if (drv == NULL) { free (new, M_DEVBUF); return (ENOMEM); } /* Allocate a driver extension structure too. */ drv->dro_driverext = malloc(sizeof(driver_extension), M_DEVBUF, M_NOWAIT|M_ZERO); if (drv->dro_driverext == NULL) { free(new, M_DEVBUF); free(drv, M_DEVBUF); return (ENOMEM); } InitializeListHead((&drv->dro_driverext->dre_usrext)); drv->dro_driverstart = (void *)img; drv->dro_driversize = len; RtlInitAnsiString(&as, DUMMY_REGISTRY_PATH); if (RtlAnsiStringToUnicodeString(&drv->dro_drivername, &as, TRUE)) { free(new, M_DEVBUF); free(drv, M_DEVBUF); return (ENOMEM); } new->windrv_object = drv; new->windrv_regvals = regvals; new->windrv_devlist = devlist; new->windrv_bustype = bustype; /* Now call the DriverEntry() function. */ status = MSCALL2(entry, drv, &drv->dro_drivername); if (status != STATUS_SUCCESS) { RtlFreeUnicodeString(&drv->dro_drivername); free(drv, M_DEVBUF); free(new, M_DEVBUF); return (ENODEV); } mtx_lock(&drvdb_mtx); STAILQ_INSERT_HEAD(&drvdb_head, new, link); mtx_unlock(&drvdb_mtx); return (0); } /* * Make a new Physical Device Object for a device that was * detected/plugged in. For us, the PDO is just a way to * get at the device_t. */ int windrv_create_pdo(drv, bsddev) driver_object *drv; device_t bsddev; { device_object *dev; /* * This is a new physical device object, which technically * is the "top of the stack." Consequently, we don't do * an IoAttachDeviceToDeviceStack() here. */ mtx_lock(&drvdb_mtx); IoCreateDevice(drv, 0, NULL, FILE_DEVICE_UNKNOWN, 0, FALSE, &dev); mtx_unlock(&drvdb_mtx); /* Stash pointer to our BSD device handle. 
*/ dev->do_devext = bsddev; return (STATUS_SUCCESS); } void windrv_destroy_pdo(drv, bsddev) driver_object *drv; device_t bsddev; { device_object *pdo; pdo = windrv_find_pdo(drv, bsddev); /* Remove reference to device_t */ pdo->do_devext = NULL; mtx_lock(&drvdb_mtx); IoDeleteDevice(pdo); mtx_unlock(&drvdb_mtx); } /* * Given a device_t, find the corresponding PDO in a driver's * device list. */ device_object * windrv_find_pdo(drv, bsddev) driver_object *drv; device_t bsddev; { device_object *pdo; mtx_lock(&drvdb_mtx); pdo = drv->dro_devobj; while (pdo != NULL) { if (pdo->do_devext == bsddev) { mtx_unlock(&drvdb_mtx); return (pdo); } pdo = pdo->do_nextdev; } mtx_unlock(&drvdb_mtx); return (NULL); } /* * Add an internally emulated driver to the database. We need this * to set up an emulated bus driver so that it can receive IRPs. */ int windrv_bus_attach(drv, name) driver_object *drv; char *name; { struct drvdb_ent *new; ansi_string as; new = malloc(sizeof(struct drvdb_ent), M_DEVBUF, M_NOWAIT|M_ZERO); if (new == NULL) return (ENOMEM); RtlInitAnsiString(&as, name); if (RtlAnsiStringToUnicodeString(&drv->dro_drivername, &as, TRUE)) { free(new, M_DEVBUF); return (ENOMEM); } /* * Set up a fake image pointer to avoid false matches * in windrv_lookup(). */ drv->dro_driverstart = (void *)0xFFFFFFFF; new->windrv_object = drv; new->windrv_devlist = NULL; new->windrv_regvals = NULL; mtx_lock(&drvdb_mtx); STAILQ_INSERT_HEAD(&drvdb_head, new, link); mtx_unlock(&drvdb_mtx); return (0); } #ifdef __amd64__ extern void x86_64_wrap(void); extern void x86_64_wrap_call(void); extern void x86_64_wrap_end(void); int windrv_wrap(func, wrap, argcnt, ftype) funcptr func; funcptr *wrap; int argcnt; int ftype; { funcptr p; vm_offset_t *calladdr; vm_offset_t wrapstart, wrapend, wrapcall; wrapstart = (vm_offset_t)&x86_64_wrap; wrapend = (vm_offset_t)&x86_64_wrap_end; wrapcall = (vm_offset_t)&x86_64_wrap_call; /* Allocate a new wrapper instance. */ p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT); if (p == NULL) return (ENOMEM); /* Copy over the code. */ bcopy((char *)wrapstart, p, (wrapend - wrapstart)); /* Insert the function address into the new wrapper instance. 
*/ calladdr = (uint64_t *)((char *)p + (wrapcall - wrapstart) + 2); *calladdr = (vm_offset_t)func; *wrap = p; return (0); +} + +static struct fpu_cc_ent * +request_fpu_cc_ent(void) +{ + struct fpu_cc_ent *ent; + + mtx_lock(&fpu_free_mtx); + if ((ent = LIST_FIRST(&fpu_free_head)) != NULL) { + LIST_REMOVE(ent, entries); + mtx_unlock(&fpu_free_mtx); + mtx_lock(&fpu_busy_mtx); + LIST_INSERT_HEAD(&fpu_busy_head, ent, entries); + mtx_unlock(&fpu_busy_mtx); + return (ent); + } + mtx_unlock(&fpu_free_mtx); + + if ((ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF, M_NOWAIT | + M_ZERO)) != NULL) { + ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL | + FPU_KERN_NOWAIT); + if (ent->ctx != NULL) { + mtx_lock(&fpu_busy_mtx); + LIST_INSERT_HEAD(&fpu_busy_head, ent, entries); + mtx_unlock(&fpu_busy_mtx); + } else { + free(ent, M_DEVBUF); + ent = NULL; + } + } + + return (ent); +} + +static void +release_fpu_cc_ent(struct fpu_cc_ent *ent) +{ + mtx_lock(&fpu_busy_mtx); + LIST_REMOVE(ent, entries); + mtx_unlock(&fpu_busy_mtx); + mtx_lock(&fpu_free_mtx); + LIST_INSERT_HEAD(&fpu_free_head, ent, entries); + mtx_unlock(&fpu_free_mtx); +} + +uint64_t +_x86_64_call1(void *fn, uint64_t a) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call1(fn, a); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call2(void *fn, uint64_t a, uint64_t b) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call2(fn, a, b); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call3(fn, a, b, c); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call4(fn, a, b, c, d); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d, + uint64_t e) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call5(fn, a, b, c, d, e); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d, + uint64_t e, uint64_t f) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call6(fn, a, b, c, d, e, f); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); } #endif /* __amd64__ */ #ifdef __i386__ struct x86desc { uint16_t x_lolimit; uint16_t x_base0; uint8_t x_base1; uint8_t x_flags; uint8_t x_hilimit; uint8_t x_base2; }; struct gdt { uint16_t limit; void *base; } 
__attribute__((__packed__)); extern uint16_t x86_getfs(void); extern void x86_setfs(uint16_t); extern void *x86_gettid(void); extern void x86_critical_enter(void); extern void x86_critical_exit(void); extern void x86_getldt(struct gdt *, uint16_t *); extern void x86_setldt(struct gdt *, uint16_t); #define SEL_LDT 4 /* local descriptor table */ #define SEL_TO_FS(x) (((x) << 3)) /* * FreeBSD 6.0 and later has a special GDT segment reserved * specifically for us, so if GNDIS_SEL is defined, use that. * If not, use GTGATE_SEL, which is uninitialized and infrequently * used. */ #ifdef GNDIS_SEL #define FREEBSD_EMPTYSEL GNDIS_SEL #else #define FREEBSD_EMPTYSEL GTGATE_SEL /* slot 7 */ #endif /* * The meanings of various bits in a descriptor vary a little * depending on whether the descriptor will be used as a * code, data or system descriptor. (And that in turn depends * on which segment register selects the descriptor.) * We're only trying to create a data segment, so the definitions * below are the ones that apply to a data descriptor. */ #define SEGFLAGLO_PRESENT 0x80 /* segment is present */ #define SEGFLAGLO_PRIVLVL 0x60 /* privlevel needed for this seg */ #define SEGFLAGLO_CD 0x10 /* 1 = code/data, 0 = system */ #define SEGFLAGLO_MBZ 0x08 /* must be zero */ #define SEGFLAGLO_EXPANDDOWN 0x04 /* limit expands down */ #define SEGFLAGLO_WRITEABLE 0x02 /* segment is writeable */ #define SEGGLAGLO_ACCESSED 0x01 /* segment has been accessed */ #define SEGFLAGHI_GRAN 0x80 /* granularity, 1 = byte, 0 = page */ #define SEGFLAGHI_BIG 0x40 /* 1 = 32 bit stack, 0 = 16 bit */ /* * Context switch from UNIX to Windows. Save the existing value * of %fs for this processor, then change it to point to our * fake TID. Note that it is also possible to pin ourselves * to our current CPU, though I'm not sure this is really * necessary. It depends on whether or not an interrupt might * preempt us while Windows code is running and we wind up * scheduled onto another CPU as a result. So far, it doesn't * seem like this is what happens. */ void ctxsw_utow(void) { struct tid *t; t = &my_tids[curthread->td_oncpu]; /* * Ugly hack. During system bootstrap (cold == 1), only CPU 0 * is running. So if we were loaded at bootstrap, only CPU 0 * will have our special GDT entry. This is a problem for SMP * systems, so to deal with this, we check here to make sure * the TID for this processor has been initialized, and if it * hasn't, we need to do it right now or else things will * explode. */ if (t->tid_self != t) x86_newldt(NULL); x86_critical_enter(); t->tid_oldfs = x86_getfs(); t->tid_cpu = curthread->td_oncpu; sched_pin(); x86_setfs(SEL_TO_FS(t->tid_selector)); x86_critical_exit(); /* Now entering Windows land, population: you. */ } /* * Context switch from Windows back to UNIX. Restore %fs to * its previous value. This always occurs after a call to * ctxsw_utow(). */ void ctxsw_wtou(void) { struct tid *t; x86_critical_enter(); t = x86_gettid(); x86_setfs(t->tid_oldfs); sched_unpin(); x86_critical_exit(); /* Welcome back to UNIX land, we missed you. 
*/ #ifdef EXTRA_SANITY if (t->tid_cpu != curthread->td_oncpu) panic("ctxsw GOT MOVED TO OTHER CPU!"); #endif } static int windrv_wrap_stdcall(funcptr, funcptr *, int); static int windrv_wrap_fastcall(funcptr, funcptr *, int); static int windrv_wrap_regparm(funcptr, funcptr *); extern void x86_fastcall_wrap(void); extern void x86_fastcall_wrap_call(void); extern void x86_fastcall_wrap_arg(void); extern void x86_fastcall_wrap_end(void); static int windrv_wrap_fastcall(func, wrap, argcnt) funcptr func; funcptr *wrap; int8_t argcnt; { funcptr p; vm_offset_t *calladdr; uint8_t *argaddr; vm_offset_t wrapstart, wrapend, wrapcall, wraparg; wrapstart = (vm_offset_t)&x86_fastcall_wrap; wrapend = (vm_offset_t)&x86_fastcall_wrap_end; wrapcall = (vm_offset_t)&x86_fastcall_wrap_call; wraparg = (vm_offset_t)&x86_fastcall_wrap_arg; /* Allocate a new wrapper instance. */ p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT); if (p == NULL) return (ENOMEM); /* Copy over the code. */ bcopy((char *)wrapstart, p, (wrapend - wrapstart)); /* Insert the function address into the new wrapper instance. */ calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1)); *calladdr = (vm_offset_t)func; argcnt -= 2; if (argcnt < 1) argcnt = 0; argaddr = (u_int8_t *)((char *)p + ((wraparg - wrapstart) + 1)); *argaddr = argcnt * sizeof(uint32_t); *wrap = p; return (0); } extern void x86_stdcall_wrap(void); extern void x86_stdcall_wrap_call(void); extern void x86_stdcall_wrap_arg(void); extern void x86_stdcall_wrap_end(void); static int windrv_wrap_stdcall(func, wrap, argcnt) funcptr func; funcptr *wrap; uint8_t argcnt; { funcptr p; vm_offset_t *calladdr; uint8_t *argaddr; vm_offset_t wrapstart, wrapend, wrapcall, wraparg; wrapstart = (vm_offset_t)&x86_stdcall_wrap; wrapend = (vm_offset_t)&x86_stdcall_wrap_end; wrapcall = (vm_offset_t)&x86_stdcall_wrap_call; wraparg = (vm_offset_t)&x86_stdcall_wrap_arg; /* Allocate a new wrapper instance. */ p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT); if (p == NULL) return (ENOMEM); /* Copy over the code. */ bcopy((char *)wrapstart, p, (wrapend - wrapstart)); /* Insert the function address into the new wrapper instance. */ calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1)); *calladdr = (vm_offset_t)func; argaddr = (u_int8_t *)((char *)p + ((wraparg - wrapstart) + 1)); *argaddr = argcnt * sizeof(uint32_t); *wrap = p; return (0); } extern void x86_regparm_wrap(void); extern void x86_regparm_wrap_call(void); extern void x86_regparm_wrap_end(void); static int windrv_wrap_regparm(func, wrap) funcptr func; funcptr *wrap; { funcptr p; vm_offset_t *calladdr; vm_offset_t wrapstart, wrapend, wrapcall; wrapstart = (vm_offset_t)&x86_regparm_wrap; wrapend = (vm_offset_t)&x86_regparm_wrap_end; wrapcall = (vm_offset_t)&x86_regparm_wrap_call; /* Allocate a new wrapper instance. */ p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT); if (p == NULL) return (ENOMEM); /* Copy over the code. */ bcopy(x86_regparm_wrap, p, (wrapend - wrapstart)); /* Insert the function address into the new wrapper instance. 
*/ calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1)); *calladdr = (vm_offset_t)func; *wrap = p; return (0); } int windrv_wrap(func, wrap, argcnt, ftype) funcptr func; funcptr *wrap; int argcnt; int ftype; { switch(ftype) { case WINDRV_WRAP_FASTCALL: return (windrv_wrap_fastcall(func, wrap, argcnt)); case WINDRV_WRAP_STDCALL: return (windrv_wrap_stdcall(func, wrap, argcnt)); case WINDRV_WRAP_REGPARM: return (windrv_wrap_regparm(func, wrap)); case WINDRV_WRAP_CDECL: return (windrv_wrap_stdcall(func, wrap, 0)); default: break; } return (EINVAL); } static void x86_oldldt(dummy) void *dummy; { struct x86desc *gdt; struct gdt gtable; uint16_t ltable; mtx_lock_spin(&dt_lock); /* Grab location of existing GDT. */ x86_getldt(&gtable, &ltable); /* Find the slot we updated. */ gdt = gtable.base; gdt += FREEBSD_EMPTYSEL; /* Empty it out. */ bzero((char *)gdt, sizeof(struct x86desc)); /* Restore GDT. */ x86_setldt(&gtable, ltable); mtx_unlock_spin(&dt_lock); } static void x86_newldt(dummy) void *dummy; { struct gdt gtable; uint16_t ltable; struct x86desc *l; struct thread *t; t = curthread; mtx_lock_spin(&dt_lock); /* Grab location of existing GDT. */ x86_getldt(&gtable, &ltable); /* Get pointer to the GDT table. */ l = gtable.base; /* Get pointer to empty slot */ l += FREEBSD_EMPTYSEL; /* Initialize TID for this CPU. */ my_tids[t->td_oncpu].tid_selector = FREEBSD_EMPTYSEL; my_tids[t->td_oncpu].tid_self = &my_tids[t->td_oncpu]; /* Set up new GDT entry. */ l->x_lolimit = sizeof(struct tid); l->x_hilimit = SEGFLAGHI_GRAN|SEGFLAGHI_BIG; l->x_base0 = (vm_offset_t)(&my_tids[t->td_oncpu]) & 0xFFFF; l->x_base1 = ((vm_offset_t)(&my_tids[t->td_oncpu]) >> 16) & 0xFF; l->x_base2 = ((vm_offset_t)(&my_tids[t->td_oncpu]) >> 24) & 0xFF; l->x_flags = SEGFLAGLO_PRESENT|SEGFLAGLO_CD|SEGFLAGLO_WRITEABLE; /* Update the GDT. */ x86_setldt(&gtable, ltable); mtx_unlock_spin(&dt_lock); /* Whew. */ } #endif /* __i386__ */ int windrv_unwrap(func) funcptr func; { free(func, M_DEVBUF); return (0); } Index: head/sys/compat/ndis/pe_var.h =================================================================== --- head/sys/compat/ndis/pe_var.h (revision 343297) +++ head/sys/compat/ndis/pe_var.h (revision 343298) @@ -1,549 +1,557 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _PE_VAR_H_ #define _PE_VAR_H_ /* * Image Format */ #define IMAGE_DOS_SIGNATURE 0x5A4D /* MZ */ #define IMAGE_OS2_SIGNATURE 0x454E /* NE */ #define IMAGE_OS2_SIGNATURE_LE 0x454C /* LE */ #define IMAGE_VXD_SIGNATURE 0x454C /* LE */ #define IMAGE_NT_SIGNATURE 0x00004550 /* PE00 */ /* * All PE files have one of these, just so if you attempt to * run them, they'll print out a message telling you they can * only be run in Windows. */ struct image_dos_header { uint16_t idh_magic; /* Magic number */ uint16_t idh_cblp; /* Bytes on last page of file */ uint16_t idh_cp; /* Pages in file */ uint16_t idh_crlc; /* Relocations */ uint16_t idh_cparhdr; /* Size of header in paragraphs */ uint16_t idh_minalloc; /* Minimum extra paragraphs needed */ uint16_t idh_maxalloc; /* Maximum extra paragraphs needed */ uint16_t idh_ss; /* Initial (relative) SS value */ uint16_t idh_sp; /* Initial SP value */ uint16_t idh_csum; /* Checksum */ uint16_t idh_ip; /* Initial IP value */ uint16_t idh_cs; /* Initial (relative) CS value */ uint16_t idh_lfarlc; /* File address of relocation table */ uint16_t idh_ovno; /* Overlay number */ uint16_t idh_rsvd1[4]; /* Reserved words */ uint16_t idh_oemid; /* OEM identifier (for idh_oeminfo) */ uint16_t idh_oeminfo; /* OEM information; oemid specific */ uint16_t idh_rsvd2[10]; /* Reserved words */ uint32_t idh_lfanew; /* File address of new exe header */ }; typedef struct image_dos_header image_dos_header; /* * File header format. 
*/ struct image_file_header { uint16_t ifh_machine; /* Machine type */ uint16_t ifh_numsections; /* # of sections */ uint32_t ifh_timestamp; /* Date/time stamp */ uint32_t ifh_symtblptr; /* Offset to symbol table */ uint32_t ifh_numsyms; /* # of symbols */ uint16_t ifh_optionalhdrlen; /* Size of optional header */ uint16_t ifh_characteristics; /* Characteristics */ }; typedef struct image_file_header image_file_header; /* Machine types */ #define IMAGE_FILE_MACHINE_UNKNOWN 0 #define IMAGE_FILE_MACHINE_I860 0x014d #define IMAGE_FILE_MACHINE_I386 0x014c #define IMAGE_FILE_MACHINE_R3000 0x0162 #define IMAGE_FILE_MACHINE_R4000 0x0166 #define IMAGE_FILE_MACHINE_R10000 0x0168 #define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 #define IMAGE_FILE_MACHINE_ALPHA 0x0184 #define IMAGE_FILE_MACHINE_SH3 0x01a2 #define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 #define IMAGE_FILE_MACHINE_SH3E 0x01a4 #define IMAGE_FILE_MACHINE_SH4 0x01a6 #define IMAGE_FILE_MACHINE_SH5 0x01a8 #define IMAGE_FILE_MACHINE_ARM 0x01c0 #define IMAGE_FILE_MACHINE_THUMB 0x01c2 #define IMAGE_FILE_MACHINE_AM33 0x01d3 #define IMAGE_FILE_MACHINE_POWERPC 0x01f0 #define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 #define IMAGE_FILE_MACHINE_MIPS16 0x0266 #define IMAGE_FILE_MACHINE_ALPHA64 0x0284 #define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 #define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 #define IMAGE_FILE_MACHINE_AXP64 IMAGE_FILE_MACHINE_ALPHA64 #define IMAGE_FILE_MACHINE_TRICORE 0x0520 #define IMAGE_FILE_MACHINE_CEF 0x0cef #define IMAGE_FILE_MACHINE_EBC 0x0ebc #define IMAGE_FILE_MACHINE_AMD64 0x8664 #define IMAGE_FILE_MACHINE_M32R 0x9041 #define IMAGE_FILE_MACHINE_CEE 0xc0ee /* Characteristics */ #define IMAGE_FILE_RELOCS_STRIPPED 0x0001 /* No relocation info */ #define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 #define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 #define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 #define IMAGE_FILE_AGGRESIVE_WS_TRIM 0x0010 #define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 #define IMAGE_FILE_16BIT_MACHINE 0x0040 #define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 #define IMAGE_FILE_32BIT_MACHINE 0x0100 #define IMAGE_FILE_DEBUG_STRIPPED 0x0200 #define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 #define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 #define IMAGE_FILE_SYSTEM 0x1000 #define IMAGE_FILE_DLL 0x2000 #define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 #define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 #define IMAGE_SIZEOF_FILE_HEADER 20 /* * Directory format. */ struct image_data_directory { uint32_t idd_vaddr; /* virtual address */ uint32_t idd_size; /* size */ }; typedef struct image_data_directory image_data_directory; #define IMAGE_DIRECTORY_ENTRIES_MAX 16 /* * Optional header format. 
*/ struct image_optional_header { /* Standard fields */ uint16_t ioh_magic; uint8_t ioh_linkerver_major; uint8_t ioh_linkerver_minor; uint32_t ioh_codesize; uint32_t ioh_datasize; uint32_t ioh_bsssize; uint32_t ioh_entryaddr; uint32_t ioh_codebaseaddr; #ifndef __amd64__ uint32_t ioh_databaseaddr; #endif /* NT-specific fields */ uintptr_t ioh_imagebase; uint32_t ioh_sectalign; uint32_t ioh_filealign; uint16_t ioh_osver_major; uint16_t ioh_osver_minor; uint16_t ioh_imagever_major; uint16_t ioh_imagever_minor; uint16_t ioh_subsys_major; uint16_t ioh_subsys_minor; uint32_t ioh_win32ver; uint32_t ioh_imagesize; uint32_t ioh_headersize; uint32_t ioh_csum; uint16_t ioh_subsys; uint16_t ioh_dll_characteristics; uintptr_t ioh_stackreservesize; uintptr_t ioh_stackcommitsize; uintptr_t ioh_heapreservesize; uintptr_t ioh_heapcommitsize; uint16_t ioh_loaderflags; uint32_t ioh_rva_size_cnt; image_data_directory ioh_datadir[IMAGE_DIRECTORY_ENTRIES_MAX]; }; typedef struct image_optional_header image_optional_header; struct image_nt_header { uint32_t inh_signature; image_file_header inh_filehdr; image_optional_header inh_optionalhdr; }; typedef struct image_nt_header image_nt_header; #define IMAGE_SIZEOF_NT_HEADER(nthdr) \ (offsetof(image_nt_header, inh_optionalhdr) + \ ((image_nt_header *)(nthdr))->inh_filehdr.ifh_optionalhdrlen) /* Directory Entries */ #define IMAGE_DIRECTORY_ENTRY_EXPORT 0 /* Export Directory */ #define IMAGE_DIRECTORY_ENTRY_IMPORT 1 /* Import Directory */ #define IMAGE_DIRECTORY_ENTRY_RESOURCE 2 /* Resource Directory */ #define IMAGE_DIRECTORY_ENTRY_EXCEPTION 3 /* Exception Directory */ #define IMAGE_DIRECTORY_ENTRY_SECURITY 4 /* Security Directory */ #define IMAGE_DIRECTORY_ENTRY_BASERELOC 5 /* Base Relocation Table */ #define IMAGE_DIRECTORY_ENTRY_DEBUG 6 /* Debug Directory */ #define IMAGE_DIRECTORY_ENTRY_COPYRIGHT 7 /* Description String */ #define IMAGE_DIRECTORY_ENTRY_GLOBALPTR 8 /* Machine Value (MIPS GP) */ #define IMAGE_DIRECTORY_ENTRY_TLS 9 /* TLS Directory */ #define IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG 10 /* Load Configuration Directory */ #define IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT 11 /* Bound Import Directory in headers */ #define IMAGE_DIRECTORY_ENTRY_IAT 12 /* Import Address Table */ #define IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT 13 #define IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR 14 /* Resource types */ #define RT_CURSOR 1 #define RT_BITMAP 2 #define RT_ICON 3 #define RT_MENU 4 #define RT_DIALOG 5 #define RT_STRING 6 #define RT_FONTDIR 7 #define RT_FONT 8 #define RT_ACCELERATOR 9 #define RT_RCDATA 10 #define RT_MESSAGETABLE 11 #define RT_GROUP_CURSOR 12 #define RT_GROUP_ICON 14 #define RT_VERSION 16 #define RT_DLGINCLUDE 17 #define RT_PLUGPLAY 19 #define RT_VXD 20 #define RT_ANICURSOR 21 #define RT_ANIICON 22 #define RT_HTML 23 /* * Section header format. 
*/ #define IMAGE_SHORT_NAME_LEN 8 struct image_section_header { uint8_t ish_name[IMAGE_SHORT_NAME_LEN]; union { uint32_t ish_paddr; uint32_t ish_vsize; } ish_misc; uint32_t ish_vaddr; uint32_t ish_rawdatasize; uint32_t ish_rawdataaddr; uint32_t ish_relocaddr; uint32_t ish_linenumaddr; uint16_t ish_numrelocs; uint16_t ish_numlinenums; uint32_t ish_characteristics; }; typedef struct image_section_header image_section_header; #define IMAGE_SIZEOF_SECTION_HEADER 40 #define IMAGE_FIRST_SECTION(nthdr) \ ((image_section_header *)((vm_offset_t)(nthdr) + \ offsetof(image_nt_header, inh_optionalhdr) + \ ((image_nt_header *)(nthdr))->inh_filehdr.ifh_optionalhdrlen)) /* * Import format */ struct image_import_by_name { uint16_t iibn_hint; uint8_t iibn_name[1]; }; #define IMAGE_ORDINAL_FLAG 0x80000000 #define IMAGE_ORDINAL(Ordinal) (Ordinal & 0xffff) struct image_import_descriptor { uint32_t iid_import_name_table_addr; uint32_t iid_timestamp; uint32_t iid_forwardchain; uint32_t iid_nameaddr; uint32_t iid_import_address_table_addr; }; typedef struct image_import_descriptor image_import_descriptor; struct image_base_reloc { uint32_t ibr_vaddr; uint32_t ibr_blocksize; uint16_t ibr_rel[1]; }; typedef struct image_base_reloc image_base_reloc; #define IMR_RELTYPE(x) ((x >> 12) & 0xF) #define IMR_RELOFFSET(x) (x & 0xFFF) /* generic relocation types */ #define IMAGE_REL_BASED_ABSOLUTE 0 #define IMAGE_REL_BASED_HIGH 1 #define IMAGE_REL_BASED_LOW 2 #define IMAGE_REL_BASED_HIGHLOW 3 #define IMAGE_REL_BASED_HIGHADJ 4 #define IMAGE_REL_BASED_MIPS_JMPADDR 5 #define IMAGE_REL_BASED_SECTION 6 #define IMAGE_REL_BASED_REL 7 #define IMAGE_REL_BASED_MIPS_JMPADDR16 9 #define IMAGE_REL_BASED_DIR64 10 #define IMAGE_REL_BASED_HIGH3ADJ 11 struct image_resource_directory_entry { uint32_t irde_name; uint32_t irde_dataoff; }; typedef struct image_resource_directory_entry image_resource_directory_entry; #define RESOURCE_NAME_STR 0x80000000 #define RESOURCE_DIR_FLAG 0x80000000 struct image_resource_directory { uint32_t ird_characteristics; uint32_t ird_timestamp; uint16_t ird_majorver; uint16_t ird_minorver; uint16_t ird_named_entries; uint16_t ird_id_entries; #ifdef notdef image_resource_directory_entry ird_entries[1]; #endif }; typedef struct image_resource_directory image_resource_directory; struct image_resource_directory_string { uint16_t irds_len; char irds_name[1]; }; typedef struct image_resource_directory_string image_resource_directory_string; struct image_resource_directory_string_u { uint16_t irds_len; char irds_name[1]; }; typedef struct image_resource_directory_string_u image_resource_directory_string_u; struct image_resource_data_entry { uint32_t irde_offset; uint32_t irde_size; uint32_t irde_codepage; uint32_t irde_rsvd; }; typedef struct image_resource_data_entry image_resource_data_entry; struct message_resource_data { uint32_t mrd_numblocks; #ifdef notdef message_resource_block mrd_blocks[1]; #endif }; typedef struct message_resource_data message_resource_data; struct message_resource_block { uint32_t mrb_lowid; uint32_t mrb_highid; uint32_t mrb_entryoff; }; typedef struct message_resource_block message_resource_block; struct message_resource_entry { uint16_t mre_len; uint16_t mre_flags; char mre_text[]; }; typedef struct message_resource_entry message_resource_entry; #define MESSAGE_RESOURCE_UNICODE 0x0001 struct image_patch_table { char *ipt_name; void (*ipt_func)(void); void (*ipt_wrap)(void); int ipt_argcnt; int ipt_ftype; }; typedef struct image_patch_table image_patch_table; /* * AMD64 support. 
Microsoft uses a different calling convention * than everyone else on the amd64 platform. Sadly, gcc has no * built-in support for it (yet). * * The three major differences we're concerned with are: * * - The first 4 register-sized arguments are passed in the * %rcx, %rdx, %r8 and %r9 registers, and the rest are pushed * onto the stack. (The ELF ABI uses 6 registers, not 4). * * - The caller must reserve space on the stack for the 4 * register arguments in case the callee has to spill them. * * - The stack must be 16-byte aligned by the time the callee * executes. A call instruction implicitly pushes an 8 byte * return address onto the stack. We have to make sure that * the amount of space we consume, plus the return address, * is a multiple of 16 bytes in size. This means that in * some cases, we may need to chew up an extra 8 bytes on * the stack that will be unused. * * On the bright side, Microsoft seems to be using just the one * calling convention for all functions on amd64, unlike x86 where * they use a mix of _stdcall, _fastcall and _cdecl. */ #ifdef __amd64__ extern uint64_t x86_64_call1(void *, uint64_t); extern uint64_t x86_64_call2(void *, uint64_t, uint64_t); extern uint64_t x86_64_call3(void *, uint64_t, uint64_t, uint64_t); extern uint64_t x86_64_call4(void *, uint64_t, uint64_t, uint64_t, uint64_t); extern uint64_t x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); extern uint64_t x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call1(void *, uint64_t); +uint64_t _x86_64_call2(void *, uint64_t, uint64_t); +uint64_t _x86_64_call3(void *, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call4(void *, uint64_t, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t, + uint64_t); +uint64_t _x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t, + uint64_t, uint64_t); #define MSCALL1(fn, a) \ - x86_64_call1((fn), (uint64_t)(a)) + _x86_64_call1((fn), (uint64_t)(a)) #define MSCALL2(fn, a, b) \ - x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b)) + _x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b)) #define MSCALL3(fn, a, b, c) \ - x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c)) #define MSCALL4(fn, a, b, c, d) \ - x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d)) #define MSCALL5(fn, a, b, c, d, e) \ - x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d), (uint64_t)(e)) #define MSCALL6(fn, a, b, c, d, e, f) \ - x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d), (uint64_t)(e), (uint64_t)(f)) #endif /* __amd64__ */ #ifdef __i386__ extern uint32_t x86_stdcall_call(void *, int, ...); #define MSCALL1(fn, a) x86_stdcall_call(fn, 1, (a)) #define MSCALL2(fn, a, b) x86_stdcall_call(fn, 2, (a), (b)) #define MSCALL3(fn, a, b, c) x86_stdcall_call(fn, 3, (a), (b), (c)) #define MSCALL4(fn, a, b, c, d) x86_stdcall_call(fn, 4, (a), (b), (c), (d)) #define MSCALL5(fn, a, b, c, d, e) \ x86_stdcall_call(fn, 5, (a), (b), (c), (d), (e)) #define MSCALL6(fn, a, b, c, d, e, f) \ x86_stdcall_call(fn, 6, (a), (b), (c), (d), (e), (f)) #endif /* __i386__ */ #define FUNC void(*)(void) #ifdef __i386__ #define IMPORT_SFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_STDCALL }
#define IMPORT_SFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_STDCALL } #define IMPORT_FFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_FASTCALL } #define IMPORT_FFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_FASTCALL } #define IMPORT_RFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_REGPARM } #define IMPORT_RFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_REGPARM } #define IMPORT_CFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_CDECL } #define IMPORT_CFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_CDECL } #endif /* __i386__ */ #ifdef __amd64__ #define IMPORT_SFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_AMD64 } #define IMPORT_SFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_AMD64 } #define IMPORT_FFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_AMD64 } #define IMPORT_FFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_AMD64 } #define IMPORT_RFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_AMD64 } #define IMPORT_RFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_AMD64 } #define IMPORT_CFUNC(x, y) { #x, (FUNC)x, NULL, y, WINDRV_WRAP_AMD64 } #define IMPORT_CFUNC_MAP(x, y, z) \ { #x, (FUNC)y, NULL, z, WINDRV_WRAP_AMD64 } #endif /* __amd64__ */ __BEGIN_DECLS extern int pe_get_dos_header(vm_offset_t, image_dos_header *); extern int pe_is_nt_image(vm_offset_t); extern int pe_get_optional_header(vm_offset_t, image_optional_header *); extern int pe_get_file_header(vm_offset_t, image_file_header *); extern int pe_get_section_header(vm_offset_t, image_section_header *); extern int pe_numsections(vm_offset_t); extern vm_offset_t pe_imagebase(vm_offset_t); extern vm_offset_t pe_directory_offset(vm_offset_t, uint32_t); extern vm_offset_t pe_translate_addr (vm_offset_t, vm_offset_t); extern int pe_get_section(vm_offset_t, image_section_header *, const char *); extern int pe_relocate(vm_offset_t); extern int pe_get_import_descriptor(vm_offset_t, image_import_descriptor *, char *); extern int pe_patch_imports(vm_offset_t, char *, image_patch_table *); extern int pe_get_messagetable(vm_offset_t, message_resource_data **); extern int pe_get_message(vm_offset_t, uint32_t, char **, int *, uint16_t *); __END_DECLS #endif /* _PE_VAR_H_ */
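
The PE structures declared in pe_var.h above chain together as follows: the image_dos_header at the start of the file carries idh_lfanew, the file offset of the image_nt_header, whose optional header in turn holds the entry-point RVA that windrv_load() translates and calls. A minimal sketch of that walk, assuming the pe_var.h definitions above are in scope; the helper name locate_nt_header() is hypothetical and only illustrates the checks that pe_is_nt_image() and friends perform:

	/*
	 * Hypothetical illustration (not part of this change): find the NT
	 * header of a PE image mapped at 'img'.
	 */
	static image_nt_header *
	locate_nt_header(vm_offset_t img)
	{
		image_dos_header *dos;
		image_nt_header *nt;

		dos = (image_dos_header *)img;
		if (dos->idh_magic != IMAGE_DOS_SIGNATURE)
			return (NULL);		/* no MZ stub */
		nt = (image_nt_header *)(img + dos->idh_lfanew);
		if (nt->inh_signature != IMAGE_NT_SIGNATURE)
			return (NULL);		/* not a PE image */
		return (nt);
	}

The entry point windrv_load() invokes is then the RVA in nt->inh_optionalhdr.ioh_entryaddr, converted to a mapped address with pe_translate_addr().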
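
As a worked example of the stack-alignment rule in the amd64 calling-convention comment above: the space a call wrapper has to reserve is the 32-byte spill area for the register arguments, plus room for any arguments beyond the fourth, plus (sometimes) an 8-byte pad so that the total together with the return address is a multiple of 16. A sketch of that arithmetic follows; the helper name ms_amd64_stack_space() is hypothetical and merely restates the rule:

	#include <sys/types.h>

	/*
	 * Hypothetical helper: bytes of stack to reserve before a
	 * Microsoft-ABI call taking 'argcnt' register-sized arguments.
	 */
	static __inline size_t
	ms_amd64_stack_space(int argcnt)
	{
		size_t space;

		space = 4 * sizeof(uint64_t);	/* spill area for %rcx, %rdx, %r8, %r9 */
		if (argcnt > 4)
			space += (argcnt - 4) * sizeof(uint64_t); /* stack arguments */
		if ((space + sizeof(uint64_t)) % 16 != 0) /* + 8-byte return address */
			space += sizeof(uint64_t);	/* alignment pad */
		return (space);
	}

For a six-argument call this gives 32 + 16 = 48 bytes; 48 plus the 8-byte return address is 56, which is not a multiple of 16, so one extra 8-byte slot is consumed and 56 bytes are reserved in total.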
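
Finally, a usage sketch of the FPU-aware call path this change introduces: on amd64 the MSCALLn() macros now expand to the new _x86_64_calln() wrappers in kern_windrv.c, which bracket the Microsoft-ABI trampoline with fpu_kern_enter()/fpu_kern_leave() using a context cached on the free/busy lists. The caller below is hypothetical, modeled on the DriverEntry() invocation in windrv_load(), and assumes the compat/ndis headers that declare driver_entry, driver_object and MSCALL2() are included:

	/*
	 * Hypothetical caller: invoke a driver's DriverEntry() via MSCALL2().
	 * On amd64 the macro now expands to _x86_64_call2(), which
	 *	1. takes an fpu_cc_ent from the free list (or allocates one),
	 *	2. calls fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL),
	 *	3. runs x86_64_call2(fn, a, b), the Microsoft-ABI trampoline,
	 *	4. calls fpu_kern_leave() and puts the entry back on the free list,
	 * so the Windows code may use FPU/SSE instructions without corrupting
	 * the FPU state of the thread it runs on.
	 */
	static int
	call_driver_entry(driver_entry entry, driver_object *drv)
	{

		return (MSCALL2(entry, drv, &drv->dro_drivername));
	}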