Index: head/sys/compat/ndis/kern_ndis.c =================================================================== --- head/sys/compat/ndis/kern_ndis.c (revision 298827) +++ head/sys/compat/ndis/kern_ndis.c (revision 298828) @@ -1,1441 +1,1441 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NDIS_DUMMY_PATH "\\\\some\\bogus\\path" #define NDIS_FLAG_RDONLY 1 static void ndis_status_func(ndis_handle, ndis_status, void *, uint32_t); static void ndis_statusdone_func(ndis_handle); static void ndis_setdone_func(ndis_handle, ndis_status); static void ndis_getdone_func(ndis_handle, ndis_status); static void ndis_resetdone_func(ndis_handle, ndis_status, uint8_t); static void ndis_sendrsrcavail_func(ndis_handle); static void ndis_intrsetup(kdpc *, device_object *, irp *, struct ndis_softc *); static void ndis_return(device_object *, void *); static image_patch_table kernndis_functbl[] = { IMPORT_SFUNC(ndis_status_func, 4), IMPORT_SFUNC(ndis_statusdone_func, 1), IMPORT_SFUNC(ndis_setdone_func, 2), IMPORT_SFUNC(ndis_getdone_func, 2), IMPORT_SFUNC(ndis_resetdone_func, 3), IMPORT_SFUNC(ndis_sendrsrcavail_func, 1), IMPORT_SFUNC(ndis_intrsetup, 4), IMPORT_SFUNC(ndis_return, 1), { NULL, NULL, NULL } }; static struct nd_head ndis_devhead; /* * This allows us to export our symbols to other modules. * Note that we call ourselves 'ndisapi' to avoid a namespace * collision with if_ndis.ko, which internally calls itself * 'ndis.' * * Note: some of the subsystems depend on each other, so the * order in which they're started is important. The order of * importance is: * * HAL - spinlocks and IRQL manipulation * ntoskrnl - DPC and workitem threads, object waiting * windrv - driver/device registration * * The HAL should also be the last thing shut down, since * the ntoskrnl subsystem will use spinlocks right up until * the DPC and workitem threads are terminated. 
*/ static int ndis_modevent(module_t mod, int cmd, void *arg) { int error = 0; image_patch_table *patch; switch (cmd) { case MOD_LOAD: /* Initialize subsystems */ hal_libinit(); ntoskrnl_libinit(); windrv_libinit(); ndis_libinit(); usbd_libinit(); patch = kernndis_functbl; while (patch->ipt_func != NULL) { windrv_wrap((funcptr)patch->ipt_func, (funcptr *)&patch->ipt_wrap, patch->ipt_argcnt, patch->ipt_ftype); patch++; } TAILQ_INIT(&ndis_devhead); break; case MOD_SHUTDOWN: if (TAILQ_FIRST(&ndis_devhead) == NULL) { /* Shut down subsystems */ ndis_libfini(); usbd_libfini(); windrv_libfini(); ntoskrnl_libfini(); hal_libfini(); patch = kernndis_functbl; while (patch->ipt_func != NULL) { windrv_unwrap(patch->ipt_wrap); patch++; } } break; case MOD_UNLOAD: /* Shut down subsystems */ ndis_libfini(); usbd_libfini(); windrv_libfini(); ntoskrnl_libfini(); hal_libfini(); patch = kernndis_functbl; while (patch->ipt_func != NULL) { windrv_unwrap(patch->ipt_wrap); patch++; } break; default: error = EINVAL; break; } return (error); } DEV_MODULE(ndisapi, ndis_modevent, NULL); MODULE_VERSION(ndisapi, 1); static void ndis_sendrsrcavail_func(adapter) ndis_handle adapter; { } static void ndis_status_func(adapter, status, sbuf, slen) ndis_handle adapter; ndis_status status; void *sbuf; uint32_t slen; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; if (ifp->if_flags & IFF_DEBUG) device_printf(sc->ndis_dev, "status: %x\n", status); } static void ndis_statusdone_func(adapter) ndis_handle adapter; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; if (ifp->if_flags & IFF_DEBUG) device_printf(sc->ndis_dev, "status complete\n"); } static void ndis_setdone_func(adapter, status) ndis_handle adapter; ndis_status status; { ndis_miniport_block *block; block = adapter; 
block->nmb_setstat = status; KeSetEvent(&block->nmb_setevent, IO_NO_INCREMENT, FALSE); } static void ndis_getdone_func(adapter, status) ndis_handle adapter; ndis_status status; { ndis_miniport_block *block; block = adapter; block->nmb_getstat = status; KeSetEvent(&block->nmb_getevent, IO_NO_INCREMENT, FALSE); } static void ndis_resetdone_func(ndis_handle adapter, ndis_status status, uint8_t addressingreset) { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; if (ifp->if_flags & IFF_DEBUG) device_printf(sc->ndis_dev, "reset done...\n"); KeSetEvent(&block->nmb_resetevent, IO_NO_INCREMENT, FALSE); } int ndis_create_sysctls(arg) void *arg; { struct ndis_softc *sc; ndis_cfg *vals; char buf[256]; struct sysctl_oid *oidp; struct sysctl_ctx_entry *e; if (arg == NULL) return (EINVAL); sc = arg; vals = sc->ndis_regvals; TAILQ_INIT(&sc->ndis_cfglist_head); /* Add the driver-specific registry keys. */ while(1) { if (vals->nc_cfgkey == NULL) break; if (vals->nc_idx != sc->ndis_devidx) { vals++; continue; } /* See if we already have a sysctl with this name */ oidp = NULL; TAILQ_FOREACH(e, device_get_sysctl_ctx(sc->ndis_dev), link) { oidp = e->entry; if (strcasecmp(oidp->oid_name, vals->nc_cfgkey) == 0) break; oidp = NULL; } if (oidp != NULL) { vals++; continue; } ndis_add_sysctl(sc, vals->nc_cfgkey, vals->nc_cfgdesc, vals->nc_val, CTLFLAG_RW); vals++; } /* Now add a couple of builtin keys. */ /* * Environment can be either Windows (0) or WindowsNT (1). * We qualify as the latter. */ ndis_add_sysctl(sc, "Environment", "Windows environment", "1", NDIS_FLAG_RDONLY); /* NDIS version should be 5.1. */ ndis_add_sysctl(sc, "NdisVersion", "NDIS API Version", "0x00050001", NDIS_FLAG_RDONLY); /* * Some miniport drivers rely on the existence of the SlotNumber, * NetCfgInstanceId and DriverDesc keys. 
*/ ndis_add_sysctl(sc, "SlotNumber", "Slot Numer", "01", NDIS_FLAG_RDONLY); ndis_add_sysctl(sc, "NetCfgInstanceId", "NetCfgInstanceId", "{12345678-1234-5678-CAFE0-123456789ABC}", NDIS_FLAG_RDONLY); ndis_add_sysctl(sc, "DriverDesc", "Driver Description", "NDIS Network Adapter", NDIS_FLAG_RDONLY); /* Bus type (PCI, PCMCIA, etc...) */ sprintf(buf, "%d", (int)sc->ndis_iftype); ndis_add_sysctl(sc, "BusType", "Bus Type", buf, NDIS_FLAG_RDONLY); if (sc->ndis_res_io != NULL) { sprintf(buf, "0x%jx", rman_get_start(sc->ndis_res_io)); ndis_add_sysctl(sc, "IOBaseAddress", "Base I/O Address", buf, NDIS_FLAG_RDONLY); } if (sc->ndis_irq != NULL) { sprintf(buf, "%ju", rman_get_start(sc->ndis_irq)); ndis_add_sysctl(sc, "InterruptNumber", "Interrupt Number", buf, NDIS_FLAG_RDONLY); } return (0); } int ndis_add_sysctl(arg, key, desc, val, flag_rdonly) void *arg; char *key; char *desc; char *val; int flag_rdonly; { struct ndis_softc *sc; struct ndis_cfglist *cfg; char descstr[256]; sc = arg; cfg = malloc(sizeof(struct ndis_cfglist), M_DEVBUF, M_NOWAIT|M_ZERO); if (cfg == NULL) { printf("failed for %s\n", key); return (ENOMEM); } cfg->ndis_cfg.nc_cfgkey = strdup(key, M_DEVBUF); if (desc == NULL) { snprintf(descstr, sizeof(descstr), "%s (dynamic)", key); cfg->ndis_cfg.nc_cfgdesc = strdup(descstr, M_DEVBUF); } else cfg->ndis_cfg.nc_cfgdesc = strdup(desc, M_DEVBUF); strcpy(cfg->ndis_cfg.nc_val, val); TAILQ_INSERT_TAIL(&sc->ndis_cfglist_head, cfg, link); if (flag_rdonly != 0) { cfg->ndis_oid = SYSCTL_ADD_STRING(device_get_sysctl_ctx(sc->ndis_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ndis_dev)), OID_AUTO, cfg->ndis_cfg.nc_cfgkey, CTLFLAG_RD, cfg->ndis_cfg.nc_val, sizeof(cfg->ndis_cfg.nc_val), cfg->ndis_cfg.nc_cfgdesc); } else { cfg->ndis_oid = SYSCTL_ADD_STRING(device_get_sysctl_ctx(sc->ndis_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ndis_dev)), OID_AUTO, cfg->ndis_cfg.nc_cfgkey, CTLFLAG_RW, cfg->ndis_cfg.nc_val, sizeof(cfg->ndis_cfg.nc_val), cfg->ndis_cfg.nc_cfgdesc); } return 
(0); } /* * Somewhere, somebody decided "hey, let's automatically create * a sysctl tree for each device instance as it's created -- it'll * make life so much easier!" Lies. Why must they turn the kernel * into a house of lies? */ int ndis_flush_sysctls(arg) void *arg; { struct ndis_softc *sc; struct ndis_cfglist *cfg; struct sysctl_ctx_list *clist; sc = arg; clist = device_get_sysctl_ctx(sc->ndis_dev); while (!TAILQ_EMPTY(&sc->ndis_cfglist_head)) { cfg = TAILQ_FIRST(&sc->ndis_cfglist_head); TAILQ_REMOVE(&sc->ndis_cfglist_head, cfg, link); sysctl_ctx_entry_del(clist, cfg->ndis_oid); sysctl_remove_oid(cfg->ndis_oid, 1, 0); free(cfg->ndis_cfg.nc_cfgkey, M_DEVBUF); free(cfg->ndis_cfg.nc_cfgdesc, M_DEVBUF); free(cfg, M_DEVBUF); } return (0); } void * ndis_get_routine_address(functbl, name) struct image_patch_table *functbl; char *name; { int i; for (i = 0; functbl[i].ipt_name != NULL; i++) if (strcmp(name, functbl[i].ipt_name) == 0) return (functbl[i].ipt_wrap); return (NULL); } static void ndis_return(dobj, arg) device_object *dobj; void *arg; { ndis_miniport_block *block; ndis_miniport_characteristics *ch; ndis_return_handler returnfunc; ndis_handle adapter; ndis_packet *p; uint8_t irql; list_entry *l; block = arg; ch = IoGetDriverObjectExtension(dobj->do_drvobj, (void *)1); p = arg; adapter = block->nmb_miniportadapterctx; if (adapter == NULL) return; returnfunc = ch->nmc_return_packet_func; KeAcquireSpinLock(&block->nmb_returnlock, &irql); while (!IsListEmpty(&block->nmb_returnlist)) { l = RemoveHeadList((&block->nmb_returnlist)); p = CONTAINING_RECORD(l, ndis_packet, np_list); InitializeListHead((&p->np_list)); KeReleaseSpinLock(&block->nmb_returnlock, irql); MSCALL2(returnfunc, adapter, p); KeAcquireSpinLock(&block->nmb_returnlock, &irql); } KeReleaseSpinLock(&block->nmb_returnlock, irql); } void ndis_return_packet(struct mbuf *m, void *buf, void *arg) { ndis_packet *p; ndis_miniport_block *block; if (arg == NULL) return; p = arg; /* Decrement refcount. 
*/ p->np_refcnt--; /* Release packet when refcount hits zero, otherwise return. */ if (p->np_refcnt) return; block = ((struct ndis_softc *)p->np_softc)->ndis_block; KeAcquireSpinLockAtDpcLevel(&block->nmb_returnlock); InitializeListHead((&p->np_list)); InsertHeadList((&block->nmb_returnlist), (&p->np_list)); KeReleaseSpinLockFromDpcLevel(&block->nmb_returnlock); IoQueueWorkItem(block->nmb_returnitem, (io_workitem_func)kernndis_functbl[7].ipt_wrap, WORKQUEUE_CRITICAL, block); } void ndis_free_bufs(b0) ndis_buffer *b0; { ndis_buffer *next; if (b0 == NULL) return; while(b0 != NULL) { next = b0->mdl_next; IoFreeMdl(b0); b0 = next; } } void ndis_free_packet(p) ndis_packet *p; { if (p == NULL) return; ndis_free_bufs(p->np_private.npp_head); NdisFreePacket(p); } int ndis_convert_res(arg) void *arg; { struct ndis_softc *sc; ndis_resource_list *rl = NULL; cm_partial_resource_desc *prd = NULL; ndis_miniport_block *block; device_t dev; struct resource_list *brl; struct resource_list_entry *brle; int error = 0; sc = arg; block = sc->ndis_block; dev = sc->ndis_dev; rl = malloc(sizeof(ndis_resource_list) + (sizeof(cm_partial_resource_desc) * (sc->ndis_rescnt - 1)), M_DEVBUF, M_NOWAIT|M_ZERO); if (rl == NULL) return (ENOMEM); rl->cprl_version = 5; rl->cprl_revision = 1; rl->cprl_count = sc->ndis_rescnt; prd = rl->cprl_partial_descs; brl = BUS_GET_RESOURCE_LIST(dev, dev); if (brl != NULL) { STAILQ_FOREACH(brle, brl, link) { switch (brle->type) { case SYS_RES_IOPORT: prd->cprd_type = CmResourceTypePort; prd->cprd_flags = CM_RESOURCE_PORT_IO; prd->cprd_sharedisp = CmResourceShareDeviceExclusive; prd->u.cprd_port.cprd_start.np_quad = brle->start; prd->u.cprd_port.cprd_len = brle->count; break; case SYS_RES_MEMORY: prd->cprd_type = CmResourceTypeMemory; prd->cprd_flags = CM_RESOURCE_MEMORY_READ_WRITE; prd->cprd_sharedisp = CmResourceShareDeviceExclusive; prd->u.cprd_mem.cprd_start.np_quad = brle->start; prd->u.cprd_mem.cprd_len = brle->count; break; case SYS_RES_IRQ: prd->cprd_type = 
CmResourceTypeInterrupt; prd->cprd_flags = 0; /* * Always mark interrupt resources as * shared, since in our implementation, * they will be. */ prd->cprd_sharedisp = CmResourceShareShared; prd->u.cprd_intr.cprd_level = brle->start; prd->u.cprd_intr.cprd_vector = brle->start; prd->u.cprd_intr.cprd_affinity = 0; break; default: break; } prd++; } } block->nmb_rlist = rl; return (error); } /* * Map an NDIS packet to an mbuf list. When an NDIS driver receives a * packet, it will hand it to us in the form of an ndis_packet, * which we need to convert to an mbuf that is then handed off * to the stack. Note: we configure the mbuf list so that it uses * the memory regions specified by the ndis_buffer structures in * the ndis_packet as external storage. In most cases, this will * point to a memory region allocated by the driver (either by * ndis_malloc_withtag() or ndis_alloc_sharedmem()). We expect * the driver to handle free()ing this region for is, so we set up * a dummy no-op free handler for it. */ int ndis_ptom(m0, p) struct mbuf **m0; ndis_packet *p; { struct mbuf *m = NULL, *prev = NULL; ndis_buffer *buf; ndis_packet_private *priv; uint32_t totlen = 0; struct ifnet *ifp; struct ether_header *eh; int diff; if (p == NULL || m0 == NULL) return (EINVAL); priv = &p->np_private; buf = priv->npp_head; p->np_refcnt = 0; for (buf = priv->npp_head; buf != NULL; buf = buf->mdl_next) { if (buf == priv->npp_head) m = m_gethdr(M_NOWAIT, MT_DATA); else m = m_get(M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(*m0); *m0 = NULL; return (ENOBUFS); } m->m_len = MmGetMdlByteCount(buf); m->m_data = MmGetMdlVirtualAddress(buf); MEXTADD(m, m->m_data, m->m_len, ndis_return_packet, m->m_data, p, 0, EXT_NDIS); p->np_refcnt++; totlen += m->m_len; if (m->m_flags & M_PKTHDR) *m0 = m; else prev->m_next = m; prev = m; } /* * This is a hack to deal with the Marvell 8335 driver * which, when associated with an AP in WPA-PSK mode, * seems to overpad its frames by 8 bytes. 
I don't know * that the extra 8 bytes are for, and they're not there * in open mode, so for now clamp the frame size at 1514 * until I can figure out how to deal with this properly, * otherwise if_ethersubr() will spank us by discarding * the 'oversize' frames. */ eh = mtod((*m0), struct ether_header *); ifp = ((struct ndis_softc *)p->np_softc)->ifp; if (totlen > ETHER_MAX_FRAME(ifp, eh->ether_type, FALSE)) { diff = totlen - ETHER_MAX_FRAME(ifp, eh->ether_type, FALSE); totlen -= diff; m->m_len -= diff; } (*m0)->m_pkthdr.len = totlen; return (0); } /* * Create an NDIS packet from an mbuf chain. * This is used mainly when transmitting packets, where we need * to turn an mbuf off an interface's send queue and transform it * into an NDIS packet which will be fed into the NDIS driver's * send routine. * * NDIS packets consist of two parts: an ndis_packet structure, - * which is vaguely analagous to the pkthdr portion of an mbuf, + * which is vaguely analogous to the pkthdr portion of an mbuf, * and one or more ndis_buffer structures, which define the * actual memory segments in which the packet data resides. * We need to allocate one ndis_buffer for each mbuf in a chain, * plus one ndis_packet as the header. 
*/ int ndis_mtop(m0, p) struct mbuf *m0; ndis_packet **p; { struct mbuf *m; ndis_buffer *buf = NULL, *prev = NULL; ndis_packet_private *priv; if (p == NULL || *p == NULL || m0 == NULL) return (EINVAL); priv = &(*p)->np_private; priv->npp_totlen = m0->m_pkthdr.len; for (m = m0; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; buf = IoAllocateMdl(m->m_data, m->m_len, FALSE, FALSE, NULL); if (buf == NULL) { ndis_free_packet(*p); *p = NULL; return (ENOMEM); } MmBuildMdlForNonPagedPool(buf); if (priv->npp_head == NULL) priv->npp_head = buf; else prev->mdl_next = buf; prev = buf; } priv->npp_tail = buf; return (0); } int ndis_get_supported_oids(arg, oids, oidcnt) void *arg; ndis_oid **oids; int *oidcnt; { int len, rval; ndis_oid *o; if (arg == NULL || oids == NULL || oidcnt == NULL) return (EINVAL); len = 0; ndis_get_info(arg, OID_GEN_SUPPORTED_LIST, NULL, &len); o = malloc(len, M_DEVBUF, M_NOWAIT); if (o == NULL) return (ENOMEM); rval = ndis_get_info(arg, OID_GEN_SUPPORTED_LIST, o, &len); if (rval) { free(o, M_DEVBUF); return (rval); } *oids = o; *oidcnt = len / 4; return (0); } int ndis_set_info(arg, oid, buf, buflen) void *arg; ndis_oid oid; void *buf; int *buflen; { struct ndis_softc *sc; ndis_status rval; ndis_handle adapter; ndis_setinfo_handler setfunc; uint32_t byteswritten = 0, bytesneeded = 0; uint8_t irql; uint64_t duetime; /* * According to the NDIS spec, MiniportQueryInformation() * and MiniportSetInformation() requests are handled serially: * once one request has been issued, we must wait for it to * finish before allowing another request to proceed. 
*/ sc = arg; KeResetEvent(&sc->ndis_block->nmb_setevent); KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql); if (sc->ndis_block->nmb_pendingreq != NULL) { KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); panic("ndis_set_info() called while other request pending"); } else sc->ndis_block->nmb_pendingreq = (ndis_request *)sc; setfunc = sc->ndis_chars->nmc_setinfo_func; adapter = sc->ndis_block->nmb_miniportadapterctx; if (adapter == NULL || setfunc == NULL || sc->ndis_block->nmb_devicectx == NULL) { sc->ndis_block->nmb_pendingreq = NULL; KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); return (ENXIO); } rval = MSCALL6(setfunc, adapter, oid, buf, *buflen, &byteswritten, &bytesneeded); sc->ndis_block->nmb_pendingreq = NULL; KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); if (rval == NDIS_STATUS_PENDING) { /* Wait up to 5 seconds. */ duetime = (5 * 1000000) * -10; KeWaitForSingleObject(&sc->ndis_block->nmb_setevent, 0, 0, FALSE, &duetime); rval = sc->ndis_block->nmb_setstat; } if (byteswritten) *buflen = byteswritten; if (bytesneeded) *buflen = bytesneeded; if (rval == NDIS_STATUS_INVALID_LENGTH) return (ENOSPC); if (rval == NDIS_STATUS_INVALID_OID) return (EINVAL); if (rval == NDIS_STATUS_NOT_SUPPORTED || rval == NDIS_STATUS_NOT_ACCEPTED) return (ENOTSUP); if (rval != NDIS_STATUS_SUCCESS) return (ENODEV); return (0); } typedef void (*ndis_senddone_func)(ndis_handle, ndis_packet *, ndis_status); int ndis_send_packets(arg, packets, cnt) void *arg; ndis_packet **packets; int cnt; { struct ndis_softc *sc; ndis_handle adapter; ndis_sendmulti_handler sendfunc; ndis_senddone_func senddonefunc; int i; ndis_packet *p; uint8_t irql = 0; sc = arg; adapter = sc->ndis_block->nmb_miniportadapterctx; if (adapter == NULL) return (ENXIO); sendfunc = sc->ndis_chars->nmc_sendmulti_func; senddonefunc = sc->ndis_block->nmb_senddone_func; if (NDIS_SERIALIZED(sc->ndis_block)) KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql); MSCALL3(sendfunc, adapter, packets, cnt); for (i = 0; 
i < cnt; i++) { p = packets[i]; /* * Either the driver already handed the packet to * ndis_txeof() due to a failure, or it wants to keep * it and release it asynchronously later. Skip to the * next one. */ if (p == NULL || p->np_oob.npo_status == NDIS_STATUS_PENDING) continue; MSCALL3(senddonefunc, sc->ndis_block, p, p->np_oob.npo_status); } if (NDIS_SERIALIZED(sc->ndis_block)) KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); return (0); } int ndis_send_packet(arg, packet) void *arg; ndis_packet *packet; { struct ndis_softc *sc; ndis_handle adapter; ndis_status status; ndis_sendsingle_handler sendfunc; ndis_senddone_func senddonefunc; uint8_t irql = 0; sc = arg; adapter = sc->ndis_block->nmb_miniportadapterctx; if (adapter == NULL) return (ENXIO); sendfunc = sc->ndis_chars->nmc_sendsingle_func; senddonefunc = sc->ndis_block->nmb_senddone_func; if (NDIS_SERIALIZED(sc->ndis_block)) KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql); status = MSCALL3(sendfunc, adapter, packet, packet->np_private.npp_flags); if (status == NDIS_STATUS_PENDING) { if (NDIS_SERIALIZED(sc->ndis_block)) KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); return (0); } MSCALL3(senddonefunc, sc->ndis_block, packet, status); if (NDIS_SERIALIZED(sc->ndis_block)) KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); return (0); } int ndis_init_dma(arg) void *arg; { struct ndis_softc *sc; int i, error; sc = arg; sc->ndis_tmaps = malloc(sizeof(bus_dmamap_t) * sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO); if (sc->ndis_tmaps == NULL) return (ENOMEM); for (i = 0; i < sc->ndis_maxpkts; i++) { error = bus_dmamap_create(sc->ndis_ttag, 0, &sc->ndis_tmaps[i]); if (error) { free(sc->ndis_tmaps, M_DEVBUF); return (ENODEV); } } return (0); } int ndis_destroy_dma(arg) void *arg; { struct ndis_softc *sc; struct mbuf *m; ndis_packet *p = NULL; int i; sc = arg; for (i = 0; i < sc->ndis_maxpkts; i++) { if (sc->ndis_txarray[i] != NULL) { p = sc->ndis_txarray[i]; m = (struct mbuf *)p->np_rsvd[1]; if (m != NULL) 
m_freem(m); ndis_free_packet(sc->ndis_txarray[i]); } bus_dmamap_destroy(sc->ndis_ttag, sc->ndis_tmaps[i]); } free(sc->ndis_tmaps, M_DEVBUF); bus_dma_tag_destroy(sc->ndis_ttag); return (0); } int ndis_reset_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; ndis_reset_handler resetfunc; uint8_t addressing_reset; int rval; uint8_t irql = 0; sc = arg; NDIS_LOCK(sc); adapter = sc->ndis_block->nmb_miniportadapterctx; resetfunc = sc->ndis_chars->nmc_reset_func; if (adapter == NULL || resetfunc == NULL || sc->ndis_block->nmb_devicectx == NULL) { NDIS_UNLOCK(sc); return (EIO); } NDIS_UNLOCK(sc); KeResetEvent(&sc->ndis_block->nmb_resetevent); if (NDIS_SERIALIZED(sc->ndis_block)) KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql); rval = MSCALL2(resetfunc, &addressing_reset, adapter); if (NDIS_SERIALIZED(sc->ndis_block)) KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); if (rval == NDIS_STATUS_PENDING) KeWaitForSingleObject(&sc->ndis_block->nmb_resetevent, 0, 0, FALSE, NULL); return (0); } int ndis_halt_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; ndis_halt_handler haltfunc; ndis_miniport_block *block; int empty = 0; uint8_t irql; sc = arg; block = sc->ndis_block; if (!cold) KeFlushQueuedDpcs(); /* * Wait for all packets to be returned. */ while (1) { KeAcquireSpinLock(&block->nmb_returnlock, &irql); empty = IsListEmpty(&block->nmb_returnlist); KeReleaseSpinLock(&block->nmb_returnlock, irql); if (empty) break; NdisMSleep(1000); } NDIS_LOCK(sc); adapter = sc->ndis_block->nmb_miniportadapterctx; if (adapter == NULL) { NDIS_UNLOCK(sc); return (EIO); } sc->ndis_block->nmb_devicectx = NULL; /* * The adapter context is only valid after the init * handler has been called, and is invalid once the * halt handler has been called. 
*/ haltfunc = sc->ndis_chars->nmc_halt_func; NDIS_UNLOCK(sc); MSCALL1(haltfunc, adapter); NDIS_LOCK(sc); sc->ndis_block->nmb_miniportadapterctx = NULL; NDIS_UNLOCK(sc); return (0); } int ndis_shutdown_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; ndis_shutdown_handler shutdownfunc; sc = arg; NDIS_LOCK(sc); adapter = sc->ndis_block->nmb_miniportadapterctx; shutdownfunc = sc->ndis_chars->nmc_shutdown_handler; NDIS_UNLOCK(sc); if (adapter == NULL || shutdownfunc == NULL) return (EIO); if (sc->ndis_chars->nmc_rsvd0 == NULL) MSCALL1(shutdownfunc, adapter); else MSCALL1(shutdownfunc, sc->ndis_chars->nmc_rsvd0); TAILQ_REMOVE(&ndis_devhead, sc->ndis_block, link); return (0); } int ndis_pnpevent_nic(arg, type) void *arg; int type; { device_t dev; struct ndis_softc *sc; ndis_handle adapter; ndis_pnpevent_handler pnpeventfunc; dev = arg; sc = device_get_softc(arg); NDIS_LOCK(sc); adapter = sc->ndis_block->nmb_miniportadapterctx; pnpeventfunc = sc->ndis_chars->nmc_pnpevent_handler; NDIS_UNLOCK(sc); if (adapter == NULL || pnpeventfunc == NULL) return (EIO); if (sc->ndis_chars->nmc_rsvd0 == NULL) MSCALL4(pnpeventfunc, adapter, type, NULL, 0); else MSCALL4(pnpeventfunc, sc->ndis_chars->nmc_rsvd0, type, NULL, 0); return (0); } int ndis_init_nic(arg) void *arg; { struct ndis_softc *sc; ndis_miniport_block *block; ndis_init_handler initfunc; ndis_status status, openstatus = 0; ndis_medium mediumarray[NdisMediumMax]; uint32_t chosenmedium, i; if (arg == NULL) return (EINVAL); sc = arg; NDIS_LOCK(sc); block = sc->ndis_block; initfunc = sc->ndis_chars->nmc_init_func; NDIS_UNLOCK(sc); sc->ndis_block->nmb_timerlist = NULL; for (i = 0; i < NdisMediumMax; i++) mediumarray[i] = i; status = MSCALL6(initfunc, &openstatus, &chosenmedium, mediumarray, NdisMediumMax, block, block); /* * If the init fails, blow away the other exported routines * we obtained from the driver so we can't call them later. * If the init failed, none of these will work. 
*/ if (status != NDIS_STATUS_SUCCESS) { NDIS_LOCK(sc); sc->ndis_block->nmb_miniportadapterctx = NULL; NDIS_UNLOCK(sc); return (ENXIO); } /* * This may look really goofy, but apparently it is possible * to halt a miniport too soon after it's been initialized. * After MiniportInitialize() finishes, pause for 1 second * to give the chip a chance to handle any short-lived timers * that were set in motion. If we call MiniportHalt() too soon, * some of the timers may not be cancelled, because the driver * expects them to fire before the halt is called. */ pause("ndwait", hz); NDIS_LOCK(sc); sc->ndis_block->nmb_devicectx = sc; NDIS_UNLOCK(sc); return (0); } static void ndis_intrsetup(dpc, dobj, ip, sc) kdpc *dpc; device_object *dobj; irp *ip; struct ndis_softc *sc; { ndis_miniport_interrupt *intr; intr = sc->ndis_block->nmb_interrupt; /* Sanity check. */ if (intr == NULL) return; KeAcquireSpinLockAtDpcLevel(&intr->ni_dpccountlock); KeResetEvent(&intr->ni_dpcevt); if (KeInsertQueueDpc(&intr->ni_dpc, NULL, NULL) == TRUE) intr->ni_dpccnt++; KeReleaseSpinLockFromDpcLevel(&intr->ni_dpccountlock); } int ndis_get_info(arg, oid, buf, buflen) void *arg; ndis_oid oid; void *buf; int *buflen; { struct ndis_softc *sc; ndis_status rval; ndis_handle adapter; ndis_queryinfo_handler queryfunc; uint32_t byteswritten = 0, bytesneeded = 0; uint8_t irql; uint64_t duetime; sc = arg; KeResetEvent(&sc->ndis_block->nmb_getevent); KeAcquireSpinLock(&sc->ndis_block->nmb_lock, &irql); if (sc->ndis_block->nmb_pendingreq != NULL) { KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); panic("ndis_get_info() called while other request pending"); } else sc->ndis_block->nmb_pendingreq = (ndis_request *)sc; queryfunc = sc->ndis_chars->nmc_queryinfo_func; adapter = sc->ndis_block->nmb_miniportadapterctx; if (adapter == NULL || queryfunc == NULL || sc->ndis_block->nmb_devicectx == NULL) { sc->ndis_block->nmb_pendingreq = NULL; KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); return (ENXIO); } rval = 
MSCALL6(queryfunc, adapter, oid, buf, *buflen, &byteswritten, &bytesneeded); sc->ndis_block->nmb_pendingreq = NULL; KeReleaseSpinLock(&sc->ndis_block->nmb_lock, irql); /* Wait for requests that block. */ if (rval == NDIS_STATUS_PENDING) { /* Wait up to 5 seconds. */ duetime = (5 * 1000000) * -10; KeWaitForSingleObject(&sc->ndis_block->nmb_getevent, 0, 0, FALSE, &duetime); rval = sc->ndis_block->nmb_getstat; } if (byteswritten) *buflen = byteswritten; if (bytesneeded) *buflen = bytesneeded; if (rval == NDIS_STATUS_INVALID_LENGTH || rval == NDIS_STATUS_BUFFER_TOO_SHORT) return (ENOSPC); if (rval == NDIS_STATUS_INVALID_OID) return (EINVAL); if (rval == NDIS_STATUS_NOT_SUPPORTED || rval == NDIS_STATUS_NOT_ACCEPTED) return (ENOTSUP); if (rval != NDIS_STATUS_SUCCESS) return (ENODEV); return (0); } uint32_t NdisAddDevice(drv, pdo) driver_object *drv; device_object *pdo; { device_object *fdo; ndis_miniport_block *block; struct ndis_softc *sc; uint32_t status; int error; sc = device_get_softc(pdo->do_devext); if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus) { error = bus_setup_intr(sc->ndis_dev, sc->ndis_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ntoskrnl_intr, NULL, &sc->ndis_intrhand); if (error) return (NDIS_STATUS_FAILURE); } status = IoCreateDevice(drv, sizeof(ndis_miniport_block), NULL, FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo); if (status != STATUS_SUCCESS) return (status); block = fdo->do_devext; block->nmb_filterdbs.nf_ethdb = block; block->nmb_deviceobj = fdo; block->nmb_physdeviceobj = pdo; block->nmb_nextdeviceobj = IoAttachDeviceToDeviceStack(fdo, pdo); KeInitializeSpinLock(&block->nmb_lock); KeInitializeSpinLock(&block->nmb_returnlock); KeInitializeEvent(&block->nmb_getevent, EVENT_TYPE_NOTIFY, TRUE); KeInitializeEvent(&block->nmb_setevent, EVENT_TYPE_NOTIFY, TRUE); KeInitializeEvent(&block->nmb_resetevent, EVENT_TYPE_NOTIFY, TRUE); InitializeListHead(&block->nmb_parmlist); InitializeListHead(&block->nmb_returnlist); block->nmb_returnitem = 
IoAllocateWorkItem(fdo); /* * Stash pointers to the miniport block and miniport * characteristics info in the if_ndis softc so the * UNIX wrapper driver can get to them later. */ sc->ndis_block = block; sc->ndis_chars = IoGetDriverObjectExtension(drv, (void *)1); /* * If the driver has a MiniportTransferData() function, * we should allocate a private RX packet pool. */ if (sc->ndis_chars->nmc_transferdata_func != NULL) { NdisAllocatePacketPool(&status, &block->nmb_rxpool, 32, PROTOCOL_RESERVED_SIZE_IN_PACKET); if (status != NDIS_STATUS_SUCCESS) { IoDetachDevice(block->nmb_nextdeviceobj); IoDeleteDevice(fdo); return (status); } InitializeListHead((&block->nmb_packetlist)); } /* Give interrupt handling priority over timers. */ IoInitializeDpcRequest(fdo, kernndis_functbl[6].ipt_wrap); KeSetImportanceDpc(&fdo->do_dpc, KDPC_IMPORTANCE_HIGH); /* Finish up BSD-specific setup. */ block->nmb_signature = (void *)0xcafebabe; block->nmb_status_func = kernndis_functbl[0].ipt_wrap; block->nmb_statusdone_func = kernndis_functbl[1].ipt_wrap; block->nmb_setdone_func = kernndis_functbl[2].ipt_wrap; block->nmb_querydone_func = kernndis_functbl[3].ipt_wrap; block->nmb_resetdone_func = kernndis_functbl[4].ipt_wrap; block->nmb_sendrsrc_func = kernndis_functbl[5].ipt_wrap; block->nmb_pendingreq = NULL; TAILQ_INSERT_TAIL(&ndis_devhead, block, link); return (STATUS_SUCCESS); } int ndis_unload_driver(arg) void *arg; { struct ndis_softc *sc; device_object *fdo; sc = arg; if (sc->ndis_intrhand) bus_teardown_intr(sc->ndis_dev, sc->ndis_irq, sc->ndis_intrhand); if (sc->ndis_block->nmb_rlist != NULL) free(sc->ndis_block->nmb_rlist, M_DEVBUF); ndis_flush_sysctls(sc); TAILQ_REMOVE(&ndis_devhead, sc->ndis_block, link); if (sc->ndis_chars->nmc_transferdata_func != NULL) NdisFreePacketPool(sc->ndis_block->nmb_rxpool); fdo = sc->ndis_block->nmb_deviceobj; IoFreeWorkItem(sc->ndis_block->nmb_returnitem); IoDetachDevice(sc->ndis_block->nmb_nextdeviceobj); IoDeleteDevice(fdo); return (0); } Index: 
head/sys/compat/ndis/ndis_var.h =================================================================== --- head/sys/compat/ndis/ndis_var.h (revision 298827) +++ head/sys/compat/ndis/ndis_var.h (revision 298828) @@ -1,1767 +1,1767 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _NDIS_VAR_H_ #define _NDIS_VAR_H_ /* Forward declarations */ struct ndis_miniport_block; struct ndis_mdriver_block; typedef struct ndis_miniport_block ndis_miniport_block; typedef struct ndis_mdriver_block ndis_mdriver_block; /* Base types */ typedef uint32_t ndis_status; typedef void *ndis_handle; typedef uint32_t ndis_oid; typedef uint32_t ndis_error_code; typedef register_t ndis_kspin_lock; typedef uint8_t ndis_kirql; /* * NDIS status codes (there are lots of them). The ones that * don't seem to fit the pattern are actually mapped to generic * NT status codes. */ #define NDIS_STATUS_SUCCESS 0 #define NDIS_STATUS_PENDING 0x00000103 #define NDIS_STATUS_NOT_RECOGNIZED 0x00010001 #define NDIS_STATUS_NOT_COPIED 0x00010002 #define NDIS_STATUS_NOT_ACCEPTED 0x00010003 #define NDIS_STATUS_CALL_ACTIVE 0x00010007 #define NDIS_STATUS_ONLINE 0x40010003 #define NDIS_STATUS_RESET_START 0x40010004 #define NDIS_STATUS_RESET_END 0x40010005 #define NDIS_STATUS_RING_STATUS 0x40010006 #define NDIS_STATUS_CLOSED 0x40010007 #define NDIS_STATUS_WAN_LINE_UP 0x40010008 #define NDIS_STATUS_WAN_LINE_DOWN 0x40010009 #define NDIS_STATUS_WAN_FRAGMENT 0x4001000A #define NDIS_STATUS_MEDIA_CONNECT 0x4001000B #define NDIS_STATUS_MEDIA_DISCONNECT 0x4001000C #define NDIS_STATUS_HARDWARE_LINE_UP 0x4001000D #define NDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E #define NDIS_STATUS_INTERFACE_UP 0x4001000F #define NDIS_STATUS_INTERFACE_DOWN 0x40010010 #define NDIS_STATUS_MEDIA_BUSY 0x40010011 #define NDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 #define NDIS_STATUS_WW_INDICATION NDIS_STATUS_MEDIA_SPECIFIC_INDICATION #define NDIS_STATUS_LINK_SPEED_CHANGE 0x40010013 #define NDIS_STATUS_WAN_GET_STATS 0x40010014 #define NDIS_STATUS_WAN_CO_FRAGMENT 0x40010015 #define NDIS_STATUS_WAN_CO_LINKPARAMS 0x40010016 #define NDIS_STATUS_NOT_RESETTABLE 0x80010001 #define NDIS_STATUS_SOFT_ERRORS 0x80010003 #define NDIS_STATUS_HARD_ERRORS 0x80010004 #define NDIS_STATUS_BUFFER_OVERFLOW 0x80000005 
#define NDIS_STATUS_FAILURE 0xC0000001 #define NDIS_STATUS_RESOURCES 0xC000009A #define NDIS_STATUS_CLOSING 0xC0010002 #define NDIS_STATUS_BAD_VERSION 0xC0010004 #define NDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005 #define NDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006 #define NDIS_STATUS_OPEN_FAILED 0xC0010007 #define NDIS_STATUS_DEVICE_FAILED 0xC0010008 #define NDIS_STATUS_MULTICAST_FULL 0xC0010009 #define NDIS_STATUS_MULTICAST_EXISTS 0xC001000A #define NDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B #define NDIS_STATUS_REQUEST_ABORTED 0xC001000C #define NDIS_STATUS_RESET_IN_PROGRESS 0xC001000D #define NDIS_STATUS_CLOSING_INDICATING 0xC001000E #define NDIS_STATUS_NOT_SUPPORTED 0xC00000BB #define NDIS_STATUS_INVALID_PACKET 0xC001000F #define NDIS_STATUS_OPEN_LIST_FULL 0xC0010010 #define NDIS_STATUS_ADAPTER_NOT_READY 0xC0010011 #define NDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012 #define NDIS_STATUS_NOT_INDICATING 0xC0010013 #define NDIS_STATUS_INVALID_LENGTH 0xC0010014 #define NDIS_STATUS_INVALID_DATA 0xC0010015 #define NDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016 #define NDIS_STATUS_INVALID_OID 0xC0010017 #define NDIS_STATUS_ADAPTER_REMOVED 0xC0010018 #define NDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019 #define NDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A #define NDIS_STATUS_FILE_NOT_FOUND 0xC001001B #define NDIS_STATUS_ERROR_READING_FILE 0xC001001C #define NDIS_STATUS_ALREADY_MAPPED 0xC001001D #define NDIS_STATUS_RESOURCE_CONFLICT 0xC001001E #define NDIS_STATUS_NO_CABLE 0xC001001F #define NDIS_STATUS_INVALID_SAP 0xC0010020 #define NDIS_STATUS_SAP_IN_USE 0xC0010021 #define NDIS_STATUS_INVALID_ADDRESS 0xC0010022 #define NDIS_STATUS_VC_NOT_ACTIVATED 0xC0010023 #define NDIS_STATUS_DEST_OUT_OF_ORDER 0xC0010024 #define NDIS_STATUS_VC_NOT_AVAILABLE 0xC0010025 #define NDIS_STATUS_CELLRATE_NOT_AVAILABLE 0xC0010026 #define NDIS_STATUS_INCOMPATABLE_QOS 0xC0010027 #define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED 0xC0010028 #define NDIS_STATUS_NO_ROUTE_TO_DESTINATION 0xC0010029 #define 
NDIS_STATUS_TOKEN_RING_OPEN_ERROR 0xC0011000 #define NDIS_STATUS_INVALID_DEVICE_REQUEST 0xC0000010 #define NDIS_STATUS_NETWORK_UNREACHABLE 0xC000023C /* * NDIS event codes. They are usually reported to NdisWriteErrorLogEntry(). */ #define EVENT_NDIS_RESOURCE_CONFLICT 0xC0001388 #define EVENT_NDIS_OUT_OF_RESOURCE 0xC0001389 #define EVENT_NDIS_HARDWARE_FAILURE 0xC000138A #define EVENT_NDIS_ADAPTER_NOT_FOUND 0xC000138B #define EVENT_NDIS_INTERRUPT_CONNECT 0xC000138C #define EVENT_NDIS_DRIVER_FAILURE 0xC000138D #define EVENT_NDIS_BAD_VERSION 0xC000138E #define EVENT_NDIS_TIMEOUT 0x8000138F #define EVENT_NDIS_NETWORK_ADDRESS 0xC0001390 #define EVENT_NDIS_UNSUPPORTED_CONFIGURATION 0xC0001391 #define EVENT_NDIS_INVALID_VALUE_FROM_ADAPTER 0xC0001392 #define EVENT_NDIS_MISSING_CONFIGURATION_PARAMETER 0xC0001393 #define EVENT_NDIS_BAD_IO_BASE_ADDRESS 0xC0001394 #define EVENT_NDIS_RECEIVE_SPACE_SMALL 0x40001395 #define EVENT_NDIS_ADAPTER_DISABLED 0x80001396 #define EVENT_NDIS_IO_PORT_CONFLICT 0x80001397 #define EVENT_NDIS_PORT_OR_DMA_CONFLICT 0x80001398 #define EVENT_NDIS_MEMORY_CONFLICT 0x80001399 #define EVENT_NDIS_INTERRUPT_CONFLICT 0x8000139A #define EVENT_NDIS_DMA_CONFLICT 0x8000139B #define EVENT_NDIS_INVALID_DOWNLOAD_FILE_ERROR 0xC000139C #define EVENT_NDIS_MAXRECEIVES_ERROR 0x8000139D #define EVENT_NDIS_MAXTRANSMITS_ERROR 0x8000139E #define EVENT_NDIS_MAXFRAMESIZE_ERROR 0x8000139F #define EVENT_NDIS_MAXINTERNALBUFS_ERROR 0x800013A0 #define EVENT_NDIS_MAXMULTICAST_ERROR 0x800013A1 #define EVENT_NDIS_PRODUCTID_ERROR 0x800013A2 #define EVENT_NDIS_LOBE_FAILUE_ERROR 0x800013A3 #define EVENT_NDIS_SIGNAL_LOSS_ERROR 0x800013A4 #define EVENT_NDIS_REMOVE_RECEIVED_ERROR 0x800013A5 #define EVENT_NDIS_TOKEN_RING_CORRECTION 0x400013A6 #define EVENT_NDIS_ADAPTER_CHECK_ERROR 0xC00013A7 #define EVENT_NDIS_RESET_FAILURE_ERROR 0x800013A8 #define EVENT_NDIS_CABLE_DISCONNECTED_ERROR 0x800013A9 #define EVENT_NDIS_RESET_FAILURE_CORRECTION 0x800013AA /* * NDIS OIDs used by the 
queryinfo/setinfo routines. * Some are required by all NDIS drivers, some are specific to * a particular type of device, and some are purely optional. * Unfortunately, one of the purely optional OIDs is the one * that lets us set the MAC address of the device. */ /* Required OIDs */ #define OID_GEN_SUPPORTED_LIST 0x00010101 #define OID_GEN_HARDWARE_STATUS 0x00010102 #define OID_GEN_MEDIA_SUPPORTED 0x00010103 #define OID_GEN_MEDIA_IN_USE 0x00010104 #define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 #define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 #define OID_GEN_LINK_SPEED 0x00010107 #define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 #define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 #define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A #define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B #define OID_GEN_VENDOR_ID 0x0001010C #define OID_GEN_VENDOR_DESCRIPTION 0x0001010D #define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E #define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F #define OID_GEN_DRIVER_VERSION 0x00010110 #define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 #define OID_GEN_PROTOCOL_OPTIONS 0x00010112 #define OID_GEN_MAC_OPTIONS 0x00010113 #define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 #define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 #define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 #define OID_GEN_SUPPORTED_GUIDS 0x00010117 #define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 /* Set only */ #define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 /* Set only */ #define OID_GEN_MACHINE_NAME 0x0001021A #define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B /* Set only */ #define OID_GEN_VLAN_ID 0x0001021C /* Optional OIDs. */ #define OID_GEN_MEDIA_CAPABILITIES 0x00010201 #define OID_GEN_PHYSICAL_MEDIUM 0x00010202 /* Required statistics OIDs. 
*/ #define OID_GEN_XMIT_OK 0x00020101 #define OID_GEN_RCV_OK 0x00020102 #define OID_GEN_XMIT_ERROR 0x00020103 #define OID_GEN_RCV_ERROR 0x00020104 #define OID_GEN_RCV_NO_BUFFER 0x00020105 /* Optional OID statistics */ #define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 #define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 #define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 #define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 #define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 #define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 #define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 #define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 #define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 #define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A #define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B #define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C #define OID_GEN_RCV_CRC_ERROR 0x0002020D #define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E #define OID_GEN_GET_TIME_CAPS 0x0002020F #define OID_GEN_GET_NETCARD_TIME 0x00020210 #define OID_GEN_NETCARD_LOAD 0x00020211 #define OID_GEN_DEVICE_PROFILE 0x00020212 /* 802.3 (ethernet) OIDs */ #define OID_802_3_PERMANENT_ADDRESS 0x01010101 #define OID_802_3_CURRENT_ADDRESS 0x01010102 #define OID_802_3_MULTICAST_LIST 0x01010103 #define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 #define OID_802_3_MAC_OPTIONS 0x01010105 #define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001 #define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 #define OID_802_3_XMIT_ONE_COLLISION 0x01020102 #define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 #define OID_802_3_XMIT_DEFERRED 0x01020201 #define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 #define OID_802_3_RCV_OVERRUN 0x01020203 #define OID_802_3_XMIT_UNDERRUN 0x01020204 #define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 #define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 #define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 /* PnP and power management OIDs */ #define OID_PNP_CAPABILITIES 0xFD010100 #define OID_PNP_SET_POWER 0xFD010101 #define OID_PNP_QUERY_POWER 0xFD010102 #define 
OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 #define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 #define OID_PNP_WAKE_UP_PATTERN_LIST 0xFD010105 #define OID_PNP_ENABLE_WAKE_UP 0xFD010106 /* * These are the possible power states for * OID_PNP_SET_POWER and OID_PNP_QUERY_POWER. */ #define NDIS_POWERSTATE_UNSPEC 0 #define NDIS_POWERSTATE_D0 1 #define NDIS_POWERSTATE_D1 2 #define NDIS_POWERSTATE_D2 3 #define NDIS_POWERSTATE_D3 4 /* * These are used with the MiniportPnpEventNotify() method. */ #define NDIS_POWERPROFILE_BATTERY 0 #define NDIS_POWERPROFILE_ACONLINE 1 #define NDIS_PNP_EVENT_QUERY_REMOVED 0 #define NDIS_PNP_EVENT_REMOVED 1 #define NDIS_PNP_EVENT_SURPRISE_REMOVED 2 #define NDIS_PNP_EVENT_QUERY_STOPPED 3 #define NDIS_PNP_EVENT_STOPPED 4 #define NDIS_PNP_EVENT_PROFILECHANGED 5 /* PnP/PM Statistics (Optional). */ #define OID_PNP_WAKE_UP_OK 0xFD020200 #define OID_PNP_WAKE_UP_ERROR 0xFD020201 /* The following bits are defined for OID_PNP_ENABLE_WAKE_UP */ #define NDIS_PNP_WAKE_UP_MAGIC_PACKET 0x00000001 #define NDIS_PNP_WAKE_UP_PATTERN_MATCH 0x00000002 #define NDIS_PNP_WAKE_UP_LINK_CHANGE 0x00000004 /* 802.11 OIDs */ #define OID_802_11_BSSID 0x0D010101 #define OID_802_11_SSID 0x0D010102 #define OID_802_11_NETWORK_TYPES_SUPPORTED 0x0D010203 #define OID_802_11_NETWORK_TYPE_IN_USE 0x0D010204 #define OID_802_11_TX_POWER_LEVEL 0x0D010205 #define OID_802_11_RSSI 0x0D010206 #define OID_802_11_RSSI_TRIGGER 0x0D010207 #define OID_802_11_INFRASTRUCTURE_MODE 0x0D010108 #define OID_802_11_FRAGMENTATION_THRESHOLD 0x0D010209 #define OID_802_11_RTS_THRESHOLD 0x0D01020A #define OID_802_11_NUMBER_OF_ANTENNAS 0x0D01020B #define OID_802_11_RX_ANTENNA_SELECTED 0x0D01020C #define OID_802_11_TX_ANTENNA_SELECTED 0x0D01020D #define OID_802_11_SUPPORTED_RATES 0x0D01020E #define OID_802_11_DESIRED_RATES 0x0D010210 #define OID_802_11_CONFIGURATION 0x0D010211 #define OID_802_11_STATISTICS 0x0D020212 #define OID_802_11_ADD_WEP 0x0D010113 #define OID_802_11_REMOVE_WEP 0x0D010114 #define 
OID_802_11_DISASSOCIATE 0x0D010115 #define OID_802_11_POWER_MODE 0x0D010216 #define OID_802_11_BSSID_LIST 0x0D010217 #define OID_802_11_AUTHENTICATION_MODE 0x0D010118 #define OID_802_11_PRIVACY_FILTER 0x0D010119 #define OID_802_11_BSSID_LIST_SCAN 0x0D01011A #define OID_802_11_WEP_STATUS 0x0D01011B #define OID_802_11_ENCRYPTION_STATUS OID_802_11_WEP_STATUS #define OID_802_11_RELOAD_DEFAULTS 0x0D01011C #define OID_802_11_ADD_KEY 0x0D01011D #define OID_802_11_REMOVE_KEY 0x0D01011E #define OID_802_11_ASSOCIATION_INFORMATION 0x0D01011F #define OID_802_11_TEST 0x0D010120 #define OID_802_11_CAPABILITY 0x0D010122 #define OID_802_11_PMKID 0x0D010123 /* structures/definitions for 802.11 */ #define NDIS_80211_NETTYPE_11FH 0x00000000 #define NDIS_80211_NETTYPE_11DS 0x00000001 #define NDIS_80211_NETTYPE_11OFDM5 0x00000002 #define NDIS_80211_NETTYPE_11OFDM24 0x00000003 #define NDIS_80211_NETTYPE_AUTO 0x00000004 struct ndis_80211_nettype_list { uint32_t ntl_items; uint32_t ntl_type[1]; }; #define NDIS_80211_POWERMODE_CAM 0x00000000 #define NDIS_80211_POWERMODE_MAX_PSP 0x00000001 #define NDIS_80211_POWERMODE_FAST_PSP 0x00000002 typedef uint32_t ndis_80211_power; /* Power in milliwatts */ typedef uint32_t ndis_80211_rssi; /* Signal strength in dBm */ struct ndis_80211_config_fh { uint32_t ncf_length; uint32_t ncf_hoppatterh; uint32_t ncf_hopset; uint32_t ncf_dwelltime; }; typedef struct ndis_80211_config_fh ndis_80211_config_fh; struct ndis_80211_config { uint32_t nc_length; uint32_t nc_beaconperiod; uint32_t nc_atimwin; uint32_t nc_dsconfig; ndis_80211_config_fh nc_fhconfig; }; typedef struct ndis_80211_config ndis_80211_config; struct ndis_80211_stats { uint32_t ns_length; uint64_t ns_txfragcnt; uint64_t ns_txmcastcnt; uint64_t ns_failedcnt; uint64_t ns_retrycnt; uint64_t ns_multiretrycnt; uint64_t ns_rtssuccesscnt; uint64_t ns_rtsfailcnt; uint64_t ns_ackfailcnt; uint64_t ns_dupeframecnt; uint64_t ns_rxfragcnt; uint64_t ns_rxmcastcnt; uint64_t ns_fcserrcnt; }; typedef struct 
ndis_80211_stats ndis_80211_stats; typedef uint32_t ndis_80211_key_idx; struct ndis_80211_wep { uint32_t nw_length; uint32_t nw_keyidx; uint32_t nw_keylen; uint8_t nw_keydata[256]; }; typedef struct ndis_80211_wep ndis_80211_wep; #define NDIS_80211_WEPKEY_TX 0x80000000 #define NDIS_80211_WEPKEY_PERCLIENT 0x40000000 #define NDIS_80211_NET_INFRA_IBSS 0x00000000 #define NDIS_80211_NET_INFRA_BSS 0x00000001 #define NDIS_80211_NET_INFRA_AUTO 0x00000002 #define NDIS_80211_AUTHMODE_OPEN 0x00000000 #define NDIS_80211_AUTHMODE_SHARED 0x00000001 #define NDIS_80211_AUTHMODE_AUTO 0x00000002 #define NDIS_80211_AUTHMODE_WPA 0x00000003 #define NDIS_80211_AUTHMODE_WPAPSK 0x00000004 #define NDIS_80211_AUTHMODE_WPANONE 0x00000005 #define NDIS_80211_AUTHMODE_WPA2 0x00000006 #define NDIS_80211_AUTHMODE_WPA2PSK 0x00000007 typedef uint8_t ndis_80211_rates[8]; typedef uint8_t ndis_80211_rates_ex[16]; typedef uint8_t ndis_80211_macaddr[6]; struct ndis_80211_ssid { uint32_t ns_ssidlen; uint8_t ns_ssid[32]; }; typedef struct ndis_80211_ssid ndis_80211_ssid; struct ndis_wlan_bssid { uint32_t nwb_length; ndis_80211_macaddr nwb_macaddr; uint8_t nwb_rsvd[2]; ndis_80211_ssid nwb_ssid; uint32_t nwb_privacy; ndis_80211_rssi nwb_rssi; uint32_t nwb_nettype; ndis_80211_config nwb_config; uint32_t nwb_netinfra; ndis_80211_rates nwb_supportedrates; }; typedef struct ndis_wlan_bssid ndis_wlan_bssid; struct ndis_80211_bssid_list { uint32_t nbl_items; ndis_wlan_bssid nbl_bssid[1]; }; typedef struct ndis_80211_bssid_list ndis_80211_bssid_list; struct ndis_wlan_bssid_ex { uint32_t nwbx_len; ndis_80211_macaddr nwbx_macaddr; uint8_t nwbx_rsvd[2]; ndis_80211_ssid nwbx_ssid; uint32_t nwbx_privacy; ndis_80211_rssi nwbx_rssi; uint32_t nwbx_nettype; ndis_80211_config nwbx_config; uint32_t nwbx_netinfra; ndis_80211_rates_ex nwbx_supportedrates; uint32_t nwbx_ielen; uint8_t nwbx_ies[1]; }; typedef struct ndis_wlan_bssid_ex ndis_wlan_bssid_ex; struct ndis_80211_bssid_list_ex { uint32_t nblx_items; ndis_wlan_bssid_ex 
nblx_bssid[1]; }; typedef struct ndis_80211_bssid_list_ex ndis_80211_bssid_list_ex; struct ndis_80211_fixed_ies { uint8_t nfi_tstamp[8]; uint16_t nfi_beaconint; uint16_t nfi_caps; }; struct ndis_80211_variable_ies { uint8_t nvi_elemid; uint8_t nvi_len; uint8_t nvi_data[1]; }; typedef uint32_t ndis_80211_fragthresh; typedef uint32_t ndis_80211_rtsthresh; typedef uint32_t ndis_80211_antenna; #define NDIS_80211_PRIVFILT_ACCEPTALL 0x00000000 #define NDIS_80211_PRIVFILT_8021XWEP 0x00000001 #define NDIS_80211_WEPSTAT_ENABLED 0x00000000 #define NDIS_80211_WEPSTAT_ENC1ENABLED NDIS_80211_WEPSTAT_ENABLED #define NDIS_80211_WEPSTAT_DISABLED 0x00000001 #define NDIS_80211_WEPSTAT_ENCDISABLED NDIS_80211_WEPSTAT_DISABLED #define NDIS_80211_WEPSTAT_KEYABSENT 0x00000002 #define NDIS_80211_WEPSTAT_ENC1KEYABSENT NDIS_80211_WEPSTAT_KEYABSENT #define NDIS_80211_WEPSTAT_NOTSUPPORTED 0x00000003 #define NDIS_80211_WEPSTAT_ENCNOTSUPPORTED NDIS_80211_WEPSTAT_NOTSUPPORTED #define NDIS_80211_WEPSTAT_ENC2ENABLED 0x00000004 #define NDIS_80211_WEPSTAT_ENC2KEYABSENT 0x00000005 #define NDIS_80211_WEPSTAT_ENC3ENABLED 0x00000006 #define NDIS_80211_WEPSTAT_ENC3KEYABSENT 0x00000007 #define NDIS_80211_RELOADDEFAULT_WEP 0x00000000 #define NDIS_80211_STATUSTYPE_AUTH 0x00000000 #define NDIS_80211_STATUSTYPE_PMKIDLIST 0x00000001 struct ndis_80211_status_indication { uint32_t nsi_type; }; typedef struct ndis_80211_status_indication ndis_80211_status_indication; #define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01 #define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02 #define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06 #define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E struct ndis_80211_auth_request { uint32_t nar_len; ndis_80211_macaddr nar_bssid; uint32_t nar_flags; }; typedef struct ndis_80211_auth_request ndis_80211_auth_request; struct ndis_80211_key { uint32_t nk_len; uint32_t nk_keyidx; uint32_t nk_keylen; ndis_80211_macaddr nk_bssid; uint8_t nk_pad[6]; uint64_t nk_keyrsc; uint8_t nk_keydata[32]; }; typedef struct 
ndis_80211_key ndis_80211_key; struct ndis_80211_remove_key { uint32_t nk_len; uint32_t nk_keyidx; ndis_80211_macaddr nk_bssid; }; typedef struct ndis_80211_remove_key ndis_80211_remove_key; #define NDIS_80211_AI_REQFI_CAPABILITIES 0x00000001 #define NDIS_80211_AI_REQFI_LISTENINTERVAL 0x00000002 #define NDIS_80211_AI_REQFI_CURRENTAPADDRESS 0x00000004 #define NDIS_80211_AI_RESFI_CAPABILITIES 0x00000001 #define NDIS_80211_AI_RESFI_STATUSCODE 0x00000002 #define NDIS_80211_AI_RESFI_ASSOCIATIONID 0x00000004 struct ndis_80211_ai_reqfi { uint16_t naq_caps; uint16_t naq_listentint; ndis_80211_macaddr naq_currentapaddr; }; typedef struct ndis_80211_ai_reqfi ndis_80211_ai_reqfi; struct ndis_80211_ai_resfi { uint16_t nas_caps; uint16_t nas_statuscode; uint16_t nas_associd; }; typedef struct ndis_80211_ai_resfi ndis_80211_ai_resfi; struct ndis_80211_assoc_info { uint32_t nai_len; uint16_t nai_avail_req_fixed_ies; ndis_80211_ai_reqfi nai_req_fixed_ies; uint32_t nai_req_ielen; uint32_t nai_offset_req_ies; uint16_t nai_avail_resp_fixed_ies; ndis_80211_ai_resfi nai_resp_fixed_iex; uint32_t nai_resp_ielen; uint32_t nai_offset_resp_ies; }; typedef struct ndis_80211_assoc_info ndis_80211_assoc_info; struct ndis_80211_auth_event { ndis_80211_status_indication nae_status; ndis_80211_auth_request nae_request[1]; }; typedef struct ndis_80211_auth_event ndis_80211_auth_event; struct ndis_80211_test { uint32_t nt_len; uint32_t nt_type; union { ndis_80211_auth_event nt_authevent; uint32_t nt_rssitrigger; } u; }; typedef struct ndis_80211_test ndis_80211_test; struct ndis_80211_auth_encrypt { uint32_t ne_authmode; uint32_t ne_cryptstat; }; typedef struct ndis_80211_auth_encrypt ndis_80211_auth_encrypt; struct ndis_80211_caps { uint32_t nc_len; uint32_t nc_ver; uint32_t nc_numpmkids; ndis_80211_auth_encrypt nc_authencs[1]; }; typedef struct ndis_80211_caps ndis_80211_caps; struct ndis_80211_bssidinfo { ndis_80211_macaddr nb_bssid; uint8_t nb_pmkid[16]; }; typedef struct ndis_80211_bssidinfo 
ndis_80211_bssidinfo; struct ndis_80211_pmkid { uint32_t np_len; uint32_t np_bssidcnt; ndis_80211_bssidinfo np_bssidinfo[1]; }; typedef struct ndis_80211_pmkid ndis_80211_pmkid; struct ndis_80211_pmkid_cand { ndis_80211_macaddr npc_bssid; uint32_t npc_flags; }; typedef struct ndis_80211_pmkid_cand ndis_80211_pmkid_cand; #define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED (0x01) struct ndis_80211_pmkid_candidate_list { uint32_t npcl_version; uint32_t npcl_numcandidates; ndis_80211_pmkid_cand npcl_candidatelist[1]; }; typedef struct ndis_80211_pmkid_candidate_list ndis_80211_pmkid_candidate_list; struct ndis_80211_enc_indication { uint32_t nei_statustype; ndis_80211_pmkid_candidate_list nei_pmkidlist; }; typedef struct ndis_80211_enc_indication ndis_80211_enc_indication; /* TCP OIDs. */ #define OID_TCP_TASK_OFFLOAD 0xFC010201 #define OID_TCP_TASK_IPSEC_ADD_SA 0xFC010202 #define OID_TCP_TASK_IPSEC_DELETE_SA 0xFC010203 #define OID_TCP_SAN_SUPPORT 0xFC010204 #define NDIS_TASK_OFFLOAD_VERSION 1 #define NDIS_TASK_TCPIP_CSUM 0x00000000 #define NDIS_TASK_IPSEC 0x00000001 #define NDIS_TASK_TCP_LARGESEND 0x00000002 #define NDIS_ENCAP_UNSPEC 0x00000000 #define NDIS_ENCAP_NULL 0x00000001 #define NDIS_ENCAP_IEEE802_3 0x00000002 #define NDIS_ENCAP_IEEE802_5 0x00000003 #define NDIS_ENCAP_SNAP_ROUTED 0x00000004 #define NDIS_ENCAP_SNAP_BRIDGED 0x00000005 #define NDIS_ENCAPFLAG_FIXEDHDRLEN 0x00000001 struct ndis_encap_fmt { uint32_t nef_encap; uint32_t nef_flags; uint32_t nef_encaphdrlen; }; typedef struct ndis_encap_fmt ndis_encap_fmt; struct ndis_task_offload_hdr { uint32_t ntoh_vers; uint32_t ntoh_len; uint32_t ntoh_rsvd; uint32_t ntoh_offset_firsttask; ndis_encap_fmt ntoh_encapfmt; }; typedef struct ndis_task_offload_hdr ndis_task_offload_hdr; struct ndis_task_offload { uint32_t nto_vers; uint32_t nto_len; uint32_t nto_task; uint32_t nto_offset_nexttask; uint32_t nto_taskbuflen; uint8_t nto_taskbuf[1]; }; typedef struct ndis_task_offload ndis_task_offload; #define 
NDIS_TCPSUM_FLAGS_IP_OPTS 0x00000001 #define NDIS_TCPSUM_FLAGS_TCP_OPTS 0x00000002 #define NDIS_TCPSUM_FLAGS_TCP_CSUM 0x00000004 #define NDIS_TCPSUM_FLAGS_UDP_CSUM 0x00000008 #define NDIS_TCPSUM_FLAGS_IP_CSUM 0x00000010 struct ndis_task_tcpip_csum { uint32_t nttc_v4tx; uint32_t nttc_v4rx; uint32_t nttc_v6tx; uint32_t nttc_v6rx; }; typedef struct ndis_task_tcpip_csum ndis_task_tcpip_csum; struct ndis_task_tcp_largesend { uint32_t nttl_vers; uint32_t nttl_maxofflen; uint32_t nttl_minsegcnt; uint8_t nttl_tcpopt; uint8_t nttl_ipopt; }; typedef struct ndis_task_tcp_largesend ndis_task_tcp_largesend; #define NDIS_IPSEC_AH_MD5 0x00000001 #define NDIS_IPSEC_AH_SHA1 0x00000002 #define NDIS_IPSEC_AH_TRANSPORT 0x00000004 #define NDIS_IPSEC_AH_TUNNEL 0x00000008 #define NDIS_IPSEC_AH_SEND 0x00000010 #define NDIS_IPSEC_AH_RECEIVE 0x00000020 #define NDIS_IPSEC_ESP_DES 0x00000001 #define NDIS_IPSEC_ESP_RSVD 0x00000002 #define NDIS_IPSEC_ESP_3DES 0x00000004 #define NDIS_IPSEC_ESP_NULL 0x00000008 #define NDIS_IPSEC_ESP_TRANSPORT 0x00000010 #define NDIS_IPSEC_ESP_TUNNEL 0x00000020 #define NDIS_IPSEC_ESP_SEND 0x00000040 #define NDIS_IPSEC_ESP_RECEIVE 0x00000080 struct ndis_task_ipsec { uint32_t nti_ah_esp_combined; uint32_t nti_ah_transport_tunnel_combined; uint32_t nti_v4_options; uint32_t nti_rsvd; uint32_t nti_v4ah; uint32_t nti_v4esp; }; typedef struct ndis_task_ipsec ndis_task_ipsec; /* * Attribures of NDIS drivers. Not all drivers support * all attributes. 
*/ #define NDIS_ATTRIBUTE_IGNORE_PACKET_TIMEOUT 0x00000001 #define NDIS_ATTRIBUTE_IGNORE_REQUEST_TIMEOUT 0x00000002 #define NDIS_ATTRIBUTE_IGNORE_TOKEN_RING_ERRORS 0x00000004 #define NDIS_ATTRIBUTE_BUS_MASTER 0x00000008 #define NDIS_ATTRIBUTE_INTERMEDIATE_DRIVER 0x00000010 #define NDIS_ATTRIBUTE_DESERIALIZE 0x00000020 #define NDIS_ATTRIBUTE_NO_HALT_ON_SUSPEND 0x00000040 #define NDIS_ATTRIBUTE_SURPRISE_REMOVE_OK 0x00000080 #define NDIS_ATTRIBUTE_NOT_CO_NDIS 0x00000100 #define NDIS_ATTRIBUTE_USES_SAFE_BUFFER_APIS 0x00000200 #define NDIS_SERIALIZED(block) \ (((block)->nmb_flags & NDIS_ATTRIBUTE_DESERIALIZE) == 0) enum ndis_media_state { nmc_connected, nmc_disconnected }; typedef enum ndis_media_state ndis_media_state; /* Ndis Packet Filter Bits (OID_GEN_CURRENT_PACKET_FILTER). */ #define NDIS_PACKET_TYPE_DIRECTED 0x00000001 #define NDIS_PACKET_TYPE_MULTICAST 0x00000002 #define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 #define NDIS_PACKET_TYPE_BROADCAST 0x00000008 #define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 #define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 #define NDIS_PACKET_TYPE_SMT 0x00000040 #define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 #define NDIS_PACKET_TYPE_GROUP 0x00001000 #define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000 #define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000 #define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000 /* Ndis MAC option bits (OID_GEN_MAC_OPTIONS). 
*/ #define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001 #define NDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002 #define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004 #define NDIS_MAC_OPTION_NO_LOOPBACK 0x00000008 #define NDIS_MAC_OPTION_FULL_DUPLEX 0x00000010 #define NDIS_MAC_OPTION_EOTX_INDICATION 0x00000020 #define NDIS_MAC_OPTION_8021P_PRIORITY 0x00000040 #define NDIS_MAC_OPTION_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00000080 #define NDIS_MAC_OPTION_RECEIVE_AT_DPC 0x00000100 #define NDIS_MAC_OPTION_8021Q_VLAN 0x00000200 #define NDIS_MAC_OPTION_RESERVED 0x80000000 #define NDIS_DMA_24BITS 0x00 #define NDIS_DMA_32BITS 0x01 #define NDIS_DMA_64BITS 0x02 /* struct ndis_physaddr { #ifdef __i386__ uint64_t np_quad; #endif #ifdef __amd64__ uint32_t np_low; uint32_t np_high; #define np_quad np_low #endif #ifdef notdef uint32_t np_low; uint32_t np_high; #endif }; */ typedef struct physaddr ndis_physaddr; struct ndis_ansi_string { uint16_t nas_len; uint16_t nas_maxlen; char *nas_buf; }; typedef struct ndis_ansi_string ndis_ansi_string; #ifdef notdef /* * nus_buf is really a wchar_t *, but it's inconvenient to include * all the necessary header goop needed to define it, and it's a * pointer anyway, so for now, just make it a uint16_t *. 
*/ struct ndis_unicode_string { uint16_t nus_len; uint16_t nus_maxlen; uint16_t *nus_buf; }; typedef struct ndis_unicode_string ndis_unicode_string; #endif typedef unicode_string ndis_unicode_string; enum ndis_parm_type { ndis_parm_int, ndis_parm_hexint, ndis_parm_string, ndis_parm_multistring, ndis_parm_binary }; typedef enum ndis_parm_type ndis_parm_type; struct ndis_binary_data { uint16_t nbd_len; void *nbd_buf; }; typedef struct ndis_binary_data ndis_binary_data; struct ndis_config_parm { ndis_parm_type ncp_type; union { uint32_t ncp_intdata; ndis_unicode_string ncp_stringdata; ndis_binary_data ncp_binarydata; } ncp_parmdata; }; /* * Not part of Windows NDIS spec; we uses this to keep a * list of ndis_config_parm structures that we've allocated. */ typedef struct ndis_config_parm ndis_config_parm; struct ndis_parmlist_entry { list_entry np_list; ndis_config_parm np_parm; }; typedef struct ndis_parmlist_entry ndis_parmlist_entry; #ifdef notdef struct ndis_list_entry { struct ndis_list_entry *nle_flink; struct ndis_list_entry *nle_blink; }; typedef struct ndis_list_entry ndis_list_entry; #endif struct ndis_bind_paths { uint32_t nbp_number; ndis_unicode_string nbp_paths[1]; }; typedef struct ndis_bind_paths ndis_bind_paths; #ifdef notdef struct dispatch_header { uint8_t dh_type; uint8_t dh_abs; uint8_t dh_size; uint8_t dh_inserted; uint32_t dh_sigstate; list_entry dh_waitlisthead; }; #endif #define dispatch_header nt_dispatch_header struct ndis_ktimer { struct dispatch_header nk_header; uint64_t nk_duetime; list_entry nk_timerlistentry; void *nk_dpc; uint32_t nk_period; }; struct ndis_kevent { struct dispatch_header nk_header; }; struct ndis_event { struct nt_kevent ne_event; }; typedef struct ndis_event ndis_event; /* Kernel defered procedure call (i.e. 
timer callback) */ struct ndis_kdpc; typedef void (*ndis_kdpc_func)(struct ndis_kdpc *, void *, void *, void *); struct ndis_kdpc { uint16_t nk_type; uint8_t nk_num; uint8_t nk_importance; list_entry nk_dpclistentry; ndis_kdpc_func nk_deferedfunc; void *nk_deferredctx; void *nk_sysarg1; void *nk_sysarg2; uint32_t *nk_lock; }; struct ndis_timer { struct ktimer nt_ktimer; struct kdpc nt_kdpc; }; typedef struct ndis_timer ndis_timer; typedef void (*ndis_timer_function)(void *, void *, void *, void *); struct ndis_miniport_timer { struct ktimer nmt_ktimer; struct kdpc nmt_kdpc; ndis_timer_function nmt_timerfunc; void *nmt_timerctx; ndis_miniport_block *nmt_block; struct ndis_miniport_timer *nmt_nexttimer; }; typedef struct ndis_miniport_timer ndis_miniport_timer; struct ndis_spin_lock { ndis_kspin_lock nsl_spinlock; ndis_kirql nsl_kirql; }; typedef struct ndis_spin_lock ndis_spin_lock; struct ndis_rw_lock { union { kspin_lock nrl_spinlock; void *nrl_ctx; } u; uint8_t nrl_rsvd[16]; }; #define nrl_spinlock u.nrl_spinlock #define nrl_ctx u.nrl_ctx; typedef struct ndis_rw_lock ndis_rw_lock; struct ndis_lock_state { uint16_t nls_lockstate; ndis_kirql nls_oldirql; }; typedef struct ndis_lock_state ndis_lock_state; struct ndis_request { uint8_t nr_macreserved[4*sizeof(void *)]; uint32_t nr_requesttype; union _ndis_data { struct _ndis_query_information { ndis_oid nr_oid; void *nr_infobuf; uint32_t nr_infobuflen; uint32_t nr_byteswritten; uint32_t nr_bytesneeded; } ndis_query_information; struct _ndis_set_information { ndis_oid nr_oid; void *nr_infobuf; uint32_t nr_infobuflen; uint32_t nr_byteswritten; uint32_t nr_bytesneeded; } ndis_set_information; } ndis_data; - /* NDIS 5.0 extentions */ + /* NDIS 5.0 extensions */ uint8_t nr_ndis_rsvd[9 * sizeof(void *)]; union { uint8_t nr_callmgr_rsvd[2 * sizeof(void *)]; uint8_t nr_protocol_rsvd[2 * sizeof(void *)]; } u; uint8_t nr_miniport_rsvd[2 * sizeof(void *)]; }; typedef struct ndis_request ndis_request; /* * Filler, not used. 
*/ struct ndis_miniport_interrupt { kinterrupt *ni_introbj; ndis_kspin_lock ni_dpccountlock; void *ni_rsvd; void *ni_isrfunc; void *ni_dpcfunc; kdpc ni_dpc; ndis_miniport_block *ni_block; uint8_t ni_dpccnt; uint8_t ni_filler1; struct nt_kevent ni_dpcevt; uint8_t ni_shared; uint8_t ni_isrreq; }; typedef struct ndis_miniport_interrupt ndis_miniport_interrupt; enum ndis_interrupt_mode { nim_level, nim_latched }; typedef enum ndis_interrupt_mode ndis_interrupt_mode; #define NUMBER_OF_SINGLE_WORK_ITEMS 6 struct ndis_work_item; typedef void (*ndis_proc)(struct ndis_work_item *, void *); struct ndis_work_item { void *nwi_ctx; ndis_proc nwi_func; uint8_t nwi_wraprsvd[sizeof(void *) * 8]; }; typedef struct ndis_work_item ndis_work_item; #define NdisInitializeWorkItem(w, f, c) \ do { \ (w)->nwi_ctx = c; \ (w)->nwi_func = f; \ } while (0) #ifdef notdef struct ndis_buffer { struct ndis_buffer *nb_next; uint16_t nb_size; uint16_t nb_flags; void *nb_process; void *nb_mappedsystemva; void *nb_startva; uint32_t nb_bytecount; uint32_t nb_byteoffset; }; typedef struct ndis_buffer ndis_buffer; #endif struct ndis_sc_element { ndis_physaddr nse_addr; uint32_t nse_len; uint32_t *nse_rsvd; }; typedef struct ndis_sc_element ndis_sc_element; #define NDIS_MAXSEG 32 #define NDIS_BUS_SPACE_SHARED_MAXADDR 0x3E7FFFFF struct ndis_sc_list { uint32_t nsl_frags; uint32_t *nsl_rsvd; ndis_sc_element nsl_elements[NDIS_MAXSEG]; }; typedef struct ndis_sc_list ndis_sc_list; struct ndis_tcpip_csum { union { uint32_t ntc_txflags; uint32_t ntc_rxflags; uint32_t ntc_val; } u; }; typedef struct ndis_tcpip_csum ndis_tcpip_csum; #define NDIS_TXCSUM_DO_IPV4 0x00000001 #define NDIS_TXCSUM_DO_IPV6 0x00000002 #define NDIS_TXCSUM_DO_TCP 0x00000004 #define NDIS_TXCSUM_DO_UDP 0x00000008 #define NDIS_TXCSUM_DO_IP 0x00000010 #define NDIS_RXCSUM_TCP_FAILED 0x00000001 #define NDIS_RXCSUM_UDP_FAILED 0x00000002 #define NDIS_RXCSUM_IP_FAILED 0x00000004 #define NDIS_RXCSUM_TCP_PASSED 0x00000008 #define NDIS_RXCSUM_UDP_PASSED 
0x00000010 #define NDIS_RXCSUM_IP_PASSED 0x00000020 #define NDIS_RXCSUM_LOOPBACK 0x00000040 struct ndis_vlan { union { struct { uint32_t nvt_userprio:3; uint32_t nvt_canformatid:1; uint32_t nvt_vlanid:12; uint32_t nvt_rsvd:16; } nv_taghdr; } u; }; typedef struct ndis_vlan ndis_vlan; enum ndis_perpkt_info { ndis_tcpipcsum_info, ndis_ipsec_info, ndis_largesend_info, ndis_classhandle_info, ndis_rsvd, ndis_sclist_info, ndis_ieee8021q_info, ndis_originalpkt_info, ndis_packetcancelid, ndis_maxpkt_info }; typedef enum ndis_perpkt_info ndis_perpkt_info; struct ndis_packet_extension { void *npe_info[ndis_maxpkt_info]; }; typedef struct ndis_packet_extension ndis_packet_extension; struct ndis_packet_private { uint32_t npp_physcnt; uint32_t npp_totlen; ndis_buffer *npp_head; ndis_buffer *npp_tail; void *npp_pool; uint32_t npp_count; uint32_t npp_flags; uint8_t npp_validcounts; uint8_t npp_ndispktflags; uint16_t npp_packetooboffset; }; #define NDIS_FLAGS_PROTOCOL_ID_MASK 0x0000000F #define NDIS_FLAGS_MULTICAST_PACKET 0x00000010 #define NDIS_FLAGS_RESERVED2 0x00000020 #define NDIS_FLAGS_RESERVED3 0x00000040 #define NDIS_FLAGS_DONT_LOOPBACK 0x00000080 #define NDIS_FLAGS_IS_LOOPBACK_PACKET 0x00000100 #define NDIS_FLAGS_LOOPBACK_ONLY 0x00000200 #define NDIS_FLAGS_RESERVED4 0x00000400 #define NDIS_FLAGS_DOUBLE_BUFFERED 0x00000800 #define NDIS_FLAGS_SENT_AT_DPC 0x00001000 #define NDIS_FLAGS_USES_SG_BUFFER_LIST 0x00002000 #define NDIS_PACKET_WRAPPER_RESERVED 0x3F #define NDIS_PACKET_CONTAINS_MEDIA_SPECIFIC_INFO 0x40 #define NDIS_PACKET_ALLOCATED_BY_NDIS 0x80 #define NDIS_PROTOCOL_ID_DEFAULT 0x00 #define NDIS_PROTOCOL_ID_TCP_IP 0x02 #define NDIS_PROTOCOL_ID_IPX 0x06 #define NDIS_PROTOCOL_ID_NBF 0x07 #define NDIS_PROTOCOL_ID_MAX 0x0F #define NDIS_PROTOCOL_ID_MASK 0x0F typedef struct ndis_packet_private ndis_packet_private; enum ndis_classid { ndis_class_802_3prio, ndis_class_wirelesswan_mbx, ndis_class_irda_packetinfo, ndis_class_atm_aainfo }; typedef enum ndis_classid ndis_classid; 
struct ndis_mediaspecific_info { uint32_t nmi_nextentoffset; ndis_classid nmi_classid; uint32_t nmi_size; uint8_t nmi_classinfo[1]; }; typedef struct ndis_mediaspecific_info ndis_mediaspecific_info; struct ndis_packet_oob { union { uint64_t npo_timetotx; uint64_t npo_timetxed; } u; uint64_t npo_timerxed; uint32_t npo_hdrlen; uint32_t npo_mediaspecific_len; void *npo_mediaspecific; ndis_status npo_status; }; typedef struct ndis_packet_oob ndis_packet_oob; /* * Our protocol private region for handling ethernet. * We need this to stash some of the things returned * by NdisMEthIndicateReceive(). */ struct ndis_ethpriv { void *nep_ctx; /* packet context */ long nep_offset; /* residual data to transfer */ void *nep_pad[2]; }; typedef struct ndis_ethpriv ndis_ethpriv; #define PROTOCOL_RESERVED_SIZE_IN_PACKET (4 * sizeof(void *)) struct ndis_packet { ndis_packet_private np_private; union { /* For connectionless miniports. */ struct { uint8_t np_miniport_rsvd[2 * sizeof(void *)]; uint8_t np_wrapper_rsvd[2 * sizeof(void *)]; } np_clrsvd; /* For de-serialized miniports */ struct { uint8_t np_miniport_rsvdex[3 * sizeof(void *)]; uint8_t np_wrapper_rsvdex[sizeof(void *)]; } np_dsrsvd; struct { uint8_t np_mac_rsvd[4 * sizeof(void *)]; } np_macrsvd; } u; uint32_t *np_rsvd[2]; uint8_t np_protocolreserved[PROTOCOL_RESERVED_SIZE_IN_PACKET]; /* * This next part is probably wrong, but we need some place * to put the out of band data structure... */ ndis_packet_oob np_oob; ndis_packet_extension np_ext; ndis_sc_list np_sclist; /* BSD-specific stuff which should be invisible to drivers. 
*/ uint32_t np_refcnt; void *np_softc; void *np_m0; int np_txidx; list_entry np_list; }; typedef struct ndis_packet ndis_packet; struct ndis_packet_pool { slist_header np_head; int np_dead; nt_kevent np_event; kspin_lock np_lock; int np_cnt; int np_len; int np_protrsvd; void *np_pktmem; }; typedef struct ndis_packet_pool ndis_packet_pool; /* mbuf ext type for NDIS */ #define EXT_NDIS EXT_NET_DRV /* mtx type for NDIS */ #define MTX_NDIS_LOCK "NDIS lock" struct ndis_filterdbs { union { void *nf_ethdb; void *nf_nulldb; } u; void *nf_trdb; void *nf_fddidb; void *nf_arcdb; }; typedef struct ndis_filterdbs ndis_filterdbs; #define nf_ethdb u.nf_ethdb enum ndis_medium { NdisMedium802_3, NdisMedium802_5, NdisMediumFddi, NdisMediumWan, NdisMediumLocalTalk, NdisMediumDix, /* defined for convenience, not a real medium */ NdisMediumArcnetRaw, NdisMediumArcnet878_2, NdisMediumAtm, NdisMediumWirelessWan, NdisMediumIrda, NdisMediumBpc, NdisMediumCoWan, NdisMedium1394, NdisMediumMax }; typedef enum ndis_medium ndis_medium; /* enum interface_type { InterfaceTypeUndefined = -1, Internal, Isa, Eisa, MicroChannel, TurboChannel, PCIBus, VMEBus, NuBus, PCMCIABus, CBus, MPIBus, MPSABus, ProcessorInternal, InternalPowerBus, PNPISABus, PNPBus, MaximumInterfaceType }; */ enum ndis_interface_type { NdisInterfaceInternal = Internal, NdisInterfaceIsa = Isa, NdisInterfaceEisa = Eisa, NdisInterfaceMca = MicroChannel, NdisInterfaceTurboChannel = TurboChannel, NdisInterfacePci = PCIBus, NdisInterfacePcMcia = PCMCIABus }; typedef enum ndis_interface_type ndis_interface_type; struct ndis_paddr_unit { ndis_physaddr npu_physaddr; uint32_t npu_len; }; typedef struct ndis_paddr_unit ndis_paddr_unit; struct ndis_map_arg { ndis_paddr_unit *nma_fraglist; int nma_cnt; int nma_max; }; /* * Miniport characteristics were originally defined in the NDIS 3.0 * spec and then extended twice, in NDIS 4.0 and 5.0. 
*/ struct ndis_miniport_characteristics { /* NDIS 3.0 */ uint8_t nmc_version_major; uint8_t nmc_version_minor; uint16_t nmc_pad; uint32_t nmc_rsvd; void * nmc_checkhang_func; void * nmc_disable_interrupts_func; void * nmc_enable_interrupts_func; void * nmc_halt_func; void * nmc_interrupt_func; void * nmc_init_func; void * nmc_isr_func; void * nmc_queryinfo_func; void * nmc_reconfig_func; void * nmc_reset_func; void * nmc_sendsingle_func; void * nmc_setinfo_func; void * nmc_transferdata_func; - /* NDIS 4.0 extentions */ + /* NDIS 4.0 extensions */ void * nmc_return_packet_func; void * nmc_sendmulti_func; void * nmc_allocate_complete_func; /* NDIS 5.0 extensions */ void * nmc_cocreatevc_func; void * nmc_codeletevc_func; void * nmc_coactivatevc_func; void * nmc_codeactivatevc_func; void * nmc_comultisend_func; void * nmc_corequest_func; - /* NDIS 5.1 extentions */ + /* NDIS 5.1 extensions */ void * nmc_canceltxpkts_handler; void * nmc_pnpevent_handler; void * nmc_shutdown_handler; void * nmc_rsvd0; void * nmc_rsvd1; void * nmc_rsvd2; void * nmc_rsvd3; }; typedef struct ndis_miniport_characteristics ndis_miniport_characteristics; struct ndis_driver_object { char *ndo_ifname; void *ndo_softc; ndis_miniport_characteristics ndo_chars; }; typedef struct ndis_driver_object ndis_driver_object; struct ndis_reference { ndis_kspin_lock nr_spinlock; uint16_t nr_refcnt; uint8_t nr_closing; }; typedef struct ndis_reference ndis_reference; struct ndis_timer_entry { struct callout nte_ch; ndis_miniport_timer *nte_timer; TAILQ_ENTRY(ndis_timer_entry) link; }; TAILQ_HEAD(nte_head, ndis_timer_entry); #define NDIS_FH_TYPE_VFS 0 #define NDIS_FH_TYPE_MODULE 1 struct ndis_fh { int nf_type; char *nf_name; void *nf_vp; void *nf_map; uint32_t nf_maplen; }; typedef struct ndis_fh ndis_fh; /* * The miniport block is basically the internal NDIS handle. We need * to define this because, unfortunately, it is not entirely opaque * to NDIS drivers. 
For one thing, it contains the function pointer * to the NDIS packet receive handler, which is invoked out of the * NDIS block via a macro rather than a function pointer. (The * NdisMIndicateReceivePacket() routine is a macro rather than * a function.) For another, the driver maintains a pointer to the * miniport block and passes it as a handle to various NDIS functions. * (The driver never really knows this because it's hidden behind * an ndis_handle though.) * * The miniport block has two parts: the first part contains fields * that must never change, since they are referenced by driver * binaries through macros. The second part is ignored by the driver, * but contains various things used internally by NDIS.SYS. In our * case, we define the first 'immutable' part exactly as it appears * in Windows, but don't bother duplicating the Windows definitions * for the second part. Instead, we replace them with a few BSD-specific * things. */ struct ndis_miniport_block { /* * Windows-specific portion -- DO NOT MODIFY OR NDIS * DRIVERS WILL NOT WORK. 
*/ void *nmb_signature; /* magic number */ ndis_miniport_block *nmb_nextminiport; ndis_mdriver_block *nmb_driverhandle; ndis_handle nmb_miniportadapterctx; ndis_unicode_string nmb_name; ndis_bind_paths *nmb_bindpaths; ndis_handle nmb_openqueue; ndis_reference nmb_ref; ndis_handle nmb_devicectx; uint8_t nmb_padding; uint8_t nmb_lockacquired; uint8_t nmb_pmodeopens; uint8_t nmb_assignedcpu; ndis_kspin_lock nmb_lock; ndis_request *nmb_mediarequest; ndis_miniport_interrupt *nmb_interrupt; uint32_t nmb_flags; uint32_t nmb_pnpflags; list_entry nmb_packetlist; ndis_packet *nmb_firstpendingtxpacket; ndis_packet *nmb_returnpacketqueue; uint32_t nmb_requestbuffer; void *nmb_setmcastbuf; ndis_miniport_block *nmb_primaryminiport; void *nmb_wrapperctx; void *nmb_busdatactx; uint32_t nmb_pnpcaps; cm_resource_list *nmb_resources; ndis_timer nmb_wkupdpctimer; ndis_unicode_string nmb_basename; ndis_unicode_string nmb_symlinkname; uint32_t nmb_checkforhangsecs; uint16_t nmb_cfhticks; uint16_t nmb_cfhcurrticks; ndis_status nmb_resetstatus; ndis_handle nmb_resetopen; ndis_filterdbs nmb_filterdbs; void *nmb_pktind_func; void *nmb_senddone_func; void *nmb_sendrsrc_func; void *nmb_resetdone_func; ndis_medium nmb_medium; uint32_t nmb_busnum; uint32_t nmb_bustype; uint32_t nmb_adaptertype; device_object *nmb_deviceobj; /* Functional device */ device_object *nmb_physdeviceobj; /* Physical device */ device_object *nmb_nextdeviceobj; /* Next dev in stack */ void *nmb_mapreg; void *nmb_callmgraflist; void *nmb_miniportthread; void *nmb_setinfobuf; uint16_t nmb_setinfobuflen; uint16_t nmb_maxsendpkts; ndis_status nmb_fakestatus; void *nmb_lockhandler; ndis_unicode_string *nmb_adapterinstancename; void *nmb_timerqueue; uint32_t nmb_mactoptions; ndis_request *nmb_pendingreq; uint32_t nmb_maxlongaddrs; uint32_t nmb_maxshortaddrs; uint32_t nmb_currlookahead; uint32_t nmb_maxlookahead; void *nmb_interrupt_func; void *nmb_disableintr_func; void *nmb_enableintr_func; void *nmb_sendpkts_func; void 
*nmb_deferredsend_func; void *nmb_ethrxindicate_func; void *nmb_txrxindicate_func; void *nmb_fddirxindicate_func; void *nmb_ethrxdone_func; void *nmb_txrxdone_func; void *nmb_fddirxcond_func; void *nmb_status_func; void *nmb_statusdone_func; void *nmb_tdcond_func; void *nmb_querydone_func; void *nmb_setdone_func; void *nmb_wantxdone_func; void *nmb_wanrx_func; void *nmb_wanrxdone_func; /* * End of windows-specific portion of miniport block. Everything * below is BSD-specific. */ list_entry nmb_parmlist; ndis_resource_list *nmb_rlist; ndis_status nmb_getstat; nt_kevent nmb_getevent; ndis_status nmb_setstat; nt_kevent nmb_setevent; nt_kevent nmb_resetevent; io_workitem *nmb_returnitem; ndis_miniport_timer *nmb_timerlist; ndis_handle nmb_rxpool; list_entry nmb_returnlist; kspin_lock nmb_returnlock; TAILQ_ENTRY(ndis_miniport_block) link; }; TAILQ_HEAD(nd_head, ndis_miniport_block); typedef ndis_status (*ndis_init_handler)(ndis_status *, uint32_t *, ndis_medium *, uint32_t, ndis_handle, ndis_handle); typedef ndis_status (*ndis_queryinfo_handler)(ndis_handle, ndis_oid, void *, uint32_t, uint32_t *, uint32_t *); typedef ndis_status (*ndis_setinfo_handler)(ndis_handle, ndis_oid, void *, uint32_t, uint32_t *, uint32_t *); typedef ndis_status (*ndis_sendsingle_handler)(ndis_handle, ndis_packet *, uint32_t); typedef ndis_status (*ndis_sendmulti_handler)(ndis_handle, ndis_packet **, uint32_t); typedef void (*ndis_isr_handler)(uint8_t *, uint8_t *, ndis_handle); typedef void (*ndis_interrupt_handler)(ndis_handle); typedef int (*ndis_reset_handler)(uint8_t *, ndis_handle); typedef void (*ndis_halt_handler)(ndis_handle); typedef void (*ndis_return_handler)(ndis_handle, ndis_packet *); typedef void (*ndis_enable_interrupts_handler)(ndis_handle); typedef void (*ndis_disable_interrupts_handler)(ndis_handle); typedef void (*ndis_shutdown_handler)(void *); typedef void (*ndis_pnpevent_handler)(void *, int, void *, uint32_t); typedef void (*ndis_allocdone_handler)(ndis_handle, void *, 
ndis_physaddr *, uint32_t, void *); typedef uint8_t (*ndis_checkforhang_handler)(ndis_handle); typedef ndis_status (*driver_entry)(void *, unicode_string *); extern image_patch_table ndis_functbl[]; #define NDIS_TASKQUEUE 1 #define NDIS_SWI 2 #define NDIS_PSTATE_RUNNING 1 #define NDIS_PSTATE_SLEEPING 2 #define NdisQueryPacket(p, pbufcnt, bufcnt, firstbuf, plen) \ do { \ if ((firstbuf) != NULL) { \ ndis_buffer **_first; \ _first = firstbuf; \ *(_first) = (p)->np_private.npp_head; \ } \ if ((plen) || (bufcnt) || (pbufcnt)) { \ if ((p)->np_private.npp_validcounts == FALSE) { \ ndis_buffer *tmp; \ unsigned int tlen = 0, pcnt = 0; \ unsigned int add = 0; \ unsigned int pktlen, off; \ \ tmp = (p)->np_private.npp_head; \ while (tmp != NULL) { \ off = MmGetMdlByteOffset(tmp); \ pktlen = MmGetMdlByteCount(tmp);\ tlen += pktlen; \ pcnt += \ NDIS_BUFFER_TO_SPAN_PAGES(tmp); \ add++; \ tmp = tmp->mdl_next; \ } \ (p)->np_private.npp_count = add; \ (p)->np_private.npp_totlen = tlen; \ (p)->np_private.npp_physcnt = pcnt; \ (p)->np_private.npp_validcounts = TRUE; \ } \ if (pbufcnt) { \ unsigned int *_pbufcnt; \ _pbufcnt = (pbufcnt); \ *(_pbufcnt) = (p)->np_private.npp_physcnt; \ } \ if (bufcnt) { \ unsigned int *_bufcnt; \ _bufcnt = (bufcnt); \ *(_bufcnt) = (p)->np_private.npp_count; \ } \ if (plen) { \ unsigned int *_plen; \ _plen = (plen); \ *(_plen) = (p)->np_private.npp_totlen; \ } \ } \ } while (0) __BEGIN_DECLS extern int ndis_libinit(void); extern int ndis_libfini(void); extern int ndis_load_driver(vm_offset_t, void *); extern int ndis_unload_driver(void *); extern int ndis_mtop(struct mbuf *, ndis_packet **); extern int ndis_ptom(struct mbuf **, ndis_packet *); extern int ndis_get_info(void *, ndis_oid, void *, int *); extern int ndis_set_info(void *, ndis_oid, void *, int *); extern void *ndis_get_routine_address(struct image_patch_table *, char *); extern int ndis_get_supported_oids(void *, ndis_oid **, int *); extern int ndis_send_packets(void *, ndis_packet **, int); 
extern int ndis_send_packet(void *, ndis_packet *); extern int ndis_convert_res(void *); extern int ndis_alloc_amem(void *); extern void ndis_free_amem(void *); extern void ndis_free_packet(ndis_packet *); extern void ndis_free_bufs(ndis_buffer *); extern int ndis_reset_nic(void *); extern int ndis_halt_nic(void *); extern int ndis_shutdown_nic(void *); extern int ndis_pnpevent_nic(void *, int); extern int ndis_init_nic(void *); extern void ndis_return_packet(struct mbuf *, void *, void *); extern int ndis_init_dma(void *); extern int ndis_destroy_dma(void *); extern int ndis_create_sysctls(void *); extern int ndis_add_sysctl(void *, char *, char *, char *, int); extern int ndis_flush_sysctls(void *); extern uint32_t NdisAddDevice(driver_object *, device_object *); extern void NdisAllocatePacketPool(ndis_status *, ndis_handle *, uint32_t, uint32_t); extern void NdisAllocatePacketPoolEx(ndis_status *, ndis_handle *, uint32_t, uint32_t, uint32_t); extern uint32_t NdisPacketPoolUsage(ndis_handle); extern void NdisFreePacketPool(ndis_handle); extern void NdisAllocatePacket(ndis_status *, ndis_packet **, ndis_handle); extern void NdisFreePacket(ndis_packet *); extern ndis_status NdisScheduleWorkItem(ndis_work_item *); extern void NdisMSleep(uint32_t); __END_DECLS #endif /* _NDIS_VAR_H_ */ Index: head/sys/compat/ndis/ntoskrnl_var.h =================================================================== --- head/sys/compat/ndis/ntoskrnl_var.h (revision 298827) +++ head/sys/compat/ndis/ntoskrnl_var.h (revision 298828) @@ -1,1529 +1,1529 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NTOSKRNL_VAR_H_ #define _NTOSKRNL_VAR_H_ #define MTX_NTOSKRNL_SPIN_LOCK "NDIS thread lock" /* * us_buf is really a wchar_t *, but it's inconvenient to include * all the necessary header goop needed to define it, and it's a * pointer anyway, so for now, just make it a uint16_t *. */ struct unicode_string { uint16_t us_len; uint16_t us_maxlen; uint16_t *us_buf; }; typedef struct unicode_string unicode_string; struct ansi_string { uint16_t as_len; uint16_t as_maxlen; char *as_buf; }; typedef struct ansi_string ansi_string; /* * Windows memory descriptor list. 
In Windows, it's possible for * buffers to be passed between user and kernel contexts without * copying. Buffers may also be allocated in either paged or * non-paged memory regions. An MDL describes the pages of memory * used to contain a particular buffer. Note that a single MDL * may describe a buffer that spans multiple pages. An array of * page addresses appears immediately after the MDL structure itself. * MDLs are therefore implicitly variably sized, even though they * don't look it. * * Note that in FreeBSD, we can take many shortcuts in the way * we handle MDLs because: * * - We are only concerned with pages in kernel context. This means * we will only ever use the kernel's memory map, and remapping * of buffers is never needed. * * - Kernel pages can never be paged out, so we don't have to worry * about whether or not a page is actually mapped before going to * touch it. */ struct mdl { struct mdl *mdl_next; uint16_t mdl_size; uint16_t mdl_flags; void *mdl_process; void *mdl_mappedsystemva; void *mdl_startva; uint32_t mdl_bytecount; uint32_t mdl_byteoffset; }; typedef struct mdl mdl, ndis_buffer; /* MDL flags */ #define MDL_MAPPED_TO_SYSTEM_VA 0x0001 #define MDL_PAGES_LOCKED 0x0002 #define MDL_SOURCE_IS_NONPAGED_POOL 0x0004 #define MDL_ALLOCATED_FIXED_SIZE 0x0008 #define MDL_PARTIAL 0x0010 #define MDL_PARTIAL_HAS_BEEN_MAPPED 0x0020 #define MDL_IO_PAGE_READ 0x0040 #define MDL_WRITE_OPERATION 0x0080 #define MDL_PARENT_MAPPED_SYSTEM_VA 0x0100 #define MDL_FREE_EXTRA_PTES 0x0200 #define MDL_IO_SPACE 0x0800 #define MDL_NETWORK_HEADER 0x1000 #define MDL_MAPPING_CAN_FAIL 0x2000 #define MDL_ALLOCATED_MUST_SUCCEED 0x4000 #define MDL_ZONE_ALLOCED 0x8000 /* BSD private */ #define MDL_ZONE_PAGES 16 #define MDL_ZONE_SIZE (sizeof(mdl) + (sizeof(vm_offset_t) * MDL_ZONE_PAGES)) /* Note: assumes x86 page size of 4K. */ #ifndef PAGE_SHIFT #if PAGE_SIZE == 4096 #define PAGE_SHIFT 12 #elif PAGE_SIZE == 8192 #define PAGE_SHIFT 13 #else #error PAGE_SHIFT undefined! 
#endif #endif #define SPAN_PAGES(ptr, len) \ ((uint32_t)((((uintptr_t)(ptr) & (PAGE_SIZE - 1)) + \ (len) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) #define PAGE_ALIGN(ptr) \ ((void *)((uintptr_t)(ptr) & ~(PAGE_SIZE - 1))) #define BYTE_OFFSET(ptr) \ ((uint32_t)((uintptr_t)(ptr) & (PAGE_SIZE - 1))) #define MDL_PAGES(m) (vm_offset_t *)(m + 1) #define MmInitializeMdl(b, baseva, len) \ (b)->mdl_next = NULL; \ (b)->mdl_size = (uint16_t)(sizeof(mdl) + \ (sizeof(vm_offset_t) * SPAN_PAGES((baseva), (len)))); \ (b)->mdl_flags = 0; \ (b)->mdl_startva = (void *)PAGE_ALIGN((baseva)); \ (b)->mdl_byteoffset = BYTE_OFFSET((baseva)); \ (b)->mdl_bytecount = (uint32_t)(len); #define MmGetMdlByteOffset(mdl) ((mdl)->mdl_byteoffset) #define MmGetMdlByteCount(mdl) ((mdl)->mdl_bytecount) #define MmGetMdlVirtualAddress(mdl) \ ((void *)((char *)((mdl)->mdl_startva) + (mdl)->mdl_byteoffset)) #define MmGetMdlStartVa(mdl) ((mdl)->mdl_startva) #define MmGetMdlPfnArray(mdl) MDL_PAGES(mdl) #define WDM_MAJOR 1 #define WDM_MINOR_WIN98 0x00 #define WDM_MINOR_WINME 0x05 #define WDM_MINOR_WIN2000 0x10 #define WDM_MINOR_WINXP 0x20 #define WDM_MINOR_WIN2003 0x30 enum nt_caching_type { MmNonCached = 0, MmCached = 1, MmWriteCombined = 2, MmHardwareCoherentCached = 3, MmNonCachedUnordered = 4, MmUSWCCached = 5, MmMaximumCacheType = 6 }; /*- * The ndis_kspin_lock type is called KSPIN_LOCK in MS-Windows. * According to the Windows DDK header files, KSPIN_LOCK is defined like this: * typedef ULONG_PTR KSPIN_LOCK; * * From basetsd.h (SDK, Feb. 2003): * typedef [public] unsigned __int3264 ULONG_PTR, *PULONG_PTR; * typedef unsigned __int64 ULONG_PTR, *PULONG_PTR; * typedef _W64 unsigned long ULONG_PTR, *PULONG_PTR; * * The keyword __int3264 specifies an integral type that has the following * properties: * + It is 32-bit on 32-bit platforms * + It is 64-bit on 64-bit platforms * + It is 32-bit on the wire for backward compatibility. 
* It gets truncated on the sending side and extended appropriately * (signed or unsigned) on the receiving side. * * Thus register_t seems the proper mapping onto FreeBSD for spin locks. */ typedef register_t kspin_lock; struct slist_entry { struct slist_entry *sl_next; }; typedef struct slist_entry slist_entry; union slist_header { uint64_t slh_align; struct { struct slist_entry *slh_next; uint16_t slh_depth; uint16_t slh_seq; } slh_list; }; typedef union slist_header slist_header; struct list_entry { struct list_entry *nle_flink; struct list_entry *nle_blink; }; typedef struct list_entry list_entry; #define InitializeListHead(l) \ (l)->nle_flink = (l)->nle_blink = (l) #define IsListEmpty(h) \ ((h)->nle_flink == (h)) #define RemoveEntryList(e) \ do { \ list_entry *b; \ list_entry *f; \ \ f = (e)->nle_flink; \ b = (e)->nle_blink; \ b->nle_flink = f; \ f->nle_blink = b; \ } while (0) /* These two have to be inlined since they return things. */ static __inline__ list_entry * RemoveHeadList(list_entry *l) { list_entry *f; list_entry *e; e = l->nle_flink; f = e->nle_flink; l->nle_flink = f; f->nle_blink = l; return (e); } static __inline__ list_entry * RemoveTailList(list_entry *l) { list_entry *b; list_entry *e; e = l->nle_blink; b = e->nle_blink; l->nle_blink = b; b->nle_flink = l; return (e); } #define InsertTailList(l, e) \ do { \ list_entry *b; \ \ b = l->nle_blink; \ e->nle_flink = l; \ e->nle_blink = b; \ b->nle_flink = (e); \ l->nle_blink = (e); \ } while (0) #define InsertHeadList(l, e) \ do { \ list_entry *f; \ \ f = l->nle_flink; \ e->nle_flink = f; \ e->nle_blink = l; \ f->nle_blink = e; \ l->nle_flink = e; \ } while (0) #define CONTAINING_RECORD(addr, type, field) \ ((type *)((vm_offset_t)(addr) - (vm_offset_t)(&((type *)0)->field))) struct nt_dispatch_header { uint8_t dh_type; uint8_t dh_abs; uint8_t dh_size; uint8_t dh_inserted; int32_t dh_sigstate; list_entry dh_waitlisthead; }; typedef struct nt_dispatch_header nt_dispatch_header; /* Dispatcher object 
types */ #define DISP_TYPE_NOTIFICATION_EVENT 0 /* KEVENT */ #define DISP_TYPE_SYNCHRONIZATION_EVENT 1 /* KEVENT */ #define DISP_TYPE_MUTANT 2 /* KMUTANT/KMUTEX */ #define DISP_TYPE_PROCESS 3 /* KPROCESS */ #define DISP_TYPE_QUEUE 4 /* KQUEUE */ #define DISP_TYPE_SEMAPHORE 5 /* KSEMAPHORE */ #define DISP_TYPE_THREAD 6 /* KTHREAD */ #define DISP_TYPE_NOTIFICATION_TIMER 8 /* KTIMER */ #define DISP_TYPE_SYNCHRONIZATION_TIMER 9 /* KTIMER */ #define OTYPE_EVENT 0 #define OTYPE_MUTEX 1 #define OTYPE_THREAD 2 #define OTYPE_TIMER 3 /* Windows dispatcher levels. */ #define PASSIVE_LEVEL 0 #define LOW_LEVEL 0 #define APC_LEVEL 1 #define DISPATCH_LEVEL 2 #define DEVICE_LEVEL (DISPATCH_LEVEL + 1) #define PROFILE_LEVEL 27 #define CLOCK1_LEVEL 28 #define CLOCK2_LEVEL 28 #define IPI_LEVEL 29 #define POWER_LEVEL 30 #define HIGH_LEVEL 31 #define SYNC_LEVEL_UP DISPATCH_LEVEL #define SYNC_LEVEL_MP (IPI_LEVEL - 1) #define AT_PASSIVE_LEVEL(td) \ ((td)->td_proc->p_flag & P_KPROC == FALSE) #define AT_DISPATCH_LEVEL(td) \ ((td)->td_base_pri == PI_REALTIME) #define AT_DIRQL_LEVEL(td) \ ((td)->td_priority <= PI_NET) #define AT_HIGH_LEVEL(td) \ ((td)->td_critnest != 0) struct nt_objref { nt_dispatch_header no_dh; void *no_obj; TAILQ_ENTRY(nt_objref) link; }; TAILQ_HEAD(nt_objref_head, nt_objref); typedef struct nt_objref nt_objref; #define EVENT_TYPE_NOTIFY 0 #define EVENT_TYPE_SYNC 1 /* * We need to use the timeout()/untimeout() API for ktimers * since timers can be initialized, but not destroyed (so * malloc()ing our own callout structures would mean a leak, * since there'd be no way to free() them). This means we * need to use struct callout_handle, which is really just a * pointer. To make it easier to deal with, we use a union * to overlay the callout_handle over the k_timerlistentry. * The latter is a list_entry, which is two pointers, so * there's enough space available to hide a callout_handle * there. 
struct ktimer { nt_dispatch_header k_header; uint64_t k_duetime; union { list_entry k_timerlistentry; struct callout *k_callout; } u; void *k_dpc; uint32_t k_period; }; #define k_timerlistentry u.k_timerlistentry #define k_callout u.k_callout typedef struct ktimer ktimer; struct nt_kevent { nt_dispatch_header k_header; }; typedef struct nt_kevent nt_kevent; /* Kernel deferred procedure call (i.e. timer callback) */ struct kdpc; typedef void (*kdpc_func)(struct kdpc *, void *, void *, void *); struct kdpc { uint16_t k_type; uint8_t k_num; /* CPU number */ uint8_t k_importance; /* priority */ list_entry k_dpclistentry; void *k_deferedfunc; void *k_deferredctx; void *k_sysarg1; void *k_sysarg2; void *k_lock; }; #define KDPC_IMPORTANCE_LOW 0 #define KDPC_IMPORTANCE_MEDIUM 1 #define KDPC_IMPORTANCE_HIGH 2 #define KDPC_CPU_DEFAULT 255 typedef struct kdpc kdpc; /* * Note: the acquisition count is BSD-specific. The Microsoft * documentation says that mutexes can be acquired recursively * by a given thread, but that you must release the mutex as * many times as you acquired it before it will be set to the * signalled state (i.e. before any other threads waiting on * the object will be woken up). However the Windows KMUTANT * structure has no field for keeping track of the number of * acquisitions, so we need to add one ourselves. As long as * driver code treats the mutex as opaque, we should be ok. 
*/ struct kmutant { nt_dispatch_header km_header; list_entry km_listentry; void *km_ownerthread; uint8_t km_abandoned; uint8_t km_apcdisable; }; typedef struct kmutant kmutant; #define LOOKASIDE_DEPTH 256 struct general_lookaside { slist_header gl_listhead; uint16_t gl_depth; uint16_t gl_maxdepth; uint32_t gl_totallocs; union { uint32_t gl_allocmisses; uint32_t gl_allochits; } u_a; uint32_t gl_totalfrees; union { uint32_t gl_freemisses; uint32_t gl_freehits; } u_m; uint32_t gl_type; uint32_t gl_tag; uint32_t gl_size; void *gl_allocfunc; void *gl_freefunc; list_entry gl_listent; uint32_t gl_lasttotallocs; union { uint32_t gl_lastallocmisses; uint32_t gl_lastallochits; } u_l; uint32_t gl_rsvd[2]; }; typedef struct general_lookaside general_lookaside; struct npaged_lookaside_list { general_lookaside nll_l; #ifdef __i386__ kspin_lock nll_obsoletelock; #endif }; typedef struct npaged_lookaside_list npaged_lookaside_list; typedef struct npaged_lookaside_list paged_lookaside_list; typedef void * (*lookaside_alloc_func)(uint32_t, size_t, uint32_t); typedef void (*lookaside_free_func)(void *); struct irp; struct kdevice_qentry { list_entry kqe_devlistent; uint32_t kqe_sortkey; uint8_t kqe_inserted; }; typedef struct kdevice_qentry kdevice_qentry; struct kdevice_queue { uint16_t kq_type; uint16_t kq_size; list_entry kq_devlisthead; kspin_lock kq_lock; uint8_t kq_busy; }; typedef struct kdevice_queue kdevice_queue; struct wait_ctx_block { kdevice_qentry wcb_waitqueue; void *wcb_devfunc; void *wcb_devctx; uint32_t wcb_mapregcnt; void *wcb_devobj; void *wcb_curirp; void *wcb_bufchaindpc; }; typedef struct wait_ctx_block wait_ctx_block; struct wait_block { list_entry wb_waitlist; void *wb_kthread; nt_dispatch_header *wb_object; struct wait_block *wb_next; #ifdef notdef uint16_t wb_waitkey; uint16_t wb_waittype; #endif uint8_t wb_waitkey; uint8_t wb_waittype; uint8_t wb_awakened; uint8_t wb_oldpri; }; typedef struct wait_block wait_block; #define wb_ext wb_kthread #define 
THREAD_WAIT_OBJECTS 3 #define MAX_WAIT_OBJECTS 64 #define WAITTYPE_ALL 0 #define WAITTYPE_ANY 1 #define WAITKEY_VALID 0x8000 /* kthread priority */ #define LOW_PRIORITY 0 #define LOW_REALTIME_PRIORITY 16 #define HIGH_PRIORITY 31 struct thread_context { void *tc_thrctx; void *tc_thrfunc; }; typedef struct thread_context thread_context; /* Forward declaration */ struct driver_object; struct devobj_extension; struct driver_extension { struct driver_object *dre_driverobj; void *dre_adddevicefunc; uint32_t dre_reinitcnt; unicode_string dre_srvname; /* * Drivers are allowed to add one or more custom extensions * to the driver object, but there's no special pointer * for them. Hang them off here for now. */ list_entry dre_usrext; }; typedef struct driver_extension driver_extension; struct custom_extension { list_entry ce_list; void *ce_clid; }; typedef struct custom_extension custom_extension; /* * The KINTERRUPT structure in Windows is opaque to drivers. * We define our own custom version with things we need. 
*/ struct kinterrupt { list_entry ki_list; device_t ki_dev; int ki_rid; void *ki_cookie; struct resource *ki_irq; kspin_lock ki_lock_priv; kspin_lock *ki_lock; void *ki_svcfunc; void *ki_svcctx; }; typedef struct kinterrupt kinterrupt; struct ksystem_time { uint32_t low_part; int32_t high1_time; int32_t high2_time; }; enum nt_product_type { NT_PRODUCT_WIN_NT = 1, NT_PRODUCT_LAN_MAN_NT, NT_PRODUCT_SERVER }; enum alt_arch_type { STANDARD_DESIGN, NEC98x86, END_ALTERNATIVES }; struct kuser_shared_data { uint32_t tick_count; uint32_t tick_count_multiplier; volatile struct ksystem_time interrupt_time; volatile struct ksystem_time system_time; volatile struct ksystem_time time_zone_bias; uint16_t image_number_low; uint16_t image_number_high; int16_t nt_system_root[260]; uint32_t max_stack_trace_depth; uint32_t crypto_exponent; uint32_t time_zone_id; uint32_t large_page_min; uint32_t reserved2[7]; enum nt_product_type nt_product_type; uint8_t product_type_is_valid; uint32_t nt_major_version; uint32_t nt_minor_version; uint8_t processor_features[64]; uint32_t reserved1; uint32_t reserved3; volatile uint32_t time_slip; enum alt_arch_type alt_arch_type; int64_t system_expiration_date; uint32_t suite_mask; uint8_t kdbg_enabled; volatile uint32_t active_console; volatile uint32_t dismount_count; uint32_t com_plus_package; uint32_t last_system_rit_event_tick_count; uint32_t num_phys_pages; uint8_t safe_boot_mode; uint32_t trace_log; uint64_t fill0; uint64_t sys_call[4]; union { volatile struct ksystem_time tick_count; volatile uint64_t tick_count_quad; } tick; }; /* * In Windows, there are Physical Device Objects (PDOs) and * Functional Device Objects (FDOs). Physical Device Objects are * created and maintained by bus drivers. For example, the PCI * bus driver might detect two PCI ethernet cards on a given * bus. The PCI bus driver will then allocate two device_objects - * for its own internal bookeeping purposes. This is analagous + * for its own internal bookeeping purposes. 
This is analogous * to the device_t that the FreeBSD PCI code allocates and passes * into each PCI driver's probe and attach routines. * * When an ethernet driver claims one of the ethernet cards * on the bus, it will create its own device_object. This is - * the Functional Device Object. This object is analagous to the + * the Functional Device Object. This object is analogous to the * device-specific softc structure. */ struct device_object { uint16_t do_type; uint16_t do_size; uint32_t do_refcnt; struct driver_object *do_drvobj; struct device_object *do_nextdev; struct device_object *do_attacheddev; struct irp *do_currirp; void *do_iotimer; uint32_t do_flags; uint32_t do_characteristics; void *do_vpb; void *do_devext; uint32_t do_devtype; uint8_t do_stacksize; union { list_entry do_listent; wait_ctx_block do_wcb; } queue; uint32_t do_alignreq; kdevice_queue do_devqueue; struct kdpc do_dpc; uint32_t do_activethreads; void *do_securitydesc; struct nt_kevent do_devlock; uint16_t do_sectorsz; uint16_t do_spare1; struct devobj_extension *do_devobj_ext; void *do_rsvd; }; typedef struct device_object device_object; struct devobj_extension { uint16_t dve_type; uint16_t dve_size; device_object *dve_devobj; }; typedef struct devobj_extension devobj_extension; /* Device object flags */ #define DO_VERIFY_VOLUME 0x00000002 #define DO_BUFFERED_IO 0x00000004 #define DO_EXCLUSIVE 0x00000008 #define DO_DIRECT_IO 0x00000010 #define DO_MAP_IO_BUFFER 0x00000020 #define DO_DEVICE_HAS_NAME 0x00000040 #define DO_DEVICE_INITIALIZING 0x00000080 #define DO_SYSTEM_BOOT_PARTITION 0x00000100 #define DO_LONG_TERM_REQUESTS 0x00000200 #define DO_NEVER_LAST_DEVICE 0x00000400 #define DO_SHUTDOWN_REGISTERED 0x00000800 #define DO_BUS_ENUMERATED_DEVICE 0x00001000 #define DO_POWER_PAGABLE 0x00002000 #define DO_POWER_INRUSH 0x00004000 #define DO_LOW_PRIORITY_FILESYSTEM 0x00010000 /* Priority boosts */ #define IO_NO_INCREMENT 0 #define IO_CD_ROM_INCREMENT 1 #define IO_DISK_INCREMENT 1 #define 
IO_KEYBOARD_INCREMENT 6 #define IO_MAILSLOT_INCREMENT 2 #define IO_MOUSE_INCREMENT 6 #define IO_NAMED_PIPE_INCREMENT 2 #define IO_NETWORK_INCREMENT 2 #define IO_PARALLEL_INCREMENT 1 #define IO_SERIAL_INCREMENT 2 #define IO_SOUND_INCREMENT 8 #define IO_VIDEO_INCREMENT 1 /* IRP major codes */ #define IRP_MJ_CREATE 0x00 #define IRP_MJ_CREATE_NAMED_PIPE 0x01 #define IRP_MJ_CLOSE 0x02 #define IRP_MJ_READ 0x03 #define IRP_MJ_WRITE 0x04 #define IRP_MJ_QUERY_INFORMATION 0x05 #define IRP_MJ_SET_INFORMATION 0x06 #define IRP_MJ_QUERY_EA 0x07 #define IRP_MJ_SET_EA 0x08 #define IRP_MJ_FLUSH_BUFFERS 0x09 #define IRP_MJ_QUERY_VOLUME_INFORMATION 0x0a #define IRP_MJ_SET_VOLUME_INFORMATION 0x0b #define IRP_MJ_DIRECTORY_CONTROL 0x0c #define IRP_MJ_FILE_SYSTEM_CONTROL 0x0d #define IRP_MJ_DEVICE_CONTROL 0x0e #define IRP_MJ_INTERNAL_DEVICE_CONTROL 0x0f #define IRP_MJ_SHUTDOWN 0x10 #define IRP_MJ_LOCK_CONTROL 0x11 #define IRP_MJ_CLEANUP 0x12 #define IRP_MJ_CREATE_MAILSLOT 0x13 #define IRP_MJ_QUERY_SECURITY 0x14 #define IRP_MJ_SET_SECURITY 0x15 #define IRP_MJ_POWER 0x16 #define IRP_MJ_SYSTEM_CONTROL 0x17 #define IRP_MJ_DEVICE_CHANGE 0x18 #define IRP_MJ_QUERY_QUOTA 0x19 #define IRP_MJ_SET_QUOTA 0x1a #define IRP_MJ_PNP 0x1b #define IRP_MJ_PNP_POWER IRP_MJ_PNP // Obsolete.... 
#define IRP_MJ_MAXIMUM_FUNCTION 0x1b #define IRP_MJ_SCSI IRP_MJ_INTERNAL_DEVICE_CONTROL /* IRP minor codes */ #define IRP_MN_QUERY_DIRECTORY 0x01 #define IRP_MN_NOTIFY_CHANGE_DIRECTORY 0x02 #define IRP_MN_USER_FS_REQUEST 0x00 #define IRP_MN_MOUNT_VOLUME 0x01 #define IRP_MN_VERIFY_VOLUME 0x02 #define IRP_MN_LOAD_FILE_SYSTEM 0x03 #define IRP_MN_TRACK_LINK 0x04 #define IRP_MN_KERNEL_CALL 0x04 #define IRP_MN_LOCK 0x01 #define IRP_MN_UNLOCK_SINGLE 0x02 #define IRP_MN_UNLOCK_ALL 0x03 #define IRP_MN_UNLOCK_ALL_BY_KEY 0x04 #define IRP_MN_NORMAL 0x00 #define IRP_MN_DPC 0x01 #define IRP_MN_MDL 0x02 #define IRP_MN_COMPLETE 0x04 #define IRP_MN_COMPRESSED 0x08 #define IRP_MN_MDL_DPC (IRP_MN_MDL | IRP_MN_DPC) #define IRP_MN_COMPLETE_MDL (IRP_MN_COMPLETE | IRP_MN_MDL) #define IRP_MN_COMPLETE_MDL_DPC (IRP_MN_COMPLETE_MDL | IRP_MN_DPC) #define IRP_MN_SCSI_CLASS 0x01 #define IRP_MN_START_DEVICE 0x00 #define IRP_MN_QUERY_REMOVE_DEVICE 0x01 #define IRP_MN_REMOVE_DEVICE 0x02 #define IRP_MN_CANCEL_REMOVE_DEVICE 0x03 #define IRP_MN_STOP_DEVICE 0x04 #define IRP_MN_QUERY_STOP_DEVICE 0x05 #define IRP_MN_CANCEL_STOP_DEVICE 0x06 #define IRP_MN_QUERY_DEVICE_RELATIONS 0x07 #define IRP_MN_QUERY_INTERFACE 0x08 #define IRP_MN_QUERY_CAPABILITIES 0x09 #define IRP_MN_QUERY_RESOURCES 0x0A #define IRP_MN_QUERY_RESOURCE_REQUIREMENTS 0x0B #define IRP_MN_QUERY_DEVICE_TEXT 0x0C #define IRP_MN_FILTER_RESOURCE_REQUIREMENTS 0x0D #define IRP_MN_READ_CONFIG 0x0F #define IRP_MN_WRITE_CONFIG 0x10 #define IRP_MN_EJECT 0x11 #define IRP_MN_SET_LOCK 0x12 #define IRP_MN_QUERY_ID 0x13 #define IRP_MN_QUERY_PNP_DEVICE_STATE 0x14 #define IRP_MN_QUERY_BUS_INFORMATION 0x15 #define IRP_MN_DEVICE_USAGE_NOTIFICATION 0x16 #define IRP_MN_SURPRISE_REMOVAL 0x17 #define IRP_MN_QUERY_LEGACY_BUS_INFORMATION 0x18 #define IRP_MN_WAIT_WAKE 0x00 #define IRP_MN_POWER_SEQUENCE 0x01 #define IRP_MN_SET_POWER 0x02 #define IRP_MN_QUERY_POWER 0x03 #define IRP_MN_QUERY_ALL_DATA 0x00 #define IRP_MN_QUERY_SINGLE_INSTANCE 0x01 #define 
IRP_MN_CHANGE_SINGLE_INSTANCE 0x02 #define IRP_MN_CHANGE_SINGLE_ITEM 0x03 #define IRP_MN_ENABLE_EVENTS 0x04 #define IRP_MN_DISABLE_EVENTS 0x05 #define IRP_MN_ENABLE_COLLECTION 0x06 #define IRP_MN_DISABLE_COLLECTION 0x07 #define IRP_MN_REGINFO 0x08 #define IRP_MN_EXECUTE_METHOD 0x09 #define IRP_MN_REGINFO_EX 0x0b /* IRP flags */ #define IRP_NOCACHE 0x00000001 #define IRP_PAGING_IO 0x00000002 #define IRP_MOUNT_COMPLETION 0x00000002 #define IRP_SYNCHRONOUS_API 0x00000004 #define IRP_ASSOCIATED_IRP 0x00000008 #define IRP_BUFFERED_IO 0x00000010 #define IRP_DEALLOCATE_BUFFER 0x00000020 #define IRP_INPUT_OPERATION 0x00000040 #define IRP_SYNCHRONOUS_PAGING_IO 0x00000040 #define IRP_CREATE_OPERATION 0x00000080 #define IRP_READ_OPERATION 0x00000100 #define IRP_WRITE_OPERATION 0x00000200 #define IRP_CLOSE_OPERATION 0x00000400 #define IRP_DEFER_IO_COMPLETION 0x00000800 #define IRP_OB_QUERY_NAME 0x00001000 #define IRP_HOLD_DEVICE_QUEUE 0x00002000 #define IRP_RETRY_IO_COMPLETION 0x00004000 #define IRP_CLASS_CACHE_OPERATION 0x00008000 #define IRP_SET_USER_EVENT IRP_CLOSE_OPERATION /* IRP I/O control flags */ #define IRP_QUOTA_CHARGED 0x01 #define IRP_ALLOCATED_MUST_SUCCEED 0x02 #define IRP_ALLOCATED_FIXED_SIZE 0x04 #define IRP_LOOKASIDE_ALLOCATION 0x08 /* I/O method types */ #define METHOD_BUFFERED 0 #define METHOD_IN_DIRECT 1 #define METHOD_OUT_DIRECT 2 #define METHOD_NEITHER 3 /* File access types */ #define FILE_ANY_ACCESS 0x0000 #define FILE_SPECIAL_ACCESS FILE_ANY_ACCESS #define FILE_READ_ACCESS 0x0001 #define FILE_WRITE_ACCESS 0x0002 /* Recover I/O access method from IOCTL code. */ #define IO_METHOD(x) ((x) & 0xFFFFFFFC) /* Recover function code from IOCTL code */ #define IO_FUNC(x) (((x) & 0x7FFC) >> 2) /* Macro to construct an IOCTL code. 
*/ #define IOCTL_CODE(dev, func, iomethod, acc) \ ((dev) << 16) | (acc << 14) | (func << 2) | (iomethod)) struct io_status_block { union { uint32_t isb_status; void *isb_ptr; } u; register_t isb_info; }; #define isb_status u.isb_status #define isb_ptr u.isb_ptr typedef struct io_status_block io_status_block; struct kapc { uint16_t apc_type; uint16_t apc_size; uint32_t apc_spare0; void *apc_thread; list_entry apc_list; void *apc_kernfunc; void *apc_rundownfunc; void *apc_normalfunc; void *apc_normctx; void *apc_sysarg1; void *apc_sysarg2; uint8_t apc_stateidx; uint8_t apc_cpumode; uint8_t apc_inserted; }; typedef struct kapc kapc; typedef uint32_t (*completion_func)(device_object *, struct irp *, void *); typedef uint32_t (*cancel_func)(device_object *, struct irp *); struct io_stack_location { uint8_t isl_major; uint8_t isl_minor; uint8_t isl_flags; uint8_t isl_ctl; /* * There's a big-ass union here in the actual Windows - * definition of the stucture, but it contains stuff + * definition of the structure, but it contains stuff * that doesn't really apply to BSD, and defining it * all properly would require duplicating over a dozen * other structures that we'll never use. Since the * io_stack_location structure is opaque to drivers * anyway, I'm not going to bother with the extra crap. 
*/ union { struct { uint32_t isl_len; uint32_t *isl_key; uint64_t isl_byteoff; } isl_read; struct { uint32_t isl_len; uint32_t *isl_key; uint64_t isl_byteoff; } isl_write; struct { uint32_t isl_obuflen; uint32_t isl_ibuflen; uint32_t isl_iocode; void *isl_type3ibuf; } isl_ioctl; struct { void *isl_arg1; void *isl_arg2; void *isl_arg3; void *isl_arg4; } isl_others; } isl_parameters __attribute__((packed)); void *isl_devobj; void *isl_fileobj; completion_func isl_completionfunc; void *isl_completionctx; }; typedef struct io_stack_location io_stack_location; /* Stack location control flags */ #define SL_PENDING_RETURNED 0x01 #define SL_INVOKE_ON_CANCEL 0x20 #define SL_INVOKE_ON_SUCCESS 0x40 #define SL_INVOKE_ON_ERROR 0x80 struct irp { uint16_t irp_type; uint16_t irp_size; mdl *irp_mdl; uint32_t irp_flags; union { struct irp *irp_master; uint32_t irp_irpcnt; void *irp_sysbuf; } irp_assoc; list_entry irp_thlist; io_status_block irp_iostat; uint8_t irp_reqmode; uint8_t irp_pendingreturned; uint8_t irp_stackcnt; uint8_t irp_currentstackloc; uint8_t irp_cancel; uint8_t irp_cancelirql; uint8_t irp_apcenv; uint8_t irp_allocflags; io_status_block *irp_usriostat; nt_kevent *irp_usrevent; union { struct { void *irp_apcfunc; void *irp_apcctx; } irp_asyncparms; uint64_t irp_allocsz; } irp_overlay; cancel_func irp_cancelfunc; void *irp_userbuf; /* Windows kernel info */ union { struct { union { kdevice_qentry irp_dqe; struct { void *irp_drvctx[4]; } s1; } u1; void *irp_thread; char *irp_auxbuf; struct { list_entry irp_list; union { io_stack_location *irp_csl; uint32_t irp_pkttype; } u2; } s2; void *irp_fileobj; } irp_overlay; union { kapc irp_apc; struct { void *irp_ep; void *irp_dev; } irp_usb; } irp_misc; void *irp_compkey; } irp_tail; }; #define irp_csl s2.u2.irp_csl #define irp_pkttype s2.u2.irp_pkttype #define IRP_NDIS_DEV(irp) (irp)->irp_tail.irp_misc.irp_usb.irp_dev #define IRP_NDISUSB_EP(irp) (irp)->irp_tail.irp_misc.irp_usb.irp_ep typedef struct irp irp; #define 
InterlockedExchangePointer(dst, val) \ (void *)InterlockedExchange((uint32_t *)(dst), (uintptr_t)(val)) #define IoSizeOfIrp(ssize) \ ((uint16_t) (sizeof(irp) + ((ssize) * (sizeof(io_stack_location))))) #define IoSetCancelRoutine(irp, func) \ (cancel_func)InterlockedExchangePointer( \ (void *)&(ip)->irp_cancelfunc, (void *)(func)) #define IoSetCancelValue(irp, val) \ (u_long)InterlockedExchangePointer( \ (void *)&(ip)->irp_cancel, (void *)(val)) #define IoGetCurrentIrpStackLocation(irp) \ (irp)->irp_tail.irp_overlay.irp_csl #define IoGetNextIrpStackLocation(irp) \ ((irp)->irp_tail.irp_overlay.irp_csl - 1) #define IoSetNextIrpStackLocation(irp) \ do { \ irp->irp_currentstackloc--; \ irp->irp_tail.irp_overlay.irp_csl--; \ } while(0) #define IoSetCompletionRoutine(irp, func, ctx, ok, err, cancel) \ do { \ io_stack_location *s; \ s = IoGetNextIrpStackLocation((irp)); \ s->isl_completionfunc = (func); \ s->isl_completionctx = (ctx); \ s->isl_ctl = 0; \ if (ok) s->isl_ctl = SL_INVOKE_ON_SUCCESS; \ if (err) s->isl_ctl |= SL_INVOKE_ON_ERROR; \ if (cancel) s->isl_ctl |= SL_INVOKE_ON_CANCEL; \ } while(0) #define IoMarkIrpPending(irp) \ IoGetCurrentIrpStackLocation(irp)->isl_ctl |= SL_PENDING_RETURNED #define IoUnmarkIrpPending(irp) \ IoGetCurrentIrpStackLocation(irp)->isl_ctl &= ~SL_PENDING_RETURNED #define IoCopyCurrentIrpStackLocationToNext(irp) \ do { \ io_stack_location *src, *dst; \ src = IoGetCurrentIrpStackLocation(irp); \ dst = IoGetNextIrpStackLocation(irp); \ bcopy((char *)src, (char *)dst, \ offsetof(io_stack_location, isl_completionfunc)); \ } while(0) #define IoSkipCurrentIrpStackLocation(irp) \ do { \ (irp)->irp_currentstackloc++; \ (irp)->irp_tail.irp_overlay.irp_csl++; \ } while(0) #define IoInitializeDpcRequest(dobj, dpcfunc) \ KeInitializeDpc(&(dobj)->do_dpc, dpcfunc, dobj) #define IoRequestDpc(dobj, irp, ctx) \ KeInsertQueueDpc(&(dobj)->do_dpc, irp, ctx) typedef uint32_t (*driver_dispatch)(device_object *, irp *); /* * The driver_object is allocated once 
for each driver that's loaded * into the system. A new one is allocated for each driver and * populated a bit via the driver's DriverEntry function. * In general, a Windows DriverEntry() function will provide a pointer * to its AddDevice() method and set up the dispatch table. * For NDIS drivers, this is all done behind the scenes in the * NdisInitializeWrapper() and/or NdisMRegisterMiniport() routines. */ struct driver_object { uint16_t dro_type; uint16_t dro_size; device_object *dro_devobj; uint32_t dro_flags; void *dro_driverstart; uint32_t dro_driversize; void *dro_driversection; driver_extension *dro_driverext; unicode_string dro_drivername; unicode_string *dro_hwdb; void *dro_pfastiodispatch; void *dro_driverinitfunc; void *dro_driverstartiofunc; void *dro_driverunloadfunc; driver_dispatch dro_dispatch[IRP_MJ_MAXIMUM_FUNCTION + 1]; }; typedef struct driver_object driver_object; #define DEVPROP_DEVICE_DESCRIPTION 0x00000000 #define DEVPROP_HARDWARE_ID 0x00000001 #define DEVPROP_COMPATIBLE_IDS 0x00000002 #define DEVPROP_BOOTCONF 0x00000003 #define DEVPROP_BOOTCONF_TRANSLATED 0x00000004 #define DEVPROP_CLASS_NAME 0x00000005 #define DEVPROP_CLASS_GUID 0x00000006 #define DEVPROP_DRIVER_KEYNAME 0x00000007 #define DEVPROP_MANUFACTURER 0x00000008 #define DEVPROP_FRIENDLYNAME 0x00000009 #define DEVPROP_LOCATION_INFO 0x0000000A #define DEVPROP_PHYSDEV_NAME 0x0000000B #define DEVPROP_BUSTYPE_GUID 0x0000000C #define DEVPROP_LEGACY_BUSTYPE 0x0000000D #define DEVPROP_BUS_NUMBER 0x0000000E #define DEVPROP_ENUMERATOR_NAME 0x0000000F #define DEVPROP_ADDRESS 0x00000010 #define DEVPROP_UINUMBER 0x00000011 #define DEVPROP_INSTALL_STATE 0x00000012 #define DEVPROP_REMOVAL_POLICY 0x00000013 /* Various supported device types (used with IoCreateDevice()) */ #define FILE_DEVICE_BEEP 0x00000001 #define FILE_DEVICE_CD_ROM 0x00000002 #define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003 #define FILE_DEVICE_CONTROLLER 0x00000004 #define FILE_DEVICE_DATALINK 0x00000005 #define FILE_DEVICE_DFS 
0x00000006 #define FILE_DEVICE_DISK 0x00000007 #define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008 #define FILE_DEVICE_FILE_SYSTEM 0x00000009 #define FILE_DEVICE_INPORT_PORT 0x0000000A #define FILE_DEVICE_KEYBOARD 0x0000000B #define FILE_DEVICE_MAILSLOT 0x0000000C #define FILE_DEVICE_MIDI_IN 0x0000000D #define FILE_DEVICE_MIDI_OUT 0x0000000E #define FILE_DEVICE_MOUSE 0x0000000F #define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010 #define FILE_DEVICE_NAMED_PIPE 0x00000011 #define FILE_DEVICE_NETWORK 0x00000012 #define FILE_DEVICE_NETWORK_BROWSER 0x00000013 #define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014 #define FILE_DEVICE_NULL 0x00000015 #define FILE_DEVICE_PARALLEL_PORT 0x00000016 #define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017 #define FILE_DEVICE_PRINTER 0x00000018 #define FILE_DEVICE_SCANNER 0x00000019 #define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001A #define FILE_DEVICE_SERIAL_PORT 0x0000001B #define FILE_DEVICE_SCREEN 0x0000001C #define FILE_DEVICE_SOUND 0x0000001D #define FILE_DEVICE_STREAMS 0x0000001E #define FILE_DEVICE_TAPE 0x0000001F #define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020 #define FILE_DEVICE_TRANSPORT 0x00000021 #define FILE_DEVICE_UNKNOWN 0x00000022 #define FILE_DEVICE_VIDEO 0x00000023 #define FILE_DEVICE_VIRTUAL_DISK 0x00000024 #define FILE_DEVICE_WAVE_IN 0x00000025 #define FILE_DEVICE_WAVE_OUT 0x00000026 #define FILE_DEVICE_8042_PORT 0x00000027 #define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028 #define FILE_DEVICE_BATTERY 0x00000029 #define FILE_DEVICE_BUS_EXTENDER 0x0000002A #define FILE_DEVICE_MODEM 0x0000002B #define FILE_DEVICE_VDM 0x0000002C #define FILE_DEVICE_MASS_STORAGE 0x0000002D #define FILE_DEVICE_SMB 0x0000002E #define FILE_DEVICE_KS 0x0000002F #define FILE_DEVICE_CHANGER 0x00000030 #define FILE_DEVICE_SMARTCARD 0x00000031 #define FILE_DEVICE_ACPI 0x00000032 #define FILE_DEVICE_DVD 0x00000033 #define FILE_DEVICE_FULLSCREEN_VIDEO 0x00000034 #define FILE_DEVICE_DFS_FILE_SYSTEM 0x00000035 #define FILE_DEVICE_DFS_VOLUME 0x00000036 #define 
FILE_DEVICE_SERENUM 0x00000037 #define FILE_DEVICE_TERMSRV 0x00000038 #define FILE_DEVICE_KSEC 0x00000039 #define FILE_DEVICE_FIPS 0x0000003A /* Device characteristics */ #define FILE_REMOVABLE_MEDIA 0x00000001 #define FILE_READ_ONLY_DEVICE 0x00000002 #define FILE_FLOPPY_DISKETTE 0x00000004 #define FILE_WRITE_ONCE_MEDIA 0x00000008 #define FILE_REMOTE_DEVICE 0x00000010 #define FILE_DEVICE_IS_MOUNTED 0x00000020 #define FILE_VIRTUAL_VOLUME 0x00000040 #define FILE_AUTOGENERATED_DEVICE_NAME 0x00000080 #define FILE_DEVICE_SECURE_OPEN 0x00000100 /* Status codes */ #define STATUS_SUCCESS 0x00000000 #define STATUS_USER_APC 0x000000C0 #define STATUS_KERNEL_APC 0x00000100 #define STATUS_ALERTED 0x00000101 #define STATUS_TIMEOUT 0x00000102 #define STATUS_PENDING 0x00000103 #define STATUS_FAILURE 0xC0000001 #define STATUS_NOT_IMPLEMENTED 0xC0000002 #define STATUS_ACCESS_VIOLATION 0xC0000005 #define STATUS_INVALID_PARAMETER 0xC000000D #define STATUS_INVALID_DEVICE_REQUEST 0xC0000010 #define STATUS_MORE_PROCESSING_REQUIRED 0xC0000016 #define STATUS_NO_MEMORY 0xC0000017 #define STATUS_BUFFER_TOO_SMALL 0xC0000023 #define STATUS_MUTANT_NOT_OWNED 0xC0000046 #define STATUS_NOT_SUPPORTED 0xC00000BB #define STATUS_INVALID_PARAMETER_2 0xC00000F0 #define STATUS_INSUFFICIENT_RESOURCES 0xC000009A #define STATUS_DEVICE_NOT_CONNECTED 0xC000009D #define STATUS_CANCELLED 0xC0000120 #define STATUS_NOT_FOUND 0xC0000225 #define STATUS_DEVICE_REMOVED 0xC00002B6 #define STATUS_WAIT_0 0x00000000 /* Memory pool types, for ExAllocatePoolWithTag() */ #define NonPagedPool 0x00000000 #define PagedPool 0x00000001 #define NonPagedPoolMustSucceed 0x00000002 #define DontUseThisType 0x00000003 #define NonPagedPoolCacheAligned 0x00000004 #define PagedPoolCacheAligned 0x00000005 #define NonPagedPoolCacheAlignedMustS 0x00000006 #define MaxPoolType 0x00000007 /* * IO_WORKITEM is an opaque structures that must be allocated * via IoAllocateWorkItem() and released via IoFreeWorkItem(). 
* Consequently, we can define it any way we want. */ typedef void (*io_workitem_func)(device_object *, void *); struct io_workitem { io_workitem_func iw_func; void *iw_ctx; list_entry iw_listentry; device_object *iw_dobj; int iw_idx; }; typedef struct io_workitem io_workitem; #define WORKQUEUE_CRITICAL 0 #define WORKQUEUE_DELAYED 1 #define WORKQUEUE_HYPERCRITICAL 2 #define WORKITEM_THREADS 4 #define WORKITEM_LEGACY_THREAD 3 #define WORKIDX_INC(x) (x) = (x + 1) % WORKITEM_LEGACY_THREAD /* * Older, deprecated work item API, needed to support NdisQueueWorkItem(). */ struct work_queue_item; typedef void (*work_item_func)(struct work_queue_item *, void *); struct work_queue_item { list_entry wqi_entry; work_item_func wqi_func; void *wqi_ctx; }; typedef struct work_queue_item work_queue_item; #define ExInitializeWorkItem(w, func, ctx) \ do { \ (w)->wqi_func = (func); \ (w)->wqi_ctx = (ctx); \ InitializeListHead(&((w)->wqi_entry)); \ } while (0) /* * FreeBSD's kernel stack is 2 pages in size by default. The * Windows stack is larger, so we need to give our threads more * stack pages. 4 should be enough, we use 8 just to be extra safe. */ #define NDIS_KSTACK_PAGES 8 /* * Different kinds of function wrapping we can do. 
*/ #define WINDRV_WRAP_STDCALL 1 #define WINDRV_WRAP_FASTCALL 2 #define WINDRV_WRAP_REGPARM 3 #define WINDRV_WRAP_CDECL 4 #define WINDRV_WRAP_AMD64 5 struct drvdb_ent { driver_object *windrv_object; void *windrv_devlist; ndis_cfg *windrv_regvals; interface_type windrv_bustype; STAILQ_ENTRY(drvdb_ent) link; }; extern image_patch_table ntoskrnl_functbl[]; #ifdef __amd64__ extern struct kuser_shared_data kuser_shared_data; #endif typedef void (*funcptr)(void); typedef int (*matchfuncptr)(interface_type, void *, void *); __BEGIN_DECLS extern int windrv_libinit(void); extern int windrv_libfini(void); extern driver_object *windrv_lookup(vm_offset_t, char *); extern struct drvdb_ent *windrv_match(matchfuncptr, void *); extern int windrv_load(module_t, vm_offset_t, int, interface_type, void *, ndis_cfg *); extern int windrv_unload(module_t, vm_offset_t, int); extern int windrv_create_pdo(driver_object *, device_t); extern void windrv_destroy_pdo(driver_object *, device_t); extern device_object *windrv_find_pdo(driver_object *, device_t); extern int windrv_bus_attach(driver_object *, char *); extern int windrv_wrap(funcptr, funcptr *, int, int); extern int windrv_unwrap(funcptr); extern void ctxsw_utow(void); extern void ctxsw_wtou(void); extern int ntoskrnl_libinit(void); extern int ntoskrnl_libfini(void); extern void ntoskrnl_intr(void *); extern void ntoskrnl_time(uint64_t *); extern uint16_t ExQueryDepthSList(slist_header *); extern slist_entry *InterlockedPushEntrySList(slist_header *, slist_entry *); extern slist_entry *InterlockedPopEntrySList(slist_header *); extern uint32_t RtlUnicodeStringToAnsiString(ansi_string *, unicode_string *, uint8_t); extern uint32_t RtlAnsiStringToUnicodeString(unicode_string *, ansi_string *, uint8_t); extern void RtlInitAnsiString(ansi_string *, char *); extern void RtlInitUnicodeString(unicode_string *, uint16_t *); extern void RtlFreeUnicodeString(unicode_string *); extern void RtlFreeAnsiString(ansi_string *); extern void 
KeInitializeDpc(kdpc *, void *, void *); extern uint8_t KeInsertQueueDpc(kdpc *, void *, void *); extern uint8_t KeRemoveQueueDpc(kdpc *); extern void KeSetImportanceDpc(kdpc *, uint32_t); extern void KeSetTargetProcessorDpc(kdpc *, uint8_t); extern void KeFlushQueuedDpcs(void); extern uint32_t KeGetCurrentProcessorNumber(void); extern void KeInitializeTimer(ktimer *); extern void KeInitializeTimerEx(ktimer *, uint32_t); extern uint8_t KeSetTimer(ktimer *, int64_t, kdpc *); extern uint8_t KeSetTimerEx(ktimer *, int64_t, uint32_t, kdpc *); extern uint8_t KeCancelTimer(ktimer *); extern uint8_t KeReadStateTimer(ktimer *); extern uint32_t KeWaitForSingleObject(void *, uint32_t, uint32_t, uint8_t, int64_t *); extern void KeInitializeEvent(nt_kevent *, uint32_t, uint8_t); extern void KeClearEvent(nt_kevent *); extern uint32_t KeReadStateEvent(nt_kevent *); extern uint32_t KeSetEvent(nt_kevent *, uint32_t, uint8_t); extern uint32_t KeResetEvent(nt_kevent *); #ifdef __i386__ extern void KefAcquireSpinLockAtDpcLevel(kspin_lock *); extern void KefReleaseSpinLockFromDpcLevel(kspin_lock *); extern uint8_t KeAcquireSpinLockRaiseToDpc(kspin_lock *); #else extern void KeAcquireSpinLockAtDpcLevel(kspin_lock *); extern void KeReleaseSpinLockFromDpcLevel(kspin_lock *); #endif extern void KeInitializeSpinLock(kspin_lock *); extern uint8_t KeAcquireInterruptSpinLock(kinterrupt *); extern void KeReleaseInterruptSpinLock(kinterrupt *, uint8_t); extern uint8_t KeSynchronizeExecution(kinterrupt *, void *, void *); extern uintptr_t InterlockedExchange(volatile uint32_t *, uintptr_t); extern void *ExAllocatePoolWithTag(uint32_t, size_t, uint32_t); extern void ExFreePool(void *); extern uint32_t IoConnectInterrupt(kinterrupt **, void *, void *, kspin_lock *, uint32_t, uint8_t, uint8_t, uint8_t, uint8_t, uint32_t, uint8_t); extern uint8_t MmIsAddressValid(void *); extern void *MmGetSystemRoutineAddress(unicode_string *); extern void *MmMapIoSpace(uint64_t, uint32_t, uint32_t); extern void 
MmUnmapIoSpace(void *, size_t); extern void MmBuildMdlForNonPagedPool(mdl *); extern void IoDisconnectInterrupt(kinterrupt *); extern uint32_t IoAllocateDriverObjectExtension(driver_object *, void *, uint32_t, void **); extern void *IoGetDriverObjectExtension(driver_object *, void *); extern uint32_t IoCreateDevice(driver_object *, uint32_t, unicode_string *, uint32_t, uint32_t, uint8_t, device_object **); extern void IoDeleteDevice(device_object *); extern device_object *IoGetAttachedDevice(device_object *); extern uint32_t IofCallDriver(device_object *, irp *); extern void IofCompleteRequest(irp *, uint8_t); extern void IoAcquireCancelSpinLock(uint8_t *); extern void IoReleaseCancelSpinLock(uint8_t); extern uint8_t IoCancelIrp(irp *); extern void IoDetachDevice(device_object *); extern device_object *IoAttachDeviceToDeviceStack(device_object *, device_object *); extern mdl *IoAllocateMdl(void *, uint32_t, uint8_t, uint8_t, irp *); extern void IoFreeMdl(mdl *); extern io_workitem *IoAllocateWorkItem(device_object *); extern void ExQueueWorkItem(work_queue_item *, u_int32_t); extern void IoFreeWorkItem(io_workitem *); extern void IoQueueWorkItem(io_workitem *, io_workitem_func, uint32_t, void *); #define IoCallDriver(a, b) IofCallDriver(a, b) #define IoCompleteRequest(a, b) IofCompleteRequest(a, b) /* * On the Windows x86 arch, KeAcquireSpinLock() and KeReleaseSpinLock() * routines live in the HAL. We try to imitate this behavior. 
*/ #ifdef __i386__ #define KI_USER_SHARED_DATA 0xffdf0000 #define KeAcquireSpinLock(a, b) *(b) = KfAcquireSpinLock(a) #define KeReleaseSpinLock(a, b) KfReleaseSpinLock(a, b) #define KeRaiseIrql(a, b) *(b) = KfRaiseIrql(a) #define KeLowerIrql(a) KfLowerIrql(a) #define KeAcquireSpinLockAtDpcLevel(a) KefAcquireSpinLockAtDpcLevel(a) #define KeReleaseSpinLockFromDpcLevel(a) KefReleaseSpinLockFromDpcLevel(a) #endif /* __i386__ */ #ifdef __amd64__ #define KI_USER_SHARED_DATA 0xfffff78000000000UL #define KeAcquireSpinLock(a, b) *(b) = KfAcquireSpinLock(a) #define KeReleaseSpinLock(a, b) KfReleaseSpinLock(a, b) /* * These may need to be redefined later; * not sure where they live on amd64 yet. */ #define KeRaiseIrql(a, b) *(b) = KfRaiseIrql(a) #define KeLowerIrql(a) KfLowerIrql(a) #endif /* __amd64__ */ __END_DECLS #endif /* _NTOSKRNL_VAR_H_ */ Index: head/sys/compat/ndis/subr_hal.c =================================================================== --- head/sys/compat/ndis/subr_hal.c (revision 298827) +++ head/sys/compat/ndis/subr_hal.c (revision 298828) @@ -1,481 +1,481 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void KeStallExecutionProcessor(uint32_t); static void WRITE_PORT_BUFFER_ULONG(uint32_t *, uint32_t *, uint32_t); static void WRITE_PORT_BUFFER_USHORT(uint16_t *, uint16_t *, uint32_t); static void WRITE_PORT_BUFFER_UCHAR(uint8_t *, uint8_t *, uint32_t); static void WRITE_PORT_ULONG(uint32_t *, uint32_t); static void WRITE_PORT_USHORT(uint16_t *, uint16_t); static void WRITE_PORT_UCHAR(uint8_t *, uint8_t); static uint32_t READ_PORT_ULONG(uint32_t *); static uint16_t READ_PORT_USHORT(uint16_t *); static uint8_t READ_PORT_UCHAR(uint8_t *); static void READ_PORT_BUFFER_ULONG(uint32_t *, uint32_t *, uint32_t); static void READ_PORT_BUFFER_USHORT(uint16_t *, uint16_t *, uint32_t); static void READ_PORT_BUFFER_UCHAR(uint8_t *, uint8_t *, uint32_t); static uint64_t KeQueryPerformanceCounter(uint64_t *); static void 
_KeLowerIrql(uint8_t); static uint8_t KeRaiseIrqlToDpcLevel(void); static void dummy (void); #define NDIS_MAXCPUS 64 static struct mtx disp_lock[NDIS_MAXCPUS]; int hal_libinit() { image_patch_table *patch; int i; for (i = 0; i < NDIS_MAXCPUS; i++) mtx_init(&disp_lock[i], "HAL preemption lock", "HAL lock", MTX_RECURSE|MTX_DEF); patch = hal_functbl; while (patch->ipt_func != NULL) { windrv_wrap((funcptr)patch->ipt_func, (funcptr *)&patch->ipt_wrap, patch->ipt_argcnt, patch->ipt_ftype); patch++; } return (0); } int hal_libfini() { image_patch_table *patch; int i; for (i = 0; i < NDIS_MAXCPUS; i++) mtx_destroy(&disp_lock[i]); patch = hal_functbl; while (patch->ipt_func != NULL) { windrv_unwrap(patch->ipt_wrap); patch++; } return (0); } static void KeStallExecutionProcessor(usecs) uint32_t usecs; { DELAY(usecs); } static void WRITE_PORT_ULONG(port, val) uint32_t *port; uint32_t val; { bus_space_write_4(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val); } static void WRITE_PORT_USHORT(uint16_t *port, uint16_t val) { bus_space_write_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val); } static void WRITE_PORT_UCHAR(uint8_t *port, uint8_t val) { bus_space_write_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val); } static void WRITE_PORT_BUFFER_ULONG(port, val, cnt) uint32_t *port; uint32_t *val; uint32_t cnt; { bus_space_write_multi_4(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } static void WRITE_PORT_BUFFER_USHORT(port, val, cnt) uint16_t *port; uint16_t *val; uint32_t cnt; { bus_space_write_multi_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } static void WRITE_PORT_BUFFER_UCHAR(port, val, cnt) uint8_t *port; uint8_t *val; uint32_t cnt; { bus_space_write_multi_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } static uint16_t READ_PORT_USHORT(port) uint16_t *port; { return (bus_space_read_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port)); } static uint32_t READ_PORT_ULONG(port) uint32_t *port; { return (bus_space_read_4(NDIS_BUS_SPACE_IO, 0x0, 
(bus_size_t)port)); } static uint8_t READ_PORT_UCHAR(port) uint8_t *port; { return (bus_space_read_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port)); } static void READ_PORT_BUFFER_ULONG(port, val, cnt) uint32_t *port; uint32_t *val; uint32_t cnt; { bus_space_read_multi_4(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } static void READ_PORT_BUFFER_USHORT(port, val, cnt) uint16_t *port; uint16_t *val; uint32_t cnt; { bus_space_read_multi_2(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } static void READ_PORT_BUFFER_UCHAR(port, val, cnt) uint8_t *port; uint8_t *val; uint32_t cnt; { bus_space_read_multi_1(NDIS_BUS_SPACE_IO, 0x0, (bus_size_t)port, val, cnt); } /* * The spinlock implementation in Windows differs from that of FreeBSD. * The basic operation of spinlocks involves two steps: 1) spin in a * tight loop while trying to acquire a lock, 2) after obtaining the * lock, disable preemption. (Note that on uniprocessor systems, you're * allowed to skip the first step and just lock out pre-emption, since * it's not possible for you to be in contention with another running * thread.) Later, you release the lock then re-enable preemption. * The difference between Windows and FreeBSD lies in how preemption * is disabled. In FreeBSD, it's done using critical_enter(), which on * the x86 arch translates to a cli instruction. This masks off all * interrupts, and effectively stops the scheduler from ever running * so _nothing_ can execute except the current thread. In Windows, * preemption is disabled by raising the processor IRQL to DISPATCH_LEVEL. * This stops other threads from running, but does _not_ block device * interrupts. This means ISRs can still run, and they can make other * threads runable, but those other threads won't be able to execute * until the current thread lowers the IRQL to something less than * DISPATCH_LEVEL. * * There's another commonly used IRQL in Windows, which is APC_LEVEL. 
* An APC is an Asynchronous Procedure Call, which differs from a DPC * (Deferred Procedure Call) in that a DPC is queued up to run in * another thread, while an APC runs in the thread that scheduled * it (similar to a signal handler in a UNIX process). We don't * actually support the notion of APCs in FreeBSD, so for now, the * only IRQLs we're interested in are DISPATCH_LEVEL and PASSIVE_LEVEL. * * To simulate DISPATCH_LEVEL, we raise the current thread's priority * to PI_REALTIME, which is the highest we can give it. This should, * if I understand things correctly, prevent anything except for an * interrupt thread from preempting us. PASSIVE_LEVEL is basically * everything else. * * Be aware that, at least on the x86 arch, the Windows spinlock * functions are divided up in peculiar ways. The actual spinlock * functions are KfAcquireSpinLock() and KfReleaseSpinLock(), and * they live in HAL.dll. Meanwhile, KeInitializeSpinLock(), * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel() * live in ntoskrnl.exe. Most Windows source code will call * KeAcquireSpinLock() and KeReleaseSpinLock(), but these are just * macros that call KfAcquireSpinLock() and KfReleaseSpinLock(). * KefAcquireSpinLockAtDpcLevel() and KefReleaseSpinLockFromDpcLevel() - * perform the lock aquisition/release functions without doing the + * perform the lock acquisition/release functions without doing the * IRQL manipulation, and are used when one is already running at * DISPATCH_LEVEL. Make sense? Good. * * According to the Microsoft documentation, any thread that calls * KeAcquireSpinLock() must be running at IRQL <= DISPATCH_LEVEL. If * we detect someone trying to acquire a spinlock from DEVICE_LEVEL * or HIGH_LEVEL, we panic. * * Alternate sleep-lock-based spinlock implementation * -------------------------------------------------- * * The earlier spinlock implementation was arguably a bit of a hack * and presented several problems. 
It was basically designed to provide * the functionality of spinlocks without incurring the wrath of * WITNESS. We could get away with using both our spinlock implementation * and FreeBSD sleep locks at the same time, but if WITNESS knew what * we were really up to, it would have spanked us rather severely. * * There's another method we can use based entirely on sleep locks. * First, it's important to realize that everything we're locking * resides inside Project Evil itself: any critical data being locked * by drivers belongs to the drivers, and should not be referenced * by any other OS code outside of the NDISulator. The priority-based * locking scheme has system-wide effects, just like real spinlocks * (blocking preemption affects the whole CPU), but since we keep all * our critical data private, we can use a simpler mechanism that * affects only code/threads directly related to Project Evil. * * The idea is to create a sleep lock mutex for each CPU in the system. * When a CPU running in the NDISulator wants to acquire a spinlock, it * does the following: * - Pin ourselves to the current CPU * - Acquire the mutex for the current CPU * - Spin on the spinlock variable using atomic test and set, just like * a real spinlock. * - Once we have the lock, we execute our critical code * * To give up the lock, we do: * - Clear the spinlock variable with an atomic op * - Release the per-CPU mutex * - Unpin ourselves from the current CPU. * * On a uniprocessor system, this means all threads that access protected * data are serialized through the per-CPU mutex. After one thread * acquires the 'spinlock,' any other thread that uses a spinlock on the * current CPU will block on the per-CPU mutex, which has the same general * effect of blocking pre-emption, but _only_ for those threads that are * running NDISulator code. 
* * On a multiprocessor system, threads on different CPUs all block on * their respective per-CPU mutex, and the atomic test/set operation * on the spinlock variable provides inter-CPU synchronization, though * only for threads running NDISulator code. * * This method solves an important problem. In Windows, you're allowed * to do an ExAllocatePoolWithTag() with a spinlock held, provided you * allocate from NonPagedPool. This implies an atomic heap allocation * that will not cause the current thread to sleep. (You can't sleep * while holding real spinlock: clowns will eat you.) But in FreeBSD, * malloc(9) _always_ triggers the acquisition of a sleep lock, even * when you use M_NOWAIT. This is not a problem for FreeBSD native * code: you're allowed to sleep in things like interrupt threads. But * it is a problem with the old priority-based spinlock implementation: * even though we get away with it most of the time, we really can't * do a malloc(9) after doing a KeAcquireSpinLock() or KeRaiseIrql(). * With the new implementation, it's not a problem: you're allowed to * acquire more than one sleep lock (as long as you avoid lock order * reversals). * * The one drawback to this approach is that now we have a lot of * contention on one per-CPU mutex within the NDISulator code. Whether * or not this is preferable to the expected Windows spinlock behavior * of blocking pre-emption is debatable. 
*/ uint8_t KfAcquireSpinLock(lock) kspin_lock *lock; { uint8_t oldirql; KeRaiseIrql(DISPATCH_LEVEL, &oldirql); KeAcquireSpinLockAtDpcLevel(lock); return (oldirql); } void KfReleaseSpinLock(kspin_lock *lock, uint8_t newirql) { KeReleaseSpinLockFromDpcLevel(lock); KeLowerIrql(newirql); } uint8_t KeGetCurrentIrql() { if (mtx_owned(&disp_lock[curthread->td_oncpu])) return (DISPATCH_LEVEL); return (PASSIVE_LEVEL); } static uint64_t KeQueryPerformanceCounter(freq) uint64_t *freq; { if (freq != NULL) *freq = hz; return ((uint64_t)ticks); } uint8_t KfRaiseIrql(uint8_t irql) { uint8_t oldirql; sched_pin(); oldirql = KeGetCurrentIrql(); /* I am so going to hell for this. */ if (oldirql > irql) panic("IRQL_NOT_LESS_THAN_OR_EQUAL"); if (oldirql != DISPATCH_LEVEL) mtx_lock(&disp_lock[curthread->td_oncpu]); else sched_unpin(); /*printf("RAISE IRQL: %d %d\n", irql, oldirql);*/ return (oldirql); } void KfLowerIrql(uint8_t oldirql) { if (oldirql == DISPATCH_LEVEL) return; if (KeGetCurrentIrql() != DISPATCH_LEVEL) panic("IRQL_NOT_GREATER_THAN"); mtx_unlock(&disp_lock[curthread->td_oncpu]); sched_unpin(); } static uint8_t KeRaiseIrqlToDpcLevel(void) { uint8_t irql; KeRaiseIrql(DISPATCH_LEVEL, &irql); return (irql); } static void _KeLowerIrql(uint8_t oldirql) { KeLowerIrql(oldirql); } static void dummy() { printf("hal dummy called...\n"); } image_patch_table hal_functbl[] = { IMPORT_SFUNC(KeStallExecutionProcessor, 1), IMPORT_SFUNC(WRITE_PORT_ULONG, 2), IMPORT_SFUNC(WRITE_PORT_USHORT, 2), IMPORT_SFUNC(WRITE_PORT_UCHAR, 2), IMPORT_SFUNC(WRITE_PORT_BUFFER_ULONG, 3), IMPORT_SFUNC(WRITE_PORT_BUFFER_USHORT, 3), IMPORT_SFUNC(WRITE_PORT_BUFFER_UCHAR, 3), IMPORT_SFUNC(READ_PORT_ULONG, 1), IMPORT_SFUNC(READ_PORT_USHORT, 1), IMPORT_SFUNC(READ_PORT_UCHAR, 1), IMPORT_SFUNC(READ_PORT_BUFFER_ULONG, 3), IMPORT_SFUNC(READ_PORT_BUFFER_USHORT, 3), IMPORT_SFUNC(READ_PORT_BUFFER_UCHAR, 3), IMPORT_FFUNC(KfAcquireSpinLock, 1), IMPORT_FFUNC(KfReleaseSpinLock, 1), IMPORT_SFUNC(KeGetCurrentIrql, 0), 
IMPORT_SFUNC(KeQueryPerformanceCounter, 1), IMPORT_FFUNC(KfLowerIrql, 1), IMPORT_FFUNC(KfRaiseIrql, 1), IMPORT_SFUNC(KeRaiseIrqlToDpcLevel, 0), #undef KeLowerIrql IMPORT_SFUNC_MAP(KeLowerIrql, _KeLowerIrql, 1), /* * This last entry is a catch-all for any function we haven't * implemented yet. The PE import list patching routine will * use it for any function that doesn't have an explicit match * in this table. */ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL }, /* End of list. */ { NULL, NULL, NULL } }; Index: head/sys/compat/ndis/subr_ntoskrnl.c =================================================================== --- head/sys/compat/ndis/subr_ntoskrnl.c (revision 298827) +++ head/sys/compat/ndis/subr_ntoskrnl.c (revision 298828) @@ -1,4457 +1,4457 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef NTOSKRNL_DEBUG_TIMERS static int sysctl_show_timers(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_show_timers, "I", "Show ntoskrnl timer stats"); #endif struct kdpc_queue { list_entry kq_disp; struct thread *kq_td; int kq_cpu; int kq_exit; int kq_running; kspin_lock kq_lock; nt_kevent kq_proc; nt_kevent kq_done; }; typedef struct kdpc_queue kdpc_queue; struct wb_ext { struct cv we_cv; struct thread *we_td; }; typedef struct wb_ext wb_ext; #define NTOSKRNL_TIMEOUTS 256 #ifdef NTOSKRNL_DEBUG_TIMERS static uint64_t ntoskrnl_timer_fires; static uint64_t ntoskrnl_timer_sets; static uint64_t ntoskrnl_timer_reloads; static uint64_t ntoskrnl_timer_cancels; #endif struct callout_entry { struct callout ce_callout; list_entry ce_list; }; typedef struct callout_entry callout_entry; static struct list_entry ntoskrnl_calllist; static struct mtx ntoskrnl_calllock; struct kuser_shared_data kuser_shared_data; static struct list_entry ntoskrnl_intlist; static kspin_lock ntoskrnl_intlock; static uint8_t 
RtlEqualUnicodeString(unicode_string *, unicode_string *, uint8_t); static void RtlCopyString(ansi_string *, const ansi_string *); static void RtlCopyUnicodeString(unicode_string *, unicode_string *); static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *, void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *); static irp *IoBuildAsynchronousFsdRequest(uint32_t, device_object *, void *, uint32_t, uint64_t *, io_status_block *); static irp *IoBuildDeviceIoControlRequest(uint32_t, device_object *, void *, uint32_t, void *, uint32_t, uint8_t, nt_kevent *, io_status_block *); static irp *IoAllocateIrp(uint8_t, uint8_t); static void IoReuseIrp(irp *, uint32_t); static void IoFreeIrp(irp *); static void IoInitializeIrp(irp *, uint16_t, uint8_t); static irp *IoMakeAssociatedIrp(irp *, uint8_t); static uint32_t KeWaitForMultipleObjects(uint32_t, nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t, int64_t *, wait_block *); static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t); static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *); static void ntoskrnl_satisfy_multiple_waits(wait_block *); static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *); static void ntoskrnl_insert_timer(ktimer *, int); static void ntoskrnl_remove_timer(ktimer *); #ifdef NTOSKRNL_DEBUG_TIMERS static void ntoskrnl_show_timers(void); #endif static void ntoskrnl_timercall(void *); static void ntoskrnl_dpc_thread(void *); static void ntoskrnl_destroy_dpc_threads(void); static void ntoskrnl_destroy_workitem_threads(void); static void ntoskrnl_workitem_thread(void *); static void ntoskrnl_workitem(device_object *, void *); static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int); static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int); static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *); static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t); static uint16_t READ_REGISTER_USHORT(uint16_t *); static void 
WRITE_REGISTER_ULONG(uint32_t *, uint32_t); static uint32_t READ_REGISTER_ULONG(uint32_t *); static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t); static uint8_t READ_REGISTER_UCHAR(uint8_t *); static int64_t _allmul(int64_t, int64_t); static int64_t _alldiv(int64_t, int64_t); static int64_t _allrem(int64_t, int64_t); static int64_t _allshr(int64_t, uint8_t); static int64_t _allshl(int64_t, uint8_t); static uint64_t _aullmul(uint64_t, uint64_t); static uint64_t _aulldiv(uint64_t, uint64_t); static uint64_t _aullrem(uint64_t, uint64_t); static uint64_t _aullshr(uint64_t, uint8_t); static uint64_t _aullshl(uint64_t, uint8_t); static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *); static void InitializeSListHead(slist_header *); static slist_entry *ntoskrnl_popsl(slist_header *); static void ExFreePoolWithTag(void *, uint32_t); static void ExInitializePagedLookasideList(paged_lookaside_list *, lookaside_alloc_func *, lookaside_free_func *, uint32_t, size_t, uint32_t, uint16_t); static void ExDeletePagedLookasideList(paged_lookaside_list *); static void ExInitializeNPagedLookasideList(npaged_lookaside_list *, lookaside_alloc_func *, lookaside_free_func *, uint32_t, size_t, uint32_t, uint16_t); static void ExDeleteNPagedLookasideList(npaged_lookaside_list *); static slist_entry *ExInterlockedPushEntrySList(slist_header *, slist_entry *, kspin_lock *); static slist_entry *ExInterlockedPopEntrySList(slist_header *, kspin_lock *); static uint32_t InterlockedIncrement(volatile uint32_t *); static uint32_t InterlockedDecrement(volatile uint32_t *); static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t); static void *MmAllocateContiguousMemory(uint32_t, uint64_t); static void *MmAllocateContiguousMemorySpecifyCache(uint32_t, uint64_t, uint64_t, uint64_t, enum nt_caching_type); static void MmFreeContiguousMemory(void *); static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, enum nt_caching_type); static uint32_t MmSizeOfMdl(void *, size_t); 
static void *MmMapLockedPages(mdl *, uint8_t); static void *MmMapLockedPagesSpecifyCache(mdl *, uint8_t, uint32_t, void *, uint32_t, uint32_t); static void MmUnmapLockedPages(void *, mdl *); static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **); static void RtlZeroMemory(void *, size_t); static void RtlSecureZeroMemory(void *, size_t); static void RtlFillMemory(void *, size_t, uint8_t); static void RtlMoveMemory(void *, const void *, size_t); static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *); static void RtlCopyMemory(void *, const void *, size_t); static size_t RtlCompareMemory(const void *, const void *, size_t); static ndis_status RtlUnicodeStringToInteger(unicode_string *, uint32_t, uint32_t *); static int atoi (const char *); static long atol (const char *); static int rand(void); static void srand(unsigned int); static void KeQuerySystemTime(uint64_t *); static uint32_t KeTickCount(void); static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t); static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t, uint32_t, void **); static void ntoskrnl_thrfunc(void *); static ndis_status PsCreateSystemThread(ndis_handle *, uint32_t, void *, ndis_handle, void *, void *, void *); static ndis_status PsTerminateSystemThread(ndis_status); static ndis_status IoGetDeviceObjectPointer(unicode_string *, uint32_t, void *, device_object *); static ndis_status IoGetDeviceProperty(device_object *, uint32_t, uint32_t, void *, uint32_t *); static void KeInitializeMutex(kmutant *, uint32_t); static uint32_t KeReleaseMutex(kmutant *, uint8_t); static uint32_t KeReadStateMutex(kmutant *); static ndis_status ObReferenceObjectByHandle(ndis_handle, uint32_t, void *, uint8_t, void **, void **); static void ObfDereferenceObject(void *); static uint32_t ZwClose(ndis_handle); static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t, uint32_t, void *); static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...); 
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t); static void *ntoskrnl_memset(void *, int, size_t); static void *ntoskrnl_memmove(void *, void *, size_t); static void *ntoskrnl_memchr(void *, unsigned char, size_t); static char *ntoskrnl_strstr(char *, char *); static char *ntoskrnl_strncat(char *, char *, size_t); static int ntoskrnl_toupper(int); static int ntoskrnl_tolower(int); static funcptr ntoskrnl_findwrap(funcptr); static uint32_t DbgPrint(char *, ...); static void DbgBreakPoint(void); static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long); static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *); static int32_t KeSetPriorityThread(struct thread *, int32_t); static void dummy(void); static struct mtx ntoskrnl_dispatchlock; static struct mtx ntoskrnl_interlock; static kspin_lock ntoskrnl_cancellock; static int ntoskrnl_kth = 0; static struct nt_objref_head ntoskrnl_reflist; static uma_zone_t mdl_zone; static uma_zone_t iw_zone; static struct kdpc_queue *kq_queues; static struct kdpc_queue *wq_queues; static int wq_idx = 0; int ntoskrnl_libinit() { image_patch_table *patch; int error; struct proc *p; kdpc_queue *kq; callout_entry *e; int i; mtx_init(&ntoskrnl_dispatchlock, "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE); mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN); KeInitializeSpinLock(&ntoskrnl_cancellock); KeInitializeSpinLock(&ntoskrnl_intlock); TAILQ_INIT(&ntoskrnl_reflist); InitializeListHead(&ntoskrnl_calllist); InitializeListHead(&ntoskrnl_intlist); mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN); kq_queues = ExAllocatePoolWithTag(NonPagedPool, #ifdef NTOSKRNL_MULTIPLE_DPCS sizeof(kdpc_queue) * mp_ncpus, 0); #else sizeof(kdpc_queue), 0); #endif if (kq_queues == NULL) return (ENOMEM); wq_queues = ExAllocatePoolWithTag(NonPagedPool, sizeof(kdpc_queue) * WORKITEM_THREADS, 0); if (wq_queues == NULL) return (ENOMEM); #ifdef NTOSKRNL_MULTIPLE_DPCS 
bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus); #else bzero((char *)kq_queues, sizeof(kdpc_queue)); #endif bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS); /* * Launch the DPC threads. */ #ifdef NTOSKRNL_MULTIPLE_DPCS for (i = 0; i < mp_ncpus; i++) { #else for (i = 0; i < 1; i++) { #endif kq = kq_queues + i; kq->kq_cpu = i; error = kproc_create(ntoskrnl_dpc_thread, kq, &p, RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i); if (error) panic("failed to launch DPC thread"); } /* * Launch the workitem threads. */ for (i = 0; i < WORKITEM_THREADS; i++) { kq = wq_queues + i; error = kproc_create(ntoskrnl_workitem_thread, kq, &p, RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i); if (error) panic("failed to launch workitem thread"); } patch = ntoskrnl_functbl; while (patch->ipt_func != NULL) { windrv_wrap((funcptr)patch->ipt_func, (funcptr *)&patch->ipt_wrap, patch->ipt_argcnt, patch->ipt_ftype); patch++; } for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) { e = ExAllocatePoolWithTag(NonPagedPool, sizeof(callout_entry), 0); if (e == NULL) panic("failed to allocate timeouts"); mtx_lock_spin(&ntoskrnl_calllock); InsertHeadList((&ntoskrnl_calllist), (&e->ce_list)); mtx_unlock_spin(&ntoskrnl_calllock); } /* * MDLs are supposed to be variable size (they describe * buffers containing some number of pages, but we don't * know ahead of time how many pages that will be). But * always allocating them off the heap is very slow. As * a compromise, we create an MDL UMA zone big enough to * handle any buffer requiring up to 16 pages, and we * use those for any MDLs for buffers of 16 pages or less * in size. For buffers larger than that (which we assume * will be few and far between, we allocate the MDLs off * the heap. 
*/ mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } int ntoskrnl_libfini() { image_patch_table *patch; callout_entry *e; list_entry *l; patch = ntoskrnl_functbl; while (patch->ipt_func != NULL) { windrv_unwrap(patch->ipt_wrap); patch++; } /* Stop the workitem queues. */ ntoskrnl_destroy_workitem_threads(); /* Stop the DPC queues. */ ntoskrnl_destroy_dpc_threads(); ExFreePool(kq_queues); ExFreePool(wq_queues); uma_zdestroy(mdl_zone); uma_zdestroy(iw_zone); mtx_lock_spin(&ntoskrnl_calllock); while(!IsListEmpty(&ntoskrnl_calllist)) { l = RemoveHeadList(&ntoskrnl_calllist); e = CONTAINING_RECORD(l, callout_entry, ce_list); mtx_unlock_spin(&ntoskrnl_calllock); ExFreePool(e); mtx_lock_spin(&ntoskrnl_calllock); } mtx_unlock_spin(&ntoskrnl_calllock); mtx_destroy(&ntoskrnl_dispatchlock); mtx_destroy(&ntoskrnl_interlock); mtx_destroy(&ntoskrnl_calllock); return (0); } /* * We need to be able to reference this externally from the wrapper; * GCC only generates a local implementation of memset. 
*/ static void * ntoskrnl_memset(buf, ch, size) void *buf; int ch; size_t size; { return (memset(buf, ch, size)); } static void * ntoskrnl_memmove(dst, src, size) void *src; void *dst; size_t size; { bcopy(src, dst, size); return (dst); } static void * ntoskrnl_memchr(void *buf, unsigned char ch, size_t len) { if (len != 0) { unsigned char *p = buf; do { if (*p++ == ch) return (p - 1); } while (--len != 0); } return (NULL); } static char * ntoskrnl_strstr(s, find) char *s, *find; { char c, sc; size_t len; if ((c = *find++) != 0) { len = strlen(find); do { do { if ((sc = *s++) == 0) return (NULL); } while (sc != c); } while (strncmp(s, find, len) != 0); s--; } return ((char *)s); } /* Taken from libc */ static char * ntoskrnl_strncat(dst, src, n) char *dst; char *src; size_t n; { if (n != 0) { char *d = dst; const char *s = src; while (*d != 0) d++; do { if ((*d = *s++) == 0) break; d++; } while (--n != 0); *d = 0; } return (dst); } static int ntoskrnl_toupper(c) int c; { return (toupper(c)); } static int ntoskrnl_tolower(c) int c; { return (tolower(c)); } static uint8_t RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2, uint8_t caseinsensitive) { int i; if (str1->us_len != str2->us_len) return (FALSE); for (i = 0; i < str1->us_len; i++) { if (caseinsensitive == TRUE) { if (toupper((char)(str1->us_buf[i] & 0xFF)) != toupper((char)(str2->us_buf[i] & 0xFF))) return (FALSE); } else { if (str1->us_buf[i] != str2->us_buf[i]) return (FALSE); } } return (TRUE); } static void RtlCopyString(dst, src) ansi_string *dst; const ansi_string *src; { if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) { dst->as_len = min(src->as_len, dst->as_maxlen); memcpy(dst->as_buf, src->as_buf, dst->as_len); if (dst->as_len < dst->as_maxlen) dst->as_buf[dst->as_len] = 0; } else dst->as_len = 0; } static void RtlCopyUnicodeString(dest, src) unicode_string *dest; unicode_string *src; { if (dest->us_maxlen >= src->us_len) dest->us_len = src->us_len; else dest->us_len = 
dest->us_maxlen; memcpy(dest->us_buf, src->us_buf, dest->us_len); } static void ntoskrnl_ascii_to_unicode(ascii, unicode, len) char *ascii; uint16_t *unicode; int len; { int i; uint16_t *ustr; ustr = unicode; for (i = 0; i < len; i++) { *ustr = (uint16_t)ascii[i]; ustr++; } } static void ntoskrnl_unicode_to_ascii(unicode, ascii, len) uint16_t *unicode; char *ascii; int len; { int i; uint8_t *astr; astr = ascii; for (i = 0; i < len / 2; i++) { *astr = (uint8_t)unicode[i]; astr++; } } uint32_t RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate) { if (dest == NULL || src == NULL) return (STATUS_INVALID_PARAMETER); dest->as_len = src->us_len / 2; if (dest->as_maxlen < dest->as_len) dest->as_len = dest->as_maxlen; if (allocate == TRUE) { dest->as_buf = ExAllocatePoolWithTag(NonPagedPool, (src->us_len / 2) + 1, 0); if (dest->as_buf == NULL) return (STATUS_INSUFFICIENT_RESOURCES); dest->as_len = dest->as_maxlen = src->us_len / 2; } else { dest->as_len = src->us_len / 2; /* XXX */ if (dest->as_maxlen < dest->as_len) dest->as_len = dest->as_maxlen; } ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf, dest->as_len * 2); return (STATUS_SUCCESS); } uint32_t RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src, uint8_t allocate) { if (dest == NULL || src == NULL) return (STATUS_INVALID_PARAMETER); if (allocate == TRUE) { dest->us_buf = ExAllocatePoolWithTag(NonPagedPool, src->as_len * 2, 0); if (dest->us_buf == NULL) return (STATUS_INSUFFICIENT_RESOURCES); dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2; } else { dest->us_len = src->as_len * 2; /* XXX */ if (dest->us_maxlen < dest->us_len) dest->us_len = dest->us_maxlen; } ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf, dest->us_len / 2); return (STATUS_SUCCESS); } void * ExAllocatePoolWithTag(pooltype, len, tag) uint32_t pooltype; size_t len; uint32_t tag; { void *buf; buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); if (buf == NULL) return (NULL); return (buf); } 
static void ExFreePoolWithTag(buf, tag) void *buf; uint32_t tag; { ExFreePool(buf); } void ExFreePool(buf) void *buf; { free(buf, M_DEVBUF); } uint32_t IoAllocateDriverObjectExtension(drv, clid, extlen, ext) driver_object *drv; void *clid; uint32_t extlen; void **ext; { custom_extension *ce; ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension) + extlen, 0); if (ce == NULL) return (STATUS_INSUFFICIENT_RESOURCES); ce->ce_clid = clid; InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list)); *ext = (void *)(ce + 1); return (STATUS_SUCCESS); } void * IoGetDriverObjectExtension(drv, clid) driver_object *drv; void *clid; { list_entry *e; custom_extension *ce; /* * Sanity check. Our dummy bus drivers don't have - * any driver extentions. + * any driver extensions. */ if (drv->dro_driverext == NULL) return (NULL); e = drv->dro_driverext->dre_usrext.nle_flink; while (e != &drv->dro_driverext->dre_usrext) { ce = (custom_extension *)e; if (ce->ce_clid == clid) return ((void *)(ce + 1)); e = e->nle_flink; } return (NULL); } uint32_t IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname, uint32_t devtype, uint32_t devchars, uint8_t exclusive, device_object **newdev) { device_object *dev; dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0); if (dev == NULL) return (STATUS_INSUFFICIENT_RESOURCES); dev->do_type = devtype; dev->do_drvobj = drv; dev->do_currirp = NULL; dev->do_flags = 0; if (devextlen) { dev->do_devext = ExAllocatePoolWithTag(NonPagedPool, devextlen, 0); if (dev->do_devext == NULL) { ExFreePool(dev); return (STATUS_INSUFFICIENT_RESOURCES); } bzero(dev->do_devext, devextlen); } else dev->do_devext = NULL; dev->do_size = sizeof(device_object) + devextlen; dev->do_refcnt = 1; dev->do_attacheddev = NULL; dev->do_nextdev = NULL; dev->do_devtype = devtype; dev->do_stacksize = 1; dev->do_alignreq = 1; dev->do_characteristics = devchars; dev->do_iotimer = NULL; KeInitializeEvent(&dev->do_devlock, 
EVENT_TYPE_SYNC, TRUE); /* * Vpd is used for disk/tape devices, * but we don't support those. (Yet.) */ dev->do_vpb = NULL; dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool, sizeof(devobj_extension), 0); if (dev->do_devobj_ext == NULL) { if (dev->do_devext != NULL) ExFreePool(dev->do_devext); ExFreePool(dev); return (STATUS_INSUFFICIENT_RESOURCES); } dev->do_devobj_ext->dve_type = 0; dev->do_devobj_ext->dve_size = sizeof(devobj_extension); dev->do_devobj_ext->dve_devobj = dev; /* * Attach this device to the driver object's list * of devices. Note: this is not the same as attaching * the device to the device stack. The driver's AddDevice * routine must explicitly call IoAddDeviceToDeviceStack() * to do that. */ if (drv->dro_devobj == NULL) { drv->dro_devobj = dev; dev->do_nextdev = NULL; } else { dev->do_nextdev = drv->dro_devobj; drv->dro_devobj = dev; } *newdev = dev; return (STATUS_SUCCESS); } void IoDeleteDevice(dev) device_object *dev; { device_object *prev; if (dev == NULL) return; if (dev->do_devobj_ext != NULL) ExFreePool(dev->do_devobj_ext); if (dev->do_devext != NULL) ExFreePool(dev->do_devext); /* Unlink the device from the driver's device list. 
*/ prev = dev->do_drvobj->dro_devobj; if (prev == dev) dev->do_drvobj->dro_devobj = dev->do_nextdev; else { while (prev->do_nextdev != dev) prev = prev->do_nextdev; prev->do_nextdev = dev->do_nextdev; } ExFreePool(dev); } device_object * IoGetAttachedDevice(dev) device_object *dev; { device_object *d; if (dev == NULL) return (NULL); d = dev; while (d->do_attacheddev != NULL) d = d->do_attacheddev; return (d); } static irp * IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status) uint32_t func; device_object *dobj; void *buf; uint32_t len; uint64_t *off; nt_kevent *event; io_status_block *status; { irp *ip; ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status); if (ip == NULL) return (NULL); ip->irp_usrevent = event; return (ip); } static irp * IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status) uint32_t func; device_object *dobj; void *buf; uint32_t len; uint64_t *off; io_status_block *status; { irp *ip; io_stack_location *sl; ip = IoAllocateIrp(dobj->do_stacksize, TRUE); if (ip == NULL) return (NULL); ip->irp_usriostat = status; ip->irp_tail.irp_overlay.irp_thread = NULL; sl = IoGetNextIrpStackLocation(ip); sl->isl_major = func; sl->isl_minor = 0; sl->isl_flags = 0; sl->isl_ctl = 0; sl->isl_devobj = dobj; sl->isl_fileobj = NULL; sl->isl_completionfunc = NULL; ip->irp_userbuf = buf; if (dobj->do_flags & DO_BUFFERED_IO) { ip->irp_assoc.irp_sysbuf = ExAllocatePoolWithTag(NonPagedPool, len, 0); if (ip->irp_assoc.irp_sysbuf == NULL) { IoFreeIrp(ip); return (NULL); } bcopy(buf, ip->irp_assoc.irp_sysbuf, len); } if (dobj->do_flags & DO_DIRECT_IO) { ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip); if (ip->irp_mdl == NULL) { if (ip->irp_assoc.irp_sysbuf != NULL) ExFreePool(ip->irp_assoc.irp_sysbuf); IoFreeIrp(ip); return (NULL); } ip->irp_userbuf = NULL; ip->irp_assoc.irp_sysbuf = NULL; } if (func == IRP_MJ_READ) { sl->isl_parameters.isl_read.isl_len = len; if (off != NULL) sl->isl_parameters.isl_read.isl_byteoff = *off; 
else sl->isl_parameters.isl_read.isl_byteoff = 0; } if (func == IRP_MJ_WRITE) { sl->isl_parameters.isl_write.isl_len = len; if (off != NULL) sl->isl_parameters.isl_write.isl_byteoff = *off; else sl->isl_parameters.isl_write.isl_byteoff = 0; } return (ip); } static irp * IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf, uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal, nt_kevent *event, io_status_block *status) { irp *ip; io_stack_location *sl; uint32_t buflen; ip = IoAllocateIrp(dobj->do_stacksize, TRUE); if (ip == NULL) return (NULL); ip->irp_usrevent = event; ip->irp_usriostat = status; ip->irp_tail.irp_overlay.irp_thread = NULL; sl = IoGetNextIrpStackLocation(ip); sl->isl_major = isinternal == TRUE ? IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL; sl->isl_minor = 0; sl->isl_flags = 0; sl->isl_ctl = 0; sl->isl_devobj = dobj; sl->isl_fileobj = NULL; sl->isl_completionfunc = NULL; sl->isl_parameters.isl_ioctl.isl_iocode = iocode; sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen; sl->isl_parameters.isl_ioctl.isl_obuflen = olen; switch(IO_METHOD(iocode)) { case METHOD_BUFFERED: if (ilen > olen) buflen = ilen; else buflen = olen; if (buflen) { ip->irp_assoc.irp_sysbuf = ExAllocatePoolWithTag(NonPagedPool, buflen, 0); if (ip->irp_assoc.irp_sysbuf == NULL) { IoFreeIrp(ip); return (NULL); } } if (ilen && ibuf != NULL) { bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen); bzero((char *)ip->irp_assoc.irp_sysbuf + ilen, buflen - ilen); } else bzero(ip->irp_assoc.irp_sysbuf, ilen); ip->irp_userbuf = obuf; break; case METHOD_IN_DIRECT: case METHOD_OUT_DIRECT: if (ilen && ibuf != NULL) { ip->irp_assoc.irp_sysbuf = ExAllocatePoolWithTag(NonPagedPool, ilen, 0); if (ip->irp_assoc.irp_sysbuf == NULL) { IoFreeIrp(ip); return (NULL); } bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen); } if (olen && obuf != NULL) { ip->irp_mdl = IoAllocateMdl(obuf, olen, FALSE, FALSE, ip); /* * Normally we would MmProbeAndLockPages() * here, but we don't have to 
in our * imlementation. */ } break; case METHOD_NEITHER: ip->irp_userbuf = obuf; sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf; break; default: break; } /* * Ideally, we should associate this IRP with the calling * thread here. */ return (ip); } static irp * IoAllocateIrp(uint8_t stsize, uint8_t chargequota) { irp *i; i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0); if (i == NULL) return (NULL); IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize); return (i); } static irp * IoMakeAssociatedIrp(irp *ip, uint8_t stsize) { irp *associrp; associrp = IoAllocateIrp(stsize, FALSE); if (associrp == NULL) return (NULL); mtx_lock(&ntoskrnl_dispatchlock); associrp->irp_flags |= IRP_ASSOCIATED_IRP; associrp->irp_tail.irp_overlay.irp_thread = ip->irp_tail.irp_overlay.irp_thread; associrp->irp_assoc.irp_master = ip; mtx_unlock(&ntoskrnl_dispatchlock); return (associrp); } static void IoFreeIrp(ip) irp *ip; { ExFreePool(ip); } static void IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize) { bzero((char *)io, IoSizeOfIrp(ssize)); io->irp_size = psize; io->irp_stackcnt = ssize; io->irp_currentstackloc = ssize; InitializeListHead(&io->irp_thlist); io->irp_tail.irp_overlay.irp_csl = (io_stack_location *)(io + 1) + ssize; } static void IoReuseIrp(ip, status) irp *ip; uint32_t status; { uint8_t allocflags; allocflags = ip->irp_allocflags; IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt); ip->irp_iostat.isb_status = status; ip->irp_allocflags = allocflags; } void IoAcquireCancelSpinLock(uint8_t *irql) { KeAcquireSpinLock(&ntoskrnl_cancellock, irql); } void IoReleaseCancelSpinLock(uint8_t irql) { KeReleaseSpinLock(&ntoskrnl_cancellock, irql); } uint8_t IoCancelIrp(irp *ip) { cancel_func cfunc; uint8_t cancelirql; IoAcquireCancelSpinLock(&cancelirql); cfunc = IoSetCancelRoutine(ip, NULL); ip->irp_cancel = TRUE; if (cfunc == NULL) { IoReleaseCancelSpinLock(cancelirql); return (FALSE); } ip->irp_cancelirql = cancelirql; MSCALL2(cfunc, 
IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip); return (uint8_t)IoSetCancelValue(ip, TRUE); } uint32_t IofCallDriver(dobj, ip) device_object *dobj; irp *ip; { driver_object *drvobj; io_stack_location *sl; uint32_t status; driver_dispatch disp; drvobj = dobj->do_drvobj; if (ip->irp_currentstackloc <= 0) panic("IoCallDriver(): out of stack locations"); IoSetNextIrpStackLocation(ip); sl = IoGetCurrentIrpStackLocation(ip); sl->isl_devobj = dobj; disp = drvobj->dro_dispatch[sl->isl_major]; status = MSCALL2(disp, dobj, ip); return (status); } void IofCompleteRequest(irp *ip, uint8_t prioboost) { uint32_t status; device_object *dobj; io_stack_location *sl; completion_func cf; KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING, ("incorrect IRP(%p) status (STATUS_PENDING)", ip)); sl = IoGetCurrentIrpStackLocation(ip); IoSkipCurrentIrpStackLocation(ip); do { if (sl->isl_ctl & SL_PENDING_RETURNED) ip->irp_pendingreturned = TRUE; if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1)) dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj; else dobj = NULL; if (sl->isl_completionfunc != NULL && ((ip->irp_iostat.isb_status == STATUS_SUCCESS && sl->isl_ctl & SL_INVOKE_ON_SUCCESS) || (ip->irp_iostat.isb_status != STATUS_SUCCESS && sl->isl_ctl & SL_INVOKE_ON_ERROR) || (ip->irp_cancel == TRUE && sl->isl_ctl & SL_INVOKE_ON_CANCEL))) { cf = sl->isl_completionfunc; status = MSCALL3(cf, dobj, ip, sl->isl_completionctx); if (status == STATUS_MORE_PROCESSING_REQUIRED) return; } else { if ((ip->irp_currentstackloc <= ip->irp_stackcnt) && (ip->irp_pendingreturned == TRUE)) IoMarkIrpPending(ip); } /* move to the next. */ IoSkipCurrentIrpStackLocation(ip); sl++; } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1)); if (ip->irp_usriostat != NULL) *ip->irp_usriostat = ip->irp_iostat; if (ip->irp_usrevent != NULL) KeSetEvent(ip->irp_usrevent, prioboost, FALSE); /* Handle any associated IRPs. 
*/ if (ip->irp_flags & IRP_ASSOCIATED_IRP) { uint32_t masterirpcnt; irp *masterirp; mdl *m; masterirp = ip->irp_assoc.irp_master; masterirpcnt = InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt); while ((m = ip->irp_mdl) != NULL) { ip->irp_mdl = m->mdl_next; IoFreeMdl(m); } IoFreeIrp(ip); if (masterirpcnt == 0) IoCompleteRequest(masterirp, IO_NO_INCREMENT); return; } /* With any luck, these conditions will never arise. */ if (ip->irp_flags & IRP_PAGING_IO) { if (ip->irp_mdl != NULL) IoFreeMdl(ip->irp_mdl); IoFreeIrp(ip); } } void ntoskrnl_intr(arg) void *arg; { kinterrupt *iobj; uint8_t irql; uint8_t claimed; list_entry *l; KeAcquireSpinLock(&ntoskrnl_intlock, &irql); l = ntoskrnl_intlist.nle_flink; while (l != &ntoskrnl_intlist) { iobj = CONTAINING_RECORD(l, kinterrupt, ki_list); claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx); if (claimed == TRUE) break; l = l->nle_flink; } KeReleaseSpinLock(&ntoskrnl_intlock, irql); } uint8_t KeAcquireInterruptSpinLock(iobj) kinterrupt *iobj; { uint8_t irql; KeAcquireSpinLock(&ntoskrnl_intlock, &irql); return (irql); } void KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql) { KeReleaseSpinLock(&ntoskrnl_intlock, irql); } uint8_t KeSynchronizeExecution(iobj, syncfunc, syncctx) kinterrupt *iobj; void *syncfunc; void *syncctx; { uint8_t irql; KeAcquireSpinLock(&ntoskrnl_intlock, &irql); MSCALL1(syncfunc, syncctx); KeReleaseSpinLock(&ntoskrnl_intlock, irql); return (TRUE); } /* * IoConnectInterrupt() is passed only the interrupt vector and * irql that a device wants to use, but no device-specific tag * of any kind. This conflicts rather badly with FreeBSD's * bus_setup_intr(), which needs the device_t for the device * requesting interrupt delivery. In order to bypass this * inconsistency, we implement a second level of interrupt * dispatching on top of bus_setup_intr(). 
All devices use * ntoskrnl_intr() as their ISR, and any device requesting * interrupts will be registered with ntoskrnl_intr()'s interrupt * dispatch list. When an interrupt arrives, we walk the list * and invoke all the registered ISRs. This effectively makes all * interrupts shared, but it's the only way to duplicate the * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly. */ uint32_t IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx, kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql, uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat) { uint8_t curirql; *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0); if (*iobj == NULL) return (STATUS_INSUFFICIENT_RESOURCES); (*iobj)->ki_svcfunc = svcfunc; (*iobj)->ki_svcctx = svcctx; if (lock == NULL) { KeInitializeSpinLock(&(*iobj)->ki_lock_priv); (*iobj)->ki_lock = &(*iobj)->ki_lock_priv; } else (*iobj)->ki_lock = lock; KeAcquireSpinLock(&ntoskrnl_intlock, &curirql); InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list)); KeReleaseSpinLock(&ntoskrnl_intlock, curirql); return (STATUS_SUCCESS); } void IoDisconnectInterrupt(iobj) kinterrupt *iobj; { uint8_t irql; if (iobj == NULL) return; KeAcquireSpinLock(&ntoskrnl_intlock, &irql); RemoveEntryList((&iobj->ki_list)); KeReleaseSpinLock(&ntoskrnl_intlock, irql); ExFreePool(iobj); } device_object * IoAttachDeviceToDeviceStack(src, dst) device_object *src; device_object *dst; { device_object *attached; mtx_lock(&ntoskrnl_dispatchlock); attached = IoGetAttachedDevice(dst); attached->do_attacheddev = src; src->do_attacheddev = NULL; src->do_stacksize = attached->do_stacksize + 1; mtx_unlock(&ntoskrnl_dispatchlock); return (attached); } void IoDetachDevice(topdev) device_object *topdev; { device_object *tail; mtx_lock(&ntoskrnl_dispatchlock); /* First, break the chain. 
*/
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the remaining objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */
static int
ntoskrnl_is_signalled(obj, td)
	nt_dispatch_header *obj;
	struct thread *td;
{
	kmutant *km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return (TRUE);
		return (FALSE);
	}

	if (obj->dh_sigstate > 0)
		return (TRUE);
	return (FALSE);
}

/*
 * Consume a signal on behalf of thread 'td': grant mutant ownership
 * once sigstate drops to 0, reset synchronization objects to the
 * non-signalled state, or decrement a semaphore's count, depending
 * on the object type.
 */
static void
ntoskrnl_satisfy_wait(obj, td)
	nt_dispatch_header *obj;
	struct thread *td;
{
	kmutant *km;

	switch (obj->dh_type) {
	case DISP_TYPE_MUTANT:
		km = (struct kmutant *)obj;
		obj->dh_sigstate--;
		/*
		 * If sigstate reaches 0, the mutex is now
		 * non-signalled (the new thread owns it).
		 */
		if (obj->dh_sigstate == 0) {
			km->km_ownerthread = td;
			if (km->km_abandoned == TRUE)
				km->km_abandoned = FALSE;
		}
		break;
	/* Synchronization objects get reset to unsignalled.
*/ case DISP_TYPE_SYNCHRONIZATION_EVENT: case DISP_TYPE_SYNCHRONIZATION_TIMER: obj->dh_sigstate = 0; break; case DISP_TYPE_SEMAPHORE: obj->dh_sigstate--; break; default: break; } } static void ntoskrnl_satisfy_multiple_waits(wb) wait_block *wb; { wait_block *cur; struct thread *td; cur = wb; td = wb->wb_kthread; do { ntoskrnl_satisfy_wait(wb->wb_object, td); cur->wb_awakened = TRUE; cur = cur->wb_next; } while (cur != wb); } /* Always called with dispatcher lock held. */ static void ntoskrnl_waittest(obj, increment) nt_dispatch_header *obj; uint32_t increment; { wait_block *w, *next; list_entry *e; struct thread *td; wb_ext *we; int satisfied; /* * Once an object has been signalled, we walk its list of * wait blocks. If a wait block can be awakened, then satisfy * waits as necessary and wake the thread. * * The rules work like this: * * If a wait block is marked as WAITTYPE_ANY, then * we can satisfy the wait conditions on the current * object and wake the thread right away. Satisfying * the wait also has the effect of breaking us out * of the search loop. * * If the object is marked as WAITTYLE_ALL, then the * wait block will be part of a circularly linked * list of wait blocks belonging to a waiting thread * that's sleeping in KeWaitForMultipleObjects(). In * order to wake the thread, all the objects in the * wait list must be in the signalled state. If they * are, we then satisfy all of them and wake the * thread. * */ e = obj->dh_waitlisthead.nle_flink; while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) { w = CONTAINING_RECORD(e, wait_block, wb_waitlist); we = w->wb_ext; td = we->we_td; satisfied = FALSE; if (w->wb_waittype == WAITTYPE_ANY) { /* * Thread can be awakened if * any wait is satisfied. */ ntoskrnl_satisfy_wait(obj, td); satisfied = TRUE; w->wb_awakened = TRUE; } else { /* * Thread can only be woken up * if all waits are satisfied. 
* If the thread is waiting on multiple * objects, they should all be linked * through the wb_next pointers in the * wait blocks. */ satisfied = TRUE; next = w->wb_next; while (next != w) { if (ntoskrnl_is_signalled(obj, td) == FALSE) { satisfied = FALSE; break; } next = next->wb_next; } ntoskrnl_satisfy_multiple_waits(w); } if (satisfied == TRUE) cv_broadcastpri(&we->we_cv, (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ? w->wb_oldpri - (increment * 4) : PRI_MIN_KERN); e = e->nle_flink; } } /* * Return the number of 100 nanosecond intervals since * January 1, 1601. (?!?!) */ void ntoskrnl_time(tval) uint64_t *tval; { struct timespec ts; nanotime(&ts); *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 + 11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */ } static void KeQuerySystemTime(current_time) uint64_t *current_time; { ntoskrnl_time(current_time); } static uint32_t KeTickCount(void) { struct timeval tv; getmicrouptime(&tv); return tvtohz(&tv); } /* * KeWaitForSingleObject() is a tricky beast, because it can be used * with several different object types: semaphores, timers, events, * mutexes and threads. Semaphores don't appear very often, but the * other object types are quite common. KeWaitForSingleObject() is * what's normally used to acquire a mutex, and it can be used to * wait for a thread termination. * * The Windows NDIS API is implemented in terms of Windows kernel * primitives, and some of the object manipulation is duplicated in * NDIS. For example, NDIS has timers and events, which are actually * Windows kevents and ktimers. Now, you're supposed to only use the * NDIS variants of these objects within the confines of the NDIS API, * but there are some naughty developers out there who will use * KeWaitForSingleObject() on NDIS timer and event objects, so we * have to support that as well. Conseqently, our NDIS timer and event * code has to be closely tied into our ntoskrnl timer and event code, * just as it is in Windows. 
* * KeWaitForSingleObject() may do different things for different kinds * of objects: * * - For events, we check if the event has been signalled. If the * event is already in the signalled state, we just return immediately, * otherwise we wait for it to be set to the signalled state by someone * else calling KeSetEvent(). Events can be either synchronization or * notification events. * * - For timers, if the timer has already fired and the timer is in * the signalled state, we just return, otherwise we wait on the * timer. Unlike an event, timers get signalled automatically when * they expire rather than someone having to trip them manually. * Timers initialized with KeInitializeTimer() are always notification * events: KeInitializeTimerEx() lets you initialize a timer as * either a notification or synchronization event. * * - For mutexes, we try to acquire the mutex and if we can't, we wait * on the mutex until it's available and then grab it. When a mutex is * released, it enters the signalled state, which wakes up one of the * threads waiting to acquire it. Mutexes are always synchronization * events. * * - For threads, the only thing we do is wait until the thread object * enters a signalled state, which occurs when the thread terminates. * Threads are always notification events. * * A notification event wakes up all threads waiting on an object. A * synchronization event wakes up just one. Also, a synchronization event * is auto-clearing, which means we automatically set the event back to * the non-signalled state once the wakeup is done. 
*/ uint32_t KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime) { wait_block w; struct thread *td = curthread; struct timeval tv; int error = 0; uint64_t curtime; wb_ext we; nt_dispatch_header *obj; obj = arg; if (obj == NULL) return (STATUS_INVALID_PARAMETER); mtx_lock(&ntoskrnl_dispatchlock); cv_init(&we.we_cv, "KeWFS"); we.we_td = td; /* * Check to see if this object is already signalled, * and just return without waiting if it is. */ if (ntoskrnl_is_signalled(obj, td) == TRUE) { /* Sanity check the signal state value. */ if (obj->dh_sigstate != INT32_MIN) { ntoskrnl_satisfy_wait(obj, curthread); mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_SUCCESS); } else { /* * There's a limit to how many times we can * recursively acquire a mutant. If we hit * the limit, something is very wrong. */ if (obj->dh_type == DISP_TYPE_MUTANT) { mtx_unlock(&ntoskrnl_dispatchlock); panic("mutant limit exceeded"); } } } bzero((char *)&w, sizeof(wait_block)); w.wb_object = obj; w.wb_ext = &we; w.wb_waittype = WAITTYPE_ANY; w.wb_next = &w; w.wb_waitkey = 0; w.wb_awakened = FALSE; w.wb_oldpri = td->td_priority; InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist)); /* * The timeout value is specified in 100 nanosecond units * and can be a positive or negative number. If it's positive, * then the duetime is absolute, and we need to convert it * to an absolute offset relative to now in order to use it. * If it's negative, then the duetime is relative and we * just have to convert the units. 
*/ if (duetime != NULL) { if (*duetime < 0) { tv.tv_sec = - (*duetime) / 10000000; tv.tv_usec = (- (*duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (*duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((*duetime) - curtime) / 10000000; tv.tv_usec = ((*duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } } if (duetime == NULL) cv_wait(&we.we_cv, &ntoskrnl_dispatchlock); else error = cv_timedwait(&we.we_cv, &ntoskrnl_dispatchlock, tvtohz(&tv)); RemoveEntryList(&w.wb_waitlist); cv_destroy(&we.we_cv); /* We timed out. Leave the object alone and return status. */ if (error == EWOULDBLOCK) { mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_TIMEOUT); } mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_SUCCESS); /* return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason, mode, alertable, duetime, &w)); */ } static uint32_t KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype, uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime, wait_block *wb_array) { struct thread *td = curthread; wait_block *whead, *w; wait_block _wb_array[MAX_WAIT_OBJECTS]; nt_dispatch_header *cur; struct timeval tv; int i, wcnt = 0, error = 0; uint64_t curtime; struct timespec t1, t2; uint32_t status = STATUS_SUCCESS; wb_ext we; if (cnt > MAX_WAIT_OBJECTS) return (STATUS_INVALID_PARAMETER); if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL) return (STATUS_INVALID_PARAMETER); mtx_lock(&ntoskrnl_dispatchlock); cv_init(&we.we_cv, "KeWFM"); we.we_td = td; if (wb_array == NULL) whead = _wb_array; else whead = wb_array; bzero((char *)whead, sizeof(wait_block) * cnt); /* First pass: see if we can satisfy any waits immediately. 
*/ wcnt = 0; w = whead; for (i = 0; i < cnt; i++) { InsertTailList((&obj[i]->dh_waitlisthead), (&w->wb_waitlist)); w->wb_ext = &we; w->wb_object = obj[i]; w->wb_waittype = wtype; w->wb_waitkey = i; w->wb_awakened = FALSE; w->wb_oldpri = td->td_priority; w->wb_next = w + 1; w++; wcnt++; if (ntoskrnl_is_signalled(obj[i], td)) { /* * There's a limit to how many times * we can recursively acquire a mutant. * If we hit the limit, something * is very wrong. */ if (obj[i]->dh_sigstate == INT32_MIN && obj[i]->dh_type == DISP_TYPE_MUTANT) { mtx_unlock(&ntoskrnl_dispatchlock); panic("mutant limit exceeded"); } /* * If this is a WAITTYPE_ANY wait, then * satisfy the waited object and exit * right now. */ if (wtype == WAITTYPE_ANY) { ntoskrnl_satisfy_wait(obj[i], td); status = STATUS_WAIT_0 + i; goto wait_done; } else { w--; wcnt--; w->wb_object = NULL; RemoveEntryList(&w->wb_waitlist); } } } /* * If this is a WAITTYPE_ALL wait and all objects are * already signalled, satisfy the waits and exit now. */ if (wtype == WAITTYPE_ALL && wcnt == 0) { for (i = 0; i < cnt; i++) ntoskrnl_satisfy_wait(obj[i], td); status = STATUS_SUCCESS; goto wait_done; } /* * Create a circular waitblock list. The waitcount * must always be non-zero when we get here. */ (w - 1)->wb_next = whead; /* Wait on any objects that aren't yet signalled. */ /* Calculate timeout, if any. */ if (duetime != NULL) { if (*duetime < 0) { tv.tv_sec = - (*duetime) / 10000000; tv.tv_usec = (- (*duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (*duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((*duetime) - curtime) / 10000000; tv.tv_usec = ((*duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } } while (wcnt) { nanotime(&t1); if (duetime == NULL) cv_wait(&we.we_cv, &ntoskrnl_dispatchlock); else error = cv_timedwait(&we.we_cv, &ntoskrnl_dispatchlock, tvtohz(&tv)); /* Wait with timeout expired. 
*/ if (error) { status = STATUS_TIMEOUT; goto wait_done; } nanotime(&t2); /* See what's been signalled. */ w = whead; do { cur = w->wb_object; if (ntoskrnl_is_signalled(cur, td) == TRUE || w->wb_awakened == TRUE) { /* Sanity check the signal state value. */ if (cur->dh_sigstate == INT32_MIN && cur->dh_type == DISP_TYPE_MUTANT) { mtx_unlock(&ntoskrnl_dispatchlock); panic("mutant limit exceeded"); } wcnt--; if (wtype == WAITTYPE_ANY) { status = w->wb_waitkey & STATUS_WAIT_0; goto wait_done; } } w = w->wb_next; } while (w != whead); /* * If all objects have been signalled, or if this * is a WAITTYPE_ANY wait and we were woke up by * someone, we can bail. */ if (wcnt == 0) { status = STATUS_SUCCESS; goto wait_done; } /* * If this is WAITTYPE_ALL wait, and there's still * objects that haven't been signalled, deduct the * time that's elapsed so far from the timeout and * wait again (or continue waiting indefinitely if * there's no timeout). */ if (duetime != NULL) { tv.tv_sec -= (t2.tv_sec - t1.tv_sec); tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000; } } wait_done: cv_destroy(&we.we_cv); for (i = 0; i < cnt; i++) { if (whead[i].wb_object != NULL) RemoveEntryList(&whead[i].wb_waitlist); } mtx_unlock(&ntoskrnl_dispatchlock); return (status); } static void WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val) { bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); } static uint16_t READ_REGISTER_USHORT(reg) uint16_t *reg; { return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } static void WRITE_REGISTER_ULONG(reg, val) uint32_t *reg; uint32_t val; { bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); } static uint32_t READ_REGISTER_ULONG(reg) uint32_t *reg; { return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } static uint8_t READ_REGISTER_UCHAR(uint8_t *reg) { return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } static void WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val) { 
bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); } static int64_t _allmul(a, b) int64_t a; int64_t b; { return (a * b); } static int64_t _alldiv(a, b) int64_t a; int64_t b; { return (a / b); } static int64_t _allrem(a, b) int64_t a; int64_t b; { return (a % b); } static uint64_t _aullmul(a, b) uint64_t a; uint64_t b; { return (a * b); } static uint64_t _aulldiv(a, b) uint64_t a; uint64_t b; { return (a / b); } static uint64_t _aullrem(a, b) uint64_t a; uint64_t b; { return (a % b); } static int64_t _allshl(int64_t a, uint8_t b) { return (a << b); } static uint64_t _aullshl(uint64_t a, uint8_t b) { return (a << b); } static int64_t _allshr(int64_t a, uint8_t b) { return (a >> b); } static uint64_t _aullshr(uint64_t a, uint8_t b) { return (a >> b); } static slist_entry * ntoskrnl_pushsl(head, entry) slist_header *head; slist_entry *entry; { slist_entry *oldhead; oldhead = head->slh_list.slh_next; entry->sl_next = head->slh_list.slh_next; head->slh_list.slh_next = entry; head->slh_list.slh_depth++; head->slh_list.slh_seq++; return (oldhead); } static void InitializeSListHead(head) slist_header *head; { memset(head, 0, sizeof(*head)); } static slist_entry * ntoskrnl_popsl(head) slist_header *head; { slist_entry *first; first = head->slh_list.slh_next; if (first != NULL) { head->slh_list.slh_next = first->sl_next; head->slh_list.slh_depth--; head->slh_list.slh_seq++; } return (first); } /* * We need this to make lookaside lists work for amd64. * We pass a pointer to ExAllocatePoolWithTag() the lookaside * list structure. For amd64 to work right, this has to be a * pointer to the wrapped version of the routine, not the * original. Letting the Windows driver invoke the original * function directly will result in a convention calling * mismatch and a pretty crash. On x86, this effectively * becomes a no-op since ipt_func and ipt_wrap are the same. 
*/ static funcptr ntoskrnl_findwrap(func) funcptr func; { image_patch_table *patch; patch = ntoskrnl_functbl; while (patch->ipt_func != NULL) { if ((funcptr)patch->ipt_func == func) return ((funcptr)patch->ipt_wrap); patch++; } return (NULL); } static void ExInitializePagedLookasideList(paged_lookaside_list *lookaside, lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc, uint32_t flags, size_t size, uint32_t tag, uint16_t depth) { bzero((char *)lookaside, sizeof(paged_lookaside_list)); if (size < sizeof(slist_entry)) lookaside->nll_l.gl_size = sizeof(slist_entry); else lookaside->nll_l.gl_size = size; lookaside->nll_l.gl_tag = tag; if (allocfunc == NULL) lookaside->nll_l.gl_allocfunc = ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag); else lookaside->nll_l.gl_allocfunc = allocfunc; if (freefunc == NULL) lookaside->nll_l.gl_freefunc = ntoskrnl_findwrap((funcptr)ExFreePool); else lookaside->nll_l.gl_freefunc = freefunc; #ifdef __i386__ KeInitializeSpinLock(&lookaside->nll_obsoletelock); #endif lookaside->nll_l.gl_type = NonPagedPool; lookaside->nll_l.gl_depth = depth; lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH; } static void ExDeletePagedLookasideList(lookaside) paged_lookaside_list *lookaside; { void *buf; void (*freefunc)(void *); freefunc = lookaside->nll_l.gl_freefunc; while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL) MSCALL1(freefunc, buf); } static void ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside, lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc, uint32_t flags, size_t size, uint32_t tag, uint16_t depth) { bzero((char *)lookaside, sizeof(npaged_lookaside_list)); if (size < sizeof(slist_entry)) lookaside->nll_l.gl_size = sizeof(slist_entry); else lookaside->nll_l.gl_size = size; lookaside->nll_l.gl_tag = tag; if (allocfunc == NULL) lookaside->nll_l.gl_allocfunc = ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag); else lookaside->nll_l.gl_allocfunc = allocfunc; if (freefunc == NULL) 
lookaside->nll_l.gl_freefunc = ntoskrnl_findwrap((funcptr)ExFreePool); else lookaside->nll_l.gl_freefunc = freefunc; #ifdef __i386__ KeInitializeSpinLock(&lookaside->nll_obsoletelock); #endif lookaside->nll_l.gl_type = NonPagedPool; lookaside->nll_l.gl_depth = depth; lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH; } static void ExDeleteNPagedLookasideList(lookaside) npaged_lookaside_list *lookaside; { void *buf; void (*freefunc)(void *); freefunc = lookaside->nll_l.gl_freefunc; while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL) MSCALL1(freefunc, buf); } slist_entry * InterlockedPushEntrySList(head, entry) slist_header *head; slist_entry *entry; { slist_entry *oldhead; mtx_lock_spin(&ntoskrnl_interlock); oldhead = ntoskrnl_pushsl(head, entry); mtx_unlock_spin(&ntoskrnl_interlock); return (oldhead); } slist_entry * InterlockedPopEntrySList(head) slist_header *head; { slist_entry *first; mtx_lock_spin(&ntoskrnl_interlock); first = ntoskrnl_popsl(head); mtx_unlock_spin(&ntoskrnl_interlock); return (first); } static slist_entry * ExInterlockedPushEntrySList(head, entry, lock) slist_header *head; slist_entry *entry; kspin_lock *lock; { return (InterlockedPushEntrySList(head, entry)); } static slist_entry * ExInterlockedPopEntrySList(head, lock) slist_header *head; kspin_lock *lock; { return (InterlockedPopEntrySList(head)); } uint16_t ExQueryDepthSList(head) slist_header *head; { uint16_t depth; mtx_lock_spin(&ntoskrnl_interlock); depth = head->slh_list.slh_depth; mtx_unlock_spin(&ntoskrnl_interlock); return (depth); } void KeInitializeSpinLock(lock) kspin_lock *lock; { *lock = 0; } #ifdef __i386__ void KefAcquireSpinLockAtDpcLevel(lock) kspin_lock *lock; { #ifdef NTOSKRNL_DEBUG_SPINLOCKS int i = 0; #endif while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) { /* sit and spin */; #ifdef NTOSKRNL_DEBUG_SPINLOCKS i++; if (i > 200000000) panic("DEADLOCK!"); #endif } } void KefReleaseSpinLockFromDpcLevel(lock) kspin_lock *lock; { 
atomic_store_rel_int((volatile u_int *)lock, 0); } uint8_t KeAcquireSpinLockRaiseToDpc(kspin_lock *lock) { uint8_t oldirql; if (KeGetCurrentIrql() > DISPATCH_LEVEL) panic("IRQL_NOT_LESS_THAN_OR_EQUAL"); KeRaiseIrql(DISPATCH_LEVEL, &oldirql); KeAcquireSpinLockAtDpcLevel(lock); return (oldirql); } #else void KeAcquireSpinLockAtDpcLevel(kspin_lock *lock) { while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) /* sit and spin */; } void KeReleaseSpinLockFromDpcLevel(kspin_lock *lock) { atomic_store_rel_int((volatile u_int *)lock, 0); } #endif /* __i386__ */ uintptr_t InterlockedExchange(dst, val) volatile uint32_t *dst; uintptr_t val; { uintptr_t r; mtx_lock_spin(&ntoskrnl_interlock); r = *dst; *dst = val; mtx_unlock_spin(&ntoskrnl_interlock); return (r); } static uint32_t InterlockedIncrement(addend) volatile uint32_t *addend; { atomic_add_long((volatile u_long *)addend, 1); return (*addend); } static uint32_t InterlockedDecrement(addend) volatile uint32_t *addend; { atomic_subtract_long((volatile u_long *)addend, 1); return (*addend); } static void ExInterlockedAddLargeStatistic(addend, inc) uint64_t *addend; uint32_t inc; { mtx_lock_spin(&ntoskrnl_interlock); *addend += inc; mtx_unlock_spin(&ntoskrnl_interlock); }; mdl * IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf, uint8_t chargequota, irp *iopkt) { mdl *m; int zone = 0; if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE) m = ExAllocatePoolWithTag(NonPagedPool, MmSizeOfMdl(vaddr, len), 0); else { m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO); zone++; } if (m == NULL) return (NULL); MmInitializeMdl(m, vaddr, len); /* * MmInitializMdl() clears the flags field, so we * have to set this here. If the MDL came from the * MDL UMA zone, tag it so we can release it to * the right place later. 
*/ if (zone) m->mdl_flags = MDL_ZONE_ALLOCED; if (iopkt != NULL) { if (secondarybuf == TRUE) { mdl *last; last = iopkt->irp_mdl; while (last->mdl_next != NULL) last = last->mdl_next; last->mdl_next = m; } else { if (iopkt->irp_mdl != NULL) panic("leaking an MDL in IoAllocateMdl()"); iopkt->irp_mdl = m; } } return (m); } void IoFreeMdl(m) mdl *m; { if (m == NULL) return; if (m->mdl_flags & MDL_ZONE_ALLOCED) uma_zfree(mdl_zone, m); else ExFreePool(m); } static void * MmAllocateContiguousMemory(size, highest) uint32_t size; uint64_t highest; { void *addr; size_t pagelength = roundup(size, PAGE_SIZE); addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0); return (addr); } static void * MmAllocateContiguousMemorySpecifyCache(size, lowest, highest, boundary, cachetype) uint32_t size; uint64_t lowest; uint64_t highest; uint64_t boundary; enum nt_caching_type cachetype; { vm_memattr_t memattr; void *ret; switch (cachetype) { case MmNonCached: memattr = VM_MEMATTR_UNCACHEABLE; break; case MmWriteCombined: memattr = VM_MEMATTR_WRITE_COMBINING; break; case MmNonCachedUnordered: memattr = VM_MEMATTR_UNCACHEABLE; break; case MmCached: case MmHardwareCoherentCached: case MmUSWCCached: default: memattr = VM_MEMATTR_DEFAULT; break; } ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT, lowest, highest, PAGE_SIZE, boundary, memattr); if (ret != NULL) malloc_type_allocated(M_DEVBUF, round_page(size)); return (ret); } static void MmFreeContiguousMemory(base) void *base; { ExFreePool(base); } static void MmFreeContiguousMemorySpecifyCache(base, size, cachetype) void *base; uint32_t size; enum nt_caching_type cachetype; { contigfree(base, size, M_DEVBUF); } static uint32_t MmSizeOfMdl(vaddr, len) void *vaddr; size_t len; { uint32_t l; l = sizeof(struct mdl) + (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len)); return (l); } /* * The Microsoft documentation says this routine fills in the * page array of an MDL with the _physical_ page addresses that * comprise the 
buffer, but we don't really want to do that here. * Instead, we just fill in the page array with the kernel virtual * addresses of the buffers. */ void MmBuildMdlForNonPagedPool(m) mdl *m; { vm_offset_t *mdl_pages; int pagecnt, i; pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount); if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *)) panic("not enough pages in MDL to describe buffer"); mdl_pages = MmGetMdlPfnArray(m); for (i = 0; i < pagecnt; i++) *mdl_pages = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE); m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL; m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m); } static void * MmMapLockedPages(mdl *buf, uint8_t accessmode) { buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA; return (MmGetMdlVirtualAddress(buf)); } static void * MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype, void *vaddr, uint32_t bugcheck, uint32_t prio) { return (MmMapLockedPages(buf, accessmode)); } static void MmUnmapLockedPages(vaddr, buf) void *vaddr; mdl *buf; { buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA; } /* * This function has a problem in that it will break if you * compile this module without PAE and try to use it on a PAE * kernel. Unfortunately, there's no way around this at the * moment. It's slightly less broken that using pmap_kextract(). * You'd think the virtual memory subsystem would help us out * here, but it doesn't. 
*/ static uint64_t MmGetPhysicalAddress(void *base) { return (pmap_extract(kernel_map->pmap, (vm_offset_t)base)); } void * MmGetSystemRoutineAddress(ustr) unicode_string *ustr; { ansi_string astr; if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE)) return (NULL); return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf)); } uint8_t MmIsAddressValid(vaddr) void *vaddr; { if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr)) return (TRUE); return (FALSE); } void * MmMapIoSpace(paddr, len, cachetype) uint64_t paddr; uint32_t len; uint32_t cachetype; { devclass_t nexus_class; device_t *nexus_devs, devp; int nexus_count = 0; device_t matching_dev = NULL; struct resource *res; int i; vm_offset_t v; /* There will always be at least one nexus. */ nexus_class = devclass_find("nexus"); devclass_get_devices(nexus_class, &nexus_devs, &nexus_count); for (i = 0; i < nexus_count; i++) { devp = nexus_devs[i]; matching_dev = ntoskrnl_finddev(devp, paddr, &res); if (matching_dev) break; } free(nexus_devs, M_TEMP); if (matching_dev == NULL) return (NULL); v = (vm_offset_t)rman_get_virtual(res); if (paddr > rman_get_start(res)) v += paddr - rman_get_start(res); return ((void *)v); } void MmUnmapIoSpace(vaddr, len) void *vaddr; size_t len; { } static device_t ntoskrnl_finddev(dev, paddr, res) device_t dev; uint64_t paddr; struct resource **res; { device_t *children = NULL; device_t matching_dev; int childcnt; struct resource *r; struct resource_list *rl; struct resource_list_entry *rle; uint32_t flags; int i; /* We only want devices that have been successfully probed. 
*/ if (device_is_alive(dev) == FALSE) return (NULL); rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); if (rl != NULL) { STAILQ_FOREACH(rle, rl, link) { r = rle->res; if (r == NULL) continue; flags = rman_get_flags(r); if (rle->type == SYS_RES_MEMORY && paddr >= rman_get_start(r) && paddr <= rman_get_end(r)) { if (!(flags & RF_ACTIVE)) bus_activate_resource(dev, SYS_RES_MEMORY, 0, r); *res = r; return (dev); } } } /* * If this device has children, do another * level of recursion to inspect them. */ device_get_children(dev, &children, &childcnt); for (i = 0; i < childcnt; i++) { matching_dev = ntoskrnl_finddev(children[i], paddr, res); if (matching_dev != NULL) { free(children, M_TEMP); return (matching_dev); } } /* Won't somebody please think of the children! */ if (children != NULL) free(children, M_TEMP); return (NULL); } /* * Workitems are unlike DPCs, in that they run in a user-mode thread * context rather than at DISPATCH_LEVEL in kernel context. In our * case we run them in kernel context anyway. 
 */
/*
 * Per-queue workitem worker thread. Each kdpc_queue gets one of these
 * kprocs; it initializes the queue's dispatch list, lock and proc event,
 * then loops draining kq_disp. The spinlock is dropped around each
 * MSCALL2() invocation so a workitem handler may itself queue work.
 * Setting kq_exit (and signalling kq_proc) makes the thread exit.
 */
static void
ntoskrnl_workitem_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*iw;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);

	while (1) {
		/* Sleep until somebody queues work (or asks us to exit). */
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			/* Ack the shutdown request for the waiter. */
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList(&kq->kq_disp);
			iw = CONTAINING_RECORD(l,
			    io_workitem, iw_listentry);
			/* Re-init so IoQueueWorkItem() can re-queue it. */
			InitializeListHead((&iw->iw_listentry));
			if (iw->iw_func == NULL)
				continue;
			/* Drop the lock while calling into the driver. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
			KeAcquireSpinLock(&kq->kq_lock, &irql);
		}

		KeReleaseSpinLock(&kq->kq_lock, irql);
	}

	kproc_exit(0);
	return; /* notreached */
}

/*
 * Convert an ASCII string to a 32-bit unsigned integer, honoring an
 * optional sign and, when base == 0, the Windows-style 0b/0o/0x prefix.
 * Only bases 2, 8, 10 and 16 are accepted. Leading characters <= ' '
 * are skipped (this mirrors the Windows behavior of treating all
 * control characters as whitespace).
 */
static ndis_status
RtlCharToInteger(src, base, val)
	const char		*src;
	uint32_t		base;
	uint32_t		*val;
{
	int negative = 0;
	uint32_t res;

	if (!src || !val)
		return (STATUS_ACCESS_VIOLATION);
	while (*src != '\0' && *src <= ' ')
		src++;
	if (*src == '+')
		src++;
	else if (*src == '-') {
		src++;
		negative = 1;
	}
	if (base == 0) {
		base = 10;
		if (*src == '0') {
			src++;
			if (*src == 'b') {
				base = 2;
				src++;
			} else if (*src == 'o') {
				base = 8;
				src++;
			} else if (*src == 'x') {
				base = 16;
				src++;
			}
		}
	} else if (!(base == 2 || base == 8 || base == 10 || base == 16))
		return (STATUS_INVALID_PARAMETER);

	for (res = 0; *src; src++) {
		int v;
		if (isdigit(*src))
			v = *src - '0';
		else if (isxdigit(*src))
			v = tolower(*src) - 'a' + 10;
		else
			v = base;	/* force the range check to fail */
		if (v >= base)
			return (STATUS_INVALID_PARAMETER);
		res = res * base + v;
	}
	/* NOTE: unsigned negation; '-1' yields 0xFFFFFFFF, as on Windows. */
	*val = negative ? -res : res;
	return (STATUS_SUCCESS);
}

/*
 * Tear down the workitem threads: flag each queue for exit, kick its
 * proc event, and poll until the thread acknowledges by clearing
 * kq_exit in ntoskrnl_workitem_thread().
 */
static void
ntoskrnl_destroy_workitem_threads(void)
{
	kdpc_queue		*kq;
	int			i;

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		kq->kq_exit = 1;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
	}
}

/*
 * Allocate a workitem from the UMA zone and bind it round-robin to one
 * of the workitem queues (wq_idx is advanced under the dispatch lock).
 * Returns NULL on allocation failure.
 */
io_workitem *
IoAllocateWorkItem(dobj)
	device_object		*dobj;
{
	io_workitem		*iw;

	iw = uma_zalloc(iw_zone, M_NOWAIT);
	if (iw == NULL)
		return (NULL);

	InitializeListHead(&iw->iw_listentry);
	iw->iw_dobj = dobj;

	mtx_lock(&ntoskrnl_dispatchlock);
	iw->iw_idx = wq_idx;
	WORKIDX_INC(wq_idx);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (iw);
}

/* Release a workitem previously obtained via IoAllocateWorkItem(). */
void
IoFreeWorkItem(iw)
	io_workitem		*iw;
{
	uma_zfree(iw_zone, iw);
}

/*
 * Queue a workitem on the queue it was bound to at allocation time and
 * wake that queue's worker thread. A workitem already on the dispatch
 * list is silently ignored -- inserting it twice would corrupt the list.
 */
void
IoQueueWorkItem(iw, iw_func, qtype, ctx)
	io_workitem		*iw;
	io_workitem_func	iw_func;
	uint32_t		qtype;
	void			*ctx;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	kq = wq_queues + iw->iw_idx;

	KeAcquireSpinLock(&kq->kq_lock, &irql);

	/*
	 * Traverse the list and make sure this workitem hasn't
	 * already been inserted. Queuing the same workitem
	 * twice will hose the list but good.
	 */

	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur == iw) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}

	iw->iw_func = iw_func;
	iw->iw_ctx = ctx;

	InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
	KeReleaseSpinLock(&kq->kq_lock, irql);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
}

/*
 * Trampoline used by ExQueueWorkItem(): the "device object" is really
 * the legacy work_queue_item. Free the bridging io_workitem, then call
 * the legacy handler with its own context.
 */
static void
ntoskrnl_workitem(dobj, arg)
	device_object		*dobj;
	void			*arg;
{
	io_workitem		*iw;
	work_queue_item		*w;
	work_item_func		f;

	iw = arg;
	w = (work_queue_item *)dobj;
	f = (work_item_func)w->wqi_func;
	uma_zfree(iw_zone, iw);
	MSCALL2(f, w, w->wqi_ctx);
}

/*
 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 * warns that it's unsafe and to use IoQueueWorkItem() instead.
The * problem with ExQueueWorkItem() is that it can't guard against * the condition where a driver submits a job to the work queue and * is then unloaded before the job is able to run. IoQueueWorkItem() * acquires a reference to the device's device_object via the * object manager and retains it until after the job has completed, * which prevents the driver from being unloaded before the job * runs. (We don't currently support this behavior, though hopefully * that will change once the object manager API is fleshed out a bit.) * * Having said all that, the ExQueueWorkItem() API remains, because * there are still other parts of Windows that use it, including * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem(). * We fake up the ExQueueWorkItem() API on top of our implementation * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively * for ExQueueWorkItem() jobs, and we pass a pointer to the work * queue item (provided by the caller) in to IoAllocateWorkItem() * instead of the device_object. We need to save this pointer so * we can apply a sanity check: as with the DPC queue and other * workitem queues, we can't allow the same work queue item to * be queued twice. If it's already pending, we silently return */ void ExQueueWorkItem(w, qtype) work_queue_item *w; uint32_t qtype; { io_workitem *iw; io_workitem_func iwf; kdpc_queue *kq; list_entry *l; io_workitem *cur; uint8_t irql; /* * We need to do a special sanity test to make sure * the ExQueueWorkItem() API isn't used to queue * the same workitem twice. Rather than checking the * io_workitem pointer itself, we test the attached * device object, which is really a pointer to the * legacy work queue item structure. */ kq = wq_queues + WORKITEM_LEGACY_THREAD; KeAcquireSpinLock(&kq->kq_lock, &irql); l = kq->kq_disp.nle_flink; while (l != &kq->kq_disp) { cur = CONTAINING_RECORD(l, io_workitem, iw_listentry); if (cur->iw_dobj == (device_object *)w) { /* Already queued -- do nothing. 
*/ KeReleaseSpinLock(&kq->kq_lock, irql); return; } l = l->nle_flink; } KeReleaseSpinLock(&kq->kq_lock, irql); iw = IoAllocateWorkItem((device_object *)w); if (iw == NULL) return; iw->iw_idx = WORKITEM_LEGACY_THREAD; iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem); IoQueueWorkItem(iw, iwf, qtype, iw); } static void RtlZeroMemory(dst, len) void *dst; size_t len; { bzero(dst, len); } static void RtlSecureZeroMemory(dst, len) void *dst; size_t len; { memset(dst, 0, len); } static void RtlFillMemory(void *dst, size_t len, uint8_t c) { memset(dst, c, len); } static void RtlMoveMemory(dst, src, len) void *dst; const void *src; size_t len; { memmove(dst, src, len); } static void RtlCopyMemory(dst, src, len) void *dst; const void *src; size_t len; { bcopy(src, dst, len); } static size_t RtlCompareMemory(s1, s2, len) const void *s1; const void *s2; size_t len; { size_t i; uint8_t *m1, *m2; m1 = __DECONST(char *, s1); m2 = __DECONST(char *, s2); for (i = 0; i < len && m1[i] == m2[i]; i++); return (i); } void RtlInitAnsiString(dst, src) ansi_string *dst; char *src; { ansi_string *a; a = dst; if (a == NULL) return; if (src == NULL) { a->as_len = a->as_maxlen = 0; a->as_buf = NULL; } else { a->as_buf = src; a->as_len = a->as_maxlen = strlen(src); } } void RtlInitUnicodeString(dst, src) unicode_string *dst; uint16_t *src; { unicode_string *u; int i; u = dst; if (u == NULL) return; if (src == NULL) { u->us_len = u->us_maxlen = 0; u->us_buf = NULL; } else { i = 0; while(src[i] != 0) i++; u->us_buf = src; u->us_len = u->us_maxlen = i * 2; } } ndis_status RtlUnicodeStringToInteger(ustr, base, val) unicode_string *ustr; uint32_t base; uint32_t *val; { uint16_t *uchr; int len, neg = 0; char abuf[64]; char *astr; uchr = ustr->us_buf; len = ustr->us_len; bzero(abuf, sizeof(abuf)); if ((char)((*uchr) & 0xFF) == '-') { neg = 1; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == '+') { neg = 0; uchr++; len -= 2; } if (base == 0) { if ((char)((*uchr) & 0xFF) == 
'b') { base = 2; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == 'o') { base = 8; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == 'x') { base = 16; uchr++; len -= 2; } else base = 10; } astr = abuf; if (neg) { strcpy(astr, "-"); astr++; } ntoskrnl_unicode_to_ascii(uchr, astr, len); *val = strtoul(abuf, NULL, base); return (STATUS_SUCCESS); } void RtlFreeUnicodeString(ustr) unicode_string *ustr; { if (ustr->us_buf == NULL) return; ExFreePool(ustr->us_buf); ustr->us_buf = NULL; } void RtlFreeAnsiString(astr) ansi_string *astr; { if (astr->as_buf == NULL) return; ExFreePool(astr->as_buf); astr->as_buf = NULL; } static int atoi(str) const char *str; { return (int)strtol(str, (char **)NULL, 10); } static long atol(str) const char *str; { return strtol(str, (char **)NULL, 10); } static int rand(void) { struct timeval tv; microtime(&tv); srandom(tv.tv_usec); return ((int)random()); } static void srand(seed) unsigned int seed; { srandom(seed); } static uint8_t IoIsWdmVersionAvailable(uint8_t major, uint8_t minor) { if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP) return (TRUE); return (FALSE); } static int32_t IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type, uint32_t mask, void **key) { return (NDIS_STATUS_INVALID_DEVICE_REQUEST); } static ndis_status IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj) unicode_string *name; uint32_t reqaccess; void *fileobj; device_object *devobj; { return (STATUS_SUCCESS); } static ndis_status IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen) device_object *devobj; uint32_t regprop; uint32_t buflen; void *prop; uint32_t *reslen; { driver_object *drv; uint16_t **name; drv = devobj->do_drvobj; switch (regprop) { case DEVPROP_DRIVER_KEYNAME: name = prop; *name = drv->dro_drivername.us_buf; *reslen = drv->dro_drivername.us_len; break; default: return (STATUS_INVALID_PARAMETER_2); break; } return (STATUS_SUCCESS); } static void KeInitializeMutex(kmutex, level) kmutant *kmutex; uint32_t 
level; { InitializeListHead((&kmutex->km_header.dh_waitlisthead)); kmutex->km_abandoned = FALSE; kmutex->km_apcdisable = 1; kmutex->km_header.dh_sigstate = 1; kmutex->km_header.dh_type = DISP_TYPE_MUTANT; kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t); kmutex->km_ownerthread = NULL; } static uint32_t KeReleaseMutex(kmutant *kmutex, uint8_t kwait) { uint32_t prevstate; mtx_lock(&ntoskrnl_dispatchlock); prevstate = kmutex->km_header.dh_sigstate; if (kmutex->km_ownerthread != curthread) { mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_MUTANT_NOT_OWNED); } kmutex->km_header.dh_sigstate++; kmutex->km_abandoned = FALSE; if (kmutex->km_header.dh_sigstate == 1) { kmutex->km_ownerthread = NULL; ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT); } mtx_unlock(&ntoskrnl_dispatchlock); return (prevstate); } static uint32_t KeReadStateMutex(kmutex) kmutant *kmutex; { return (kmutex->km_header.dh_sigstate); } void KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state) { InitializeListHead((&kevent->k_header.dh_waitlisthead)); kevent->k_header.dh_sigstate = state; if (type == EVENT_TYPE_NOTIFY) kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT; else kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT; kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t); } uint32_t KeResetEvent(kevent) nt_kevent *kevent; { uint32_t prevstate; mtx_lock(&ntoskrnl_dispatchlock); prevstate = kevent->k_header.dh_sigstate; kevent->k_header.dh_sigstate = FALSE; mtx_unlock(&ntoskrnl_dispatchlock); return (prevstate); } uint32_t KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait) { uint32_t prevstate; wait_block *w; nt_dispatch_header *dh; struct thread *td; wb_ext *we; mtx_lock(&ntoskrnl_dispatchlock); prevstate = kevent->k_header.dh_sigstate; dh = &kevent->k_header; if (IsListEmpty(&dh->dh_waitlisthead)) /* * If there's nobody in the waitlist, just set * the state to signalled. 
*/ dh->dh_sigstate = 1; else { /* * Get the first waiter. If this is a synchronization * event, just wake up that one thread (don't bother * setting the state to signalled since we're supposed * to automatically clear synchronization events anyway). * * If it's a notification event, or the first * waiter is doing a WAITTYPE_ALL wait, go through * the full wait satisfaction process. */ w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink, wait_block, wb_waitlist); we = w->wb_ext; td = we->we_td; if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT || w->wb_waittype == WAITTYPE_ALL) { if (prevstate == 0) { dh->dh_sigstate = 1; ntoskrnl_waittest(dh, increment); } } else { w->wb_awakened |= TRUE; cv_broadcastpri(&we->we_cv, (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ? w->wb_oldpri - (increment * 4) : PRI_MIN_KERN); } } mtx_unlock(&ntoskrnl_dispatchlock); return (prevstate); } void KeClearEvent(kevent) nt_kevent *kevent; { kevent->k_header.dh_sigstate = FALSE; } uint32_t KeReadStateEvent(kevent) nt_kevent *kevent; { return (kevent->k_header.dh_sigstate); } /* * The object manager in Windows is responsible for managing * references and access to various types of objects, including * device_objects, events, threads, timers and so on. However, * there's a difference in the way objects are handled in user * mode versus kernel mode. * * In user mode (i.e. Win32 applications), all objects are * managed by the object manager. For example, when you create * a timer or event object, you actually end up with an * object_header (for the object manager's bookkeeping * purposes) and an object body (which contains the actual object * structure, e.g. ktimer, kevent, etc...). This allows Windows * to manage resource quotas and to enforce access restrictions * on basically every kind of system object handled by the kernel. * * However, in kernel mode, you only end up using the object * manager some of the time. 
For example, in a driver, you create * a timer object by simply allocating the memory for a ktimer * structure and initializing it with KeInitializeTimer(). Hence, * the timer has no object_header and no reference counting or * security/resource checks are done on it. The assumption in * this case is that if you're running in kernel mode, you know * what you're doing, and you're already at an elevated privilege * anyway. * * There are some exceptions to this. The two most important ones * for our purposes are device_objects and threads. We need to use * the object manager to do reference counting on device_objects, * and for threads, you can only get a pointer to a thread's * dispatch header by using ObReferenceObjectByHandle() on the * handle returned by PsCreateSystemThread(). */ static ndis_status ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype, uint8_t accessmode, void **object, void **handleinfo) { nt_objref *nr; nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO); if (nr == NULL) return (STATUS_INSUFFICIENT_RESOURCES); InitializeListHead((&nr->no_dh.dh_waitlisthead)); nr->no_obj = handle; nr->no_dh.dh_type = DISP_TYPE_THREAD; nr->no_dh.dh_sigstate = 0; nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) / sizeof(uint32_t)); TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link); *object = nr; return (STATUS_SUCCESS); } static void ObfDereferenceObject(object) void *object; { nt_objref *nr; nr = object; TAILQ_REMOVE(&ntoskrnl_reflist, nr, link); free(nr, M_DEVBUF); } static uint32_t ZwClose(handle) ndis_handle handle; { return (STATUS_SUCCESS); } static uint32_t WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf) uint32_t traceclass; void *traceinfo; uint32_t infolen; uint32_t reqlen; void *buf; { return (STATUS_NOT_FOUND); } static uint32_t WmiTraceMessage(uint64_t loghandle, uint32_t messageflags, void *guid, uint16_t messagenum, ...) 
{ return (STATUS_SUCCESS); } static uint32_t IoWMIRegistrationControl(dobj, action) device_object *dobj; uint32_t action; { return (STATUS_SUCCESS); } /* * This is here just in case the thread returns without calling * PsTerminateSystemThread(). */ static void ntoskrnl_thrfunc(arg) void *arg; { thread_context *thrctx; uint32_t (*tfunc)(void *); void *tctx; uint32_t rval; thrctx = arg; tfunc = thrctx->tc_thrfunc; tctx = thrctx->tc_thrctx; free(thrctx, M_TEMP); rval = MSCALL1(tfunc, tctx); PsTerminateSystemThread(rval); return; /* notreached */ } static ndis_status PsCreateSystemThread(handle, reqaccess, objattrs, phandle, clientid, thrfunc, thrctx) ndis_handle *handle; uint32_t reqaccess; void *objattrs; ndis_handle phandle; void *clientid; void *thrfunc; void *thrctx; { int error; thread_context *tc; struct proc *p; tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT); if (tc == NULL) return (STATUS_INSUFFICIENT_RESOURCES); tc->tc_thrctx = thrctx; tc->tc_thrfunc = thrfunc; error = kproc_create(ntoskrnl_thrfunc, tc, &p, RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth); if (error) { free(tc, M_TEMP); return (STATUS_INSUFFICIENT_RESOURCES); } *handle = p; ntoskrnl_kth++; return (STATUS_SUCCESS); } /* * In Windows, the exit of a thread is an event that you're allowed * to wait on, assuming you've obtained a reference to the thread using * ObReferenceObjectByHandle(). Unfortunately, the only way we can * simulate this behavior is to register each thread we create in a * reference list, and if someone holds a reference to us, we poke * them. 
*/ static ndis_status PsTerminateSystemThread(status) ndis_status status; { struct nt_objref *nr; mtx_lock(&ntoskrnl_dispatchlock); TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) { if (nr->no_obj != curthread->td_proc) continue; nr->no_dh.dh_sigstate = 1; ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT); break; } mtx_unlock(&ntoskrnl_dispatchlock); ntoskrnl_kth--; kproc_exit(0); return (0); /* notreached */ } static uint32_t DbgPrint(char *fmt, ...) { va_list ap; if (bootverbose) { va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } return (STATUS_SUCCESS); } static void DbgBreakPoint(void) { kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint"); } static void KeBugCheckEx(code, param1, param2, param3, param4) uint32_t code; u_long param1; u_long param2; u_long param3; u_long param4; { panic("KeBugCheckEx: STOP 0x%X", code); } static void ntoskrnl_timercall(arg) void *arg; { ktimer *timer; struct timeval tv; kdpc *dpc; mtx_lock(&ntoskrnl_dispatchlock); timer = arg; #ifdef NTOSKRNL_DEBUG_TIMERS ntoskrnl_timer_fires++; #endif ntoskrnl_remove_timer(timer); /* * This should never happen, but complain * if it does. */ if (timer->k_header.dh_inserted == FALSE) { mtx_unlock(&ntoskrnl_dispatchlock); printf("NTOS: timer %p fired even though " "it was canceled\n", timer); return; } /* Mark the timer as no longer being on the timer queue. */ timer->k_header.dh_inserted = FALSE; /* Now signal the object and satisfy any waits on it. */ timer->k_header.dh_sigstate = 1; ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT); /* * If this is a periodic timer, re-arm it * so it will fire again. We do this before * calling any deferred procedure calls because * it's possible the DPC might cancel the timer, * in which case it would be wrong for us to * re-arm it again afterwards. 
*/ if (timer->k_period) { tv.tv_sec = 0; tv.tv_usec = timer->k_period * 1000; timer->k_header.dh_inserted = TRUE; ntoskrnl_insert_timer(timer, tvtohz(&tv)); #ifdef NTOSKRNL_DEBUG_TIMERS ntoskrnl_timer_reloads++; #endif } dpc = timer->k_dpc; mtx_unlock(&ntoskrnl_dispatchlock); /* If there's a DPC associated with the timer, queue it up. */ if (dpc != NULL) KeInsertQueueDpc(dpc, NULL, NULL); } #ifdef NTOSKRNL_DEBUG_TIMERS static int sysctl_show_timers(SYSCTL_HANDLER_ARGS) { int ret; ret = 0; ntoskrnl_show_timers(); return (sysctl_handle_int(oidp, &ret, 0, req)); } static void ntoskrnl_show_timers() { int i = 0; list_entry *l; mtx_lock_spin(&ntoskrnl_calllock); l = ntoskrnl_calllist.nle_flink; while(l != &ntoskrnl_calllist) { i++; l = l->nle_flink; } mtx_unlock_spin(&ntoskrnl_calllock); printf("\n"); printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS); printf("timer sets: %qu\n", ntoskrnl_timer_sets); printf("timer reloads: %qu\n", ntoskrnl_timer_reloads); printf("timer cancels: %qu\n", ntoskrnl_timer_cancels); printf("timer fires: %qu\n", ntoskrnl_timer_fires); printf("\n"); } #endif /* * Must be called with dispatcher lock held. */ static void ntoskrnl_insert_timer(timer, ticks) ktimer *timer; int ticks; { callout_entry *e; list_entry *l; struct callout *c; /* * Try and allocate a timer. 
 */
	/* Pull a free callout_entry off the global free list. */
	mtx_lock_spin(&ntoskrnl_calllock);
	if (IsListEmpty(&ntoskrnl_calllist)) {
		mtx_unlock_spin(&ntoskrnl_calllock);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_show_timers();
#endif
		/* Fixed-size pool (NTOSKRNL_TIMEOUTS) is exhausted. */
		panic("out of timers!");
	}

	l = RemoveHeadList(&ntoskrnl_calllist);
	mtx_unlock_spin(&ntoskrnl_calllock);

	e = CONTAINING_RECORD(l, callout_entry, ce_list);
	c = &e->ce_callout;

	timer->k_callout = c;

	callout_init(c, 1);
	callout_reset(c, ticks, ntoskrnl_timercall, timer);
}

/*
 * Stop a timer's callout and return its callout_entry to the free
 * list. Must be called with the dispatcher lock held (see
 * ntoskrnl_insert_timer()).
 */
static void
ntoskrnl_remove_timer(timer)
	ktimer			*timer;
{
	callout_entry		*e;

	e = (callout_entry *)timer->k_callout;
	callout_stop(timer->k_callout);

	mtx_lock_spin(&ntoskrnl_calllock);
	InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
	mtx_unlock_spin(&ntoskrnl_calllock);
}

/* Initialize a timer as a notification timer (Windows default). */
void
KeInitializeTimer(timer)
	ktimer			*timer;
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
}

/*
 * Initialize a timer's dispatch header as either a notification or a
 * synchronization timer, unsignalled and not yet on the timer queue.
 */
void
KeInitializeTimerEx(timer, type)
	ktimer			*timer;
	uint32_t		type;
{
	if (timer == NULL)
		return;

	bzero((char *)timer, sizeof(ktimer));
	InitializeListHead((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	if (type == EVENT_TYPE_NOTIFY)
		timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
	else
		timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
	timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
}

/*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
 * properties:
 * - It runs at DISPATCH_LEVEL.
 * - It can have one of 3 importance values that control when it
 *   runs relative to other DPCs in the queue.
 * - On SMP systems, it can be set to run on a specific processor.
 * In order to satisfy the last property, we create a DPC thread for
 * each CPU in the system and bind it to that CPU. Each thread
 * maintains a single dispatch queue; importance only controls whether
 * a DPC is inserted at the head or the tail of that queue (see
 * ntoskrnl_insert_dpc()).
 *
 * In Windows, interrupt handlers run as DPCs. (Not to be confused
 * with ISRs, which run in interrupt context and can preempt DPCs.)
/*
 * Per-CPU DPC worker thread: drains kq_disp, invoking each DPC via
 * MSCALL4() with the queue lock held at DPC level but released around
 * the call. kq_done is signalled after each drain so that
 * KeFlushQueuedDpcs() can wait for the queue to empty.
 */
static void
ntoskrnl_dpc_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	kdpc			*d;
	list_entry		*l;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	kq->kq_running = FALSE;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
	KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);

	/*
	 * Elevate our priority. DPCs are used to run interrupt
	 * handlers, and they should trigger as soon as possible
	 * once scheduled by an ISR.
	 */

	thread_lock(curthread);
#ifdef NTOSKRNL_MULTIPLE_DPCS
	sched_bind(curthread, kq->kq_cpu);
#endif
	sched_prio(curthread, PRI_MIN_KERN);
	thread_unlock(curthread);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			/* Ack the shutdown request for the waiter. */
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		kq->kq_running = TRUE;

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList((&kq->kq_disp));
			d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
			/* Re-init so the DPC can be queued again. */
			InitializeListHead((&d->k_dpclistentry));
			/* Stay at DPC level, but drop the lock for the call. */
			KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
			MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
			    d->k_sysarg1, d->k_sysarg2);
			KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
		}

		kq->kq_running = FALSE;

		KeReleaseSpinLock(&kq->kq_lock, irql);

		KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
	}

	kproc_exit(0);
	return; /* notreached */
}

/*
 * Shut down the DPC threads by queueing a no-op DPC (function NULL) to
 * each queue after setting kq_exit, then polling until the thread
 * acknowledges by clearing kq_exit.
 */
static void
ntoskrnl_destroy_dpc_threads(void)
{
	kdpc_queue		*kq;
	kdpc			dpc;
	int			i;

	kq = kq_queues;
#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq += i;

		kq->kq_exit = 1;
		KeInitializeDpc(&dpc, NULL, NULL);
		KeSetTargetProcessorDpc(&dpc, i);
		KeInsertQueueDpc(&dpc, NULL, NULL);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
	}
}

/*
 * Insert a DPC into a dispatch queue unless it is already present.
 * Low-importance DPCs go to the tail, others to the head. Returns
 * TRUE if inserted, FALSE if it was already queued. Caller holds the
 * queue lock.
 */
static uint8_t
ntoskrnl_insert_dpc(head, dpc)
	list_entry		*head;
	kdpc			*dpc;
{
	list_entry		*l;
	kdpc			*d;

	l = head->nle_flink;
while (l != head) { d = CONTAINING_RECORD(l, kdpc, k_dpclistentry); if (d == dpc) return (FALSE); l = l->nle_flink; } if (dpc->k_importance == KDPC_IMPORTANCE_LOW) InsertTailList((head), (&dpc->k_dpclistentry)); else InsertHeadList((head), (&dpc->k_dpclistentry)); return (TRUE); } void KeInitializeDpc(dpc, dpcfunc, dpcctx) kdpc *dpc; void *dpcfunc; void *dpcctx; { if (dpc == NULL) return; dpc->k_deferedfunc = dpcfunc; dpc->k_deferredctx = dpcctx; dpc->k_num = KDPC_CPU_DEFAULT; dpc->k_importance = KDPC_IMPORTANCE_MEDIUM; InitializeListHead((&dpc->k_dpclistentry)); } uint8_t KeInsertQueueDpc(dpc, sysarg1, sysarg2) kdpc *dpc; void *sysarg1; void *sysarg2; { kdpc_queue *kq; uint8_t r; uint8_t irql; if (dpc == NULL) return (FALSE); kq = kq_queues; #ifdef NTOSKRNL_MULTIPLE_DPCS KeRaiseIrql(DISPATCH_LEVEL, &irql); /* * By default, the DPC is queued to run on the same CPU * that scheduled it. */ if (dpc->k_num == KDPC_CPU_DEFAULT) kq += curthread->td_oncpu; else kq += dpc->k_num; KeAcquireSpinLockAtDpcLevel(&kq->kq_lock); #else KeAcquireSpinLock(&kq->kq_lock, &irql); #endif r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc); if (r == TRUE) { dpc->k_sysarg1 = sysarg1; dpc->k_sysarg2 = sysarg2; } KeReleaseSpinLock(&kq->kq_lock, irql); if (r == FALSE) return (r); KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE); return (r); } uint8_t KeRemoveQueueDpc(dpc) kdpc *dpc; { kdpc_queue *kq; uint8_t irql; if (dpc == NULL) return (FALSE); #ifdef NTOSKRNL_MULTIPLE_DPCS KeRaiseIrql(DISPATCH_LEVEL, &irql); kq = kq_queues + dpc->k_num; KeAcquireSpinLockAtDpcLevel(&kq->kq_lock); #else kq = kq_queues; KeAcquireSpinLock(&kq->kq_lock, &irql); #endif if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) { KeReleaseSpinLockFromDpcLevel(&kq->kq_lock); KeLowerIrql(irql); return (FALSE); } RemoveEntryList((&dpc->k_dpclistentry)); InitializeListHead((&dpc->k_dpclistentry)); KeReleaseSpinLock(&kq->kq_lock, irql); return (TRUE); } void KeSetImportanceDpc(dpc, imp) kdpc *dpc; uint32_t imp; { if (imp != 
KDPC_IMPORTANCE_LOW && imp != KDPC_IMPORTANCE_MEDIUM && imp != KDPC_IMPORTANCE_HIGH) return; dpc->k_importance = (uint8_t)imp; } void KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu) { if (cpu > mp_ncpus) return; dpc->k_num = cpu; } void KeFlushQueuedDpcs(void) { kdpc_queue *kq; int i; /* * Poke each DPC queue and wait * for them to drain. */ #ifdef NTOSKRNL_MULTIPLE_DPCS for (i = 0; i < mp_ncpus; i++) { #else for (i = 0; i < 1; i++) { #endif kq = kq_queues + i; KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE); KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL); } } uint32_t KeGetCurrentProcessorNumber(void) { return ((uint32_t)curthread->td_oncpu); } uint8_t KeSetTimerEx(timer, duetime, period, dpc) ktimer *timer; int64_t duetime; uint32_t period; kdpc *dpc; { struct timeval tv; uint64_t curtime; uint8_t pending; if (timer == NULL) return (FALSE); mtx_lock(&ntoskrnl_dispatchlock); if (timer->k_header.dh_inserted == TRUE) { ntoskrnl_remove_timer(timer); #ifdef NTOSKRNL_DEBUG_TIMERS ntoskrnl_timer_cancels++; #endif timer->k_header.dh_inserted = FALSE; pending = TRUE; } else pending = FALSE; timer->k_duetime = duetime; timer->k_period = period; timer->k_header.dh_sigstate = FALSE; timer->k_dpc = dpc; if (duetime < 0) { tv.tv_sec = - (duetime) / 10000000; tv.tv_usec = (- (duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((duetime) - curtime) / 10000000; tv.tv_usec = ((duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } timer->k_header.dh_inserted = TRUE; ntoskrnl_insert_timer(timer, tvtohz(&tv)); #ifdef NTOSKRNL_DEBUG_TIMERS ntoskrnl_timer_sets++; #endif mtx_unlock(&ntoskrnl_dispatchlock); return (pending); } uint8_t KeSetTimer(timer, duetime, dpc) ktimer *timer; int64_t duetime; kdpc *dpc; { return (KeSetTimerEx(timer, duetime, 0, dpc)); } /* * The Windows DDK documentation seems to say that cancelling * a timer that has a DPC will result in the DPC also being * 
cancelled, but this isn't really the case. */ uint8_t KeCancelTimer(timer) ktimer *timer; { uint8_t pending; if (timer == NULL) return (FALSE); mtx_lock(&ntoskrnl_dispatchlock); pending = timer->k_header.dh_inserted; if (timer->k_header.dh_inserted == TRUE) { timer->k_header.dh_inserted = FALSE; ntoskrnl_remove_timer(timer); #ifdef NTOSKRNL_DEBUG_TIMERS ntoskrnl_timer_cancels++; #endif } mtx_unlock(&ntoskrnl_dispatchlock); return (pending); } uint8_t KeReadStateTimer(timer) ktimer *timer; { return (timer->k_header.dh_sigstate); } static int32_t KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval) { ktimer timer; if (wait_mode != 0) panic("invalid wait_mode %d", wait_mode); KeInitializeTimer(&timer); KeSetTimer(&timer, *interval, NULL); KeWaitForSingleObject(&timer, 0, 0, alertable, NULL); return STATUS_SUCCESS; } static uint64_t KeQueryInterruptTime(void) { int ticks; struct timeval tv; getmicrouptime(&tv); ticks = tvtohz(&tv); return ticks * howmany(10000000, hz); } static struct thread * KeGetCurrentThread(void) { return curthread; } static int32_t KeSetPriorityThread(td, pri) struct thread *td; int32_t pri; { int32_t old; if (td == NULL) return LOW_REALTIME_PRIORITY; if (td->td_priority <= PRI_MIN_KERN) old = HIGH_PRIORITY; else if (td->td_priority >= PRI_MAX_KERN) old = LOW_PRIORITY; else old = LOW_REALTIME_PRIORITY; thread_lock(td); if (pri == HIGH_PRIORITY) sched_prio(td, PRI_MIN_KERN); if (pri == LOW_REALTIME_PRIORITY) sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2); if (pri == LOW_PRIORITY) sched_prio(td, PRI_MAX_KERN); thread_unlock(td); return old; } static void dummy() { printf("ntoskrnl dummy called...\n"); } image_patch_table ntoskrnl_functbl[] = { IMPORT_SFUNC(RtlZeroMemory, 2), IMPORT_SFUNC(RtlSecureZeroMemory, 2), IMPORT_SFUNC(RtlFillMemory, 3), IMPORT_SFUNC(RtlMoveMemory, 3), IMPORT_SFUNC(RtlCharToInteger, 3), IMPORT_SFUNC(RtlCopyMemory, 3), IMPORT_SFUNC(RtlCopyString, 2), IMPORT_SFUNC(RtlCompareMemory, 
3), IMPORT_SFUNC(RtlEqualUnicodeString, 3), IMPORT_SFUNC(RtlCopyUnicodeString, 2), IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3), IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3), IMPORT_SFUNC(RtlInitAnsiString, 2), IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2), IMPORT_SFUNC(RtlInitUnicodeString, 2), IMPORT_SFUNC(RtlFreeAnsiString, 1), IMPORT_SFUNC(RtlFreeUnicodeString, 1), IMPORT_SFUNC(RtlUnicodeStringToInteger, 3), IMPORT_CFUNC(sprintf, 0), IMPORT_CFUNC(vsprintf, 0), IMPORT_CFUNC_MAP(_snprintf, snprintf, 0), IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0), IMPORT_CFUNC(DbgPrint, 0), IMPORT_SFUNC(DbgBreakPoint, 0), IMPORT_SFUNC(KeBugCheckEx, 5), IMPORT_CFUNC(strncmp, 0), IMPORT_CFUNC(strcmp, 0), IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0), IMPORT_CFUNC(strncpy, 0), IMPORT_CFUNC(strcpy, 0), IMPORT_CFUNC(strlen, 0), IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0), IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0), IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0), IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0), IMPORT_CFUNC_MAP(strchr, index, 0), IMPORT_CFUNC_MAP(strrchr, rindex, 0), IMPORT_CFUNC(memcpy, 0), IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0), IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0), IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0), IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4), IMPORT_SFUNC(IoGetDriverObjectExtension, 2), IMPORT_FFUNC(IofCallDriver, 2), IMPORT_FFUNC(IofCompleteRequest, 2), IMPORT_SFUNC(IoAcquireCancelSpinLock, 1), IMPORT_SFUNC(IoReleaseCancelSpinLock, 1), IMPORT_SFUNC(IoCancelIrp, 1), IMPORT_SFUNC(IoConnectInterrupt, 11), IMPORT_SFUNC(IoDisconnectInterrupt, 1), IMPORT_SFUNC(IoCreateDevice, 7), IMPORT_SFUNC(IoDeleteDevice, 1), IMPORT_SFUNC(IoGetAttachedDevice, 1), IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2), IMPORT_SFUNC(IoDetachDevice, 1), IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7), IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6), IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9), IMPORT_SFUNC(IoAllocateIrp, 2), IMPORT_SFUNC(IoReuseIrp, 2), 
IMPORT_SFUNC(IoMakeAssociatedIrp, 2), IMPORT_SFUNC(IoFreeIrp, 1), IMPORT_SFUNC(IoInitializeIrp, 3), IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1), IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2), IMPORT_SFUNC(KeSynchronizeExecution, 3), IMPORT_SFUNC(KeWaitForSingleObject, 5), IMPORT_SFUNC(KeWaitForMultipleObjects, 8), IMPORT_SFUNC(_allmul, 4), IMPORT_SFUNC(_alldiv, 4), IMPORT_SFUNC(_allrem, 4), IMPORT_RFUNC(_allshr, 0), IMPORT_RFUNC(_allshl, 0), IMPORT_SFUNC(_aullmul, 4), IMPORT_SFUNC(_aulldiv, 4), IMPORT_SFUNC(_aullrem, 4), IMPORT_RFUNC(_aullshr, 0), IMPORT_RFUNC(_aullshl, 0), IMPORT_CFUNC(atoi, 0), IMPORT_CFUNC(atol, 0), IMPORT_CFUNC(rand, 0), IMPORT_CFUNC(srand, 0), IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2), IMPORT_SFUNC(READ_REGISTER_USHORT, 1), IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2), IMPORT_SFUNC(READ_REGISTER_ULONG, 1), IMPORT_SFUNC(READ_REGISTER_UCHAR, 1), IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2), IMPORT_SFUNC(ExInitializePagedLookasideList, 7), IMPORT_SFUNC(ExDeletePagedLookasideList, 1), IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7), IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1), IMPORT_FFUNC(InterlockedPopEntrySList, 1), IMPORT_FFUNC(InitializeSListHead, 1), IMPORT_FFUNC(InterlockedPushEntrySList, 2), IMPORT_SFUNC(ExQueryDepthSList, 1), IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList, 1), IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList, InterlockedPushEntrySList, 2), IMPORT_FFUNC(ExInterlockedPopEntrySList, 2), IMPORT_FFUNC(ExInterlockedPushEntrySList, 3), IMPORT_SFUNC(ExAllocatePoolWithTag, 3), IMPORT_SFUNC(ExFreePoolWithTag, 2), IMPORT_SFUNC(ExFreePool, 1), #ifdef __i386__ IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1), IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1), IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1), #else /* * For AMD64, we can get away with just mapping * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock() * because the calling conventions end up being the same. 
* On i386, we have to be careful because KfAcquireSpinLock() * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't. */ IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1), IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1), IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1), #endif IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1), IMPORT_FFUNC(InterlockedIncrement, 1), IMPORT_FFUNC(InterlockedDecrement, 1), IMPORT_FFUNC(InterlockedExchange, 2), IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2), IMPORT_SFUNC(IoAllocateMdl, 5), IMPORT_SFUNC(IoFreeMdl, 1), IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1), IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3), IMPORT_SFUNC(MmFreeContiguousMemory, 1), IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3), IMPORT_SFUNC(MmSizeOfMdl, 1), IMPORT_SFUNC(MmMapLockedPages, 2), IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6), IMPORT_SFUNC(MmUnmapLockedPages, 2), IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1), IMPORT_SFUNC(MmGetPhysicalAddress, 1), IMPORT_SFUNC(MmGetSystemRoutineAddress, 1), IMPORT_SFUNC(MmIsAddressValid, 1), IMPORT_SFUNC(MmMapIoSpace, 3 + 1), IMPORT_SFUNC(MmUnmapIoSpace, 2), IMPORT_SFUNC(KeInitializeSpinLock, 1), IMPORT_SFUNC(IoIsWdmVersionAvailable, 2), IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4), IMPORT_SFUNC(IoGetDeviceObjectPointer, 4), IMPORT_SFUNC(IoGetDeviceProperty, 5), IMPORT_SFUNC(IoAllocateWorkItem, 1), IMPORT_SFUNC(IoFreeWorkItem, 1), IMPORT_SFUNC(IoQueueWorkItem, 4), IMPORT_SFUNC(ExQueueWorkItem, 2), IMPORT_SFUNC(ntoskrnl_workitem, 2), IMPORT_SFUNC(KeInitializeMutex, 2), IMPORT_SFUNC(KeReleaseMutex, 2), IMPORT_SFUNC(KeReadStateMutex, 1), IMPORT_SFUNC(KeInitializeEvent, 3), IMPORT_SFUNC(KeSetEvent, 3), IMPORT_SFUNC(KeResetEvent, 1), IMPORT_SFUNC(KeClearEvent, 1), IMPORT_SFUNC(KeReadStateEvent, 1), IMPORT_SFUNC(KeInitializeTimer, 1), IMPORT_SFUNC(KeInitializeTimerEx, 2), IMPORT_SFUNC(KeSetTimer, 3), IMPORT_SFUNC(KeSetTimerEx, 4), IMPORT_SFUNC(KeCancelTimer, 1), IMPORT_SFUNC(KeReadStateTimer, 
1), IMPORT_SFUNC(KeInitializeDpc, 3), IMPORT_SFUNC(KeInsertQueueDpc, 3), IMPORT_SFUNC(KeRemoveQueueDpc, 1), IMPORT_SFUNC(KeSetImportanceDpc, 2), IMPORT_SFUNC(KeSetTargetProcessorDpc, 2), IMPORT_SFUNC(KeFlushQueuedDpcs, 0), IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1), IMPORT_SFUNC(ObReferenceObjectByHandle, 6), IMPORT_FFUNC(ObfDereferenceObject, 1), IMPORT_SFUNC(ZwClose, 1), IMPORT_SFUNC(PsCreateSystemThread, 7), IMPORT_SFUNC(PsTerminateSystemThread, 1), IMPORT_SFUNC(IoWMIRegistrationControl, 2), IMPORT_SFUNC(WmiQueryTraceInformation, 5), IMPORT_CFUNC(WmiTraceMessage, 0), IMPORT_SFUNC(KeQuerySystemTime, 1), IMPORT_CFUNC(KeTickCount, 0), IMPORT_SFUNC(KeDelayExecutionThread, 3), IMPORT_SFUNC(KeQueryInterruptTime, 0), IMPORT_SFUNC(KeGetCurrentThread, 0), IMPORT_SFUNC(KeSetPriorityThread, 2), /* * This last entry is a catch-all for any function we haven't * implemented yet. The PE import list patching routine will * use it for any function that doesn't have an explicit match * in this table. */ { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL }, /* End of list. */ { NULL, NULL, NULL } }; Index: head/sys/compat/ndis/subr_pe.c =================================================================== --- head/sys/compat/ndis/subr_pe.c (revision 298827) +++ head/sys/compat/ndis/subr_pe.c (revision 298828) @@ -1,642 +1,642 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This file contains routines for relocating and dynamically linking * executable object code files in the Windows(r) PE (Portable Executable) - * format. In Windows, anything with a .EXE, .DLL or .SYS extention is + * format. In Windows, anything with a .EXE, .DLL or .SYS extension is * considered an executable, and all such files have some structures in * common. The PE format was apparently based largely on COFF but has * mutated significantly over time. We are mainly concerned with .SYS files, * so this module implements only enough routines to be able to parse the * headers and sections of a .SYS object file and perform the necessary * relocations and jump table patching to allow us to call into it * (and to have it call back to us). 
Note that while this module * can handle fixups for imported symbols, it knows nothing about * exporting them. */ #include #include #include #ifdef _KERNEL #include #else #include #include #include #include #include #endif #include static vm_offset_t pe_functbl_match(image_patch_table *, char *); /* * Check for an MS-DOS executable header. All Windows binaries * have a small MS-DOS executable prepended to them to print out * the "This program requires Windows" message. Even .SYS files * have this header, in spite of the fact that you're can't actually * run them directly. */ int pe_get_dos_header(imgbase, hdr) vm_offset_t imgbase; image_dos_header *hdr; { uint16_t signature; if (imgbase == 0 || hdr == NULL) return (EINVAL); signature = *(uint16_t *)imgbase; if (signature != IMAGE_DOS_SIGNATURE) return (ENOEXEC); bcopy ((char *)imgbase, (char *)hdr, sizeof(image_dos_header)); return (0); } /* * Verify that this image has a Windows NT PE signature. */ int pe_is_nt_image(imgbase) vm_offset_t imgbase; { uint32_t signature; image_dos_header *dos_hdr; if (imgbase == 0) return (EINVAL); signature = *(uint16_t *)imgbase; if (signature == IMAGE_DOS_SIGNATURE) { dos_hdr = (image_dos_header *)imgbase; signature = *(uint32_t *)(imgbase + dos_hdr->idh_lfanew); if (signature == IMAGE_NT_SIGNATURE) return (0); } return (ENOEXEC); } /* * Return a copy of the optional header. This contains the * executable entry point and the directory listing which we * need to find the relocations and imports later. 
*/ int pe_get_optional_header(imgbase, hdr) vm_offset_t imgbase; image_optional_header *hdr; { image_dos_header *dos_hdr; image_nt_header *nt_hdr; if (imgbase == 0 || hdr == NULL) return (EINVAL); if (pe_is_nt_image(imgbase)) return (EINVAL); dos_hdr = (image_dos_header *)(imgbase); nt_hdr = (image_nt_header *)(imgbase + dos_hdr->idh_lfanew); bcopy ((char *)&nt_hdr->inh_optionalhdr, (char *)hdr, nt_hdr->inh_filehdr.ifh_optionalhdrlen); return (0); } /* * Return a copy of the file header. Contains the number of * sections in this image. */ int pe_get_file_header(imgbase, hdr) vm_offset_t imgbase; image_file_header *hdr; { image_dos_header *dos_hdr; image_nt_header *nt_hdr; if (imgbase == 0 || hdr == NULL) return (EINVAL); if (pe_is_nt_image(imgbase)) return (EINVAL); dos_hdr = (image_dos_header *)imgbase; nt_hdr = (image_nt_header *)(imgbase + dos_hdr->idh_lfanew); /* * Note: the size of the nt_header is variable since it * can contain optional fields, as indicated by ifh_optionalhdrlen. * However it happens we're only interested in fields in the * non-variant portion of the nt_header structure, so we don't * bother copying the optional parts here. */ bcopy ((char *)&nt_hdr->inh_filehdr, (char *)hdr, sizeof(image_file_header)); return (0); } /* * Return the header of the first section in this image (usually * .text). */ int pe_get_section_header(imgbase, hdr) vm_offset_t imgbase; image_section_header *hdr; { image_dos_header *dos_hdr; image_nt_header *nt_hdr; image_section_header *sect_hdr; if (imgbase == 0 || hdr == NULL) return (EINVAL); if (pe_is_nt_image(imgbase)) return (EINVAL); dos_hdr = (image_dos_header *)imgbase; nt_hdr = (image_nt_header *)(imgbase + dos_hdr->idh_lfanew); sect_hdr = IMAGE_FIRST_SECTION(nt_hdr); bcopy ((char *)sect_hdr, (char *)hdr, sizeof(image_section_header)); return (0); } /* * Return the number of sections in this executable, or 0 on error. 
*/ int pe_numsections(imgbase) vm_offset_t imgbase; { image_file_header file_hdr; if (pe_get_file_header(imgbase, &file_hdr)) return (0); return (file_hdr.ifh_numsections); } /* * Return the base address that this image was linked for. * This helps us calculate relocation addresses later. */ vm_offset_t pe_imagebase(imgbase) vm_offset_t imgbase; { image_optional_header optional_hdr; if (pe_get_optional_header(imgbase, &optional_hdr)) return (0); return (optional_hdr.ioh_imagebase); } /* * Return the offset of a given directory structure within the * image. Directories reside within sections. */ vm_offset_t pe_directory_offset(imgbase, diridx) vm_offset_t imgbase; uint32_t diridx; { image_optional_header opt_hdr; vm_offset_t dir; if (pe_get_optional_header(imgbase, &opt_hdr)) return (0); if (diridx >= opt_hdr.ioh_rva_size_cnt) return (0); dir = opt_hdr.ioh_datadir[diridx].idd_vaddr; return (pe_translate_addr(imgbase, dir)); } vm_offset_t pe_translate_addr(imgbase, rva) vm_offset_t imgbase; vm_offset_t rva; { image_optional_header opt_hdr; image_section_header *sect_hdr; image_dos_header *dos_hdr; image_nt_header *nt_hdr; int i = 0, sections, fixedlen; if (pe_get_optional_header(imgbase, &opt_hdr)) return (0); sections = pe_numsections(imgbase); dos_hdr = (image_dos_header *)imgbase; nt_hdr = (image_nt_header *)(imgbase + dos_hdr->idh_lfanew); sect_hdr = IMAGE_FIRST_SECTION(nt_hdr); /* * The test here is to see if the RVA falls somewhere * inside the section, based on the section's start RVA * and its length. However it seems sometimes the * virtual length isn't enough to cover the entire * area of the section. We fudge by taking into account * the section alignment and rounding the section length * up to a page boundary. 
*/ while (i++ < sections) { fixedlen = sect_hdr->ish_misc.ish_vsize; fixedlen += ((opt_hdr.ioh_sectalign - 1) - sect_hdr->ish_misc.ish_vsize) & (opt_hdr.ioh_sectalign - 1); if (sect_hdr->ish_vaddr <= (uint32_t)rva && (sect_hdr->ish_vaddr + fixedlen) > (uint32_t)rva) break; sect_hdr++; } if (i > sections) return (0); return ((vm_offset_t)(imgbase + rva - sect_hdr->ish_vaddr + sect_hdr->ish_rawdataaddr)); } /* * Get the section header for a particular section. Note that * section names can be anything, but there are some standard * ones (.text, .data, .rdata, .reloc). */ int pe_get_section(imgbase, hdr, name) vm_offset_t imgbase; image_section_header *hdr; const char *name; { image_dos_header *dos_hdr; image_nt_header *nt_hdr; image_section_header *sect_hdr; int i, sections; if (imgbase == 0 || hdr == NULL) return (EINVAL); if (pe_is_nt_image(imgbase)) return (EINVAL); sections = pe_numsections(imgbase); dos_hdr = (image_dos_header *)imgbase; nt_hdr = (image_nt_header *)(imgbase + dos_hdr->idh_lfanew); sect_hdr = IMAGE_FIRST_SECTION(nt_hdr); for (i = 0; i < sections; i++) { if (!strcmp ((char *)§_hdr->ish_name, name)) { bcopy((char *)sect_hdr, (char *)hdr, sizeof(image_section_header)); return (0); } else sect_hdr++; } return (ENOEXEC); } /* * Apply the base relocations to this image. The relocation table * resides within the .reloc section. Relocations are specified in * blocks which refer to a particular page. We apply the relocations * one page block at a time. 
*/ int pe_relocate(imgbase) vm_offset_t imgbase; { image_section_header sect; image_base_reloc *relhdr; uint16_t rel, *sloc; vm_offset_t base; vm_size_t delta; uint32_t *lloc; uint64_t *qloc; int i, count; vm_offset_t txt; base = pe_imagebase(imgbase); pe_get_section(imgbase, §, ".text"); txt = pe_translate_addr(imgbase, sect.ish_vaddr); delta = (uint32_t)(txt) - base - sect.ish_vaddr; pe_get_section(imgbase, §, ".reloc"); relhdr = (image_base_reloc *)(imgbase + sect.ish_rawdataaddr); do { count = (relhdr->ibr_blocksize - (sizeof(uint32_t) * 2)) / sizeof(uint16_t); for (i = 0; i < count; i++) { rel = relhdr->ibr_rel[i]; switch (IMR_RELTYPE(rel)) { case IMAGE_REL_BASED_ABSOLUTE: break; case IMAGE_REL_BASED_HIGHLOW: lloc = (uint32_t *)pe_translate_addr(imgbase, relhdr->ibr_vaddr + IMR_RELOFFSET(rel)); *lloc = pe_translate_addr(imgbase, (*lloc - base)); break; case IMAGE_REL_BASED_HIGH: sloc = (uint16_t *)pe_translate_addr(imgbase, relhdr->ibr_vaddr + IMR_RELOFFSET(rel)); *sloc += (delta & 0xFFFF0000) >> 16; break; case IMAGE_REL_BASED_LOW: sloc = (uint16_t *)pe_translate_addr(imgbase, relhdr->ibr_vaddr + IMR_RELOFFSET(rel)); *sloc += (delta & 0xFFFF); break; case IMAGE_REL_BASED_DIR64: qloc = (uint64_t *)pe_translate_addr(imgbase, relhdr->ibr_vaddr + IMR_RELOFFSET(rel)); *qloc = pe_translate_addr(imgbase, (*qloc - base)); break; default: printf("[%d]reloc type: %d\n",i, IMR_RELTYPE(rel)); break; } } relhdr = (image_base_reloc *)((vm_offset_t)relhdr + relhdr->ibr_blocksize); } while (relhdr->ibr_blocksize); return (0); } /* * Return the import descriptor for a particular module. An image * may be linked against several modules, typically HAL.dll, ntoskrnl.exe * and NDIS.SYS. For each module, there is a list of imported function * names and their addresses. * * Note: module names are case insensitive! 
int
pe_get_import_descriptor(imgbase, desc, module)
	vm_offset_t		imgbase;
	image_import_descriptor	*desc;
	char			*module;
{
	vm_offset_t		offset;
	image_import_descriptor	*imp_desc;
	char			*modname;

	if (imgbase == 0 || module == NULL || desc == NULL)
		return (EINVAL);

	/* Locate the import directory within the image. */
	offset = pe_directory_offset(imgbase, IMAGE_DIRECTORY_ENTRY_IMPORT);
	if (offset == 0)
		return (ENOENT);

	imp_desc = (void *)offset;

	/* The descriptor array is terminated by an all-zero entry. */
	while (imp_desc->iid_nameaddr) {
		modname = (char *)pe_translate_addr(imgbase,
		    imp_desc->iid_nameaddr);
		/*
		 * Case-insensitive prefix match: strlen(module) bytes
		 * only, so "ntoskrnl" matches "ntoskrnl.exe".
		 */
		if (!strncasecmp(module, modname, strlen(module))) {
			bcopy((char *)imp_desc, (char *)desc,
			    sizeof(image_import_descriptor));
			return (0);
		}
		imp_desc++;
	}

	return (ENOENT);
}

/*
 * Find the message table resource in the image, if there is one,
 * and return a pointer to it via 'md'.
 */
int
pe_get_messagetable(imgbase, md)
	vm_offset_t		imgbase;
	message_resource_data	**md;
{
	image_resource_directory	*rdir, *rtype;
	image_resource_directory_entry	*dent, *dent2;
	image_resource_data_entry	*rent;
	vm_offset_t		offset;
	int			i;

	if (imgbase == 0)
		return (EINVAL);

	offset = pe_directory_offset(imgbase, IMAGE_DIRECTORY_ENTRY_RESOURCE);
	if (offset == 0)
		return (ENOENT);

	rdir = (image_resource_directory *)offset;

	/* First entry after the directory header. */
	dent = (image_resource_directory_entry *)(offset +
	    sizeof(image_resource_directory));

	for (i = 0; i < rdir->ird_id_entries; i++){
		if (dent->irde_name != RT_MESSAGETABLE) {
			dent++;
			continue;
		}
		/*
		 * Entries whose data offset has RESOURCE_DIR_FLAG set
		 * point at a nested subdirectory rather than at data;
		 * descend (taking the first entry each time) until we
		 * reach an actual data entry.
		 */
		dent2 = dent;
		while (dent2->irde_dataoff & RESOURCE_DIR_FLAG) {
			rtype = (image_resource_directory *)(offset +
			    (dent2->irde_dataoff & ~RESOURCE_DIR_FLAG));
			dent2 = (image_resource_directory_entry *)
			    ((uintptr_t)rtype +
			    sizeof(image_resource_directory));
		}
		rent = (image_resource_data_entry *)(offset +
		    dent2->irde_dataoff);
		*md = (message_resource_data *)pe_translate_addr(imgbase,
		    rent->irde_offset);
		return (0);
	}

	return (ENOENT);
}

/*
 * Look up message 'id' in the image's message table and return
 * pointers to its text, length and flags.
 */
int
pe_get_message(imgbase, id, str, len, flags)
	vm_offset_t		imgbase;
	uint32_t		id;
	char			**str;
	int			*len;
	uint16_t		*flags;
{
	message_resource_data	*md = NULL;
	message_resource_block	*mb;
	message_resource_entry	*me;
	uint32_t		i;

	pe_get_messagetable(imgbase, &md);
	if (md == NULL)
		return (ENOENT);

	/* Blocks follow immediately after the message table header. */
	mb = (message_resource_block *)((uintptr_t)md +
	    sizeof(message_resource_data));

	for (i = 0; i < md->mrd_numblocks; i++) {
		if (id >= mb->mrb_lowid && id <= mb->mrb_highid) {
			me = (message_resource_entry *)((uintptr_t)md +
			    mb->mrb_entryoff);
			/*
			 * Entries are variable length, so walk them
			 * one at a time to reach entry (id - lowid).
			 * (Reusing 'i' here is safe: we return before
			 * the outer loop tests it again.)
			 */
			for (i = id - mb->mrb_lowid; i > 0; i--)
				me = (message_resource_entry *)((uintptr_t)me +
				    me->mre_len);
			*str = me->mre_text;
			*len = me->mre_len;
			*flags = me->mre_flags;
			return (0);
		}
		mb++;
	}

	return (ENOENT);
}

/*
 * Find the function that matches a particular name.  This doesn't
 * need to be particularly speedy since it's only run when loading
 * a module for the first time.
 */
static vm_offset_t
pe_functbl_match(functbl, name)
	image_patch_table	*functbl;
	char			*name;
{
	image_patch_table	*p;

	if (functbl == NULL || name == NULL)
		return (0);

	p = functbl;

	while (p->ipt_name != NULL) {
		if (!strcmp(p->ipt_name, name))
			return ((vm_offset_t)p->ipt_wrap);
		p++;
	}
	printf("no match for %s\n", name);

	/*
	 * Return the wrapper pointer for this routine.
	 * For x86, this is the same as the funcptr.
	 * For amd64, this points to a wrapper routine
	 * that does calling convention translation and
	 * then invokes the underlying routine.
	 * (At this point 'p' is the catch-all entry that
	 * terminates the table.)
	 */
	return ((vm_offset_t)p->ipt_wrap);
}

/*
 * Patch the imported function addresses for a given module.
 * The caller must specify the module name and provide a table
 * of function pointers that will be patched into the jump table.
 * Note that there are actually two copies of the jump table: one
 * copy is left alone.  In a .SYS file, the jump tables are usually
 * merged into the INIT segment.
 */
*/ int pe_patch_imports(imgbase, module, functbl) vm_offset_t imgbase; char *module; image_patch_table *functbl; { image_import_descriptor imp_desc; char *fname; vm_offset_t *nptr, *fptr; vm_offset_t func; if (imgbase == 0 || module == NULL || functbl == NULL) return (EINVAL); if (pe_get_import_descriptor(imgbase, &imp_desc, module)) return (ENOEXEC); nptr = (vm_offset_t *)pe_translate_addr(imgbase, imp_desc.iid_import_name_table_addr); fptr = (vm_offset_t *)pe_translate_addr(imgbase, imp_desc.iid_import_address_table_addr); while (nptr != NULL && pe_translate_addr(imgbase, *nptr)) { fname = (char *)pe_translate_addr(imgbase, (*nptr) + 2); func = pe_functbl_match(functbl, fname); if (func) *fptr = func; #ifdef notdef if (*fptr == 0) return (ENOENT); #endif nptr++; fptr++; } return (0); }