Index: head/sys/compat/ndis/kern_ndis.c =================================================================== --- head/sys/compat/ndis/kern_ndis.c (revision 130165) +++ head/sys/compat/ndis/kern_ndis.c (revision 130166) @@ -1,1685 +1,1688 @@ /* * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NDIS_DUMMY_PATH "\\\\some\\bogus\\path" __stdcall static void ndis_status_func(ndis_handle, ndis_status, void *, uint32_t); __stdcall static void ndis_statusdone_func(ndis_handle); __stdcall static void ndis_setdone_func(ndis_handle, ndis_status); __stdcall static void ndis_getdone_func(ndis_handle, ndis_status); __stdcall static void ndis_resetdone_func(ndis_handle, ndis_status, uint8_t); __stdcall static void ndis_sendrsrcavail_func(ndis_handle); struct nd_head ndis_devhead; struct ndis_req { void (*nr_func)(void *); void *nr_arg; int nr_exit; STAILQ_ENTRY(ndis_req) link; }; struct ndisproc { struct ndisqhead *np_q; struct proc *np_p; int np_state; }; static void ndis_return(void *); static int ndis_create_kthreads(void); static void ndis_destroy_kthreads(void); static void ndis_stop_thread(int); static int ndis_enlarge_thrqueue(int); static int ndis_shrink_thrqueue(int); static void ndis_runq(void *); static uma_zone_t ndis_packet_zone, ndis_buffer_zone; struct mtx ndis_thr_mtx; static STAILQ_HEAD(ndisqhead, ndis_req) ndis_ttodo; struct ndisqhead ndis_itodo; struct ndisqhead ndis_free; static int ndis_jobs = 32; static struct ndisproc ndis_tproc; static struct ndisproc ndis_iproc; /* * This allows us to export our symbols to other modules. * Note that we call ourselves 'ndisapi' to avoid a namespace * collision with if_ndis.ko, which internally calls itself * 'ndis.' */ static int ndis_modevent(module_t mod, int cmd, void *arg) { int error = 0; switch (cmd) { case MOD_LOAD: /* Initialize subsystems */ ndis_libinit(); ntoskrnl_libinit(); /* Initialize TX buffer UMA zone. */ ndis_packet_zone = uma_zcreate("NDIS packet", sizeof(ndis_packet), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); ndis_buffer_zone = uma_zcreate("NDIS buffer", sizeof(ndis_buffer), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); ndis_create_kthreads(); TAILQ_INIT(&ndis_devhead); break; case MOD_SHUTDOWN: /* stop kthreads */ ndis_destroy_kthreads(); if (TAILQ_FIRST(&ndis_devhead) == NULL) { /* Shut down subsystems */ ndis_libfini(); ntoskrnl_libfini(); /* Remove zones */ uma_zdestroy(ndis_packet_zone); uma_zdestroy(ndis_buffer_zone); } break; case MOD_UNLOAD: /* stop kthreads */ ndis_destroy_kthreads(); /* Shut down subsystems */ ndis_libfini(); ntoskrnl_libfini(); /* Remove zones */ uma_zdestroy(ndis_packet_zone); uma_zdestroy(ndis_buffer_zone); break; default: error = EINVAL; break; } return(error); } DEV_MODULE(ndisapi, ndis_modevent, NULL); MODULE_VERSION(ndisapi, 1); /* * We create two kthreads for the NDIS subsystem. One of them is a task * queue for performing various odd jobs. The other is an swi thread * reserved exclusively for running interrupt handlers. The reason we * have our own task queue is that there are some cases where we may * need to sleep for a significant amount of time, and if we were to * use one of the taskqueue threads, we might delay the processing * of other pending tasks which might need to run right away. We have * a separate swi thread because we don't want our interrupt handling * to be delayed either. * * By default there are 32 jobs available to start, and another 8 * are added to the free list each time a new device is created. 
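/*
 * Illustrative userland sketch of the job-queue design described above:
 * a dedicated worker thread sleeps until work is posted and then drains
 * the queue, so a job that blocks for a long time never delays work
 * belonging to other subsystems.  The names (struct job, jobq, worker_main)
 * are hypothetical; the kernel code that follows uses ndis_thsuspend()/
 * ndis_thresume() and a preallocated free list instead of a condition
 * variable.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct job {
	void			(*j_func)(void *);
	void			*j_arg;
	STAILQ_ENTRY(job)	link;
};

static STAILQ_HEAD(jobq_head, job) jobq = STAILQ_HEAD_INITIALIZER(jobq);
static pthread_mutex_t jobq_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t jobq_cv = PTHREAD_COND_INITIALIZER;

/* Worker loop: wait for work, then run each queued job with the lock dropped. */
static void *
worker_main(void *arg)
{
	struct job *j;

	pthread_mutex_lock(&jobq_mtx);
	for (;;) {
		while (STAILQ_EMPTY(&jobq))
			pthread_cond_wait(&jobq_cv, &jobq_mtx);
		j = STAILQ_FIRST(&jobq);
		STAILQ_REMOVE_HEAD(&jobq, link);
		pthread_mutex_unlock(&jobq_mtx);
		(*j->j_func)(j->j_arg);		/* do the work unlocked */
		free(j);
		pthread_mutex_lock(&jobq_mtx);
	}
	return (NULL);
}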
*/ static void ndis_runq(arg) void *arg; { struct ndis_req *r = NULL, *die = NULL; struct ndisproc *p; p = arg; while (1) { /* Sleep, but preserve our original priority. */ ndis_thsuspend(p->np_p, 0); /* Look for any jobs on the work queue. */ mtx_lock(&ndis_thr_mtx); p->np_state = NDIS_PSTATE_RUNNING; while(STAILQ_FIRST(p->np_q) != NULL) { r = STAILQ_FIRST(p->np_q); STAILQ_REMOVE_HEAD(p->np_q, link); mtx_unlock(&ndis_thr_mtx); /* Do the work. */ if (r->nr_func != NULL) (*r->nr_func)(r->nr_arg); mtx_lock(&ndis_thr_mtx); STAILQ_INSERT_HEAD(&ndis_free, r, link); /* Check for a shutdown request */ if (r->nr_exit == TRUE) die = r; } p->np_state = NDIS_PSTATE_SLEEPING; mtx_unlock(&ndis_thr_mtx); /* Bail if we were told to shut down. */ if (die != NULL) break; } wakeup(die); +#if __FreeBSD_version < 502113 + mtx_lock(&Giant); +#endif kthread_exit(0); return; /* notreached */ } static int ndis_create_kthreads() { struct ndis_req *r; int i, error = 0; mtx_init(&ndis_thr_mtx, "NDIS thread lock", MTX_NDIS_LOCK, MTX_DEF); STAILQ_INIT(&ndis_ttodo); STAILQ_INIT(&ndis_itodo); STAILQ_INIT(&ndis_free); for (i = 0; i < ndis_jobs; i++) { r = malloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK); if (r == NULL) { error = ENOMEM; break; } STAILQ_INSERT_HEAD(&ndis_free, r, link); } if (error == 0) { ndis_tproc.np_q = &ndis_ttodo; ndis_tproc.np_state = NDIS_PSTATE_SLEEPING; error = kthread_create(ndis_runq, &ndis_tproc, &ndis_tproc.np_p, RFHIGHPID, NDIS_KSTACK_PAGES, "ndis taskqueue"); } if (error == 0) { ndis_iproc.np_q = &ndis_itodo; ndis_iproc.np_state = NDIS_PSTATE_SLEEPING; error = kthread_create(ndis_runq, &ndis_iproc, &ndis_iproc.np_p, RFHIGHPID, NDIS_KSTACK_PAGES, "ndis swi"); } if (error) { while ((r = STAILQ_FIRST(&ndis_free)) != NULL) { STAILQ_REMOVE_HEAD(&ndis_free, link); free(r, M_DEVBUF); } return(error); } return(0); } static void ndis_destroy_kthreads() { struct ndis_req *r; /* Stop the threads. */ ndis_stop_thread(NDIS_TASKQUEUE); ndis_stop_thread(NDIS_SWI); /* Destroy request structures. */ while ((r = STAILQ_FIRST(&ndis_free)) != NULL) { STAILQ_REMOVE_HEAD(&ndis_free, link); free(r, M_DEVBUF); } mtx_destroy(&ndis_thr_mtx); return; } static void ndis_stop_thread(t) int t; { struct ndis_req *r; struct ndisqhead *q; struct proc *p; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; p = ndis_tproc.np_p; } else { q = &ndis_itodo; p = ndis_iproc.np_p; } /* Create and post a special 'exit' job. */ mtx_lock(&ndis_thr_mtx); r = STAILQ_FIRST(&ndis_free); STAILQ_REMOVE_HEAD(&ndis_free, link); r->nr_func = NULL; r->nr_arg = NULL; r->nr_exit = TRUE; STAILQ_INSERT_TAIL(q, r, link); mtx_unlock(&ndis_thr_mtx); ndis_thresume(p); /* wait for thread exit */ tsleep(r, curthread->td_priority|PCATCH, "ndisthexit", hz * 60); /* Now empty the job list. 
*/ mtx_lock(&ndis_thr_mtx); while ((r = STAILQ_FIRST(q)) != NULL) { STAILQ_REMOVE_HEAD(q, link); STAILQ_INSERT_HEAD(&ndis_free, r, link); } mtx_unlock(&ndis_thr_mtx); return; } static int ndis_enlarge_thrqueue(cnt) int cnt; { struct ndis_req *r; int i; for (i = 0; i < cnt; i++) { r = malloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK); if (r == NULL) return(ENOMEM); mtx_lock(&ndis_thr_mtx); STAILQ_INSERT_HEAD(&ndis_free, r, link); ndis_jobs++; mtx_unlock(&ndis_thr_mtx); } return(0); } static int ndis_shrink_thrqueue(cnt) int cnt; { struct ndis_req *r; int i; for (i = 0; i < cnt; i++) { mtx_lock(&ndis_thr_mtx); r = STAILQ_FIRST(&ndis_free); if (r == NULL) { mtx_unlock(&ndis_thr_mtx); return(ENOMEM); } STAILQ_REMOVE_HEAD(&ndis_free, link); ndis_jobs--; mtx_unlock(&ndis_thr_mtx); free(r, M_DEVBUF); } return(0); } int ndis_unsched(func, arg, t) void (*func)(void *); void *arg; int t; { struct ndis_req *r; struct ndisqhead *q; struct proc *p; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; p = ndis_tproc.np_p; } else { q = &ndis_itodo; p = ndis_iproc.np_p; } mtx_lock(&ndis_thr_mtx); STAILQ_FOREACH(r, q, link) { if (r->nr_func == func && r->nr_arg == arg) { STAILQ_REMOVE(q, r, ndis_req, link); STAILQ_INSERT_HEAD(&ndis_free, r, link); mtx_unlock(&ndis_thr_mtx); return(0); } } mtx_unlock(&ndis_thr_mtx); return(ENOENT); } int ndis_sched(func, arg, t) void (*func)(void *); void *arg; int t; { struct ndis_req *r; struct ndisqhead *q; struct proc *p; int s; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; p = ndis_tproc.np_p; } else { q = &ndis_itodo; p = ndis_iproc.np_p; } mtx_lock(&ndis_thr_mtx); /* * Check to see if an instance of this job is already * pending. If so, don't bother queuing it again. */ STAILQ_FOREACH(r, q, link) { if (r->nr_func == func && r->nr_arg == arg) { mtx_unlock(&ndis_thr_mtx); return(0); } } r = STAILQ_FIRST(&ndis_free); if (r == NULL) { mtx_unlock(&ndis_thr_mtx); return(EAGAIN); } STAILQ_REMOVE_HEAD(&ndis_free, link); r->nr_func = func; r->nr_arg = arg; r->nr_exit = FALSE; STAILQ_INSERT_TAIL(q, r, link); if (t == NDIS_TASKQUEUE) s = ndis_tproc.np_state; else s = ndis_iproc.np_state; mtx_unlock(&ndis_thr_mtx); /* * Post the job, but only if the thread is actually blocked * on its own suspend call. If a driver queues up a job with * NdisScheduleWorkItem() which happens to do a KeWaitForObject(), * it may suspend there, and in that case we don't want to wake * it up until KeWaitForObject() gets woken up on its own. 
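/*
 * Illustrative continuation of the sketch above: posting a job the way
 * ndis_sched() does.  Duplicate func/arg pairs already on the queue are
 * ignored, and the request comes from a preallocated free list so posting
 * never sleeps.  The kernel code additionally checks np_state and only
 * wakes the thread when it is blocked in its own suspend call; with a
 * dedicated condition variable that check is unnecessary, because the
 * wait channel belongs to this queue alone.  All names are hypothetical.
 */
#include <errno.h>

static struct jobq_head jobfree = STAILQ_HEAD_INITIALIZER(jobfree);

static int
post_job(void (*func)(void *), void *arg)
{
	struct job *j;

	pthread_mutex_lock(&jobq_mtx);
	/* If an identical job is already pending, don't queue it twice. */
	STAILQ_FOREACH(j, &jobq, link) {
		if (j->j_func == func && j->j_arg == arg) {
			pthread_mutex_unlock(&jobq_mtx);
			return (0);
		}
	}
	j = STAILQ_FIRST(&jobfree);
	if (j == NULL) {
		pthread_mutex_unlock(&jobq_mtx);
		return (EAGAIN);	/* no free request slots left */
	}
	STAILQ_REMOVE_HEAD(&jobfree, link);
	j->j_func = func;
	j->j_arg = arg;
	STAILQ_INSERT_TAIL(&jobq, j, link);
	pthread_cond_signal(&jobq_cv);	/* kick the worker */
	pthread_mutex_unlock(&jobq_mtx);
	return (0);
}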
*/ if (s == NDIS_PSTATE_SLEEPING) ndis_thresume(p); return(0); } int ndis_thsuspend(p, timo) struct proc *p; int timo; { int error; PROC_LOCK(p); error = msleep(&p->p_siglist, &p->p_mtx, curthread->td_priority|PDROP, "ndissp", timo); return(error); } void ndis_thresume(p) struct proc *p; { wakeup(&p->p_siglist); return; } __stdcall static void ndis_sendrsrcavail_func(adapter) ndis_handle adapter; { return; } __stdcall static void ndis_status_func(adapter, status, sbuf, slen) ndis_handle adapter; ndis_status status; void *sbuf; uint32_t slen; { ndis_miniport_block *block; block = adapter; if (block->nmb_ifp->if_flags & IFF_DEBUG) device_printf (block->nmb_dev, "status: %x\n", status); return; } __stdcall static void ndis_statusdone_func(adapter) ndis_handle adapter; { ndis_miniport_block *block; block = adapter; if (block->nmb_ifp->if_flags & IFF_DEBUG) device_printf (block->nmb_dev, "status complete\n"); return; } __stdcall static void ndis_setdone_func(adapter, status) ndis_handle adapter; ndis_status status; { ndis_miniport_block *block; block = adapter; block->nmb_setstat = status; wakeup(&block->nmb_wkupdpctimer); return; } __stdcall static void ndis_getdone_func(adapter, status) ndis_handle adapter; ndis_status status; { ndis_miniport_block *block; block = adapter; block->nmb_getstat = status; wakeup(&block->nmb_wkupdpctimer); return; } __stdcall static void ndis_resetdone_func(adapter, status, addressingreset) ndis_handle adapter; ndis_status status; uint8_t addressingreset; { ndis_miniport_block *block; block = adapter; if (block->nmb_ifp->if_flags & IFF_DEBUG) device_printf (block->nmb_dev, "reset done...\n"); wakeup(block->nmb_ifp); return; } int ndis_create_sysctls(arg) void *arg; { struct ndis_softc *sc; ndis_cfg *vals; char buf[256]; if (arg == NULL) return(EINVAL); sc = arg; vals = sc->ndis_regvals; TAILQ_INIT(&sc->ndis_cfglist_head); #if __FreeBSD_version < 502113 /* Create the sysctl tree. */ sc->ndis_tree = SYSCTL_ADD_NODE(&sc->ndis_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(sc->ndis_dev), CTLFLAG_RD, 0, device_get_desc(sc->ndis_dev)); #endif /* Add the driver-specific registry keys. */ vals = sc->ndis_regvals; while(1) { if (vals->nc_cfgkey == NULL) break; if (vals->nc_idx != sc->ndis_devidx) { vals++; continue; } #if __FreeBSD_version < 502113 SYSCTL_ADD_STRING(&sc->ndis_ctx, SYSCTL_CHILDREN(sc->ndis_tree), #else SYSCTL_ADD_STRING(device_get_sysctl_ctx(sc->ndis_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ndis_dev)), #endif OID_AUTO, vals->nc_cfgkey, CTLFLAG_RW, vals->nc_val, sizeof(vals->nc_val), vals->nc_cfgdesc); vals++; } /* Now add a couple of builtin keys. */ /* * Environment can be either Windows (0) or WindowsNT (1). * We qualify as the latter. */ ndis_add_sysctl(sc, "Environment", "Windows environment", "1", CTLFLAG_RD); /* NDIS version should be 5.1. */ ndis_add_sysctl(sc, "NdisVersion", "NDIS API Version", "0x00050001", CTLFLAG_RD); /* Bus type (PCI, PCMCIA, etc...) 
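/*
 * Illustrative sketch of the completion handshake used by the
 * ndis_setdone_func()/ndis_getdone_func() callbacks above and the
 * msleep() calls later in ndis_set_info()/ndis_get_info(): the driver
 * reports NDIS_STATUS_PENDING, the requester goes to sleep on a known
 * address, and the completion callback stores the final status and
 * issues the wakeup.  Userland equivalent with hypothetical names.
 */
#include <pthread.h>

static pthread_mutex_t req_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t req_cv = PTHREAD_COND_INITIALIZER;
static int req_done;
static int req_status;

/* Completion side: record the result, then wake the sleeping requester. */
static void
request_done(int status)
{
	pthread_mutex_lock(&req_mtx);
	req_status = status;
	req_done = 1;
	pthread_cond_signal(&req_cv);
	pthread_mutex_unlock(&req_mtx);
}

/* Requester side: block until request_done() has run, then return its status. */
static int
request_wait(void)
{
	pthread_mutex_lock(&req_mtx);
	while (!req_done)
		pthread_cond_wait(&req_cv, &req_mtx);
	req_done = 0;
	pthread_mutex_unlock(&req_mtx);
	return (req_status);
}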
*/ sprintf(buf, "%d", (int)sc->ndis_iftype); ndis_add_sysctl(sc, "BusType", "Bus Type", buf, CTLFLAG_RD); if (sc->ndis_res_io != NULL) { sprintf(buf, "0x%lx", rman_get_start(sc->ndis_res_io)); ndis_add_sysctl(sc, "IOBaseAddress", "Base I/O Address", buf, CTLFLAG_RD); } if (sc->ndis_irq != NULL) { sprintf(buf, "%lu", rman_get_start(sc->ndis_irq)); ndis_add_sysctl(sc, "InterruptNumber", "Interrupt Number", buf, CTLFLAG_RD); } return(0); } int ndis_add_sysctl(arg, key, desc, val, flag) void *arg; char *key; char *desc; char *val; int flag; { struct ndis_softc *sc; struct ndis_cfglist *cfg; char descstr[256]; sc = arg; cfg = malloc(sizeof(struct ndis_cfglist), M_DEVBUF, M_NOWAIT|M_ZERO); if (cfg == NULL) return(ENOMEM); cfg->ndis_cfg.nc_cfgkey = strdup(key, M_DEVBUF); if (desc == NULL) { snprintf(descstr, sizeof(descstr), "%s (dynamic)", key); cfg->ndis_cfg.nc_cfgdesc = strdup(descstr, M_DEVBUF); } else cfg->ndis_cfg.nc_cfgdesc = strdup(desc, M_DEVBUF); strcpy(cfg->ndis_cfg.nc_val, val); TAILQ_INSERT_TAIL(&sc->ndis_cfglist_head, cfg, link); #if __FreeBSD_version < 502113 SYSCTL_ADD_STRING(&sc->ndis_ctx, SYSCTL_CHILDREN(sc->ndis_tree), #else SYSCTL_ADD_STRING(device_get_sysctl_ctx(sc->ndis_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ndis_dev)), #endif OID_AUTO, cfg->ndis_cfg.nc_cfgkey, flag, cfg->ndis_cfg.nc_val, sizeof(cfg->ndis_cfg.nc_val), cfg->ndis_cfg.nc_cfgdesc); return(0); } int ndis_flush_sysctls(arg) void *arg; { struct ndis_softc *sc; struct ndis_cfglist *cfg; sc = arg; while (!TAILQ_EMPTY(&sc->ndis_cfglist_head)) { cfg = TAILQ_FIRST(&sc->ndis_cfglist_head); TAILQ_REMOVE(&sc->ndis_cfglist_head, cfg, link); free(cfg->ndis_cfg.nc_cfgkey, M_DEVBUF); free(cfg->ndis_cfg.nc_cfgdesc, M_DEVBUF); free(cfg, M_DEVBUF); } return(0); } static void ndis_return(arg) void *arg; { struct ndis_softc *sc; __stdcall ndis_return_handler returnfunc; ndis_handle adapter; ndis_packet *p; uint8_t irql; p = arg; sc = p->np_softc; adapter = sc->ndis_block.nmb_miniportadapterctx; if (adapter == NULL) return; returnfunc = sc->ndis_chars.nmc_return_packet_func; irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); returnfunc(adapter, p); FASTCALL1(hal_lower_irql, irql); return; } void ndis_return_packet(buf, arg) void *buf; /* not used */ void *arg; { ndis_packet *p; if (arg == NULL) return; p = arg; /* Decrement refcount. */ p->np_refcnt--; /* Release packet when refcount hits zero, otherwise return. 
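/*
 * Illustrative sketch of the reference-counting scheme behind
 * ndis_return_packet(): every mbuf built from an NDIS packet holds one
 * reference, each external-storage free callback drops one, and only
 * when the last reference goes away is the packet handed back to the
 * driver (via the SWI thread in the real code).  Hypothetical names.
 */
struct refpkt {
	int	rp_refcnt;			/* one per mapped segment */
	void	(*rp_return)(struct refpkt *);	/* runs when the last ref drops */
};

static void
refpkt_segment_free(struct refpkt *p)
{
	if (--p->rp_refcnt > 0)
		return;			/* other segments still outstanding */
	p->rp_return(p);		/* last one out returns the packet */
}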
*/ if (p->np_refcnt) return; ndis_sched(ndis_return, p, NDIS_SWI); return; } void ndis_free_bufs(b0) ndis_buffer *b0; { ndis_buffer *next; if (b0 == NULL) return; while(b0 != NULL) { next = b0->nb_next; uma_zfree (ndis_buffer_zone, b0); b0 = next; } return; } void ndis_free_packet(p) ndis_packet *p; { if (p == NULL) return; ndis_free_bufs(p->np_private.npp_head); uma_zfree(ndis_packet_zone, p); return; } int ndis_convert_res(arg) void *arg; { struct ndis_softc *sc; ndis_resource_list *rl = NULL; cm_partial_resource_desc *prd = NULL; ndis_miniport_block *block; device_t dev; struct resource_list *brl; struct resource_list brl_rev; struct resource_list_entry *brle, *n; int error = 0; sc = arg; block = &sc->ndis_block; dev = sc->ndis_dev; SLIST_INIT(&brl_rev); rl = malloc(sizeof(ndis_resource_list) + (sizeof(cm_partial_resource_desc) * (sc->ndis_rescnt - 1)), M_DEVBUF, M_NOWAIT|M_ZERO); if (rl == NULL) return(ENOMEM); rl->cprl_version = 5; rl->cprl_version = 1; rl->cprl_count = sc->ndis_rescnt; prd = rl->cprl_partial_descs; brl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); if (brl != NULL) { /* * We have a small problem. Some PCI devices have * multiple I/O ranges. Windows orders them starting * from lowest numbered BAR to highest. We discover * them in that order too, but insert them into a singly * linked list head first, which means when time comes * to traverse the list, we enumerate them in reverse * order. This screws up some drivers which expect the * BARs to be in ascending order so that they can choose * the "first" one as their register space. Unfortunately, * in order to fix this, we have to create our own * temporary list with the entries in reverse order. */ SLIST_FOREACH(brle, brl, link) { n = malloc(sizeof(struct resource_list_entry), M_TEMP, M_NOWAIT); if (n == NULL) { error = ENOMEM; goto bad; } bcopy((char *)brle, (char *)n, sizeof(struct resource_list_entry)); SLIST_INSERT_HEAD(&brl_rev, n, link); } SLIST_FOREACH(brle, &brl_rev, link) { switch (brle->type) { case SYS_RES_IOPORT: prd->cprd_type = CmResourceTypePort; prd->cprd_flags = CM_RESOURCE_PORT_IO; prd->cprd_sharedisp = CmResourceShareDeviceExclusive; prd->u.cprd_port.cprd_start.np_quad = brle->start; prd->u.cprd_port.cprd_len = brle->count; break; case SYS_RES_MEMORY: prd->cprd_type = CmResourceTypeMemory; prd->cprd_flags = CM_RESOURCE_MEMORY_READ_WRITE; prd->cprd_sharedisp = CmResourceShareDeviceExclusive; prd->u.cprd_port.cprd_start.np_quad = brle->start; prd->u.cprd_port.cprd_len = brle->count; break; case SYS_RES_IRQ: prd->cprd_type = CmResourceTypeInterrupt; prd->cprd_flags = 0; prd->cprd_sharedisp = CmResourceShareDeviceExclusive; prd->u.cprd_intr.cprd_level = brle->start; prd->u.cprd_intr.cprd_vector = brle->start; prd->u.cprd_intr.cprd_affinity = 0; break; default: break; } prd++; } } block->nmb_rlist = rl; bad: while (!SLIST_EMPTY(&brl_rev)) { n = SLIST_FIRST(&brl_rev); SLIST_REMOVE_HEAD(&brl_rev, link); free (n, M_TEMP); } return(error); } /* * Map an NDIS packet to an mbuf list. When an NDIS driver receives a * packet, it will hand it to us in the form of an ndis_packet, * which we need to convert to an mbuf that is then handed off * to the stack. Note: we configure the mbuf list so that it uses * the memory regions specified by the ndis_buffer structures in * the ndis_packet as external storage. In most cases, this will * point to a memory region allocated by the driver (either by * ndis_malloc_withtag() or ndis_alloc_sharedmem()). 
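/*
 * Illustrative sketch of the reversal trick described in the comment
 * above: copying entries into a temporary list with head insertion
 * yields them in the opposite order on the next traversal, which is
 * how ndis_convert_res() presents the BARs lowest-first.  Names are
 * hypothetical.
 */
#include <sys/queue.h>
#include <stdlib.h>
#include <errno.h>

struct res_entry {
	int			re_id;
	SLIST_ENTRY(res_entry)	link;
};
SLIST_HEAD(res_list, res_entry);

static int
reverse_res_list(struct res_list *src, struct res_list *rev)
{
	struct res_entry *r, *n;

	SLIST_INIT(rev);
	SLIST_FOREACH(r, src, link) {
		if ((n = malloc(sizeof(*n))) == NULL)
			return (ENOMEM);
		*n = *r;
		/* Head insertion: the last entry copied is visited first. */
		SLIST_INSERT_HEAD(rev, n, link);
	}
	return (0);
}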
We expect * the driver to handle free()ing this region for is, so we set up * a dummy no-op free handler for it. */ int ndis_ptom(m0, p) struct mbuf **m0; ndis_packet *p; { struct mbuf *m, *prev = NULL; ndis_buffer *buf; ndis_packet_private *priv; uint32_t totlen = 0; if (p == NULL || m0 == NULL) return(EINVAL); priv = &p->np_private; buf = priv->npp_head; p->np_refcnt = 0; for (buf = priv->npp_head; buf != NULL; buf = buf->nb_next) { if (buf == priv->npp_head) MGETHDR(m, M_DONTWAIT, MT_HEADER); else MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(*m0); *m0 = NULL; return(ENOBUFS); } m->m_len = buf->nb_bytecount; m->m_data = MDL_VA(buf); MEXTADD(m, m->m_data, m->m_len, ndis_return_packet, p, 0, EXT_NDIS); p->np_refcnt++; totlen += m->m_len; if (m->m_flags & MT_HEADER) *m0 = m; else prev->m_next = m; prev = m; } (*m0)->m_pkthdr.len = totlen; return(0); } /* * Create an mbuf chain from an NDIS packet chain. * This is used mainly when transmitting packets, where we need * to turn an mbuf off an interface's send queue and transform it * into an NDIS packet which will be fed into the NDIS driver's * send routine. * * NDIS packets consist of two parts: an ndis_packet structure, * which is vaguely analagous to the pkthdr portion of an mbuf, * and one or more ndis_buffer structures, which define the * actual memory segments in which the packet data resides. * We need to allocate one ndis_buffer for each mbuf in a chain, * plus one ndis_packet as the header. */ int ndis_mtop(m0, p) struct mbuf *m0; ndis_packet **p; { struct mbuf *m; ndis_buffer *buf = NULL, *prev = NULL; ndis_packet_private *priv; if (p == NULL || m0 == NULL) return(EINVAL); /* If caller didn't supply a packet, make one. */ if (*p == NULL) { *p = uma_zalloc(ndis_packet_zone, M_NOWAIT|M_ZERO); if (*p == NULL) return(ENOMEM); } priv = &(*p)->np_private; priv->npp_totlen = m0->m_pkthdr.len; priv->npp_packetooboffset = offsetof(ndis_packet, np_oob); priv->npp_ndispktflags = NDIS_PACKET_ALLOCATED_BY_NDIS; for (m = m0; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; buf = uma_zalloc(ndis_buffer_zone, M_NOWAIT | M_ZERO); if (buf == NULL) { ndis_free_packet(*p); *p = NULL; return(ENOMEM); } MDL_INIT(buf, m->m_data, m->m_len); if (priv->npp_head == NULL) priv->npp_head = buf; else prev->nb_next = buf; prev = buf; } priv->npp_tail = buf; priv->npp_totlen = m0->m_pkthdr.len; return(0); } int ndis_get_supported_oids(arg, oids, oidcnt) void *arg; ndis_oid **oids; int *oidcnt; { int len, rval; ndis_oid *o; if (arg == NULL || oids == NULL || oidcnt == NULL) return(EINVAL); len = 0; ndis_get_info(arg, OID_GEN_SUPPORTED_LIST, NULL, &len); o = malloc(len, M_DEVBUF, M_NOWAIT); if (o == NULL) return(ENOMEM); rval = ndis_get_info(arg, OID_GEN_SUPPORTED_LIST, o, &len); if (rval) { free(o, M_DEVBUF); return(rval); } *oids = o; *oidcnt = len / 4; return(0); } int ndis_set_info(arg, oid, buf, buflen) void *arg; ndis_oid oid; void *buf; int *buflen; { struct ndis_softc *sc; ndis_status rval; ndis_handle adapter; __stdcall ndis_setinfo_handler setfunc; uint32_t byteswritten = 0, bytesneeded = 0; int error; uint8_t irql; sc = arg; NDIS_LOCK(sc); setfunc = sc->ndis_chars.nmc_setinfo_func; adapter = sc->ndis_block.nmb_miniportadapterctx; NDIS_UNLOCK(sc); if (adapter == NULL || setfunc == NULL) return(ENXIO); irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); rval = setfunc(adapter, oid, buf, *buflen, &byteswritten, &bytesneeded); FASTCALL1(hal_lower_irql, irql); if (rval == NDIS_STATUS_PENDING) { PROC_LOCK(curthread->td_proc); error = 
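/*
 * Illustrative sketch of the two-pass sizing idiom used by
 * ndis_get_supported_oids(): the first query is issued with a
 * zero-length buffer purely to learn how much space is required, then
 * the real query fills a buffer of that size.  'query' is a
 * hypothetical stand-in for ndis_get_info().
 */
#include <stdlib.h>
#include <errno.h>

static int
query_variable_sized(int (*query)(void *buf, int *len), void **bufp, int *lenp)
{
	void *buf;
	int len = 0;

	(void)query(NULL, &len);		/* learn the required length */
	if ((buf = malloc(len)) == NULL)
		return (ENOMEM);
	if (query(buf, &len) != 0) {		/* fetch the actual data */
		free(buf);
		return (EIO);
	}
	*bufp = buf;
	*lenp = len;
	return (0);
}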
msleep(&sc->ndis_block.nmb_wkupdpctimer, &curthread->td_proc->p_mtx, curthread->td_priority|PDROP, "ndisset", 5 * hz); rval = sc->ndis_block.nmb_setstat; } if (byteswritten) *buflen = byteswritten; if (bytesneeded) *buflen = bytesneeded; if (rval == NDIS_STATUS_INVALID_LENGTH) return(ENOSPC); if (rval == NDIS_STATUS_INVALID_OID) return(EINVAL); if (rval == NDIS_STATUS_NOT_SUPPORTED || rval == NDIS_STATUS_NOT_ACCEPTED) return(ENOTSUP); if (rval != NDIS_STATUS_SUCCESS) return(ENODEV); return(0); } typedef void (*ndis_senddone_func)(ndis_handle, ndis_packet *, ndis_status); int ndis_send_packets(arg, packets, cnt) void *arg; ndis_packet **packets; int cnt; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_sendmulti_handler sendfunc; __stdcall ndis_senddone_func senddonefunc; int i; ndis_packet *p; uint8_t irql; sc = arg; adapter = sc->ndis_block.nmb_miniportadapterctx; if (adapter == NULL) return(ENXIO); sendfunc = sc->ndis_chars.nmc_sendmulti_func; senddonefunc = sc->ndis_block.nmb_senddone_func; irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); sendfunc(adapter, packets, cnt); FASTCALL1(hal_lower_irql, irql); for (i = 0; i < cnt; i++) { p = packets[i]; /* * Either the driver already handed the packet to * ndis_txeof() due to a failure, or it wants to keep * it and release it asynchronously later. Skip to the * next one. */ if (p == NULL || p->np_oob.npo_status == NDIS_STATUS_PENDING) continue; senddonefunc(&sc->ndis_block, p, p->np_oob.npo_status); } return(0); } int ndis_send_packet(arg, packet) void *arg; ndis_packet *packet; { struct ndis_softc *sc; ndis_handle adapter; ndis_status status; __stdcall ndis_sendsingle_handler sendfunc; __stdcall ndis_senddone_func senddonefunc; uint8_t irql; sc = arg; adapter = sc->ndis_block.nmb_miniportadapterctx; if (adapter == NULL) return(ENXIO); sendfunc = sc->ndis_chars.nmc_sendsingle_func; senddonefunc = sc->ndis_block.nmb_senddone_func; irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); status = sendfunc(adapter, packet, packet->np_private.npp_flags); FASTCALL1(hal_lower_irql, irql); if (status == NDIS_STATUS_PENDING) return(0); senddonefunc(&sc->ndis_block, packet, status); return(0); } int ndis_init_dma(arg) void *arg; { struct ndis_softc *sc; int i, error; sc = arg; sc->ndis_tmaps = malloc(sizeof(bus_dmamap_t) * sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO); if (sc->ndis_tmaps == NULL) return(ENOMEM); for (i = 0; i < sc->ndis_maxpkts; i++) { error = bus_dmamap_create(sc->ndis_ttag, 0, &sc->ndis_tmaps[i]); if (error) { free(sc->ndis_tmaps, M_DEVBUF); return(ENODEV); } } return(0); } int ndis_destroy_dma(arg) void *arg; { struct ndis_softc *sc; struct mbuf *m; ndis_packet *p = NULL; int i; sc = arg; for (i = 0; i < sc->ndis_maxpkts; i++) { if (sc->ndis_txarray[i] != NULL) { p = sc->ndis_txarray[i]; m = (struct mbuf *)p->np_rsvd[1]; if (m != NULL) m_freem(m); ndis_free_packet(sc->ndis_txarray[i]); } bus_dmamap_destroy(sc->ndis_ttag, sc->ndis_tmaps[i]); } free(sc->ndis_tmaps, M_DEVBUF); bus_dma_tag_destroy(sc->ndis_ttag); return(0); } int ndis_reset_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_reset_handler resetfunc; uint8_t addressing_reset; struct ifnet *ifp; int rval; uint8_t irql; sc = arg; ifp = &sc->arpcom.ac_if; NDIS_LOCK(sc); adapter = sc->ndis_block.nmb_miniportadapterctx; resetfunc = sc->ndis_chars.nmc_reset_func; NDIS_UNLOCK(sc); if (adapter == NULL || resetfunc == NULL) return(EIO); irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); rval = resetfunc(&addressing_reset, adapter); 
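/*
 * Hypothetical helper, shown only as a sketch: the NDIS_STATUS_* to
 * errno translation that ndis_set_info() above and ndis_get_info()
 * below both perform with inline tests, collected in one place.
 * Assumes the status constants from the ndis compat headers.
 */
static int
ndis_status_to_errno(ndis_status rval)
{
	switch (rval) {
	case NDIS_STATUS_SUCCESS:
		return (0);
	case NDIS_STATUS_INVALID_LENGTH:
	case NDIS_STATUS_BUFFER_TOO_SHORT:
		return (ENOSPC);	/* caller's buffer was too small */
	case NDIS_STATUS_INVALID_OID:
		return (EINVAL);	/* the driver doesn't know this OID */
	case NDIS_STATUS_NOT_SUPPORTED:
	case NDIS_STATUS_NOT_ACCEPTED:
		return (ENOTSUP);	/* request refused */
	default:
		return (ENODEV);	/* treat anything else as failure */
	}
}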
FASTCALL1(hal_lower_irql, irql); if (rval == NDIS_STATUS_PENDING) { PROC_LOCK(curthread->td_proc); msleep(sc, &curthread->td_proc->p_mtx, curthread->td_priority|PDROP, "ndisrst", 0); } return(0); } int ndis_halt_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_halt_handler haltfunc; struct ifnet *ifp; sc = arg; ifp = &sc->arpcom.ac_if; NDIS_LOCK(sc); adapter = sc->ndis_block.nmb_miniportadapterctx; if (adapter == NULL) { NDIS_UNLOCK(sc); return(EIO); } /* * The adapter context is only valid after the init * handler has been called, and is invalid once the * halt handler has been called. */ haltfunc = sc->ndis_chars.nmc_halt_func; NDIS_UNLOCK(sc); haltfunc(adapter); NDIS_LOCK(sc); sc->ndis_block.nmb_miniportadapterctx = NULL; NDIS_UNLOCK(sc); return(0); } int ndis_shutdown_nic(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_shutdown_handler shutdownfunc; sc = arg; NDIS_LOCK(sc); adapter = sc->ndis_block.nmb_miniportadapterctx; shutdownfunc = sc->ndis_chars.nmc_shutdown_handler; NDIS_UNLOCK(sc); if (adapter == NULL || shutdownfunc == NULL) return(EIO); if (sc->ndis_chars.nmc_rsvd0 == NULL) shutdownfunc(adapter); else shutdownfunc(sc->ndis_chars.nmc_rsvd0); ndis_shrink_thrqueue(8); TAILQ_REMOVE(&ndis_devhead, &sc->ndis_block, link); return(0); } int ndis_init_nic(arg) void *arg; { struct ndis_softc *sc; ndis_miniport_block *block; __stdcall ndis_init_handler initfunc; ndis_status status, openstatus = 0; ndis_medium mediumarray[NdisMediumMax]; uint32_t chosenmedium, i; if (arg == NULL) return(EINVAL); sc = arg; NDIS_LOCK(sc); block = &sc->ndis_block; initfunc = sc->ndis_chars.nmc_init_func; NDIS_UNLOCK(sc); TAILQ_INIT(&block->nmb_timerlist); for (i = 0; i < NdisMediumMax; i++) mediumarray[i] = i; status = initfunc(&openstatus, &chosenmedium, mediumarray, NdisMediumMax, block, block); /* * If the init fails, blow away the other exported routines * we obtained from the driver so we can't call them later. * If the init failed, none of these will work. 
*/ if (status != NDIS_STATUS_SUCCESS) { NDIS_LOCK(sc); sc->ndis_block.nmb_miniportadapterctx = NULL; NDIS_UNLOCK(sc); return(ENXIO); } return(0); } void ndis_enable_intr(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_enable_interrupts_handler intrenbfunc; sc = arg; adapter = sc->ndis_block.nmb_miniportadapterctx; intrenbfunc = sc->ndis_chars.nmc_enable_interrupts_func; if (adapter == NULL || intrenbfunc == NULL) return; intrenbfunc(adapter); return; } void ndis_disable_intr(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_disable_interrupts_handler intrdisfunc; sc = arg; NDIS_LOCK(sc); adapter = sc->ndis_block.nmb_miniportadapterctx; intrdisfunc = sc->ndis_chars.nmc_disable_interrupts_func; NDIS_UNLOCK(sc); if (adapter == NULL || intrdisfunc == NULL) return; intrdisfunc(adapter); return; } int ndis_isr(arg, ourintr, callhandler) void *arg; int *ourintr; int *callhandler; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_isr_handler isrfunc; uint8_t accepted, queue; if (arg == NULL || ourintr == NULL || callhandler == NULL) return(EINVAL); sc = arg; adapter = sc->ndis_block.nmb_miniportadapterctx; isrfunc = sc->ndis_chars.nmc_isr_func; if (adapter == NULL || isrfunc == NULL) return(ENXIO); isrfunc(&accepted, &queue, adapter); *ourintr = accepted; *callhandler = queue; return(0); } int ndis_intrhand(arg) void *arg; { struct ndis_softc *sc; ndis_handle adapter; __stdcall ndis_interrupt_handler intrfunc; if (arg == NULL) return(EINVAL); sc = arg; NDIS_LOCK(sc); adapter = sc->ndis_block.nmb_miniportadapterctx; intrfunc = sc->ndis_chars.nmc_interrupt_func; NDIS_UNLOCK(sc); if (adapter == NULL || intrfunc == NULL) return(EINVAL); intrfunc(adapter); return(0); } int ndis_get_info(arg, oid, buf, buflen) void *arg; ndis_oid oid; void *buf; int *buflen; { struct ndis_softc *sc; ndis_status rval; ndis_handle adapter; __stdcall ndis_queryinfo_handler queryfunc; uint32_t byteswritten = 0, bytesneeded = 0; int error; uint8_t irql; sc = arg; NDIS_LOCK(sc); queryfunc = sc->ndis_chars.nmc_queryinfo_func; adapter = sc->ndis_block.nmb_miniportadapterctx; NDIS_UNLOCK(sc); if (adapter == NULL || queryfunc == NULL) return(ENXIO); irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); rval = queryfunc(adapter, oid, buf, *buflen, &byteswritten, &bytesneeded); FASTCALL1(hal_lower_irql, irql); /* Wait for requests that block. 
*/ if (rval == NDIS_STATUS_PENDING) { PROC_LOCK(curthread->td_proc); error = msleep(&sc->ndis_block.nmb_wkupdpctimer, &curthread->td_proc->p_mtx, curthread->td_priority|PDROP, "ndisget", 5 * hz); rval = sc->ndis_block.nmb_getstat; } if (byteswritten) *buflen = byteswritten; if (bytesneeded) *buflen = bytesneeded; if (rval == NDIS_STATUS_INVALID_LENGTH || rval == NDIS_STATUS_BUFFER_TOO_SHORT) return(ENOSPC); if (rval == NDIS_STATUS_INVALID_OID) return(EINVAL); if (rval == NDIS_STATUS_NOT_SUPPORTED || rval == NDIS_STATUS_NOT_ACCEPTED) return(ENOTSUP); if (rval != NDIS_STATUS_SUCCESS) return(ENODEV); return(0); } int ndis_unload_driver(arg) void *arg; { struct ndis_softc *sc; sc = arg; free(sc->ndis_block.nmb_rlist, M_DEVBUF); ndis_flush_sysctls(sc); ndis_shrink_thrqueue(8); TAILQ_REMOVE(&ndis_devhead, &sc->ndis_block, link); return(0); } #define NDIS_LOADED htonl(0x42534F44) int ndis_load_driver(img, arg) vm_offset_t img; void *arg; { __stdcall driver_entry entry; image_optional_header opt_hdr; image_import_descriptor imp_desc; ndis_unicode_string dummystr; ndis_miniport_block *block; ndis_status status; int idx; uint32_t *ptr; struct ndis_softc *sc; sc = arg; /* * Only perform the relocation/linking phase once * since the binary image may be shared among multiple * device instances. */ ptr = (uint32_t *)(img + 8); if (*ptr != NDIS_LOADED) { /* Perform text relocation */ if (pe_relocate(img)) return(ENOEXEC); /* Dynamically link the NDIS.SYS routines -- required. */ if (pe_patch_imports(img, "NDIS", ndis_functbl)) return(ENOEXEC); /* Dynamically link the HAL.dll routines -- also required. */ if (pe_patch_imports(img, "HAL", hal_functbl)) return(ENOEXEC); /* Dynamically link ntoskrnl.exe -- optional. */ if (pe_get_import_descriptor(img, &imp_desc, "ntoskrnl") == 0) { if (pe_patch_imports(img, "ntoskrnl", ntoskrnl_functbl)) return(ENOEXEC); } *ptr = NDIS_LOADED; } /* Locate the driver entry point */ pe_get_optional_header(img, &opt_hdr); entry = (driver_entry)pe_translate_addr(img, opt_hdr.ioh_entryaddr); dummystr.nus_len = strlen(NDIS_DUMMY_PATH) * 2; dummystr.nus_maxlen = strlen(NDIS_DUMMY_PATH) * 2; dummystr.nus_buf = NULL; ndis_ascii_to_unicode(NDIS_DUMMY_PATH, &dummystr.nus_buf); /* * Now that we have the miniport driver characteristics, * create an NDIS block and call the init handler. * This will cause the driver to try to probe for * a device. */ block = &sc->ndis_block; ptr = (uint32_t *)block; for (idx = 0; idx < sizeof(ndis_miniport_block) / 4; idx++) { *ptr = idx | 0xdead0000; ptr++; } block->nmb_signature = (void *)0xcafebabe; block->nmb_setdone_func = ndis_setdone_func; block->nmb_querydone_func = ndis_getdone_func; block->nmb_status_func = ndis_status_func; block->nmb_statusdone_func = ndis_statusdone_func; block->nmb_resetdone_func = ndis_resetdone_func; block->nmb_sendrsrc_func = ndis_sendrsrcavail_func; block->nmb_ifp = &sc->arpcom.ac_if; block->nmb_dev = sc->ndis_dev; block->nmb_img = img; block->nmb_devobj.do_rsvd = block; /* * Now call the DriverEntry() routine. This will cause * a callout to the NdisInitializeWrapper() and * NdisMRegisterMiniport() routines. 
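/*
 * Illustrative sketch of the run-once guard used by ndis_load_driver():
 * a magic cookie stamped into the image lets a second device instance
 * that shares the same binary skip relocation and import patching.
 * 'prepare', 'slot' and the marker value are hypothetical stand-ins.
 */
#include <stdint.h>
#include <errno.h>

#define	IMG_PREPARED	0x42534f44	/* arbitrary "already done" marker */

static int
prepare_image_once(uint32_t *slot, int (*prepare)(void))
{
	if (*slot == IMG_PREPARED)
		return (0);		/* a previous instance already did the work */
	if (prepare() != 0)
		return (ENOEXEC);
	*slot = IMG_PREPARED;		/* stamp the image as processed */
	return (0);
}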
*/ status = entry(&block->nmb_devobj, &dummystr); free (dummystr.nus_buf, M_DEVBUF); if (status != NDIS_STATUS_SUCCESS) return(ENODEV); ndis_enlarge_thrqueue(8); TAILQ_INSERT_TAIL(&ndis_devhead, block, link); return(0); } Index: head/sys/compat/ndis/subr_ntoskrnl.c =================================================================== --- head/sys/compat/ndis/subr_ntoskrnl.c (revision 130165) +++ head/sys/compat/ndis/subr_ntoskrnl.c (revision 130166) @@ -1,1968 +1,1971 @@ /* * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define __regparm __attribute__((regparm(3))) #define FUNC void(*)(void) __stdcall static uint8_t ntoskrnl_unicode_equal(ndis_unicode_string *, ndis_unicode_string *, uint8_t); __stdcall static void ntoskrnl_unicode_copy(ndis_unicode_string *, ndis_unicode_string *); __stdcall static ndis_status ntoskrnl_unicode_to_ansi(ndis_ansi_string *, ndis_unicode_string *, uint8_t); __stdcall static ndis_status ntoskrnl_ansi_to_unicode(ndis_unicode_string *, ndis_ansi_string *, uint8_t); __stdcall static void *ntoskrnl_iobuildsynchfsdreq(uint32_t, void *, void *, uint32_t, uint32_t *, void *, void *); __stdcall static uint32_t ntoskrnl_iofcalldriver(/*void *, void * */ void); __stdcall static void ntoskrnl_iofcompletereq(/*void *, uint8_t*/ void); __stdcall static uint32_t ntoskrnl_waitforobjs(uint32_t, nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t, int64_t *, wait_block *); static void ntoskrnl_wakeup(void *); static void ntoskrnl_timercall(void *); static void ntoskrnl_run_dpc(void *); __stdcall static void ntoskrnl_writereg_ushort(uint16_t *, uint16_t); __stdcall static uint16_t ntoskrnl_readreg_ushort(uint16_t *); __stdcall static void ntoskrnl_writereg_ulong(uint32_t *, uint32_t); __stdcall static uint32_t ntoskrnl_readreg_ulong(uint32_t *); __stdcall static void ntoskrnl_writereg_uchar(uint8_t *, uint8_t); __stdcall static uint8_t ntoskrnl_readreg_uchar(uint8_t *); __stdcall static int64_t _allmul(int64_t, int64_t); __stdcall static int64_t _alldiv(int64_t, int64_t); __stdcall static int64_t _allrem(int64_t, int64_t); __regparm static int64_t _allshr(int64_t, uint8_t); __regparm static int64_t _allshl(int64_t, uint8_t); __stdcall static uint64_t _aullmul(uint64_t, uint64_t); __stdcall static uint64_t _aulldiv(uint64_t, uint64_t); __stdcall static uint64_t _aullrem(uint64_t, uint64_t); __regparm static uint64_t _aullshr(uint64_t, uint8_t); __regparm static uint64_t _aullshl(uint64_t, uint8_t); __stdcall static void *ntoskrnl_allocfunc(uint32_t, size_t, uint32_t); __stdcall static void ntoskrnl_freefunc(void *); static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *); static slist_entry *ntoskrnl_popsl(slist_header *); __stdcall static void ntoskrnl_init_lookaside(paged_lookaside_list *, lookaside_alloc_func *, lookaside_free_func *, uint32_t, size_t, uint32_t, uint16_t); __stdcall static void ntoskrnl_delete_lookaside(paged_lookaside_list *); __stdcall static void ntoskrnl_init_nplookaside(npaged_lookaside_list *, lookaside_alloc_func *, lookaside_free_func *, uint32_t, size_t, uint32_t, uint16_t); __stdcall static void ntoskrnl_delete_nplookaside(npaged_lookaside_list *); __stdcall static slist_entry *ntoskrnl_push_slist(/*slist_header *, slist_entry * */ void); __stdcall static slist_entry *ntoskrnl_pop_slist(/*slist_header * */ void); __stdcall static slist_entry *ntoskrnl_push_slist_ex(/*slist_header *, slist_entry *,*/ kspin_lock *); __stdcall static slist_entry *ntoskrnl_pop_slist_ex(/*slist_header *, kspin_lock * */void); __stdcall static uint32_t ntoskrnl_interlock_inc(/*volatile uint32_t * */ void); __stdcall static uint32_t ntoskrnl_interlock_dec(/*volatile uint32_t * */ void); __stdcall static void ntoskrnl_interlock_addstat(/*uint64_t, uint32_t*/ void); __stdcall static void 
ntoskrnl_freemdl(ndis_buffer *); __stdcall static uint32_t ntoskrnl_sizeofmdl(void *, size_t); __stdcall static void ntoskrnl_build_npaged_mdl(ndis_buffer *); __stdcall static void *ntoskrnl_mmaplockedpages(ndis_buffer *, uint8_t); __stdcall static void *ntoskrnl_mmaplockedpages_cache(ndis_buffer *, uint8_t, uint32_t, void *, uint32_t, uint32_t); __stdcall static void ntoskrnl_munmaplockedpages(void *, ndis_buffer *); __stdcall static void ntoskrnl_init_lock(kspin_lock *); __stdcall static size_t ntoskrnl_memcmp(const void *, const void *, size_t); __stdcall static void ntoskrnl_init_ansi_string(ndis_ansi_string *, char *); __stdcall static void ntoskrnl_init_unicode_string(ndis_unicode_string *, uint16_t *); __stdcall static void ntoskrnl_free_unicode_string(ndis_unicode_string *); __stdcall static void ntoskrnl_free_ansi_string(ndis_ansi_string *); __stdcall static ndis_status ntoskrnl_unicode_to_int(ndis_unicode_string *, uint32_t, uint32_t *); static int atoi (const char *); static long atol (const char *); static int rand(void); static void ntoskrnl_time(uint64_t *); __stdcall static uint8_t ntoskrnl_wdmver(uint8_t, uint8_t); static void ntoskrnl_thrfunc(void *); __stdcall static ndis_status ntoskrnl_create_thread(ndis_handle *, uint32_t, void *, ndis_handle, void *, void *, void *); __stdcall static ndis_status ntoskrnl_thread_exit(ndis_status); __stdcall static ndis_status ntoskrnl_devprop(device_object *, uint32_t, uint32_t, void *, uint32_t *); __stdcall static void ntoskrnl_init_mutex(kmutant *, uint32_t); __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *, uint8_t); __stdcall static uint32_t ntoskrnl_read_mutex(kmutant *); __stdcall static ndis_status ntoskrnl_objref(ndis_handle, uint32_t, void *, uint8_t, void **, void **); __stdcall static void ntoskrnl_objderef(/*void * */ void); __stdcall static uint32_t ntoskrnl_zwclose(ndis_handle); static uint32_t ntoskrnl_dbgprint(char *, ...); __stdcall static void ntoskrnl_debugger(void); __stdcall static void dummy(void); static struct mtx ntoskrnl_dispatchlock; static kspin_lock ntoskrnl_global; static int ntoskrnl_kth = 0; static struct nt_objref_head ntoskrnl_reflist; int ntoskrnl_libinit() { mtx_init(&ntoskrnl_dispatchlock, "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF); ntoskrnl_init_lock(&ntoskrnl_global); TAILQ_INIT(&ntoskrnl_reflist); return(0); } int ntoskrnl_libfini() { mtx_destroy(&ntoskrnl_dispatchlock); return(0); } __stdcall static uint8_t ntoskrnl_unicode_equal(str1, str2, caseinsensitive) ndis_unicode_string *str1; ndis_unicode_string *str2; uint8_t caseinsensitive; { int i; if (str1->nus_len != str2->nus_len) return(FALSE); for (i = 0; i < str1->nus_len; i++) { if (caseinsensitive == TRUE) { if (toupper((char)(str1->nus_buf[i] & 0xFF)) != toupper((char)(str2->nus_buf[i] & 0xFF))) return(FALSE); } else { if (str1->nus_buf[i] != str2->nus_buf[i]) return(FALSE); } } return(TRUE); } __stdcall static void ntoskrnl_unicode_copy(dest, src) ndis_unicode_string *dest; ndis_unicode_string *src; { if (dest->nus_maxlen >= src->nus_len) dest->nus_len = src->nus_len; else dest->nus_len = dest->nus_maxlen; memcpy(dest->nus_buf, src->nus_buf, dest->nus_len); return; } __stdcall static ndis_status ntoskrnl_unicode_to_ansi(dest, src, allocate) ndis_ansi_string *dest; ndis_unicode_string *src; uint8_t allocate; { char *astr = NULL; if (dest == NULL || src == NULL) return(NDIS_STATUS_FAILURE); if (allocate == TRUE) { if (ndis_unicode_to_ascii(src->nus_buf, src->nus_len, &astr)) return(NDIS_STATUS_FAILURE); dest->nas_buf = 
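/*
 * Illustrative sketch of the narrowing convention used by the unicode
 * helpers above: string lengths are byte counts, each UTF-16LE code
 * unit occupies two bytes, and conversion to ASCII simply keeps the
 * low byte of every unit.  Hypothetical function name.
 */
#include <stddef.h>
#include <stdint.h>

static void
utf16le_to_ascii(const uint16_t *src, size_t len_bytes, char *dst)
{
	size_t i, nchars = len_bytes / 2;	/* two bytes per character */

	for (i = 0; i < nchars; i++)
		dst[i] = (char)(src[i] & 0xFF);	/* keep the low byte only */
	dst[nchars] = '\0';
}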
astr; dest->nas_len = dest->nas_maxlen = strlen(astr); } else { dest->nas_len = src->nus_len / 2; /* XXX */ if (dest->nas_maxlen < dest->nas_len) dest->nas_len = dest->nas_maxlen; ndis_unicode_to_ascii(src->nus_buf, dest->nas_len * 2, &dest->nas_buf); } return (NDIS_STATUS_SUCCESS); } __stdcall static ndis_status ntoskrnl_ansi_to_unicode(dest, src, allocate) ndis_unicode_string *dest; ndis_ansi_string *src; uint8_t allocate; { uint16_t *ustr = NULL; if (dest == NULL || src == NULL) return(NDIS_STATUS_FAILURE); if (allocate == TRUE) { if (ndis_ascii_to_unicode(src->nas_buf, &ustr)) return(NDIS_STATUS_FAILURE); dest->nus_buf = ustr; dest->nus_len = dest->nus_maxlen = strlen(src->nas_buf) * 2; } else { dest->nus_len = src->nas_len * 2; /* XXX */ if (dest->nus_maxlen < dest->nus_len) dest->nus_len = dest->nus_maxlen; ndis_ascii_to_unicode(src->nas_buf, &dest->nus_buf); } return (NDIS_STATUS_SUCCESS); } __stdcall static void * ntoskrnl_iobuildsynchfsdreq(func, dobj, buf, len, off, event, status) uint32_t func; void *dobj; void *buf; uint32_t len; uint32_t *off; void *event; void *status; { return(NULL); } __stdcall static uint32_t ntoskrnl_iofcalldriver(/*dobj, irp*/) { void *dobj; void *irp; __asm__ __volatile__ ("" : "=c" (dobj), "=d" (irp)); return(0); } __stdcall static void ntoskrnl_iofcompletereq(/*irp, prioboost*/) { void *irp; uint8_t prioboost; __asm__ __volatile__ ("" : "=c" (irp), "=d" (prioboost)); return; } static void ntoskrnl_wakeup(arg) void *arg; { nt_dispatch_header *obj; wait_block *w; list_entry *e; struct thread *td; obj = arg; mtx_lock(&ntoskrnl_dispatchlock); obj->dh_sigstate = TRUE; e = obj->dh_waitlisthead.nle_flink; while (e != &obj->dh_waitlisthead) { w = (wait_block *)e; td = w->wb_kthread; ndis_thresume(td->td_proc); /* * For synchronization objects, only wake up * the first waiter. */ if (obj->dh_type == EVENT_TYPE_SYNC) break; e = e->nle_flink; } mtx_unlock(&ntoskrnl_dispatchlock); return; } static void ntoskrnl_time(tval) uint64_t *tval; { struct timespec ts; nanotime(&ts); *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 + 11644473600; return; } /* * KeWaitForSingleObject() is a tricky beast, because it can be used * with several different object types: semaphores, timers, events, * mutexes and threads. Semaphores don't appear very often, but the * other object types are quite common. KeWaitForSingleObject() is * what's normally used to acquire a mutex, and it can be used to * wait for a thread termination. * * The Windows NDIS API is implemented in terms of Windows kernel * primitives, and some of the object manipulation is duplicated in * NDIS. For example, NDIS has timers and events, which are actually * Windows kevents and ktimers. Now, you're supposed to only use the * NDIS variants of these objects within the confines of the NDIS API, * but there are some naughty developers out there who will use * KeWaitForSingleObject() on NDIS timer and event objects, so we * have to support that as well. Conseqently, our NDIS timer and event * code has to be closely tied into our ntoskrnl timer and event code, * just as it is in Windows. * * KeWaitForSingleObject() may do different things for different kinds * of objects: * * - For events, we check if the event has been signalled. If the * event is already in the signalled state, we just return immediately, * otherwise we wait for it to be set to the signalled state by someone * else calling KeSetEvent(). Events can be either synchronization or * notification events. 
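/*
 * Illustrative sketch of the conversion ntoskrnl_time() is built around:
 * Windows timestamps count 100-nanosecond intervals since January 1,
 * 1601, which lies 11,644,473,600 seconds before the Unix epoch.
 * Hypothetical function name.
 */
#include <stdint.h>
#include <time.h>

#define	EPOCH_1601_TO_1970	11644473600ULL	/* seconds between the epochs */

static uint64_t
timespec_to_nttime(const struct timespec *ts)
{
	return (((uint64_t)ts->tv_sec + EPOCH_1601_TO_1970) * 10000000ULL +
	    (uint64_t)ts->tv_nsec / 100);
}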
* * - For timers, if the timer has already fired and the timer is in * the signalled state, we just return, otherwise we wait on the * timer. Unlike an event, timers get signalled automatically when * they expire rather than someone having to trip them manually. * Timers initialized with KeInitializeTimer() are always notification * events: KeInitializeTimerEx() lets you initialize a timer as * either a notification or synchronization event. * * - For mutexes, we try to acquire the mutex and if we can't, we wait * on the mutex until it's available and then grab it. When a mutex is * released, it enters the signaled state, which wakes up one of the * threads waiting to acquire it. Mutexes are always synchronization * events. * * - For threads, the only thing we do is wait until the thread object * enters a signalled state, which occurs when the thread terminates. * Threads are always notification events. * * A notification event wakes up all threads waiting on an object. A * synchronization event wakes up just one. Also, a synchronization event * is auto-clearing, which means we automatically set the event back to * the non-signalled state once the wakeup is done. */ __stdcall uint32_t ntoskrnl_waitforobj(obj, reason, mode, alertable, duetime) nt_dispatch_header *obj; uint32_t reason; uint32_t mode; uint8_t alertable; int64_t *duetime; { struct thread *td = curthread; kmutant *km; wait_block w; struct timeval tv; int error = 0; uint64_t curtime; if (obj == NULL) return(STATUS_INVALID_PARAMETER); mtx_lock(&ntoskrnl_dispatchlock); /* * See if the object is a mutex. If so, and we already own * it, then just increment the acquisition count and return. * * For any other kind of object, see if it's already in the * signalled state, and if it is, just return. If the object * is marked as a synchronization event, reset the state to * unsignalled. */ if (obj->dh_size == OTYPE_MUTEX) { km = (kmutant *)obj; if (km->km_ownerthread == NULL || km->km_ownerthread == curthread->td_proc) { obj->dh_sigstate = FALSE; km->km_acquirecnt++; km->km_ownerthread = curthread->td_proc; mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_SUCCESS); } } else if (obj->dh_sigstate == TRUE) { if (obj->dh_type == EVENT_TYPE_SYNC) obj->dh_sigstate = FALSE; mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_SUCCESS); } w.wb_object = obj; w.wb_kthread = td; INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist)); /* * The timeout value is specified in 100 nanosecond units * and can be a positive or negative number. If it's positive, * then the duetime is absolute, and we need to convert it * to an absolute offset relative to now in order to use it. * If it's negative, then the duetime is relative and we * just have to convert the units. */ if (duetime != NULL) { if (*duetime < 0) { tv.tv_sec = - (*duetime) / 10000000; tv.tv_usec = (- (*duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (*duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((*duetime) - curtime) / 10000000; tv.tv_usec = ((*duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } } mtx_unlock(&ntoskrnl_dispatchlock); error = ndis_thsuspend(td->td_proc, duetime == NULL ? 0 : tvtohz(&tv)); mtx_lock(&ntoskrnl_dispatchlock); /* We timed out. Leave the object alone and return status. 
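/*
 * Illustrative sketch of the duetime handling described above: duetime
 * is expressed in 100-nanosecond units, negative values are relative
 * timeouts, and the wait code converts the magnitude into a struct
 * timeval before calling tvtohz().  Hypothetical function name.
 */
#include <stdint.h>
#include <sys/time.h>

static void
reltime_to_timeval(int64_t duetime, struct timeval *tv)
{
	int64_t t = -duetime;			/* magnitude, in 100 ns units */

	tv->tv_sec = t / 10000000;		/* 10^7 units per second */
	tv->tv_usec = t / 10 - tv->tv_sec * 1000000; /* leftover microseconds */
}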
*/ if (error == EWOULDBLOCK) { REMOVE_LIST_ENTRY((&w.wb_waitlist)); mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_TIMEOUT); } /* * Mutexes are always synchronization objects, which means * if several threads are waiting to acquire it, only one will * be woken up. If that one is us, and the mutex is up for grabs, * grab it. */ if (obj->dh_size == OTYPE_MUTEX) { km = (kmutant *)obj; if (km->km_ownerthread == NULL) { km->km_ownerthread = curthread->td_proc; km->km_acquirecnt++; } } if (obj->dh_type == EVENT_TYPE_SYNC) obj->dh_sigstate = FALSE; REMOVE_LIST_ENTRY((&w.wb_waitlist)); mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_SUCCESS); } __stdcall static uint32_t ntoskrnl_waitforobjs(cnt, obj, wtype, reason, mode, alertable, duetime, wb_array) uint32_t cnt; nt_dispatch_header *obj[]; uint32_t wtype; uint32_t reason; uint32_t mode; uint8_t alertable; int64_t *duetime; wait_block *wb_array; { struct thread *td = curthread; kmutant *km; wait_block _wb_array[THREAD_WAIT_OBJECTS]; wait_block *w; struct timeval tv; int i, wcnt = 0, widx = 0, error = 0; uint64_t curtime; struct timespec t1, t2; if (cnt > MAX_WAIT_OBJECTS) return(STATUS_INVALID_PARAMETER); if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL) return(STATUS_INVALID_PARAMETER); mtx_lock(&ntoskrnl_dispatchlock); if (wb_array == NULL) w = &_wb_array[0]; else w = wb_array; /* First pass: see if we can satisfy any waits immediately. */ for (i = 0; i < cnt; i++) { if (obj[i]->dh_size == OTYPE_MUTEX) { km = (kmutant *)obj[i]; if (km->km_ownerthread == NULL || km->km_ownerthread == curthread->td_proc) { obj[i]->dh_sigstate = FALSE; km->km_acquirecnt++; km->km_ownerthread = curthread->td_proc; if (wtype == WAITTYPE_ANY) { mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_WAIT_0 + i); } } } else if (obj[i]->dh_sigstate == TRUE) { if (obj[i]->dh_type == EVENT_TYPE_SYNC) obj[i]->dh_sigstate = FALSE; if (wtype == WAITTYPE_ANY) { mtx_unlock(&ntoskrnl_dispatchlock); return (STATUS_WAIT_0 + i); } } } /* * Second pass: set up wait for anything we can't * satisfy immediately. */ for (i = 0; i < cnt; i++) { if (obj[i]->dh_sigstate == TRUE) continue; INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead), (&w[i].wb_waitlist)); w[i].wb_kthread = td; w[i].wb_object = obj[i]; wcnt++; } if (duetime != NULL) { if (*duetime < 0) { tv.tv_sec = - (*duetime) / 10000000; tv.tv_usec = (- (*duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (*duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((*duetime) - curtime) / 10000000; tv.tv_usec = ((*duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } } while (wcnt) { nanotime(&t1); mtx_unlock(&ntoskrnl_dispatchlock); error = ndis_thsuspend(td->td_proc, duetime == NULL ? 
0 : tvtohz(&tv)); mtx_lock(&ntoskrnl_dispatchlock); nanotime(&t2); for (i = 0; i < cnt; i++) { if (obj[i]->dh_size == OTYPE_MUTEX) { km = (kmutant *)obj; if (km->km_ownerthread == NULL) { km->km_ownerthread = curthread->td_proc; km->km_acquirecnt++; } } if (obj[i]->dh_sigstate == TRUE) { widx = i; if (obj[i]->dh_type == EVENT_TYPE_SYNC) obj[i]->dh_sigstate = FALSE; REMOVE_LIST_ENTRY((&w[i].wb_waitlist)); wcnt--; } } if (error || wtype == WAITTYPE_ANY) break; if (duetime != NULL) { tv.tv_sec -= (t2.tv_sec - t1.tv_sec); tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000; } } if (wcnt) { for (i = 0; i < cnt; i++) REMOVE_LIST_ENTRY((&w[i].wb_waitlist)); } if (error == EWOULDBLOCK) { mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_TIMEOUT); } if (wtype == WAITTYPE_ANY && wcnt) { mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_WAIT_0 + widx); } mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_SUCCESS); } __stdcall static void ntoskrnl_writereg_ushort(reg, val) uint16_t *reg; uint16_t val; { bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); return; } __stdcall static uint16_t ntoskrnl_readreg_ushort(reg) uint16_t *reg; { return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } __stdcall static void ntoskrnl_writereg_ulong(reg, val) uint32_t *reg; uint32_t val; { bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); return; } __stdcall static uint32_t ntoskrnl_readreg_ulong(reg) uint32_t *reg; { return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } __stdcall static uint8_t ntoskrnl_readreg_uchar(reg) uint8_t *reg; { return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg)); } __stdcall static void ntoskrnl_writereg_uchar(reg, val) uint8_t *reg; uint8_t val; { bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val); return; } __stdcall static int64_t _allmul(a, b) int64_t a; int64_t b; { return (a * b); } __stdcall static int64_t _alldiv(a, b) int64_t a; int64_t b; { return (a / b); } __stdcall static int64_t _allrem(a, b) int64_t a; int64_t b; { return (a % b); } __stdcall static uint64_t _aullmul(a, b) uint64_t a; uint64_t b; { return (a * b); } __stdcall static uint64_t _aulldiv(a, b) uint64_t a; uint64_t b; { return (a / b); } __stdcall static uint64_t _aullrem(a, b) uint64_t a; uint64_t b; { return (a % b); } __regparm static int64_t _allshl(a, b) int64_t a; uint8_t b; { return (a << b); } __regparm static uint64_t _aullshl(a, b) uint64_t a; uint8_t b; { return (a << b); } __regparm static int64_t _allshr(a, b) int64_t a; uint8_t b; { return (a >> b); } __regparm static uint64_t _aullshr(a, b) uint64_t a; uint8_t b; { return (a >> b); } static slist_entry * ntoskrnl_pushsl(head, entry) slist_header *head; slist_entry *entry; { slist_entry *oldhead; oldhead = head->slh_list.slh_next; entry->sl_next = head->slh_list.slh_next; head->slh_list.slh_next = entry; head->slh_list.slh_depth++; head->slh_list.slh_seq++; return(oldhead); } static slist_entry * ntoskrnl_popsl(head) slist_header *head; { slist_entry *first; first = head->slh_list.slh_next; if (first != NULL) { head->slh_list.slh_next = first->sl_next; head->slh_list.slh_depth--; head->slh_list.slh_seq++; } return(first); } __stdcall static void * ntoskrnl_allocfunc(pooltype, size, tag) uint32_t pooltype; size_t size; uint32_t tag; { return(malloc(size, M_DEVBUF, M_NOWAIT)); } __stdcall static void ntoskrnl_freefunc(buf) void *buf; { free(buf, M_DEVBUF); return; } __stdcall static void ntoskrnl_init_lookaside(lookaside, allocfunc, freefunc, flags, size, tag, depth) 
paged_lookaside_list *lookaside; lookaside_alloc_func *allocfunc; lookaside_free_func *freefunc; uint32_t flags; size_t size; uint32_t tag; uint16_t depth; { bzero((char *)lookaside, sizeof(paged_lookaside_list)); if (size < sizeof(slist_entry)) lookaside->nll_l.gl_size = sizeof(slist_entry); else lookaside->nll_l.gl_size = size; lookaside->nll_l.gl_tag = tag; if (allocfunc == NULL) lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc; else lookaside->nll_l.gl_allocfunc = allocfunc; if (freefunc == NULL) lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc; else lookaside->nll_l.gl_freefunc = freefunc; ntoskrnl_init_lock(&lookaside->nll_obsoletelock); lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH; lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH; return; } __stdcall static void ntoskrnl_delete_lookaside(lookaside) paged_lookaside_list *lookaside; { void *buf; __stdcall void (*freefunc)(void *); freefunc = lookaside->nll_l.gl_freefunc; while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL) freefunc(buf); return; } __stdcall static void ntoskrnl_init_nplookaside(lookaside, allocfunc, freefunc, flags, size, tag, depth) npaged_lookaside_list *lookaside; lookaside_alloc_func *allocfunc; lookaside_free_func *freefunc; uint32_t flags; size_t size; uint32_t tag; uint16_t depth; { bzero((char *)lookaside, sizeof(npaged_lookaside_list)); if (size < sizeof(slist_entry)) lookaside->nll_l.gl_size = sizeof(slist_entry); else lookaside->nll_l.gl_size = size; lookaside->nll_l.gl_tag = tag; if (allocfunc == NULL) lookaside->nll_l.gl_allocfunc = ntoskrnl_allocfunc; else lookaside->nll_l.gl_allocfunc = allocfunc; if (freefunc == NULL) lookaside->nll_l.gl_freefunc = ntoskrnl_freefunc; else lookaside->nll_l.gl_freefunc = freefunc; ntoskrnl_init_lock(&lookaside->nll_obsoletelock); lookaside->nll_l.gl_depth = LOOKASIDE_DEPTH; lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH; return; } __stdcall static void ntoskrnl_delete_nplookaside(lookaside) npaged_lookaside_list *lookaside; { void *buf; __stdcall void (*freefunc)(void *); freefunc = lookaside->nll_l.gl_freefunc; while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL) freefunc(buf); return; } /* * Note: the interlocked slist push and pop routines are * declared to be _fastcall in Windows. gcc 3.4 is supposed * to have support for this calling convention, however we * don't have that version available yet, so we kludge things * up using some inline assembly. 
*/ __stdcall static slist_entry * ntoskrnl_push_slist(/*head, entry*/ void) { slist_header *head; slist_entry *entry; slist_entry *oldhead; __asm__ __volatile__ ("" : "=c" (head), "=d" (entry)); oldhead = (slist_entry *)FASTCALL3(ntoskrnl_push_slist_ex, head, entry, &ntoskrnl_global); return(oldhead); } __stdcall static slist_entry * ntoskrnl_pop_slist(/*head*/ void) { slist_header *head; slist_entry *first; __asm__ __volatile__ ("" : "=c" (head)); first = (slist_entry *)FASTCALL2(ntoskrnl_pop_slist_ex, head, &ntoskrnl_global); return(first); } __stdcall static slist_entry * ntoskrnl_push_slist_ex(/*head, entry,*/ lock) kspin_lock *lock; { slist_header *head; slist_entry *entry; slist_entry *oldhead; uint8_t irql; __asm__ __volatile__ ("" : "=c" (head), "=d" (entry)); irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL); oldhead = ntoskrnl_pushsl(head, entry); FASTCALL2(hal_unlock, lock, irql); return(oldhead); } __stdcall static slist_entry * ntoskrnl_pop_slist_ex(/*head, lock*/ void) { slist_header *head; kspin_lock *lock; slist_entry *first; uint8_t irql; __asm__ __volatile__ ("" : "=c" (head), "=d" (lock)); irql = FASTCALL2(hal_lock, lock, DISPATCH_LEVEL); first = ntoskrnl_popsl(head); FASTCALL2(hal_unlock, lock, irql); return(first); } __stdcall void ntoskrnl_lock_dpc(/*lock*/ void) { kspin_lock *lock; __asm__ __volatile__ ("" : "=c" (lock)); while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) /* sit and spin */; return; } __stdcall void ntoskrnl_unlock_dpc(/*lock*/ void) { kspin_lock *lock; __asm__ __volatile__ ("" : "=c" (lock)); atomic_store_rel_int((volatile u_int *)lock, 0); return; } __stdcall static uint32_t ntoskrnl_interlock_inc(/*addend*/ void) { volatile uint32_t *addend; __asm__ __volatile__ ("" : "=c" (addend)); atomic_add_long((volatile u_long *)addend, 1); return(*addend); } __stdcall static uint32_t ntoskrnl_interlock_dec(/*addend*/ void) { volatile uint32_t *addend; __asm__ __volatile__ ("" : "=c" (addend)); atomic_subtract_long((volatile u_long *)addend, 1); return(*addend); } __stdcall static void ntoskrnl_interlock_addstat(/*addend, inc*/) { uint64_t *addend; uint32_t inc; uint8_t irql; __asm__ __volatile__ ("" : "=c" (addend), "=d" (inc)); irql = FASTCALL2(hal_lock, &ntoskrnl_global, DISPATCH_LEVEL); *addend += inc; FASTCALL2(hal_unlock, &ntoskrnl_global, irql); return; }; __stdcall static void ntoskrnl_freemdl(mdl) ndis_buffer *mdl; { ndis_buffer *head; if (mdl == NULL || mdl->nb_process == NULL) return; head = mdl->nb_process; if (head->nb_flags != 0x1) return; mdl->nb_next = head->nb_next; head->nb_next = mdl; /* Decrement count of busy buffers. */ head->nb_bytecount--; /* * If the pool has been marked for deletion and there are * no more buffers outstanding, nuke the pool. 
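 *
 * In this scheme the buffer that heads the pool does double duty:
 * nb_flags == 0x1 identifies it as the pool header, nb_bytecount
 * tracks how many buffers are still outstanding, and a non-zero
 * nb_byteoffset appears to serve as the "delete pending" flag,
 * presumably set wherever the pool itself is torn down.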
*/ if (head->nb_byteoffset && head->nb_bytecount == 0) free(head, M_DEVBUF); return; } __stdcall static uint32_t ntoskrnl_sizeofmdl(vaddr, len) void *vaddr; size_t len; { uint32_t l; l = sizeof(struct ndis_buffer) + (sizeof(uint32_t) * SPAN_PAGES(vaddr, len)); return(l); } __stdcall static void ntoskrnl_build_npaged_mdl(mdl) ndis_buffer *mdl; { mdl->nb_mappedsystemva = (char *)mdl->nb_startva + mdl->nb_byteoffset; return; } __stdcall static void * ntoskrnl_mmaplockedpages(buf, accessmode) ndis_buffer *buf; uint8_t accessmode; { return(MDL_VA(buf)); } __stdcall static void * ntoskrnl_mmaplockedpages_cache(buf, accessmode, cachetype, vaddr, bugcheck, prio) ndis_buffer *buf; uint8_t accessmode; uint32_t cachetype; void *vaddr; uint32_t bugcheck; uint32_t prio; { return(MDL_VA(buf)); } __stdcall static void ntoskrnl_munmaplockedpages(vaddr, buf) void *vaddr; ndis_buffer *buf; { return; } /* * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel() * and KefReleaseSpinLockFromDpcLevel() appear to be analagous * to splnet()/splx() in their use. We can't create a new mutex * lock here because there is no complimentary KeFreeSpinLock() * function. Instead, we grab a mutex from the mutex pool. */ __stdcall static void ntoskrnl_init_lock(lock) kspin_lock *lock; { *lock = 0; return; } __stdcall static size_t ntoskrnl_memcmp(s1, s2, len) const void *s1; const void *s2; size_t len; { size_t i, total = 0; uint8_t *m1, *m2; m1 = __DECONST(char *, s1); m2 = __DECONST(char *, s2); for (i = 0; i < len; i++) { if (m1[i] == m2[i]) total++; } return(total); } __stdcall static void ntoskrnl_init_ansi_string(dst, src) ndis_ansi_string *dst; char *src; { ndis_ansi_string *a; a = dst; if (a == NULL) return; if (src == NULL) { a->nas_len = a->nas_maxlen = 0; a->nas_buf = NULL; } else { a->nas_buf = src; a->nas_len = a->nas_maxlen = strlen(src); } return; } __stdcall static void ntoskrnl_init_unicode_string(dst, src) ndis_unicode_string *dst; uint16_t *src; { ndis_unicode_string *u; int i; u = dst; if (u == NULL) return; if (src == NULL) { u->nus_len = u->nus_maxlen = 0; u->nus_buf = NULL; } else { i = 0; while(src[i] != 0) i++; u->nus_buf = src; u->nus_len = u->nus_maxlen = i * 2; } return; } __stdcall ndis_status ntoskrnl_unicode_to_int(ustr, base, val) ndis_unicode_string *ustr; uint32_t base; uint32_t *val; { uint16_t *uchr; int len, neg = 0; char abuf[64]; char *astr; uchr = ustr->nus_buf; len = ustr->nus_len; bzero(abuf, sizeof(abuf)); if ((char)((*uchr) & 0xFF) == '-') { neg = 1; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == '+') { neg = 0; uchr++; len -= 2; } if (base == 0) { if ((char)((*uchr) & 0xFF) == 'b') { base = 2; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == 'o') { base = 8; uchr++; len -= 2; } else if ((char)((*uchr) & 0xFF) == 'x') { base = 16; uchr++; len -= 2; } else base = 10; } astr = abuf; if (neg) { strcpy(astr, "-"); astr++; } ndis_unicode_to_ascii(uchr, len, &astr); *val = strtoul(abuf, NULL, base); return(NDIS_STATUS_SUCCESS); } __stdcall static void ntoskrnl_free_unicode_string(ustr) ndis_unicode_string *ustr; { if (ustr->nus_buf == NULL) return; free(ustr->nus_buf, M_DEVBUF); ustr->nus_buf = NULL; return; } __stdcall static void ntoskrnl_free_ansi_string(astr) ndis_ansi_string *astr; { if (astr->nas_buf == NULL) return; free(astr->nas_buf, M_DEVBUF); astr->nas_buf = NULL; return; } static int atoi(str) const char *str; { return (int)strtol(str, (char **)NULL, 10); } static long atol(str) const char *str; { return strtol(str, (char **)NULL, 10); } static int 
rand(void) { struct timeval tv; microtime(&tv); srandom(tv.tv_usec); return((int)random()); } __stdcall static uint8_t ntoskrnl_wdmver(major, minor) uint8_t major; uint8_t minor; { if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP) return(TRUE); return(FALSE); } __stdcall static ndis_status ntoskrnl_devprop(devobj, regprop, buflen, prop, reslen) device_object *devobj; uint32_t regprop; uint32_t buflen; void *prop; uint32_t *reslen; { ndis_miniport_block *block; block = devobj->do_rsvd; switch (regprop) { case DEVPROP_DRIVER_KEYNAME: ndis_ascii_to_unicode(__DECONST(char *, device_get_nameunit(block->nmb_dev)), (uint16_t **)&prop); *reslen = strlen(device_get_nameunit(block->nmb_dev)) * 2; break; default: return(STATUS_INVALID_PARAMETER_2); break; } return(STATUS_SUCCESS); } __stdcall static void ntoskrnl_init_mutex(kmutex, level) kmutant *kmutex; uint32_t level; { INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead)); kmutex->km_abandoned = FALSE; kmutex->km_apcdisable = 1; kmutex->km_header.dh_sigstate = TRUE; kmutex->km_header.dh_type = EVENT_TYPE_SYNC; kmutex->km_header.dh_size = OTYPE_MUTEX; kmutex->km_acquirecnt = 0; kmutex->km_ownerthread = NULL; return; } __stdcall static uint32_t ntoskrnl_release_mutex(kmutex, kwait) kmutant *kmutex; uint8_t kwait; { mtx_lock(&ntoskrnl_dispatchlock); if (kmutex->km_ownerthread != curthread->td_proc) { mtx_unlock(&ntoskrnl_dispatchlock); return(STATUS_MUTANT_NOT_OWNED); } kmutex->km_acquirecnt--; if (kmutex->km_acquirecnt == 0) { kmutex->km_ownerthread = NULL; mtx_unlock(&ntoskrnl_dispatchlock); ntoskrnl_wakeup(&kmutex->km_header); } else mtx_unlock(&ntoskrnl_dispatchlock); return(kmutex->km_acquirecnt); } __stdcall static uint32_t ntoskrnl_read_mutex(kmutex) kmutant *kmutex; { return(kmutex->km_header.dh_sigstate); } __stdcall void ntoskrnl_init_event(kevent, type, state) nt_kevent *kevent; uint32_t type; uint8_t state; { INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead)); kevent->k_header.dh_sigstate = state; kevent->k_header.dh_type = type; kevent->k_header.dh_size = OTYPE_EVENT; return; } __stdcall uint32_t ntoskrnl_reset_event(kevent) nt_kevent *kevent; { uint32_t prevstate; mtx_lock(&ntoskrnl_dispatchlock); prevstate = kevent->k_header.dh_sigstate; kevent->k_header.dh_sigstate = FALSE; mtx_unlock(&ntoskrnl_dispatchlock); return(prevstate); } __stdcall uint32_t ntoskrnl_set_event(kevent, increment, kwait) nt_kevent *kevent; uint32_t increment; uint8_t kwait; { uint32_t prevstate; prevstate = kevent->k_header.dh_sigstate; ntoskrnl_wakeup(&kevent->k_header); return(prevstate); } __stdcall void ntoskrnl_clear_event(kevent) nt_kevent *kevent; { kevent->k_header.dh_sigstate = FALSE; return; } __stdcall uint32_t ntoskrnl_read_event(kevent) nt_kevent *kevent; { return(kevent->k_header.dh_sigstate); } __stdcall static ndis_status ntoskrnl_objref(handle, reqaccess, otype, accessmode, object, handleinfo) ndis_handle handle; uint32_t reqaccess; void *otype; uint8_t accessmode; void **object; void **handleinfo; { nt_objref *nr; nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO); if (nr == NULL) return(NDIS_STATUS_FAILURE); INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead)); nr->no_obj = handle; nr->no_dh.dh_size = OTYPE_THREAD; TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link); *object = nr; return(NDIS_STATUS_SUCCESS); } __stdcall static void ntoskrnl_objderef(/*object*/void) { void *object; nt_objref *nr; __asm__ __volatile__ ("" : "=c" (object)); nr = object; TAILQ_REMOVE(&ntoskrnl_reflist, nr, link); free(nr, M_DEVBUF); return; } __stdcall static 
uint32_t ntoskrnl_zwclose(handle) ndis_handle handle; { return(STATUS_SUCCESS); } /* * This is here just in case the thread returns without calling * PsTerminateSystemThread(). */ static void ntoskrnl_thrfunc(arg) void *arg; { thread_context *thrctx; __stdcall uint32_t (*tfunc)(void *); void *tctx; uint32_t rval; thrctx = arg; tfunc = thrctx->tc_thrfunc; tctx = thrctx->tc_thrctx; free(thrctx, M_TEMP); rval = tfunc(tctx); ntoskrnl_thread_exit(rval); return; /* notreached */ } __stdcall static ndis_status ntoskrnl_create_thread(handle, reqaccess, objattrs, phandle, clientid, thrfunc, thrctx) ndis_handle *handle; uint32_t reqaccess; void *objattrs; ndis_handle phandle; void *clientid; void *thrfunc; void *thrctx; { int error; char tname[128]; thread_context *tc; struct proc *p; tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT); if (tc == NULL) return(NDIS_STATUS_FAILURE); tc->tc_thrctx = thrctx; tc->tc_thrfunc = thrfunc; sprintf(tname, "windows kthread %d", ntoskrnl_kth); error = kthread_create(ntoskrnl_thrfunc, tc, &p, RFHIGHPID, NDIS_KSTACK_PAGES, tname); *handle = p; ntoskrnl_kth++; return(error); } /* * In Windows, the exit of a thread is an event that you're allowed * to wait on, assuming you've obtained a reference to the thread using * ObReferenceObjectByHandle(). Unfortunately, the only way we can * simulate this behavior is to register each thread we create in a * reference list, and if someone holds a reference to us, we poke * them. */ __stdcall static ndis_status ntoskrnl_thread_exit(status) ndis_status status; { struct nt_objref *nr; TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) { if (nr->no_obj != curthread->td_proc) continue; ntoskrnl_wakeup(&nr->no_dh); break; } ntoskrnl_kth--; +#if __FreeBSD_version < 502113 + mtx_lock(&Giant); +#endif kthread_exit(0); return(0); /* notreached */ } static uint32_t ntoskrnl_dbgprint(char *fmt, ...) { va_list ap; if (bootverbose) { va_start(ap, fmt); vprintf(fmt, ap); } return(STATUS_SUCCESS); } __stdcall static void ntoskrnl_debugger(void) { Debugger("ntoskrnl_debugger(): breakpoint"); return; } static void ntoskrnl_timercall(arg) void *arg; { ktimer *timer; struct timeval tv; mtx_unlock(&Giant); timer = arg; timer->k_header.dh_inserted = FALSE; /* * If this is a periodic timer, re-arm it * so it will fire again. We do this before * calling any deferred procedure calls because * it's possible the DPC might cancel the timer, * in which case it would be wrong for us to * re-arm it again afterwards. */ if (timer->k_period) { tv.tv_sec = 0; tv.tv_usec = timer->k_period * 1000; timer->k_header.dh_inserted = TRUE; timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv)); } if (timer->k_dpc != NULL) ntoskrnl_queue_dpc(timer->k_dpc, NULL, NULL); ntoskrnl_wakeup(&timer->k_header); mtx_lock(&Giant); return; } __stdcall void ntoskrnl_init_timer(timer) ktimer *timer; { if (timer == NULL) return; ntoskrnl_init_timer_ex(timer, EVENT_TYPE_NOTIFY); return; } __stdcall void ntoskrnl_init_timer_ex(timer, type) ktimer *timer; uint32_t type; { if (timer == NULL) return; INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead)); timer->k_header.dh_sigstate = FALSE; timer->k_header.dh_inserted = FALSE; timer->k_header.dh_type = type; timer->k_header.dh_size = OTYPE_TIMER; callout_handle_init(&timer->k_handle); return; } /* * This is a wrapper for Windows deferred procedure calls that * have been placed on an NDIS thread work queue. We need it * since the DPC could be a _stdcall function. 
Also, as far as * I can tell, defered procedure calls must run at DISPATCH_LEVEL. */ static void ntoskrnl_run_dpc(arg) void *arg; { __stdcall kdpc_func dpcfunc; kdpc *dpc; uint8_t irql; dpc = arg; dpcfunc = (kdpc_func)dpc->k_deferedfunc; irql = FASTCALL1(hal_raise_irql, DISPATCH_LEVEL); dpcfunc(dpc, dpc->k_deferredctx, dpc->k_sysarg1, dpc->k_sysarg2); FASTCALL1(hal_lower_irql, irql); return; } __stdcall void ntoskrnl_init_dpc(dpc, dpcfunc, dpcctx) kdpc *dpc; void *dpcfunc; void *dpcctx; { if (dpc == NULL) return; dpc->k_deferedfunc = dpcfunc; dpc->k_deferredctx = dpcctx; return; } __stdcall uint8_t ntoskrnl_queue_dpc(dpc, sysarg1, sysarg2) kdpc *dpc; void *sysarg1; void *sysarg2; { dpc->k_sysarg1 = sysarg1; dpc->k_sysarg2 = sysarg2; if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI)) return(FALSE); return(TRUE); } __stdcall uint8_t ntoskrnl_dequeue_dpc(dpc) kdpc *dpc; { if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI)) return(FALSE); return(TRUE); } __stdcall uint8_t ntoskrnl_set_timer_ex(timer, duetime, period, dpc) ktimer *timer; int64_t duetime; uint32_t period; kdpc *dpc; { struct timeval tv; uint64_t curtime; uint8_t pending; if (timer == NULL) return(FALSE); if (timer->k_header.dh_inserted == TRUE) { untimeout(ntoskrnl_timercall, timer, timer->k_handle); timer->k_header.dh_inserted = FALSE; pending = TRUE; } else pending = FALSE; timer->k_duetime = duetime; timer->k_period = period; timer->k_header.dh_sigstate = FALSE; timer->k_dpc = dpc; if (duetime < 0) { tv.tv_sec = - (duetime) / 10000000; tv.tv_usec = (- (duetime) / 10) - (tv.tv_sec * 1000000); } else { ntoskrnl_time(&curtime); if (duetime < curtime) tv.tv_sec = tv.tv_usec = 0; else { tv.tv_sec = ((duetime) - curtime) / 10000000; tv.tv_usec = ((duetime) - curtime) / 10 - (tv.tv_sec * 1000000); } } timer->k_header.dh_inserted = TRUE; timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv)); return(pending); } __stdcall uint8_t ntoskrnl_set_timer(timer, duetime, dpc) ktimer *timer; int64_t duetime; kdpc *dpc; { return (ntoskrnl_set_timer_ex(timer, duetime, 0, dpc)); } __stdcall uint8_t ntoskrnl_cancel_timer(timer) ktimer *timer; { uint8_t pending; if (timer == NULL) return(FALSE); if (timer->k_header.dh_inserted == TRUE) { untimeout(ntoskrnl_timercall, timer, timer->k_handle); if (timer->k_dpc != NULL) ntoskrnl_dequeue_dpc(timer->k_dpc); pending = TRUE; } else pending = FALSE; return(pending); } __stdcall uint8_t ntoskrnl_read_timer(timer) ktimer *timer; { return(timer->k_header.dh_sigstate); } __stdcall static void dummy() { printf ("ntoskrnl dummy called...\n"); return; } image_patch_table ntoskrnl_functbl[] = { { "RtlCompareMemory", (FUNC)ntoskrnl_memcmp }, { "RtlEqualUnicodeString", (FUNC)ntoskrnl_unicode_equal }, { "RtlCopyUnicodeString", (FUNC)ntoskrnl_unicode_copy }, { "RtlUnicodeStringToAnsiString", (FUNC)ntoskrnl_unicode_to_ansi }, { "RtlAnsiStringToUnicodeString", (FUNC)ntoskrnl_ansi_to_unicode }, { "RtlInitAnsiString", (FUNC)ntoskrnl_init_ansi_string }, { "RtlInitUnicodeString", (FUNC)ntoskrnl_init_unicode_string }, { "RtlFreeAnsiString", (FUNC)ntoskrnl_free_ansi_string }, { "RtlFreeUnicodeString", (FUNC)ntoskrnl_free_unicode_string }, { "RtlUnicodeStringToInteger", (FUNC)ntoskrnl_unicode_to_int }, { "sprintf", (FUNC)sprintf }, { "vsprintf", (FUNC)vsprintf }, { "_snprintf", (FUNC)snprintf }, { "_vsnprintf", (FUNC)vsnprintf }, { "DbgPrint", (FUNC)ntoskrnl_dbgprint }, { "DbgBreakPoint", (FUNC)ntoskrnl_debugger }, { "strncmp", (FUNC)strncmp }, { "strcmp", (FUNC)strcmp }, { "strncpy", (FUNC)strncpy }, { "strcpy", 
(FUNC)strcpy }, { "strlen", (FUNC)strlen }, { "memcpy", (FUNC)memcpy }, { "memmove", (FUNC)memcpy }, { "memset", (FUNC)memset }, { "IofCallDriver", (FUNC)ntoskrnl_iofcalldriver }, { "IofCompleteRequest", (FUNC)ntoskrnl_iofcompletereq }, { "IoBuildSynchronousFsdRequest", (FUNC)ntoskrnl_iobuildsynchfsdreq }, { "KeWaitForSingleObject", (FUNC)ntoskrnl_waitforobj }, { "KeWaitForMultipleObjects", (FUNC)ntoskrnl_waitforobjs }, { "_allmul", (FUNC)_allmul }, { "_alldiv", (FUNC)_alldiv }, { "_allrem", (FUNC)_allrem }, { "_allshr", (FUNC)_allshr }, { "_allshl", (FUNC)_allshl }, { "_aullmul", (FUNC)_aullmul }, { "_aulldiv", (FUNC)_aulldiv }, { "_aullrem", (FUNC)_aullrem }, { "_aullshr", (FUNC)_aullshr }, { "_aullshl", (FUNC)_aullshl }, { "atoi", (FUNC)atoi }, { "atol", (FUNC)atol }, { "rand", (FUNC)rand }, { "WRITE_REGISTER_USHORT", (FUNC)ntoskrnl_writereg_ushort }, { "READ_REGISTER_USHORT", (FUNC)ntoskrnl_readreg_ushort }, { "WRITE_REGISTER_ULONG", (FUNC)ntoskrnl_writereg_ulong }, { "READ_REGISTER_ULONG", (FUNC)ntoskrnl_readreg_ulong }, { "READ_REGISTER_UCHAR", (FUNC)ntoskrnl_readreg_uchar }, { "WRITE_REGISTER_UCHAR", (FUNC)ntoskrnl_writereg_uchar }, { "ExInitializePagedLookasideList", (FUNC)ntoskrnl_init_lookaside }, { "ExDeletePagedLookasideList", (FUNC)ntoskrnl_delete_lookaside }, { "ExInitializeNPagedLookasideList", (FUNC)ntoskrnl_init_nplookaside }, { "ExDeleteNPagedLookasideList", (FUNC)ntoskrnl_delete_nplookaside }, { "InterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist }, { "InterlockedPushEntrySList", (FUNC)ntoskrnl_push_slist }, { "ExInterlockedPopEntrySList", (FUNC)ntoskrnl_pop_slist_ex }, { "ExInterlockedPushEntrySList",(FUNC)ntoskrnl_push_slist_ex }, { "KefAcquireSpinLockAtDpcLevel", (FUNC)ntoskrnl_lock_dpc }, { "KefReleaseSpinLockFromDpcLevel", (FUNC)ntoskrnl_unlock_dpc }, { "InterlockedIncrement", (FUNC)ntoskrnl_interlock_inc }, { "InterlockedDecrement", (FUNC)ntoskrnl_interlock_dec }, { "ExInterlockedAddLargeStatistic", (FUNC)ntoskrnl_interlock_addstat }, { "IoFreeMdl", (FUNC)ntoskrnl_freemdl }, { "MmSizeOfMdl", (FUNC)ntoskrnl_sizeofmdl }, { "MmMapLockedPages", (FUNC)ntoskrnl_mmaplockedpages }, { "MmMapLockedPagesSpecifyCache", (FUNC)ntoskrnl_mmaplockedpages_cache }, { "MmUnmapLockedPages", (FUNC)ntoskrnl_munmaplockedpages }, { "MmBuildMdlForNonPagedPool", (FUNC)ntoskrnl_build_npaged_mdl }, { "KeInitializeSpinLock", (FUNC)ntoskrnl_init_lock }, { "IoIsWdmVersionAvailable", (FUNC)ntoskrnl_wdmver }, { "IoGetDeviceProperty", (FUNC)ntoskrnl_devprop }, { "KeInitializeMutex", (FUNC)ntoskrnl_init_mutex }, { "KeReleaseMutex", (FUNC)ntoskrnl_release_mutex }, { "KeReadStateMutex", (FUNC)ntoskrnl_read_mutex }, { "KeInitializeEvent", (FUNC)ntoskrnl_init_event }, { "KeSetEvent", (FUNC)ntoskrnl_set_event }, { "KeResetEvent", (FUNC)ntoskrnl_reset_event }, { "KeClearEvent", (FUNC)ntoskrnl_clear_event }, { "KeReadStateEvent", (FUNC)ntoskrnl_read_event }, { "KeInitializeTimer", (FUNC)ntoskrnl_init_timer }, { "KeInitializeTimerEx", (FUNC)ntoskrnl_init_timer_ex }, { "KeSetTimer", (FUNC)ntoskrnl_set_timer }, { "KeSetTimerEx", (FUNC)ntoskrnl_set_timer_ex }, { "KeCancelTimer", (FUNC)ntoskrnl_cancel_timer }, { "KeReadStateTimer", (FUNC)ntoskrnl_read_timer }, { "KeInitializeDpc", (FUNC)ntoskrnl_init_dpc }, { "KeInsertQueueDpc", (FUNC)ntoskrnl_queue_dpc }, { "KeRemoveQueueDpc", (FUNC)ntoskrnl_dequeue_dpc }, { "ObReferenceObjectByHandle", (FUNC)ntoskrnl_objref }, { "ObfDereferenceObject", (FUNC)ntoskrnl_objderef }, { "ZwClose", (FUNC)ntoskrnl_zwclose }, { "PsCreateSystemThread", (FUNC)ntoskrnl_create_thread }, { 
"PsTerminateSystemThread", (FUNC)ntoskrnl_thread_exit }, /* * This last entry is a catch-all for any function we haven't * implemented yet. The PE import list patching routine will * use it for any function that doesn't have an explicit match * in this table. */ { NULL, (FUNC)dummy }, /* End of list. */ { NULL, NULL }, };