Index: opencrypto/crypto.c =================================================================== --- opencrypto/crypto.c +++ opencrypto/crypto.c @@ -134,31 +134,70 @@ * have one per-queue but having one simplifies handling of block/unblock * operations. */ -static int crp_sleep = 0; -static TAILQ_HEAD(,cryptop) crp_q; /* request queues */ -static TAILQ_HEAD(,cryptkop) crp_kq; -static struct mtx crypto_q_mtx; -#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) -#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) - -/* - * There are two queues for processing completed crypto requests; one - * for the symmetric and one for the asymmetric ops. We only need one - * but have two to avoid type futzing (cryptop vs. cryptkop). A single - * mutex is used to lock access to both queues. Note that this lock - * must be separate from the lock on request queues to insure driver - * callbacks don't generate lock order reversals. - */ -static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */ -static TAILQ_HEAD(,cryptkop) crp_ret_kq; -static struct mtx crypto_ret_q_mtx; -#define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx) -#define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx) -#define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq)) +struct crypto_worker { + TAILQ_HEAD(,cryptop) crp_q; /* request queues */ + TAILQ_HEAD(,cryptkop) crp_kq; + + struct mtx crypto_q_mtx; + + int crp_sleep; + struct proc *cryptoproc; +} __aligned(CACHE_LINE_SIZE); +static struct crypto_worker *crypto_workers = NULL; + +#define CRYPTO_W(i) (&crypto_workers[i]) +#define CRYPTO_W_ID(w) ((int)(((uintptr_t)w - (uintptr_t)crypto_workers) / sizeof(struct crypto_worker))) + +#define CRYPTO_W_FOREACH(w) \ + for (w = crypto_workers; w < crypto_workers + crypto_workers_num; ++w) +#define CRYPTO_W_LOCK(w) mtx_lock(&(w)->crypto_q_mtx) +#define CRYPTO_W_UNLOCK(w) mtx_unlock(&(w)->crypto_q_mtx) + +#define CRYPTO_W_LOCKALL() do { \ + struct crypto_worker *_w; \ + CRYPTO_W_FOREACH(_w) \ + 
CRYPTO_W_LOCK(_w); \ +} while (0) + +#define CRYPTO_W_UNLOCKALL() do { \ + struct crypto_worker *_w; \ + CRYPTO_W_FOREACH(_w) \ + CRYPTO_W_UNLOCK(_w); \ +} while (0) + +struct crypto_ret_worker { + /* + * There are two queues for processing completed crypto requests; one + * for the symmetric and one for the asymmetric ops. We only need one + * but have two to avoid type futzing (cryptop vs. cryptkop). A single + * mutex is used to lock access to both queues. Note that this lock + * must be separate from the lock on request queues to insure driver + * callbacks don't generate lock order reversals. + */ + TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */ + TAILQ_HEAD(,cryptkop) crp_ret_kq; + struct mtx crypto_ret_q_mtx; + + struct proc *cryptoretproc; +} __aligned(CACHE_LINE_SIZE); +static struct crypto_ret_worker *crypto_ret_workers = NULL; + +#define CRYPTO_RETW_FOREACH(w) \ + for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) + +#define CRYPTO_RETW(i) (&crypto_ret_workers[i]) +#define CRYPTO_RETW_ID(w) ((int)(((uintptr_t)(w) - (uintptr_t)crypto_ret_workers) / sizeof(struct crypto_ret_worker))) + +#define CRYPTO_RETQ_LOCK(w) mtx_lock(&w->crypto_ret_q_mtx) +#define CRYPTO_RETQ_UNLOCK(w) mtx_unlock(&w->crypto_ret_q_mtx) +#define CRYPTO_RETQ_EMPTY(w) (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq)) static uma_zone_t cryptop_zone; static uma_zone_t cryptodesc_zone; +static int crypto_workers_num = 1; +TUNABLE_INT("kern.crypto_workers_num", &crypto_workers_num); + int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, @@ -167,13 +206,23 @@ SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); +int crypto_max_requests = 256; +SYSCTL_INT(_kern, OID_AUTO, crypto_max_requests, CTLFLAG_RW, + &crypto_max_requests, 0, + "Max number of pending crypto 
requests"); +int crypto_nb_requests = 0; +SYSCTL_INT(_kern, OID_AUTO, crypto_nb_requests, CTLFLAG_RW, + &crypto_nb_requests, 0, + "Current number of crypto requests"); +int crypto_dropped_requests = 0; +SYSCTL_INT(_kern, OID_AUTO, crypto_dropped_requests, CTLFLAG_RW, + &crypto_dropped_requests, 0, + "Dropped requests due to limit reached"); MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); -static void crypto_proc(void); -static struct proc *cryptoproc; -static void crypto_ret_proc(void); -static struct proc *cryptoretproc; +static void crypto_proc(struct crypto_worker *worker); +static void crypto_ret_proc(struct crypto_ret_worker *worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp, int flags); @@ -220,21 +269,36 @@ } static int +select_crypto_worker_id(const struct cryptop *crp) +{ + /* Each worker is assigned a session */ + if (crypto_workers_num > 1) + return (crp->crp_sid % crypto_workers_num); + + return 0; +} + +static int +select_kcrypto_worker_id(struct cryptkop *krp) +{ + if (crypto_workers_num > 1) + return krp->krp_crid % crypto_workers_num; + + return 0; +} + +static int crypto_init(void) { + struct crypto_ret_worker *ret_worker; + struct crypto_worker *worker; int error; + crypto_nb_requests = 0; + mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); - TAILQ_INIT(&crp_q); - TAILQ_INIT(&crp_kq); - mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); - - TAILQ_INIT(&crp_ret_q); - TAILQ_INIT(&crp_ret_kq); - mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF); - cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop), 0, 0, 0, 0, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); @@ -256,24 +320,63 @@ goto bad; } - error = kproc_create((void (*)(void *)) crypto_proc, NULL, - &cryptoproc, 0, 0, "crypto"); - if (error) { - printf("crypto_init: cannot start crypto thread; error 
%d", - error); + if (crypto_workers_num < 1) + crypto_workers_num = 1; + + crypto_workers = malloc(crypto_workers_num * sizeof(struct crypto_worker), + M_CRYPTO_DATA, M_NOWAIT|M_ZERO); + if (crypto_workers == NULL) { + error = ENOMEM; + printf("crypto_init: cannot allocate workers\n"); goto bad; } - error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL, - &cryptoretproc, 0, 0, "crypto returns"); - if (error) { - printf("crypto_init: cannot start cryptoret thread; error %d", - error); + if (bootverbose) + printf("crypto_init: instantiating %d worker(s)\n", crypto_workers_num); + + CRYPTO_W_FOREACH(worker) { + TAILQ_INIT(&worker->crp_q); + TAILQ_INIT(&worker->crp_kq); + worker->crp_sleep = 0; + + mtx_init(&worker->crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); + + error = kproc_create((void (*)(void *)) crypto_proc, worker, + &worker->cryptoproc, 0, 0, "crypto %d", CRYPTO_W_ID(worker)); + if (error) { + printf("crypto_init: cannot start crypto thread %d; error %d", + CRYPTO_W_ID(worker), error); + goto bad; + } + } + + crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker), + M_CRYPTO_DATA, M_NOWAIT|M_ZERO); + if (crypto_ret_workers == NULL) { + error = ENOMEM; + printf("crypto_init: cannot allocate ret workers\n"); goto bad; } keybuf_init(); + CRYPTO_RETW_FOREACH(ret_worker) { + TAILQ_INIT(&ret_worker->crp_ret_q); + TAILQ_INIT(&ret_worker->crp_ret_kq); + + mtx_init(&ret_worker->crypto_ret_q_mtx, "crypto", + "crypto return queues", MTX_DEF); + + error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, + &ret_worker->cryptoretproc, 0, 0, "crypto returns %d", + CRYPTO_RETW_ID(ret_worker)); + if (error) { + printf("crypto_init: cannot start crypto ret thread %d; error %d", + CRYPTO_RETW_ID(ret_worker), error); + goto bad; + } + } + return 0; bad: crypto_destroy(); @@ -292,7 +395,7 @@ { struct proc *p; - mtx_assert(&crypto_drivers_mtx, MA_OWNED); + CRYPTO_DRIVER_ASSERT(); p = *pp; *pp = NULL; if (p) { @@ -308,12 
+411,17 @@ static void crypto_destroy(void) { + struct crypto_ret_worker *ret_worker; + struct crypto_worker *worker; + /* * Terminate any crypto threads. */ CRYPTO_DRIVER_LOCK(); - crypto_terminate(&cryptoproc, &crp_q); - crypto_terminate(&cryptoretproc, &crp_ret_q); + CRYPTO_W_FOREACH(worker) + crypto_terminate(&worker->cryptoproc, &worker->crp_q); + CRYPTO_RETW_FOREACH(ret_worker) + crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ @@ -328,8 +436,15 @@ uma_zdestroy(cryptodesc_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); - mtx_destroy(&crypto_q_mtx); - mtx_destroy(&crypto_ret_q_mtx); + + CRYPTO_W_FOREACH(worker) + mtx_destroy(&worker->crypto_q_mtx); + free(crypto_workers, M_CRYPTO_DATA); + + CRYPTO_RETW_FOREACH(ret_worker) + mtx_destroy(&ret_worker->crypto_ret_q_mtx); + free(crypto_ret_workers, M_CRYPTO_DATA); + mtx_destroy(&crypto_drivers_mtx); } @@ -792,21 +907,25 @@ crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; + struct crypto_worker *worker; int err; - CRYPTO_Q_LOCK(); + CRYPTO_W_LOCKALL(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; - if (crp_sleep) - wakeup_one(&crp_q); + CRYPTO_W_FOREACH(worker) { + if (worker->crp_sleep) + wakeup_one(&worker->crp_q); + } + err = 0; } else err = EINVAL; - CRYPTO_Q_UNLOCK(); + CRYPTO_W_UNLOCKALL(); return err; } @@ -818,10 +937,11 @@ crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; + struct crypto_worker *worker; u_int32_t hid; int result; - cryptostats.cs_ops++; + atomic_add_acq_int(&cryptostats.cs_ops, 1); #ifdef CRYPTO_TIMING if (crypto_timing) @@ -849,11 +969,14 @@ */ } } - CRYPTO_Q_LOCK(); - TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); - if (crp_sleep) - wakeup_one(&crp_q); - CRYPTO_Q_UNLOCK(); + + worker = CRYPTO_W(select_crypto_worker_id(crp)); + CRYPTO_W_LOCK(worker); + 
TAILQ_INSERT_TAIL(&worker->crp_q, crp, crp_next); + if (worker->crp_sleep) + wakeup_one(&worker->crp_q); + CRYPTO_W_UNLOCK(worker); + return 0; } @@ -870,11 +993,15 @@ error = crypto_kinvoke(krp, krp->krp_crid); if (error == ERESTART) { - CRYPTO_Q_LOCK(); - TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); - if (crp_sleep) - wakeup_one(&crp_q); - CRYPTO_Q_UNLOCK(); + struct crypto_worker *worker; + + worker = CRYPTO_W(select_kcrypto_worker_id(krp)); + + CRYPTO_W_LOCK(worker); + TAILQ_INSERT_TAIL(&worker->crp_kq, krp, krp_next); + if (worker->crp_sleep) + wakeup_one(&worker->crp_q); + CRYPTO_W_UNLOCK(worker); error = 0; } return error; @@ -1094,30 +1221,10 @@ if (crp == NULL) return; -#ifdef DIAGNOSTIC - { - struct cryptop *crp2; - - CRYPTO_Q_LOCK(); - TAILQ_FOREACH(crp2, &crp_q, crp_next) { - KASSERT(crp2 != crp, - ("Freeing cryptop from the crypto queue (%p).", - crp)); - } - CRYPTO_Q_UNLOCK(); - CRYPTO_RETQ_LOCK(); - TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) { - KASSERT(crp2 != crp, - ("Freeing cryptop from the return queue (%p).", - crp)); - } - CRYPTO_RETQ_UNLOCK(); - } -#endif - while ((crd = crp->crp_desc) != NULL) { crp->crp_desc = crd->crd_next; uma_zfree(cryptodesc_zone, crd); + atomic_add_acq_int(&crypto_nb_requests, -1); } uma_zfree(cryptop_zone, crp); } @@ -1131,6 +1238,11 @@ struct cryptodesc *crd; struct cryptop *crp; + if (crypto_nb_requests > crypto_max_requests) { + crypto_dropped_requests++; + return NULL; + } + crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO); if (crp != NULL) { while (num--) { @@ -1142,6 +1254,7 @@ crd->crd_next = crp->crp_desc; crp->crp_desc = crd; + atomic_add_acq_int(&crypto_nb_requests, 1); } } return crp; @@ -1192,14 +1305,17 @@ #endif crp->crp_callback(crp); } else { + struct crypto_ret_worker *ret_worker; + + ret_worker = CRYPTO_RETW(select_crypto_worker_id(crp)); /* * Normal case; queue the callback for the thread. 
*/ - CRYPTO_RETQ_LOCK(); - if (CRYPTO_RETQ_EMPTY()) - wakeup_one(&crp_ret_q); /* shared wait channel */ - TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next); - CRYPTO_RETQ_UNLOCK(); + CRYPTO_RETQ_LOCK(ret_worker); + if (CRYPTO_RETQ_EMPTY(ret_worker)) + wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ + TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); + CRYPTO_RETQ_UNLOCK(ret_worker); } } @@ -1209,6 +1326,7 @@ void crypto_kdone(struct cryptkop *krp) { + struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) @@ -1223,11 +1341,14 @@ crypto_remove(cap); } CRYPTO_DRIVER_UNLOCK(); - CRYPTO_RETQ_LOCK(); - if (CRYPTO_RETQ_EMPTY()) - wakeup_one(&crp_ret_q); /* shared wait channel */ - TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next); - CRYPTO_RETQ_UNLOCK(); + + ret_worker = CRYPTO_RETW(select_kcrypto_worker_id(krp)); + + CRYPTO_RETQ_LOCK(ret_worker); + if (CRYPTO_RETQ_EMPTY(ret_worker)) + wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ + TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); + CRYPTO_RETQ_UNLOCK(ret_worker); } int @@ -1274,7 +1395,7 @@ * Crypto thread, dispatches crypto requests. */ static void -crypto_proc(void) +crypto_proc(struct crypto_worker *worker) { struct cryptop *crp, *submit; struct cryptkop *krp; @@ -1286,7 +1407,7 @@ fpu_kern_thread(FPU_KERN_NORMAL); #endif - CRYPTO_Q_LOCK(); + CRYPTO_W_LOCK(worker); for (;;) { /* * Find the first element in the queue that can be @@ -1295,7 +1416,7 @@ */ submit = NULL; hint = 0; - TAILQ_FOREACH(crp, &crp_q, crp_next) { + TAILQ_FOREACH(crp, &worker->crp_q, crp_next) { hid = CRYPTO_SESID2HID(crp->crp_sid); cap = crypto_checkdriver(hid); /* @@ -1332,7 +1453,7 @@ } } if (submit != NULL) { - TAILQ_REMOVE(&crp_q, submit, crp_next); + TAILQ_REMOVE(&worker->crp_q, submit, crp_next); hid = CRYPTO_SESID2HID(submit->crp_sid); cap = crypto_checkdriver(hid); KASSERT(cap != NULL, ("%s:%u Driver disappeared.", @@ -1350,13 +1471,13 @@ */ /* XXX validate sid again? 
*/ crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1; - TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); - cryptostats.cs_blocks++; + TAILQ_INSERT_HEAD(&worker->crp_q, submit, crp_next); + atomic_add_acq_int(&cryptostats.cs_blocks, 1); } } /* As above, but for key ops */ - TAILQ_FOREACH(krp, &crp_kq, krp_next) { + TAILQ_FOREACH(krp, &worker->crp_kq, krp_next) { cap = crypto_checkdriver(krp->krp_hid); if (cap == NULL || cap->cc_dev == NULL) { /* @@ -1376,7 +1497,7 @@ break; } if (krp != NULL) { - TAILQ_REMOVE(&crp_kq, krp, krp_next); + TAILQ_REMOVE(&worker->crp_kq, krp, krp_next); result = crypto_kinvoke(krp, krp->krp_hid); if (result == ERESTART) { /* @@ -1390,8 +1511,8 @@ */ /* XXX validate sid again? */ crypto_drivers[krp->krp_hid].cc_kqblocked = 1; - TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); - cryptostats.cs_kblocks++; + TAILQ_INSERT_HEAD(&worker->crp_kq, krp, krp_next); + atomic_add_acq_int(&cryptostats.cs_kblocks, 1); } } @@ -1408,17 +1529,17 @@ * out of order if dispatched to different devices * and some become blocked while others do not. */ - crp_sleep = 1; - msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); - crp_sleep = 0; - if (cryptoproc == NULL) + worker->crp_sleep = 1; + msleep(&worker->crp_q, &worker->crypto_q_mtx, PWAIT, "crypto_wait", 0); + worker->crp_sleep = 0; + if (worker->cryptoproc == NULL) break; - cryptostats.cs_intrs++; + atomic_add_acq_int(&cryptostats.cs_intrs, 1); } } - CRYPTO_Q_UNLOCK(); + CRYPTO_W_UNLOCK(worker); - crypto_finis(&crp_q); + crypto_finis(&worker->crp_q); } /* @@ -1427,24 +1548,24 @@ * callbacks typically are expensive and would slow interrupt handling. 
*/ static void -crypto_ret_proc(void) +crypto_ret_proc(struct crypto_ret_worker *worker) { struct cryptop *crpt; struct cryptkop *krpt; - CRYPTO_RETQ_LOCK(); + CRYPTO_RETQ_LOCK(worker); for (;;) { /* Harvest return q's for completed ops */ - crpt = TAILQ_FIRST(&crp_ret_q); + crpt = TAILQ_FIRST(&worker->crp_ret_q); if (crpt != NULL) - TAILQ_REMOVE(&crp_ret_q, crpt, crp_next); + TAILQ_REMOVE(&worker->crp_ret_q, crpt, crp_next); - krpt = TAILQ_FIRST(&crp_ret_kq); + krpt = TAILQ_FIRST(&worker->crp_ret_kq); if (krpt != NULL) - TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next); + TAILQ_REMOVE(&worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { - CRYPTO_RETQ_UNLOCK(); + CRYPTO_RETQ_UNLOCK(worker); /* * Run callbacks unlocked. */ @@ -1466,22 +1587,22 @@ } if (krpt != NULL) krpt->krp_callback(krpt); - CRYPTO_RETQ_LOCK(); + CRYPTO_RETQ_LOCK(worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. */ - msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT, + msleep(&worker->crp_ret_q, &worker->crypto_ret_q_mtx, PWAIT, "crypto_ret_wait", 0); - if (cryptoretproc == NULL) + if (worker->cryptoretproc == NULL) break; cryptostats.cs_rets++; } } - CRYPTO_RETQ_UNLOCK(); + CRYPTO_RETQ_UNLOCK(worker); - crypto_finis(&crp_ret_q); + crypto_finis(&worker->crp_ret_q); } #ifdef DDB @@ -1516,6 +1637,8 @@ DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; + struct crypto_ret_worker *ret_worker; + struct crypto_worker *worker; db_show_drivers(); db_printf("\n"); @@ -1523,61 +1646,75 @@ db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Desc", "Callback"); - TAILQ_FOREACH(crp, &crp_q, crp_next) { - db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" - , (int) CRYPTO_SESID2HID(crp->crp_sid) - , (int) CRYPTO_SESID2CAPS(crp->crp_sid) - , crp->crp_ilen, crp->crp_olen - , crp->crp_etype - , crp->crp_flags - , crp->crp_desc - , crp->crp_callback - ); - } - if 
(!TAILQ_EMPTY(&crp_ret_q)) { - db_printf("\n%4s %4s %4s %8s\n", - "HID", "Etype", "Flags", "Callback"); - TAILQ_FOREACH(crp, &crp_ret_q, crp_next) { - db_printf("%4u %4u %04x %8p\n" + CRYPTO_W_FOREACH(worker) { + TAILQ_FOREACH(crp, &worker->crp_q, crp_next) { + db_printf("%4u %4u %08x %4u %4u %4u %04x %8p %8p\n" + , CRYPTO_W_ID(worker) , (int) CRYPTO_SESID2HID(crp->crp_sid) + , (int) CRYPTO_SESID2CAPS(crp->crp_sid) + , crp->crp_ilen, crp->crp_olen , crp->crp_etype , crp->crp_flags + , crp->crp_desc , crp->crp_callback ); } } + CRYPTO_RETW_FOREACH(ret_worker) { + if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { + db_printf("\n%7s %4s %4s %4s %8s\n", + "Worker", "HID", "Etype", "Flags", "Callback"); + TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { + db_printf("%4u %4u %4u %04x %8p\n" + , CRYPTO_RETW_ID(ret_worker) + , (int) CRYPTO_SESID2HID(crp->crp_sid) + , crp->crp_etype + , crp->crp_flags + , crp->crp_callback + ); + } + } + } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; + struct crypto_ret_worker *ret_worker; + struct crypto_worker *worker; db_show_drivers(); db_printf("\n"); - db_printf("%4s %5s %4s %4s %8s %4s %8s\n", - "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); - TAILQ_FOREACH(krp, &crp_kq, krp_next) { - db_printf("%4u %5u %4u %4u %08x %4u %8p\n" - , krp->krp_op - , krp->krp_status - , krp->krp_iparams, krp->krp_oparams - , krp->krp_crid, krp->krp_hid - , krp->krp_callback - ); - } - if (!TAILQ_EMPTY(&crp_ret_q)) { - db_printf("%4s %5s %8s %4s %8s\n", - "Op", "Status", "CRID", "HID", "Callback"); - TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) { - db_printf("%4u %5u %08x %4u %8p\n" + db_printf("%7s %4s %5s %4s %4s %8s %4s %8s\n", + "Worker", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); + CRYPTO_W_FOREACH(worker) { + TAILQ_FOREACH(krp, &worker->crp_kq, krp_next) { + db_printf("%4u %4u %5u %4u %4u %08x %4u %8p\n" + , CRYPTO_W_ID(worker) , krp->krp_op , krp->krp_status + , krp->krp_iparams, krp->krp_oparams , 
krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } + CRYPTO_RETW_FOREACH(ret_worker) { + if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { + db_printf("%11s %4s %5s %8s %4s %8s\n", + "Ret Worker", "Op", "Status", "CRID", "HID", "Callback"); + TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { + db_printf("%4u %4u %5u %08x %4u %8p\n" + , CRYPTO_RETW_ID(ret_worker) + , krp->krp_op + , krp->krp_status + , krp->krp_crid, krp->krp_hid + , krp->krp_callback + ); + } + } + } } #endif