D10680: IPSec performance increase in single flow mode by making crypto(9) multi thread
D10680.id28237.diff (19 KB)
Index: netipsec/ipsec.h
===================================================================
--- netipsec/ipsec.h
+++ netipsec/ipsec.h
@@ -282,6 +282,7 @@
VNET_DECLARE(int, ip4_ipsec_ecn);
VNET_DECLARE(int, ip4_esp_randpad);
VNET_DECLARE(int, crypto_support);
+VNET_DECLARE(int, crypto_mp_dispatch);
VNET_DECLARE(int, natt_cksum_policy);
#define IPSECSTAT_INC(name) \
@@ -295,6 +296,7 @@
#define V_ip4_ipsec_ecn VNET(ip4_ipsec_ecn)
#define V_ip4_esp_randpad VNET(ip4_esp_randpad)
#define V_crypto_support VNET(crypto_support)
+#define V_crypto_mp_dispatch VNET(crypto_mp_dispatch)
#define V_natt_cksum_policy VNET(natt_cksum_policy)
#define ipseclog(x) do { if (V_ipsec_debug) log x; } while (0)
Index: netipsec/ipsec.c
===================================================================
--- netipsec/ipsec.c
+++ netipsec/ipsec.c
@@ -149,6 +149,15 @@
* 0 take anything
*/
VNET_DEFINE(int, crypto_support) = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+
+/*
+ * Dispatching crypto jobs on multiple cores
+ *
+ * 0 - disabled
+ * 1 - enabled
+ */
+VNET_DEFINE(int, crypto_mp_dispatch) = 0;
+
/*
* TCP/UDP checksum handling policy for transport mode NAT-T (RFC3948)
*
@@ -195,6 +204,9 @@
SYSCTL_INT(_net_inet_ipsec, OID_AUTO, crypto_support,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(crypto_support), 0,
"Crypto driver selection.");
+SYSCTL_INT(_net_inet_ipsec, OID_AUTO, crypto_mp_dispatch,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(crypto_mp_dispatch), 0,
+ "Dispatch crypto jobs on multiple cores.");
SYSCTL_INT(_net_inet_ipsec, OID_AUTO, check_policy_history,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(check_policy_history), 0,
"Use strict check of inbound packets to security policy compliance.");
Index: netipsec/xform_ah.c
===================================================================
--- netipsec/xform_ah.c
+++ netipsec/xform_ah.c
@@ -655,7 +655,11 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags = CRYPTO_F_IMBUF;
+ if (V_crypto_mp_dispatch)
+ crp->crp_flags |= CRYPTO_F_MPDISPATCH | CRYPTO_F_REORDER;
+ else
+ crp->crp_flags |= CRYPTO_F_CBIFSYNC;
crp->crp_buf = (caddr_t) m;
crp->crp_callback = ah_input_cb;
crp->crp_sid = cryptoid;
@@ -1027,7 +1031,11 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags = CRYPTO_F_IMBUF;
+ if (V_crypto_mp_dispatch)
+ crp->crp_flags |= CRYPTO_F_MPDISPATCH | CRYPTO_F_REORDER;
+ else
+ crp->crp_flags |= CRYPTO_F_CBIFSYNC;
crp->crp_buf = (caddr_t) m;
crp->crp_callback = ah_output_cb;
crp->crp_sid = cryptoid;
Index: netipsec/xform_esp.c
===================================================================
--- netipsec/xform_esp.c
+++ netipsec/xform_esp.c
@@ -385,7 +385,11 @@
/* Crypto operation descriptor */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags = CRYPTO_F_IMBUF;
+ if (V_crypto_mp_dispatch)
+ crp->crp_flags |= CRYPTO_F_MPDISPATCH | CRYPTO_F_REORDER;
+ else
+ crp->crp_flags |= CRYPTO_F_CBIFSYNC;
crp->crp_buf = (caddr_t) m;
crp->crp_callback = esp_input_cb;
crp->crp_sid = cryptoid;
@@ -837,7 +841,11 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags = CRYPTO_F_IMBUF;
+ if (V_crypto_mp_dispatch)
+ crp->crp_flags |= CRYPTO_F_MPDISPATCH | CRYPTO_F_REORDER;
+ else
+ crp->crp_flags |= CRYPTO_F_CBIFSYNC;
crp->crp_buf = (caddr_t) m;
crp->crp_callback = esp_output_cb;
crp->crp_opaque = (caddr_t) xd;
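All four submission paths patched above (AH input/output and ESP input/output) repeat the same flag selection: keep CRYPTO_F_IMBUF, then pick either the new multi-core dispatch flags or the old synchronous-callback flag depending on V_crypto_mp_dispatch. A self-contained sketch of just that decision, using the flag values introduced in the opencrypto/cryptodev.h hunk further below; the helper name dispatch_flags() is hypothetical and not part of the patch:

#include <stdio.h>

/* Flag values as added to opencrypto/cryptodev.h in this diff. */
#define CRYPTO_F_CBIFSYNC	0x0040	/* immediate callback if op is synchronous */
#define CRYPTO_F_MPDISPATCH	0x0080	/* dispatch crypto jobs on several threads */
#define CRYPTO_F_REORDER	0x0100	/* reorder completions, single callback thread */

/* Hypothetical helper mirroring the per-packet logic in xform_ah.c/xform_esp.c. */
static int
dispatch_flags(int crypto_mp_dispatch)
{
	if (crypto_mp_dispatch)
		return (CRYPTO_F_MPDISPATCH | CRYPTO_F_REORDER);
	return (CRYPTO_F_CBIFSYNC);
}

int
main(void)
{
	printf("mp_dispatch off -> flags 0x%04x\n", dispatch_flags(0));
	printf("mp_dispatch on  -> flags 0x%04x\n", dispatch_flags(1));
	return (0);
}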
Index: opencrypto/crypto.c
===================================================================
--- opencrypto/crypto.c
+++ opencrypto/crypto.c
@@ -70,7 +70,9 @@
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sdt.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
#include <ddb/ddb.h>
@@ -135,26 +137,50 @@
* operations.
*/
static int crp_sleep = 0;
-static TAILQ_HEAD(,cryptop) crp_q; /* request queues */
+static TAILQ_HEAD(cryptop_q, cryptop) crp_q; /* request queues */
static TAILQ_HEAD(,cryptkop) crp_kq;
static struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
/*
- * There are two queues for processing completed crypto requests; one
- * for the symmetric and one for the asymmetric ops. We only need one
- * but have two to avoid type futzing (cryptop vs. cryptkop). A single
- * mutex is used to lock access to both queues. Note that this lock
- * must be separate from the lock on request queues to insure driver
- * callbacks don't generate lock order reversals.
- */
-static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
-static TAILQ_HEAD(,cryptkop) crp_ret_kq;
-static struct mtx crypto_ret_q_mtx;
-#define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx)
-#define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx)
-#define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
+ * Taskqueue used to dispatch the crypto requests
+ * that have the CRYPTO_F_MPDISPATCH flag set
+ */
+static struct taskqueue *crypto_tq;
+
+/*
+ * Crypto seq numbers are operated on with modular arithmetic
+ */
+#define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0)
+
+struct crypto_ret_worker {
+ struct mtx crypto_ret_mtx;
+
+ TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
+ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symmetric jobs */
+ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */
+
+ u_int32_t reorder_ops; /* total ordered sym jobs received */
+ u_int32_t reorder_cur_seq; /* current sym job dispatched */
+
+ struct proc *cryptoretproc;
+};
+static struct crypto_ret_worker *crypto_ret_workers = NULL;
+
+#define CRYPTO_RETW(i) (&crypto_ret_workers[i])
+#define CRYPTO_RETW_ID(w) \
+ ((int)(((uintptr_t)(w) - (uintptr_t)crypto_ret_workers) / sizeof(struct crypto_ret_worker)))
+#define FOREACH_CRYPTO_RETW(w) \
+ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)
+
+#define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx)
+#define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx)
+#define CRYPTO_RETW_EMPTY(w) \
+ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))
+
+static int crypto_workers_num = 0;
+TUNABLE_INT("kern.crypto_workers_num", &crypto_workers_num);
static uma_zone_t cryptop_zone;
static uma_zone_t cryptodesc_zone;
@@ -172,11 +198,11 @@
static void crypto_proc(void);
static struct proc *cryptoproc;
-static void crypto_ret_proc(void);
-static struct proc *cryptoretproc;
+static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int flags);
+static void crypto_task_invoke(void *ctx, int pending);
static struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
@@ -222,6 +248,7 @@
static int
crypto_init(void)
{
+ struct crypto_ret_worker *ret_worker;
int error;
mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
@@ -231,10 +258,6 @@
TAILQ_INIT(&crp_kq);
mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
- TAILQ_INIT(&crp_ret_q);
- TAILQ_INIT(&crp_ret_kq);
- mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
-
cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
0, 0, 0, 0,
UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
@@ -256,6 +279,20 @@
goto bad;
}
+ if (crypto_workers_num < 1)
+ crypto_workers_num = mp_ncpus;
+
+ crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO,
+ taskqueue_thread_enqueue, &crypto_tq);
+ if (crypto_tq == NULL) {
+ printf("crypto init: cannot setup crypto taskqueue\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MAX_TIMESHARE,
+ "crypto");
+
error = kproc_create((void (*)(void *)) crypto_proc, NULL,
&cryptoproc, 0, 0, "crypto");
if (error) {
@@ -264,15 +301,33 @@
goto bad;
}
- error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
- &cryptoretproc, 0, 0, "crypto returns");
- if (error) {
- printf("crypto_init: cannot start cryptoret thread; error %d",
- error);
+ crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (crypto_ret_workers == NULL) {
+ error = ENOMEM;
+ printf("crypto_init: cannot allocate ret workers\n");
goto bad;
}
- keybuf_init();
+
+ FOREACH_CRYPTO_RETW(ret_worker) {
+ TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
+ TAILQ_INIT(&ret_worker->crp_ret_q);
+ TAILQ_INIT(&ret_worker->crp_ret_kq);
+
+ ret_worker->reorder_ops = 0;
+ ret_worker->reorder_cur_seq = 0;
+
+ mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF);
+
+ error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker,
+ &ret_worker->cryptoretproc, 0, 0, "crypto returns %d", CRYPTO_RETW_ID(ret_worker));
+ if (error) {
+ printf("crypto_init: cannot start cryptoret thread; error %d",
+ error);
+ goto bad;
+ }
+ }
return 0;
bad:
@@ -308,12 +363,15 @@
static void
crypto_destroy(void)
{
+ struct crypto_ret_worker *ret_worker;
+
/*
* Terminate any crypto threads.
*/
CRYPTO_DRIVER_LOCK();
crypto_terminate(&cryptoproc, &crp_q);
- crypto_terminate(&cryptoretproc, &crp_ret_q);
+ FOREACH_CRYPTO_RETW(ret_worker)
+ crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q);
CRYPTO_DRIVER_UNLOCK();
/* XXX flush queues??? */
@@ -329,7 +387,9 @@
if (cryptop_zone != NULL)
uma_zdestroy(cryptop_zone);
mtx_destroy(&crypto_q_mtx);
- mtx_destroy(&crypto_ret_q_mtx);
+ FOREACH_CRYPTO_RETW(ret_worker)
+ mtx_destroy(&ret_worker->crypto_ret_mtx);
+ free(crypto_ret_workers, M_CRYPTO_DATA);
mtx_destroy(&crypto_drivers_mtx);
}
@@ -811,6 +871,16 @@
return err;
}
+static void
+crypto_batch_enqueue(struct cryptop *crp)
+{
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ CRYPTO_Q_UNLOCK();
+}
+
/*
* Add a crypto request to a queue, to be processed by the kernel thread.
*/
@@ -828,9 +898,26 @@
binuptime(&crp->crp_tstamp);
#endif
- hid = CRYPTO_SESID2HID(crp->crp_sid);
+ if (crp->crp_flags & CRYPTO_F_REORDER) {
+ struct crypto_ret_worker *ret_worker;
+
+ crp->crp_retw_id = crp->crp_sid % crypto_workers_num;
+ ret_worker = CRYPTO_RETW(crp->crp_retw_id);
+
+ CRYPTO_RETW_LOCK(ret_worker);
+ crp->crp_seq = ret_worker->reorder_ops++;
+ CRYPTO_RETW_UNLOCK(ret_worker);
+ }
+
+ if (crp->crp_flags & CRYPTO_F_MPDISPATCH) {
+ TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
+ taskqueue_enqueue(crypto_tq, &crp->crp_task);
+ return (0);
+ }
if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+
/*
* Caller marked the request to be processed
* immediately; dispatch it directly to the
@@ -849,11 +936,7 @@
*/
}
}
- CRYPTO_Q_LOCK();
- TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
- if (crp_sleep)
- wakeup_one(&crp_q);
- CRYPTO_Q_UNLOCK();
+ crypto_batch_enqueue(crp);
return 0;
}
@@ -1035,6 +1118,23 @@
}
#endif
+static void
+crypto_task_invoke(void *ctx, int pending)
+{
+ struct cryptocap *cap;
+ struct cryptop *crp;
+ int hid, result;
+
+ crp = (struct cryptop *)ctx;
+
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+ cap = crypto_checkdriver(hid);
+
+ result = crypto_invoke(cap, crp, 0);
+ if (result == ERESTART)
+ crypto_batch_enqueue(crp);
+}
+
/*
* Dispatch a crypto request to the appropriate crypto devices.
*/
@@ -1097,6 +1197,7 @@
#ifdef DIAGNOSTIC
{
struct cryptop *crp2;
+ struct crypto_ret_worker *ret_worker;
CRYPTO_Q_LOCK();
TAILQ_FOREACH(crp2, &crp_q, crp_next) {
@@ -1105,13 +1206,16 @@
crp));
}
CRYPTO_Q_UNLOCK();
- CRYPTO_RETQ_LOCK();
- TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
- KASSERT(crp2 != crp,
- ("Freeing cryptop from the return queue (%p).",
- crp));
+
+ FOREACH_CRYPTO_RETW(ret_worker) {
+ CRYPTO_RETW_LOCK(ret_worker);
+ TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the return queue (%p).",
+ crp));
+ }
+ CRYPTO_RETW_UNLOCK(ret_worker);
}
- CRYPTO_RETQ_UNLOCK();
}
#endif
@@ -1169,9 +1273,10 @@
* doing extraneous context switches; the latter is mostly
* used with the software crypto driver.
*/
- if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
+ if (!(crp->crp_flags & CRYPTO_F_REORDER) &&
+ ((crp->crp_flags & CRYPTO_F_CBIMM) ||
((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
- (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
+ (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC)))) {
/*
* Do the callback directly. This is ok when the
* callback routine does very little (e.g. the
@@ -1192,14 +1297,45 @@
#endif
crp->crp_callback(crp);
} else {
+ struct crypto_ret_worker *ret_worker;
+ bool wake;
+
+ ret_worker = CRYPTO_RETW(crp->crp_retw_id);
+ wake = false;
+
/*
* Normal case; queue the callback for the thread.
*/
- CRYPTO_RETQ_LOCK();
- if (CRYPTO_RETQ_EMPTY())
- wakeup_one(&crp_ret_q); /* shared wait channel */
- TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
- CRYPTO_RETQ_UNLOCK();
+ CRYPTO_RETW_LOCK(ret_worker);
+ if (crp->crp_flags & CRYPTO_F_REORDER) {
+ struct cryptop *tmp;
+
+ TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q,
+ cryptop_q, crp_next) {
+ if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
+ TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q,
+ tmp, crp, crp_next);
+ break;
+ }
+ }
+ if (tmp == NULL) {
+ TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q,
+ crp, crp_next);
+ }
+
+ if (crp->crp_seq == ret_worker->reorder_cur_seq)
+ wake = true;
+ } else {
+ if (CRYPTO_RETW_EMPTY(ret_worker))
+ wake = true;
+
+ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next);
+ }
+
+ if (wake)
+ wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
+ CRYPTO_RETW_UNLOCK(ret_worker);
}
}
@@ -1209,6 +1345,7 @@
void
crypto_kdone(struct cryptkop *krp)
{
+ struct crypto_ret_worker *ret_worker;
struct cryptocap *cap;
if (krp->krp_status != 0)
@@ -1223,11 +1360,14 @@
crypto_remove(cap);
}
CRYPTO_DRIVER_UNLOCK();
- CRYPTO_RETQ_LOCK();
- if (CRYPTO_RETQ_EMPTY())
- wakeup_one(&crp_ret_q); /* shared wait channel */
- TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
- CRYPTO_RETQ_UNLOCK();
+
+ ret_worker = CRYPTO_RETW(0);
+
+ CRYPTO_RETW_LOCK(ret_worker);
+ if (CRYPTO_RETW_EMPTY(ret_worker))
+ wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
+ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next);
+ CRYPTO_RETW_UNLOCK(ret_worker);
}
int
@@ -1427,24 +1567,36 @@
* callbacks typically are expensive and would slow interrupt handling.
*/
static void
-crypto_ret_proc(void)
+crypto_ret_proc(struct crypto_ret_worker *ret_worker)
{
struct cryptop *crpt;
struct cryptkop *krpt;
- CRYPTO_RETQ_LOCK();
+ CRYPTO_RETW_LOCK(ret_worker);
for (;;) {
/* Harvest return q's for completed ops */
- crpt = TAILQ_FIRST(&crp_ret_q);
- if (crpt != NULL)
- TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
+ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
+ if (crpt != NULL) {
+ if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
+ TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
+ ret_worker->reorder_cur_seq++;
+ } else {
+ crpt = NULL;
+ }
+ }
+
+ if (crpt == NULL) {
+ crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
+ if (crpt != NULL)
+ TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
+ }
- krpt = TAILQ_FIRST(&crp_ret_kq);
+ krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
if (krpt != NULL)
- TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
+ TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);
if (crpt != NULL || krpt != NULL) {
- CRYPTO_RETQ_UNLOCK();
+ CRYPTO_RETW_UNLOCK(ret_worker);
/*
* Run callbacks unlocked.
*/
@@ -1466,24 +1618,25 @@
}
if (krpt != NULL)
krpt->krp_callback(krpt);
- CRYPTO_RETQ_LOCK();
+ CRYPTO_RETW_LOCK(ret_worker);
} else {
/*
* Nothing more to be processed. Sleep until we're
* woken because there are more returns to process.
*/
- msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
+ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
"crypto_ret_wait", 0);
- if (cryptoretproc == NULL)
+ if (ret_worker->cryptoretproc == NULL)
break;
cryptostats.cs_rets++;
}
}
- CRYPTO_RETQ_UNLOCK();
+ CRYPTO_RETW_UNLOCK(ret_worker);
- crypto_finis(&crp_ret_q);
+ crypto_finis(&ret_worker->crp_ret_q);
}
+
#ifdef DDB
static void
db_show_drivers(void)
@@ -1516,6 +1669,7 @@
DB_SHOW_COMMAND(crypto, db_show_crypto)
{
struct cryptop *crp;
+ struct crypto_ret_worker *ret_worker;
db_show_drivers();
db_printf("\n");
@@ -1534,16 +1688,19 @@
, crp->crp_callback
);
}
- if (!TAILQ_EMPTY(&crp_ret_q)) {
- db_printf("\n%4s %4s %4s %8s\n",
- "HID", "Etype", "Flags", "Callback");
- TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
- db_printf("%4u %4u %04x %8p\n"
- , (int) CRYPTO_SESID2HID(crp->crp_sid)
- , crp->crp_etype
- , crp->crp_flags
- , crp->crp_callback
- );
+ FOREACH_CRYPTO_RETW(ret_worker) {
+ db_printf("\n%8s %4s %4s %4s %8s\n",
+ "ret_worker", "HID", "Etype", "Flags", "Callback");
+ if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
+ TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
+ db_printf("%8u %4u %4u %04x %8p\n"
+ , CRYPTO_RETW_ID(ret_worker)
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_callback
+ );
+ }
}
}
}
@@ -1551,6 +1708,7 @@
DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
{
struct cryptkop *krp;
+ struct crypto_ret_worker *ret_worker;
db_show_drivers();
db_printf("\n");
@@ -1566,10 +1724,12 @@
, krp->krp_callback
);
}
- if (!TAILQ_EMPTY(&crp_ret_q)) {
+
+ ret_worker = CRYPTO_RETW(0);
+ if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
db_printf("%4s %5s %8s %4s %8s\n",
"Op", "Status", "CRID", "HID", "Callback");
- TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
+ TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
db_printf("%4u %5u %08x %4u %8p\n"
, krp->krp_op
, krp->krp_status
Index: opencrypto/cryptodev.h
===================================================================
--- opencrypto/cryptodev.h
+++ opencrypto/cryptodev.h
@@ -63,6 +63,7 @@
#define _CRYPTO_CRYPTO_H_
#include <sys/ioccom.h>
+#include <sys/_task.h>
/* Some initial values */
#define CRYPTO_DRIVERS_INITIAL 4
@@ -395,6 +396,8 @@
struct cryptop {
TAILQ_ENTRY(cryptop) crp_next;
+ struct task crp_task;
+
u_int64_t crp_sid; /* Session ID */
int crp_ilen; /* Input data total length */
int crp_olen; /* Result total length */
@@ -417,6 +420,11 @@
#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
#define CRYPTO_F_DONE 0x0020 /* Operation completed */
#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
+#define CRYPTO_F_MPDISPATCH 0x0080 /* Dispatch crypto jobs on several threads */
+#define CRYPTO_F_REORDER 0x0100 /*
+ Reorder crypto jobs and call the callback
+ from a single thread
+ */
caddr_t crp_buf; /* Data to be processed */
caddr_t crp_opaque; /* Opaque pointer, passed along */
@@ -425,6 +433,11 @@
int (*crp_callback)(struct cryptop *); /* Callback function */
struct bintime crp_tstamp; /* performance time stamp */
+ uint32_t crp_seq; /* used for ordered dispatch */
+ uint32_t crp_retw_id; /*
+ the return worker to be used,
+ only used for ordered dispatch
+ */
};
#define CRYPTO_BUF_CONTIG 0x0
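One detail worth calling out: crypto_dispatch() derives crp_retw_id as crp_sid % crypto_workers_num, so every request belonging to a given session is completed by the same return worker and ordering only has to be restored within that one worker's queues; crypto_workers_num defaults to mp_ncpus and can be overridden via the kern.crypto_workers_num tunable. A purely illustrative sketch of that mapping, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors crp->crp_retw_id = crp->crp_sid % crypto_workers_num. */
static uint32_t
retw_for_session(uint64_t crp_sid, uint32_t crypto_workers_num)
{
	return ((uint32_t)(crp_sid % crypto_workers_num));
}

int
main(void)
{
	uint64_t sids[] = { 7, 8, 7, 15, 7 };
	uint32_t workers = 8;	/* e.g. mp_ncpus on an 8-core machine */

	/* All requests for session 7 map to the same return worker. */
	for (size_t i = 0; i < sizeof(sids) / sizeof(sids[0]); i++)
		printf("session %ju -> worker %u\n",
		    (uintmax_t)sids[i], retw_for_session(sids[i], workers));
	return (0);
}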