diff --git a/sys/dev/qat/qat.c b/sys/dev/qat/qat.c index 49cb408fd702..b5d3f4d9629a 100644 --- a/sys/dev/qat/qat.c +++ b/sys/dev/qat/qat.c @@ -1,2309 +1,2298 @@ /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ /* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */ /* * Copyright (c) 2019 Internet Initiative Japan, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #if 0 __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $"); #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include "qatreg.h" #include "qatvar.h" #include "qat_aevar.h" extern struct qat_hw qat_hw_c2xxx; extern struct qat_hw qat_hw_c3xxx; extern struct qat_hw qat_hw_c62x; extern struct qat_hw qat_hw_d15xx; extern struct qat_hw qat_hw_dh895xcc; #define PCI_VENDOR_INTEL 0x8086 #define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18 #define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2 #define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3 #define PCI_PRODUCT_INTEL_C620_QAT 0x37c8 #define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9 #define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54 #define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55 #define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435 #define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443 static const struct qat_product { uint16_t qatp_vendor; uint16_t qatp_product; const char *qatp_name; enum qat_chip_type qatp_chip; const struct qat_hw *qatp_hw; } qat_products[] = { { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS, "Intel C2000 QuickAssist PF", QAT_CHIP_C2XXX, &qat_hw_c2xxx }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT, "Intel C3000 QuickAssist PF", QAT_CHIP_C3XXX, &qat_hw_c3xxx }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT, "Intel C620/Xeon D-2100 QuickAssist PF", QAT_CHIP_C62X, &qat_hw_c62x }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT, "Intel Xeon D-1500 QuickAssist PF", QAT_CHIP_D15XX, &qat_hw_d15xx }, { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT, "Intel 8950 QuickAssist PCIe Adapter PF", QAT_CHIP_DH895XCC, &qat_hw_dh895xcc }, { 0, 0, NULL, 0, NULL }, }; /* Hash Algorithm specific structure */ /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */ static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = { 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0 }; /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */ static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = { 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 }; /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = { 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 }; /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = { 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 }; static const struct 
qat_sym_hash_alg_info sha1_info = { .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE, .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE, .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE, .qshai_init_state = sha1_initial_state, .qshai_sah = &auth_hash_hmac_sha1, .qshai_state_offset = 0, .qshai_state_word = 4, }; static const struct qat_sym_hash_alg_info sha256_info = { .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE, .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE, .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE, .qshai_init_state = sha256_initial_state, .qshai_sah = &auth_hash_hmac_sha2_256, .qshai_state_offset = offsetof(SHA256_CTX, state), .qshai_state_word = 4, }; static const struct qat_sym_hash_alg_info sha384_info = { .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE, .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE, .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE, .qshai_init_state = sha384_initial_state, .qshai_sah = &auth_hash_hmac_sha2_384, .qshai_state_offset = offsetof(SHA384_CTX, state), .qshai_state_word = 8, }; static const struct qat_sym_hash_alg_info sha512_info = { .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE, .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE, .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE, .qshai_init_state = sha512_initial_state, .qshai_sah = &auth_hash_hmac_sha2_512, .qshai_state_offset = offsetof(SHA512_CTX, state), .qshai_state_word = 8, }; static const struct qat_sym_hash_alg_info aes_gcm_info = { .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE, .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE, .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE, .qshai_sah = &auth_hash_nist_gmac_aes_128, }; /* Hash QAT specific structures */ static const struct qat_sym_hash_qat_info sha1_config = { .qshqi_algo_enc = HW_AUTH_ALGO_SHA1, .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE, .qshqi_state1_len = HW_SHA1_STATE1_SZ, .qshqi_state2_len = HW_SHA1_STATE2_SZ, }; static const struct qat_sym_hash_qat_info sha256_config = { .qshqi_algo_enc = HW_AUTH_ALGO_SHA256, .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE, .qshqi_state1_len = HW_SHA256_STATE1_SZ, .qshqi_state2_len = HW_SHA256_STATE2_SZ }; static const struct qat_sym_hash_qat_info sha384_config = { .qshqi_algo_enc = HW_AUTH_ALGO_SHA384, .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE, .qshqi_state1_len = HW_SHA384_STATE1_SZ, .qshqi_state2_len = HW_SHA384_STATE2_SZ }; static const struct qat_sym_hash_qat_info sha512_config = { .qshqi_algo_enc = HW_AUTH_ALGO_SHA512, .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE, .qshqi_state1_len = HW_SHA512_STATE1_SZ, .qshqi_state2_len = HW_SHA512_STATE2_SZ }; static const struct qat_sym_hash_qat_info aes_gcm_config = { .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128, .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE, .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ, .qshqi_state2_len = HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ, }; static const struct qat_sym_hash_def qat_sym_hash_defs[] = { [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config }, [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config }, [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config }, [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config }, [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config }, }; static const struct qat_product *qat_lookup(device_t); static int qat_probe(device_t); static int qat_attach(device_t); static int qat_init(device_t); static int qat_start(device_t); static int qat_detach(device_t); static int qat_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp); static 
void qat_freesession(device_t dev, crypto_session_t cses); static int qat_setup_msix_intr(struct qat_softc *); static void qat_etr_init(struct qat_softc *); static void qat_etr_deinit(struct qat_softc *); static void qat_etr_bank_init(struct qat_softc *, int); static void qat_etr_bank_deinit(struct qat_softc *sc, int); static void qat_etr_ap_bank_init(struct qat_softc *); static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int); static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *, uint32_t, int); static void qat_etr_ap_bank_setup_ring(struct qat_softc *, struct qat_ring *); static int qat_etr_verify_ring_size(uint32_t, uint32_t); static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *, struct qat_ring *); static void qat_etr_bank_intr(void *); static void qat_arb_update(struct qat_softc *, struct qat_bank *); static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie( struct qat_crypto_bank *); static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *, struct qat_sym_cookie *); static int qat_crypto_setup_ring(struct qat_softc *, struct qat_crypto_bank *); static int qat_crypto_bank_init(struct qat_softc *, struct qat_crypto_bank *); static int qat_crypto_init(struct qat_softc *); static void qat_crypto_deinit(struct qat_softc *); static int qat_crypto_start(struct qat_softc *); static void qat_crypto_stop(struct qat_softc *); static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *); static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver"); static const struct qat_product * qat_lookup(device_t dev) { const struct qat_product *qatp; for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) { if (pci_get_vendor(dev) == qatp->qatp_vendor && pci_get_device(dev) == qatp->qatp_product) return qatp; } return NULL; } static int qat_probe(device_t dev) { const struct qat_product *prod; prod = qat_lookup(dev); if (prod != NULL) { device_set_desc(dev, prod->qatp_name); return BUS_PROBE_DEFAULT; } return ENXIO; } static int qat_attach(device_t dev) { struct qat_softc *sc = device_get_softc(dev); const struct qat_product *qatp; - bus_size_t msixtbl_offset; - int bar, count, error, i, msixoff, msixtbl_bar; + int bar, count, error, i; sc->sc_dev = dev; sc->sc_rev = pci_get_revid(dev); sc->sc_crypto.qcy_cid = -1; qatp = qat_lookup(dev); memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw)); /* Determine active accelerators and engines */ sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc); sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc); sc->sc_accel_num = 0; for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) { if (sc->sc_accel_mask & (1 << i)) sc->sc_accel_num++; } sc->sc_ae_num = 0; for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) { if (sc->sc_ae_mask & (1 << i)) sc->sc_ae_num++; } if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) { device_printf(sc->sc_dev, "couldn't find acceleration"); goto fail; } MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL); MPASS(sc->sc_ae_num <= MAX_NUM_AE); /* Determine SKU and capabilities */ sc->sc_sku = sc->sc_hw.qhw_get_sku(sc); sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc); sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc); - /* Map BARs */ - msixtbl_bar = 0; - msixtbl_offset = 0; - if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) { - uint32_t msixtbl; - msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4); - msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK; - msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK); - } - i = 0; if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) { 
MPASS(sc->sc_hw.qhw_sram_bar_id == 0); uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4); /* Skip SRAM BAR */ i = (fusectl & FUSECTL_MASK) ? 1 : 0; } for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) { uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4); if (val == 0 || !PCI_BAR_MEM(val)) continue; sc->sc_rid[i] = PCIR_BAR(bar); sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid[i], RF_ACTIVE); if (sc->sc_res[i] == NULL) { device_printf(dev, "couldn't map BAR %d\n", bar); goto fail; } sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]); sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]); i++; if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) bar++; } pci_enable_busmaster(dev); count = sc->sc_hw.qhw_num_banks + 1; if (pci_msix_count(dev) < count) { device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n", pci_msix_count(dev), count); goto fail; } error = pci_alloc_msix(dev, &count); if (error != 0) { device_printf(dev, "failed to allocate MSI-X vectors\n"); goto fail; } error = qat_init(dev); if (error == 0) return 0; fail: qat_detach(dev); return ENXIO; } static int qat_init(device_t dev) { struct qat_softc *sc = device_get_softc(dev); int error; qat_etr_init(sc); if (sc->sc_hw.qhw_init_admin_comms != NULL && (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) { device_printf(sc->sc_dev, "Could not initialize admin comms: %d\n", error); return error; } if (sc->sc_hw.qhw_init_arb != NULL && (error = sc->sc_hw.qhw_init_arb(sc)) != 0) { device_printf(sc->sc_dev, "Could not initialize hw arbiter: %d\n", error); return error; } error = qat_ae_init(sc); if (error) { device_printf(sc->sc_dev, "Could not initialize Acceleration Engine: %d\n", error); return error; } error = qat_aefw_load(sc); if (error) { device_printf(sc->sc_dev, "Could not load firmware: %d\n", error); return error; } error = qat_setup_msix_intr(sc); if (error) { device_printf(sc->sc_dev, "Could not setup interrupts: %d\n", error); return error; } sc->sc_hw.qhw_enable_intr(sc); error = qat_crypto_init(sc); if (error) { device_printf(sc->sc_dev, "Could not initialize service: %d\n", error); return error; } if (sc->sc_hw.qhw_enable_error_correction != NULL) sc->sc_hw.qhw_enable_error_correction(sc); if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL && (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) { device_printf(sc->sc_dev, "Could not initialize watchdog timer: %d\n", error); return error; } error = qat_start(dev); if (error) { device_printf(sc->sc_dev, "Could not start: %d\n", error); return error; } return 0; } static int qat_start(device_t dev) { struct qat_softc *sc = device_get_softc(dev); int error; error = qat_ae_start(sc); if (error) return error; if (sc->sc_hw.qhw_send_admin_init != NULL && (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) { return error; } error = qat_crypto_start(sc); if (error) return error; return 0; } static int qat_detach(device_t dev) { struct qat_softc *sc; int bar, i; sc = device_get_softc(dev); qat_crypto_stop(sc); qat_crypto_deinit(sc); qat_aefw_unload(sc); if (sc->sc_etr_banks != NULL) { for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) { struct qat_bank *qb = &sc->sc_etr_banks[i]; if (qb->qb_ih_cookie != NULL) (void)bus_teardown_intr(dev, qb->qb_ih, qb->qb_ih_cookie); if (qb->qb_ih != NULL) (void)bus_release_resource(dev, SYS_RES_IRQ, i + 1, qb->qb_ih); } } if (sc->sc_ih_cookie != NULL) { (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie); sc->sc_ih_cookie = NULL; } if (sc->sc_ih != NULL) { (void)bus_release_resource(dev, SYS_RES_IRQ, sc->sc_hw.qhw_num_banks + 1, 
sc->sc_ih); sc->sc_ih = NULL; } pci_release_msi(dev); qat_etr_deinit(sc); for (bar = 0; bar < MAX_BARS; bar++) { if (sc->sc_res[bar] != NULL) { (void)bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid[bar], sc->sc_res[bar]); sc->sc_res[bar] = NULL; } } return 0; } void * qat_alloc_mem(size_t size) { return (malloc(size, M_QAT, M_WAITOK | M_ZERO)); } void qat_free_mem(void *ptr) { free(ptr, M_QAT); } static void qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct qat_dmamem *qdm; if (error != 0) return; KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg)); qdm = arg; qdm->qdm_dma_seg = segs[0]; } int qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm, int nseg, bus_size_t size, bus_size_t alignment) { int error; KASSERT(qdm->qdm_dma_vaddr == NULL, ("%s: DMA memory descriptor in use", __func__)); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ nseg, /* nsegments */ size, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &qdm->qdm_dma_tag); if (error != 0) return error; error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &qdm->qdm_dma_map); if (error != 0) { device_printf(sc->sc_dev, "couldn't allocate dmamem, error = %d\n", error); goto fail_0; } error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map, qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "couldn't load dmamem map, error = %d\n", error); goto fail_1; } return 0; fail_1: bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map); fail_0: bus_dma_tag_destroy(qdm->qdm_dma_tag); return error; } void qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm) { if (qdm->qdm_dma_tag != NULL) { bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map); bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map); bus_dma_tag_destroy(qdm->qdm_dma_tag); explicit_bzero(qdm, sizeof(*qdm)); } } static int qat_setup_msix_intr(struct qat_softc *sc) { device_t dev; int error, i, rid; dev = sc->sc_dev; for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) { struct qat_bank *qb = &sc->sc_etr_banks[i - 1]; rid = i; qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (qb->qb_ih == NULL) { device_printf(dev, "failed to allocate bank intr resource\n"); return ENXIO; } error = bus_setup_intr(dev, qb->qb_ih, INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb, &qb->qb_ih_cookie); if (error != 0) { device_printf(dev, "failed to set up bank intr\n"); return error; } error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus); if (error != 0) device_printf(dev, "failed to bind intr %d\n", i); } rid = i; sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_ih == NULL) return ENXIO; error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie); return error; } static void qat_etr_init(struct qat_softc *sc) { int i; sc->sc_etr_banks = qat_alloc_mem( sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks); for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) qat_etr_bank_init(sc, i); if (sc->sc_hw.qhw_num_ap_banks) { sc->sc_etr_ap_banks = qat_alloc_mem( sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks); qat_etr_ap_bank_init(sc); } } static void qat_etr_deinit(struct 
qat_softc *sc) { int i; if (sc->sc_etr_banks != NULL) { for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) qat_etr_bank_deinit(sc, i); qat_free_mem(sc->sc_etr_banks); sc->sc_etr_banks = NULL; } if (sc->sc_etr_ap_banks != NULL) { qat_free_mem(sc->sc_etr_ap_banks); sc->sc_etr_ap_banks = NULL; } } static void qat_etr_bank_init(struct qat_softc *sc, int bank) { struct qat_bank *qb = &sc->sc_etr_banks[bank]; int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap; MPASS(bank < sc->sc_hw.qhw_num_banks); mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF); qb->qb_sc = sc; qb->qb_bank = bank; qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT; /* Clean CSRs for all rings within the bank */ for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { struct qat_ring *qr = &qb->qb_et_rings[i]; qat_etr_bank_ring_write_4(sc, bank, i, ETR_RING_CONFIG, 0); qat_etr_bank_ring_base_write_8(sc, bank, i, 0); if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t)); } else if (sc->sc_hw.qhw_tx_rings_mask & (1 << (i - tx_rx_gap))) { /* Share inflight counter with rx and tx */ qr->qr_inflight = qb->qb_et_rings[i - tx_rx_gap].qr_inflight; } } if (sc->sc_hw.qhw_init_etr_intr != NULL) { sc->sc_hw.qhw_init_etr_intr(sc, bank); } else { /* common code in qat 1.7 */ qat_etr_bank_write_4(sc, bank, ETR_INT_REG, ETR_INT_REG_CLEAR_MASK); for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank / ETR_RINGS_PER_INT_SRCSEL; i++) { qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL + (i * ETR_INT_SRCSEL_NEXT_OFFSET), ETR_INT_SRCSEL_MASK); } } } static void qat_etr_bank_deinit(struct qat_softc *sc, int bank) { struct qat_bank *qb; struct qat_ring *qr; int i; qb = &sc->sc_etr_banks[bank]; for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { qr = &qb->qb_et_rings[i]; qat_free_mem(qr->qr_inflight); } } } static void qat_etr_ap_bank_init(struct qat_softc *sc) { int ap_bank; for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) { struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank]; qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK, ETR_AP_NF_MASK_INIT); qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0); qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK, ETR_AP_NE_MASK_INIT); qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0); memset(qab, 0, sizeof(*qab)); } } static void qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask) { if (set_mask) *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); else *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); } static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest, uint32_t ring, int set_dest) { uint32_t ae_mask; uint8_t mailbox, ae, nae; uint8_t *dest = (uint8_t *)ap_dest; mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring); nae = 0; ae_mask = sc->sc_ae_mask; for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) { if ((ae_mask & (1 << ae)) == 0) continue; if (set_dest) { dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) | __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) | ETR_AP_DEST_ENABLE; } else { dest[nae] = 0; } nae++; if (nae == ETR_MAX_AE_PER_MAILBOX) break; } } static void qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr) { struct qat_ap_bank *qab; int ap_bank; if (sc->sc_hw.qhw_num_ap_banks == 0) return; ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring); MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks); qab = &sc->sc_etr_ap_banks[ap_bank]; if (qr->qr_cb == NULL) { qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1); if (!qab->qab_ne_dest) { 
qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest, qr->qr_ring, 1); qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, qab->qab_ne_dest); } } else { qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1); if (!qab->qab_nf_dest) { qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest, qr->qr_ring, 1); qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, qab->qab_nf_dest); } } } static int qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs) { int i = QAT_MIN_RING_SIZE; for (; i <= QAT_MAX_RING_SIZE; i++) if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i)) return i; return QAT_DEFAULT_RING_SIZE; } int qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring, uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg, const char *name, struct qat_ring **rqr) { struct qat_bank *qb; struct qat_ring *qr = NULL; int error; uint32_t ring_size_bytes, ring_config; uint64_t ring_base; uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512; uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0; MPASS(bank < sc->sc_hw.qhw_num_banks); /* Allocate a ring from specified bank */ qb = &sc->sc_etr_banks[bank]; if (ring >= sc->sc_hw.qhw_num_rings_per_bank) return EINVAL; if (qb->qb_allocated_rings & (1 << ring)) return ENOENT; qr = &qb->qb_et_rings[ring]; qb->qb_allocated_rings |= 1 << ring; /* Initialize allocated ring */ qr->qr_ring = ring; qr->qr_bank = bank; qr->qr_name = name; qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring; qr->qr_ring_mask = (1 << ring); qr->qr_cb = cb; qr->qr_cb_arg = cb_arg; /* Setup the shadow variables */ qr->qr_head = 0; qr->qr_tail = 0; qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size); qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs); /* * To make sure that the ring is aligned to the ring size, allocate * at least 4k and then tell the user it is smaller.
*/ ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size); ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes); error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes, ring_size_bytes); if (error) return error; qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr; qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr; memset(qr->qr_ring_vaddr, QAT_RING_PATTERN, qr->qr_dma.qdm_dma_seg.ds_len); bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); if (cb == NULL) { ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size); } else { ring_config = ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne); } qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config); ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size); qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base); if (sc->sc_hw.qhw_init_arb != NULL) qat_arb_update(sc, qb); mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF); qat_etr_ap_bank_setup_ring(sc, qr); if (cb != NULL) { uint32_t intr_mask; qb->qb_intr_mask |= qr->qr_ring_mask; intr_mask = qb->qb_intr_mask; qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask); qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL, ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); } *rqr = qr; return 0; } static inline u_int qat_modulo(u_int data, u_int shift) { u_int div = data >> shift; u_int mult = div << shift; return data - mult; } int qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg) { uint32_t inflight; uint32_t *addr; mtx_lock(&qr->qr_ring_mtx); inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1; if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) { atomic_subtract_32(qr->qr_inflight, 1); qr->qr_need_wakeup = true; mtx_unlock(&qr->qr_ring_mtx); counter_u64_add(sc->sc_ring_full_restarts, 1); return ERESTART; } addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail); memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size)); bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, BUS_DMASYNC_PREWRITE); qr->qr_tail = qat_modulo(qr->qr_tail + QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), QAT_RING_SIZE_MODULO(qr->qr_ring_size)); qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, ETR_RING_TAIL_OFFSET, qr->qr_tail); mtx_unlock(&qr->qr_ring_mtx); return 0; } static int qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb, struct qat_ring *qr) { uint32_t *msg, nmsg = 0; int handled = 0; bool blocked = false; mtx_lock(&qr->qr_ring_mtx); msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) { atomic_subtract_32(qr->qr_inflight, 1); if (qr->qr_cb != NULL) { mtx_unlock(&qr->qr_ring_mtx); handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg); mtx_lock(&qr->qr_ring_mtx); } atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG); qr->qr_head = qat_modulo(qr->qr_head + QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), QAT_RING_SIZE_MODULO(qr->qr_ring_size)); nmsg++; msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); } bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); if (nmsg > 0) { qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, ETR_RING_HEAD_OFFSET, qr->qr_head); if (qr->qr_need_wakeup) { blocked = true; qr->qr_need_wakeup = false; } } mtx_unlock(&qr->qr_ring_mtx); if (blocked) crypto_unblock(sc->sc_crypto.qcy_cid, 
CRYPTO_SYMQ); return handled; } static void qat_etr_bank_intr(void *arg) { struct qat_bank *qb = arg; struct qat_softc *sc = qb->qb_sc; uint32_t estat; - int i, handled = 0; + int i; mtx_lock(&qb->qb_bank_mtx); qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0); /* Now handle all the responses */ estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT); estat &= qb->qb_intr_mask; qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); mtx_unlock(&qb->qb_bank_mtx); while ((i = ffs(estat)) != 0) { struct qat_ring *qr = &qb->qb_et_rings[--i]; estat &= ~(1 << i); - handled |= qat_etr_ring_intr(sc, qb, qr); + (void)qat_etr_ring_intr(sc, qb, qr); } } void qat_arb_update(struct qat_softc *sc, struct qat_bank *qb) { qat_arb_ringsrvarben_write_4(sc, qb->qb_bank, qb->qb_allocated_rings & 0xff); } static struct qat_sym_cookie * qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb) { struct qat_sym_cookie *qsc; mtx_lock(&qcb->qcb_bank_mtx); if (qcb->qcb_symck_free_count == 0) { mtx_unlock(&qcb->qcb_bank_mtx); return NULL; } qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count]; mtx_unlock(&qcb->qcb_bank_mtx); return qsc; } static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc) { explicit_bzero(qsc->qsc_iv_buf, EALG_MAX_BLOCK_LEN); explicit_bzero(qsc->qsc_auth_res, QAT_SYM_HASH_BUFFER_LEN); mtx_lock(&qcb->qcb_bank_mtx); qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc; mtx_unlock(&qcb->qcb_bank_mtx); } void qat_memcpy_htobe64(void *dst, const void *src, size_t len) { uint64_t *dst0 = dst; const uint64_t *src0 = src; size_t i; MPASS(len % sizeof(*dst0) == 0); for (i = 0; i < len / sizeof(*dst0); i++) *(dst0 + i) = htobe64(*(src0 + i)); } void qat_memcpy_htobe32(void *dst, const void *src, size_t len) { uint32_t *dst0 = dst; const uint32_t *src0 = src; size_t i; MPASS(len % sizeof(*dst0) == 0); for (i = 0; i < len / sizeof(*dst0); i++) *(dst0 + i) = htobe32(*(src0 + i)); } void qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte) { switch (wordbyte) { case 4: qat_memcpy_htobe32(dst, src, len); break; case 8: qat_memcpy_htobe64(dst, src, len); break; default: panic("invalid word size %u", wordbyte); } } void qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc, const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, uint8_t *state) { uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)]; char zeros[AES_BLOCK_LEN]; int rounds; memset(zeros, 0, sizeof(zeros)); rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY); rijndaelEncrypt(ks, rounds, zeros, state); explicit_bzero(ks, sizeof(ks)); } void qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc, const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, uint8_t *state1, uint8_t *state2) { union authctx ctx; const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah; uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset; uint32_t state_size = hash_def->qshd_alg->qshai_state_size; uint32_t state_word = hash_def->qshd_alg->qshai_state_word; hmac_init_ipad(sah, key, klen, &ctx); qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size, state_word); hmac_init_opad(sah, key, klen, &ctx); qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size, state_word); explicit_bzero(&ctx, sizeof(ctx)); } static enum hw_cipher_algo qat_aes_cipher_algo(int klen) { switch (klen) { case HW_AES_128_KEY_SZ: return HW_CIPHER_ALGO_AES128; case HW_AES_192_KEY_SZ: return HW_CIPHER_ALGO_AES192; 
case HW_AES_256_KEY_SZ: return HW_CIPHER_ALGO_AES256; default: panic("invalid key length %d", klen); } } uint16_t qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc, const struct qat_session *qs) { enum hw_cipher_algo algo; enum hw_cipher_dir dir; enum hw_cipher_convert key_convert; enum hw_cipher_mode mode; dir = desc->qcd_cipher_dir; key_convert = HW_CIPHER_NO_CONVERT; mode = qs->qs_cipher_mode; switch (mode) { case HW_CIPHER_CBC_MODE: case HW_CIPHER_XTS_MODE: algo = qs->qs_cipher_algo; /* * AES decrypt key needs to be reversed. * Instead of reversing the key at session registration, * it is instead reversed on-the-fly by setting the KEY_CONVERT * bit here. */ if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT) key_convert = HW_CIPHER_KEY_CONVERT; break; case HW_CIPHER_CTR_MODE: algo = qs->qs_cipher_algo; dir = HW_CIPHER_ENCRYPT; break; default: panic("unhandled cipher mode %d", mode); break; } return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir); } uint16_t qat_crypto_load_auth_session(const struct qat_crypto_desc *desc, const struct qat_session *qs, const struct qat_sym_hash_def **hash_def) { enum qat_sym_hash_algorithm algo; switch (qs->qs_auth_algo) { case HW_AUTH_ALGO_SHA1: algo = QAT_SYM_HASH_SHA1; break; case HW_AUTH_ALGO_SHA256: algo = QAT_SYM_HASH_SHA256; break; case HW_AUTH_ALGO_SHA384: algo = QAT_SYM_HASH_SHA384; break; case HW_AUTH_ALGO_SHA512: algo = QAT_SYM_HASH_SHA512; break; case HW_AUTH_ALGO_GALOIS_128: algo = QAT_SYM_HASH_AES_GCM; break; default: panic("unhandled auth algorithm %d", qs->qs_auth_algo); break; } *hash_def = &qat_sym_hash_defs[algo]; return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode, (*hash_def)->qshd_qat->qshqi_algo_enc, (*hash_def)->qshd_alg->qshai_digest_len); } struct qat_crypto_load_cb_arg { struct qat_session *qs; struct qat_sym_cookie *qsc; struct cryptop *crp; int error; }; static int qat_crypto_populate_buf_list(struct buffer_list_desc *buffers, bus_dma_segment_t *segs, int niseg, int noseg, int skip) { struct flat_buffer_desc *flatbuf; bus_addr_t addr; bus_size_t len; int iseg, oseg; for (iseg = 0, oseg = noseg; iseg < niseg && oseg < QAT_MAXSEG; iseg++) { addr = segs[iseg].ds_addr; len = segs[iseg].ds_len; if (skip > 0) { if (skip < len) { addr += skip; len -= skip; skip = 0; } else { skip -= len; continue; } } flatbuf = &buffers->flat_bufs[oseg++]; flatbuf->data_len_in_bytes = (uint32_t)len; flatbuf->phy_buffer = (uint64_t)addr; } buffers->num_buffers = oseg; return iseg < niseg ? E2BIG : 0; } static void qat_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, int error) { struct qat_crypto_load_cb_arg *arg; struct qat_sym_cookie *qsc; arg = _arg; if (error != 0) { arg->error = error; return; } qsc = arg->qsc; arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs, nseg, 0, 0); } static void qat_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, int error) { struct cryptop *crp; struct qat_crypto_load_cb_arg *arg; struct qat_session *qs; struct qat_sym_cookie *qsc; int noseg, skip; arg = _arg; if (error != 0) { arg->error = error; return; } crp = arg->crp; qs = arg->qs; qsc = arg->qsc; if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { /* AAD was handled in qat_crypto_load(). */ skip = crp->crp_payload_start; noseg = 0; } else if (crp->crp_aad == NULL && crp->crp_aad_length > 0) { skip = crp->crp_aad_start; noseg = 0; } else { skip = crp->crp_payload_start; noseg = crp->crp_aad == NULL ? 
0 : qsc->qsc_buf_list.num_buffers; } arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs, nseg, noseg, skip); } static void qat_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, int error) { struct buffer_list_desc *ibufs, *obufs; struct flat_buffer_desc *ibuf, *obuf; struct cryptop *crp; struct qat_crypto_load_cb_arg *arg; struct qat_session *qs; struct qat_sym_cookie *qsc; int buflen, osegs, tocopy; arg = _arg; if (error != 0) { arg->error = error; return; } crp = arg->crp; qs = arg->qs; qsc = arg->qsc; /* * The payload must start at the same offset in the output SG list as in * the input SG list. Copy over SG entries from the input corresponding * to the AAD buffer. */ osegs = 0; if (qs->qs_auth_algo != HW_AUTH_ALGO_GALOIS_128 && crp->crp_aad_length > 0) { tocopy = crp->crp_aad == NULL ? crp->crp_payload_start - crp->crp_aad_start : crp->crp_aad_length; ibufs = &qsc->qsc_buf_list; obufs = &qsc->qsc_obuf_list; for (; osegs < ibufs->num_buffers && tocopy > 0; osegs++) { ibuf = &ibufs->flat_bufs[osegs]; obuf = &obufs->flat_bufs[osegs]; obuf->phy_buffer = ibuf->phy_buffer; buflen = imin(ibuf->data_len_in_bytes, tocopy); obuf->data_len_in_bytes = buflen; tocopy -= buflen; } } arg->error = qat_crypto_populate_buf_list(&qsc->qsc_obuf_list, segs, nseg, osegs, crp->crp_payload_output_start); } static int qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc, struct cryptop *crp) { struct qat_crypto_load_cb_arg arg; int error; crypto_read_iv(crp, qsc->qsc_iv_buf); arg.crp = crp; arg.qs = qs; arg.qsc = qsc; arg.error = 0; error = 0; if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 && crp->crp_aad_length > 0) { /* * The firmware expects AAD to be in a contiguous buffer and * padded to a multiple of 16 bytes. To satisfy these * constraints we bounce the AAD into a per-request buffer. * There is a small limit on the AAD size so this is not too * onerous. 
*/ memset(qsc->qsc_gcm_aad, 0, QAT_GCM_AAD_SIZE_MAX); if (crp->crp_aad == NULL) { crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, qsc->qsc_gcm_aad); } else { memcpy(qsc->qsc_gcm_aad, crp->crp_aad, crp->crp_aad_length); } } else if (crp->crp_aad != NULL) { error = bus_dmamap_load( qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap, crp->crp_aad, crp->crp_aad_length, qat_crypto_load_aadbuf_cb, &arg, BUS_DMA_NOWAIT); if (error == 0) error = arg.error; } if (error == 0) { error = bus_dmamap_load_crp_buffer( qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap, &crp->crp_buf, qat_crypto_load_buf_cb, &arg, BUS_DMA_NOWAIT); if (error == 0) error = arg.error; } if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) { error = bus_dmamap_load_crp_buffer( qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap, &crp->crp_obuf, qat_crypto_load_obuf_cb, &arg, BUS_DMA_NOWAIT); if (error == 0) error = arg.error; } return error; } static inline struct qat_crypto_bank * qat_crypto_select_bank(struct qat_crypto *qcy) { u_int cpuid = PCPU_GET(cpuid); return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks]; } static int qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb) { char *name; int bank, curname, error, i, j; bank = qcb->qcb_bank; curname = 0; name = qcb->qcb_ring_names[curname++]; snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank); error = qat_etr_setup_ring(sc, qcb->qcb_bank, sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size, NULL, NULL, name, &qcb->qcb_sym_tx); if (error) return error; name = qcb->qcb_ring_names[curname++]; snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank); error = qat_etr_setup_ring(sc, qcb->qcb_bank, sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size, qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx); if (error) return error; for (i = 0; i < QAT_NSYMCOOKIE; i++) { struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i]; struct qat_sym_cookie *qsc; error = qat_alloc_dmamem(sc, qdm, 1, sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN); if (error) return error; qsc = qdm->qdm_dma_vaddr; qsc->qsc_self_dmamap = qdm->qdm_dma_map; qsc->qsc_self_dma_tag = qdm->qdm_dma_tag; qsc->qsc_bulk_req_params_buf_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_bulk_cookie.qsbc_req_params_buf); qsc->qsc_buffer_list_desc_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_buf_list); qsc->qsc_obuffer_list_desc_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_obuf_list); qsc->qsc_obuffer_list_desc_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_obuf_list); qsc->qsc_iv_buf_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_iv_buf); qsc->qsc_auth_res_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_auth_res); qsc->qsc_gcm_aad_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_gcm_aad); qsc->qsc_content_desc_paddr = qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, qsc_content_desc); qcb->qcb_symck_free[i] = qsc; qcb->qcb_symck_free_count++; for (j = 0; j < QAT_SYM_DMA_COUNT; j++) { error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QAT_MAXLEN, /* maxsize */ QAT_MAXSEG, /* nsegments */ QAT_MAXLEN, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, 
NULL, /* lockfunc, lockarg */ &qsc->qsc_dma[j].qsd_dma_tag); if (error != 0) return error; error = bus_dmamap_create(qsc->qsc_dma[j].qsd_dma_tag, BUS_DMA_COHERENT, &qsc->qsc_dma[j].qsd_dmamap); if (error != 0) return error; } } return 0; } static int qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb) { mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF); return qat_crypto_setup_ring(sc, qcb); } static void qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb) { struct qat_dmamem *qdm; struct qat_sym_cookie *qsc; int i, j; for (i = 0; i < QAT_NSYMCOOKIE; i++) { qdm = &qcb->qcb_symck_dmamems[i]; qsc = qcb->qcb_symck_free[i]; for (j = 0; j < QAT_SYM_DMA_COUNT; j++) { bus_dmamap_destroy(qsc->qsc_dma[j].qsd_dma_tag, qsc->qsc_dma[j].qsd_dmamap); bus_dma_tag_destroy(qsc->qsc_dma[j].qsd_dma_tag); } qat_free_dmamem(sc, qdm); } qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma); qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma); mtx_destroy(&qcb->qcb_bank_mtx); } static int qat_crypto_init(struct qat_softc *sc) { struct qat_crypto *qcy = &sc->sc_crypto; struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; int bank, error, num_banks; qcy->qcy_sc = sc; if (sc->sc_hw.qhw_init_arb != NULL) num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks); else num_banks = sc->sc_ae_num; qcy->qcy_num_banks = num_banks; qcy->qcy_banks = qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks); for (bank = 0; bank < num_banks; bank++) { struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank]; qcb->qcb_bank = bank; error = qat_crypto_bank_init(sc, qcb); if (error) return error; } mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF); ctx = device_get_sysctl_ctx(sc->sc_dev); oid = device_get_sysctl_tree(sc->sc_dev); children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); children = SYSCTL_CHILDREN(oid); sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts", CTLFLAG_RD, &sc->sc_gcm_aad_restarts, "GCM requests deferred due to AAD size change"); sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates", CTLFLAG_RD, &sc->sc_gcm_aad_updates, "GCM requests that required session state update"); sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full", CTLFLAG_RD, &sc->sc_ring_full_restarts, "Requests deferred due to in-flight max reached"); sc->sc_sym_alloc_failures = counter_u64_alloc(M_WAITOK); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sym_alloc_failures", CTLFLAG_RD, &sc->sc_sym_alloc_failures, "Request allocation failures"); return 0; } static void qat_crypto_deinit(struct qat_softc *sc) { struct qat_crypto *qcy = &sc->sc_crypto; struct qat_crypto_bank *qcb; int bank; counter_u64_free(sc->sc_sym_alloc_failures); counter_u64_free(sc->sc_ring_full_restarts); counter_u64_free(sc->sc_gcm_aad_updates); counter_u64_free(sc->sc_gcm_aad_restarts); if (qcy->qcy_banks != NULL) { for (bank = 0; bank < qcy->qcy_num_banks; bank++) { qcb = &qcy->qcy_banks[bank]; qat_crypto_bank_deinit(sc, qcb); } qat_free_mem(qcy->qcy_banks); mtx_destroy(&qcy->qcy_crypto_mtx); } } static int qat_crypto_start(struct qat_softc *sc) { struct qat_crypto *qcy; qcy = &sc->sc_crypto; qcy->qcy_cid = crypto_get_driverid(sc->sc_dev, sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE); if (qcy->qcy_cid < 0) { 
device_printf(sc->sc_dev, "could not get opencrypto driver id\n"); return ENOENT; } return 0; } static void qat_crypto_stop(struct qat_softc *sc) { struct qat_crypto *qcy; qcy = &sc->sc_crypto; if (qcy->qcy_cid >= 0) (void)crypto_unregister_all(qcy->qcy_cid); } static void qat_crypto_sym_dma_unload(struct qat_sym_cookie *qsc, enum qat_sym_dma i) { bus_dmamap_sync(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap); } static int qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg) { char icv[QAT_SYM_HASH_BUFFER_LEN]; struct qat_crypto_bank *qcb = arg; struct qat_crypto *qcy; struct qat_session *qs; struct qat_sym_cookie *qsc; struct qat_sym_bulk_cookie *qsbc; struct cryptop *crp; int error; uint16_t auth_sz; bool blocked; qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset); qsbc = &qsc->qsc_bulk_cookie; qcy = qsbc->qsbc_crypto; qs = qsbc->qsbc_session; crp = qsbc->qsbc_cb_tag; bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (crp->crp_aad != NULL) qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_AADBUF); qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_BUF); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF); error = 0; if ((auth_sz = qs->qs_auth_mlen) != 0) { if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) { crypto_copydata(crp, crp->crp_digest_start, auth_sz, icv); if (timingsafe_bcmp(icv, qsc->qsc_auth_res, auth_sz) != 0) { error = EBADMSG; } } else { crypto_copyback(crp, crp->crp_digest_start, auth_sz, qsc->qsc_auth_res); } } qat_crypto_free_sym_cookie(qcb, qsc); blocked = false; mtx_lock(&qs->qs_session_mtx); MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE); qs->qs_inflight--; if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) { blocked = true; qs->qs_need_wakeup = false; } mtx_unlock(&qs->qs_session_mtx); crp->crp_etype = error; crypto_done(crp); if (blocked) crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ); return 1; } static int qat_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return EINVAL; if (csp->csp_cipher_alg == CRYPTO_AES_XTS && qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) { /* * AES-XTS is not supported by the NanoQAT. 
*/ return EINVAL; } switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return EINVAL; break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_DIGEST: switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: break; case CRYPTO_AES_NIST_GMAC: if (csp->csp_ivlen != AES_GCM_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_ivlen != AES_GCM_IV_LEN) return EINVAL; break; default: return EINVAL; } break; case CSP_MODE_ETA: switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return EINVAL; break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return EINVAL; break; default: return EINVAL; } break; default: return EINVAL; } break; default: return EINVAL; } return CRYPTODEV_PROBE_HARDWARE; } static int qat_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct qat_crypto *qcy; struct qat_dmamem *qdm; struct qat_session *qs; struct qat_softc *sc; struct qat_crypto_desc *ddesc, *edesc; int error, slices; sc = device_get_softc(dev); qs = crypto_get_driver_session(cses); qcy = &sc->sc_crypto; qdm = &qs->qs_desc_mem; error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG, sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN); if (error != 0) return error; mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF); qs->qs_aad_length = -1; qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr; qs->qs_enc_desc = edesc = ddesc + 1; ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr; ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr + offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr + sizeof(struct qat_crypto_desc); edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr + offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); qs->qs_status = QAT_SESSION_STATUS_ACTIVE; qs->qs_inflight = 0; qs->qs_cipher_key = csp->csp_cipher_key; qs->qs_cipher_klen = csp->csp_cipher_klen; qs->qs_auth_key = csp->csp_auth_key; qs->qs_auth_klen = csp->csp_auth_klen; switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); qs->qs_cipher_mode = HW_CIPHER_CBC_MODE; break; case CRYPTO_AES_ICM: qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; break; case CRYPTO_AES_XTS: qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen / 2); qs->qs_cipher_mode = HW_CIPHER_XTS_MODE; break; case CRYPTO_AES_NIST_GCM_16: qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; qs->qs_auth_mode = HW_AUTH_MODE1; break; case 0: break; default: panic("%s: unhandled cipher algorithm %d", __func__, csp->csp_cipher_alg); } switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; qs->qs_auth_mode = HW_AUTH_MODE1; break; case CRYPTO_SHA1: qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; 
qs->qs_auth_mode = HW_AUTH_MODE0; break; case CRYPTO_SHA2_256_HMAC: qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; qs->qs_auth_mode = HW_AUTH_MODE1; break; case CRYPTO_SHA2_256: qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; qs->qs_auth_mode = HW_AUTH_MODE0; break; case CRYPTO_SHA2_384_HMAC: qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; qs->qs_auth_mode = HW_AUTH_MODE1; break; case CRYPTO_SHA2_384: qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; qs->qs_auth_mode = HW_AUTH_MODE0; break; case CRYPTO_SHA2_512_HMAC: qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; qs->qs_auth_mode = HW_AUTH_MODE1; break; case CRYPTO_SHA2_512: qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; qs->qs_auth_mode = HW_AUTH_MODE0; break; case CRYPTO_AES_NIST_GMAC: qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen); qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; qs->qs_auth_mode = HW_AUTH_MODE1; qs->qs_cipher_key = qs->qs_auth_key; qs->qs_cipher_klen = qs->qs_auth_klen; break; case 0: break; default: panic("%s: unhandled auth algorithm %d", __func__, csp->csp_auth_alg); } slices = 0; switch (csp->csp_mode) { case CSP_MODE_AEAD: case CSP_MODE_ETA: /* auth then decrypt */ ddesc->qcd_slices[0] = FW_SLICE_AUTH; ddesc->qcd_slices[1] = FW_SLICE_CIPHER; ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; /* encrypt then auth */ edesc->qcd_slices[0] = FW_SLICE_CIPHER; edesc->qcd_slices[1] = FW_SLICE_AUTH; edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; slices = 2; break; case CSP_MODE_CIPHER: /* decrypt */ ddesc->qcd_slices[0] = FW_SLICE_CIPHER; ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER; /* encrypt */ edesc->qcd_slices[0] = FW_SLICE_CIPHER; edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; edesc->qcd_cmd_id = FW_LA_CMD_CIPHER; slices = 1; break; case CSP_MODE_DIGEST: if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { /* auth then decrypt */ ddesc->qcd_slices[0] = FW_SLICE_AUTH; ddesc->qcd_slices[1] = FW_SLICE_CIPHER; ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; /* encrypt then auth */ edesc->qcd_slices[0] = FW_SLICE_CIPHER; edesc->qcd_slices[1] = FW_SLICE_AUTH; edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; slices = 2; } else { ddesc->qcd_slices[0] = FW_SLICE_AUTH; ddesc->qcd_cmd_id = FW_LA_CMD_AUTH; edesc->qcd_slices[0] = FW_SLICE_AUTH; edesc->qcd_cmd_id = FW_LA_CMD_AUTH; slices = 1; } break; default: panic("%s: unhandled crypto algorithm %d, %d", __func__, csp->csp_cipher_alg, csp->csp_auth_alg); } ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc); qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc); if (csp->csp_auth_mlen != 0) qs->qs_auth_mlen = csp->csp_auth_mlen; else qs->qs_auth_mlen = edesc->qcd_auth_sz; /* Compute the GMAC by specifying a null cipher payload. 
*/ if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH; return 0; } static void qat_crypto_clear_desc(struct qat_crypto_desc *desc) { explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc)); explicit_bzero(desc->qcd_hash_state_prefix_buf, sizeof(desc->qcd_hash_state_prefix_buf)); explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache)); } static void qat_freesession(device_t dev, crypto_session_t cses) { struct qat_session *qs; qs = crypto_get_driver_session(cses); KASSERT(qs->qs_inflight == 0, ("%s: session %p has requests in flight", __func__, qs)); qat_crypto_clear_desc(qs->qs_enc_desc); qat_crypto_clear_desc(qs->qs_dec_desc); qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem); mtx_destroy(&qs->qs_session_mtx); } static int qat_process(device_t dev, struct cryptop *crp, int hint) { struct qat_crypto *qcy; struct qat_crypto_bank *qcb; struct qat_crypto_desc const *desc; struct qat_session *qs; struct qat_softc *sc; struct qat_sym_cookie *qsc; struct qat_sym_bulk_cookie *qsbc; int error; sc = device_get_softc(dev); qcy = &sc->sc_crypto; qs = crypto_get_driver_session(crp->crp_session); qsc = NULL; if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) { error = E2BIG; goto fail1; } mtx_lock(&qs->qs_session_mtx); if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) { error = E2BIG; mtx_unlock(&qs->qs_session_mtx); goto fail1; } /* * The firmware interface for GCM annoyingly requires the AAD * size to be stored in the session's content descriptor, which * is not really meant to be updated after session * initialization. For IPSec the AAD size is fixed so this is * not much of a problem in practice, but we have to catch AAD * size updates here so that the device code can safely update * the session's recorded AAD size. 
*/ if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) { if (qs->qs_inflight == 0) { if (qs->qs_aad_length != -1) { counter_u64_add(sc->sc_gcm_aad_updates, 1); } qs->qs_aad_length = crp->crp_aad_length; } else { qs->qs_need_wakeup = true; mtx_unlock(&qs->qs_session_mtx); counter_u64_add(sc->sc_gcm_aad_restarts, 1); error = ERESTART; goto fail1; } } } qs->qs_inflight++; mtx_unlock(&qs->qs_session_mtx); qcb = qat_crypto_select_bank(qcy); qsc = qat_crypto_alloc_sym_cookie(qcb); if (qsc == NULL) { counter_u64_add(sc->sc_sym_alloc_failures, 1); error = ENOBUFS; goto fail2; } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) desc = qs->qs_enc_desc; else desc = qs->qs_dec_desc; error = qat_crypto_load(qs, qsc, desc, crp); if (error != 0) goto fail2; qsbc = &qsc->qsc_bulk_cookie; qsbc->qsbc_crypto = qcy; qsbc->qsbc_session = qs; qsbc->qsbc_cb_tag = crp; sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp); if (crp->crp_aad != NULL) { bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag, qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); error = qat_etr_put_msg(sc, qcb->qcb_sym_tx, (uint32_t *)qsbc->qsbc_msg); if (error) goto fail2; return 0; fail2: if (qsc) qat_crypto_free_sym_cookie(qcb, qsc); mtx_lock(&qs->qs_session_mtx); qs->qs_inflight--; mtx_unlock(&qs->qs_session_mtx); fail1: crp->crp_etype = error; crypto_done(crp); return 0; } static device_method_t qat_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qat_probe), DEVMETHOD(device_attach, qat_attach), DEVMETHOD(device_detach, qat_detach), /* Cryptodev interface */ DEVMETHOD(cryptodev_probesession, qat_probesession), DEVMETHOD(cryptodev_newsession, qat_newsession), DEVMETHOD(cryptodev_freesession, qat_freesession), DEVMETHOD(cryptodev_process, qat_process), DEVMETHOD_END }; static devclass_t qat_devclass; static driver_t qat_driver = { .name = "qat", .methods = qat_methods, .size = sizeof(struct qat_softc), }; DRIVER_MODULE(qat, pci, qat_driver, qat_devclass, 0, 0); MODULE_VERSION(qat, 1); MODULE_DEPEND(qat, crypto, 1, 1, 1); MODULE_DEPEND(qat, pci, 1, 1, 1); diff --git a/sys/dev/qat/qat_ae.c b/sys/dev/qat/qat_ae.c index 9d0a93955195..853069706079 100644 --- a/sys/dev/qat/qat_ae.c +++ b/sys/dev/qat/qat_ae.c @@ -1,3453 +1,3445 @@ /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ /* $NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ /* * Copyright (c) 2019 Internet Initiative Japan, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #if 0 __KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); #endif #include #include #include #include #include #include #include #include #include "qatreg.h" #include "qatvar.h" #include "qat_aevar.h" static int qat_ae_write_4(struct qat_softc *, u_char, bus_size_t, uint32_t); static int qat_ae_read_4(struct qat_softc *, u_char, bus_size_t, uint32_t *); static void qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t, bus_size_t, uint32_t); static int qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t, bus_size_t, uint32_t *); static u_short qat_aereg_get_10bit_addr(enum aereg_type, u_short); static int qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, uint32_t); static int qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, uint32_t *); static int qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, uint32_t); static int qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, uint32_t); static int qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, uint32_t); static int qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short, u_short *, u_char *); static int qat_aereg_abs_data_write(struct qat_softc *, u_char, enum aereg_type, u_short, uint32_t); static void qat_ae_enable_ctx(struct qat_softc *, u_char, u_int); static void qat_ae_disable_ctx(struct qat_softc *, u_char, u_int); static void qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char); static void qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char); static void qat_ae_write_lm_mode(struct qat_softc *, u_char, enum aereg_type, u_char); static void qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char, u_char); static void qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char); static int qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int, u_int); static enum qat_ae_status qat_ae_get_status(struct qat_softc *, u_char); static int qat_ae_is_active(struct qat_softc *, u_char); static int qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int); static int qat_ae_clear_reset(struct qat_softc *); static int qat_ae_check(struct qat_softc *); static int qat_ae_reset_timestamp(struct qat_softc *); static void qat_ae_clear_xfer(struct qat_softc *); static int qat_ae_clear_gprs(struct qat_softc *); static void qat_ae_get_shared_ustore_ae(u_char, u_char *); static u_int qat_ae_ucode_parity64(uint64_t); static uint64_t qat_ae_ucode_set_ecc(uint64_t); static int qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int, const uint64_t *); static int qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int, uint64_t *); static u_int qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *); static int qat_ae_exec_ucode(struct qat_softc *, u_char, u_char, uint64_t *, u_int, int, u_int, u_int *); static int qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char, int *, uint64_t *, u_int, u_int *, u_int *, u_int *, u_int *, u_int *); static int qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char, u_int, u_int, u_int, u_int, u_int); static int qat_ae_get_inst_num(int); static int qat_ae_batch_put_lm(struct qat_softc *, u_char, struct qat_ae_batch_init_list *, size_t); static int qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int); static u_int qat_aefw_csum(char *, int); static const char *qat_aefw_uof_string(struct qat_softc *, size_t); 
static struct uof_chunk_hdr *qat_aefw_uof_find_chunk(struct qat_softc *, const char *, struct uof_chunk_hdr *); static int qat_aefw_load_mof(struct qat_softc *); static void qat_aefw_unload_mof(struct qat_softc *); static int qat_aefw_load_mmp(struct qat_softc *); static void qat_aefw_unload_mmp(struct qat_softc *); static int qat_aefw_mof_find_uof0(struct qat_softc *, struct mof_uof_hdr *, struct mof_uof_chunk_hdr *, u_int, size_t, const char *, size_t *, void **); static int qat_aefw_mof_find_uof(struct qat_softc *); static int qat_aefw_mof_parse(struct qat_softc *); static int qat_aefw_uof_parse_image(struct qat_softc *, struct qat_uof_image *, struct uof_chunk_hdr *uch); static int qat_aefw_uof_parse_images(struct qat_softc *); static int qat_aefw_uof_parse(struct qat_softc *); static int qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t, struct qat_dmamem *); static int qat_aefw_auth(struct qat_softc *, struct qat_dmamem *); static int qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma); static int qat_aefw_suof_parse_image(struct qat_softc *, struct qat_suof_image *, struct suof_chunk_hdr *); static int qat_aefw_suof_parse(struct qat_softc *); static int qat_aefw_suof_write(struct qat_softc *); static int qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *, struct qat_uof_image *); static int qat_aefw_uof_init_ae(struct qat_softc *, u_char); static int qat_aefw_uof_init(struct qat_softc *); static int qat_aefw_init_memory_one(struct qat_softc *, struct uof_init_mem *); static void qat_aefw_free_lm_init(struct qat_softc *, u_char); static int qat_aefw_init_ustore(struct qat_softc *); static int qat_aefw_init_reg(struct qat_softc *, u_char, u_char, enum aereg_type, u_short, u_int); static int qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char, struct qat_uof_image *); static int qat_aefw_init_memory(struct qat_softc *); static int qat_aefw_init_globals(struct qat_softc *); static uint64_t qat_aefw_get_uof_inst(struct qat_softc *, struct qat_uof_page *, u_int); static int qat_aefw_do_pagein(struct qat_softc *, u_char, struct qat_uof_page *); static int qat_aefw_uof_write_one(struct qat_softc *, struct qat_uof_image *); static int qat_aefw_uof_write(struct qat_softc *); static int qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset, uint32_t value) { int times = TIMEOUT_AE_CSR; do { qat_ae_local_write_4(sc, ae, offset, value); if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) & LOCAL_CSR_STATUS_STATUS) == 0) return 0; } while (times--); device_printf(sc->sc_dev, "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset); return EFAULT; } static int qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset, uint32_t *value) { int times = TIMEOUT_AE_CSR; uint32_t v; do { v = qat_ae_local_read_4(sc, ae, offset); if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) & LOCAL_CSR_STATUS_STATUS) == 0) { *value = v; return 0; } } while (times--); device_printf(sc->sc_dev, "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset); return EFAULT; } static void qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask, bus_size_t offset, uint32_t value) { int ctx; uint32_t ctxptr; MPASS(offset == CTX_FUTURE_COUNT_INDIRECT || offset == FUTURE_COUNT_SIGNAL_INDIRECT || offset == CTX_STS_INDIRECT || offset == CTX_WAKEUP_EVENTS_INDIRECT || offset == CTX_SIG_EVENTS_INDIRECT || offset == LM_ADDR_0_INDIRECT || offset == LM_ADDR_1_INDIRECT || offset == INDIRECT_LM_ADDR_0_BYTE_INDEX || offset == 
INDIRECT_LM_ADDR_1_BYTE_INDEX); qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr); for (ctx = 0; ctx < MAX_AE_CTX; ctx++) { if ((ctx_mask & (1 << ctx)) == 0) continue; qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx); qat_ae_write_4(sc, ae, offset, value); } qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr); } static int qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx, bus_size_t offset, uint32_t *value) { int error; uint32_t ctxptr; MPASS(offset == CTX_FUTURE_COUNT_INDIRECT || offset == FUTURE_COUNT_SIGNAL_INDIRECT || offset == CTX_STS_INDIRECT || offset == CTX_WAKEUP_EVENTS_INDIRECT || offset == CTX_SIG_EVENTS_INDIRECT || offset == LM_ADDR_0_INDIRECT || offset == LM_ADDR_1_INDIRECT || offset == INDIRECT_LM_ADDR_0_BYTE_INDEX || offset == INDIRECT_LM_ADDR_1_BYTE_INDEX); /* save the ctx ptr */ qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr); if ((ctxptr & CSR_CTX_POINTER_CONTEXT) != (ctx & CSR_CTX_POINTER_CONTEXT)) qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx); error = qat_ae_read_4(sc, ae, offset, value); /* restore ctx ptr */ if ((ctxptr & CSR_CTX_POINTER_CONTEXT) != (ctx & CSR_CTX_POINTER_CONTEXT)) qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr); return error; } static u_short qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg) { u_short addr; switch (regtype) { case AEREG_GPA_ABS: case AEREG_GPB_ABS: addr = (reg & 0x7f) | 0x80; break; case AEREG_GPA_REL: case AEREG_GPB_REL: addr = reg & 0x1f; break; case AEREG_SR_RD_REL: case AEREG_SR_WR_REL: case AEREG_SR_REL: addr = 0x180 | (reg & 0x1f); break; case AEREG_SR_INDX: addr = 0x140 | ((reg & 0x3) << 1); break; case AEREG_DR_RD_REL: case AEREG_DR_WR_REL: case AEREG_DR_REL: addr = 0x1c0 | (reg & 0x1f); break; case AEREG_DR_INDX: addr = 0x100 | ((reg & 0x3) << 1); break; case AEREG_NEIGH_INDX: addr = 0x241 | ((reg & 0x3) << 1); break; case AEREG_NEIGH_REL: addr = 0x280 | (reg & 0x1f); break; case AEREG_LMEM0: addr = 0x200; break; case AEREG_LMEM1: addr = 0x220; break; case AEREG_NO_DEST: addr = 0x300 | (reg & 0xff); break; default: addr = AEREG_BAD_REGADDR; break; } return (addr); } static int qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx, enum aereg_type regtype, u_short relreg, uint32_t value) { uint16_t srchi, srclo, destaddr, data16hi, data16lo; uint64_t inst[] = { 0x0F440000000ull, /* immed_w1[reg, val_hi16] */ 0x0F040000000ull, /* immed_w0[reg, val_lo16] */ 0x0F0000C0300ull, /* nop */ 0x0E000010000ull /* ctx_arb[kill] */ }; const int ninst = nitems(inst); const int imm_w1 = 0, imm_w0 = 1; unsigned int ctxen; uint16_t mask; /* This logic only works for GPRs and LM index registers, not NN or XFER registers! 
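* (Transfer registers are handled by qat_aereg_rel_rdxfer_write() and * qat_aereg_rel_wrxfer_write(), and NN registers by qat_aereg_rel_nn_write(); * see qat_aereg_abs_data_write() for the dispatch.)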
*/ MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL || regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1); if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) { /* determine the context mode */ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { /* 4-ctx mode */ if (ctx & 0x1) return EINVAL; mask = 0x1f; } else { /* 8-ctx mode */ mask = 0x0f; } if (relreg & ~mask) return EINVAL; } if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) == AEREG_BAD_REGADDR) { return EINVAL; } data16lo = 0xffff & value; data16hi = 0xffff & (value >> 16); srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST, (uint16_t)(0xff & data16hi)); srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST, (uint16_t)(0xff & data16lo)); switch (regtype) { case AEREG_GPA_REL: /* A rel source */ inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) | ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff); inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) | ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff); break; default: inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) | ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff); inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) | ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff); break; } return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL); } static int qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx, enum aereg_type regtype, u_short relreg, uint32_t *value) { uint64_t inst, savucode; uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi; u_int uaddr, ustore_addr; int error; u_short mask, regaddr; u_char nae; MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL || regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL || regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1); if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) || (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) || (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL)) { /* determine the context mode */ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { /* 4-ctx mode */ if (ctx & 0x1) return EINVAL; mask = 0x1f; } else { /* 8-ctx mode */ mask = 0x0f; } if (relreg & ~mask) return EINVAL; } if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) == AEREG_BAD_REGADDR) { return EINVAL; } /* instruction -- alu[--, --, B, reg] */ switch (regtype) { case AEREG_GPA_REL: /* A rel source */ inst = 0xA070000000ull | (regaddr & 0x3ff); break; default: inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10)); break; } /* backup shared control store bit, and force AE to * none-shared mode before executing ucode snippet */ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc); if (misc & AE_MISC_CONTROL_SHARE_CS) { qat_ae_get_shared_ustore_ae(ae, &nae); if ((1 << nae) & sc->sc_ae_mask && qat_ae_is_active(sc, nae)) return EBUSY; } nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS; qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc); /* read current context */ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx); qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl); qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); /* prevent clearing the W1C bits: the breakpoint bit, ECC error bit, and Parity error bit */ ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; /* change the context */ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, ctx & ACTIVE_CTX_STATUS_ACNO); /* save a ustore location */ if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) { /* restore 
AE_MISC_CONTROL csr */ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); /* restore the context */ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) { qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, savctx & ACTIVE_CTX_STATUS_ACNO); } qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl); return (error); } /* turn off ustore parity */ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE)); /* save ustore-addr csr */ qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr); /* write the ALU instruction to ustore, enable ecs bit */ uaddr = 0 | USTORE_ADDRESS_ECS; /* set the uaddress */ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); inst = qat_ae_ucode_set_ecc(inst); ulo = (uint32_t)(inst & 0xffffffff); uhi = (uint32_t)(inst >> 32); qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo); /* this will auto increment the address */ qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi); /* set the uaddress */ qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); /* delay for at least 8 cycles */ qat_ae_wait_num_cycles(sc, ae, 0x8, 0); /* read ALU output -- the instruction should have been executed prior to clearing the ECS in putUwords */ qat_ae_read_4(sc, ae, ALU_OUT, value); /* restore ustore-addr csr */ qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr); /* restore the ustore */ error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode); /* restore the context */ if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) { qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, savctx & ACTIVE_CTX_STATUS_ACNO); } qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl); /* restore AE_MISC_CONTROL csr */ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); return error; } static int qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx, enum aereg_type regtype, u_short relreg, uint32_t value) { bus_size_t addr; int error; uint32_t ctxen; u_short mask; u_short dr_offset; MPASS(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL || regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL); error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { if (ctx & 0x1) { device_printf(sc->sc_dev, "bad ctx argument in 4-ctx mode,ctx=0x%x\n", ctx); return EINVAL; } mask = 0x1f; dr_offset = 0x20; } else { mask = 0x0f; dr_offset = 0x10; } if (relreg & ~mask) return EINVAL; addr = relreg + (ctx << 0x5); switch (regtype) { case AEREG_SR_REL: case AEREG_SR_RD_REL: qat_ae_xfer_write_4(sc, ae, addr, value); break; case AEREG_DR_REL: case AEREG_DR_RD_REL: qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value); break; default: error = EINVAL; } return error; } static int qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx, enum aereg_type regtype, u_short relreg, uint32_t value) { panic("notyet"); return 0; } static int qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx, enum aereg_type regtype, u_short relreg, uint32_t value) { panic("notyet"); return 0; } static int qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae, u_short absreg, u_short *relreg, u_char *ctx) { uint32_t ctxen; qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { /* 4-ctx mode */ *relreg = absreg & 0x1f; *ctx = (absreg >> 0x4) & 0x6; } else { /* 8-ctx mode */ *relreg = absreg & 0x0f; *ctx = (absreg >> 0x4) & 0x7; } return 0; } static int qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae, enum aereg_type regtype, u_short absreg, uint32_t value) { int error; u_short relreg; u_char ctx; qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx); 
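/* qat_aereg_abs_to_rel() above splits the absolute register number into a per-context relative register and its owning context; for example, in 8-context mode absolute register 0x23 maps to relative register 3 of context 2. The switch below then dispatches on the register type. */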
switch (regtype) { case AEREG_GPA_ABS: MPASS(absreg < MAX_GPR_REG); error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, relreg, value); break; case AEREG_GPB_ABS: MPASS(absreg < MAX_GPR_REG); error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, relreg, value); break; case AEREG_DR_RD_ABS: MPASS(absreg < MAX_XFER_REG); error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL, relreg, value); break; case AEREG_SR_RD_ABS: MPASS(absreg < MAX_XFER_REG); error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL, relreg, value); break; case AEREG_DR_WR_ABS: MPASS(absreg < MAX_XFER_REG); error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL, relreg, value); break; case AEREG_SR_WR_ABS: MPASS(absreg < MAX_XFER_REG); error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL, relreg, value); break; case AEREG_NEIGH_ABS: MPASS(absreg < MAX_NN_REG); if (absreg >= MAX_NN_REG) return EINVAL; error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL, relreg, value); break; default: panic("Invalid Register Type"); } return error; } static void qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask) { uint32_t ctxen; qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { ctx_mask &= 0x55; } else { ctx_mask &= 0xff; } ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE); qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); } static void qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask) { uint32_t ctxen; qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE)); qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); } static void qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode) { uint32_t val, nval; qat_ae_read_4(sc, ae, CTX_ENABLES, &val); val &= CTX_ENABLES_IGNORE_W1C_MASK; if (mode == 4) nval = val | CTX_ENABLES_INUSE_CONTEXTS; else nval = val & ~CTX_ENABLES_INUSE_CONTEXTS; if (val != nval) qat_ae_write_4(sc, ae, CTX_ENABLES, nval); } static void qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode) { uint32_t val, nval; qat_ae_read_4(sc, ae, CTX_ENABLES, &val); val &= CTX_ENABLES_IGNORE_W1C_MASK; if (mode) nval = val | CTX_ENABLES_NN_MODE; else nval = val & ~CTX_ENABLES_NN_MODE; if (val != nval) qat_ae_write_4(sc, ae, CTX_ENABLES, nval); } static void qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae, enum aereg_type lm, u_char mode) { uint32_t val, nval; uint32_t bit; qat_ae_read_4(sc, ae, CTX_ENABLES, &val); val &= CTX_ENABLES_IGNORE_W1C_MASK; switch (lm) { case AEREG_LMEM0: bit = CTX_ENABLES_LMADDR_0_GLOBAL; break; case AEREG_LMEM1: bit = CTX_ENABLES_LMADDR_1_GLOBAL; break; default: panic("invalid lmem reg type"); break; } if (mode) nval = val | bit; else nval = val & ~bit; if (val != nval) qat_ae_write_4(sc, ae, CTX_ENABLES, nval); } static void qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode) { uint32_t val, nval; qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val); if (mode == 1) nval = val | AE_MISC_CONTROL_SHARE_CS; else nval = val & ~AE_MISC_CONTROL_SHARE_CS; if (val != nval) qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval); } static void qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode) { u_char nae; qat_ae_get_shared_ustore_ae(ae, &nae); qat_ae_write_shared_cs_mode0(sc, ae, mode); if ((sc->sc_ae_mask & (1 << nae))) { qat_ae_write_shared_cs_mode0(sc, nae, mode); } } static int qat_ae_set_reload_ustore(struct 
qat_softc *sc, u_char ae, u_int reload_size, int shared_mode, u_int ustore_dram_addr) { uint32_t val, cs_reload; switch (reload_size) { case 0: cs_reload = 0x0; break; case QAT_2K: cs_reload = 0x1; break; case QAT_4K: cs_reload = 0x2; break; case QAT_8K: cs_reload = 0x3; break; default: return EINVAL; } if (cs_reload) QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr; QAT_AE(sc, ae).qae_reload_size = reload_size; qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val); val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD | AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS); val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) | __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD); qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val); return 0; } static enum qat_ae_status qat_ae_get_status(struct qat_softc *sc, u_char ae) { int error; uint32_t val = 0; error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val); if (error || val & CTX_ENABLES_ENABLE) return QAT_AE_ENABLED; qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val); if (val & ACTIVE_CTX_STATUS_ABO) return QAT_AE_ACTIVE; return QAT_AE_DISABLED; } static int qat_ae_is_active(struct qat_softc *sc, u_char ae) { uint32_t val; if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED) return 1; qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val); if (val & ACTIVE_CTX_STATUS_ABO) return 1; else return 0; } /* returns 1 if actually waited for specified number of cycles */ static int qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check) { uint32_t cnt, actx; int pcnt, ccnt, elapsed, times; qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); pcnt = cnt & 0xffff; times = TIMEOUT_AE_CHECK; do { qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); ccnt = cnt & 0xffff; elapsed = ccnt - pcnt; if (elapsed == 0) { times--; } if (times <= 0) { device_printf(sc->sc_dev, "qat_ae_wait_num_cycles timeout\n"); return -1; } if (elapsed < 0) elapsed += 0x10000; if (elapsed >= CYCLES_FROM_READY2EXE && check) { if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) { if ((actx & ACTIVE_CTX_STATUS_ABO) == 0) return 0; } } } while (cycles > elapsed); if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) { if ((actx & ACTIVE_CTX_STATUS_ABO) == 0) return 0; } return 1; } int qat_ae_init(struct qat_softc *sc) { int error; uint32_t mask, val = 0; u_char ae; /* XXX adf_initSysMemInfo */ /* XXX Disable clock gating for some chip if debug mode */ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { struct qat_ae *qae = &sc->sc_ae[ae]; if (!(mask & 1)) continue; qae->qae_ustore_size = USTORE_SIZE; qae->qae_free_addr = 0; qae->qae_free_size = USTORE_SIZE; qae->qae_live_ctx_mask = AE_ALL_CTX; qae->qae_ustore_dram_addr = 0; qae->qae_reload_size = 0; } /* XXX Enable attention interrupt */ error = qat_ae_clear_reset(sc); if (error) return error; qat_ae_clear_xfer(sc); if (!sc->sc_hw.qhw_fw_auth) { error = qat_ae_clear_gprs(sc); if (error) return error; } /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val); val |= 0x1; qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val); } error = qat_ae_clear_reset(sc); if (error) return error; /* XXX XXX XXX Clean MMP memory if mem scrub is supported */ /* halMem_ScrubMMPMemory */ return 0; } int qat_ae_start(struct qat_softc *sc) { int error; u_char ae; for (ae = 0; ae < sc->sc_ae_num; ae++) { if ((sc->sc_ae_mask & (1 << ae)) == 0) continue; error = qat_aefw_start(sc, ae, 0xff); if (error) return error; } return 
0; } void qat_ae_cluster_intr(void *arg) { /* Nothing to implement until we support SRIOV. */ printf("qat_ae_cluster_intr\n"); } static int qat_ae_clear_reset(struct qat_softc *sc) { int error; uint32_t times, reset, clock, reg, mask; u_char ae; reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET); reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK)); reset &= ~(__SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK)); times = TIMEOUT_AE_RESET; do { qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset); if ((times--) == 0) { device_printf(sc->sc_dev, "couldn't reset AEs\n"); return EBUSY; } reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET); } while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) | __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK)) & reg); /* Enable clock for AE and QAT */ clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN); clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK); clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK); qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock); error = qat_ae_check(sc); if (error) return error; /* * Set undefined power-up/reset states to reasonable default values... * just to make sure we're starting from a known point */ for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; /* init the ctx_enable */ qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT); /* initialize the PCs */ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT); /* init the ctx_arb */ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT); /* enable cc */ qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT); qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT); qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT, CTX_SIG_EVENTS_INDIRECT_INIT); } if ((sc->sc_ae_mask != 0) && sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) { /* XXX XXX XXX init eSram only when this is boot time */ } if ((sc->sc_ae_mask != 0) && sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) { /* XXX XXX XXX wait shram to complete initialization */ } qat_ae_reset_timestamp(sc); return 0; } static int qat_ae_check(struct qat_softc *sc) { int error, times, ae; uint32_t cnt, pcnt, mask; for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; times = TIMEOUT_AE_CHECK; error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); if (error) { device_printf(sc->sc_dev, "couldn't access AE %d CSR\n", ae); return error; } pcnt = cnt & 0xffff; while (1) { error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); if (error) { device_printf(sc->sc_dev, "couldn't access AE %d CSR\n", ae); return error; } cnt &= 0xffff; if (cnt == pcnt) times--; else break; if (times <= 0) { device_printf(sc->sc_dev, "AE %d CSR is useless\n", ae); return EFAULT; } } } return 0; } static int qat_ae_reset_timestamp(struct qat_softc *sc) { uint32_t misc, mask; u_char ae; /* stop the timestamp timers */ misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC); if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) { qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC, misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN)); } for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0); qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0); } /* start timestamp timers */ qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC, misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN); return 
0; } static void qat_ae_clear_xfer(struct qat_softc *sc) { u_int mask, reg; u_char ae; for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; for (reg = 0; reg < MAX_GPR_REG; reg++) { qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS, reg, 0); qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS, reg, 0); } } } static int qat_ae_clear_gprs(struct qat_softc *sc) { uint32_t val; uint32_t saved_ctx = 0; int times = TIMEOUT_AE_CHECK, rv; u_char ae; u_int mask; for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; /* turn off share control store bit */ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val); val &= ~AE_MISC_CONTROL_SHARE_CS; qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val); /* turn off ucode parity */ /* make sure nn_mode is set to self */ qat_ae_read_4(sc, ae, CTX_ENABLES, &val); val &= CTX_ENABLES_IGNORE_W1C_MASK; val |= CTX_ENABLES_NN_MODE; val &= ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE; qat_ae_write_4(sc, ae, CTX_ENABLES, val); /* copy instructions to ustore */ qat_ae_ucode_write(sc, ae, 0, nitems(ae_clear_gprs_inst), ae_clear_gprs_inst); /* set PC */ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT); /* save current context */ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx); /* change the active context */ /* start the context from ctx 0 */ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0); /* wakeup-event voluntary */ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY); /* clean signals */ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT, 0); qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0); qat_ae_enable_ctx(sc, ae, AE_ALL_CTX); } for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; /* wait for AE to finish */ do { rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1); } while (rv && times--); if (times <= 0) { device_printf(sc->sc_dev, "qat_ae_clear_gprs timeout\n"); return ETIMEDOUT; } qat_ae_disable_ctx(sc, ae, AE_ALL_CTX); /* change the active context */ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, saved_ctx & ACTIVE_CTX_STATUS_ACNO); /* init the ctx_enable */ qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT); /* initialize the PCs */ qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT); /* init the ctx_arb */ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT); /* enable cc */ qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT); qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT); qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT, CTX_SIG_EVENTS_INDIRECT_INIT); } return 0; } static void qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae) { if (ae & 0x1) *nae = ae - 1; else *nae = ae + 1; } static u_int qat_ae_ucode_parity64(uint64_t ucode) { ucode ^= ucode >> 1; ucode ^= ucode >> 2; ucode ^= ucode >> 4; ucode ^= ucode >> 8; ucode ^= ucode >> 16; ucode ^= ucode >> 32; return ((u_int)(ucode & 1)); } static uint64_t qat_ae_ucode_set_ecc(uint64_t ucode) { static const uint64_t bit0mask=0xff800007fffULL, bit1mask=0x1f801ff801fULL, bit2mask=0xe387e0781e1ULL, bit3mask=0x7cb8e388e22ULL, bit4mask=0xaf5b2c93244ULL, bit5mask=0xf56d5525488ULL, bit6mask=0xdaf69a46910ULL; /* clear the ecc bits */ ucode &= ~(0x7fULL << USTORE_ECC_BIT_0); ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) << USTORE_ECC_BIT_0; ucode |=
(uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) << USTORE_ECC_BIT_1; ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) << USTORE_ECC_BIT_2; ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) << USTORE_ECC_BIT_3; ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) << USTORE_ECC_BIT_4; ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) << USTORE_ECC_BIT_5; ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) << USTORE_ECC_BIT_6; return (ucode); } static int qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst, const uint64_t *ucode) { uint64_t tmp; uint32_t ustore_addr, ulo, uhi; int i; qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= USTORE_ADDRESS_ECS; qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); for (i = 0; i < ninst; i++) { tmp = qat_ae_ucode_set_ecc(ucode[i]); ulo = (uint32_t)(tmp & 0xffffffff); uhi = (uint32_t)(tmp >> 32); qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo); /* this will auto increment the address */ qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi); } qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr); return 0; } static int qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst, uint64_t *ucode) { uint32_t misc, ustore_addr, ulo, uhi; u_int ii; u_char nae; if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED) return EBUSY; /* determine whether it neighbour AE runs in shared control store * status */ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc); if (misc & AE_MISC_CONTROL_SHARE_CS) { qat_ae_get_shared_ustore_ae(ae, &nae); if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae)) return EBUSY; } /* if reloadable, then get it all from dram-ustore */ if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD)) panic("notyet"); /* XXX getReloadUwords */ /* disable SHARE_CS bit to workaround silicon bug */ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb); MPASS(uaddr + ninst <= USTORE_SIZE); /* save ustore-addr csr */ qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr); uaddr |= USTORE_ADDRESS_ECS; /* enable ecs bit */ for (ii = 0; ii < ninst; ii++) { qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); uaddr++; qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo); qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi); ucode[ii] = uhi; ucode[ii] = (ucode[ii] << 32) | ulo; } /* restore SHARE_CS bit to workaround silicon bug */ qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr); return 0; } static u_int qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr, u_int *value) { const uint64_t *inst_arr; u_int ninst0, curvalue; int ii, vali, fixup, usize = 0; if (size == 0) return 0; ninst0 = ninst; vali = 0; curvalue = value[vali++]; switch (size) { case 0x1: inst_arr = ae_inst_1b; usize = nitems(ae_inst_1b); break; case 0x2: inst_arr = ae_inst_2b; usize = nitems(ae_inst_2b); break; case 0x3: inst_arr = ae_inst_3b; usize = nitems(ae_inst_3b); break; default: inst_arr = ae_inst_4b; usize = nitems(ae_inst_4b); break; } fixup = ninst; for (ii = 0; ii < usize; ii++) ucode[ninst++] = inst_arr[ii]; INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr)); fixup++; INSERT_IMMED_GPRA_CONST(ucode[fixup], 0); fixup++; INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0)); fixup++; INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16)); /* XXX fixup++ ? 
*/ if (size <= 0x4) return (ninst - ninst0); size -= sizeof(u_int); while (size >= sizeof(u_int)) { curvalue = value[vali++]; fixup = ninst; ucode[ninst++] = ae_inst_4b[0x2]; ucode[ninst++] = ae_inst_4b[0x3]; ucode[ninst++] = ae_inst_4b[0x8]; INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16)); fixup++; INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0)); /* XXX fixup++ ? */ addr += sizeof(u_int); size -= sizeof(u_int); } /* call this function recusive when the left size less than 4 */ ninst += qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali); return (ninst - ninst0); } static int qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx, uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles, u_int *endpc) { int error = 0, share_cs = 0; uint64_t savucode[MAX_EXEC_INST]; uint32_t indr_lm_addr_0, indr_lm_addr_1; uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1; uint32_t indr_future_cnt_sig; uint32_t indr_sig, active_sig; uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl; uint32_t misc, nmisc, ctxen; u_char nae; MPASS(ninst <= USTORE_SIZE); if (qat_ae_is_active(sc, ae)) return EBUSY; /* save current LM addr */ qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0); qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1); qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &indr_lm_addr_byte_0); qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &indr_lm_addr_byte_1); /* backup shared control store bit, and force AE to none-shared mode before executing ucode snippet */ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc); if (misc & AE_MISC_CONTROL_SHARE_CS) { share_cs = 1; qat_ae_get_shared_ustore_ae(ae, &nae); if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae)) return EBUSY; } nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS; qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc); /* save current states: */ if (ninst <= MAX_EXEC_INST) { error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode); if (error) { qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); return error; } } /* save wakeup-events */ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT, &wakeup_ev); /* save PC */ qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc); savpc &= UPC_MASK; /* save ctx enables */ qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; /* save conditional-code */ qat_ae_read_4(sc, ae, CC_ENABLE, &savcc); /* save current context */ qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx); qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl); /* save indirect csrs */ qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &indr_future_cnt_sig); qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig); qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig); /* turn off ucode parity */ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE); /* copy instructions to ustore */ qat_ae_ucode_write(sc, ae, 0, ninst, ucode); /* set PC */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0); /* change the active context */ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, ctx & ACTIVE_CTX_STATUS_ACNO); if (cond_code_off) { /* disable conditional-code*/ qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff); } /* wakeup-event voluntary */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY); /* clean signals */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0); qat_ae_write_4(sc, ae, 
CTX_SIG_EVENTS_ACTIVE, 0); /* enable context */ qat_ae_enable_ctx(sc, ae, 1 << ctx); /* wait for it to finish */ if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) != 0) error = ETIMEDOUT; /* see if we need to get the current PC */ if (endpc != NULL) { uint32_t ctx_status; qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &ctx_status); *endpc = ctx_status & UPC_MASK; } #if 0 { uint32_t ctx_status; qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &ctx_status); printf("%s: endpc 0x%08x\n", __func__, ctx_status & UPC_MASK); } #endif /* retore to previous states: */ /* disable context */ qat_ae_disable_ctx(sc, ae, 1 << ctx); if (ninst <= MAX_EXEC_INST) { /* instructions */ qat_ae_ucode_write(sc, ae, 0, ninst, savucode); } /* wakeup-events */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT, wakeup_ev); qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc); /* only restore shared control store bit, other bit might be changed by AE code snippet */ qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc); if (share_cs) nmisc = misc | AE_MISC_CONTROL_SHARE_CS; else nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS; qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc); /* conditional-code */ qat_ae_write_4(sc, ae, CC_ENABLE, savcc); /* change the active context */ qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, savctx & ACTIVE_CTX_STATUS_ACNO); /* restore the nxt ctx to run */ qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl); /* restore current LM addr */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT, indr_lm_addr_0); qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT, indr_lm_addr_1); qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, indr_lm_addr_byte_0); qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, indr_lm_addr_byte_1); /* restore indirect csrs */ qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT, indr_future_cnt_sig); qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, indr_sig); qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig); /* ctx-enables */ qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); return error; } static int qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx, int *first_exec, uint64_t *ucode, u_int ninst, u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1) { if (*first_exec) { qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0); qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1); qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2); qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0); qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1); *first_exec = 0; } return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL); } static int qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx, u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1) { qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0); qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1); qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2); qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0); qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1); return 0; } static int qat_ae_get_inst_num(int lmsize) { int ninst, left; if (lmsize == 0) return 0; left = lmsize % sizeof(u_int); if (left) { ninst = nitems(ae_inst_1b) + qat_ae_get_inst_num(lmsize - left); } else { /* 3 instruction is needed for further code */ ninst = (lmsize - 
sizeof(u_int)) * 3 / 4 + nitems(ae_inst_4b); } return (ninst); } static int qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae, struct qat_ae_batch_init_list *qabi_list, size_t nqabi) { struct qat_ae_batch_init *qabi; size_t alloc_ninst, ninst; uint64_t *ucode; u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1; int insnsz, error = 0, execed = 0, first_exec = 1; if (STAILQ_FIRST(qabi_list) == NULL) return 0; alloc_ninst = min(USTORE_SIZE, nqabi); ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst); ninst = 0; STAILQ_FOREACH(qabi, qabi_list, qabi_next) { insnsz = qat_ae_get_inst_num(qabi->qabi_size); if (insnsz + ninst > alloc_ninst) { /* add ctx_arb[kill] */ ucode[ninst++] = 0x0E000010000ull; execed = 1; error = qat_ae_exec_ucode_init_lm(sc, ae, 0, &first_exec, ucode, ninst, &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1); if (error) { qat_ae_restore_init_lm_gprs(sc, ae, 0, gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1); qat_free_mem(ucode); return error; } /* run microExec to execute the microcode */ ninst = 0; } ninst += qat_ae_concat_ucode(ucode, ninst, qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value); } if (ninst > 0) { ucode[ninst++] = 0x0E000010000ull; execed = 1; error = qat_ae_exec_ucode_init_lm(sc, ae, 0, &first_exec, ucode, ninst, &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1); } if (execed) { qat_ae_restore_init_lm_gprs(sc, ae, 0, gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1); } qat_free_mem(ucode); return error; } static int qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc) { if (qat_ae_is_active(sc, ae)) return EBUSY; qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT, UPC_MASK & upc); return 0; } static inline u_int qat_aefw_csum_calc(u_int reg, int ch) { int i; u_int topbit = CRC_BITMASK(CRC_WIDTH - 1); u_int inbyte = (u_int)((reg >> 0x18) ^ ch); reg ^= inbyte << (CRC_WIDTH - 0x8); for (i = 0; i < 0x8; i++) { if (reg & topbit) reg = (reg << 1) ^ CRC_POLY; else reg <<= 1; } return (reg & CRC_WIDTHMASK(CRC_WIDTH)); } static u_int qat_aefw_csum(char *buf, int size) { u_int csum = 0; while (size--) { csum = qat_aefw_csum_calc(csum, *buf++); } return csum; } static const char * qat_aefw_uof_string(struct qat_softc *sc, size_t offset) { if (offset >= sc->sc_aefw_uof.qafu_str_tab_size) return NULL; if (sc->sc_aefw_uof.qafu_str_tab == NULL) return NULL; return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset); } static struct uof_chunk_hdr * qat_aefw_uof_find_chunk(struct qat_softc *sc, const char *id, struct uof_chunk_hdr *cur) { struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr; struct uof_chunk_hdr *uch; int i; uch = (struct uof_chunk_hdr *)(uoh + 1); for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) { if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size) return NULL; if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN)) return uch; } return NULL; } static int qat_aefw_load_mof(struct qat_softc *sc) { const struct firmware *fw; fw = firmware_get(sc->sc_hw.qhw_mof_fwname); if (fw == NULL) { device_printf(sc->sc_dev, "couldn't load MOF firmware %s\n", sc->sc_hw.qhw_mof_fwname); return ENXIO; } sc->sc_fw_mof = qat_alloc_mem(fw->datasize); sc->sc_fw_mof_size = fw->datasize; memcpy(sc->sc_fw_mof, fw->data, fw->datasize); firmware_put(fw, FIRMWARE_UNLOAD); return 0; } static void qat_aefw_unload_mof(struct qat_softc *sc) { if (sc->sc_fw_mof != NULL) { qat_free_mem(sc->sc_fw_mof); sc->sc_fw_mof = NULL; } } static int qat_aefw_load_mmp(struct qat_softc *sc) { const struct firmware *fw; fw = firmware_get(sc->sc_hw.qhw_mmp_fwname); if (fw 
== NULL) { device_printf(sc->sc_dev, "couldn't load MMP firmware %s\n", sc->sc_hw.qhw_mmp_fwname); return ENXIO; } sc->sc_fw_mmp = qat_alloc_mem(fw->datasize); sc->sc_fw_mmp_size = fw->datasize; memcpy(sc->sc_fw_mmp, fw->data, fw->datasize); firmware_put(fw, FIRMWARE_UNLOAD); return 0; } static void qat_aefw_unload_mmp(struct qat_softc *sc) { if (sc->sc_fw_mmp != NULL) { qat_free_mem(sc->sc_fw_mmp); sc->sc_fw_mmp = NULL; } } static int qat_aefw_mof_find_uof0(struct qat_softc *sc, struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head, u_int nchunk, size_t size, const char *id, size_t *fwsize, void **fwptr) { int i; char *uof_name; for (i = 0; i < nchunk; i++) { struct mof_uof_chunk_hdr *much = &head[i]; if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN)) return EINVAL; if (much->much_offset + much->much_size > size) return EINVAL; if (sc->sc_mof.qmf_sym_size <= much->much_name) return EINVAL; uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym + much->much_name); if (!strcmp(uof_name, sc->sc_fw_uof_name)) { *fwptr = (void *)((uintptr_t)muh + (uintptr_t)much->much_offset); *fwsize = (size_t)much->much_size; return 0; } } return ENOENT; } static int qat_aefw_mof_find_uof(struct qat_softc *sc) { struct mof_uof_hdr *uof_hdr, *suof_hdr; u_int nuof_chunks = 0, nsuof_chunks = 0; int error; uof_hdr = sc->sc_mof.qmf_uof_objs; suof_hdr = sc->sc_mof.qmf_suof_objs; if (uof_hdr != NULL) { if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) { return EINVAL; } nuof_chunks = uof_hdr->muh_num_chunks; } if (suof_hdr != NULL) { if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks) return EINVAL; nsuof_chunks = suof_hdr->muh_num_chunks; } if (nuof_chunks + nsuof_chunks == 0) return EINVAL; if (uof_hdr != NULL) { error = qat_aefw_mof_find_uof0(sc, uof_hdr, (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks, sc->sc_mof.qmf_uof_objs_size, UOF_IMAG, &sc->sc_fw_uof_size, &sc->sc_fw_uof); if (error && error != ENOENT) return error; } if (suof_hdr != NULL) { error = qat_aefw_mof_find_uof0(sc, suof_hdr, (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks, sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG, &sc->sc_fw_suof_size, &sc->sc_fw_suof); if (error && error != ENOENT) return error; } if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL) return ENOENT; return 0; } static int qat_aefw_mof_parse(struct qat_softc *sc) { const struct mof_file_hdr *mfh; const struct mof_file_chunk_hdr *mfch; size_t size; u_int csum; int error, i; size = sc->sc_fw_mof_size; if (size < sizeof(struct mof_file_hdr)) return EINVAL; size -= sizeof(struct mof_file_hdr); mfh = sc->sc_fw_mof; if (mfh->mfh_fid != MOF_FID) return EINVAL; csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof + offsetof(struct mof_file_hdr, mfh_min_ver)), sc->sc_fw_mof_size - offsetof(struct mof_file_hdr, mfh_min_ver)); if (mfh->mfh_csum != csum) return EINVAL; if (mfh->mfh_min_ver != MOF_MIN_VER || mfh->mfh_maj_ver != MOF_MAJ_VER) return EINVAL; if (mfh->mfh_max_chunks < mfh->mfh_num_chunks) return EINVAL; if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks) return EINVAL; mfch = (const struct mof_file_chunk_hdr *)(mfh + 1); for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) { if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size) return EINVAL; if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) { if (sc->sc_mof.qmf_sym != NULL) return EINVAL; sc->sc_mof.qmf_sym = (void *)((uintptr_t)sc->sc_fw_mof + (uintptr_t)mfch->mfch_offset + sizeof(u_int)); sc->sc_mof.qmf_sym_size = *(u_int *)((uintptr_t)sc->sc_fw_mof +
(uintptr_t)mfch->mfch_offset); if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0) return EINVAL; if (mfch->mfch_size != sc->sc_mof.qmf_sym_size + sizeof(u_int) || mfch->mfch_size == 0) return EINVAL; if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym + sc->sc_mof.qmf_sym_size - 1) != '\0') return EINVAL; } else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) { if (sc->sc_mof.qmf_uof_objs != NULL) return EINVAL; sc->sc_mof.qmf_uof_objs = (void *)((uintptr_t)sc->sc_fw_mof + (uintptr_t)mfch->mfch_offset); sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size; } else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) { if (sc->sc_mof.qmf_suof_objs != NULL) return EINVAL; sc->sc_mof.qmf_suof_objs = (void *)((uintptr_t)sc->sc_fw_mof + (uintptr_t)mfch->mfch_offset); sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size; } } if (sc->sc_mof.qmf_sym == NULL || (sc->sc_mof.qmf_uof_objs == NULL && sc->sc_mof.qmf_suof_objs == NULL)) return EINVAL; error = qat_aefw_mof_find_uof(sc); if (error) return error; return 0; } static int qat_aefw_uof_parse_image(struct qat_softc *sc, struct qat_uof_image *qui, struct uof_chunk_hdr *uch) { struct uof_image *image; struct uof_code_page *page; uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; size_t lim = uch->uch_offset + uch->uch_size, size; int i, p; size = uch->uch_size; if (size < sizeof(struct uof_image)) return EINVAL; size -= sizeof(struct uof_image); qui->qui_image = image = (struct uof_image *)(base + uch->uch_offset); #define ASSIGN_OBJ_TAB(np, typep, type, base, off, lim) \ do { \ u_int nent; \ nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\ if ((lim) < off + sizeof(struct uof_obj_table) + \ sizeof(type) * nent) \ return EINVAL; \ *(np) = nent; \ if (nent > 0) \ *(typep) = (type)((struct uof_obj_table *) \ ((base) + (off)) + 1); \ else \ *(typep) = NULL; \ } while (0) ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg, struct uof_ae_reg *, base, image->ui_reg_tab, lim); ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym, struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim); ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak, struct qui_sbreak *, base, image->ui_sbreak_tab, lim); if (size < sizeof(struct uof_code_page) * image->ui_num_pages) return EINVAL; if (nitems(qui->qui_pages) < image->ui_num_pages) return EINVAL; page = (struct uof_code_page *)(image + 1); for (p = 0; p < image->ui_num_pages; p++, page++) { struct qat_uof_page *qup = &qui->qui_pages[p]; struct uof_code_area *uca; qup->qup_page_num = page->ucp_page_num; qup->qup_def_page = page->ucp_def_page; qup->qup_page_region = page->ucp_page_region; qup->qup_beg_vaddr = page->ucp_beg_vaddr; qup->qup_beg_paddr = page->ucp_beg_paddr; ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var, struct uof_uword_fixup *, base, page->ucp_uc_var_tab, lim); ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var, struct uof_import_var *, base, page->ucp_imp_var_tab, lim); ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr, struct uof_uword_fixup *, base, page->ucp_imp_expr_tab, lim); ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg, struct uof_uword_fixup *, base, page->ucp_neigh_reg_tab, lim); if (lim < page->ucp_code_area + sizeof(struct uof_code_area)) return EINVAL; uca = (struct uof_code_area *)(base + page->ucp_code_area); qup->qup_num_micro_words = uca->uca_num_micro_words; ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks, struct qat_uof_uword_block *, base, uca->uca_uword_block_tab, lim); for (i = 0; i < 
qup->qup_num_uw_blocks; i++) { u_int uwordoff = ((struct uof_uword_block *)( &qup->qup_uw_blocks[i]))->uub_uword_offset; if (lim < uwordoff) return EINVAL; qup->qup_uw_blocks[i].quub_micro_words = (base + uwordoff); } } #undef ASSIGN_OBJ_TAB return 0; } static int qat_aefw_uof_parse_images(struct qat_softc *sc) { struct uof_chunk_hdr *uch = NULL; - u_int assigned_ae; int i, error; for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) { uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch); if (uch == NULL) break; if (i >= nitems(sc->sc_aefw_uof.qafu_imgs)) return ENOENT; error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch); if (error) return error; sc->sc_aefw_uof.qafu_num_imgs++; } - assigned_ae = 0; - for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { - assigned_ae |= sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned; - } - return 0; } static int qat_aefw_uof_parse(struct qat_softc *sc) { struct uof_file_hdr *ufh; struct uof_file_chunk_hdr *ufch; struct uof_obj_hdr *uoh; struct uof_chunk_hdr *uch; void *uof = NULL; size_t size, uof_size, hdr_size; uintptr_t base; u_int csum; int i; size = sc->sc_fw_uof_size; if (size < MIN_UOF_SIZE) return EINVAL; size -= sizeof(struct uof_file_hdr); ufh = sc->sc_fw_uof; if (ufh->ufh_id != UOF_FID) return EINVAL; if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER) return EINVAL; if (ufh->ufh_max_chunks < ufh->ufh_num_chunks) return EINVAL; if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks) return EINVAL; ufch = (struct uof_file_chunk_hdr *)(ufh + 1); uof_size = 0; for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) { if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size) return EINVAL; if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) { if (uof != NULL) return EINVAL; uof = (void *)((uintptr_t)sc->sc_fw_uof + ufch->ufch_offset); uof_size = ufch->ufch_size; csum = qat_aefw_csum(uof, uof_size); if (csum != ufch->ufch_csum) return EINVAL; } } if (uof == NULL) return ENOENT; size = uof_size; if (size < sizeof(struct uof_obj_hdr)) return EINVAL; size -= sizeof(struct uof_obj_hdr); uoh = uof; if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks) return EINVAL; /* Check if the UOF objects are compatible with the chip */ if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0) return ENOTSUP; if (uoh->uoh_min_cpu_ver > sc->sc_rev || uoh->uoh_max_cpu_ver < sc->sc_rev) return ENOTSUP; sc->sc_aefw_uof.qafu_size = uof_size; sc->sc_aefw_uof.qafu_obj_hdr = uoh; base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; /* map uof string-table */ uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL); if (uch != NULL) { hdr_size = offsetof(struct uof_str_tab, ust_strings); sc->sc_aefw_uof.qafu_str_tab = (void *)(base + uch->uch_offset + hdr_size); sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size; } /* get ustore mem inits table -- should be only one */ uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL); if (uch != NULL) { if (uch->uch_size < sizeof(struct uof_obj_table)) return EINVAL; sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)(base + uch->uch_offset))->uot_nentries; if (sc->sc_aefw_uof.qafu_num_init_mem) { sc->sc_aefw_uof.qafu_init_mem = (struct uof_init_mem *)(base + uch->uch_offset + sizeof(struct uof_obj_table)); sc->sc_aefw_uof.qafu_init_mem_size = uch->uch_size - sizeof(struct uof_obj_table); } } uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL); if (uch != NULL) { if (uch->uch_size < sizeof(struct uof_obj_table) + sizeof(struct uof_var_mem_seg)) return EINVAL; 
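/* The variable memory segment descriptors follow the uof_obj_table header in the chunk, hence the sizeof(struct uof_obj_table) offset in the assignment below. */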
sc->sc_aefw_uof.qafu_var_mem_seg = (struct uof_var_mem_seg *)(base + uch->uch_offset + sizeof(struct uof_obj_table)); } return qat_aefw_uof_parse_images(sc); } static int qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi, struct suof_chunk_hdr *sch) { struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; struct simg_ae_mode *ae_mode; u_int maj_ver; qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset + sizeof(struct suof_obj_hdr); qsi->qsi_simg_len = ((struct suof_obj_hdr *) (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length; qsi->qsi_css_header = qsi->qsi_simg_buf; qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr); qsi->qsi_css_signature = qsi->qsi_css_key + CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN; ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg; qsi->qsi_ae_mask = ae_mode->sam_ae_mask; qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name; qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data; qsi->qsi_fw_type = ae_mode->sam_fw_type; if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type) return EINVAL; maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff; if ((maj_ver > ae_mode->sam_devmax_ver) || (maj_ver < ae_mode->sam_devmin_ver)) { return EINVAL; } return 0; } static int qat_aefw_suof_parse(struct qat_softc *sc) { struct suof_file_hdr *sfh; struct suof_chunk_hdr *sch; struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; struct qat_suof_image *qsi; size_t size; u_int csum; int ae0_img = MAX_AE; int i, error; size = sc->sc_fw_suof_size; if (size < sizeof(struct suof_file_hdr)) return EINVAL; sfh = sc->sc_fw_suof; if (sfh->sfh_file_id != SUOF_FID) return EINVAL; if (sfh->sfh_fw_type != 0) return EINVAL; if (sfh->sfh_num_chunks <= 1) return EINVAL; if (sfh->sfh_min_ver != SUOF_MIN_VER || sfh->sfh_maj_ver != SUOF_MAJ_VER) return EINVAL; csum = qat_aefw_csum((char *)&sfh->sfh_min_ver, size - offsetof(struct suof_file_hdr, sfh_min_ver)); if (csum != sfh->sfh_check_sum) return EINVAL; size -= sizeof(struct suof_file_hdr); qafs->qafs_file_id = SUOF_FID; qafs->qafs_suof_buf = sc->sc_fw_suof; qafs->qafs_suof_size = sc->sc_fw_suof_size; qafs->qafs_check_sum = sfh->sfh_check_sum; qafs->qafs_min_ver = sfh->sfh_min_ver; qafs->qafs_maj_ver = sfh->sfh_maj_ver; qafs->qafs_fw_type = sfh->sfh_fw_type; if (size < sizeof(struct suof_chunk_hdr)) return EINVAL; sch = (struct suof_chunk_hdr *)(sfh + 1); size -= sizeof(struct suof_chunk_hdr); if (size < sizeof(struct suof_str_tab)) return EINVAL; size -= offsetof(struct suof_str_tab, sst_strings); qafs->qafs_sym_size = ((struct suof_str_tab *) (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length; if (size < qafs->qafs_sym_size) return EINVAL; qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset + offsetof(struct suof_str_tab, sst_strings); qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1; if (qafs->qafs_num_simgs == 0) return EINVAL; qsi = qat_alloc_mem( sizeof(struct qat_suof_image) * qafs->qafs_num_simgs); qafs->qafs_simg = qsi; for (i = 0; i < qafs->qafs_num_simgs; i++) { error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]); if (error) return error; if ((qsi[i].qsi_ae_mask & 0x1) != 0) ae0_img = i; } if (ae0_img != qafs->qafs_num_simgs - 1) { struct qat_suof_image last_qsi; memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1], sizeof(struct qat_suof_image)); memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img], sizeof(struct qat_suof_image)); memcpy(&qsi[ae0_img], &last_qsi, sizeof(struct qat_suof_image)); } return 
0; } static int qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size, struct qat_dmamem *dma) { struct css_hdr *css = (struct css_hdr *)image; struct auth_chunk *auth_chunk; struct fw_auth_desc *auth_desc; size_t mapsize, simg_offset = sizeof(struct auth_chunk); bus_size_t bus_addr; uintptr_t virt_addr; int error; if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN) return EINVAL; mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ? CSS_AE_SIMG_LEN + simg_offset : size + CSS_FWSK_PAD_LEN + simg_offset; error = qat_alloc_dmamem(sc, dma, 1, mapsize, PAGE_SIZE); if (error) return error; memset(dma->qdm_dma_vaddr, 0, mapsize); auth_chunk = dma->qdm_dma_vaddr; auth_chunk->ac_chunk_size = mapsize; auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_seg.ds_addr; virt_addr = (uintptr_t)dma->qdm_dma_vaddr; virt_addr += simg_offset; bus_addr = auth_chunk->ac_chunk_bus_addr; bus_addr += simg_offset; auth_desc = &auth_chunk->ac_fw_auth_desc; auth_desc->fad_css_hdr_high = (uint64_t)bus_addr >> 32; auth_desc->fad_css_hdr_low = bus_addr; memcpy((void *)virt_addr, image, sizeof(struct css_hdr)); /* pub key */ virt_addr += sizeof(struct css_hdr); bus_addr += sizeof(struct css_hdr); image += sizeof(struct css_hdr); auth_desc->fad_fwsk_pub_high = (uint64_t)bus_addr >> 32; auth_desc->fad_fwsk_pub_low = bus_addr; memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN); memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN); memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN), image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t)); virt_addr += CSS_FWSK_PUB_LEN; bus_addr += CSS_FWSK_PUB_LEN; image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; auth_desc->fad_signature_high = (uint64_t)bus_addr >> 32; auth_desc->fad_signature_low = bus_addr; memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN); virt_addr += CSS_SIGNATURE_LEN; bus_addr += CSS_SIGNATURE_LEN; image += CSS_SIGNATURE_LEN; auth_desc->fad_img_high = (uint64_t)bus_addr >> 32; auth_desc->fad_img_low = bus_addr; auth_desc->fad_img_len = size - AE_IMG_OFFSET; memcpy((void *)virt_addr, image, auth_desc->fad_img_len); if (css->css_fw_type == CSS_AE_FIRMWARE) { auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high; auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low; bus_addr += sizeof(struct simg_ae_mode); auth_desc->fad_img_ae_init_data_high = (uint64_t)bus_addr >> 32; auth_desc->fad_img_ae_init_data_low = bus_addr; bus_addr += SIMG_AE_INIT_SEQ_LEN; auth_desc->fad_img_ae_insts_high = (uint64_t)bus_addr >> 32; auth_desc->fad_img_ae_insts_low = bus_addr; } else { auth_desc->fad_img_ae_insts_high = auth_desc->fad_img_high; auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low; } bus_dmamap_sync(dma->qdm_dma_tag, dma->qdm_dma_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return 0; } static int qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma) { bus_addr_t addr; uint32_t fcu, sts; int retry = 0; addr = dma->qdm_dma_seg.ds_addr; qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, (uint64_t)addr >> 32); qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr); qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH); do { DELAY(FW_AUTH_WAIT_PERIOD * 1000); fcu = qat_cap_global_read_4(sc, FCU_STATUS); sts = __SHIFTOUT(fcu, FCU_STATUS_STS); if (sts == FCU_STATUS_STS_VERI_FAIL) goto fail; if (fcu & FCU_STATUS_AUTHFWLD && sts == FCU_STATUS_STS_VERI_DONE) { return 0; } } while (retry++ < FW_AUTH_MAX_RETRY); fail: device_printf(sc->sc_dev, "firmware authentication error: status 0x%08x retry %d\n", fcu, retry); return 
EINVAL; } static int qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma) { struct simg_ae_mode *ae_mode; uint32_t fcu, sts, loaded; u_int mask; u_char ae; int retry = 0; ae_mode = (struct simg_ae_mode *)((uintptr_t)dma->qdm_dma_vaddr + sizeof(struct auth_chunk) + sizeof(struct css_hdr) + CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN); for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { if (!(mask & 1)) continue; if (!((ae_mode->sam_ae_mask >> ae) & 0x1)) continue; if (qat_ae_is_active(sc, ae)) { device_printf(sc->sc_dev, "AE %d is active\n", ae); return EINVAL; } qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE)); do { DELAY(FW_AUTH_WAIT_PERIOD * 1000); fcu = qat_cap_global_read_4(sc, FCU_STATUS); sts = __SHIFTOUT(fcu, FCU_STATUS_STS); loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE); if (sts == FCU_STATUS_STS_LOAD_DONE && (loaded & (1 << ae))) { break; } } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { device_printf(sc->sc_dev, "firmware load timeout: status %08x\n", fcu); return EINVAL; } } return 0; } static int qat_aefw_suof_write(struct qat_softc *sc) { struct qat_suof_image *qsi; int i, error = 0; for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) { qsi = &sc->sc_aefw_suof.qafs_simg[i]; error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf, qsi->qsi_simg_len, &qsi->qsi_dma); if (error) return error; error = qat_aefw_auth(sc, &qsi->qsi_dma); if (error) { qat_free_dmamem(sc, &qsi->qsi_dma); return error; } error = qat_aefw_suof_load(sc, &qsi->qsi_dma); if (error) { qat_free_dmamem(sc, &qsi->qsi_dma); return error; } qat_free_dmamem(sc, &qsi->qsi_dma); } qat_free_mem(sc->sc_aefw_suof.qafs_simg); return 0; } static int qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae, struct qat_uof_image *qui) { struct qat_ae_slice *slice; int i, npages, nregions; if (qae->qae_num_slices >= nitems(qae->qae_slices)) return ENOENT; if (qui->qui_image->ui_ae_mode & (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) { /* XXX */ device_printf(sc->sc_dev, "shared ae mode is not supported yet\n"); return ENOTSUP; } qae->qae_shareable_ustore = 0; /* XXX */ qae->qae_effect_ustore_size = USTORE_SIZE; slice = &qae->qae_slices[qae->qae_num_slices]; slice->qas_image = qui; slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned; nregions = qui->qui_image->ui_num_page_regions; npages = qui->qui_image->ui_num_pages; if (nregions > nitems(slice->qas_regions)) return ENOENT; if (npages > nitems(slice->qas_pages)) return ENOENT; for (i = 0; i < nregions; i++) { STAILQ_INIT(&slice->qas_regions[i].qar_waiting_pages); } for (i = 0; i < npages; i++) { struct qat_ae_page *page = &slice->qas_pages[i]; int region; page->qap_page = &qui->qui_pages[i]; region = page->qap_page->qup_page_region; if (region >= nregions) return EINVAL; page->qap_region = &slice->qas_regions[region]; } qae->qae_num_slices++; return 0; } static int qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae) { struct uof_image *image; struct qat_ae *qae = &(QAT_AE(sc, ae)); int s; u_char nn_mode; for (s = 0; s < qae->qae_num_slices; s++) { if (qae->qae_slices[s].qas_image == NULL) continue; image = qae->qae_slices[s].qas_image->qui_image; qat_ae_write_ctx_mode(sc, ae, __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE)); nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE); if (nn_mode != AE_MODE_NN_MODE_DONTCARE) qat_ae_write_nn_mode(sc, ae, nn_mode); qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0, __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0)); 
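/* the remaining ui_ae_mode fields select the second local-memory bank mode, the shared control-store mode and the reloadable ustore configuration */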
qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1, __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1)); qat_ae_write_shared_cs_mode(sc, ae, __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE)); qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size, __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED), qae->qae_reloc_ustore_dram); } return 0; } static int qat_aefw_uof_init(struct qat_softc *sc) { int ae, i, error; uint32_t mask; for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { struct qat_ae *qae; if (!(mask & 1)) continue; qae = &(QAT_AE(sc, ae)); for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned & (1 << ae)) == 0) continue; error = qat_aefw_uof_assign_image(sc, qae, &sc->sc_aefw_uof.qafu_imgs[i]); if (error) return error; } /* XXX UcLo_initNumUwordUsed */ qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */ error = qat_aefw_uof_init_ae(sc, ae); if (error) return error; } return 0; } int qat_aefw_load(struct qat_softc *sc) { int error; error = qat_aefw_load_mof(sc); if (error) return error; error = qat_aefw_load_mmp(sc); if (error) return error; error = qat_aefw_mof_parse(sc); if (error) { device_printf(sc->sc_dev, "couldn't parse mof: %d\n", error); return error; } if (sc->sc_hw.qhw_fw_auth) { error = qat_aefw_suof_parse(sc); if (error) { device_printf(sc->sc_dev, "couldn't parse suof: %d\n", error); return error; } error = qat_aefw_suof_write(sc); if (error) { device_printf(sc->sc_dev, "could not write firmware: %d\n", error); return error; } } else { error = qat_aefw_uof_parse(sc); if (error) { device_printf(sc->sc_dev, "couldn't parse uof: %d\n", error); return error; } error = qat_aefw_uof_init(sc); if (error) { device_printf(sc->sc_dev, "couldn't init for aefw: %d\n", error); return error; } error = qat_aefw_uof_write(sc); if (error) { device_printf(sc->sc_dev, "Could not write firmware: %d\n", error); return error; } } return 0; } void qat_aefw_unload(struct qat_softc *sc) { qat_aefw_unload_mmp(sc); qat_aefw_unload_mof(sc); } int qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask) { uint32_t fcu; int retry = 0; if (sc->sc_hw.qhw_fw_auth) { qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START); do { DELAY(FW_AUTH_WAIT_PERIOD * 1000); fcu = qat_cap_global_read_4(sc, FCU_STATUS); if (fcu & FCU_STATUS_DONE) return 0; } while (retry++ < FW_AUTH_MAX_RETRY); device_printf(sc->sc_dev, "firmware start timeout: status %08x\n", fcu); return EINVAL; } else { qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX, CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_SLEEP); qat_ae_enable_ctx(sc, ae, ctx_mask); } return 0; } static int qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim) { struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; struct qat_ae_batch_init_list *qabi_list; struct uof_mem_val_attr *memattr; size_t *curinit; u_long ael; int i; const char *sym; char *ep; memattr = (struct uof_mem_val_attr *)(uim + 1); switch (uim->uim_region) { case LMEM_REGION: if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) { device_printf(sc->sc_dev, "Invalid lmem addr or bytes\n"); return ENOBUFS; } if (uim->uim_scope != UOF_SCOPE_LOCAL) return EINVAL; sym = qat_aefw_uof_string(sc, uim->uim_sym_name); ael = strtoul(sym, &ep, 10); if (ep == sym || ael > MAX_AE) return EINVAL; if ((sc->sc_ae_mask & (1 << ael)) == 0) return 0; /* ae is fused out */ curinit = &qafu->qafu_num_lm_init[ael]; qabi_list = &qafu->qafu_lm_init[ael]; for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) { struct 
qat_ae_batch_init *qabi; qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init)); if (*curinit == 0) STAILQ_INIT(qabi_list); STAILQ_INSERT_TAIL(qabi_list, qabi, qabi_next); qabi->qabi_ae = (u_int)ael; qabi->qabi_addr = uim->uim_addr + memattr->umva_byte_offset; qabi->qabi_value = &memattr->umva_value; qabi->qabi_size = 4; qafu->qafu_num_lm_init_inst[ael] += qat_ae_get_inst_num(qabi->qabi_size); (*curinit)++; if (*curinit >= MAX_LMEM_REG) { device_printf(sc->sc_dev, "Invalid lmem val attr\n"); return ENOBUFS; } } break; case SRAM_REGION: case DRAM_REGION: case DRAM1_REGION: case SCRATCH_REGION: case UMEM_REGION: /* XXX */ /* fallthrough */ default: device_printf(sc->sc_dev, "unsupported memory region to init: %d\n", uim->uim_region); return ENOTSUP; } return 0; } static void qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae) { struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; struct qat_ae_batch_init *qabi; while ((qabi = STAILQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) { STAILQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next); qat_free_mem(qabi); } qafu->qafu_num_lm_init[ae] = 0; qafu->qafu_num_lm_init_inst[ae] = 0; } static int qat_aefw_init_ustore(struct qat_softc *sc) { uint64_t *fill; uint32_t dont_init; int a, i, p; int error = 0; int usz, end, start; u_char ae, nae; fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t)); for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) { struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a]; struct uof_image *ui = qui->qui_image; for (i = 0; i < MAX_USTORE; i++) memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t)); /* * Compute do_not_init value as a value that will not be equal * to fill data when cast to an int */ dont_init = 0; if (dont_init == (uint32_t)fill[0]) dont_init = 0xffffffff; for (p = 0; p < ui->ui_num_pages; p++) { struct qat_uof_page *qup = &qui->qui_pages[p]; if (!qup->qup_def_page) continue; for (i = qup->qup_beg_paddr; i < qup->qup_beg_paddr + qup->qup_num_micro_words; i++ ) { fill[i] = (uint64_t)dont_init; } } for (ae = 0; ae < sc->sc_ae_num; ae++) { MPASS(ae < UOF_MAX_NUM_OF_AE); if ((ui->ui_ae_assigned & (1 << ae)) == 0) continue; if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) { qat_ae_get_shared_ustore_ae(ae, &nae); if (ui->ui_ae_assigned & (1 << ae)) continue; } usz = QAT_AE(sc, ae).qae_effect_ustore_size; /* initialize the areas not going to be overwritten */ end = -1; do { /* find next uword that needs to be initialized */ for (start = end + 1; start < usz; start++) { if ((uint32_t)fill[start] != dont_init) break; } /* see if there are no more such uwords */ if (start >= usz) break; for (end = start + 1; end < usz; end++) { if ((uint32_t)fill[end] == dont_init) break; } if (QAT_AE(sc, ae).qae_shareable_ustore) { error = ENOTSUP; /* XXX */ goto out; } else { error = qat_ae_ucode_write(sc, ae, start, end - start, &fill[start]); if (error) { goto out; } } } while (end < usz); } } out: qat_free_mem(fill); return error; } static int qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask, enum aereg_type regtype, u_short regaddr, u_int value) { int error = 0; u_char ctx; switch (regtype) { case AEREG_GPA_REL: case AEREG_GPB_REL: case AEREG_SR_REL: case AEREG_SR_RD_REL: case AEREG_SR_WR_REL: case AEREG_DR_REL: case AEREG_DR_RD_REL: case AEREG_DR_WR_REL: case AEREG_NEIGH_REL: /* init for all valid ctx */ for (ctx = 0; ctx < MAX_AE_CTX; ctx++) { if ((ctx_mask & (1 << ctx)) == 0) continue; error = qat_aereg_rel_data_write(sc, ae, ctx, regtype, regaddr, value); } break; case AEREG_GPA_ABS: case AEREG_GPB_ABS: case 
AEREG_SR_ABS: case AEREG_SR_RD_ABS: case AEREG_SR_WR_ABS: case AEREG_DR_ABS: case AEREG_DR_RD_ABS: case AEREG_DR_WR_ABS: error = qat_aereg_abs_data_write(sc, ae, regtype, regaddr, value); break; default: error = EINVAL; break; } return error; } static int qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae, struct qat_uof_image *qui) { u_int i, expres; u_char ctx_mask; for (i = 0; i < qui->qui_num_init_reg_sym; i++) { struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i]; if (uirs->uirs_value_type == EXPR_VAL) { /* XXX */ device_printf(sc->sc_dev, "does not support initializing EXPR_VAL\n"); return ENOTSUP; } else { expres = uirs->uirs_value; } switch (uirs->uirs_init_type) { case INIT_REG: if (__SHIFTOUT(qui->qui_image->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX) { ctx_mask = 0xff; /* 8-ctx mode */ } else { ctx_mask = 0x55; /* 4-ctx mode */ } qat_aefw_init_reg(sc, ae, ctx_mask, (enum aereg_type)uirs->uirs_reg_type, (u_short)uirs->uirs_addr_offset, expres); break; case INIT_REG_CTX: if (__SHIFTOUT(qui->qui_image->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX) { ctx_mask = 0xff; /* 8-ctx mode */ } else { ctx_mask = 0x55; /* 4-ctx mode */ } if (((1 << uirs->uirs_ctx) & ctx_mask) == 0) return EINVAL; qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx, (enum aereg_type)uirs->uirs_reg_type, (u_short)uirs->uirs_addr_offset, expres); break; case INIT_EXPR: case INIT_EXPR_ENDIAN_SWAP: default: device_printf(sc->sc_dev, "does not support initializing init_type %d\n", uirs->uirs_init_type); return ENOTSUP; } } return 0; } static int qat_aefw_init_memory(struct qat_softc *sc) { struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; size_t uimsz, initmemsz = qafu->qafu_init_mem_size; struct uof_init_mem *uim; int error, i; u_char ae; uim = qafu->qafu_init_mem; for (i = 0; i < qafu->qafu_num_init_mem; i++) { uimsz = sizeof(struct uof_init_mem) + sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr; if (uimsz > initmemsz) { device_printf(sc->sc_dev, "invalid uof_init_mem or uof_mem_val_attr size\n"); return EINVAL; } if (uim->uim_num_bytes > 0) { error = qat_aefw_init_memory_one(sc, uim); if (error) { device_printf(sc->sc_dev, "Could not init ae memory: %d\n", error); return error; } } uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz); initmemsz -= uimsz; } /* run Batch put LM API */ for (ae = 0; ae < MAX_AE; ae++) { error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae], qafu->qafu_num_lm_init_inst[ae]); if (error) device_printf(sc->sc_dev, "Could not put lm\n"); qat_aefw_free_lm_init(sc, ae); } error = qat_aefw_init_ustore(sc); /* XXX run Batch put LM API */ return error; } static int qat_aefw_init_globals(struct qat_softc *sc) { struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; int error, i, p, s; u_char ae; /* initialize the memory segments */ if (qafu->qafu_num_init_mem > 0) { error = qat_aefw_init_memory(sc); if (error) return error; } else { error = qat_aefw_init_ustore(sc); if (error) return error; } /* XXX bind import variables with ivd values */ /* XXX bind the uC global variables * local variables will be done on-the-fly */ for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) { struct qat_uof_page *qup = &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p]; if (qup->qup_num_uw_blocks && (qup->qup_num_uc_var || qup->qup_num_imp_var)) { device_printf(sc->sc_dev, "uC global variables are not supported\n"); return ENOTSUP; } } } for (ae = 0; ae < sc->sc_ae_num; ae++) { struct qat_ae *qae = &(QAT_AE(sc, ae)); for (s = 0; s < 
qae->qae_num_slices; s++) { struct qat_ae_slice *qas = &qae->qae_slices[s]; if (qas->qas_image == NULL) continue; error = qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image); if (error) return error; } } return 0; } static uint64_t qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup, u_int addr) { uint64_t uinst = 0; u_int i; /* find the block */ for (i = 0; i < qup->qup_num_uw_blocks; i++) { struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i]; if ((addr >= quub->quub_start_addr) && (addr <= (quub->quub_start_addr + (quub->quub_num_words - 1)))) { /* unpack n bytes and assign them to the 64-bit uword value. note: the microwords are stored as packed bytes. */ addr -= quub->quub_start_addr; addr *= AEV2_PACKED_UWORD_BYTES; memcpy(&uinst, (void *)((uintptr_t)quub->quub_micro_words + addr), AEV2_PACKED_UWORD_BYTES); uinst = uinst & UWORD_MASK; return uinst; } } return INVLD_UWORD; } static int qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup) { struct qat_ae *qae = &(QAT_AE(sc, ae)); uint64_t fill, *ucode_cpybuf; - u_int error, i, upaddr, uraddr, ninst, cpylen; + u_int error, i, upaddr, ninst, cpylen; if (qup->qup_num_uc_var || qup->qup_num_neigh_reg || qup->qup_num_imp_var || qup->qup_num_imp_expr) { device_printf(sc->sc_dev, "does not support fixup locals\n"); return ENOTSUP; } ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t)); /* XXX get fill-pattern from an image -- they are all the same */ memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern, sizeof(uint64_t)); upaddr = qup->qup_beg_paddr; - uraddr = 0; ninst = qup->qup_num_micro_words; while (ninst > 0) { cpylen = min(ninst, UWORD_CPYBUF_SIZE); /* load the buffer */ for (i = 0; i < cpylen; i++) { /* keep below code structure in case there is * different handling for shared scenarios */ if (!qae->qae_shareable_ustore) { /* qat_aefw_get_uof_inst() takes an address that * is relative to the start of the page. * So we don't need to add in the physical * offset of the page. 
*/ if (qup->qup_page_region != 0) { /* XXX */ device_printf(sc->sc_dev, "region != 0 is not supported\n"); qat_free_mem(ucode_cpybuf); return ENOTSUP; } else { /* for the mixed case, it should take the * physical address */ ucode_cpybuf[i] = qat_aefw_get_uof_inst( sc, qup, upaddr + i); if (ucode_cpybuf[i] == INVLD_UWORD) { /* fill hole in the uof */ ucode_cpybuf[i] = fill; } } } else { /* XXX */ qat_free_mem(ucode_cpybuf); return ENOTSUP; } } /* copy the buffer to ustore */ if (!qae->qae_shareable_ustore) { error = qat_ae_ucode_write(sc, ae, upaddr, cpylen, ucode_cpybuf); if (error) { qat_free_mem(ucode_cpybuf); return error; } } else { /* XXX */ qat_free_mem(ucode_cpybuf); return ENOTSUP; } upaddr += cpylen; - uraddr += cpylen; ninst -= cpylen; } qat_free_mem(ucode_cpybuf); return 0; } static int qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui) { struct uof_image *ui = qui->qui_image; struct qat_ae_page *qap; u_int s, p, c; int error; u_char ae, ctx_mask; if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX) ctx_mask = 0xff; /* 8-ctx mode */ else ctx_mask = 0x55; /* 4-ctx mode */ /* load the default page and set assigned CTX PC * to the entrypoint address */ for (ae = 0; ae < sc->sc_ae_num; ae++) { struct qat_ae *qae = &(QAT_AE(sc, ae)); struct qat_ae_slice *qas; u_int metadata; MPASS(ae < UOF_MAX_NUM_OF_AE); if ((ui->ui_ae_assigned & (1 << ae)) == 0) continue; /* find the slice to which this image is assigned */ for (s = 0; s < qae->qae_num_slices; s++) { qas = &qae->qae_slices[s]; if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask) break; } if (s >= qae->qae_num_slices) continue; qas = &qae->qae_slices[s]; for (p = 0; p < ui->ui_num_pages; p++) { qap = &qas->qas_pages[p]; /* Only load pages loaded by default */ if (!qap->qap_page->qup_def_page) continue; error = qat_aefw_do_pagein(sc, ae, qap->qap_page); if (error) return error; } metadata = qas->qas_image->qui_image->ui_app_metadata; if (metadata != 0xffffffff && bootverbose) { device_printf(sc->sc_dev, "loaded firmware: %s\n", qat_aefw_uof_string(sc, metadata)); } /* Assume starting page is page 0 */ qap = &qas->qas_pages[0]; for (c = 0; c < MAX_AE_CTX; c++) { if (ctx_mask & (1 << c)) qas->qas_cur_pages[c] = qap; else qas->qas_cur_pages[c] = NULL; } /* set the live context */ qae->qae_live_ctx_mask = ui->ui_ctx_assigned; /* set context PC to the image entrypoint address */ error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned, ui->ui_entry_address); if (error) return error; } /* XXX store the checksum for convenience */ return 0; } static int qat_aefw_uof_write(struct qat_softc *sc) { int error = 0; int i; error = qat_aefw_init_globals(sc); if (error) { device_printf(sc->sc_dev, "Could not initialize globals\n"); return error; } for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { error = qat_aefw_uof_write_one(sc, &sc->sc_aefw_uof.qafu_imgs[i]); if (error) break; } /* XXX UcLo_computeFreeUstore */ return error; }
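/*
 * Illustrative sketch only, guarded out of the build (not part of the
 * driver): qat_aefw_init_ustore() above writes every maximal run of
 * uwords whose low 32 bits differ from the "dont_init" sentinel and
 * leaves the rest of the ustore untouched.  The helper below shows the
 * same run-finding idea in isolation; write_run() is a hypothetical
 * callback standing in for qat_ae_ucode_write().
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
write_runs(const uint64_t *fill, size_t usz, uint32_t dont_init,
    void (*write_run)(size_t start, size_t len, const uint64_t *words))
{
	size_t start, end;

	for (start = 0; start < usz; start = end) {
		/* skip uwords that carry the sentinel (left untouched) */
		while (start < usz && (uint32_t)fill[start] == dont_init)
			start++;
		/* extend the run until the sentinel shows up again */
		for (end = start; end < usz; end++) {
			if ((uint32_t)fill[end] == dont_init)
				break;
		}
		/* hand a non-empty run to the caller-supplied writer */
		if (end > start)
			write_run(start, end - start, &fill[start]);
	}
}
#endif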