xref: /netbsd/sys/dev/pci/qat/qat_hw17.c (revision 54e21c12)
/*	$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2014 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <opencrypto/xform.h>

/* XXX same as sys/arch/x86/x86/via_padlock.c */
#include <opencrypto/cryptosoft_xform.c>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qatvar.h"
#include "qat_hw17var.h"

int		qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
		    void *, void *);
int		qat_adm_mailbox_send(struct qat_softc *,
		    struct fw_init_admin_req *, struct fw_init_admin_resp *);
int		qat_adm_mailbox_send_init_me(struct qat_softc *);
int		qat_adm_mailbox_send_hb_timer(struct qat_softc *);
int		qat_adm_mailbox_send_fw_status(struct qat_softc *);
int		qat_adm_mailbox_send_constants(struct qat_softc *);

uint32_t	qat_hw17_crypto_setup_cipher_desc(struct qat_session *,
		    struct qat_crypto_desc *, struct cryptoini *,
		    union hw_cipher_algo_blk *, uint32_t, struct fw_la_bulk_req *,
		    enum fw_slice);
uint32_t	qat_hw17_crypto_setup_auth_desc(struct qat_session *,
		    struct qat_crypto_desc *, struct cryptoini *,
		    union hw_auth_algo_blk *, uint32_t, struct fw_la_bulk_req *,
		    enum fw_slice);
void		qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *,
		    struct fw_la_bulk_req *);

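/*
 * Set up the admin mailbox interface: allocate page-sized DMA regions for
 * the admin message buffer, the constant table (seeded from
 * mailbox_const_tab) and the heartbeat area, then program the admin
 * message base address registers with the message buffer's bus address.
 */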
int
qat_adm_mailbox_init(struct qat_softc *sc)
{
	uint64_t addr;
	int error;
	struct qat_dmamem *qdm;

	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
	error = qat_alloc_dmamem(sc, qdm, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	memcpy(qdm->qdm_dma_vaddr,
	    mailbox_const_tab, sizeof(mailbox_const_tab));

	bus_dmamap_sync(sc->sc_dmat, qdm->qdm_dma_map, 0,
	    qdm->qdm_dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
	qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
	qat_misc_write_4(sc, ADMINMSGLR, addr);

	return 0;
}

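/*
 * Synchronously exchange one admin message with acceleration engine "ae".
 * The request is copied into that AE's slot of the shared DMA buffer and
 * the per-AE mailbox register is written with 1 as a doorbell; the
 * firmware is expected to clear it once it has placed its response in the
 * adjacent slot.  Polls for up to 50 * 20ms (about one second) before
 * giving up with EFAULT; returns EAGAIN if the mailbox is still busy on
 * entry.
 */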
int
qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
    void *in, void *out)
{
	uint32_t mailbox;
	bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
	int offset = ae * ADMINMSG_LEN * 2;
	int times, received;
	uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;

	mailbox = qat_misc_read_4(sc, mb_offset);
	if (mailbox == 1)
		return EAGAIN;

	memcpy(buf, in, ADMINMSG_LEN);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0,
	    sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	qat_misc_write_4(sc, mb_offset, 1);

	received = 0;
	for (times = 0; times < 50; times++) {
		delay(20000);
		if (qat_misc_read_4(sc, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0,
		    sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
	} else {
		aprint_error_dev(sc->sc_dev,
		    "Failed to send admin msg to accelerator\n");
	}

	return received ? 0 : EFAULT;
}

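/*
 * Send an admin request to every acceleration engine enabled in
 * sc_ae_mask, one at a time, and fail if any engine reports a non-zero
 * status in its response header.
 */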
int
qat_adm_mailbox_send(struct qat_softc *sc,
    struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
{
	int error;
	uint32_t mask;
	uint8_t ae;

	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
		if (error)
			return error;
		if (resp->init_resp_hdr.status) {
			aprint_error_dev(sc->sc_dev,
			    "Failed to send admin msg: cmd %d\n",
			    req->init_admin_cmd_id);
			return EFAULT;
		}
	}

	return 0;
}

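/* Issue the FW_INIT_ME admin command to each enabled engine. */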
int
qat_adm_mailbox_send_init_me(struct qat_softc *sc)
{
	struct fw_init_admin_req req;
	struct fw_init_admin_resp resp;

	memset(&req, 0, sizeof(req));
	req.init_admin_cmd_id = FW_INIT_ME;

	return qat_adm_mailbox_send(sc, &req, &resp);
}

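/*
 * Arm the firmware heartbeat timer: point it at the heartbeat DMA area
 * and set the interval to qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL
 * ticks (i.e. QAT_HB_INTERVAL is presumably expressed in milliseconds).
 */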
int
qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
{
	struct fw_init_admin_req req;
	struct fw_init_admin_resp resp;

	memset(&req, 0, sizeof(req));
	req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;

	req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
	req.heartbeat_ticks =
	    sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;

	return qat_adm_mailbox_send(sc, &req, &resp);
}

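/*
 * Query the firmware version with FW_STATUS_GET and report it on the
 * console.
 */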
int
qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
{
	int error;
	struct fw_init_admin_req req;
	struct fw_init_admin_resp resp;

	memset(&req, 0, sizeof(req));
	req.init_admin_cmd_id = FW_STATUS_GET;

	error = qat_adm_mailbox_send(sc, &req, &resp);
	if (error)
		return error;

	aprint_normal_dev(sc->sc_dev,
	    "loaded firmware: version %d.%d.%d\n",
	    resp.u.s.version_major_num,
	    resp.u.s.version_minor_num,
	    resp.init_resp_pars.u.s1.version_patch_num);

	return 0;
}

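/*
 * Hand the firmware the bus address and size of the constants table that
 * was populated in qat_adm_mailbox_init() (FW_CONSTANTS_CFG).
 */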
int
qat_adm_mailbox_send_constants(struct qat_softc *sc)
{
	struct fw_init_admin_req req;
	struct fw_init_admin_resp resp;

	memset(&req, 0, sizeof(req));
	req.init_admin_cmd_id = FW_CONSTANTS_CFG;

	req.init_cfg_sz = 1024;
	req.init_cfg_ptr =
	    sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;

	return qat_adm_mailbox_send(sc, &req, &resp);
}

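/*
 * Run the full admin initialization sequence: init the engines, arm the
 * heartbeat timer, fetch the firmware version and load the constants
 * table.
 */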
int
qat_adm_mailbox_send_init(struct qat_softc *sc)
{
	int error;

	error = qat_adm_mailbox_send_init_me(sc);
	if (error)
		return error;

	error = qat_adm_mailbox_send_hb_timer(sc);
	if (error)
		return error;

	error = qat_adm_mailbox_send_fw_status(sc);
	if (error)
		return error;

	return qat_adm_mailbox_send_constants(sc);
}

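/*
 * Initialize the service arbiters and map each worker thread to an
 * arbiter using the per-chip table returned by qhw_get_arb_mapping().
 */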
int
qat_arb_init(struct qat_softc *sc)
{
	uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
	uint32_t arb, i;
	const uint32_t *thd_2_arb_cfg;

	/* Service arb configured for 32-byte responses and
	 * ring flow control check enabled. */
	for (arb = 0; arb < MAX_ARB; arb++)
		qat_arb_sarconfig_write_4(sc, arb, arb_cfg);

	/* Map worker threads to service arbiters */
	sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);

	if (!thd_2_arb_cfg)
		return EINVAL;

	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
		qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));

	return 0;
}

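/*
 * Program the SSM watchdog timers (regular and PKE) of every enabled
 * accelerator with qhw_clock_per_sec / 1000 * QAT_SSM_WDT ticks, i.e.
 * QAT_SSM_WDT presumably expressed in milliseconds.
 */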
int
qat_set_ssm_wdtimer(struct qat_softc *sc)
{
	uint32_t timer;
	u_int mask;
	int i;

	timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
	for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_misc_write_4(sc, SSMWDT(i), timer);
		qat_misc_write_4(sc, SSMWDTPKE(i), timer);
	}

	return 0;
}

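/*
 * Slice-hang checking is a stub here: no registers are inspected and the
 * function always reports that nothing was handled.
 */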
int
qat_check_slice_hang(struct qat_softc *sc)
{
	int handled = 0;

	return handled;
}

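/*
 * Fill in the cipher portion of a content descriptor: load the cipher
 * configuration and key into the hardware setup block and record their
 * sizes and offset (in 8-byte words) in the request template's cd_ctrl
 * header.  Returns the number of bytes consumed in the content
 * descriptor block, rounded up to a multiple of 8.
 */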
uint32_t
qat_hw17_crypto_setup_cipher_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *crie,
    union hw_cipher_algo_blk *cipher, uint32_t cd_blk_offset,
    struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
	struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
	    (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
	int keylen = crie->cri_klen / 8;

	cipher->max.cipher_config.val =
	    qat_crypto_load_cipher_cryptoini(desc, crie);
	memcpy(cipher->max.key, crie->cri_key, keylen);

	cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
	cipher_cd_ctrl->cipher_key_sz = keylen >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
	FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
	FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);

	return roundup(sizeof(struct hw_cipher_config) + keylen, 8);
}

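/*
 * Fill in the auth portion of a content descriptor: load the hash
 * configuration, precompute the HMAC state into state1 and state2, and
 * record the state sizes and offsets (in 8-byte words) in the cd_ctrl
 * header.  Returns the number of bytes consumed in the content
 * descriptor block.
 */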
uint32_t
qat_hw17_crypto_setup_auth_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *cria,
    union hw_auth_algo_blk *auth, uint32_t cd_blk_offset,
    struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
	struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
	    (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
	struct qat_sym_hash_def const *hash_def;
	uint8_t *state1, *state2;

	auth->max.inner_setup.auth_config.config =
	    qat_crypto_load_auth_cryptoini(desc, cria, &hash_def);
	auth->max.inner_setup.auth_counter.counter =
	    htonl(hash_def->qshd_qat->qshqi_auth_counter);

	auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
	auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_cd_ctrl->final_sz = desc->qcd_auth_sz;

	auth_cd_ctrl->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_cd_ctrl->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	auth_cd_ctrl->inner_state2_offset =
	    auth_cd_ctrl->hash_cfg_offset +
	    ((sizeof(struct hw_auth_setup) +
	    auth_cd_ctrl->inner_state1_sz) >> 3);

	state1 = auth->max.state1;
	state2 = auth->max.state1 + auth_cd_ctrl->inner_state1_sz;
	qat_crypto_hmac_precompute(desc, cria, hash_def, state1, state2);

	FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
	FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);

	return roundup(auth_cd_ctrl->inner_state1_sz +
	    auth_cd_ctrl->inner_state2_sz +
	    sizeof(struct hw_auth_setup), 8);
}

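/*
 * Initialize the common request header of a lookaside (LA) bulk request
 * template: command id, valid flag, service type, SGL pointer type and
 * the bus address of the content descriptor.
 */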
void
qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
    struct fw_la_bulk_req *req)
{
	union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;

	req_hdr->service_cmd_id = desc->qcd_cmd_id;
	req_hdr->hdr_flags = FW_COMN_VALID;
	req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
	req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
	    COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
	req_hdr->serv_specif_flags = 0;
	cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
}

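/*
 * Build the per-session request template and content descriptor: walk the
 * slice chain in qcd_slices, appending a cipher and/or auth setup block
 * for each slice until FW_SLICE_DRAM_WR terminates the chain, then record
 * the total content descriptor size (in 8-byte words) and sync the
 * session memory for the device.
 */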
void
qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc,
    struct cryptoini *crie, struct cryptoini *cria)
{
	union hw_cipher_algo_blk *cipher;
	union hw_auth_algo_blk *auth;
	struct fw_la_bulk_req *req_tmpl;
	struct fw_comn_req_hdr *req_hdr;
	union fw_comn_req_hdr_cd_pars *cd_pars;
	uint32_t cd_blk_offset = 0;
	int i;
	uint8_t *cd_blk_ptr;

	req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	req_hdr = &req_tmpl->comn_hdr;
	cd_pars = &req_tmpl->cd_pars;
	cd_blk_ptr = desc->qcd_content_desc;

	memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
	qat_hw17_init_comn_req_hdr(desc, req_tmpl);

	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_cipher_desc(
			    qs, desc, crie, cipher, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			break;
		case FW_SLICE_AUTH:
			auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_auth_desc(
			    qs, desc, cria, auth, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
			/* no digest verify */
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			KASSERT(0);
			break;
		}
	}

	cd_pars->s.content_desc_params_sz =
	    roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "qcd_content_desc",
	    desc->qcd_content_desc, cd_pars->s.content_desc_params_sz << 3);
	qat_dump_raw(QAT_DUMP_DESC, "qcd_req_cache",
	    &desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
#endif

	bus_dmamap_sync(qcy->qcy_sc->sc_dmat,
	    qcy->qcy_session_dmamems[qs->qs_lid].qdm_dma_map, 0,
	    sizeof(struct qat_session),
	    BUS_DMASYNC_PREWRITE);
}

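/*
 * Construct the bulk request for one crypto operation from the session's
 * cached template: point it at the scatter/gather buffer list, then
 * append cipher parameters (IV address, offset, length) unless this is an
 * auth-only command, and auth parameters (offset, length, digest address)
 * unless it is a cipher-only command.
 */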
void
qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb, struct qat_session *qs,
    struct qat_crypto_desc const *desc, struct qat_sym_cookie *qsc,
    struct cryptodesc *crde, struct cryptodesc *crda, bus_addr_t icv_paddr)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_param;
	struct fw_la_auth_req_params *auth_param;
	uint32_t req_params_offset = 0;
	uint8_t *req_params_ptr;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;

	qsbc = &qsc->u.qsc_bulk_cookie;
	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;

	memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;

	if (icv_paddr != 0)
		bulk_req->comn_hdr.serv_specif_flags |= FW_LA_DIGEST_IN_BUFFER;

	req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;

	if (cmd_id != FW_LA_CMD_AUTH) {
		cipher_param = (struct fw_la_cipher_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_cipher_req_params);

		cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
		cipher_param->cipher_offset = crde->crd_skip;
		cipher_param->cipher_length = crde->crd_len;
	}

	if (cmd_id != FW_LA_CMD_CIPHER) {
		auth_param = (struct fw_la_auth_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_auth_req_params);

		auth_param->auth_off = crda->crd_skip;
		auth_param->auth_len = crda->crd_len;
		auth_param->auth_res_addr = icv_paddr;
		auth_param->auth_res_sz = 0; /* XXX no digest verify */
		auth_param->hash_state_sz = 0;
		auth_param->u1.auth_partial_st_prefix = 0;
		auth_param->u2.aad_sz = 0;
	}

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "req_params", req_params_ptr, req_params_offset);
#endif
}