xref: /openbsd/sys/dev/ic/qwx.c (revision 9032038b)
1 /*	$OpenBSD: qwx.c,v 1.63 2024/06/11 10:06:35 stsp Exp $	*/
2 
3 /*
4  * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Copyright (c) 2018-2019 The Linux Foundation.
21  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc.
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted (subject to the limitations in the disclaimer
26  * below) provided that the following conditions are met:
27  *
28  *  * Redistributions of source code must retain the above copyright notice,
29  *    this list of conditions and the following disclaimer.
30  *
31  *  * Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  *
35  *  * Neither the name of [Owner Organization] nor the names of its
36  *    contributors may be used to endorse or promote products derived from
37  *    this software without specific prior written permission.
38  *
39  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
40  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
41  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
42  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
43  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
44  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
45  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
46  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
47  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
48  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
49  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
50  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 
53 /*
54  * Driver for Qualcomm Technologies 802.11ax chipset.
55  */
56 
57 #include "bpfilter.h"
58 
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/device.h>
62 #include <sys/rwlock.h>
63 #include <sys/systm.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 
67 #include <sys/refcnt.h>
68 #include <sys/task.h>
69 
70 #include <machine/bus.h>
71 #include <machine/intr.h>
72 
73 #ifdef __HAVE_FDT
74 #include <dev/ofw/openfirm.h>
75 #endif
76 
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80 #include <net/if.h>
81 #include <net/if_media.h>
82 
83 #include <netinet/in.h>
84 #include <netinet/if_ether.h>
85 
86 #include <net80211/ieee80211_var.h>
87 #include <net80211/ieee80211_radiotap.h>
88 
/* XXX linux porting goo */
#ifdef __LP64__
#define BITS_PER_LONG		64
#else
#define BITS_PER_LONG		32
#endif
/* Contiguous bit mask covering bits l..h inclusive (as in Linux). */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
/* Shift amount of a mask's lowest set bit; used by FIELD_GET/FIELD_PREP. */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/* Find-first-zero: 1-based index of the first clear bit in x. */
#define ffz(x) ffs(~(x))
/* Extract/insert a bit field described by mask _m. */
#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
#define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
#define BIT(x)               (1UL << (x))
/*
 * NB: unlike the Linux originals, these bit helpers operate on a plain
 * integer flags word, not on a bit array.
 */
#define test_bit(i, a)  ((a) & (1 << (i)))
#define clear_bit(i, a) ((a)) &= ~(1 << (i))
#define set_bit(i, a)   ((a)) |= (1 << (i))
/* Resolve a pointer to an embedded member back to its containing struct. */
#define container_of(ptr, type, member) ({			\
	const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
107 
108 /* #define QWX_DEBUG */
109 
110 #include <dev/ic/qwxreg.h>
111 #include <dev/ic/qwxvar.h>
112 
113 #ifdef QWX_DEBUG
114 uint32_t	qwx_debug = 0
115 		    | QWX_D_MISC
116 /*		    | QWX_D_MHI */
117 /*		    | QWX_D_QMI */
118 /*		    | QWX_D_WMI */
119 /*		    | QWX_D_HTC */
120 /*		    | QWX_D_HTT */
121 /*		    | QWX_D_MAC */
122 /*		    | QWX_D_MGMT */
123 		;
124 #endif
125 
126 int qwx_ce_init_pipes(struct qwx_softc *);
127 int qwx_hal_srng_src_num_free(struct qwx_softc *, struct hal_srng *, int);
128 int qwx_ce_per_engine_service(struct qwx_softc *, uint16_t);
129 int qwx_hal_srng_setup(struct qwx_softc *, enum hal_ring_type, int, int,
130     struct hal_srng_params *);
131 int qwx_ce_send(struct qwx_softc *, struct mbuf *, uint8_t, uint16_t);
132 int qwx_htc_connect_service(struct qwx_htc *, struct qwx_htc_svc_conn_req *,
133     struct qwx_htc_svc_conn_resp *);
134 void qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *, struct hal_srng *);
135 void qwx_wmi_free_dbring_caps(struct qwx_softc *);
136 int qwx_wmi_set_peer_param(struct qwx_softc *, uint8_t *, uint32_t,
137     uint32_t, uint32_t, uint32_t);
138 int qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *, int, int,
139     uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
140 const void **qwx_wmi_tlv_parse_alloc(struct qwx_softc *, const void *, size_t);
141 int qwx_core_init(struct qwx_softc *);
142 int qwx_qmi_event_server_arrive(struct qwx_softc *);
143 int qwx_mac_register(struct qwx_softc *);
144 int qwx_mac_start(struct qwx_softc *);
145 void qwx_mac_scan_finish(struct qwx_softc *);
146 int qwx_mac_mgmt_tx_wmi(struct qwx_softc *, struct qwx_vif *, uint8_t,
147     struct ieee80211_node *, struct mbuf *);
148 int qwx_dp_tx(struct qwx_softc *, struct qwx_vif *, uint8_t,
149     struct ieee80211_node *, struct mbuf *);
150 int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
151     enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
152     void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
153 void qwx_dp_rx_deliver_msdu(struct qwx_softc *, struct qwx_rx_msdu *);
154 void qwx_dp_service_mon_ring(void *);
155 void qwx_peer_frags_flush(struct qwx_softc *, struct ath11k_peer *);
156 int qwx_wmi_vdev_install_key(struct qwx_softc *,
157     struct wmi_vdev_install_key_arg *, uint8_t);
158 int qwx_dp_peer_rx_pn_replay_config(struct qwx_softc *, struct qwx_vif *,
159     struct ieee80211_node *, struct ieee80211_key *, int);
160 void qwx_setkey_clear(struct qwx_softc *);
161 
162 int qwx_scan(struct qwx_softc *);
163 void qwx_scan_abort(struct qwx_softc *);
164 int qwx_auth(struct qwx_softc *);
165 int qwx_deauth(struct qwx_softc *);
166 int qwx_run(struct qwx_softc *);
167 int qwx_run_stop(struct qwx_softc *);
168 
169 struct ieee80211_node *
qwx_node_alloc(struct ieee80211com * ic)170 qwx_node_alloc(struct ieee80211com *ic)
171 {
172 	struct qwx_node *nq;
173 
174 	nq = malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
175 	nq->peer.peer_id = HAL_INVALID_PEERID;
176 	return (struct ieee80211_node *)nq;
177 }
178 
/*
 * Interface init routine (ifp->if_init). Boots firmware via QRTR/QMI,
 * finishes net80211 attachment on the first successful call, and when
 * the interface is marked up, starts the MAC and begins a scan.
 * Called with sc->ioctl_rwl write-locked. Returns 0 or an errno.
 */
int
qwx_init(struct ifnet *ifp)
{
	int error;
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	/*
	 * There are several known hardware/software crypto issues
	 * on wcn6855 devices, firmware 0x1106196e. It is unclear
	 * if these are driver or firmware bugs.
	 *
	 * 1) Broadcast/Multicast frames will only be received on
	 *    encrypted networks if hardware crypto is used and a
	 *    CCMP group key is used. Otherwise such frames never
	 *    even trigger an interrupt. This breaks ARP and IPv6.
	 *    This issue is known to affect the Linux ath11k vendor
	 *    driver when software crypto mode is selected.
	 *    Workaround: Use hardware crypto on WPA2 networks.
	 *    However, even with hardware crypto broadcast frames
	 *    are never received if TKIP is used as the WPA2 group
	 *    cipher and we have no workaround for this.
	 *
	 * 2) Adding WEP keys for hardware crypto crashes the firmware.
	 *    Presumably, lack of WEP support is deliberate because the
	 *    Linux ath11k vendor driver rejects attempts to install
	 *    WEP keys to hardware.
	 *    Workaround: Use software crypto if WEP is enabled.
	 *    This suffers from the broadcast issues mentioned above.
	 *
	 * 3) A WPA1 group key handshake message from the AP is never
	 *    received if hardware crypto is used.
	 *    Workaround: Use software crypto if WPA1 is enabled.
	 *    This suffers from the broadcast issues mentioned above,
	 *    even on WPA2 networks when WPA1 and WPA2 are both enabled.
	 *    On OpenBSD, WPA1 is disabled by default.
	 *
	 * The only known fully working configurations are unencrypted
	 * networks, and WPA2/CCMP-only networks provided WPA1 remains
	 * disabled.
	 */
	if ((ic->ic_flags & IEEE80211_F_WEPON) ||
	    (ic->ic_rsnprotos & IEEE80211_PROTO_WPA))
		sc->crypto_mode = ATH11K_CRYPT_MODE_SW;
	else
		sc->crypto_mode = ATH11K_CRYPT_MODE_HW;
	sc->frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
	ic->ic_state = IEEE80211_S_INIT;
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;

	error = qwx_core_init(sc);
	if (error)
		return error;

	memset(&sc->qrtr_server, 0, sizeof(sc->qrtr_server));
	sc->qrtr_server.node = QRTR_NODE_BCAST;

	/*
	 * Wait for QRTR init to be done: sleep until qrtr_server.node
	 * changes from its broadcast placeholder. Presumably the QRTR
	 * receive path fills in the real server info and wakes us up
	 * (the wakeup is not in this file — verify); give up after 5s
	 * per iteration.
	 */
	while (sc->qrtr_server.node == QRTR_NODE_BCAST) {
		error = tsleep_nsec(&sc->qrtr_server, 0, "qwxqrtr",
		    SEC_TO_NSEC(5));
		if (error) {
			printf("%s: qrtr init timeout\n", sc->sc_dev.dv_xname);
			return error;
		}
	}

	/* Proceed with the QMI handshake now that the server is known. */
	error = qwx_qmi_event_server_arrive(sc);
	if (error)
		return error;

	if (sc->attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(ic->ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
	} else {
		/* First successful init: finish net80211 attachment. */
		sc->attached = 1;

		/* Configure channel information obtained from firmware. */
		ieee80211_channel_init(ifp);

		/* Configure initial MAC address. */
		error = if_setlladdr(ifp, ic->ic_myaddr);
		if (error)
			printf("%s: could not set MAC address %s: %d\n",
			    sc->sc_dev.dv_xname, ether_sprintf(ic->ic_myaddr),
			    error);

		ieee80211_media_init(ifp, qwx_media_change,
		    ieee80211_media_status);
	}

	if (ifp->if_flags & IFF_UP) {
		refcnt_init(&sc->task_refs);

		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_flags |= IFF_RUNNING;

		error = qwx_mac_start(sc);
		if (error)
			return error;

		ieee80211_begin_scan(ifp);
	}

	return 0;
}
289 
290 void
qwx_add_task(struct qwx_softc * sc,struct taskq * taskq,struct task * task)291 qwx_add_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
292 {
293 	int s = splnet();
294 
295 	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
296 		splx(s);
297 		return;
298 	}
299 
300 	refcnt_take(&sc->task_refs);
301 	if (!task_add(taskq, task))
302 		refcnt_rele_wake(&sc->task_refs);
303 	splx(s);
304 }
305 
306 void
qwx_del_task(struct qwx_softc * sc,struct taskq * taskq,struct task * task)307 qwx_del_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
308 {
309 	if (task_del(taskq, task))
310 		refcnt_rele(&sc->task_refs);
311 }
312 
/*
 * Interface stop routine. Cancels pending work, waits for in-flight
 * tasks to drain, resets net80211 to INIT state and powers off the
 * hardware. Must be called with sc->ioctl_rwl write-locked.
 */
void
qwx_stop(struct ifnet *ifp)
{
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	timeout_del(&sc->mon_reap_timer);

	/* Disallow new tasks. */
	set_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	qwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	qwx_del_task(sc, systq, &sc->setkey_task);
	refcnt_finalize(&sc->task_refs, "qwxstop");

	/* Release node references held by key commands that never ran. */
	qwx_setkey_clear(sc);

	clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	ifp->if_timer = sc->sc_tx_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Go to INIT directly via the saved net80211 newstate hook. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;
	sc->pdevs_active = 0;

	/* power off hardware */
	qwx_core_deinit(sc);

	splx(s);
}
353 
354 void
qwx_free_firmware(struct qwx_softc * sc)355 qwx_free_firmware(struct qwx_softc *sc)
356 {
357 	int i;
358 
359 	for (i = 0; i < nitems(sc->fw_img); i++) {
360 		free(sc->fw_img[i].data, M_DEVBUF, sc->fw_img[i].size);
361 		sc->fw_img[i].data = NULL;
362 		sc->fw_img[i].size = 0;
363 	}
364 }
365 
366 int
qwx_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)367 qwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
368 {
369 	struct qwx_softc *sc = ifp->if_softc;
370 	int s, err = 0;
371 
372 	/*
373 	 * Prevent processes from entering this function while another
374 	 * process is tsleep'ing in it.
375 	 */
376 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
377 	if (err)
378 		return err;
379 	s = splnet();
380 
381 	switch (cmd) {
382 	case SIOCSIFADDR:
383 		ifp->if_flags |= IFF_UP;
384 		/* FALLTHROUGH */
385 	case SIOCSIFFLAGS:
386 		if (ifp->if_flags & IFF_UP) {
387 			if (!(ifp->if_flags & IFF_RUNNING)) {
388 				/* Force reload of firmware image from disk. */
389 				qwx_free_firmware(sc);
390 				err = qwx_init(ifp);
391 			}
392 		} else {
393 			if (ifp->if_flags & IFF_RUNNING)
394 				qwx_stop(ifp);
395 		}
396 		break;
397 
398 	default:
399 		err = ieee80211_ioctl(ifp, cmd, data);
400 	}
401 
402 	if (err == ENETRESET) {
403 		err = 0;
404 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
405 		    (IFF_UP | IFF_RUNNING)) {
406 			qwx_stop(ifp);
407 			err = qwx_init(ifp);
408 		}
409 	}
410 
411 	splx(s);
412 	rw_exit(&sc->ioctl_rwl);
413 
414 	return err;
415 }
416 
417 int
qwx_tx(struct qwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)418 qwx_tx(struct qwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
419 {
420 	struct ieee80211_frame *wh;
421 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
422 	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
423 	uint8_t frame_type;
424 
425 	wh = mtod(m, struct ieee80211_frame *);
426 	frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
427 
428 #if NBPFILTER > 0
429 	if (sc->sc_drvbpf != NULL) {
430 		struct qwx_tx_radiotap_header *tap = &sc->sc_txtap;
431 
432 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
433 		    m, BPF_DIRECTION_OUT);
434 	}
435 #endif
436 
437 	if (frame_type == IEEE80211_FC0_TYPE_MGT)
438 		return qwx_mac_mgmt_tx_wmi(sc, arvif, pdev_id, ni, m);
439 
440 	return qwx_dp_tx(sc, arvif, pdev_id, ni, m);
441 }
442 
/*
 * Interface start routine: drain queued management frames and, while
 * associated, data frames, handing each to qwx_tx(). Backs off when
 * the hardware TX queues are full (sc->qfullmsk).
 */
void
qwx_start(struct ifnet *ifp)
{
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* net80211 stores the destination node in ph_cookie. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		/* Data frames require an association. */
		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous in the mbuf. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* Encapsulate into an 802.11 frame; also resolves ni. */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (qwx_tx(sc, m, ni) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog while a transmission is outstanding. */
		if (ifp->if_flags & IFF_UP)
			ifp->if_timer = 1;
	}
}
505 
506 void
qwx_watchdog(struct ifnet * ifp)507 qwx_watchdog(struct ifnet *ifp)
508 {
509 	struct qwx_softc *sc = ifp->if_softc;
510 
511 	ifp->if_timer = 0;
512 
513 	if (sc->sc_tx_timer > 0) {
514 		if (--sc->sc_tx_timer == 0) {
515 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
516 			if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
517 				task_add(systq, &sc->init_task);
518 			ifp->if_oerrors++;
519 			return;
520 		}
521 		ifp->if_timer = 1;
522 	}
523 
524 	ieee80211_watchdog(ifp);
525 }
526 
527 int
qwx_media_change(struct ifnet * ifp)528 qwx_media_change(struct ifnet *ifp)
529 {
530 	int err;
531 
532 	err = ieee80211_media_change(ifp);
533 	if (err != ENETRESET)
534 		return err;
535 
536 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
537 	    (IFF_UP | IFF_RUNNING)) {
538 		qwx_stop(ifp);
539 		err = qwx_init(ifp);
540 	}
541 
542 	return err;
543 }
544 
545 int
qwx_queue_setkey_cmd(struct ieee80211com * ic,struct ieee80211_node * ni,struct ieee80211_key * k,int cmd)546 qwx_queue_setkey_cmd(struct ieee80211com *ic, struct ieee80211_node *ni,
547     struct ieee80211_key *k, int cmd)
548 {
549 	struct qwx_softc *sc = ic->ic_softc;
550 	struct qwx_setkey_task_arg *a;
551 
552 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg) ||
553 	    k->k_id > WMI_MAX_KEY_INDEX)
554 		return ENOSPC;
555 
556 	a = &sc->setkey_arg[sc->setkey_cur];
557 	a->ni = ieee80211_ref_node(ni);
558 	a->k = k;
559 	a->cmd = cmd;
560 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
561 	sc->setkey_nkeys++;
562 	qwx_add_task(sc, systq, &sc->setkey_task);
563 	return EBUSY;
564 }
565 
566 int
qwx_set_key(struct ieee80211com * ic,struct ieee80211_node * ni,struct ieee80211_key * k)567 qwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
568     struct ieee80211_key *k)
569 {
570 	struct qwx_softc *sc = ic->ic_softc;
571 
572 	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
573 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
574 	    k->k_cipher == IEEE80211_CIPHER_WEP104)
575 		return ieee80211_set_key(ic, ni, k);
576 
577 	return qwx_queue_setkey_cmd(ic, ni, k, QWX_ADD_KEY);
578 }
579 
580 void
qwx_delete_key(struct ieee80211com * ic,struct ieee80211_node * ni,struct ieee80211_key * k)581 qwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
582     struct ieee80211_key *k)
583 {
584 	struct qwx_softc *sc = ic->ic_softc;
585 
586 	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags) ||
587 	    k->k_cipher == IEEE80211_CIPHER_WEP40 ||
588 	    k->k_cipher == IEEE80211_CIPHER_WEP104) {
589 		ieee80211_delete_key(ic, ni, k);
590 		return;
591 	}
592 
593 	if (ic->ic_state != IEEE80211_S_RUN) {
594 		/* Keys removed implicitly when firmware station is removed. */
595 		return;
596 	}
597 
598 	/*
599 	 * net80211 calls us with a NULL node when deleting group keys,
600 	 * but firmware expects a MAC address in the command.
601 	 */
602 	if (ni == NULL)
603 		ni = ic->ic_bss;
604 
605 	qwx_queue_setkey_cmd(ic, ni, k, QWX_DEL_KEY);
606 }
607 
/*
 * Install (or, with delete_key set, remove) a crypto key in firmware
 * via the WMI vdev-install-key command and wait for its completion.
 * 'flags' carries WMI_KEY_GROUP or WMI_KEY_PAIRWISE. Returns 0 when
 * hardware crypto is disabled, an errno on command failure or
 * unsupported cipher, -1 on completion timeout, otherwise the
 * firmware's install_key_status.
 */
int
qwx_wmi_install_key_cmd(struct qwx_softc *sc, struct qwx_vif *arvif,
    uint8_t *macaddr, struct ieee80211_key *k, uint32_t flags,
    int delete_key)
{
	int ret;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = k->k_id,
		.key_len = k->k_len,
		.key_data = k->k_key,
		.key_flags = flags,
		.macaddr = macaddr,
	};
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
#ifdef notyet
	lockdep_assert_held(&arvif->ar->conf_mutex);

	reinit_completion(&ar->install_key_done);
#endif
	/* Nothing to do when net80211 does crypto in software. */
	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		return 0;

	if (delete_key) {
		/* Deletion is an install of WMI_CIPHER_NONE with no data. */
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	} else {
		switch (k->k_cipher) {
		case IEEE80211_CIPHER_CCMP:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
#if 0
			/* TODO: Re-check if flag is valid */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
#endif
			break;
		case IEEE80211_CIPHER_TKIP:
			arg.key_cipher = WMI_CIPHER_TKIP;
			arg.key_txmic_len = 8;
			arg.key_rxmic_len = 8;
			break;
#if 0
		case WLAN_CIPHER_SUITE_CCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_CCM;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			arg.key_cipher = WMI_CIPHER_AES_GCM;
			break;
#endif
		default:
			printf("%s: cipher %u is not supported\n",
			    sc->sc_dev.dv_xname, k->k_cipher);
			return EOPNOTSUPP;
		}
#if 0
		if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
				      IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
#endif
	}

	/*
	 * install_key_done is presumably set (and a wakeup issued) by
	 * the WMI completion event handler elsewhere in the driver —
	 * verify. We poll it with a 1-second sleep per iteration.
	 */
	sc->install_key_done = 0;
	ret = qwx_wmi_vdev_install_key(sc, &arg, pdev_id);
	if (ret)
		return ret;

	while (!sc->install_key_done) {
		ret = tsleep_nsec(&sc->install_key_done, 0, "qwxinstkey",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: install key timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	return sc->install_key_status;
}
686 
687 int
qwx_add_sta_key(struct qwx_softc * sc,struct ieee80211_node * ni,struct ieee80211_key * k)688 qwx_add_sta_key(struct qwx_softc *sc, struct ieee80211_node *ni,
689     struct ieee80211_key *k)
690 {
691 	struct ieee80211com *ic = &sc->sc_ic;
692 	struct qwx_node *nq = (struct qwx_node *)ni;
693 	struct ath11k_peer *peer = &nq->peer;
694 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
695 	int ret = 0;
696 	uint32_t flags = 0;
697 	const int want_keymask = (QWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
698 	    QWX_NODE_FLAG_HAVE_GROUP_KEY);
699 
700 	/*
701 	 * Flush the fragments cache during key (re)install to
702 	 * ensure all frags in the new frag list belong to the same key.
703 	 */
704 	qwx_peer_frags_flush(sc, peer);
705 
706 	if (k->k_flags & IEEE80211_KEY_GROUP)
707 		flags |= WMI_KEY_GROUP;
708 	else
709 		flags |= WMI_KEY_PAIRWISE;
710 
711 	ret = qwx_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, flags, 0);
712 	if (ret) {
713 		printf("%s: installing crypto key failed (%d)\n",
714 		    sc->sc_dev.dv_xname, ret);
715 		return ret;
716 	}
717 
718 	ret = qwx_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 0);
719 	if (ret) {
720 		printf("%s: failed to offload PN replay detection %d\n",
721 		    sc->sc_dev.dv_xname, ret);
722 		return ret;
723 	}
724 
725 	if (k->k_flags & IEEE80211_KEY_GROUP)
726 		nq->flags |= QWX_NODE_FLAG_HAVE_GROUP_KEY;
727 	else
728 		nq->flags |= QWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
729 
730 	if ((nq->flags & want_keymask) == want_keymask) {
731 		DPRINTF("marking port %s valid\n",
732 		    ether_sprintf(ni->ni_macaddr));
733 		ni->ni_port_valid = 1;
734 		ieee80211_set_link_state(ic, LINK_STATE_UP);
735 	}
736 
737 	return 0;
738 }
739 
740 int
qwx_del_sta_key(struct qwx_softc * sc,struct ieee80211_node * ni,struct ieee80211_key * k)741 qwx_del_sta_key(struct qwx_softc *sc, struct ieee80211_node *ni,
742     struct ieee80211_key *k)
743 {
744 	struct qwx_node *nq = (struct qwx_node *)ni;
745 	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
746 	int ret = 0;
747 
748 	ret = qwx_wmi_install_key_cmd(sc, arvif, ni->ni_macaddr, k, 0, 1);
749 	if (ret) {
750 		printf("%s: deleting crypto key failed (%d)\n",
751 		    sc->sc_dev.dv_xname, ret);
752 		return ret;
753 	}
754 
755 	ret = qwx_dp_peer_rx_pn_replay_config(sc, arvif, ni, k, 1);
756 	if (ret) {
757 		printf("%s: failed to disable PN replay detection %d\n",
758 		    sc->sc_dev.dv_xname, ret);
759 		return ret;
760 	}
761 
762 	if (k->k_flags & IEEE80211_KEY_GROUP)
763 		nq->flags &= ~QWX_NODE_FLAG_HAVE_GROUP_KEY;
764 	else
765 		nq->flags &= ~QWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
766 
767 	return 0;
768 }
769 
/*
 * Task worker that drains the setkey_arg ring in FIFO order, issuing
 * the queued add/delete key commands. Stops early after the first
 * error or when the driver is flushing; remaining queued entries keep
 * their node references until qwx_setkey_clear() releases them.
 */
void
qwx_setkey_task(void *arg)
{
	struct qwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		KASSERT(a->cmd == QWX_ADD_KEY || a->cmd == QWX_DEL_KEY);
		/* Key commands are only meaningful while associated. */
		if (ic->ic_state == IEEE80211_S_RUN) {
			if (a->cmd == QWX_ADD_KEY)
				err = qwx_add_sta_key(sc, a->ni, a->k);
			else
				err = qwx_del_sta_key(sc, a->ni, a->k);
		}
		/* Drop the reference taken by qwx_queue_setkey_cmd(). */
		ieee80211_release_node(ic, a->ni);
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	/* Drop the task reference taken by qwx_add_task(). */
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
800 
801 void
qwx_setkey_clear(struct qwx_softc * sc)802 qwx_setkey_clear(struct qwx_softc *sc)
803 {
804 	struct ieee80211com *ic = &sc->sc_ic;
805 	struct qwx_setkey_task_arg *a;
806 
807 	while (sc->setkey_nkeys > 0) {
808 		a = &sc->setkey_arg[sc->setkey_tail];
809 		ieee80211_release_node(ic, a->ni);
810 		a->ni = NULL;
811 		sc->setkey_tail = (sc->setkey_tail + 1) %
812 		    nitems(sc->setkey_arg);
813 		sc->setkey_nkeys--;
814 	}
815 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
816 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
817 }
818 
/*
 * net80211 state-change hook. Records the requested transition and
 * defers the real work to qwx_newstate_task() on the driver's state
 * taskq, since firmware commands need to sleep. The saved
 * sc_newstate() is chained from the task once the transition is done.
 */
int
qwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_softc *sc = ifp->if_softc;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning in which case a SCAN -> SCAN transition
	 * triggers another scan iteration. And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
	/* Leaving RUN: cancel pending key work and drop its node refs. */
	if (ic->ic_state == IEEE80211_S_RUN) {
#if 0
		qwx_del_task(sc, systq, &sc->ba_task);
#endif
		qwx_del_task(sc, systq, &sc->setkey_task);
		qwx_setkey_clear(sc);
#if 0
		qwx_del_task(sc, systq, &sc->bgscan_done_task);
#endif
	}

	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	qwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
852 
/*
 * Worker carrying out the transition requested via qwx_newstate().
 * May sleep in firmware commands. First unwinds through intermediate
 * states when moving downwards (RUN -> run_stop, ASSOC/AUTH ->
 * deauth), then performs the upward step, and finally chains to
 * net80211's sc_newstate() — unless an error occurred, in which case
 * the init task is scheduled to reset the device.
 */
void
qwx_newstate_task(void *arg)
{
	struct qwx_softc *sc = (struct qwx_softc *)arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int err = 0, s = splnet();

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		/* qwx_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			if (sc->scan.state != ATH11K_SCAN_IDLE) {
				/* Scan still in progress; nothing to do. */
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = qwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = qwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if qwx_stop() was called while we were sleeping. */
		if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = qwx_scan(sc);
		if (err)
			break;
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: %s -> %s\n", ifp->if_xname,
			    ieee80211_state_name[ic->ic_state],
			    ieee80211_state_name[IEEE80211_S_SCAN]);
#if 0
		if ((sc->sc_flags & QWX_FLAG_BGSCAN) == 0) {
#endif
			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
			ieee80211_node_cleanup(ic, ic->ic_bss);
#if 0
		}
#endif
		/* Scan completes asynchronously; don't chain sc_newstate(). */
		ic->ic_state = IEEE80211_S_SCAN;
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = qwx_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = qwx_run(sc);
		break;
	}
out:
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, sc->ns_arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
957 
/* autoconf(9) glue: driver name "qwx", network-interface device class. */
struct cfdriver qwx_cd = {
	NULL, "qwx", DV_IFNET
};
961 
962 void
qwx_init_wmi_config_qca6390(struct qwx_softc * sc,struct target_resource_config * config)963 qwx_init_wmi_config_qca6390(struct qwx_softc *sc,
964     struct target_resource_config *config)
965 {
966 	config->num_vdevs = 4;
967 	config->num_peers = 16;
968 	config->num_tids = 32;
969 
970 	config->num_offload_peers = 3;
971 	config->num_offload_reorder_buffs = 3;
972 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
973 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
974 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
975 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
976 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
977 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
978 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
979 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
980 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
981 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
982 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
983 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
984 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
985 	config->num_mcast_groups = 0;
986 	config->num_mcast_table_elems = 0;
987 	config->mcast2ucast_mode = 0;
988 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
989 	config->num_wds_entries = 0;
990 	config->dma_burst_size = 0;
991 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
992 	config->vow_config = TARGET_VOW_CONFIG;
993 	config->gtk_offload_max_vdev = 2;
994 	config->num_msdu_desc = 0x400;
995 	config->beacon_tx_offload_max_vdev = 2;
996 	config->rx_batchmode = TARGET_RX_BATCHMODE;
997 
998 	config->peer_map_unmap_v2_support = 0;
999 	config->use_pdev_id = 1;
1000 	config->max_frag_entries = 0xa;
1001 	config->num_tdls_vdevs = 0x1;
1002 	config->num_tdls_conn_table_entries = 8;
1003 	config->beacon_tx_offload_max_vdev = 0x2;
1004 	config->num_multicast_filter_entries = 0x20;
1005 	config->num_wow_filters = 0x16;
1006 	config->num_keep_alive_pattern = 0;
1007 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
1008 }
1009 
/*
 * One-time REO (reorder engine) setup for IPQ8074-family chips:
 * steer defragmentation to the REO2SW1 ring, enable reorder-list
 * aging and flushing, program the per-queue aging timeouts, and
 * spread RX flows across the four SW rings via the hash ring map.
 */
void
qwx_hw_ipq8074_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;
	/* Each hash entry uses three bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 3 |
	    HAL_HASH_ROUTING_RING_SW3 << 6 |
	    HAL_HASH_ROUTING_RING_SW4 << 9 |
	    HAL_HASH_ROUTING_RING_SW1 << 12 |
	    HAL_HASH_ROUTING_RING_SW2 << 15 |
	    HAL_HASH_ROUTING_RING_SW3 << 18 |
	    HAL_HASH_ROUTING_RING_SW4 << 21;

	/* Fragment destination ring and aging enables live in GEN_ENABLE. */
	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);

	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* Same aging timeout for all four queue classes. */
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	/* Program the same hash->ring routing into all four control regs. */
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
}
1052 
1053 void
qwx_init_wmi_config_ipq8074(struct qwx_softc * sc,struct target_resource_config * config)1054 qwx_init_wmi_config_ipq8074(struct qwx_softc *sc,
1055     struct target_resource_config *config)
1056 {
1057 	config->num_vdevs = sc->num_radios * TARGET_NUM_VDEVS(sc);
1058 
1059 	if (sc->num_radios == 2) {
1060 		config->num_peers = TARGET_NUM_PEERS(sc, DBS);
1061 		config->num_tids = TARGET_NUM_TIDS(sc, DBS);
1062 	} else if (sc->num_radios == 3) {
1063 		config->num_peers = TARGET_NUM_PEERS(sc, DBS_SBS);
1064 		config->num_tids = TARGET_NUM_TIDS(sc, DBS_SBS);
1065 	} else {
1066 		/* Control should not reach here */
1067 		config->num_peers = TARGET_NUM_PEERS(sc, SINGLE);
1068 		config->num_tids = TARGET_NUM_TIDS(sc, SINGLE);
1069 	}
1070 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
1071 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
1072 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
1073 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
1074 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
1075 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
1076 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
1077 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
1078 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
1079 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
1080 
1081 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
1082 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
1083 	else
1084 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
1085 
1086 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
1087 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
1088 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
1089 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
1090 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
1091 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
1092 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
1093 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
1094 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
1095 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
1096 	config->rx_skip_defrag_timeout_dup_detection_check =
1097 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
1098 	config->vow_config = TARGET_VOW_CONFIG;
1099 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
1100 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
1101 	config->beacon_tx_offload_max_vdev = sc->num_radios * TARGET_MAX_BCN_OFFLD;
1102 	config->rx_batchmode = TARGET_RX_BATCHMODE;
1103 	config->peer_map_unmap_v2_support = 1;
1104 	config->twt_ap_pdev_count = sc->num_radios;
1105 	config->twt_ap_sta_count = 1000;
1106 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
1107 	config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
1108 	config->ema_max_vap_cnt = sc->num_radios;
1109 	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
1110 	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
1111 }
1112 
/*
 * One-time REO (reorder engine) setup for WCN6855: enable reorder-list
 * aging/flushing, steer defragmentation to the REO2SW1 ring, program
 * per-queue aging timeouts and the RX hash ring routing.
 */
void
qwx_hw_wcn6855_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;
	/* Each hash entry uses four bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 4 |
	    HAL_HASH_ROUTING_RING_SW3 << 8 |
	    HAL_HASH_ROUTING_RING_SW4 << 12 |
	    HAL_HASH_ROUTING_RING_SW1 << 16 |
	    HAL_HASH_ROUTING_RING_SW2 << 20 |
	    HAL_HASH_ROUTING_RING_SW3 << 24 |
	    HAL_HASH_ROUTING_RING_SW4 << 28;

	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* On this chip the fragment destination ring lives in MISC_CTL. */
	val = sc->ops.read32(sc, reo_base + HAL_REO1_MISC_CTL(sc));
	val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
	val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1);
	sc->ops.write32(sc, reo_base + HAL_REO1_MISC_CTL(sc), val);

	/* Same aging timeout for all four queue classes. */
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	/*
	 * NOTE(review): only IX_2/IX_3 are programmed here (raw value, no
	 * FIELD_PREP); IX_0/IX_1 presumably keep their reset defaults --
	 * this matches the upstream ath11k driver, but confirm.
	 */
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    ring_hash_map);
}
1153 
/*
 * One-time REO (reorder engine) setup for IPQ5018: steer
 * defragmentation to the REO2SW1 ring, enable reorder-list aging and
 * flushing, program per-queue aging timeouts and RX hash routing.
 */
void
qwx_hw_ipq5018_reo_setup(struct qwx_softc *sc)
{
	uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	uint32_t val;

	/* Each hash entry uses four bits to map to a particular ring. */
	uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
	    HAL_HASH_ROUTING_RING_SW2 << 4 |
	    HAL_HASH_ROUTING_RING_SW3 << 8 |
	    HAL_HASH_ROUTING_RING_SW4 << 12 |
	    HAL_HASH_ROUTING_RING_SW1 << 16 |
	    HAL_HASH_ROUTING_RING_SW2 << 20 |
	    HAL_HASH_ROUTING_RING_SW3 << 24 |
	    HAL_HASH_ROUTING_RING_SW4 << 28;

	val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);

	val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
	val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
	    HAL_SRNG_RING_ID_REO2SW1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
	    FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
	sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);

	/* Same aging timeout for all four queue classes. */
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);
	sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
	    HAL_DEFAULT_REO_TIMEOUT_USEC);

	/* Hash routing written raw (no FIELD_PREP) into all four regs. */
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
	    ring_hash_map);
	sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
	    ring_hash_map);
}
1197 
int
qwx_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	/* On IPQ8074-family hardware, pdev IDs map 1:1 onto MAC IDs. */
	int pdev_id = mac_id;

	return pdev_id;
}
1203 
int
qwx_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	/* IPQ8074-family hardware always uses SRNG 0. */
	int srng_id = 0;

	return srng_id;
}
1209 
int
qwx_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	/* QCA6390-family hardware exposes a single pdev, ID 0. */
	int pdev_id = 0;

	return pdev_id;
}
1215 
int
qwx_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	/* QCA6390-family hardware maps MAC IDs 1:1 onto SRNG IDs. */
	int srng_id = mac_id;

	return srng_id;
}
1221 
1222 int
qwx_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc * desc)1223 qwx_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1224 {
1225 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
1226 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1227 }
1228 
1229 uint8_t
qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc * desc)1230 qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1231 {
1232 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1233 	    le32toh(desc->u.ipq8074.msdu_end.info2));
1234 }
1235 
1236 uint8_t *
qwx_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc * desc)1237 qwx_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1238 {
1239 	return desc->u.ipq8074.hdr_status;
1240 }
1241 
1242 int
qwx_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc * desc)1243 qwx_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1244 {
1245 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1246 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1247 }
1248 
1249 uint32_t
qwx_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc * desc)1250 qwx_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1251 {
1252 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1253 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1254 }
1255 
1256 uint8_t
qwx_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc * desc)1257 qwx_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1258 {
1259 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1260 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1261 }
1262 
1263 uint8_t
qwx_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc * desc)1264 qwx_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1265 {
1266 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1267 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1268 }
1269 
1270 int
qwx_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc * desc)1271 qwx_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1272 {
1273 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1274 	    le32toh(desc->u.ipq8074.msdu_start.info2));
1275 }
1276 
1277 int
qwx_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc * desc)1278 qwx_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1279 {
1280 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1281 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1282 }
1283 
1284 int
qwx_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc * desc)1285 qwx_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1286 {
1287 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1288 	      le32toh(desc->u.ipq8074.mpdu_start.info1));
1289 }
1290 
1291 uint16_t
qwx_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc * desc)1292 qwx_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1293 {
1294 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1295 	    le32toh(desc->u.ipq8074.mpdu_start.info1));
1296 }
1297 
1298 uint16_t
qwx_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc * desc)1299 qwx_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1300 {
1301 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1302 	    le32toh(desc->u.ipq8074.msdu_start.info1));
1303 }
1304 
1305 uint8_t
qwx_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc * desc)1306 qwx_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1307 {
1308 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1309 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1310 }
1311 
1312 uint8_t
qwx_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc * desc)1313 qwx_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1314 {
1315 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1316 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1317 }
1318 
1319 uint8_t
qwx_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc * desc)1320 qwx_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1321 {
1322 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1323 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1324 }
1325 
1326 uint32_t
qwx_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc * desc)1327 qwx_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1328 {
1329 	return le32toh(desc->u.ipq8074.msdu_start.phy_meta_data);
1330 }
1331 
1332 uint8_t
qwx_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc * desc)1333 qwx_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1334 {
1335 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1336 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1337 }
1338 
1339 uint8_t
qwx_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc * desc)1340 qwx_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1341 {
1342 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1343 	    le32toh(desc->u.ipq8074.msdu_start.info3));
1344 }
1345 
1346 uint8_t
qwx_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc * desc)1347 qwx_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1348 {
1349 	return FIELD_GET(RX_MPDU_START_INFO2_TID,
1350 	    le32toh(desc->u.ipq8074.mpdu_start.info2));
1351 }
1352 
1353 uint16_t
qwx_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc * desc)1354 qwx_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1355 {
1356 	return le16toh(desc->u.ipq8074.mpdu_start.sw_peer_id);
1357 }
1358 
1359 void
qwx_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc * fdesc,struct hal_rx_desc * ldesc)1360 qwx_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1361 				       struct hal_rx_desc *ldesc)
1362 {
1363 	memcpy((uint8_t *)&fdesc->u.ipq8074.msdu_end, (uint8_t *)&ldesc->u.ipq8074.msdu_end,
1364 	       sizeof(struct rx_msdu_end_ipq8074));
1365 	memcpy((uint8_t *)&fdesc->u.ipq8074.attention, (uint8_t *)&ldesc->u.ipq8074.attention,
1366 	       sizeof(struct rx_attention));
1367 	memcpy((uint8_t *)&fdesc->u.ipq8074.mpdu_end, (uint8_t *)&ldesc->u.ipq8074.mpdu_end,
1368 	       sizeof(struct rx_mpdu_end));
1369 }
1370 
1371 uint32_t
qwx_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc * desc)1372 qwx_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1373 {
1374 	return FIELD_GET(HAL_TLV_HDR_TAG,
1375 	    le32toh(desc->u.ipq8074.mpdu_start_tag));
1376 }
1377 
1378 uint32_t
qwx_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc * desc)1379 qwx_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1380 {
1381 	return le16toh(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
1382 }
1383 
1384 void
qwx_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc * desc,uint16_t len)1385 qwx_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1386 {
1387 	uint32_t info = le32toh(desc->u.ipq8074.msdu_start.info1);
1388 
1389 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1390 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1391 
1392 	desc->u.ipq8074.msdu_start.info1 = htole32(info);
1393 }
1394 
/*
 * Chip-independent wrapper: ask the attached chip's hw_ops whether
 * this RX descriptor marks the first MSDU of an MPDU.
 */
int
qwx_dp_rx_h_msdu_end_first_msdu(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
1400 
1401 int
qwx_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc * desc)1402 qwx_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1403 {
1404 	return le32toh(desc->u.ipq8074.mpdu_start.info1) &
1405 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1406 }
1407 
1408 uint8_t *
qwx_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc * desc)1409 qwx_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1410 {
1411 	return desc->u.ipq8074.mpdu_start.addr2;
1412 }
1413 
1414 struct rx_attention *
qwx_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc * desc)1415 qwx_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
1416 {
1417 	return &desc->u.ipq8074.attention;
1418 }
1419 
1420 uint8_t *
qwx_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc * desc)1421 qwx_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1422 {
1423 	return &desc->u.ipq8074.msdu_payload[0];
1424 }
1425 
1426 int
qwx_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc * desc)1427 qwx_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1428 {
1429 	return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
1430 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1431 }
1432 
1433 int
qwx_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc * desc)1434 qwx_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1435 {
1436 	return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
1437 	      le16toh(desc->u.qcn9074.msdu_end.info4));
1438 }
1439 
1440 uint8_t
qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc * desc)1441 qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1442 {
1443 	return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
1444 	    le16toh(desc->u.qcn9074.msdu_end.info4));
1445 }
1446 
1447 uint8_t *
qwx_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc * desc)1448 qwx_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1449 {
1450 	return desc->u.qcn9074.hdr_status;
1451 }
1452 
1453 int
qwx_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc * desc)1454 qwx_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1455 {
1456 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1457 	       RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
1458 }
1459 
1460 uint32_t
qwx_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc * desc)1461 qwx_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1462 {
1463 	return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
1464 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1465 }
1466 
1467 uint8_t
qwx_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc * desc)1468 qwx_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1469 {
1470 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1471 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1472 }
1473 
1474 uint8_t
qwx_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc * desc)1475 qwx_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1476 {
1477 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1478 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1479 }
1480 
1481 int
qwx_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc * desc)1482 qwx_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
1483 {
1484 	return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
1485 	    le32toh(desc->u.qcn9074.msdu_start.info2));
1486 }
1487 
1488 int
qwx_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc * desc)1489 qwx_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1490 {
1491 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
1492 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1493 }
1494 
1495 int
qwx_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc * desc)1496 qwx_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1497 {
1498 	return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
1499 	      le32toh(desc->u.qcn9074.mpdu_start.info11));
1500 }
1501 
1502 uint16_t
qwx_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc * desc)1503 qwx_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1504 {
1505 	return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
1506 	    le32toh(desc->u.qcn9074.mpdu_start.info11));
1507 }
1508 
1509 uint16_t
qwx_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc * desc)1510 qwx_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1511 {
1512 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1513 	    le32toh(desc->u.qcn9074.msdu_start.info1));
1514 }
1515 
1516 uint8_t
qwx_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc * desc)1517 qwx_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1518 {
1519 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1520 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1521 }
1522 
1523 uint8_t
qwx_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc * desc)1524 qwx_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1525 {
1526 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1527 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1528 }
1529 
1530 uint8_t
qwx_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc * desc)1531 qwx_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1532 {
1533 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1534 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1535 }
1536 
1537 uint32_t
qwx_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc * desc)1538 qwx_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1539 {
1540 	return le32toh(desc->u.qcn9074.msdu_start.phy_meta_data);
1541 }
1542 
1543 uint8_t
qwx_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc * desc)1544 qwx_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1545 {
1546 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1547 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1548 }
1549 
1550 uint8_t
qwx_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc * desc)1551 qwx_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1552 {
1553 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1554 	    le32toh(desc->u.qcn9074.msdu_start.info3));
1555 }
1556 
1557 uint8_t
qwx_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc * desc)1558 qwx_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1559 {
1560 	return FIELD_GET(RX_MPDU_START_INFO9_TID,
1561 	    le32toh(desc->u.qcn9074.mpdu_start.info9));
1562 }
1563 
1564 uint16_t
qwx_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc * desc)1565 qwx_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1566 {
1567 	return le16toh(desc->u.qcn9074.mpdu_start.sw_peer_id);
1568 }
1569 
1570 void
qwx_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc * fdesc,struct hal_rx_desc * ldesc)1571 qwx_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1572 				       struct hal_rx_desc *ldesc)
1573 {
1574 	memcpy((uint8_t *)&fdesc->u.qcn9074.msdu_end, (uint8_t *)&ldesc->u.qcn9074.msdu_end,
1575 	       sizeof(struct rx_msdu_end_qcn9074));
1576 	memcpy((uint8_t *)&fdesc->u.qcn9074.attention, (uint8_t *)&ldesc->u.qcn9074.attention,
1577 	       sizeof(struct rx_attention));
1578 	memcpy((uint8_t *)&fdesc->u.qcn9074.mpdu_end, (uint8_t *)&ldesc->u.qcn9074.mpdu_end,
1579 	       sizeof(struct rx_mpdu_end));
1580 }
1581 
1582 uint32_t
qwx_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc * desc)1583 qwx_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1584 {
1585 	return FIELD_GET(HAL_TLV_HDR_TAG,
1586 	    le32toh(desc->u.qcn9074.mpdu_start_tag));
1587 }
1588 
1589 uint32_t
qwx_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc * desc)1590 qwx_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1591 {
1592 	return le16toh(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
1593 }
1594 
1595 void
qwx_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc * desc,uint16_t len)1596 qwx_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1597 {
1598 	uint32_t info = le32toh(desc->u.qcn9074.msdu_start.info1);
1599 
1600 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1601 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1602 
1603 	desc->u.qcn9074.msdu_start.info1 = htole32(info);
1604 }
1605 
1606 struct rx_attention *
qwx_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc * desc)1607 qwx_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
1608 {
1609 	return &desc->u.qcn9074.attention;
1610 }
1611 
1612 uint8_t *
qwx_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc * desc)1613 qwx_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1614 {
1615 	return &desc->u.qcn9074.msdu_payload[0];
1616 }
1617 
1618 int
qwx_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc * desc)1619 qwx_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1620 {
1621 	return le32toh(desc->u.qcn9074.mpdu_start.info11) &
1622 	       RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
1623 }
1624 
1625 uint8_t *
qwx_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc * desc)1626 qwx_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1627 {
1628 	return desc->u.qcn9074.mpdu_start.addr2;
1629 }
1630 
1631 int
qwx_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc * desc)1632 qwx_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
1633 {
1634 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
1635 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1636 }
1637 
1638 int
qwx_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc * desc)1639 qwx_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
1640 {
1641 	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
1642 	      le32toh(desc->u.wcn6855.msdu_end.info2));
1643 }
1644 
1645 uint8_t
qwx_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc * desc)1646 qwx_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
1647 {
1648 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
1649 	    le32toh(desc->u.wcn6855.msdu_end.info2));
1650 }
1651 
1652 uint8_t *
qwx_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc * desc)1653 qwx_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
1654 {
1655 	return desc->u.wcn6855.hdr_status;
1656 }
1657 
1658 int
qwx_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc * desc)1659 qwx_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
1660 {
1661 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1662 	       RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
1663 }
1664 
1665 uint32_t
qwx_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc * desc)1666 qwx_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
1667 {
1668 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
1669 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1670 }
1671 
1672 uint8_t
qwx_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc * desc)1673 qwx_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
1674 {
1675 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
1676 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1677 }
1678 
1679 uint8_t
qwx_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc * desc)1680 qwx_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
1681 {
1682 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
1683 	    le32toh(desc->u.wcn6855.msdu_start.info2));
1684 }
1685 
1686 int
qwx_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc * desc)1687 qwx_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
1688 {
1689 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
1690 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1691 }
1692 
1693 int
qwx_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc * desc)1694 qwx_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
1695 {
1696 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
1697 	      le32toh(desc->u.wcn6855.mpdu_start.info1));
1698 }
1699 
1700 uint16_t
qwx_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc * desc)1701 qwx_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
1702 {
1703 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
1704 	    le32toh(desc->u.wcn6855.mpdu_start.info1));
1705 }
1706 
1707 uint16_t
qwx_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc * desc)1708 qwx_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
1709 {
1710 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
1711 	    le32toh(desc->u.wcn6855.msdu_start.info1));
1712 }
1713 
1714 uint8_t
qwx_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc * desc)1715 qwx_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
1716 {
1717 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
1718 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1719 }
1720 
1721 uint8_t
qwx_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc * desc)1722 qwx_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
1723 {
1724 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
1725 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1726 }
1727 
1728 uint8_t
qwx_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc * desc)1729 qwx_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
1730 {
1731 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
1732 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1733 }
1734 
1735 uint32_t
qwx_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc * desc)1736 qwx_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
1737 {
1738 	return le32toh(desc->u.wcn6855.msdu_start.phy_meta_data);
1739 }
1740 
1741 uint8_t
qwx_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc * desc)1742 qwx_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
1743 {
1744 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
1745 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1746 }
1747 
1748 uint8_t
qwx_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc * desc)1749 qwx_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
1750 {
1751 	return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
1752 	    le32toh(desc->u.wcn6855.msdu_start.info3));
1753 }
1754 
1755 uint8_t
qwx_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc * desc)1756 qwx_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
1757 {
1758 	return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
1759 	    le32toh(desc->u.wcn6855.mpdu_start.info2));
1760 }
1761 
1762 uint16_t
qwx_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc * desc)1763 qwx_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
1764 {
1765 	return le16toh(desc->u.wcn6855.mpdu_start.sw_peer_id);
1766 }
1767 
1768 void
qwx_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc * fdesc,struct hal_rx_desc * ldesc)1769 qwx_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
1770     struct hal_rx_desc *ldesc)
1771 {
1772 	memcpy((uint8_t *)&fdesc->u.wcn6855.msdu_end, (uint8_t *)&ldesc->u.wcn6855.msdu_end,
1773 	       sizeof(struct rx_msdu_end_wcn6855));
1774 	memcpy((uint8_t *)&fdesc->u.wcn6855.attention, (uint8_t *)&ldesc->u.wcn6855.attention,
1775 	       sizeof(struct rx_attention));
1776 	memcpy((uint8_t *)&fdesc->u.wcn6855.mpdu_end, (uint8_t *)&ldesc->u.wcn6855.mpdu_end,
1777 	       sizeof(struct rx_mpdu_end));
1778 }
1779 
1780 uint32_t
qwx_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc * desc)1781 qwx_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
1782 {
1783 	return FIELD_GET(HAL_TLV_HDR_TAG,
1784 	    le32toh(desc->u.wcn6855.mpdu_start_tag));
1785 }
1786 
1787 uint32_t
qwx_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc * desc)1788 qwx_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
1789 {
1790 	return le16toh(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
1791 }
1792 
1793 void
qwx_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc * desc,uint16_t len)1794 qwx_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, uint16_t len)
1795 {
1796 	uint32_t info = le32toh(desc->u.wcn6855.msdu_start.info1);
1797 
1798 	info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
1799 	info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
1800 
1801 	desc->u.wcn6855.msdu_start.info1 = htole32(info);
1802 }
1803 
1804 struct rx_attention *
qwx_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc * desc)1805 qwx_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
1806 {
1807 	return &desc->u.wcn6855.attention;
1808 }
1809 
1810 uint8_t *
qwx_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc * desc)1811 qwx_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
1812 {
1813 	return &desc->u.wcn6855.msdu_payload[0];
1814 }
1815 
1816 int
qwx_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc * desc)1817 qwx_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
1818 {
1819 	return le32toh(desc->u.wcn6855.mpdu_start.info1) &
1820 	       RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
1821 }
1822 
1823 uint8_t *
qwx_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc * desc)1824 qwx_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
1825 {
1826 	return desc->u.wcn6855.mpdu_start.addr2;
1827 }
1828 
1829 /* Map from pdev index to hw mac index */
1830 uint8_t
qwx_hw_ipq8074_mac_from_pdev_id(int pdev_idx)1831 qwx_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
1832 {
1833 	switch (pdev_idx) {
1834 	case 0:
1835 		return 0;
1836 	case 1:
1837 		return 2;
1838 	case 2:
1839 		return 1;
1840 	default:
1841 		return ATH11K_INVALID_HW_MAC_ID;
1842 	}
1843 }
1844 
1845 uint8_t
qwx_hw_ipq6018_mac_from_pdev_id(int pdev_idx)1846 qwx_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
1847 {
1848 	return pdev_idx;
1849 }
1850 
1851 static inline int
qwx_hw_get_mac_from_pdev_id(struct qwx_softc * sc,int pdev_idx)1852 qwx_hw_get_mac_from_pdev_id(struct qwx_softc *sc, int pdev_idx)
1853 {
1854 	if (sc->hw_params.hw_ops->get_hw_mac_from_pdev_id)
1855 		return sc->hw_params.hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
1856 
1857 	return 0;
1858 }
1859 
1860 const struct ath11k_hw_ops ipq8074_ops = {
1861 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
1862 	.wmi_init_config = qwx_init_wmi_config_ipq8074,
1863 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
1864 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
1865 #if notyet
1866 	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
1867 #endif
1868 	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
1869 #if notyet
1870 	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
1871 #endif
1872 	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
1873 	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
1874 	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
1875 	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
1876 	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
1877 #ifdef notyet
1878 	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
1879 	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
1880 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
1881 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
1882 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
1883 #endif
1884 	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
1885 #ifdef notyet
1886 	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
1887 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
1888 	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
1889 #endif
1890 	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
1891 #ifdef notyet
1892 	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
1893 	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
1894 	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
1895 	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
1896 	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
1897 	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
1898 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
1899 	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
1900 #endif
1901 	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
1902 #ifdef notyet
1903 	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
1904 #endif
1905 	.reo_setup = qwx_hw_ipq8074_reo_setup,
1906 #ifdef notyet
1907 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
1908 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
1909 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
1910 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
1911 #endif
1912 };
1913 
1914 const struct ath11k_hw_ops ipq6018_ops = {
1915 	.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
1916 	.wmi_init_config = qwx_init_wmi_config_ipq8074,
1917 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
1918 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
1919 #if notyet
1920 	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
1921 #endif
1922 	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
1923 #if notyet
1924 	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
1925 #endif
1926 	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
1927 	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
1928 	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
1929 	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
1930 	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
1931 #ifdef notyet
1932 	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
1933 	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
1934 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
1935 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
1936 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
1937 #endif
1938 	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
1939 #ifdef notyet
1940 	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
1941 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
1942 	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
1943 #endif
1944 	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
1945 #ifdef notyet
1946 	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
1947 	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
1948 	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
1949 	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
1950 	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
1951 	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
1952 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
1953 	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
1954 #endif
1955 	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
1956 #ifdef notyet
1957 	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
1958 #endif
1959 	.reo_setup = qwx_hw_ipq8074_reo_setup,
1960 #ifdef notyet
1961 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
1962 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
1963 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
1964 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
1965 #endif
1966 };
1967 
1968 const struct ath11k_hw_ops qca6390_ops = {
1969 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
1970 	.wmi_init_config = qwx_init_wmi_config_qca6390,
1971 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
1972 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
1973 #if notyet
1974 	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
1975 #endif
1976 	.rx_desc_get_first_msdu = qwx_hw_ipq8074_rx_desc_get_first_msdu,
1977 #if notyet
1978 	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
1979 #endif
1980 	.rx_desc_get_l3_pad_bytes = qwx_hw_ipq8074_rx_desc_get_l3_pad_bytes,
1981 	.rx_desc_get_hdr_status = qwx_hw_ipq8074_rx_desc_get_hdr_status,
1982 	.rx_desc_encrypt_valid = qwx_hw_ipq8074_rx_desc_encrypt_valid,
1983 	.rx_desc_get_encrypt_type = qwx_hw_ipq8074_rx_desc_get_encrypt_type,
1984 	.rx_desc_get_decap_type = qwx_hw_ipq8074_rx_desc_get_decap_type,
1985 #ifdef notyet
1986 	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
1987 	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
1988 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
1989 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
1990 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
1991 #endif
1992 	.rx_desc_get_msdu_len = qwx_hw_ipq8074_rx_desc_get_msdu_len,
1993 #ifdef notyet
1994 	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
1995 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
1996 	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
1997 #endif
1998 	.rx_desc_get_msdu_freq = qwx_hw_ipq8074_rx_desc_get_msdu_freq,
1999 #ifdef notyet
2000 	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
2001 	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
2002 	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
2003 	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
2004 	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
2005 	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
2006 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
2007 	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
2008 #endif
2009 	.rx_desc_get_attention = qwx_hw_ipq8074_rx_desc_get_attention,
2010 #ifdef notyet
2011 	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
2012 #endif
2013 	.reo_setup = qwx_hw_ipq8074_reo_setup,
2014 #ifdef notyet
2015 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2016 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
2017 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
2018 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2019 #endif
2020 };
2021 
2022 const struct ath11k_hw_ops qcn9074_ops = {
2023 	.get_hw_mac_from_pdev_id = qwx_hw_ipq6018_mac_from_pdev_id,
2024 	.wmi_init_config = qwx_init_wmi_config_ipq8074,
2025 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
2026 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
2027 #if notyet
2028 	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
2029 #endif
2030 	.rx_desc_get_first_msdu = qwx_hw_qcn9074_rx_desc_get_first_msdu,
2031 #if notyet
2032 	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
2033 #endif
2034 	.rx_desc_get_l3_pad_bytes = qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes,
2035 	.rx_desc_get_hdr_status = qwx_hw_qcn9074_rx_desc_get_hdr_status,
2036 	.rx_desc_encrypt_valid = qwx_hw_qcn9074_rx_desc_encrypt_valid,
2037 	.rx_desc_get_encrypt_type = qwx_hw_qcn9074_rx_desc_get_encrypt_type,
2038 	.rx_desc_get_decap_type = qwx_hw_qcn9074_rx_desc_get_decap_type,
2039 #ifdef notyet
2040 	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
2041 	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
2042 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
2043 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
2044 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
2045 #endif
2046 	.rx_desc_get_msdu_len = qwx_hw_qcn9074_rx_desc_get_msdu_len,
2047 #ifdef notyet
2048 	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
2049 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
2050 	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
2051 #endif
2052 	.rx_desc_get_msdu_freq = qwx_hw_qcn9074_rx_desc_get_msdu_freq,
2053 #ifdef notyet
2054 	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
2055 	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
2056 	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
2057 	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
2058 	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
2059 	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
2060 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
2061 	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
2062 #endif
2063 	.rx_desc_get_attention = qwx_hw_qcn9074_rx_desc_get_attention,
2064 #ifdef notyet
2065 	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
2066 #endif
2067 	.reo_setup = qwx_hw_ipq8074_reo_setup,
2068 #ifdef notyet
2069 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2070 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
2071 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
2072 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2073 #endif
2074 };
2075 
2076 const struct ath11k_hw_ops wcn6855_ops = {
2077 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
2078 	.wmi_init_config = qwx_init_wmi_config_qca6390,
2079 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
2080 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
2081 #if notyet
2082 	.tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
2083 #endif
2084 	.rx_desc_get_first_msdu = qwx_hw_wcn6855_rx_desc_get_first_msdu,
2085 #if notyet
2086 	.rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
2087 #endif
2088 	.rx_desc_get_l3_pad_bytes = qwx_hw_wcn6855_rx_desc_get_l3_pad_bytes,
2089 	.rx_desc_get_hdr_status = qwx_hw_wcn6855_rx_desc_get_hdr_status,
2090 	.rx_desc_encrypt_valid = qwx_hw_wcn6855_rx_desc_encrypt_valid,
2091 	.rx_desc_get_encrypt_type = qwx_hw_wcn6855_rx_desc_get_encrypt_type,
2092 	.rx_desc_get_decap_type = qwx_hw_wcn6855_rx_desc_get_decap_type,
2093 #ifdef notyet
2094 	.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
2095 	.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
2096 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
2097 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
2098 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
2099 #endif
2100 	.rx_desc_get_msdu_len = qwx_hw_wcn6855_rx_desc_get_msdu_len,
2101 #ifdef notyet
2102 	.rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
2103 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
2104 	.rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
2105 #endif
2106 	.rx_desc_get_msdu_freq = qwx_hw_wcn6855_rx_desc_get_msdu_freq,
2107 #ifdef notyet
2108 	.rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
2109 	.rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
2110 	.rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
2111 	.rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
2112 	.rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
2113 	.rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
2114 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
2115 	.rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
2116 #endif
2117 	.rx_desc_get_attention = qwx_hw_wcn6855_rx_desc_get_attention,
2118 #ifdef notyet
2119 	.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
2120 #endif
2121 	.reo_setup = qwx_hw_wcn6855_reo_setup,
2122 #ifdef notyet
2123 	.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
2124 	.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
2125 	.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
2126 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
2127 #endif
2128 };
2129 
2130 const struct ath11k_hw_ops wcn6750_ops = {
2131 	.get_hw_mac_from_pdev_id = qwx_hw_ipq8074_mac_from_pdev_id,
2132 	.wmi_init_config = qwx_init_wmi_config_qca6390,
2133 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
2134 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
2135 #if notyet
2136 	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
2137 #endif
2138 	.rx_desc_get_first_msdu = qwx_hw_qcn9074_rx_desc_get_first_msdu,
2139 #if notyet
2140 	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
2141 #endif
2142 	.rx_desc_get_l3_pad_bytes = qwx_hw_qcn9074_rx_desc_get_l3_pad_bytes,
2143 	.rx_desc_get_hdr_status = qwx_hw_qcn9074_rx_desc_get_hdr_status,
2144 	.rx_desc_encrypt_valid = qwx_hw_qcn9074_rx_desc_encrypt_valid,
2145 	.rx_desc_get_encrypt_type = qwx_hw_qcn9074_rx_desc_get_encrypt_type,
2146 	.rx_desc_get_decap_type = qwx_hw_qcn9074_rx_desc_get_decap_type,
2147 #ifdef notyet
2148 	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
2149 	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
2150 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
2151 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
2152 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
2153 #endif
2154 	.rx_desc_get_msdu_len = qwx_hw_qcn9074_rx_desc_get_msdu_len,
2155 #ifdef notyet
2156 	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
2157 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
2158 	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
2159 #endif
2160 	.rx_desc_get_msdu_freq = qwx_hw_qcn9074_rx_desc_get_msdu_freq,
2161 #ifdef notyet
2162 	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
2163 	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
2164 	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
2165 	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
2166 	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
2167 	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
2168 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
2169 	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
2170 #endif
2171 	.rx_desc_get_attention = qwx_hw_qcn9074_rx_desc_get_attention,
2172 #ifdef notyet
2173 	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
2174 #endif
2175 	.reo_setup = qwx_hw_wcn6855_reo_setup,
2176 #ifdef notyet
2177 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
2178 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
2179 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
2180 	.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
2181 #endif
2182 };
2183 
/*
 * Per-ring interrupt group masks: bit N selects ring instance N of the
 * given ring type within an interrupt group.
 */
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
#define ATH11K_TX_RING_MASK_3 BIT(3)
#define ATH11K_TX_RING_MASK_4 BIT(4)

#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
#define ATH11K_RX_RING_MASK_2 0x4
#define ATH11K_RX_RING_MASK_3 0x8

#define ATH11K_RX_ERR_RING_MASK_0 0x1

#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1

#define ATH11K_REO_STATUS_RING_MASK_0 0x1

#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4

#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4

#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
2212 
2213 const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
2214 	.tx  = {
2215 		ATH11K_TX_RING_MASK_0,
2216 		ATH11K_TX_RING_MASK_1,
2217 		ATH11K_TX_RING_MASK_2,
2218 	},
2219 	.rx_mon_status = {
2220 		0, 0, 0, 0,
2221 		ATH11K_RX_MON_STATUS_RING_MASK_0,
2222 		ATH11K_RX_MON_STATUS_RING_MASK_1,
2223 		ATH11K_RX_MON_STATUS_RING_MASK_2,
2224 	},
2225 	.rx = {
2226 		0, 0, 0, 0, 0, 0, 0,
2227 		ATH11K_RX_RING_MASK_0,
2228 		ATH11K_RX_RING_MASK_1,
2229 		ATH11K_RX_RING_MASK_2,
2230 		ATH11K_RX_RING_MASK_3,
2231 	},
2232 	.rx_err = {
2233 		ATH11K_RX_ERR_RING_MASK_0,
2234 	},
2235 	.rx_wbm_rel = {
2236 		ATH11K_RX_WBM_REL_RING_MASK_0,
2237 	},
2238 	.reo_status = {
2239 		ATH11K_REO_STATUS_RING_MASK_0,
2240 	},
2241 	.rxdma2host = {
2242 		ATH11K_RXDMA2HOST_RING_MASK_0,
2243 		ATH11K_RXDMA2HOST_RING_MASK_1,
2244 		ATH11K_RXDMA2HOST_RING_MASK_2,
2245 	},
2246 	.host2rxdma = {
2247 		ATH11K_HOST2RXDMA_RING_MASK_0,
2248 		ATH11K_HOST2RXDMA_RING_MASK_1,
2249 		ATH11K_HOST2RXDMA_RING_MASK_2,
2250 	},
2251 };
2252 
2253 const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
2254 	.tx  = {
2255 		ATH11K_TX_RING_MASK_0,
2256 	},
2257 	.rx_mon_status = {
2258 		0, 0, 0, 0,
2259 		ATH11K_RX_MON_STATUS_RING_MASK_0,
2260 		ATH11K_RX_MON_STATUS_RING_MASK_1,
2261 		ATH11K_RX_MON_STATUS_RING_MASK_2,
2262 	},
2263 	.rx = {
2264 		0, 0, 0, 0, 0, 0, 0,
2265 		ATH11K_RX_RING_MASK_0,
2266 		ATH11K_RX_RING_MASK_1,
2267 		ATH11K_RX_RING_MASK_2,
2268 		ATH11K_RX_RING_MASK_3,
2269 	},
2270 	.rx_err = {
2271 		ATH11K_RX_ERR_RING_MASK_0,
2272 	},
2273 	.rx_wbm_rel = {
2274 		ATH11K_RX_WBM_REL_RING_MASK_0,
2275 	},
2276 	.reo_status = {
2277 		ATH11K_REO_STATUS_RING_MASK_0,
2278 	},
2279 	.rxdma2host = {
2280 		ATH11K_RXDMA2HOST_RING_MASK_0,
2281 		ATH11K_RXDMA2HOST_RING_MASK_1,
2282 		ATH11K_RXDMA2HOST_RING_MASK_2,
2283 	},
2284 	.host2rxdma = {
2285 	},
2286 };
2287 
2288 const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
2289 	.tx  = {
2290 		ATH11K_TX_RING_MASK_0,
2291 		ATH11K_TX_RING_MASK_1,
2292 		ATH11K_TX_RING_MASK_2,
2293 	},
2294 	.rx_mon_status = {
2295 		0, 0, 0,
2296 		ATH11K_RX_MON_STATUS_RING_MASK_0,
2297 		ATH11K_RX_MON_STATUS_RING_MASK_1,
2298 		ATH11K_RX_MON_STATUS_RING_MASK_2,
2299 	},
2300 	.rx = {
2301 		0, 0, 0, 0,
2302 		ATH11K_RX_RING_MASK_0,
2303 		ATH11K_RX_RING_MASK_1,
2304 		ATH11K_RX_RING_MASK_2,
2305 		ATH11K_RX_RING_MASK_3,
2306 	},
2307 	.rx_err = {
2308 		0, 0, 0,
2309 		ATH11K_RX_ERR_RING_MASK_0,
2310 	},
2311 	.rx_wbm_rel = {
2312 		0, 0, 0,
2313 		ATH11K_RX_WBM_REL_RING_MASK_0,
2314 	},
2315 	.reo_status = {
2316 		0, 0, 0,
2317 		ATH11K_REO_STATUS_RING_MASK_0,
2318 	},
2319 	.rxdma2host = {
2320 		0, 0, 0,
2321 		ATH11K_RXDMA2HOST_RING_MASK_0,
2322 	},
2323 	.host2rxdma = {
2324 		0, 0, 0,
2325 		ATH11K_HOST2RXDMA_RING_MASK_0,
2326 	},
2327 };
2328 
2329 const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
2330 	.tx  = {
2331 		ATH11K_TX_RING_MASK_0,
2332 		0,
2333 		ATH11K_TX_RING_MASK_2,
2334 		0,
2335 		ATH11K_TX_RING_MASK_4,
2336 	},
2337 	.rx_mon_status = {
2338 		0, 0, 0, 0, 0, 0,
2339 		ATH11K_RX_MON_STATUS_RING_MASK_0,
2340 	},
2341 	.rx = {
2342 		0, 0, 0, 0, 0, 0, 0,
2343 		ATH11K_RX_RING_MASK_0,
2344 		ATH11K_RX_RING_MASK_1,
2345 		ATH11K_RX_RING_MASK_2,
2346 		ATH11K_RX_RING_MASK_3,
2347 	},
2348 	.rx_err = {
2349 		0, ATH11K_RX_ERR_RING_MASK_0,
2350 	},
2351 	.rx_wbm_rel = {
2352 		0, ATH11K_RX_WBM_REL_RING_MASK_0,
2353 	},
2354 	.reo_status = {
2355 		0, ATH11K_REO_STATUS_RING_MASK_0,
2356 	},
2357 	.rxdma2host = {
2358 		ATH11K_RXDMA2HOST_RING_MASK_0,
2359 		ATH11K_RXDMA2HOST_RING_MASK_1,
2360 		ATH11K_RXDMA2HOST_RING_MASK_2,
2361 	},
2362 	.host2rxdma = {
2363 	},
2364 };
2365 
2366 /* Target firmware's Copy Engine configuration. */
2367 const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
2368 	/* CE0: host->target HTC control and raw streams */
2369 	{
2370 		.pipenum = htole32(0),
2371 		.pipedir = htole32(PIPEDIR_OUT),
2372 		.nentries = htole32(32),
2373 		.nbytes_max = htole32(2048),
2374 		.flags = htole32(CE_ATTR_FLAGS),
2375 		.reserved = htole32(0),
2376 	},
2377 
2378 	/* CE1: target->host HTT + HTC control */
2379 	{
2380 		.pipenum = htole32(1),
2381 		.pipedir = htole32(PIPEDIR_IN),
2382 		.nentries = htole32(32),
2383 		.nbytes_max = htole32(2048),
2384 		.flags = htole32(CE_ATTR_FLAGS),
2385 		.reserved = htole32(0),
2386 	},
2387 
2388 	/* CE2: target->host WMI */
2389 	{
2390 		.pipenum = htole32(2),
2391 		.pipedir = htole32(PIPEDIR_IN),
2392 		.nentries = htole32(32),
2393 		.nbytes_max = htole32(2048),
2394 		.flags = htole32(CE_ATTR_FLAGS),
2395 		.reserved = htole32(0),
2396 	},
2397 
2398 	/* CE3: host->target WMI */
2399 	{
2400 		.pipenum = htole32(3),
2401 		.pipedir = htole32(PIPEDIR_OUT),
2402 		.nentries = htole32(32),
2403 		.nbytes_max = htole32(2048),
2404 		.flags = htole32(CE_ATTR_FLAGS),
2405 		.reserved = htole32(0),
2406 	},
2407 
2408 	/* CE4: host->target HTT */
2409 	{
2410 		.pipenum = htole32(4),
2411 		.pipedir = htole32(PIPEDIR_OUT),
2412 		.nentries = htole32(256),
2413 		.nbytes_max = htole32(256),
2414 		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
2415 		.reserved = htole32(0),
2416 	},
2417 
2418 	/* CE5: target->host Pktlog */
2419 	{
2420 		.pipenum = htole32(5),
2421 		.pipedir = htole32(PIPEDIR_IN),
2422 		.nentries = htole32(32),
2423 		.nbytes_max = htole32(2048),
2424 		.flags = htole32(0),
2425 		.reserved = htole32(0),
2426 	},
2427 
2428 	/* CE6: Reserved for target autonomous hif_memcpy */
2429 	{
2430 		.pipenum = htole32(6),
2431 		.pipedir = htole32(PIPEDIR_INOUT),
2432 		.nentries = htole32(32),
2433 		.nbytes_max = htole32(65535),
2434 		.flags = htole32(CE_ATTR_FLAGS),
2435 		.reserved = htole32(0),
2436 	},
2437 
2438 	/* CE7 used only by Host */
2439 	{
2440 		.pipenum = htole32(7),
2441 		.pipedir = htole32(PIPEDIR_OUT),
2442 		.nentries = htole32(32),
2443 		.nbytes_max = htole32(2048),
2444 		.flags = htole32(CE_ATTR_FLAGS),
2445 		.reserved = htole32(0),
2446 	},
2447 
2448 	/* CE8 target->host used only by IPA */
2449 	{
2450 		.pipenum = htole32(8),
2451 		.pipedir = htole32(PIPEDIR_INOUT),
2452 		.nentries = htole32(32),
2453 		.nbytes_max = htole32(65535),
2454 		.flags = htole32(CE_ATTR_FLAGS),
2455 		.reserved = htole32(0),
2456 	},
2457 
2458 	/* CE9 host->target HTT */
2459 	{
2460 		.pipenum = htole32(9),
2461 		.pipedir = htole32(PIPEDIR_OUT),
2462 		.nentries = htole32(32),
2463 		.nbytes_max = htole32(2048),
2464 		.flags = htole32(CE_ATTR_FLAGS),
2465 		.reserved = htole32(0),
2466 	},
2467 
2468 	/* CE10 target->host HTT */
2469 	{
2470 		.pipenum = htole32(10),
2471 		.pipedir = htole32(PIPEDIR_INOUT_H2H),
2472 		.nentries = htole32(0),
2473 		.nbytes_max = htole32(0),
2474 		.flags = htole32(CE_ATTR_FLAGS),
2475 		.reserved = htole32(0),
2476 	},
2477 
2478 	/* CE11 Not used */
2479 };
2480 
2481 /* Map from service/endpoint to Copy Engine.
2482  * This table is derived from the CE_PCI TABLE, above.
2483  * It is passed to the Target at startup for use by firmware.
2484  */
2485 const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
2486 	{
2487 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2488 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2489 		.pipenum = htole32(3),
2490 	},
2491 	{
2492 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2493 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2494 		.pipenum = htole32(2),
2495 	},
2496 	{
2497 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2498 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2499 		.pipenum = htole32(3),
2500 	},
2501 	{
2502 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2503 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2504 		.pipenum = htole32(2),
2505 	},
2506 	{
2507 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
2508 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2509 		.pipenum = htole32(3),
2510 	},
2511 	{
2512 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
2513 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2514 		.pipenum = htole32(2),
2515 	},
2516 	{
2517 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
2518 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2519 		.pipenum = htole32(3),
2520 	},
2521 	{
2522 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
2523 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2524 		.pipenum = htole32(2),
2525 	},
2526 	{
2527 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
2528 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2529 		.pipenum = htole32(3),
2530 	},
2531 	{
2532 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
2533 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2534 		.pipenum = htole32(2),
2535 	},
2536 	{
2537 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
2538 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2539 		.pipenum = htole32(7),
2540 	},
2541 	{
2542 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
2543 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2544 		.pipenum = htole32(2),
2545 	},
2546 	{
2547 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
2548 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2549 		.pipenum = htole32(9),
2550 	},
2551 	{
2552 		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
2553 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2554 		.pipenum = htole32(2),
2555 	},
2556 	{
2557 		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
2558 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2559 		.pipenum = htole32(0),
2560 	},
2561 	{
2562 		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
2563 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2564 		.pipenum = htole32(1),
2565 	},
2566 	{ /* not used */
2567 		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
2568 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2569 		.pipenum = htole32(0),
2570 	},
2571 	{ /* not used */
2572 		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
2573 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2574 		.pipenum = htole32(1),
2575 	},
2576 	{
2577 		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
2578 		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2579 		.pipenum = htole32(4),
2580 	},
2581 	{
2582 		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
2583 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2584 		.pipenum = htole32(1),
2585 	},
2586 	{
2587 		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
2588 		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2589 		.pipenum = htole32(5),
2590 	},
2591 
2592 	/* (Additions here) */
2593 
2594 	{ /* terminator entry */ }
2595 };
2596 
/*
 * Service/endpoint to Copy Engine pipe mapping for IPQ6018.
 * Like the other svc_to_ce_map tables, this is handed to the target at
 * startup; every field is stored little-endian (htole32) for firmware
 * consumption.  The list is terminated by an all-zero entry.
 * Note: unlike IPQ8074, this table has no WMI_CONTROL_MAC2 entries
 * (IPQ6018 hw_params uses max_radios = 2).
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
2698 
/*
 * Target firmware's Copy Engine configuration (QCA6390).
 * All fields are stored little-endian (htole32) for firmware consumption.
 * Only CE0-CE8 are configured here; CE 9, 10, 11 belong to the MHI driver.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled (polled completion) */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2792 
2793 /* Map from service/endpoint to Copy Engine.
2794  * This table is derived from the CE_PCI TABLE, above.
2795  * It is passed to the Target at startup for use by firmware.
2796  */
2797 const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
2798 	{
2799 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2800 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2801 		htole32(3),
2802 	},
2803 	{
2804 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2805 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2806 		htole32(2),
2807 	},
2808 	{
2809 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2810 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2811 		htole32(3),
2812 	},
2813 	{
2814 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2815 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2816 		htole32(2),
2817 	},
2818 	{
2819 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
2820 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2821 		htole32(3),
2822 	},
2823 	{
2824 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
2825 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2826 		htole32(2),
2827 	},
2828 	{
2829 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
2830 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2831 		htole32(3),
2832 	},
2833 	{
2834 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
2835 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2836 		htole32(2),
2837 	},
2838 	{
2839 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
2840 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2841 		htole32(3),
2842 	},
2843 	{
2844 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
2845 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2846 		htole32(2),
2847 	},
2848 	{
2849 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
2850 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2851 		htole32(0),
2852 	},
2853 	{
2854 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
2855 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2856 		htole32(2),
2857 	},
2858 	{
2859 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
2860 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2861 		htole32(4),
2862 	},
2863 	{
2864 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
2865 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2866 		htole32(1),
2867 	},
2868 
2869 	/* (Additions here) */
2870 
2871 	{ /* must be last */
2872 		htole32(0),
2873 		htole32(0),
2874 		htole32(0),
2875 	},
2876 };
2877 
/*
 * Target firmware's Copy Engine configuration (QCN9074).
 * All fields are stored little-endian (htole32) for firmware consumption.
 * Only CE0-CE8 are configured here; CE 9, 10, 11 belong to the MHI driver.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT; interrupts disabled (polled completion) */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
2971 
2972 /* Map from service/endpoint to Copy Engine.
2973  * This table is derived from the CE_PCI TABLE, above.
2974  * It is passed to the Target at startup for use by firmware.
2975  */
2976 const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
2977 	{
2978 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2979 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2980 		htole32(3),
2981 	},
2982 	{
2983 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
2984 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2985 		htole32(2),
2986 	},
2987 	{
2988 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2989 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
2990 		htole32(3),
2991 	},
2992 	{
2993 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
2994 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
2995 		htole32(2),
2996 	},
2997 	{
2998 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
2999 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3000 		htole32(3),
3001 	},
3002 	{
3003 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
3004 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3005 		htole32(2),
3006 	},
3007 	{
3008 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
3009 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3010 		htole32(3),
3011 	},
3012 	{
3013 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
3014 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3015 		htole32(2),
3016 	},
3017 	{
3018 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
3019 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3020 		htole32(3),
3021 	},
3022 	{
3023 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
3024 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3025 		htole32(2),
3026 	},
3027 	{
3028 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
3029 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3030 		htole32(0),
3031 	},
3032 	{
3033 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
3034 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3035 		htole32(1),
3036 	},
3037 	{
3038 		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
3039 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3040 		htole32(0),
3041 	},
3042 	{
3043 		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
3044 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3045 		htole32(1),
3046 	},
3047 	{
3048 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
3049 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
3050 		htole32(4),
3051 	},
3052 	{
3053 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
3054 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3055 		htole32(1),
3056 	},
3057 	{
3058 		htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
3059 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
3060 		htole32(5),
3061 	},
3062 
3063 	/* (Additions here) */
3064 
3065 	{ /* must be last */
3066 		htole32(0),
3067 		htole32(0),
3068 		htole32(0),
3069 	},
3070 };
3071 
#define QWX_CE_COUNT_IPQ8074	21

/*
 * Host-side Copy Engine attributes for IPQ8074.
 * src_nentries/src_sz_max describe the host->target (source) ring,
 * dest_nentries the target->host (destination) ring.  send_cb/recv_cb
 * are invoked on send/receive completion for the respective pipe.
 * Only CE0-CE11 are initialized; the remaining entries up to
 * QWX_CE_COUNT_IPQ8074 are implicitly zero.
 */
const struct ce_attr qwx_host_ce_config_ipq8074[QWX_CE_COUNT_IPQ8074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; interrupts disabled, no completion callback */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
3179 
#define QWX_CE_COUNT_QCA6390	9

/*
 * Host-side Copy Engine attributes for QCA6390 (CE0-CE8).
 * src_nentries/src_sz_max describe the host->target (source) ring,
 * dest_nentries the target->host (destination) ring.  send_cb/recv_cb
 * are invoked on send/receive completion for the respective pipe.
 * Note: unlike IPQ8074, CE0 has no send_cb here.
 */
const struct ce_attr qwx_host_ce_config_qca6390[QWX_CE_COUNT_QCA6390] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; interrupts disabled, no completion callback */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
3261 
#define QWX_CE_COUNT_QCN9074	6

/*
 * Host-side Copy Engine attributes for QCN9074 (CE0-CE5 only).
 * src_nentries/src_sz_max describe the host->target (source) ring,
 * dest_nentries the target->host (destination) ring.  send_cb/recv_cb
 * are invoked on send/receive completion for the respective pipe.
 * Note: CE2 uses a smaller destination ring (32) than the other chips.
 */
const struct ce_attr qwx_host_ce_config_qcn9074[QWX_CE_COUNT_QCN9074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; interrupts disabled, no completion callback */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},
};
3317 
/*
 * TCL ring to WBM release ring / return buffer manager (RBM) mapping
 * for IPQ8074-style hardware: TCL ring N maps 1:1 to WBM ring N and
 * software buffer manager SWn_BM.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 1,
		.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
3335 
/*
 * TCL ring to WBM release ring / RBM mapping for WCN6750.
 * Unlike IPQ8074, TCL ring 1 maps to WBM ring 4 / SW4_BM instead of
 * WBM ring 1 / SW1_BM.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 4,
		.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
3353 
3354 
/*
 * Per-chip HAL parameters: the RBM used for RX buffers and the
 * TCL->WBM ring mapping table (referenced from ath11k_hw_params below).
 */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};

/* QCA6390 returns RX buffers via SW1_BM but shares the IPQ8074 TCL map. */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};

/* WCN6750 uses SW1_BM for RX buffers and its own TCL map (TCL1 -> WBM4). */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
3369 
3370 static const struct ath11k_hw_params ath11k_hw_params[] = {
3371 	{
3372 		.hw_rev = ATH11K_HW_IPQ8074,
3373 		.name = "ipq8074 hw2.0",
3374 		.fw = {
3375 			.dir = "ipq8074-hw2.0",
3376 			.board_size = 256 * 1024,
3377 			.cal_offset = 128 * 1024,
3378 		},
3379 		.max_radios = 3,
3380 		.bdf_addr = 0x4B0C0000,
3381 		.hw_ops = &ipq8074_ops,
3382 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
3383 		.internal_sleep_clock = false,
3384 		.regs = &ipq8074_regs,
3385 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
3386 		.host_ce_config = qwx_host_ce_config_ipq8074,
3387 		.ce_count = QWX_CE_COUNT_IPQ8074,
3388 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
3389 		.target_ce_count = 11,
3390 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
3391 		.svc_to_ce_map_len = 21,
3392 		.single_pdev_only = false,
3393 		.rxdma1_enable = true,
3394 		.num_rxmda_per_pdev = 1,
3395 		.rx_mac_buf_ring = false,
3396 		.vdev_start_delay = false,
3397 		.htt_peer_map_v2 = true,
3398 #if notyet
3399 		.spectral = {
3400 			.fft_sz = 2,
3401 			/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes.
3402 			 * so added pad size as 2 bytes to compensate the BIN size
3403 			 */
3404 			.fft_pad_sz = 2,
3405 			.summary_pad_sz = 0,
3406 			.fft_hdr_len = 16,
3407 			.max_fft_bins = 512,
3408 			.fragment_160mhz = true,
3409 		},
3410 
3411 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3412 					BIT(NL80211_IFTYPE_AP) |
3413 					BIT(NL80211_IFTYPE_MESH_POINT),
3414 		.supports_monitor = true,
3415 		.full_monitor_mode = false,
3416 #endif
3417 		.supports_shadow_regs = false,
3418 		.idle_ps = false,
3419 		.supports_sta_ps = false,
3420 		.cold_boot_calib = true,
3421 		.cbcal_restart_fw = true,
3422 		.fw_mem_mode = 0,
3423 		.num_vdevs = 16 + 1,
3424 		.num_peers = 512,
3425 		.supports_suspend = false,
3426 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3427 		.supports_regdb = false,
3428 		.fix_l1ss = true,
3429 		.credit_flow = false,
3430 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3431 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3432 #if notyet
3433 		.supports_dynamic_smps_6ghz = false,
3434 		.alloc_cacheable_memory = true,
3435 		.supports_rssi_stats = false,
3436 #endif
3437 		.fw_wmi_diag_event = false,
3438 		.current_cc_support = false,
3439 		.dbr_debug_support = true,
3440 		.global_reset = false,
3441 #ifdef notyet
3442 		.bios_sar_capa = NULL,
3443 #endif
3444 		.m3_fw_support = false,
3445 		.fixed_bdf_addr = true,
3446 		.fixed_mem_region = true,
3447 		.static_window_map = false,
3448 #if notyet
3449 		.hybrid_bus_type = false,
3450 		.fixed_fw_mem = false,
3451 		.support_off_channel_tx = false,
3452 		.supports_multi_bssid = false,
3453 
3454 		.sram_dump = {},
3455 
3456 		.tcl_ring_retry = true,
3457 #endif
3458 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3459 #ifdef notyet
3460 		.smp2p_wow_exit = false,
3461 #endif
3462 	},
3463 	{
3464 		.hw_rev = ATH11K_HW_IPQ6018_HW10,
3465 		.name = "ipq6018 hw1.0",
3466 		.fw = {
3467 			.dir = "ipq6018-hw1.0",
3468 			.board_size = 256 * 1024,
3469 			.cal_offset = 128 * 1024,
3470 		},
3471 		.max_radios = 2,
3472 		.bdf_addr = 0x4ABC0000,
3473 		.hw_ops = &ipq6018_ops,
3474 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
3475 		.internal_sleep_clock = false,
3476 		.regs = &ipq8074_regs,
3477 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
3478 		.host_ce_config = qwx_host_ce_config_ipq8074,
3479 		.ce_count = QWX_CE_COUNT_IPQ8074,
3480 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
3481 		.target_ce_count = 11,
3482 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
3483 		.svc_to_ce_map_len = 19,
3484 		.single_pdev_only = false,
3485 		.rxdma1_enable = true,
3486 		.num_rxmda_per_pdev = 1,
3487 		.rx_mac_buf_ring = false,
3488 		.vdev_start_delay = false,
3489 		.htt_peer_map_v2 = true,
3490 #if notyet
3491 		.spectral = {
3492 			.fft_sz = 4,
3493 			.fft_pad_sz = 0,
3494 			.summary_pad_sz = 0,
3495 			.fft_hdr_len = 16,
3496 			.max_fft_bins = 512,
3497 			.fragment_160mhz = true,
3498 		},
3499 
3500 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3501 					BIT(NL80211_IFTYPE_AP) |
3502 					BIT(NL80211_IFTYPE_MESH_POINT),
3503 		.supports_monitor = true,
3504 		.full_monitor_mode = false,
3505 #endif
3506 		.supports_shadow_regs = false,
3507 		.idle_ps = false,
3508 		.supports_sta_ps = false,
3509 		.cold_boot_calib = true,
3510 		.cbcal_restart_fw = true,
3511 		.fw_mem_mode = 0,
3512 		.num_vdevs = 16 + 1,
3513 		.num_peers = 512,
3514 		.supports_suspend = false,
3515 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3516 		.supports_regdb = false,
3517 		.fix_l1ss = true,
3518 		.credit_flow = false,
3519 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3520 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3521 #if notyet
3522 		.supports_dynamic_smps_6ghz = false,
3523 		.alloc_cacheable_memory = true,
3524 		.supports_rssi_stats = false,
3525 #endif
3526 		.fw_wmi_diag_event = false,
3527 		.current_cc_support = false,
3528 		.dbr_debug_support = true,
3529 		.global_reset = false,
3530 #ifdef notyet
3531 		.bios_sar_capa = NULL,
3532 #endif
3533 		.m3_fw_support = false,
3534 		.fixed_bdf_addr = true,
3535 		.fixed_mem_region = true,
3536 		.static_window_map = false,
3537 		.hybrid_bus_type = false,
3538 		.fixed_fw_mem = false,
3539 #if notyet
3540 		.support_off_channel_tx = false,
3541 		.supports_multi_bssid = false,
3542 
3543 		.sram_dump = {},
3544 
3545 		.tcl_ring_retry = true,
3546 #endif
3547 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3548 #ifdef notyet
3549 		.smp2p_wow_exit = false,
3550 #endif
3551 	},
3552 	{
3553 		.name = "qca6390 hw2.0",
3554 		.hw_rev = ATH11K_HW_QCA6390_HW20,
3555 		.fw = {
3556 			.dir = "qca6390-hw2.0",
3557 			.board_size = 256 * 1024,
3558 			.cal_offset = 128 * 1024,
3559 		},
3560 		.max_radios = 3,
3561 		.bdf_addr = 0x4B0C0000,
3562 		.hw_ops = &qca6390_ops,
3563 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3564 		.internal_sleep_clock = true,
3565 		.regs = &qca6390_regs,
3566 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3567 		.host_ce_config = qwx_host_ce_config_qca6390,
3568 		.ce_count = QWX_CE_COUNT_QCA6390,
3569 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3570 		.target_ce_count = 9,
3571 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3572 		.svc_to_ce_map_len = 14,
3573 		.single_pdev_only = true,
3574 		.rxdma1_enable = false,
3575 		.num_rxmda_per_pdev = 2,
3576 		.rx_mac_buf_ring = true,
3577 		.vdev_start_delay = true,
3578 		.htt_peer_map_v2 = false,
3579 #if notyet
3580 		.spectral = {
3581 			.fft_sz = 0,
3582 			.fft_pad_sz = 0,
3583 			.summary_pad_sz = 0,
3584 			.fft_hdr_len = 0,
3585 			.max_fft_bins = 0,
3586 			.fragment_160mhz = false,
3587 		},
3588 
3589 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3590 					BIT(NL80211_IFTYPE_AP),
3591 		.supports_monitor = false,
3592 		.full_monitor_mode = false,
3593 #endif
3594 		.supports_shadow_regs = true,
3595 		.idle_ps = true,
3596 		.supports_sta_ps = true,
3597 		.cold_boot_calib = false,
3598 		.cbcal_restart_fw = false,
3599 		.fw_mem_mode = 0,
3600 		.num_vdevs = 16 + 1,
3601 		.num_peers = 512,
3602 		.supports_suspend = true,
3603 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
3604 		.supports_regdb = false,
3605 		.fix_l1ss = true,
3606 		.credit_flow = true,
3607 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3608 		.hal_params = &ath11k_hw_hal_params_qca6390,
3609 #if notyet
3610 		.supports_dynamic_smps_6ghz = false,
3611 		.alloc_cacheable_memory = false,
3612 		.supports_rssi_stats = true,
3613 #endif
3614 		.fw_wmi_diag_event = true,
3615 		.current_cc_support = true,
3616 		.dbr_debug_support = false,
3617 		.global_reset = true,
3618 #ifdef notyet
3619 		.bios_sar_capa = NULL,
3620 #endif
3621 		.m3_fw_support = true,
3622 		.fixed_bdf_addr = false,
3623 		.fixed_mem_region = false,
3624 		.static_window_map = false,
3625 		.hybrid_bus_type = false,
3626 		.fixed_fw_mem = false,
3627 #if notyet
3628 		.support_off_channel_tx = true,
3629 		.supports_multi_bssid = true,
3630 
3631 		.sram_dump = {
3632 			.start = 0x01400000,
3633 			.end = 0x0171ffff,
3634 		},
3635 
3636 		.tcl_ring_retry = true,
3637 #endif
3638 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3639 #ifdef notyet
3640 		.smp2p_wow_exit = false,
3641 #endif
3642 	},
3643 	{
3644 		.name = "qcn9074 hw1.0",
3645 		.hw_rev = ATH11K_HW_QCN9074_HW10,
3646 		.fw = {
3647 			.dir = "qcn9074-hw1.0",
3648 			.board_size = 256 * 1024,
3649 			.cal_offset = 128 * 1024,
3650 		},
3651 		.max_radios = 1,
3652 #if notyet
3653 		.single_pdev_only = false,
3654 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
3655 #endif
3656 		.hw_ops = &qcn9074_ops,
3657 		.ring_mask = &ath11k_hw_ring_mask_qcn9074,
3658 		.internal_sleep_clock = false,
3659 		.regs = &qcn9074_regs,
3660 		.host_ce_config = qwx_host_ce_config_qcn9074,
3661 		.ce_count = QWX_CE_COUNT_QCN9074,
3662 		.target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
3663 		.target_ce_count = 9,
3664 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
3665 		.svc_to_ce_map_len = 18,
3666 		.rxdma1_enable = true,
3667 		.num_rxmda_per_pdev = 1,
3668 		.rx_mac_buf_ring = false,
3669 		.vdev_start_delay = false,
3670 		.htt_peer_map_v2 = true,
3671 #if notyet
3672 		.spectral = {
3673 			.fft_sz = 2,
3674 			.fft_pad_sz = 0,
3675 			.summary_pad_sz = 16,
3676 			.fft_hdr_len = 24,
3677 			.max_fft_bins = 1024,
3678 			.fragment_160mhz = false,
3679 		},
3680 
3681 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3682 					BIT(NL80211_IFTYPE_AP) |
3683 					BIT(NL80211_IFTYPE_MESH_POINT),
3684 		.supports_monitor = true,
3685 		.full_monitor_mode = true,
3686 #endif
3687 		.supports_shadow_regs = false,
3688 		.idle_ps = false,
3689 		.supports_sta_ps = false,
3690 		.cold_boot_calib = false,
3691 		.cbcal_restart_fw = false,
3692 		.fw_mem_mode = 2,
3693 		.num_vdevs = 8,
3694 		.num_peers = 128,
3695 		.supports_suspend = false,
3696 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
3697 		.supports_regdb = false,
3698 		.fix_l1ss = true,
3699 		.credit_flow = false,
3700 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3701 		.hal_params = &ath11k_hw_hal_params_ipq8074,
3702 #if notyet
3703 		.supports_dynamic_smps_6ghz = true,
3704 		.alloc_cacheable_memory = true,
3705 		.supports_rssi_stats = false,
3706 #endif
3707 		.fw_wmi_diag_event = false,
3708 		.current_cc_support = false,
3709 		.dbr_debug_support = true,
3710 		.global_reset = false,
3711 #ifdef notyet
3712 		.bios_sar_capa = NULL,
3713 #endif
3714 		.m3_fw_support = true,
3715 		.fixed_bdf_addr = false,
3716 		.fixed_mem_region = false,
3717 		.static_window_map = true,
3718 		.hybrid_bus_type = false,
3719 		.fixed_fw_mem = false,
3720 #if notyet
3721 		.support_off_channel_tx = false,
3722 		.supports_multi_bssid = false,
3723 
3724 		.sram_dump = {},
3725 
3726 		.tcl_ring_retry = true,
3727 #endif
3728 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3729 #ifdef notyet
3730 		.smp2p_wow_exit = false,
3731 #endif
3732 	},
3733 	{
3734 		.name = "wcn6855 hw2.0",
3735 		.hw_rev = ATH11K_HW_WCN6855_HW20,
3736 		.fw = {
3737 			.dir = "wcn6855-hw2.0",
3738 			.board_size = 256 * 1024,
3739 			.cal_offset = 128 * 1024,
3740 		},
3741 		.max_radios = 3,
3742 		.bdf_addr = 0x4B0C0000,
3743 		.hw_ops = &wcn6855_ops,
3744 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3745 		.internal_sleep_clock = true,
3746 		.regs = &wcn6855_regs,
3747 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3748 		.host_ce_config = qwx_host_ce_config_qca6390,
3749 		.ce_count = QWX_CE_COUNT_QCA6390,
3750 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3751 		.target_ce_count = 9,
3752 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3753 		.svc_to_ce_map_len = 14,
3754 		.single_pdev_only = true,
3755 		.rxdma1_enable = false,
3756 		.num_rxmda_per_pdev = 2,
3757 		.rx_mac_buf_ring = true,
3758 		.vdev_start_delay = true,
3759 		.htt_peer_map_v2 = false,
3760 #if notyet
3761 		.spectral = {
3762 			.fft_sz = 0,
3763 			.fft_pad_sz = 0,
3764 			.summary_pad_sz = 0,
3765 			.fft_hdr_len = 0,
3766 			.max_fft_bins = 0,
3767 			.fragment_160mhz = false,
3768 		},
3769 
3770 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3771 					BIT(NL80211_IFTYPE_AP),
3772 		.supports_monitor = false,
3773 		.full_monitor_mode = false,
3774 #endif
3775 		.supports_shadow_regs = true,
3776 		.idle_ps = true,
3777 		.supports_sta_ps = true,
3778 		.cold_boot_calib = false,
3779 		.cbcal_restart_fw = false,
3780 		.fw_mem_mode = 0,
3781 		.num_vdevs = 16 + 1,
3782 		.num_peers = 512,
3783 		.supports_suspend = true,
3784 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
3785 		.supports_regdb = true,
3786 		.fix_l1ss = false,
3787 		.credit_flow = true,
3788 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3789 		.hal_params = &ath11k_hw_hal_params_qca6390,
3790 #if notyet
3791 		.supports_dynamic_smps_6ghz = false,
3792 		.alloc_cacheable_memory = false,
3793 		.supports_rssi_stats = true,
3794 #endif
3795 		.fw_wmi_diag_event = true,
3796 		.current_cc_support = true,
3797 		.dbr_debug_support = false,
3798 		.global_reset = true,
3799 #ifdef notyet
3800 		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
3801 #endif
3802 		.m3_fw_support = true,
3803 		.fixed_bdf_addr = false,
3804 		.fixed_mem_region = false,
3805 		.static_window_map = false,
3806 		.hybrid_bus_type = false,
3807 		.fixed_fw_mem = false,
3808 #if notyet
3809 		.support_off_channel_tx = true,
3810 		.supports_multi_bssid = true,
3811 
3812 		.sram_dump = {
3813 			.start = 0x01400000,
3814 			.end = 0x0177ffff,
3815 		},
3816 
3817 		.tcl_ring_retry = true,
3818 #endif
3819 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3820 #ifdef notyet
3821 		.smp2p_wow_exit = false,
3822 #endif
3823 	},
3824 	{
3825 		.name = "wcn6855 hw2.1",
3826 		.hw_rev = ATH11K_HW_WCN6855_HW21,
3827 		.fw = {
3828 			.dir = "wcn6855-hw2.1",
3829 			.board_size = 256 * 1024,
3830 			.cal_offset = 128 * 1024,
3831 		},
3832 		.max_radios = 3,
3833 		.bdf_addr = 0x4B0C0000,
3834 		.hw_ops = &wcn6855_ops,
3835 		.ring_mask = &ath11k_hw_ring_mask_qca6390,
3836 		.internal_sleep_clock = true,
3837 		.regs = &wcn6855_regs,
3838 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
3839 		.host_ce_config = qwx_host_ce_config_qca6390,
3840 		.ce_count = QWX_CE_COUNT_QCA6390,
3841 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3842 		.target_ce_count = 9,
3843 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3844 		.svc_to_ce_map_len = 14,
3845 		.single_pdev_only = true,
3846 		.rxdma1_enable = false,
3847 		.num_rxmda_per_pdev = 2,
3848 		.rx_mac_buf_ring = true,
3849 		.vdev_start_delay = true,
3850 		.htt_peer_map_v2 = false,
3851 #if notyet
3852 		.spectral = {
3853 			.fft_sz = 0,
3854 			.fft_pad_sz = 0,
3855 			.summary_pad_sz = 0,
3856 			.fft_hdr_len = 0,
3857 			.max_fft_bins = 0,
3858 			.fragment_160mhz = false,
3859 		},
3860 
3861 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3862 					BIT(NL80211_IFTYPE_AP),
3863 		.supports_monitor = false,
3864 #endif
3865 		.supports_shadow_regs = true,
3866 		.idle_ps = true,
3867 		.supports_sta_ps = true,
3868 		.cold_boot_calib = false,
3869 		.cbcal_restart_fw = false,
3870 		.fw_mem_mode = 0,
3871 		.num_vdevs = 16 + 1,
3872 		.num_peers = 512,
3873 		.supports_suspend = true,
3874 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
3875 		.supports_regdb = true,
3876 		.fix_l1ss = false,
3877 		.credit_flow = true,
3878 		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
3879 		.hal_params = &ath11k_hw_hal_params_qca6390,
3880 #if notyet
3881 		.supports_dynamic_smps_6ghz = false,
3882 		.alloc_cacheable_memory = false,
3883 		.supports_rssi_stats = true,
3884 #endif
3885 		.fw_wmi_diag_event = true,
3886 		.current_cc_support = true,
3887 		.dbr_debug_support = false,
3888 		.global_reset = true,
3889 #ifdef notyet
3890 		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
3891 #endif
3892 		.m3_fw_support = true,
3893 		.fixed_bdf_addr = false,
3894 		.fixed_mem_region = false,
3895 		.static_window_map = false,
3896 		.hybrid_bus_type = false,
3897 		.fixed_fw_mem = false,
3898 #if notyet
3899 		.support_off_channel_tx = true,
3900 		.supports_multi_bssid = true,
3901 
3902 		.sram_dump = {
3903 			.start = 0x01400000,
3904 			.end = 0x0177ffff,
3905 		},
3906 
3907 		.tcl_ring_retry = true,
3908 #endif
3909 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
3910 #ifdef notyet
3911 		.smp2p_wow_exit = false,
3912 #endif
3913 	},
3914 	{
3915 		.name = "wcn6750 hw1.0",
3916 		.hw_rev = ATH11K_HW_WCN6750_HW10,
3917 		.fw = {
3918 			.dir = "wcn6750-hw1.0",
3919 			.board_size = 256 * 1024,
3920 			.cal_offset = 128 * 1024,
3921 		},
3922 		.max_radios = 1,
3923 		.bdf_addr = 0x4B0C0000,
3924 		.hw_ops = &wcn6750_ops,
3925 		.ring_mask = &ath11k_hw_ring_mask_wcn6750,
3926 		.internal_sleep_clock = false,
3927 		.regs = &wcn6750_regs,
3928 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
3929 		.host_ce_config = qwx_host_ce_config_qca6390,
3930 		.ce_count = QWX_CE_COUNT_QCA6390,
3931 		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
3932 		.target_ce_count = 9,
3933 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
3934 		.svc_to_ce_map_len = 14,
3935 		.single_pdev_only = true,
3936 		.rxdma1_enable = false,
3937 		.num_rxmda_per_pdev = 1,
3938 		.rx_mac_buf_ring = true,
3939 		.vdev_start_delay = true,
3940 		.htt_peer_map_v2 = false,
3941 #if notyet
3942 		.spectral = {
3943 			.fft_sz = 0,
3944 			.fft_pad_sz = 0,
3945 			.summary_pad_sz = 0,
3946 			.fft_hdr_len = 0,
3947 			.max_fft_bins = 0,
3948 			.fragment_160mhz = false,
3949 		},
3950 
3951 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
3952 					BIT(NL80211_IFTYPE_AP),
3953 		.supports_monitor = false,
3954 #endif
3955 		.supports_shadow_regs = true,
3956 		.idle_ps = true,
3957 		.supports_sta_ps = true,
3958 		.cold_boot_calib = true,
3959 		.cbcal_restart_fw = false,
3960 		.fw_mem_mode = 0,
3961 		.num_vdevs = 16 + 1,
3962 		.num_peers = 512,
3963 		.supports_suspend = false,
3964 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
3965 		.supports_regdb = true,
3966 		.fix_l1ss = false,
3967 		.credit_flow = true,
3968 		.max_tx_ring = DP_TCL_NUM_RING_MAX,
3969 		.hal_params = &ath11k_hw_hal_params_wcn6750,
3970 #if notyet
3971 		.supports_dynamic_smps_6ghz = false,
3972 		.alloc_cacheable_memory = false,
3973 		.supports_rssi_stats = true,
3974 #endif
3975 		.fw_wmi_diag_event = false,
3976 		.current_cc_support = true,
3977 		.dbr_debug_support = false,
3978 		.global_reset = false,
3979 #ifdef notyet
3980 		.bios_sar_capa = NULL,
3981 #endif
3982 		.m3_fw_support = false,
3983 		.fixed_bdf_addr = false,
3984 		.fixed_mem_region = false,
3985 		.static_window_map = true,
3986 		.hybrid_bus_type = true,
3987 		.fixed_fw_mem = true,
3988 #if notyet
3989 		.support_off_channel_tx = true,
3990 		.supports_multi_bssid = true,
3991 
3992 		.sram_dump = {},
3993 
3994 		.tcl_ring_retry = false,
3995 #endif
3996 		.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
3997 #ifdef notyet
3998 		.smp2p_wow_exit = true,
3999 #endif
4000 	},
4001 };
4002 
/*
 * HAL register offset table for IPQ8074.
 * Presumably selected via the .regs pointer of the corresponding entry in
 * the hw_params table above (that entry is defined elsewhere in this file).
 * All values are hardware-defined offsets; do not modify.
 */
const struct ath11k_hw_regs ipq8074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000510,
	.hal_tcl1_ring_base_msb = 0x00000514,
	.hal_tcl1_ring_id = 0x00000518,
	.hal_tcl1_ring_misc = 0x00000520,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000530,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000558,
	.hal_tcl1_ring_msi1_base_msb = 0x0000055c,
	.hal_tcl1_ring_msi1_data = 0x00000560,
	.hal_tcl2_ring_base_lsb = 0x00000568,
	.hal_tcl_ring_base_lsb = 0x00000618,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000720,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address (zero: not applicable on this chip) */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area (zero: no shadow register support) */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in IPQ8074 */
	.hal_reo1_misc_ctl = 0x0,
};
4087 
/*
 * HAL register offset table for QCA6390.
 * Presumably selected via the .regs pointer of the corresponding entry in
 * the hw_params table above (that entry starts before this chunk).
 * All values are hardware-defined offsets; do not modify.
 */
const struct ath11k_hw_regs qca6390_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000684,
	.hal_tcl1_ring_base_msb = 0x00000688,
	.hal_tcl1_ring_id = 0x0000068c,
	.hal_tcl1_ring_misc = 0x00000694,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006a4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006d0,
	.hal_tcl1_ring_msi1_data = 0x000006d4,
	.hal_tcl2_ring_base_lsb = 0x000006dc,
	.hal_tcl_ring_base_lsb = 0x0000078c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000894,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x00000244,
	.hal_reo1_ring_base_msb = 0x00000248,
	.hal_reo1_ring_id = 0x0000024c,
	.hal_reo1_ring_misc = 0x00000254,
	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
	.hal_reo1_ring_producer_int_setup = 0x00000268,
	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
	.hal_reo1_ring_msi1_base_msb = 0x00000290,
	.hal_reo1_ring_msi1_data = 0x00000294,
	.hal_reo2_ring_base_lsb = 0x0000029c,
	.hal_reo1_aging_thresh_ix_0 = 0x0000050c,
	.hal_reo1_aging_thresh_ix_1 = 0x00000510,
	.hal_reo1_aging_thresh_ix_2 = 0x00000514,
	.hal_reo1_aging_thresh_ix_3 = 0x00000518,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003030,
	.hal_reo1_ring_tp = 0x00003034,
	.hal_reo2_ring_hp = 0x00003038,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003a4,
	.hal_reo_tcl_ring_hp = 0x00003050,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x000004ac,
	.hal_reo_status_hp = 0x00003068,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x000008fc,

	/* REO misc control register, not used in QCA6390 */
	.hal_reo1_misc_ctl = 0x0,
};
4172 
/*
 * HAL register offset table for QCN9074.
 * Selected via .regs = &qcn9074_regs in the "qcn9074 hw1.0" entry of the
 * hw_params table above. All values are hardware-defined offsets; do not
 * modify.
 */
const struct ath11k_hw_regs qcn9074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x000004f0,
	.hal_tcl1_ring_base_msb = 0x000004f4,
	.hal_tcl1_ring_id = 0x000004f8,
	.hal_tcl1_ring_misc = 0x00000500,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000510,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000538,
	.hal_tcl1_ring_msi1_base_msb = 0x0000053c,
	.hal_tcl1_ring_msi1_data = 0x00000540,
	.hal_tcl2_ring_base_lsb = 0x00000548,
	.hal_tcl_ring_base_lsb = 0x000005f8,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000700,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
	.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,

	/* Shadow register area (zero: .supports_shadow_regs is false above) */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in QCN9074 */
	.hal_reo1_misc_ctl = 0x0,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,
};
4257 
/*
 * HAL register offset table for WCN6855.
 * Selected via .regs = &wcn6855_regs in the "wcn6855 hw2.0" and
 * "wcn6855 hw2.1" entries of the hw_params table above. All values are
 * hardware-defined offsets; do not modify.
 */
const struct ath11k_hw_regs wcn6855_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000690,
	.hal_tcl1_ring_base_msb = 0x00000694,
	.hal_tcl1_ring_id = 0x00000698,
	.hal_tcl1_ring_misc = 0x000006a0,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
	.hal_tcl1_ring_tp_addr_msb = 0x000006b0,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
	.hal_tcl1_ring_msi1_base_msb = 0x000006dc,
	.hal_tcl1_ring_msi1_data = 0x000006e0,
	.hal_tcl2_ring_base_lsb = 0x000006e8,
	.hal_tcl_ring_base_lsb = 0x00000798,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x000008a0,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x00000244,
	.hal_reo1_ring_base_msb = 0x00000248,
	.hal_reo1_ring_id = 0x0000024c,
	.hal_reo1_ring_misc = 0x00000254,
	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
	.hal_reo1_ring_producer_int_setup = 0x00000268,
	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
	.hal_reo1_ring_msi1_base_msb = 0x00000290,
	.hal_reo1_ring_msi1_data = 0x00000294,
	.hal_reo2_ring_base_lsb = 0x0000029c,
	.hal_reo1_aging_thresh_ix_0 = 0x000005bc,
	.hal_reo1_aging_thresh_ix_1 = 0x000005c0,
	.hal_reo1_aging_thresh_ix_2 = 0x000005c4,
	.hal_reo1_aging_thresh_ix_3 = 0x000005c8,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003030,
	.hal_reo1_ring_tp = 0x00003034,
	.hal_reo2_ring_hp = 0x00003038,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x00000454,
	.hal_reo_tcl_ring_hp = 0x00003060,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x0000055c,
	.hal_reo_status_hp = 0x00003078,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000870,
	.hal_wbm_idle_link_ring_misc = 0x00000880,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001e8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000920,
	.hal_wbm1_release_ring_base_lsb = 0x00000978,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x000008fc,

	/* REO misc control register, used for fragment
	 * destination ring config in WCN6855.
	 */
	.hal_reo1_misc_ctl = 0x00000630,
};
4344 
/*
 * HAL register offset table for WCN6750.
 * Selected via .regs = &wcn6750_regs in the "wcn6750 hw1.0" entry of the
 * hw_params table above. All values are hardware-defined offsets; do not
 * modify.
 */
const struct ath11k_hw_regs wcn6750_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000694,
	.hal_tcl1_ring_base_msb = 0x00000698,
	.hal_tcl1_ring_id = 0x0000069c,
	.hal_tcl1_ring_misc = 0x000006a4,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
	.hal_tcl1_ring_msi1_data = 0x000006e4,
	.hal_tcl2_ring_base_lsb = 0x000006ec,
	.hal_tcl_ring_base_lsb = 0x0000079c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x000008a4,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x000001ec,
	.hal_reo1_ring_base_msb = 0x000001f0,
	.hal_reo1_ring_id = 0x000001f4,
	.hal_reo1_ring_misc = 0x000001fc,
	.hal_reo1_ring_hp_addr_lsb = 0x00000200,
	.hal_reo1_ring_hp_addr_msb = 0x00000204,
	.hal_reo1_ring_producer_int_setup = 0x00000210,
	.hal_reo1_ring_msi1_base_lsb = 0x00000234,
	.hal_reo1_ring_msi1_base_msb = 0x00000238,
	.hal_reo1_ring_msi1_data = 0x0000023c,
	.hal_reo2_ring_base_lsb = 0x00000244,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003028,
	.hal_reo1_ring_tp = 0x0000302c,
	.hal_reo2_ring_hp = 0x00003030,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x000000e4,
	.hal_reo_cmd_ring_hp = 0x00003010,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x0000013c,
	.hal_sw2reo_ring_hp = 0x00003018,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address (zero: not applicable on this chip) */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x00000504,

	/* REO misc control register, used for fragment
	 * destination ring config in WCN6750.
	 */
	.hal_reo1_misc_ctl = 0x000005d8,
};
4431 
/*
 * Single-bit platform capability flags (0x02, 0x04, 0x08, 0x10).
 * NOTE(review): presumably OR'ed together and reported to firmware as part
 * of a QMI host capability request -- confirm against the QMI request code
 * elsewhere in this file.
 */
#define QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
#define QWX_HOST_CSTATE_BIT			0x04
#define QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
#define QWX_PLATFORM_CAP_PCIE_PME_D3COLD	0x10
4436 
/*
 * QMI wire-format description of the common response structure
 * (struct qmi_response_type_v01): two signed 16-bit enum fields,
 * "result" and "error", terminated by a QMI_EOTI sentinel entry.
 * This table drives generic QMI TLV encode/decode; the entries must
 * match the struct layout exactly.
 */
const struct qmi_elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, result),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, error),
		.ei_array	= NULL,
	},
	{
		/* end-of-table sentinel */
		.data_type	= QMI_EOTI,
		.elem_len	= 0,
		.elem_size	= 0,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= 0,
		.ei_array	= NULL,
	},
};
4466 
/*
 * QMI wire-format description of the firmware indication-register request
 * (struct qmi_wlanfw_ind_register_req_msg_v01). Each optional field is
 * encoded as a pair of entries sharing one TLV type: a QMI_OPT_FLAG entry
 * for the *_valid member followed by the value entry itself. TLV types
 * 0x10-0x1B cover the individual event-enable flags and the client id.
 * Terminated by a QMI_EOTI sentinel entry; must match the struct layout
 * exactly.
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
	/* tlv 0x10: fw_ready_enable (opt flag + value) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable),
	},
	/* tlv 0x11: initiate_cal_download_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable),
	},
	/* tlv 0x12: initiate_cal_update_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable),
	},
	/* tlv 0x13: msa_ready_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable),
	},
	/* tlv 0x14: pin_connect_result_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable),
	},
	/* tlv 0x15: client_id (32-bit value, unlike the 8-bit enables) */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id),
	},
	/* tlv 0x16: request_mem_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable),
	},
	/* tlv 0x17: fw_mem_ready_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable),
	},
	/* tlv 0x18: fw_init_done_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable),
	},

	/* tlv 0x19: rejuvenate_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable),
	},
	/* tlv 0x1A: xo_cal_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable),
	},
	/* tlv 0x1B: cal_done_enable */
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable),
	},
	{
		/* end-of-table sentinel */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4691 
/*
 * QMI element-info table for the firmware "indication register" response.
 * Each entry maps one TLV of the wire message onto a field of
 * struct qmi_wlanfw_ind_register_resp_msg_v01 (type, size, offset).
 * Optional TLVs are encoded as a QMI_OPT_FLAG "<field>_valid" entry
 * followed by the value entry carrying the same .tlv_type.
 * The QMI_EOTI entry terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
	{
		/* Mandatory generic QMI result/error pair. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* Presence flag for the optional fw_status TLV below. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4727 
/*
 * QMI element-info table for the "host capabilities" request the driver
 * sends to firmware.  Every capability is an optional TLV: a QMI_OPT_FLAG
 * "<field>_valid" entry followed by the value entry with the same
 * .tlv_type.  Entry order must match the wire encoding walk; do not
 * reorder.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   num_clients_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   num_clients),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   wake_msi_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   wake_msi),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios_valid),
	},
	{
		/*
		 * Variable-length GPIO array: gpios_len (QMI_DATA_LEN)
		 * carries the element count for the gpios entry below,
		 * which shares tlv_type 0x12.
		 */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios_len),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= QMI_WLFW_MAX_NUM_GPIO_V01,
		.elem_size	= sizeof(uint32_t),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   nm_modem_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   nm_modem),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_filesys_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_filesys_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_done_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_done),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_bucket_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_bucket),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_cfg_mode_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_cfg_mode),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4978 
/*
 * QMI element-info table for the "host capabilities" response: only the
 * mandatory generic result TLV, followed by the QMI_EOTI terminator.
 */
const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4995 
/*
 * QMI element-info table for one memory-configuration record
 * (struct qmi_wlanfw_mem_cfg_s_v01): offset, size, and secure flag.
 * Used as a nested struct (ei_array) inside qmi_wlanfw_mem_seg_s_v01_ei,
 * hence tlv_type 0 on all entries.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5027 
/*
 * QMI element-info table for one firmware memory-segment request record
 * (struct qmi_wlanfw_mem_seg_s_v01).  Nested inside the request_mem
 * indication table via ei_array, hence tlv_type 0.  QMI_EOTI terminates
 * the table.
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01,
				  size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
	},
	{
		/*
		 * mem_cfg_len (QMI_DATA_LEN) carries the element count for
		 * the variable-length mem_cfg array below.
		 */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
		.ei_array	= qmi_wlanfw_mem_cfg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5069 
/*
 * QMI element-info table for the "request memory" indication firmware
 * sends to the host: a variable-length array of memory segments, with
 * mem_seg_len (QMI_DATA_LEN) carrying the element count for mem_seg
 * (both TLV 0x01).  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5096 
/*
 * QMI element-info table for one memory-segment record in the host's
 * "respond memory" request (struct qmi_wlanfw_mem_seg_resp_s_v01):
 * DMA address, size, type and restore flag.  Nested via ei_array,
 * hence tlv_type 0.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5136 
/*
 * QMI element-info table for the "respond memory" request the host sends
 * back to firmware: a variable-length array of allocated segments, with
 * mem_seg_len (QMI_DATA_LEN) carrying the element count for mem_seg
 * (both TLV 0x01).  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_resp_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5163 
/*
 * QMI element-info table for the "respond memory" response: only the
 * mandatory generic result TLV, followed by the QMI_EOTI terminator.
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5181 
/*
 * QMI element-info table for the "capabilities" request: the message
 * carries no TLVs, so the table is just the QMI_EOTI terminator.
 */
const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5189 
/*
 * QMI element-info table for the RF chip info record (chip id and chip
 * family).  Nested inside the capabilities response via ei_array, hence
 * tlv_type 0.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_family),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5215 
/*
 * QMI element-info table for the RF board info record (board id only).
 * Nested inside the capabilities response via ei_array, hence
 * tlv_type 0.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
					   board_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5232 
/*
 * QMI element-info table for the SoC info record (soc id only).
 * Nested inside the capabilities response via ei_array, hence
 * tlv_type 0.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5248 
/*
 * QMI element-info table for the firmware version record: numeric
 * version plus a bounded build-timestamp string (elem_len includes
 * room for the NUL terminator).  Nested inside the capabilities
 * response via ei_array, hence tlv_type 0.  QMI_EOTI terminates the
 * table.
 */
const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_version),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_build_timestamp),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5274 
/*
 * QMI element-info table for the "capabilities" response from firmware:
 * the mandatory result TLV followed by optional chip/board/SoC/version
 * information.  Optional TLVs are encoded as a QMI_OPT_FLAG
 * "<field>_valid" entry followed by the value entry with the same
 * .tlv_type.  Entry order must match the wire encoding walk; do not
 * reorder.  QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
	{
		/* Mandatory generic QMI result/error pair. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info),
		.ei_array	= qmi_wlanfw_rf_chip_info_s_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info),
		.ei_array	= qmi_wlanfw_rf_board_info_s_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_soc_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info),
		.ei_array	= qmi_wlanfw_soc_info_s_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info),
		.ei_array	= qmi_wlanfw_fw_version_info_s_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id_valid),
	},
	{
		/* Bounded string; elem_len includes the NUL terminator. */
		.data_type	= QMI_STRING,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs),
	},
	{
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv),
	},
	{
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz),
	},
	{
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version),
	},
	{
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5475 
/*
 * QMI element-info table for the board-data-file (BDF) download request.
 * The host streams the BDF to firmware in segments: file id, total size,
 * segment id, a variable-length data chunk, an end-of-file marker and
 * the BDF type.  Optional TLVs are a QMI_OPT_FLAG "<field>_valid" entry
 * followed by the value entry with the same .tlv_type.  QMI_EOTI
 * terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
	{
		/* Mandatory TLV. */
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   valid),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id_valid),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_valid),
	},
	{
		/*
		 * Variable-length payload: data_len (QMI_DATA_LEN, 16-bit)
		 * carries the byte count for the data array below, which
		 * shares tlv_type 0x13.
		 */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_len),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= QMI_WLANFW_MAX_DATA_SIZE_V01,
		.elem_size	= sizeof(uint8_t),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type),
	},

	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5610 
/*
 * QMI element-info table for the BDF download response: only the
 * mandatory generic result TLV, followed by the QMI_EOTI terminator.
 */
const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5628 
/*
 * QMI element-info table for the M3 firmware image info request: two
 * mandatory TLVs giving the image's DMA address (0x01) and size (0x02).
 * QMI_EOTI terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5652 
/*
 * QMI element-info table for the M3 info response: only the mandatory
 * generic result TLV, followed by the QMI_EOTI terminator.
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5669 
/*
 * QMI element-info table for the WLAN INI request: one optional TLV
 * (enablefwlog) encoded as a QMI_OPT_FLAG "enablefwlog_valid" entry
 * followed by the value entry with the same .tlv_type.  QMI_EOTI
 * terminates the table.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5695 
/*
 * QMI element info for the "wlan ini" response: standard QMI
 * result TLV only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
	{
		/* Mandatory TLV 0x02: generic QMI result/error code. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5713 
/*
 * QMI element info for one copy-engine target pipe configuration
 * entry. These describe members of a struct nested inside the wlan
 * config request; nested members are decoded sequentially by
 * qwx_qmi_decode_struct(), so tlv_type is 0 (unused) here.
 */
const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
	{
		/* Copy engine pipe number. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		/* Pipe direction (enum qmi_wlanfw_pipedir_enum_v01). */
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		/* Number of ring entries. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nentries),
	},
	{
		/* Maximum transfer size in bytes. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nbytes_max),
	},
	{
		/* Pipe flags. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   flags),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5766 
/*
 * QMI element info for one service-to-copy-engine-pipe mapping
 * entry. Nested struct members (see qwx_qmi_decode_struct()), so
 * tlv_type is 0 (unused).
 */
const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
	{
		/* Service identifier. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   service_id),
	},
	{
		/* Pipe direction (enum qmi_wlanfw_pipedir_enum_v01). */
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		/* Copy engine pipe number carrying this service. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5801 
5802 const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
5803 	{
5804 		.data_type	= QMI_UNSIGNED_2_BYTE,
5805 		.elem_len	= 1,
5806 		.elem_size	= sizeof(uint16_t),
5807 		.array_type	= NO_ARRAY,
5808 		.tlv_type	= 0,
5809 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
5810 	},
5811 	{
5812 		.data_type	= QMI_UNSIGNED_2_BYTE,
5813 		.elem_len	= 1,
5814 		.elem_size	= sizeof(uint16_t),
5815 		.array_type	= NO_ARRAY,
5816 		.tlv_type	= 0,
5817 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
5818 					   offset),
5819 	},
5820 	{
5821 		.data_type	= QMI_EOTI,
5822 		.array_type	= QMI_COMMON_TLV_TYPE,
5823 	},
5824 };
5825 
/*
 * QMI element info for one shadow register (v2) configuration
 * entry: a single register address. Nested struct member.
 */
const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
	{
		/* Shadow register address. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
					   addr),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5842 
/*
 * QMI element info for the "wlan mode" request: mandatory mode
 * (TLV 0x01) plus an optional hw_debug flag (TLV 0x10).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
	{
		/* Mandatory TLV 0x01: requested firmware mode. */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   mode),
	},
	{
		/* Optional TLV 0x10: presence flag for hw_debug. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug_valid),
	},
	{
		/* TLV 0x10 payload: hw_debug value. */
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug),
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5877 
/*
 * QMI element info for the "wlan mode" response: standard QMI
 * result TLV only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
	{
		/* Mandatory TLV 0x02: generic QMI result/error code. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
5895 
/*
 * QMI element info for the "wlan config" request, which hands the
 * firmware the host version string, copy-engine target pipe
 * configuration, service-to-pipe map, and shadow register lists.
 * Every TLV here is optional: each is a pair (or triple, for
 * variable-length arrays) sharing one tlv_type — an OPT_FLAG
 * "_valid" marker, an optional DATA_LEN element count, and the
 * payload itself.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
	{
		/* Optional TLV 0x10: presence flag for host_version. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version_valid),
	},
	{
		/* TLV 0x10 payload: host version string. */
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLANFW_MAX_STR_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version),
	},
	{
		/* Optional TLV 0x11: presence flag for tgt_cfg. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_valid),
	},
	{
		/* TLV 0x11: number of tgt_cfg array elements. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_len),
	},
	{
		/* TLV 0x11 payload: CE target pipe configuration array. */
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_CE_V01,
		.elem_size	= sizeof(
				struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg),
		.ei_array	= qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
	},
	{
		/* Optional TLV 0x12: presence flag for svc_cfg. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_valid),
	},
	{
		/* TLV 0x12: number of svc_cfg array elements. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_len),
	},
	{
		/* TLV 0x12 payload: service-to-pipe mapping array. */
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SVC_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg),
		.ei_array	= qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
	},
	{
		/* Optional TLV 0x13: presence flag for shadow_reg. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_valid),
	},
	{
		/* TLV 0x13: number of shadow_reg array elements. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_len),
	},
	{
		/* TLV 0x13 payload: shadow register configuration array. */
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg),
		.ei_array	= qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
	},
	{
		/* Optional TLV 0x14: presence flag for shadow_reg_v2. */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_valid),
	},
	{
		/* TLV 0x14: number of shadow_reg_v2 array elements. */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_len),
	},
	{
		/* TLV 0x14 payload: shadow register v2 address array. */
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2),
		.ei_array	= qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
6034 
/*
 * QMI element info for the "wlan config" response: standard QMI
 * result TLV only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
	{
		/* Mandatory TLV 0x02: generic QMI result/error code. */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* End-of-table terminator. */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
6051 
6052 int
qwx_ce_intr(void * arg)6053 qwx_ce_intr(void *arg)
6054 {
6055 	struct qwx_ce_pipe *pipe = arg;
6056 	struct qwx_softc *sc = pipe->sc;
6057 
6058 	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) ||
6059 	    ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) {
6060 		DPRINTF("%s: unexpected interrupt on pipe %d\n",
6061 		    __func__, pipe->pipe_num);
6062 		return 1;
6063 	}
6064 
6065 	return qwx_ce_per_engine_service(sc, pipe->pipe_num);
6066 }
6067 
6068 int
qwx_ext_intr(void * arg)6069 qwx_ext_intr(void *arg)
6070 {
6071 	struct qwx_ext_irq_grp *irq_grp = arg;
6072 	struct qwx_softc *sc = irq_grp->sc;
6073 
6074 	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
6075 		DPRINTF("%s: unexpected interrupt for ext group %d\n",
6076 		    __func__, irq_grp->grp_id);
6077 		return 1;
6078 	}
6079 
6080 	return qwx_dp_service_srng(sc, irq_grp->grp_id);
6081 }
6082 
/*
 * Human-readable names for QMI element data types, used in debug
 * and error messages. Indexed directly by ei->data_type, so the
 * order must match the data type enum.
 */
const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = {
	"EOTI",
	"OPT_FLAG",
	"DATA_LEN",
	"UNSIGNED_1_BYTE",
	"UNSIGNED_2_BYTE",
	"UNSIGNED_4_BYTE",
	"UNSIGNED_8_BYTE",
	"SIGNED_2_BYTE_ENUM",
	"SIGNED_4_BYTE_ENUM",
	"STRUCT",
	"STRING"
};
6096 
6097 const struct qmi_elem_info *
qwx_qmi_decode_get_elem(const struct qmi_elem_info * ei,uint8_t elem_type)6098 qwx_qmi_decode_get_elem(const struct qmi_elem_info *ei, uint8_t elem_type)
6099 {
6100 	while (ei->data_type != QMI_EOTI && ei->tlv_type != elem_type)
6101 		ei++;
6102 
6103 	DNPRINTF(QWX_D_QMI, "%s: found elem 0x%x data type 0x%x\n", __func__,
6104 	    ei->tlv_type, ei->data_type);
6105 	return ei;
6106 }
6107 
/*
 * Return the minimum number of bytes the QMI element described by
 * 'ei' occupies in encoded (wire) form. Used to validate the size
 * field of incoming STRING and STRUCT TLVs before decoding.
 * 'nested' is the current struct nesting depth: strings nested in
 * structs carry an in-band length field, and struct recursion
 * deeper than two levels is flagged.
 */
size_t
qwx_qmi_decode_min_elem_size(const struct qmi_elem_info *ei, int nested)
{
	size_t min_size = 0;

	switch (ei->data_type) {
	case QMI_EOTI:
	case QMI_OPT_FLAG:
		/* These contribute no payload bytes of their own. */
		break;
	case QMI_DATA_LEN:
		/* In-band element count: one byte, or two for wider counts. */
		if (ei->elem_len == 1)
			min_size += sizeof(uint8_t);
		else
			min_size += sizeof(uint16_t);
		break;
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size scalar, possibly a fixed-length array of them. */
		min_size += ei->elem_len * ei->elem_size;
		break;
	case QMI_STRUCT:
		/* NOTE(review): warns only; recursion still proceeds. */
		if (nested > 2) {
			printf("%s: QMI struct element 0x%x with "
			    "data type %s (0x%x) is nested too "
			    "deeply\n", __func__,
			    ei->tlv_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type);
		}
		/* Sum the minimum sizes of all struct members. */
		ei = ei->ei_array;
		while (ei->data_type != QMI_EOTI) {
			min_size += qwx_qmi_decode_min_elem_size(ei,
			    nested + 1);
			ei++;
		}
		break;
	case QMI_STRING:
		min_size += 1;
		/* Strings nested in structs use an in-band length field. */
		if (nested) {
			if (ei->elem_len <= 0xff)
				min_size += sizeof(uint8_t);
			else
				min_size += sizeof(uint16_t);
		}
		break;
	default:
		printf("%s: unhandled data type 0x%x\n", __func__,
		    ei->data_type);
		break;
	}

	return min_size;
}
6165 
/*
 * Parse and validate one TLV header from a QMI message payload.
 *
 * On success, *next_ei points at the EI entry matching the TLV's
 * type (or at the EI table's QMI_EOTI terminator if the type is
 * unrecognized, which callers treat as "skip this element"), and
 * *actual_size holds the element size read from the header.
 * Returns 0 on success, -1 if the message is malformed or the
 * element would not fit in the output struct of 'output_len' bytes.
 */
int
qwx_qmi_decode_tlv_hdr(struct qwx_softc *sc,
    const struct qmi_elem_info **next_ei, uint16_t *actual_size,
    size_t output_len, const struct qmi_elem_info *ei0,
    uint8_t *input, size_t input_len)
{
	uint8_t *p = input;
	size_t remain = input_len;
	uint8_t elem_type;
	uint16_t elem_size = 0;
	const struct qmi_elem_info *ei;

	*next_ei = NULL;
	*actual_size = 0;

	/* A TLV header is one type byte plus a size field. */
	if (remain < 3) {
		printf("%s: QMI message TLV header too short\n",
		   sc->sc_dev.dv_xname);
		return -1;
	}
	elem_type = *p;
	p++;
	remain--;

	/*
	 * By relying on TLV type information we can skip over EIs which
	 * describe optional elements that have not been encoded.
	 * Such elements will be left at their default value (zero) in
	 * the decoded output struct.
	 * XXX We currently allow elements to appear in any order and
	 * we do not detect duplicates.
	 */
	ei = qwx_qmi_decode_get_elem(ei0, elem_type);

	DNPRINTF(QWX_D_QMI,
	    "%s: decoding element 0x%x with data type %s (0x%x)\n",
	    __func__, elem_type, qmi_data_type_name[ei->data_type],
	    ei->data_type);

	if (remain < 2) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/*
	 * Size field: one byte for DATA_LEN elements declared with a
	 * one-byte count, two little-endian bytes otherwise.
	 */
	if (ei->data_type == QMI_DATA_LEN && ei->elem_len == 1) {
		elem_size = p[0];
		p++;
		remain--;
	} else {
		elem_size = (p[0] | (p[1] << 8));
		p += 2;
		remain -= 2;
	}

	*next_ei = ei;
	*actual_size = elem_size;

	/* Unknown TLV type: report it to the caller for skipping. */
	if (ei->data_type == QMI_EOTI) {
		DNPRINTF(QWX_D_QMI,
		    "%s: unrecognized QMI element type 0x%x size %u\n",
		    sc->sc_dev.dv_xname, elem_type, elem_size);
		return 0;
	}

	/*
	 * Is this an optional element which has been encoded?
	 * If so, use info about this optional element for verification.
	 */
	if (ei->data_type == QMI_OPT_FLAG)
		ei++;

	DNPRINTF(QWX_D_QMI, "%s: ei->size %u, actual size %u\n", __func__,
	    ei->elem_size, *actual_size);

	/* Validate the advertised size against the element's EI. */
	switch (ei->data_type) {
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size scalars must match exactly. */
		if (elem_size != ei->elem_size) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	case QMI_DATA_LEN:
		break;
	case QMI_STRING:
	case QMI_STRUCT:
		/* Variable-size elements must meet their minimum size. */
		if (elem_size < qwx_qmi_decode_min_elem_size(ei, 0)) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	default:
		printf("%s: unexpected QMI message element "
		    "data type 0x%x\n", sc->sc_dev.dv_xname,
		    ei->data_type);
		return -1;
	}

	/* The payload must actually be present in the input buffer... */
	if (remain < elem_size) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* ...and the decoded element must fit in the output struct. */
	if (ei->offset + ei->elem_size > output_len) {
		printf("%s: QMI message element type 0x%x too large: %u\n",
		    sc->sc_dev.dv_xname, elem_type, ei->elem_size);
		return -1;
	}

	return 0;
}
6289 
6290 int
qwx_qmi_decode_byte(void * output,const struct qmi_elem_info * ei,void * input)6291 qwx_qmi_decode_byte(void *output, const struct qmi_elem_info *ei, void *input)
6292 {
6293 	if (ei->elem_size != sizeof(uint8_t)) {
6294 		printf("%s: bad element size\n", __func__);
6295 		return -1;
6296 	}
6297 
6298 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6299 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6300 	memcpy(output, input, ei->elem_size);
6301 	return 0;
6302 }
6303 
6304 int
qwx_qmi_decode_word(void * output,const struct qmi_elem_info * ei,void * input)6305 qwx_qmi_decode_word(void *output, const struct qmi_elem_info *ei, void *input)
6306 {
6307 	if (ei->elem_size != sizeof(uint16_t)) {
6308 		printf("%s: bad element size\n", __func__);
6309 		return -1;
6310 	}
6311 
6312 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6313 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6314 	memcpy(output, input, ei->elem_size);
6315 	return 0;
6316 }
6317 
6318 int
qwx_qmi_decode_dword(void * output,const struct qmi_elem_info * ei,void * input)6319 qwx_qmi_decode_dword(void *output, const struct qmi_elem_info *ei, void *input)
6320 {
6321 	if (ei->elem_size != sizeof(uint32_t)) {
6322 		printf("%s: bad element size\n", __func__);
6323 		return -1;
6324 	}
6325 
6326 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6327 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6328 	memcpy(output, input, ei->elem_size);
6329 	return 0;
6330 }
6331 
6332 int
qwx_qmi_decode_qword(void * output,const struct qmi_elem_info * ei,void * input)6333 qwx_qmi_decode_qword(void *output, const struct qmi_elem_info *ei, void *input)
6334 {
6335 	if (ei->elem_size != sizeof(uint64_t)) {
6336 		printf("%s: bad element size\n", __func__);
6337 		return -1;
6338 	}
6339 
6340 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
6341 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
6342 	memcpy(output, input, ei->elem_size);
6343 	return 0;
6344 }
6345 
6346 int
qwx_qmi_decode_datalen(struct qwx_softc * sc,size_t * used,uint32_t * datalen,void * output,size_t output_len,const struct qmi_elem_info * ei,uint8_t * input,uint16_t input_len)6347 qwx_qmi_decode_datalen(struct qwx_softc *sc, size_t *used, uint32_t *datalen,
6348     void *output, size_t output_len, const struct qmi_elem_info *ei,
6349     uint8_t *input, uint16_t input_len)
6350 {
6351 	uint8_t *p = input;
6352 	size_t remain = input_len;
6353 
6354 	*datalen = 0;
6355 
6356 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6357 	for (int i = 0; i < input_len; i++) {
6358 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6359 	}
6360 	DNPRINTF(QWX_D_QMI, "\n");
6361 
6362 	if (remain < ei->elem_size) {
6363 		printf("%s: QMI message too short: remain=%zu elem_size=%u\n", __func__, remain, ei->elem_size);
6364 		return -1;
6365 	}
6366 
6367 	switch (ei->elem_size) {
6368 	case sizeof(uint8_t):
6369 		*datalen = p[0];
6370 		break;
6371 	case sizeof(uint16_t):
6372 		*datalen = p[0] | (p[1] << 8);
6373 		break;
6374 	default:
6375 		printf("%s: bad datalen element size %u\n",
6376 		    sc->sc_dev.dv_xname, ei->elem_size);
6377 		return -1;
6378 
6379 	}
6380 	*used = ei->elem_size;
6381 
6382 	if (ei->offset + sizeof(*datalen) > output_len) {
6383 		printf("%s: QMI message element type 0x%x too large\n",
6384 		    sc->sc_dev.dv_xname, ei->tlv_type);
6385 		return -1;
6386 	}
6387 	memcpy(output + ei->offset, datalen, sizeof(*datalen));
6388 	return 0;
6389 }
6390 
6391 int
qwx_qmi_decode_string(struct qwx_softc * sc,size_t * used_total,void * output,size_t output_len,const struct qmi_elem_info * ei,uint8_t * input,uint16_t input_len,uint16_t elem_size,int nested)6392 qwx_qmi_decode_string(struct qwx_softc *sc, size_t *used_total,
6393     void *output, size_t output_len, const struct qmi_elem_info *ei,
6394     uint8_t *input, uint16_t input_len, uint16_t elem_size, int nested)
6395 {
6396 	uint8_t *p = input;
6397 	uint16_t len;
6398 	size_t remain = input_len;
6399 
6400 	*used_total = 0;
6401 
6402 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6403 	for (int i = 0; i < input_len; i++) {
6404 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6405 	}
6406 	DNPRINTF(QWX_D_QMI, "\n");
6407 
6408 	if (nested) {
6409 		/* Strings nested in structs use an in-band length field. */
6410 		if (ei->elem_len <= 0xff) {
6411 			if (remain == 0) {
6412 				printf("%s: QMI string length header exceeds "
6413 				    "input buffer size\n", __func__);
6414 				return -1;
6415 			}
6416 			len = p[0];
6417 			p++;
6418 			(*used_total)++;
6419 			remain--;
6420 		} else {
6421 			if (remain < 2) {
6422 				printf("%s: QMI string length header exceeds "
6423 				    "input buffer size\n", __func__);
6424 				return -1;
6425 			}
6426 			len = p[0] | (p[1] << 8);
6427 			p += 2;
6428 			*used_total += 2;
6429 			remain -= 2;
6430 		}
6431 	} else
6432 		len = elem_size;
6433 
6434 	if (len > ei->elem_len) {
6435 		printf("%s: QMI string element of length %u exceeds "
6436 		    "maximum length %u\n", __func__, len, ei->elem_len);
6437 		return -1;
6438 	}
6439 	if (len > remain) {
6440 		printf("%s: QMI string element of length %u exceeds "
6441 		    "input buffer size %zu\n", __func__, len, remain);
6442 		return -1;
6443 	}
6444 	if (len > output_len) {
6445 		printf("%s: QMI string element of length %u exceeds "
6446 		    "output buffer size %zu\n", __func__, len, output_len);
6447 		return -1;
6448 	}
6449 
6450 	memcpy(output, p, len);
6451 
6452 	p = output;
6453 	p[len] = '\0';
6454 	DNPRINTF(QWX_D_QMI, "%s: string (len %u): %s\n", __func__, len, p);
6455 
6456 	*used_total += len;
6457 	return 0;
6458 }
6459 
/*
 * Decode a QMI_STRUCT element into its C struct representation.
 * 'struct_ei' describes the struct; its ei_array lists the members
 * in wire order, and members are decoded sequentially (tlv_type is
 * not consulted for nested members). Decoding stops once at least
 * the struct's minimum encoded size has been consumed or the member
 * list ends. Returns 0 on success with *used_total set to the
 * number of input bytes consumed, -1 on malformed input.
 * 'nested' is the recursion depth; nesting deeper than two levels
 * of sub-structs is rejected.
 */
int
qwx_qmi_decode_struct(struct qwx_softc *sc, size_t *used_total,
    void *output, size_t output_len,
    const struct qmi_elem_info *struct_ei,
    uint8_t *input, uint16_t input_len,
    int nested)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	uint32_t min_size;
	uint8_t *p = input;
	size_t remain = input_len;
	size_t used = 0;

	*used_total = 0;

	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
	for (int i = 0; i < input_len; i++) {
		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
	}
	DNPRINTF(QWX_D_QMI, "\n");

	min_size = qwx_qmi_decode_min_elem_size(struct_ei, 0);
	DNPRINTF(QWX_D_QMI, "%s: minimum struct size: %u\n", __func__, min_size);
	while (*used_total < min_size && ei->data_type != QMI_EOTI) {
		if (remain == 0) {
			printf("%s: QMI message too short\n", __func__);
			return -1;
		}

		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen;

			/* In-band count for a variable-length member. */
			used = 0;
			if (qwx_qmi_decode_datalen(sc, &used, &datalen,
			    output, output_len, ei, p, remain))
				return -1;
			DNPRINTF(QWX_D_QMI, "%s: datalen %u used %zu bytes\n",
			    __func__, datalen, used);
			p += used;
			remain -= used;
			*used_total += used;
			if (remain < datalen) {
				printf("%s: QMI message too short\n", __func__);
				return -1;
			}
			ei++;
			DNPRINTF(QWX_D_QMI, "%s: datalen is for data_type=0x%x "
			    "tlv_type=0x%x elem_size=%u(0x%x) remain=%zu\n",
			    __func__, ei->data_type, ei->tlv_type,
			    ei->elem_size, ei->elem_size, remain);
			if (datalen == 0) {
				/* Empty array: skip the payload member. */
				ei++;
				DNPRINTF(QWX_D_QMI,
				    "%s: skipped to data_type=0x%x "
				    "tlv_type=0x%x elem_size=%u(0x%x) "
				    "remain=%zu\n", __func__,
				    ei->data_type, ei->tlv_type,
				    ei->elem_size, ei->elem_size, remain);
				continue;
			}
		} else {
			if (remain < ei->elem_size) {
				printf("%s: QMI message too short\n",
				    __func__);
				return -1;
			}
		}

		/* The decoded member must fit inside the output struct. */
		if (ei->offset + ei->elem_size > output_len) {
			printf("%s: QMI message struct member element "
			    "type 0x%x too large: %u\n", sc->sc_dev.dv_xname,
			    ei->tlv_type, ei->elem_size);
			return -1;
		}

		DNPRINTF(QWX_D_QMI,
		    "%s: decoding struct member element 0x%x with "
		    "data type %s (0x%x) size=%u(0x%x) remain=%zu\n", __func__,
		    ei->tlv_type, qmi_data_type_name[ei->data_type],
		    ei->data_type, ei->elem_size, ei->elem_size, remain);
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_decode_byte(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_2_BYTE:
		case QMI_SIGNED_2_BYTE_ENUM:
			if (qwx_qmi_decode_word(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_decode_dword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_decode_qword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_STRUCT:
			/* Sub-struct: recurse, bounded by 'nested'. */
			if (nested > 2) {
				printf("%s: QMI struct element data type 0x%x "
				    "is nested too deeply\n",
				    sc->sc_dev.dv_xname, ei->data_type);
				return -1;
			}
			used = 0;
			if (qwx_qmi_decode_struct(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, nested + 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		case QMI_STRING:
			/* Nested string: carries an in-band length field. */
			used = 0;
			if (qwx_qmi_decode_string(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, 0, 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		default:
			printf("%s: unhandled QMI struct element "
			    "data type 0x%x\n", sc->sc_dev.dv_xname,
			    ei->data_type);
			return -1;
		}

		ei++;
		DNPRINTF(QWX_D_QMI, "%s: next ei 0x%x ei->data_type=0x%x\n",
		    __func__, ei->tlv_type, ei->data_type);
	}

	DNPRINTF(QWX_D_QMI, "%s: used_total=%zu ei->data_type=0x%x\n",
	    __func__, *used_total, ei->data_type);

	return 0;
}
6614 
6615 int
qwx_qmi_decode_msg(struct qwx_softc * sc,void * output,size_t output_len,const struct qmi_elem_info * ei0,uint8_t * input,uint16_t input_len)6616 qwx_qmi_decode_msg(struct qwx_softc *sc, void *output, size_t output_len,
6617     const struct qmi_elem_info *ei0, uint8_t *input, uint16_t input_len)
6618 {
6619 	uint8_t *p = input;
6620 	size_t remain = input_len, used;
6621 	const struct qmi_elem_info *ei = ei0;
6622 
6623 	memset(output, 0, output_len);
6624 
6625 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
6626 	for (int i = 0; i < input_len; i++) {
6627 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
6628 	}
6629 	DNPRINTF(QWX_D_QMI, "\n");
6630 
6631 	while (remain > 0 && ei->data_type != QMI_EOTI) {
6632 		uint32_t nelem = 1, i;
6633 		uint16_t datalen;
6634 
6635 		if (qwx_qmi_decode_tlv_hdr(sc, &ei, &datalen, output_len,
6636 		    ei0, p, remain))
6637 			return -1;
6638 
6639 		/* Skip unrecognized elements. */
6640 		if (ei->data_type == QMI_EOTI) {
6641 			p += 3 + datalen;
6642 			remain -= 3 + datalen;
6643 			ei = ei0;
6644 			continue;
6645 		}
6646 
6647 		/* Set 'valid' flag for optional fields in output struct. */
6648 		if (ei->data_type == QMI_OPT_FLAG) {
6649 			uint8_t *pvalid;
6650 
6651 			if (ei->offset + ei->elem_size > output_len) {
6652 				printf("%s: QMI message element type 0x%x "
6653 				    "too large: %u\n", sc->sc_dev.dv_xname,
6654 				    ei->tlv_type, ei->elem_size);
6655 			}
6656 
6657 			pvalid = (uint8_t *)output + ei->offset;
6658 			*pvalid = 1;
6659 
6660 			ei++;
6661 		}
6662 
6663 		p += 3;
6664 		remain -= 3;
6665 
6666 		if (ei->data_type == QMI_DATA_LEN) {
6667 			const struct qmi_elem_info *datalen_ei = ei;
6668 			uint8_t elem_type = ei->tlv_type;
6669 
6670 			/*
6671 			 * Size info in TLV header indicates the
6672 			 * total length of element data that follows.
6673 			 */
6674 			if (remain < datalen) {
6675 				printf("%s:%d QMI message too short\n",
6676 				    __func__, __LINE__);
6677 				return -1;
6678 			}
6679 
6680 			ei++;
6681 			DNPRINTF(QWX_D_QMI,
6682 			    "%s: next ei data_type=0x%x tlv_type=0x%x "
6683 			    "dst elem_size=%u(0x%x) src total size=%u "
6684 			    "remain=%zu\n", __func__, ei->data_type,
6685 			    ei->tlv_type, ei->elem_size, ei->elem_size,
6686 			    datalen, remain);
6687 
6688 			/* Related EIs must have the same type. */
6689 			if (ei->tlv_type != elem_type) {
6690 				printf("%s: unexepected element type 0x%x; "
6691 				    "expected 0x%x\n", __func__,
6692 				    ei->tlv_type, elem_type);
6693 				return -1;
6694 			}
6695 
6696 			if (datalen == 0) {
6697 				if (ei->data_type != QMI_EOTI)
6698 					ei++;
6699 				continue;
6700 			}
6701 
6702 			/*
6703 			 * For variable length arrays a one- or two-byte
6704 			 * value follows the header, indicating the number
6705 			 * of elements in the array.
6706 			 */
6707 			if (ei->array_type == VAR_LEN_ARRAY) {
6708 				DNPRINTF(QWX_D_QMI,
6709 				    "%s: variable length array\n", __func__);
6710 				used = 0;
6711 				if (qwx_qmi_decode_datalen(sc, &used, &nelem,
6712 				    output, output_len, datalen_ei, p, remain))
6713 					return -1;
6714 				p += used;
6715 				remain -= used;
6716 				/*
6717 				 * Previous datalen value included the total
6718 				 * amount of bytes following the DATALEN TLV
6719 				 * header.
6720 				 */
6721 				datalen -= used;
6722 
6723 				if (nelem == 0) {
6724 					if (ei->data_type != QMI_EOTI)
6725 						ei++;
6726 					continue;
6727 				}
6728 
6729 				DNPRINTF(QWX_D_QMI,
6730 				    "%s: datalen %u used %zu bytes\n",
6731 				    __func__, nelem, used);
6732 
6733 				DNPRINTF(QWX_D_QMI,
6734 				    "%s: decoding %u array elements with "
6735 				    "src size %u dest size %u\n", __func__,
6736 				    nelem, datalen / nelem, ei->elem_size);
6737 			}
6738 		}
6739 
6740 		if (remain < datalen) {
6741 			printf("%s:%d QMI message too short: remain=%zu, "
6742 			    "datalen=%u\n", __func__, __LINE__, remain,
6743 			    datalen);
6744 			return -1;
6745 		}
6746 		if (output_len < nelem * ei->elem_size) {
6747 			printf("%s: QMI output buffer too short: remain=%zu "
6748 			    "nelem=%u ei->elem_size=%u\n", __func__, remain,
6749 			    nelem, ei->elem_size);
6750 			return -1;
6751 		}
6752 
6753 		for (i = 0; i < nelem && remain > 0; i++) {
6754 			size_t outoff;
6755 
6756 			outoff = ei->offset + (ei->elem_size * i);
6757 			switch (ei->data_type) {
6758 			case QMI_STRUCT:
6759 				used = 0;
6760 				if (qwx_qmi_decode_struct(sc, &used,
6761 				    output + outoff, output_len - outoff,
6762 				    ei, p, remain, 0))
6763 					return -1;
6764 				remain -= used;
6765 				p += used;
6766 				if (used != datalen) {
6767 					DNPRINTF(QWX_D_QMI,
6768 					    "%s struct used only %zu bytes "
6769 					    "of %u input bytes\n", __func__,
6770 					    used, datalen);
6771 				} else {
6772 					DNPRINTF(QWX_D_QMI,
6773 					    "%s: struct used %zu bytes "
6774 					    "of input\n", __func__, used);
6775 				}
6776 				break;
6777 			case QMI_STRING:
6778 				used = 0;
6779 				if (qwx_qmi_decode_string(sc, &used,
6780 				    output + outoff, output_len - outoff,
6781 				    ei, p, remain, datalen, 0))
6782 					return -1;
6783 				remain -= used;
6784 				p += used;
6785 				if (used != datalen) {
6786 					DNPRINTF(QWX_D_QMI,
6787 					    "%s: string used only %zu bytes "
6788 					    "of %u input bytes\n", __func__,
6789 					    used, datalen);
6790 				} else {
6791 					DNPRINTF(QWX_D_QMI,
6792 					    "%s: string used %zu bytes "
6793 					    "of input\n", __func__, used);
6794 				}
6795 				break;
6796 			case QMI_UNSIGNED_1_BYTE:
6797 				if (remain < ei->elem_size) {
6798 					printf("%s: QMI message too "
6799 					    "short\n", __func__);
6800 					return -1;
6801 				}
6802 				if (qwx_qmi_decode_byte(output + outoff,
6803 				    ei, p))
6804 					return -1;
6805 				remain -= ei->elem_size;
6806 				p += ei->elem_size;
6807 				break;
6808 			case QMI_UNSIGNED_2_BYTE:
6809 			case QMI_SIGNED_2_BYTE_ENUM:
6810 				if (remain < ei->elem_size) {
6811 					printf("%s: QMI message too "
6812 					    "short\n", __func__);
6813 					return -1;
6814 				}
6815 				if (qwx_qmi_decode_word(output + outoff,
6816 				    ei, p))
6817 					return -1;
6818 				remain -= ei->elem_size;
6819 				p += ei->elem_size;
6820 				break;
6821 			case QMI_UNSIGNED_4_BYTE:
6822 			case QMI_SIGNED_4_BYTE_ENUM:
6823 				if (remain < ei->elem_size) {
6824 					printf("%s: QMI message too "
6825 					    "short\n", __func__);
6826 					return -1;
6827 				}
6828 				if (qwx_qmi_decode_dword(output + outoff,
6829 				    ei, p))
6830 					return -1;
6831 				remain -= ei->elem_size;
6832 				p += ei->elem_size;
6833 				break;
6834 			case QMI_UNSIGNED_8_BYTE:
6835 				if (remain < ei->elem_size) {
6836 					printf("%s: QMI message too "
6837 					    "short 4\n", __func__);
6838 					return -1;
6839 				}
6840 				if (qwx_qmi_decode_qword(output + outoff,
6841 				    ei, p))
6842 					return -1;
6843 				remain -= ei->elem_size;
6844 				p += ei->elem_size;
6845 				break;
6846 			default:
6847 				printf("%s: unhandled QMI message element "
6848 				    "data type 0x%x\n",
6849 				    sc->sc_dev.dv_xname, ei->data_type);
6850 				return -1;
6851 			}
6852 		}
6853 
6854 		ei++;
6855 		DNPRINTF(QWX_D_QMI,
6856 		    "%s: next ei 0x%x ei->data_type=0x%x remain=%zu\n",
6857 		    __func__, ei->tlv_type, ei->data_type, remain);
6858 
6859 		DNPRINTF(QWX_D_QMI, "%s: remaining input: ", __func__);
6860 		for (int i = 0; i < remain; i++)
6861 			DNPRINTF(QWX_D_QMI, " %02x", p[i]);
6862 		DNPRINTF(QWX_D_QMI, "\n");
6863 	}
6864 
6865 	return 0;
6866 }
6867 
6868 void
qwx_qmi_recv_wlanfw_ind_register_req_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)6869 qwx_qmi_recv_wlanfw_ind_register_req_v1(struct qwx_softc *sc, struct mbuf *m,
6870     uint16_t txn_id, uint16_t msg_len)
6871 {
6872 	struct qmi_wlanfw_ind_register_resp_msg_v01 resp;
6873 	const struct qmi_elem_info *ei;
6874 	uint8_t *msg = mtod(m, uint8_t *);
6875 
6876 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6877 
6878 	ei = qmi_wlanfw_ind_register_resp_msg_v01_ei;
6879 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6880 		return;
6881 
6882 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6883 	    __func__, le16toh(resp.resp.result));
6884 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6885 	    __func__, le16toh(resp.resp.error));
6886 	DNPRINTF(QWX_D_QMI, "%s: resp.fw_status=0x%llx\n",
6887 	   __func__, le64toh(resp.fw_status));
6888 
6889 	sc->qmi_resp.result = le16toh(resp.resp.result);
6890 	sc->qmi_resp.error = le16toh(resp.resp.error);
6891 	wakeup(&sc->qmi_resp);
6892 }
6893 
6894 void
qwx_qmi_recv_wlanfw_host_cap_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)6895 qwx_qmi_recv_wlanfw_host_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
6896     uint16_t txn_id, uint16_t msg_len)
6897 {
6898 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
6899 	const struct qmi_elem_info *ei;
6900 	uint8_t *msg = mtod(m, uint8_t *);
6901 
6902 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6903 
6904 	ei = qmi_wlanfw_host_cap_resp_msg_v01_ei;
6905 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6906 		return;
6907 
6908 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6909 	    __func__, le16toh(resp.resp.result));
6910 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6911 	    __func__, le16toh(resp.resp.error));
6912 
6913 	sc->qmi_resp.result = le16toh(resp.resp.result);
6914 	sc->qmi_resp.error = le16toh(resp.resp.error);
6915 	wakeup(&sc->qmi_resp);
6916 }
6917 
6918 void
qwx_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)6919 qwx_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwx_softc *sc, struct mbuf *m,
6920     uint16_t txn_id, uint16_t msg_len)
6921 {
6922 	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
6923 	const struct qmi_elem_info *ei;
6924 	uint8_t *msg = mtod(m, uint8_t *);
6925 
6926 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6927 
6928 	ei = qmi_wlanfw_respond_mem_resp_msg_v01_ei;
6929 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6930 		return;
6931 
6932 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6933 	    __func__, le16toh(resp.resp.result));
6934 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6935 	    __func__, le16toh(resp.resp.error));
6936 
6937 	sc->qmi_resp.result = le16toh(resp.resp.result);
6938 	sc->qmi_resp.error = le16toh(resp.resp.error);
6939 	wakeup(&sc->qmi_resp);
6940 }
6941 
6942 void
qwx_qmi_recv_wlanfw_cap_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)6943 qwx_qmi_recv_wlanfw_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
6944     uint16_t txn_id, uint16_t msg_len)
6945 {
6946 	struct qmi_wlanfw_cap_resp_msg_v01 resp;
6947 	const struct qmi_elem_info *ei;
6948 	uint8_t *msg = mtod(m, uint8_t *);
6949 
6950 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
6951 
6952 	memset(&resp, 0, sizeof(resp));
6953 
6954 	ei = qmi_wlanfw_cap_resp_msg_v01_ei;
6955 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
6956 		return;
6957 
6958 	if (resp.chip_info_valid) {
6959 		sc->qmi_target.chip_id = resp.chip_info.chip_id;
6960 		sc->qmi_target.chip_family = resp.chip_info.chip_family;
6961 	}
6962 
6963 	if (resp.board_info_valid)
6964 		sc->qmi_target.board_id = resp.board_info.board_id;
6965 	else
6966 		sc->qmi_target.board_id = 0xFF;
6967 
6968 	if (resp.soc_info_valid)
6969 		sc->qmi_target.soc_id = resp.soc_info.soc_id;
6970 
6971 	if (resp.fw_version_info_valid) {
6972 		sc->qmi_target.fw_version = resp.fw_version_info.fw_version;
6973 		strlcpy(sc->qmi_target.fw_build_timestamp,
6974 			resp.fw_version_info.fw_build_timestamp,
6975 			sizeof(sc->qmi_target.fw_build_timestamp));
6976 	}
6977 
6978 	if (resp.fw_build_id_valid)
6979 		strlcpy(sc->qmi_target.fw_build_id, resp.fw_build_id,
6980 			sizeof(sc->qmi_target.fw_build_id));
6981 
6982 	if (resp.eeprom_read_timeout_valid) {
6983 		sc->qmi_target.eeprom_caldata = resp.eeprom_read_timeout;
6984 		DNPRINTF(QWX_D_QMI,
6985 		    "%s: qmi cal data supported from eeprom\n", __func__);
6986 	}
6987 
6988 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
6989 	    __func__, le16toh(resp.resp.result));
6990 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
6991 	    __func__, le16toh(resp.resp.error));
6992 
6993 	sc->qmi_resp.result = le16toh(resp.resp.result);
6994 	sc->qmi_resp.error = le16toh(resp.resp.error);
6995 	wakeup(&sc->qmi_resp);
6996 }
6997 
6998 void
qwx_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)6999 qwx_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7000     uint16_t txn_id, uint16_t msg_len)
7001 {
7002 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
7003 	const struct qmi_elem_info *ei;
7004 	uint8_t *msg = mtod(m, uint8_t *);
7005 
7006 	memset(&resp, 0, sizeof(resp));
7007 
7008 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7009 
7010 	ei = qmi_wlanfw_bdf_download_resp_msg_v01_ei;
7011 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7012 		return;
7013 
7014 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7015 	    __func__, le16toh(resp.resp.result));
7016 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7017 	    __func__, le16toh(resp.resp.error));
7018 
7019 	sc->qmi_resp.result = le16toh(resp.resp.result);
7020 	sc->qmi_resp.error = le16toh(resp.resp.error);
7021 	wakeup(&sc->qmi_resp);
7022 }
7023 
7024 void
qwx_qmi_recv_wlanfw_m3_info_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)7025 qwx_qmi_recv_wlanfw_m3_info_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7026     uint16_t txn_id, uint16_t msg_len)
7027 {
7028 	struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
7029 	const struct qmi_elem_info *ei;
7030 	uint8_t *msg = mtod(m, uint8_t *);
7031 
7032 	memset(&resp, 0, sizeof(resp));
7033 
7034 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7035 
7036 	ei = qmi_wlanfw_m3_info_resp_msg_v01_ei;
7037 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7038 		return;
7039 
7040 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7041 	    __func__, le16toh(resp.resp.result));
7042 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7043 	    __func__, le16toh(resp.resp.error));
7044 
7045 	sc->qmi_resp.result = le16toh(resp.resp.result);
7046 	sc->qmi_resp.error = le16toh(resp.resp.error);
7047 	wakeup(&sc->qmi_resp);
7048 }
7049 
7050 void
qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)7051 qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7052     uint16_t txn_id, uint16_t msg_len)
7053 {
7054 	struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp;
7055 	const struct qmi_elem_info *ei;
7056 	uint8_t *msg = mtod(m, uint8_t *);
7057 
7058 	memset(&resp, 0, sizeof(resp));
7059 
7060 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7061 
7062 	ei = qmi_wlanfw_wlan_ini_resp_msg_v01_ei;
7063 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7064 		return;
7065 
7066 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7067 	    __func__, le16toh(resp.resp.result));
7068 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7069 	    __func__, le16toh(resp.resp.error));
7070 
7071 	sc->qmi_resp.result = le16toh(resp.resp.result);
7072 	sc->qmi_resp.error = le16toh(resp.resp.error);
7073 	wakeup(&sc->qmi_resp);
7074 }
7075 
7076 void
qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)7077 qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7078     uint16_t txn_id, uint16_t msg_len)
7079 {
7080 	struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
7081 	const struct qmi_elem_info *ei;
7082 	uint8_t *msg = mtod(m, uint8_t *);
7083 
7084 	memset(&resp, 0, sizeof(resp));
7085 
7086 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7087 
7088 	ei = qmi_wlanfw_wlan_cfg_resp_msg_v01_ei;
7089 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7090 		return;
7091 
7092 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7093 	    __func__, le16toh(resp.resp.result));
7094 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7095 	    __func__, le16toh(resp.resp.error));
7096 
7097 	sc->qmi_resp.result = le16toh(resp.resp.result);
7098 	sc->qmi_resp.error = le16toh(resp.resp.error);
7099 	wakeup(&sc->qmi_resp);
7100 }
7101 
7102 void
qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwx_softc * sc,struct mbuf * m,uint16_t txn_id,uint16_t msg_len)7103 qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwx_softc *sc, struct mbuf *m,
7104     uint16_t txn_id, uint16_t msg_len)
7105 {
7106 	struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
7107 	const struct qmi_elem_info *ei;
7108 	uint8_t *msg = mtod(m, uint8_t *);
7109 
7110 	memset(&resp, 0, sizeof(resp));
7111 
7112 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
7113 
7114 	ei = qmi_wlanfw_wlan_mode_resp_msg_v01_ei;
7115 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
7116 		return;
7117 
7118 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
7119 	    __func__, le16toh(resp.resp.result));
7120 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
7121 	    __func__, le16toh(resp.resp.error));
7122 
7123 	sc->qmi_resp.result = le16toh(resp.resp.result);
7124 	sc->qmi_resp.error = le16toh(resp.resp.error);
7125 	wakeup(&sc->qmi_resp);
7126 }
7127 
/*
 * Dispatch a received QMI response to the handler matching its
 * message ID. The mbuf points at the QMI payload; the QMI header
 * has already been stripped by qwx_qrtr_recv_data().
 * Unknown message IDs are logged and dropped.
 */
void
qwx_qmi_recv_response(struct qwx_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
{
	switch (msg_id) {
	case QMI_WLANFW_IND_REGISTER_REQ_V01:
		qwx_qmi_recv_wlanfw_ind_register_req_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLFW_HOST_CAP_RESP_V01:
		qwx_qmi_recv_wlanfw_host_cap_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLFW_RESPOND_MEM_RESP_V01:
		qwx_qmi_recv_wlanfw_respond_mem_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_CAP_RESP_V01:
		qwx_qmi_recv_wlanfw_cap_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_BDF_DOWNLOAD_RESP_V01:
		qwx_qmi_recv_wlanfw_bdf_download_resp_v1(sc, m, txn_id,
		    msg_len);
		break;
	case QMI_WLANFW_M3_INFO_RESP_V01:
		qwx_qmi_recv_wlanfw_m3_info_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_INI_RESP_V01:
		qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_CFG_RESP_V01:
		qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(sc, m, txn_id, msg_len);
		break;
	case QMI_WLANFW_WLAN_MODE_RESP_V01:
		qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(sc, m, txn_id, msg_len);
		break;
	default:
		printf("%s: unhandled QMI response 0x%x\n",
		    sc->sc_dev.dv_xname, msg_id);
		break;
	}
}
7167 
/*
 * Handle a firmware "request memory" indication.
 * The decoded indication is stored in sc->sc_req_mem_ind; the actual
 * memory setup is deferred to process context (qwx_qmi_mem_seg_send()),
 * which we wake up here. Ownership of the allocated indication passes
 * to that consumer.
 */
void
qwx_qmi_recv_wlanfw_request_mem_indication(struct qwx_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_len)
{
	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind = NULL;
	const struct qmi_elem_info *ei;
	uint8_t *msg = mtod(m, uint8_t *);

	DNPRINTF(QWX_D_QMI, "%s\n", __func__);

	/* Ignore unless we expect this indication and none is pending. */
	if (!sc->expect_fwmem_req || sc->sc_req_mem_ind != NULL)
		return;

	/* This structure is too large for the stack. */
	ind = malloc(sizeof(*ind), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ind == NULL)
		return;

	ei = qmi_wlanfw_request_mem_ind_msg_v01_ei;
	if (qwx_qmi_decode_msg(sc, ind, sizeof(*ind), ei, msg, msg_len)) {
		free(ind, M_DEVBUF, sizeof(*ind));
		return;
	}

	/* Handled by qwx_qmi_mem_seg_send() in process context */
	sc->sc_req_mem_ind = ind;
	wakeup(&sc->sc_req_mem_ind);
}
7196 
/*
 * Dispatch a received QMI indication by message ID.
 * Memory-request indications carry a payload and get a dedicated
 * handler; the "memory ready" and "init done" indications merely
 * set a flag and wake up the process-context waiter.
 */
void
qwx_qmi_recv_indication(struct qwx_softc *sc, struct mbuf *m,
    uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
{
	switch (msg_id) {
	case QMI_WLFW_REQUEST_MEM_IND_V01:
		qwx_qmi_recv_wlanfw_request_mem_indication(sc, m,
		    txn_id, msg_len);
		break;
	case QMI_WLFW_FW_MEM_READY_IND_V01:
		sc->fwmem_ready = 1;
		wakeup(&sc->fwmem_ready);
		break;
	case QMI_WLFW_FW_INIT_DONE_IND_V01:
		sc->fw_init_done = 1;
		wakeup(&sc->fw_init_done);
		break;
	default:
		printf("%s: unhandled QMI indication 0x%x\n",
		    sc->sc_dev.dv_xname, msg_id);
		break;
	}
}
7220 
/*
 * Parse a QRTR data packet containing a QMI message.
 * 'size' is the payload length reported by the QRTR header.
 * Validates the QMI header and its length field against 'size',
 * then strips the QMI header from the mbuf and dispatches to the
 * response or indication handler.
 */
void
qwx_qrtr_recv_data(struct qwx_softc *sc, struct mbuf *m, size_t size)
{
	struct qmi_header hdr;
	uint16_t txn_id, msg_id, msg_len;

	if (size < sizeof(hdr)) {
		printf("%s: QMI message too short: %zu bytes\n",
		    sc->sc_dev.dv_xname, size);
		return;
	}

	/* Copy the header out; fields are little-endian on the wire. */
	memcpy(&hdr, mtod(m, void *), sizeof(hdr));

	DNPRINTF(QWX_D_QMI,
	    "%s: QMI message type=0x%x txn=0x%x id=0x%x len=%u\n",
	    __func__, hdr.type, le16toh(hdr.txn_id),
	    le16toh(hdr.msg_id), le16toh(hdr.msg_len));

	txn_id = le16toh(hdr.txn_id);
	msg_id = le16toh(hdr.msg_id);
	msg_len = le16toh(hdr.msg_len);
	/* The header's length field must account for the rest of the data. */
	if (sizeof(hdr) + msg_len != size) {
		printf("%s: bad length in QMI message header: %u\n",
		    sc->sc_dev.dv_xname, msg_len);
		return;
	}

	switch (hdr.type) {
	case QMI_RESPONSE:
		m_adj(m, sizeof(hdr));
		qwx_qmi_recv_response(sc, m, txn_id, msg_id, msg_len);
		break;
	case QMI_INDICATION:
		m_adj(m, sizeof(hdr));
		qwx_qmi_recv_indication(sc, m, txn_id, msg_id, msg_len);
		break;
	default:
		printf("%s: unhandled QMI message type %u\n",
		    sc->sc_dev.dv_xname, hdr.type);
		break;
	}
}
7264 
/*
 * Send a QRTR HELLO control message to the peer.
 * Builds the message in a cluster mbuf, zero-padded to a 4-byte
 * boundary, and submits it via the bus-specific transfer hook.
 * Returns 0 on success or an errno; the mbuf is freed on error.
 */
int
qwx_qrtr_say_hello(struct qwx_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);	/* pad to 4-byte boundary */

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	/* Attach cluster storage large enough for the padded message. */
	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	/* All QRTR header fields are little-endian on the wire. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_HELLO);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	/* Control payload: just the HELLO command. */
	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_HELLO);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
7328 
/*
 * Send a QRTR RESUME_TX control message, acknowledging receipt of a
 * packet that had the confirm-rx flag set so the peer may continue
 * transmitting. Message construction mirrors qwx_qrtr_say_hello().
 * Returns 0 on success or an errno; the mbuf is freed on error.
 */
int
qwx_qrtr_resume_tx(struct qwx_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);	/* pad to 4-byte boundary */

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	/* Attach cluster storage large enough for the padded message. */
	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	/* All QRTR header fields are little-endian on the wire. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_RESUME_TX);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	/* Control payload identifies ourselves as the resuming client. */
	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
	pkt.client.node = htole32(0x01);
	pkt.client.port = htole32(0x4000);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
7394 
/*
 * Handle a received QRTR message.
 * The first byte selects the header version (v1 or v2); both header
 * views alias the same mbuf data. After validating the size field,
 * dispatch on the QRTR message type. If the sender requested an
 * acknowledgement (confirm_rx), answer with a RESUME_TX message.
 */
void
qwx_qrtr_recv_msg(struct qwx_softc *sc, struct mbuf *m)
{
	struct qrtr_hdr_v1 *v1 = mtod(m, struct qrtr_hdr_v1 *);
	struct qrtr_hdr_v2 *v2 = mtod(m, struct qrtr_hdr_v2 *);
	struct qrtr_ctrl_pkt *pkt;
	uint32_t type, size, hdrsize;
	uint8_t ver, confirm_rx;

	ver = *mtod(m, uint8_t *);
	switch (ver) {
	case QRTR_PROTO_VER_1:
		DNPRINTF(QWX_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    letoh32(v1->type), letoh32(v1->size),
		    letoh32(v1->confirm_rx));
		type = letoh32(v1->type);
		size = letoh32(v1->size);
		confirm_rx = !!letoh32(v1->confirm_rx);
		hdrsize = sizeof(*v1);
		break;
	case QRTR_PROTO_VER_2:
		DNPRINTF(QWX_D_QMI,
		    "%s: type %u size %u confirm_rx %u\n", __func__,
		    v2->type, letoh32(v2->size),
		    !!(v2->flags & QRTR_FLAGS_CONFIRM_RX));
		type = v2->type;
		size = letoh32(v2->size);
		confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		hdrsize = sizeof(*v2);
		break;
	default:
		printf("%s: unsupported qrtr version %u\n",
		    sc->sc_dev.dv_xname, ver);
		return;
	}

	/* The payload size claimed by the header must fit the packet. */
	if (size > m->m_pkthdr.len) {
		printf("%s: bad size in qrtr message header: %u\n",
		    sc->sc_dev.dv_xname, size);
		return;
	}

	switch (type) {
	case QRTR_TYPE_DATA:
		m_adj(m, hdrsize);
		qwx_qrtr_recv_data(sc, m, size);
		break;
	case QRTR_TYPE_HELLO:
		qwx_qrtr_say_hello(sc);
		break;
	case QRTR_TYPE_NEW_SERVER:
		/* Record the advertised service so waiters can find it. */
		m_adj(m, hdrsize);
		pkt = mtod(m, struct qrtr_ctrl_pkt *);
		sc->qrtr_server.service = le32toh(pkt->server.service);
		sc->qrtr_server.instance = le32toh(pkt->server.instance);
		sc->qrtr_server.node = le32toh(pkt->server.node);
		sc->qrtr_server.port = le32toh(pkt->server.port);
		DNPRINTF(QWX_D_QMI,
		    "%s: new server: service=0x%x instance=0x%x node=0x%x "
		    "port=0x%x\n", __func__, sc->qrtr_server.service,
		    sc->qrtr_server.instance,
		    sc->qrtr_server.node, sc->qrtr_server.port);
		wakeup(&sc->qrtr_server);
		break;
	default:
		DPRINTF("%s: unhandled qrtr type %u\n",
		    sc->sc_dev.dv_xname, type);
		return;
	}

	if (confirm_rx)
		qwx_qrtr_resume_tx(sc);
}
7469 
7470 // Not needed because we don't implement QMI as a network service.
7471 #define qwx_qmi_init_service(sc)	(0)
7472 #define qwx_qmi_deinit_service(sc)	(0)
7473 
7474 int
qwx_qmi_encode_datalen(uint8_t * p,uint32_t * datalen,const struct qmi_elem_info * ei,void * input)7475 qwx_qmi_encode_datalen(uint8_t *p, uint32_t *datalen,
7476     const struct qmi_elem_info *ei, void *input)
7477 {
7478 	memcpy(datalen, input + ei->offset, sizeof(uint32_t));
7479 
7480 	if (ei->elem_size == sizeof(uint8_t)) {
7481 		p[0] = (*datalen & 0xff);
7482 	} else if (ei->elem_size == sizeof(uint16_t)) {
7483 		p[0] = (*datalen & 0xff);
7484 		p[1] = (*datalen >> 8) & 0xff;
7485 	} else {
7486 		printf("%s: bad element size\n", __func__);
7487 		return -1;
7488 	}
7489 
7490 	return 0;
7491 }
7492 
7493 int
qwx_qmi_encode_byte(uint8_t * p,const struct qmi_elem_info * ei,void * input,int i)7494 qwx_qmi_encode_byte(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7495     int i)
7496 {
7497 	if (ei->elem_size != sizeof(uint8_t)) {
7498 		printf("%s: bad element size\n", __func__);
7499 		return -1;
7500 	}
7501 
7502 	if (p == NULL)
7503 		return 0;
7504 
7505 	memcpy(p, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7506 	return 0;
7507 }
7508 
7509 int
qwx_qmi_encode_word(uint8_t * p,const struct qmi_elem_info * ei,void * input,int i)7510 qwx_qmi_encode_word(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7511     int i)
7512 {
7513 	uint16_t val;
7514 
7515 	if (ei->elem_size != sizeof(val)) {
7516 		printf("%s: bad element size\n", __func__);
7517 		return -1;
7518 	}
7519 
7520 	if (p == NULL)
7521 		return 0;
7522 
7523 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7524 	val = htole16(val);
7525 	memcpy(p, &val, sizeof(val));
7526 	return 0;
7527 }
7528 
7529 int
qwx_qmi_encode_dword(uint8_t * p,const struct qmi_elem_info * ei,void * input,int i)7530 qwx_qmi_encode_dword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7531     int i)
7532 {
7533 	uint32_t val;
7534 
7535 	if (ei->elem_size != sizeof(val)) {
7536 		printf("%s: bad element size\n", __func__);
7537 		return -1;
7538 	}
7539 
7540 	if (p == NULL)
7541 		return 0;
7542 
7543 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7544 	val = htole32(val);
7545 	memcpy(p, &val, sizeof(val));
7546 	return 0;
7547 }
7548 
7549 int
qwx_qmi_encode_qword(uint8_t * p,const struct qmi_elem_info * ei,void * input,int i)7550 qwx_qmi_encode_qword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
7551     int i)
7552 {
7553 	uint64_t val;
7554 
7555 	if (ei->elem_size != sizeof(val)) {
7556 		printf("%s: bad element size\n", __func__);
7557 		return -1;
7558 	}
7559 
7560 	if (p == NULL)
7561 		return 0;
7562 
7563 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
7564 	val = htole64(val);
7565 	memcpy(p, &val, sizeof(val));
7566 	return 0;
7567 }
7568 
/*
 * Encode a QMI struct element described by struct_ei->ei_array.
 * Walks the member element-info list until QMI_EOTI, encoding each
 * member into 'p' and accumulating the total in 'encoded_len'.
 * A NULL 'p' performs a dry run that only computes the encoded
 * length (the per-type encoders skip the write when p == NULL).
 * Returns 0 on success, -1 on malformed element info or a too-short
 * input buffer.
 */
int
qwx_qmi_encode_struct(uint8_t *p, size_t *encoded_len,
    const struct qmi_elem_info *struct_ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	size_t remain = input_len;

	*encoded_len = 0;

	while (ei->data_type != QMI_EOTI) {
		/*
		 * OPT_FLAG reads a "valid" byte from the input structure
		 * which decides whether the following element is encoded
		 * at all.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				return -1;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			return -1;
		}

		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_encode_byte(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_2_BYTE:
			if (qwx_qmi_encode_word(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_encode_dword(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_encode_qword(p, ei, input, 0))
				return -1;
			break;
		default:
			printf("%s: unhandled QMI struct element type %d\n",
			    __func__, ei->data_type);
			return -1;
		}

		/* Account for the member and advance output position. */
		remain -= ei->elem_size;
		if (p != NULL)
			p += ei->elem_size;
		*encoded_len += ei->elem_size;
		ei++;
	}

	return 0;
}
7638 
7639 int
qwx_qmi_encode_string(uint8_t * p,size_t * encoded_len,const struct qmi_elem_info * string_ei,void * input,size_t input_len)7640 qwx_qmi_encode_string(uint8_t *p, size_t *encoded_len,
7641     const struct qmi_elem_info *string_ei, void *input, size_t input_len)
7642 {
7643 	*encoded_len = strnlen(input, input_len);
7644 	if (*encoded_len > string_ei->elem_len) {
7645 		printf("%s: QMI message buffer too short\n", __func__);
7646 		return -1;
7647 	}
7648 
7649 	if (p)
7650 		memcpy(p, input, *encoded_len);
7651 
7652 	return 0;
7653 }
7654 
/*
 * Encode a QMI message into a newly allocated buffer.
 *
 * The message layout is described by the 'ei' array (same table format
 * as Linux ath11k): each entry describes one element's TLV type, data
 * type, size, and offset into the 'input' request structure.
 *
 * Works in two passes: the first pass only computes the required buffer
 * size, the second pass fills in the QMI header and the TLV-encoded
 * elements. On success *encoded_msg/*encoded_len hold the malloc'd
 * buffer (caller frees) and the transaction id is post-incremented.
 * Returns 0 on success, ENOMEM on allocation failure, -1 on any
 * encoding error (buffer already freed and pointers reset).
 */
int
qwx_qmi_encode_msg(uint8_t **encoded_msg, size_t *encoded_len, int type,
    uint16_t *txn_id, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei0 = ei; /* rewound for second pass */
	struct qmi_header hdr;
	size_t remain;
	uint8_t *p, *op;

	*encoded_msg = NULL;
	*encoded_len = 0;

	/* First pass: Determine length of encoded message. */
	while (ei->data_type != QMI_EOTI) {
		int nelem = 1, i;

		/* Element must lie entirely within the input structure. */
		if (ei->offset + ei->elem_size > input_len) {
			printf("%s: bad input buffer offset at element 0x%x "
			    "data type 0x%x\n",
			    __func__, ei->tlv_type, ei->data_type);
			goto err;
		}

		/*
		 * OPT_FLAG determines whether the next element
		 * should be considered for encoding.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional element\n", __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		/* Each encoded element carries a 1-byte type + 2-byte length. */
		*encoded_len += 3; /* type, length */
		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen = 0;
			uint8_t dummy[2];

			/* Dummy destination; we only need 'datalen' here. */
			if (qwx_qmi_encode_datalen(dummy, &datalen, ei, input))
				goto err;
			*encoded_len += ei->elem_size;
			ei++;
			/* A DATA_LEN element must describe a variable array. */
			if (ei->array_type != VAR_LEN_ARRAY) {
				printf("%s: data len not for a var array\n",
				    __func__);
				goto err;
			}
			nelem = datalen;
			if (ei->data_type == QMI_STRUCT) {
				/* Sizing pass: NULL destination pointer. */
				for (i = 0; i < nelem; i++) {
					size_t encoded_struct_len = 0;
					size_t inoff = ei->offset + (i * ei->elem_size);

					if (qwx_qmi_encode_struct(NULL,
					    &encoded_struct_len, ei,
					    input + inoff, input_len - inoff))
						goto err;

					*encoded_len += encoded_struct_len;
				}
			} else
				*encoded_len += nelem * ei->elem_size;
			ei++;
		} else if (ei->data_type == QMI_STRING) {
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset;

			/* Sizing pass: NULL destination pointer. */
			if (qwx_qmi_encode_string(NULL,
			    &encoded_string_len, ei,
			    input + inoff, input_len - inoff))
				goto err;
			*encoded_len += encoded_string_len;
			ei++;
		} else {
			/* Fixed-size scalar element. */
			*encoded_len += ei->elem_size;
			ei++;
		}
	}

	*encoded_len += sizeof(hdr);
	*encoded_msg = malloc(*encoded_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*encoded_msg == NULL)
		return ENOMEM;

	/* QMI header fields are little-endian on the wire. */
	hdr.type = type;
	hdr.txn_id = htole16(*txn_id);
	hdr.msg_id = htole16(msg_id);
	hdr.msg_len = htole16(*encoded_len - sizeof(hdr));
	memcpy(*encoded_msg, &hdr, sizeof(hdr));

	/* Second pass: Encode the message. */
	ei = ei0;
	p = *encoded_msg + sizeof(hdr);
	remain = *encoded_len - sizeof(hdr);
	while (ei->data_type != QMI_EOTI) {
		uint32_t datalen = 0;
		int nelem = 1, i;

		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size + 3 > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			goto err;
		}

		/* 3 bytes of type-length-value header, remember for later */
		op = p;
		p += 3;

		if (ei->data_type == QMI_DATA_LEN) {
			if (qwx_qmi_encode_datalen(p, &datalen, ei, input))
				goto err;
			p += ei->elem_size;
			ei++;
			if (ei->array_type == VAR_LEN_ARRAY)
				nelem = datalen;
		}

		/* Encode nelem instances of the current element. */
		for (i = 0; i < nelem; i++) {
			size_t encoded_struct_len = 0;
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset + (i * ei->elem_size);

			switch (ei->data_type) {
			case QMI_UNSIGNED_1_BYTE:
				if (qwx_qmi_encode_byte(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_2_BYTE:
			case QMI_SIGNED_2_BYTE_ENUM:
				if (qwx_qmi_encode_word(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_4_BYTE:
			case QMI_SIGNED_4_BYTE_ENUM:
				if (qwx_qmi_encode_dword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_8_BYTE:
				if (qwx_qmi_encode_qword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_STRUCT:
				if (qwx_qmi_encode_struct(p,
				    &encoded_struct_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_struct_len;
				p += encoded_struct_len;
				break;
			case QMI_STRING:
				if (qwx_qmi_encode_string(p,
				    &encoded_string_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_string_len;
				p += encoded_string_len;
				break;
			default:
				printf("%s: unhandled QMI message element type %d\n",
				    __func__, ei->data_type);
				goto err;
			}
		}

		/* Backpatch the TLV header: type, then 16-bit LE length. */
		op[0] = ei->tlv_type;
		op[1] = (p - (op + 3)) & 0xff;
		op[2] = ((p - (op + 3)) >> 8) & 0xff;

		ei++;
	}

	/* Debug hex dump of the encoded message; disabled by default. */
	if (0) {
		int i;
		DNPRINTF(QWX_D_QMI,
		   "%s: message type 0x%x txnid 0x%x msgid 0x%x "
		    "msglen %zu encoded:", __func__,
		    type, *txn_id, msg_id, *encoded_len - sizeof(hdr));
		for (i = 0; i < *encoded_len; i++) {
			DNPRINTF(QWX_D_QMI, "%s %.2x", i % 16 == 0 ? "\n" : "",
			    (*encoded_msg)[i]);
		}
		if (i % 16)
			DNPRINTF(QWX_D_QMI, "\n");
	}

	(*txn_id)++; /* wrap-around is fine */
	return 0;
err:
	/* free(NULL) is a no-op, so this is safe during the first pass. */
	free(*encoded_msg, M_DEVBUF, *encoded_len);
	*encoded_msg = NULL;
	*encoded_len = 0;
	return -1;
}
7891 
/*
 * Encode a QMI request and transmit it to the firmware inside a QRTR
 * (Qualcomm IPC router) data packet.
 *
 * The encoded message is wrapped in a qrtr_hdr_v1, zero-padded to a
 * 4-byte boundary, and handed to the bus-specific submit_xfer op,
 * which takes ownership of the mbuf on success. Returns 0 on success,
 * -1 if encoding failed, or an errno from mbuf allocation/copy or
 * submit_xfer.
 */
int
qwx_qmi_send_request(struct qwx_softc *sc, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *req, size_t req_len)
{
	struct qrtr_hdr_v1 hdr;
	struct mbuf *m;
	uint8_t *encoded_msg;
	size_t encoded_len;
	size_t totlen, padlen;
	int err;

	if (qwx_qmi_encode_msg(&encoded_msg, &encoded_len, QMI_REQUEST,
	    &sc->qmi_txn_id, msg_id, msg_len, ei, req, req_len))
		return -1;

	/* QRTR packets are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + encoded_len;
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	/* Attach external storage large enough for the padded packet. */
	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	/* QRTR header fields are little-endian on the wire. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_DATA);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
	hdr.size = htole32(encoded_len);

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	err = m_copyback(m, sizeof(hdr), encoded_len, encoded_msg, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		/* At most 3 pad bytes are needed, so one word suffices. */
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* On success submit_xfer takes ownership of the mbuf. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	free(encoded_msg, M_DEVBUF, encoded_len);
	return err;
}
7959 
7960 int
qwx_qmi_fw_ind_register_send(struct qwx_softc * sc)7961 qwx_qmi_fw_ind_register_send(struct qwx_softc *sc)
7962 {
7963 	struct qmi_wlanfw_ind_register_req_msg_v01 req;
7964 	int ret;
7965 
7966 	memset(&req, 0, sizeof(req));
7967 
7968 	req.client_id_valid = 1;
7969 	req.client_id = QMI_WLANFW_CLIENT_ID;
7970 	req.fw_ready_enable_valid = 1;
7971 	req.fw_ready_enable = 1;
7972 	req.cal_done_enable_valid = 1;
7973 	req.cal_done_enable = 1;
7974 	req.fw_init_done_enable_valid = 1;
7975 	req.fw_init_done_enable = 1;
7976 
7977 	req.pin_connect_result_enable_valid = 0;
7978 	req.pin_connect_result_enable = 0;
7979 
7980 	/*
7981 	 * WCN6750 doesn't request for DDR memory via QMI,
7982 	 * instead it uses a fixed 12MB reserved memory region in DDR.
7983 	 */
7984 	if (!sc->hw_params.fixed_fw_mem) {
7985 		req.request_mem_enable_valid = 1;
7986 		req.request_mem_enable = 1;
7987 		req.fw_mem_ready_enable_valid = 1;
7988 		req.fw_mem_ready_enable = 1;
7989 	}
7990 
7991 	DNPRINTF(QWX_D_QMI, "%s: qmi indication register request\n", __func__);
7992 
7993 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_IND_REGISTER_REQ_V01,
7994 			       QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
7995 			       qmi_wlanfw_ind_register_req_msg_v01_ei,
7996 			       &req, sizeof(req));
7997 	if (ret) {
7998 		printf("%s: failed to send indication register request: %d\n",
7999 		    sc->sc_dev.dv_xname, ret);
8000 		return -1;
8001 	}
8002 
8003 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8004 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8005 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwind",
8006 		    SEC_TO_NSEC(1));
8007 		if (ret) {
8008 			printf("%s: fw indication register request timeout\n",
8009 			    sc->sc_dev.dv_xname);
8010 			return ret;
8011 		}
8012 	}
8013 
8014 	return 0;
8015 }
8016 
8017 int
qwx_qmi_host_cap_send(struct qwx_softc * sc)8018 qwx_qmi_host_cap_send(struct qwx_softc *sc)
8019 {
8020 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
8021 	int ret;
8022 
8023 	memset(&req, 0, sizeof(req));
8024 	req.num_clients_valid = 1;
8025 	req.num_clients = 1;
8026 	req.mem_cfg_mode = sc->hw_params.fw_mem_mode;
8027 	req.mem_cfg_mode_valid = 1;
8028 	req.bdf_support_valid = 1;
8029 	req.bdf_support = 1;
8030 
8031 	if (sc->hw_params.m3_fw_support) {
8032 		req.m3_support_valid = 1;
8033 		req.m3_support = 1;
8034 		req.m3_cache_support_valid = 1;
8035 		req.m3_cache_support = 1;
8036 	} else {
8037 		req.m3_support_valid = 0;
8038 		req.m3_support = 0;
8039 		req.m3_cache_support_valid = 0;
8040 		req.m3_cache_support = 0;
8041 	}
8042 
8043 	req.cal_done_valid = 1;
8044 	req.cal_done = sc->qmi_cal_done;
8045 
8046 	if (sc->hw_params.internal_sleep_clock) {
8047 		req.nm_modem_valid = 1;
8048 
8049 		/* Notify firmware that this is non-qualcomm platform. */
8050 		req.nm_modem |= QWX_HOST_CSTATE_BIT;
8051 
8052 		/* Notify firmware about the sleep clock selection,
8053 		 * nm_modem_bit[1] is used for this purpose. Host driver on
8054 		 * non-qualcomm platforms should select internal sleep
8055 		 * clock.
8056 		 */
8057 		req.nm_modem |= QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT;
8058 	}
8059 
8060 	if (sc->hw_params.global_reset)
8061 		req.nm_modem |= QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET;
8062 
8063 	req.nm_modem |= QWX_PLATFORM_CAP_PCIE_PME_D3COLD;
8064 
8065 	DNPRINTF(QWX_D_QMI, "%s: qmi host cap request\n", __func__);
8066 
8067 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_HOST_CAP_REQ_V01,
8068 			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
8069 			       qmi_wlanfw_host_cap_req_msg_v01_ei,
8070 			       &req, sizeof(req));
8071 	if (ret) {
8072 		printf("%s: failed to send host cap request: %d\n",
8073 		    sc->sc_dev.dv_xname, ret);
8074 		return -1;
8075 	}
8076 
8077 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8078 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8079 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwhcap",
8080 		    SEC_TO_NSEC(1));
8081 		if (ret) {
8082 			printf("%s: fw host cap request timeout\n",
8083 			    sc->sc_dev.dv_xname);
8084 			return ret;
8085 		}
8086 	}
8087 
8088 	return 0;
8089 }
8090 
/*
 * Answer the firmware's QMI memory request indication.
 *
 * Waits for the "request mem" indication, validates the requested
 * segment list, (re)allocates one contiguous DMA block covering the
 * total size, and responds with that block chunked into the segments
 * the firmware asked for. If the allocation fails (or the firmware
 * requested nothing) an empty response is sent and EBUSY is returned
 * so the caller can retry; the firmware is expected to fail such a
 * response and re-issue a (possibly smaller) request. Returns 0 once
 * the firmware has acknowledged the memory and reported it ready,
 * -1 on error/timeouts.
 */
int
qwx_qmi_mem_seg_send(struct qwx_softc *sc)
{
	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind;
	uint32_t mem_seg_len;
	const uint32_t mem_seg_len_max = 64; /* bump if needed by future fw */
	uint16_t expected_result;
	size_t total_size;
	int i, ret;

	sc->fwmem_ready = 0;

	/* Wait for the interrupt path to post the request indication. */
	while (sc->sc_req_mem_ind == NULL) {
		ret = tsleep_nsec(&sc->sc_req_mem_ind, 0, "qwxfwmem",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	sc->expect_fwmem_req = 0;

	/* Sanity-check the firmware's segment list. */
	ind = sc->sc_req_mem_ind;
	mem_seg_len = le32toh(ind->mem_seg_len);
	if (mem_seg_len > mem_seg_len_max) {
		printf("%s: firmware requested too many memory segments: %u\n",
		    sc->sc_dev.dv_xname, mem_seg_len);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	total_size = 0;
	for (i = 0; i < mem_seg_len; i++) {
		if (ind->mem_seg[i].size == 0) {
			printf("%s: firmware requested zero-sized "
			    "memory segment %u\n", sc->sc_dev.dv_xname, i);
			free(sc->sc_req_mem_ind, M_DEVBUF,
			    sizeof(*sc->sc_req_mem_ind));
			sc->sc_req_mem_ind = NULL;
			return -1;
		}
		total_size += le32toh(ind->mem_seg[i].size);
	}

	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (req == NULL) {
		printf("%s: failed to allocate respond memory request\n",
		    sc->sc_dev.dv_xname);
		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
		sc->sc_req_mem_ind = NULL;
		return -1;
	}

	if (total_size == 0) {
		/* Should not happen. Send back an empty allocation. */
		printf("%s: firmware has requested no memory\n",
		    sc->sc_dev.dv_xname);
		mem_seg_len = 0;
	} else if (sc->fwmem == NULL || QWX_DMA_LEN(sc->fwmem) < total_size) {
		/* Existing block (if any) is too small; reallocate. */
		if (sc->fwmem != NULL)
			qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
		sc->fwmem = qwx_dmamem_alloc(sc->sc_dmat, total_size, 65536);
		if (sc->fwmem == NULL) {
			printf("%s: failed to allocate %zu bytes of DMA "
			    "memory for firmware\n", sc->sc_dev.dv_xname,
			    total_size);
			/* Send back an empty allocation. */
			mem_seg_len = 0;
		} else
			DPRINTF("%s: allocated %zu bytes of DMA memory for "
			    "firmware\n", sc->sc_dev.dv_xname, total_size);
	}

	/* Chunk DMA memory block into segments as requested by firmware. */
	req->mem_seg_len = htole32(mem_seg_len);
	if (sc->fwmem) {
		uint64_t paddr = QWX_DMA_DVA(sc->fwmem);

		for (i = 0; i < mem_seg_len; i++) {
			DPRINTF("%s: mem seg[%d] addr=%llx size=%u type=%u\n",
			    __func__, i, paddr, le32toh(ind->mem_seg[i].size),
			    le32toh(ind->mem_seg[i].type));
			req->mem_seg[i].addr = htole64(paddr);
			paddr += le32toh(ind->mem_seg[i].size);

			/* Values in 'ind' are in little-endian format. */
			req->mem_seg[i].size = ind->mem_seg[i].size;
			req->mem_seg[i].type = ind->mem_seg[i].type;
		}
	}

	free(ind, M_DEVBUF, sizeof(*ind));
	sc->sc_req_mem_ind = NULL;

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_RESPOND_MEM_REQ_V01,
			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
			       qmi_wlanfw_respond_mem_req_msg_v01_ei,
			       req, sizeof(*req));
	free(req, M_DEVBUF, sizeof(*req));
	if (ret) {
		printf("%s: failed to send respond memory request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return -1;
	}

	/*
	 * An empty response is expected to be rejected by the firmware,
	 * so in that case wait for a failure result instead of success.
	 */
	if (mem_seg_len == 0) {
		expected_result = QMI_RESULT_FAILURE_V01;
		sc->qmi_resp.result = QMI_RESULT_SUCCESS_V01;
	} else {
		expected_result = QMI_RESULT_SUCCESS_V01;
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	}
	while (sc->qmi_resp.result != expected_result) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwrespmem",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: fw respond memory request timeout\n",
			    sc->sc_dev.dv_xname);
			return -1;
		}
	}

	if (mem_seg_len == 0) {
		sc->expect_fwmem_req = 1;
		return EBUSY; /* retry */
	}

	/* Wait for the firmware's "memory ready" indication. */
	if (!sc->hw_params.fixed_fw_mem) {
		while (!sc->fwmem_ready) {
			ret = tsleep_nsec(&sc->fwmem_ready, 0, "qwxfwrdy",
			    SEC_TO_NSEC(10));
			if (ret) {
				printf("%s: fw memory ready timeout\n",
				    sc->sc_dev.dv_xname);
				return -1;
			}
		}
	}

	return 0;
}
8236 
/*
 * Look up a board data file variant name in SMBIOS.
 * Not yet implemented; always reports success with no variant set.
 */
int
qwx_core_check_smbios(struct qwx_softc *sc)
{
	return 0; /* TODO */
}
8242 
/*
 * On FDT platforms, read the board calibration variant name from the
 * device tree into qmi_target.bdf_ext. A no-op elsewhere or when no
 * device tree node is attached. Always returns 0.
 */
int
qwx_core_check_dt(struct qwx_softc *sc)
{
#ifdef __HAVE_FDT
	if (sc->sc_node == 0)
		return 0;

	/*
	 * NOTE(review): copies at most sizeof(bdf_ext) - 1 bytes, which
	 * presumably leaves the final pre-zeroed byte as NUL terminator
	 * — confirm bdf_ext is zeroed before this runs.
	 */
	OF_getprop(sc->sc_node, "qcom,ath11k-calibration-variant",
	    sc->qmi_target.bdf_ext, sizeof(sc->qmi_target.bdf_ext) - 1);
#endif

	return 0;
}
8256 
/*
 * Query firmware/target capabilities via QMI (chip, board and SoC ids,
 * firmware version/build info; stored in sc->qmi_target by the response
 * handler), then pick up any board-file variant name from SMBIOS or the
 * device tree. Blocks until the firmware response arrives.
 */
int
qwx_qmi_request_target_cap(struct qwx_softc *sc)
{
	struct qmi_wlanfw_cap_req_msg_v01 req;
	int ret = 0;
	int r;
	char *fw_build_id;
	int fw_build_id_mask_len;

	/* The capability request message has no payload fields to set. */
	memset(&req, 0, sizeof(req));

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_CAP_REQ_V01,
	    QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_cap_req_msg_v01_ei, &req, sizeof(req));
	if (ret) {
		printf("%s: failed to send qmi cap request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for the response handler to flip the result to success. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwcap",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: qmi cap request failed\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	/* Strip the common build-id prefix before printing, if present. */
	fw_build_id = sc->qmi_target.fw_build_id;
	fw_build_id_mask_len = strlen(QWX_FW_BUILD_ID_MASK);
	if (!strncmp(fw_build_id, QWX_FW_BUILD_ID_MASK, fw_build_id_mask_len))
		fw_build_id = fw_build_id + fw_build_id_mask_len;

	DPRINTF("%s: chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
	    sc->sc_dev.dv_xname,
	    sc->qmi_target.chip_id, sc->qmi_target.chip_family,
	    sc->qmi_target.board_id, sc->qmi_target.soc_id);

	DPRINTF("%s: fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
	    sc->sc_dev.dv_xname, sc->qmi_target.fw_version,
	    sc->qmi_target.fw_build_timestamp, fw_build_id);

	/* Variant lookups are best-effort; failures only get logged. */
	r = qwx_core_check_smbios(sc);
	if (r)
		DPRINTF("%s: SMBIOS bdf variant name not set\n", __func__);

	r = qwx_core_check_dt(sc);
	if (r)
		DPRINTF("%s: DT bdf variant name not set\n", __func__);

out:
	return ret;
}
8313 
8314 int
qwx_qmi_request_device_info(struct qwx_softc * sc)8315 qwx_qmi_request_device_info(struct qwx_softc *sc)
8316 {
8317 	/* device info message req is only sent for hybrid bus devices */
8318 	if (!sc->hw_params.hybrid_bus_type)
8319 		return 0;
8320 
8321 	/* TODO */
8322 	return -1;
8323 }
8324 
8325 int
_qwx_core_create_board_name(struct qwx_softc * sc,char * name,size_t name_len,int with_variant,int bus_type_mode)8326 _qwx_core_create_board_name(struct qwx_softc *sc, char *name,
8327     size_t name_len, int with_variant, int bus_type_mode)
8328 {
8329 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
8330 	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
8331 
8332 	if (with_variant && sc->qmi_target.bdf_ext[0] != '\0')
8333 		snprintf(variant, sizeof(variant), ",variant=%s",
8334 		    sc->qmi_target.bdf_ext);
8335 
8336 	switch (sc->id.bdf_search) {
8337 	case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
8338 		if (bus_type_mode)
8339 			snprintf(name, name_len, "bus=%s", sc->sc_bus_str);
8340 		else
8341 			snprintf(name, name_len,
8342 			    "bus=%s,vendor=%04x,device=%04x,"
8343 			    "subsystem-vendor=%04x,subsystem-device=%04x,"
8344 			    "qmi-chip-id=%d,qmi-board-id=%d%s",
8345 			    sc->sc_bus_str, sc->id.vendor, sc->id.device,
8346 			    sc->id.subsystem_vendor, sc->id.subsystem_device,
8347 			    sc->qmi_target.chip_id, sc->qmi_target.board_id,
8348 			    variant);
8349 		break;
8350 	default:
8351 		snprintf(name, name_len,
8352 		    "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
8353 		    sc->sc_bus_str, sc->qmi_target.chip_id,
8354 		    sc->qmi_target.board_id, variant);
8355 		break;
8356 	}
8357 
8358 	DPRINTF("%s: using board name '%s'\n", __func__, name);
8359 
8360 	return 0;
8361 }
8362 
/* Board name including the calibration variant suffix, if any. */
int
qwx_core_create_board_name(struct qwx_softc *sc, char *name, size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 1, 0);
}
8368 
/* Board name without the variant suffix, used as a fallback match. */
int
qwx_core_create_fallback_board_name(struct qwx_softc *sc, char *name,
    size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 0, 0);
}
8375 
/* Board name consisting of the bus type only ("bus=..."). */
int
qwx_core_create_bus_type_board_name(struct qwx_softc *sc, char *name,
    size_t name_len)
{
	return _qwx_core_create_board_name(sc, name, name_len, 0, 1);
}
8382 
/* On-disk information element header used in ath11k firmware files. */
struct ath11k_fw_ie {
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};

enum ath11k_bd_ie_board_type {
	ATH11K_BD_IE_BOARD_NAME = 0,
	ATH11K_BD_IE_BOARD_DATA = 1,
};

enum ath11k_bd_ie_regdb_type {
	ATH11K_BD_IE_REGDB_NAME = 0,
	ATH11K_BD_IE_REGDB_DATA = 1,
};

enum ath11k_bd_ie_type {
	/* contains sub IEs of enum ath11k_bd_ie_board_type */
	ATH11K_BD_IE_BOARD = 0,
	/* contains sub IEs of enum ath11k_bd_ie_regdb_type */
	ATH11K_BD_IE_REGDB = 1,
};

/* Map a top-level board-file IE type to a human-readable name. */
static inline const char *
qwx_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
	if (type == ATH11K_BD_IE_BOARD)
		return "board data";
	if (type == ATH11K_BD_IE_REGDB)
		return "regdb data";

	return "unknown";
}
8418 
/*
 * Scan the sub-IEs inside one top-level board/regdb IE for a name IE
 * matching 'boardname' followed by its data IE.
 *
 * On a match, *boardfw/*boardfw_len point INTO 'buf' (no copy is made;
 * the caller must keep 'buf' alive while using the result) and 0 is
 * returned. Returns ENOENT if no entry matches, EINVAL on a malformed
 * IE length.
 */
int
qwx_core_parse_bd_ie_board(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    const void *buf, size_t buf_len,
    const char *boardname, int ie_id, int name_id, int data_id)
{
	const struct ath11k_fw_ie *hdr;
	int name_match_found = 0;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	*boardfw = NULL;
	*boardfw_len = 0;

	/* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath11k_fw_ie)) {
		hdr = buf;
		/* IE header fields are little-endian on disk. */
		board_ie_id = le32toh(hdr->id);
		board_ie_len = le32toh(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* IE payloads are padded to 4 bytes; bounds-check that. */
		if (buf_len < roundup(board_ie_len, 4)) {
			printf("%s: invalid %s length: %zu < %zu\n",
			    sc->sc_dev.dv_xname, qwx_bd_ie_type_str(ie_id),
			    buf_len, roundup(board_ie_len, 4));
			return EINVAL;
		}

		if (board_ie_id == name_id) {
			/* Name must match exactly (length and bytes). */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, board_ie_len);
			if (ret)
				goto next;

			/* Remember the match; data follows in a later IE. */
			name_match_found = 1;
			   DPRINTF("%s: found match %s for name '%s'", __func__,
			       qwx_bd_ie_type_str(ie_id), boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			DPRINTF("%s: found %s for '%s'", __func__,
			    qwx_bd_ie_type_str(ie_id), boardname);

			/* Return a pointer into 'buf'; no copy is made. */
			*boardfw = board_ie_data;
			*boardfw_len = board_ie_len;
			return 0;
		} else {
			printf("%s: unknown %s id found: %d\n", __func__,
			    qwx_bd_ie_type_str(ie_id), board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = roundup(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	return ENOENT;
}
8488 
/*
 * Locate the board data blob matching 'boardname' within an API-2
 * style board file image ('fwdata'), e.g. board-2.bin.
 *
 * Verifies the file magic, walks the top-level IEs, and delegates the
 * sub-IE search to qwx_core_parse_bd_ie_board(). On success,
 * *boardfw/*boardfw_len point INTO 'fwdata' (no copy); returns EINVAL
 * on a malformed file, ENOENT if no matching entry exists.
 *
 * NOTE(review): error messages always name ATH11K_BOARD_API2_FILE even
 * though the caller may have loaded a different file — cosmetic only.
 */
int
qwx_core_fetch_board_data_api_n(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    u_char *fwdata, size_t fwdata_len,
    const char *boardname, int ie_id_match, int name_id, int data_id)
{
	size_t len, magic_len;
	const uint8_t *data;
	char *filename;
	size_t ie_len;
	struct ath11k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH11K_BOARD_API2_FILE;

	*boardfw = NULL;
	*boardfw_len = 0;

	data = fwdata;
	len = fwdata_len;

	/* magic has extra null byte padded */
	magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		printf("%s: failed to find magic value in %s, "
		    "file too short: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
		DPRINTF("%s: found invalid board magic\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* magic is padded to 4 bytes */
	magic_len = roundup(magic_len, 4);
	if (len < magic_len) {
		printf("%s: %s too small to contain board data, len: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level IEs looking for the requested type. */
	while (len > sizeof(struct ath11k_fw_ie)) {
		hdr = (struct ath11k_fw_ie *)data;
		/* IE header fields are little-endian on disk. */
		ie_id = le32toh(hdr->id);
		ie_len = le32toh(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < roundup(ie_len, 4)) {
			printf("%s: invalid length for board ie_id %d "
			    "ie_len %zu len %zu\n",
			    sc->sc_dev.dv_xname, ie_id, ie_len, len);
			return EINVAL;
		}

		if (ie_id == ie_id_match) {
			ret = qwx_core_parse_bd_ie_board(sc,
			    boardfw, boardfw_len, data, ie_len,
			    boardname, ie_id_match, name_id, data_id);
			if (ret == ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				return ret;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = roundup(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Reached either by falling off the end or via a successful match. */
	if (!*boardfw || !*boardfw_len) {
		printf("%s: failed to fetch %s for %s from %s\n",
		    __func__, qwx_bd_ie_type_str(ie_id_match),
		    boardname, filename);
		return ENOENT;
	}

	return 0;
}
8581 
/*
 * Load a board data file from disk and locate this device's board data
 * blob within it.
 *
 * On success *data/*len hold the loadfirmware() buffer (which the
 * caller owns and must free) and *boardfw/*boardfw_len point into it.
 * Returns an errno on failure; *data may still hold an allocated
 * buffer in that case (caller is responsible for freeing it).
 */
int
qwx_core_fetch_bdf(struct qwx_softc *sc, u_char **data, size_t *len,
    const u_char **boardfw, size_t *boardfw_len, const char *filename)
{
	char path[PATH_MAX];
	char boardname[200];
	int ret;

	/* e.g. "ath11k-<hw dir>-<filename>" under the firmware directory. */
	ret = snprintf(path, sizeof(path), "%s-%s-%s",
	    ATH11K_FW_DIR, sc->hw_params.fw.dir, filename);
	if (ret < 0 || ret >= sizeof(path))
		return ENOSPC;

	ret = qwx_core_create_board_name(sc, boardname, sizeof(boardname));
	if (ret) {
		DPRINTF("%s: failed to create board name: %d",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = loadfirmware(path, data, len);
	if (ret) {
		printf("%s: could not read %s (error %d)\n",
		    sc->sc_dev.dv_xname, path, ret);
		return ret;
	}

	ret = qwx_core_fetch_board_data_api_n(sc, boardfw, boardfw_len,
	    *data, *len, boardname, ATH11K_BD_IE_BOARD,
	    ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA);
	if (ret) {
		DPRINTF("%s: failed to fetch board data for %s from %s\n",
		    sc->sc_dev.dv_xname, boardname, path);
		return ret;
	}

	return 0;
}
8620 
/*
 * Upload a file (board data, calibration data, ...) to firmware memory
 * via QMI_WLANFW_BDF_DOWNLOAD requests.
 *
 * The file is sent in chunks of at most QMI_WLANFW_MAX_DATA_SIZE_V01
 * bytes; each chunk blocks until the firmware acknowledges it.
 * Hardware with a fixed BDF address would instead map and copy the
 * file directly, but that path is not yet implemented (see #ifdef
 * notyet blocks). Returns 0 on success, an errno or -1 on failure;
 * EINVAL if the file is empty.
 */
int
qwx_qmi_load_file_target_mem(struct qwx_softc *sc, const u_char *data,
    size_t len, int type)
{
	struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
	const uint8_t *p = data;
#ifdef notyet
	void *bdf_addr = NULL;
#endif
	int ret = EINVAL; /* empty fw image */
	uint32_t remaining = len;

	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req) {
		printf("%s: failed to allocate bfd download request\n",
		    sc->sc_dev.dv_xname);
		return ENOMEM;
	}

	if (sc->hw_params.fixed_bdf_addr) {
#ifdef notyet
		bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
		if (!bdf_addr) {
			ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
			ret = -EIO;
			goto err_free_req;
		}
#else
		printf("%s: fixed bdf address not yet supported\n",
		    sc->sc_dev.dv_xname);
		ret = EIO;
		goto err_free_req;
#endif
	}

	/* Send the file chunk by chunk; 'req' is reused across chunks. */
	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = sc->qmi_target.board_id;
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->bdf_type = type;
		req->bdf_type_valid = 1;
		req->end_valid = 1;
		req->end = 0;

		/* Mark the final chunk with 'end'. */
		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		if (sc->hw_params.fixed_bdf_addr ||
		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
			/* No inline payload; firmware reads it elsewhere. */
			req->data_valid = 0;
			req->end = 1;
			req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
		} else {
			memcpy(req->data, p, req->data_len);
		}
#ifdef notyet
		if (ab->hw_params.fixed_bdf_addr) {
			if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
				bdf_addr += ab->hw_params.fw.cal_offset;

			memcpy_toio(bdf_addr, p, len);
		}
#endif
		DPRINTF("%s: bdf download req fixed addr type %d\n",
		    __func__, type);

		ret = qwx_qmi_send_request(sc,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
		    QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
		    qmi_wlanfw_bdf_download_req_msg_v01_ei,
		    req, sizeof(*req));
		if (ret) {
			printf("%s: failed to send bdf download request\n",
			    sc->sc_dev.dv_xname);
			goto err_iounmap;
		}

		/* Wait for the firmware to acknowledge this chunk. */
		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
		while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
			ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxbdf",
			    SEC_TO_NSEC(1));
			if (ret) {
				printf("%s: bdf download request timeout\n",
				    sc->sc_dev.dv_xname);
				goto err_iounmap;
			}
		}

		if (sc->hw_params.fixed_bdf_addr ||
		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
			/* Single transfer; nothing further to send. */
			remaining = 0;
		} else {
			remaining -= req->data_len;
			p += req->data_len;
			req->seg_id++;
			DPRINTF("%s: bdf download request remaining %i\n",
			    __func__, remaining);
		}
	}

err_iounmap:
#ifdef notyet
	if (ab->hw_params.fixed_bdf_addr)
		iounmap(bdf_addr);
#endif
err_free_req:
	free(req, M_DEVBUF, sizeof(*req));

	return ret;
}
8739 
8740 #define QWX_ELFMAG	"\177ELF"
8741 #define QWX_SELFMAG	4
8742 
/*
 * Load a board data file (BDF) or, when 'regdb' is non-zero, the regulatory
 * database file into target memory via QMI.
 *
 * The file contents are cached in sc->fw_img[] so that subsequent firmware
 * restarts do not need to re-read the file from disk.
 *
 * Returns 0 on success, or a non-zero errno-style value on failure.
 */
int
qwx_qmi_load_bdf_qmi(struct qwx_softc *sc, int regdb)
{
	u_char *data = NULL;	/* owned buffer from qwx_core_fetch_bdf() */
	const u_char *boardfw;
	size_t len = 0, boardfw_len;
	uint32_t fw_size;
	int ret = 0, bdf_type;
#ifdef notyet
	const uint8_t *tmp;
	uint32_t file_type;
#endif
	int fw_idx = regdb ? QWX_FW_REGDB : QWX_FW_BOARD;

	if (sc->fw_img[fw_idx].data) {
		/* Use the previously cached image. */
		boardfw = sc->fw_img[fw_idx].data;
		boardfw_len = sc->fw_img[fw_idx].size;
	} else {
		ret = qwx_core_fetch_bdf(sc, &data, &len,
		    &boardfw, &boardfw_len,
		    regdb ? ATH11K_REGDB_FILE : ATH11K_BOARD_API2_FILE);
		if (ret)
			return ret;

		/*
		 * Cache the image for later restarts. A failed allocation
		 * is deliberately non-fatal: we simply skip caching and
		 * will re-fetch the file next time.
		 */
		sc->fw_img[fw_idx].data = malloc(boardfw_len, M_DEVBUF,
		    M_NOWAIT);
		if (sc->fw_img[fw_idx].data) {
			memcpy(sc->fw_img[fw_idx].data, boardfw, boardfw_len);
			sc->fw_img[fw_idx].size = boardfw_len;
		}
	}

	/* Detect the BDF container format from the file's magic bytes. */
	if (regdb)
		bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
	else if (boardfw_len >= QWX_SELFMAG &&
	    memcmp(boardfw, QWX_ELFMAG, QWX_SELFMAG) == 0)
		bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
	else
		bdf_type = ATH11K_QMI_BDF_TYPE_BIN;

	DPRINTF("%s: bdf_type %d\n", __func__, bdf_type);

	/* Never upload more than the target's board data region can hold. */
	fw_size = MIN(sc->hw_params.fw.board_size, boardfw_len);

	ret = qwx_qmi_load_file_target_mem(sc, boardfw, fw_size, bdf_type);
	if (ret) {
		printf("%s: failed to load bdf file\n", __func__);
		goto out;
	}

	/* QCA6390/WCN6855 does not support cal data, skip it */
	if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
		goto out;
#ifdef notyet
	if (ab->qmi.target.eeprom_caldata) {
		file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
		tmp = filename;
		fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
	} else {
		file_type = ATH11K_QMI_FILE_TYPE_CALDATA;

		/* cal-<bus>-<id>.bin */
		snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
			 ath11k_bus_str(ab->hif.bus), dev_name(dev));
		fw_entry = ath11k_core_firmware_request(ab, filename);
		if (!IS_ERR(fw_entry))
			goto success;

		fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
		if (IS_ERR(fw_entry)) {
			/* Caldata may not be present during first time calibration in
			 * factory hence allow to boot without loading caldata in ftm mode
			 */
			if (ath11k_ftm_mode) {
				ath11k_info(ab,
					    "Booting without cal data file in factory test mode\n");
				return 0;
			}
			ret = PTR_ERR(fw_entry);
			ath11k_warn(ab,
				    "qmi failed to load CAL data file:%s\n",
				    filename);
			goto out;
		}
success:
		fw_size = MIN(ab->hw_params.fw.board_size, fw_entry->size);
		tmp = fw_entry->data;
	}

	ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
	if (ret < 0) {
		ath11k_warn(ab, "qmi failed to load caldata\n");
		goto out_qmi_cal;
	}

	ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);

out_qmi_cal:
	if (!ab->qmi.target.eeprom_caldata)
		release_firmware(fw_entry);
#endif
out:
	/* data/len are only non-NULL/non-zero on the uncached path. */
	free(data, M_DEVBUF, len);
	if (ret == 0)
		DPRINTF("%s: BDF download sequence completed\n", __func__);

	return ret;
}
8851 
8852 int
qwx_qmi_event_load_bdf(struct qwx_softc * sc)8853 qwx_qmi_event_load_bdf(struct qwx_softc *sc)
8854 {
8855 	int ret;
8856 
8857 	ret = qwx_qmi_request_target_cap(sc);
8858 	if (ret < 0) {
8859 		printf("%s: failed to request qmi target capabilities: %d\n",
8860 		    sc->sc_dev.dv_xname, ret);
8861 		return ret;
8862 	}
8863 
8864 	ret = qwx_qmi_request_device_info(sc);
8865 	if (ret < 0) {
8866 		printf("%s: failed to request qmi device info: %d\n",
8867 		    sc->sc_dev.dv_xname, ret);
8868 		return ret;
8869 	}
8870 
8871 	if (sc->hw_params.supports_regdb)
8872 		qwx_qmi_load_bdf_qmi(sc, 1);
8873 
8874 	ret = qwx_qmi_load_bdf_qmi(sc, 0);
8875 	if (ret < 0) {
8876 		printf("%s: failed to load board data file: %d\n",
8877 		    sc->sc_dev.dv_xname, ret);
8878 		return ret;
8879 	}
8880 
8881 	return 0;
8882 }
8883 
8884 int
qwx_qmi_m3_load(struct qwx_softc * sc)8885 qwx_qmi_m3_load(struct qwx_softc *sc)
8886 {
8887 	u_char *data;
8888 	size_t len;
8889 	char path[PATH_MAX];
8890 	int ret;
8891 
8892 	if (sc->fw_img[QWX_FW_M3].data) {
8893 		data = sc->fw_img[QWX_FW_M3].data;
8894 		len = sc->fw_img[QWX_FW_M3].size;
8895 	} else {
8896 		ret = snprintf(path, sizeof(path), "%s-%s-%s",
8897 		    ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_M3_FILE);
8898 		if (ret < 0 || ret >= sizeof(path))
8899 			return ENOSPC;
8900 
8901 		ret = loadfirmware(path, &data, &len);
8902 		if (ret) {
8903 			printf("%s: could not read %s (error %d)\n",
8904 			    sc->sc_dev.dv_xname, path, ret);
8905 			return ret;
8906 		}
8907 
8908 		sc->fw_img[QWX_FW_M3].data = data;
8909 		sc->fw_img[QWX_FW_M3].size = len;
8910 	}
8911 
8912 	if (sc->m3_mem == NULL || QWX_DMA_LEN(sc->m3_mem) < len) {
8913 		if (sc->m3_mem)
8914 			qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
8915 		sc->m3_mem = qwx_dmamem_alloc(sc->sc_dmat, len, 65536);
8916 		if (sc->m3_mem == NULL) {
8917 			printf("%s: failed to allocate %zu bytes of DMA "
8918 			    "memory for M3 firmware\n", sc->sc_dev.dv_xname,
8919 			    len);
8920 			return ENOMEM;
8921 		}
8922 	}
8923 
8924 	memcpy(QWX_DMA_KVA(sc->m3_mem), data, len);
8925 	return 0;
8926 }
8927 
8928 int
qwx_qmi_wlanfw_m3_info_send(struct qwx_softc * sc)8929 qwx_qmi_wlanfw_m3_info_send(struct qwx_softc *sc)
8930 {
8931 	struct qmi_wlanfw_m3_info_req_msg_v01 req;
8932 	int ret = 0;
8933 	uint64_t paddr;
8934 	uint32_t size;
8935 
8936 	memset(&req, 0, sizeof(req));
8937 
8938 	if (sc->hw_params.m3_fw_support) {
8939 		ret = qwx_qmi_m3_load(sc);
8940 		if (ret) {
8941 			printf("%s: failed to load m3 firmware: %d",
8942 			    sc->sc_dev.dv_xname, ret);
8943 			return ret;
8944 		}
8945 
8946 		paddr = QWX_DMA_DVA(sc->m3_mem);
8947 		size = QWX_DMA_LEN(sc->m3_mem);
8948 		req.addr = htole64(paddr);
8949 		req.size = htole32(size);
8950 	} else {
8951 		req.addr = 0;
8952 		req.size = 0;
8953 	}
8954 
8955 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_M3_INFO_REQ_V01,
8956 	    QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
8957 	    qmi_wlanfw_m3_info_req_msg_v01_ei, &req, sizeof(req));
8958 	if (ret) {
8959 		printf("%s: failed to send m3 information request: %d\n",
8960 		    sc->sc_dev.dv_xname, ret);
8961 		return ret;
8962 	}
8963 
8964 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
8965 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
8966 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwm3",
8967 		    SEC_TO_NSEC(1));
8968 		if (ret) {
8969 			printf("%s: m3 information request timeout\n",
8970 			    sc->sc_dev.dv_xname);
8971 			return ret;
8972 		}
8973 	}
8974 
8975 	return 0;
8976 }
8977 
/*
 * Debug helper for dumping SRNG state; stub in this port.
 * Only emits a debug message (DPRINTF is presumably compiled out in
 * non-debug kernels -- confirm against the driver's debug macros).
 */
void
qwx_hal_dump_srng_stats(struct qwx_softc *sc)
{
	DPRINTF("%s not implemented\n", __func__);
}
8983 
8984 uint16_t
qwx_hal_srng_get_entrysize(struct qwx_softc * sc,uint32_t ring_type)8985 qwx_hal_srng_get_entrysize(struct qwx_softc *sc, uint32_t ring_type)
8986 {
8987 	struct hal_srng_config *srng_config;
8988 
8989 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
8990 
8991 	srng_config = &sc->hal.srng_config[ring_type];
8992 	return (srng_config->entry_size << 2);
8993 }
8994 
8995 uint32_t
qwx_hal_srng_get_max_entries(struct qwx_softc * sc,uint32_t ring_type)8996 qwx_hal_srng_get_max_entries(struct qwx_softc *sc, uint32_t ring_type)
8997 {
8998 	struct hal_srng_config *srng_config;
8999 
9000 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
9001 
9002 	srng_config = &sc->hal.srng_config[ring_type];
9003 	return (srng_config->max_size / srng_config->entry_size);
9004 }
9005 
9006 uint32_t *
qwx_hal_srng_dst_get_next_entry(struct qwx_softc * sc,struct hal_srng * srng)9007 qwx_hal_srng_dst_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
9008 {
9009 	uint32_t *desc;
9010 #ifdef notyet
9011 	lockdep_assert_held(&srng->lock);
9012 #endif
9013 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
9014 		return NULL;
9015 
9016 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
9017 
9018 	srng->u.dst_ring.tp += srng->entry_size;
9019 
9020 	/* wrap around to start of ring*/
9021 	if (srng->u.dst_ring.tp == srng->ring_size)
9022 		srng->u.dst_ring.tp = 0;
9023 #ifdef notyet
9024 	/* Try to prefetch the next descriptor in the ring */
9025 	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
9026 		ath11k_hal_srng_prefetch_desc(ab, srng);
9027 #endif
9028 	return desc;
9029 }
9030 
9031 int
qwx_hal_srng_dst_num_free(struct qwx_softc * sc,struct hal_srng * srng,int sync_hw_ptr)9032 qwx_hal_srng_dst_num_free(struct qwx_softc *sc, struct hal_srng *srng,
9033     int sync_hw_ptr)
9034 {
9035 	uint32_t tp, hp;
9036 #ifdef notyet
9037 	lockdep_assert_held(&srng->lock);
9038 #endif
9039 	tp = srng->u.dst_ring.tp;
9040 
9041 	if (sync_hw_ptr) {
9042 		hp = *srng->u.dst_ring.hp_addr;
9043 		srng->u.dst_ring.cached_hp = hp;
9044 	} else {
9045 		hp = srng->u.dst_ring.cached_hp;
9046 	}
9047 
9048 	if (hp >= tp)
9049 		return (hp - tp) / srng->entry_size;
9050 	else
9051 		return (srng->ring_size - tp + hp) / srng->entry_size;
9052 }
9053 
9054 uint32_t *
qwx_hal_srng_src_get_next_reaped(struct qwx_softc * sc,struct hal_srng * srng)9055 qwx_hal_srng_src_get_next_reaped(struct qwx_softc *sc, struct hal_srng *srng)
9056 {
9057 	uint32_t *desc;
9058 #ifdef notyet
9059 	lockdep_assert_held(&srng->lock);
9060 #endif
9061 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
9062 		return NULL;
9063 
9064 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
9065 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
9066 			      srng->ring_size;
9067 
9068 	return desc;
9069 }
9070 
9071 uint32_t *
qwx_hal_srng_src_peek(struct qwx_softc * sc,struct hal_srng * srng)9072 qwx_hal_srng_src_peek(struct qwx_softc *sc, struct hal_srng *srng)
9073 {
9074 #ifdef notyet
9075 	lockdep_assert_held(&srng->lock);
9076 #endif
9077 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
9078 	    srng->u.src_ring.cached_tp)
9079 		return NULL;
9080 
9081 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
9082 }
9083 
/*
 * Return the MSI address (low and high 32 bits) previously stored
 * in the softc by the bus attachment code.
 */
void
qwx_get_msi_address(struct qwx_softc *sc, uint32_t *addr_lo,
    uint32_t *addr_hi)
{
	*addr_lo = sc->msi_addr_lo;
	*addr_hi = sc->msi_addr_hi;
}
9091 
9092 int
qwx_dp_srng_find_ring_in_mask(int ring_num,const uint8_t * grp_mask)9093 qwx_dp_srng_find_ring_in_mask(int ring_num, const uint8_t *grp_mask)
9094 {
9095 	int ext_group_num;
9096 	uint8_t mask = 1 << ring_num;
9097 
9098 	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
9099 	     ext_group_num++) {
9100 		if (mask & grp_mask[ext_group_num])
9101 			return ext_group_num;
9102 	}
9103 
9104 	return -1;
9105 }
9106 
/*
 * Map a (ring type, ring number) pair to its external interrupt group.
 *
 * Selects the appropriate per-group interrupt mask array for the ring
 * type, then looks the ring up in that array. Returns the group index,
 * or -1 for ring types that are not serviced by an ext_group.
 */
int
qwx_dp_srng_calculate_msi_group(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num)
{
	const uint8_t *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		/* One WBM2SW ring is dedicated to RX buffer release;
		 * all others carry TX completions. */
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &sc->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &sc->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &sc->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &sc->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		/* These ring types do not raise ext_group interrupts. */
		return -1;
	}

	return qwx_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
9158 
9159 void
qwx_dp_srng_msi_setup(struct qwx_softc * sc,struct hal_srng_params * ring_params,enum hal_ring_type type,int ring_num)9160 qwx_dp_srng_msi_setup(struct qwx_softc *sc, struct hal_srng_params *ring_params,
9161     enum hal_ring_type type, int ring_num)
9162 {
9163 	int msi_group_number;
9164 	uint32_t msi_data_start = 0;
9165 	uint32_t msi_data_count = 1;
9166 	uint32_t msi_irq_start = 0;
9167 	uint32_t addr_lo;
9168 	uint32_t addr_hi;
9169 	int ret;
9170 
9171 	ret = sc->ops.get_user_msi_vector(sc, "DP",
9172 	    &msi_data_count, &msi_data_start, &msi_irq_start);
9173 	if (ret)
9174 		return;
9175 
9176 	msi_group_number = qwx_dp_srng_calculate_msi_group(sc, type,
9177 	    ring_num);
9178 	if (msi_group_number < 0) {
9179 		DPRINTF("%s ring not part of an ext_group; ring_type %d,"
9180 		    "ring_num %d\n", __func__, type, ring_num);
9181 		ring_params->msi_addr = 0;
9182 		ring_params->msi_data = 0;
9183 		return;
9184 	}
9185 
9186 	qwx_get_msi_address(sc, &addr_lo, &addr_hi);
9187 
9188 	ring_params->msi_addr = addr_lo;
9189 	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
9190 	ring_params->msi_data = (msi_group_number % msi_data_count) +
9191 	    msi_data_start;
9192 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
9193 }
9194 
/*
 * Allocate DMA memory for a data path SRNG and configure it in the HAL:
 * size the ring, set up MSI parameters, choose interrupt coalescing
 * thresholds per ring type, and register the ring with the HAL layer.
 *
 * On success the ring's HAL ring id is stored in ring->ring_id and 0 is
 * returned; otherwise a non-zero error code is returned.
 */
int
qwx_dp_srng_setup(struct qwx_softc *sc, struct dp_srng *ring,
    enum hal_ring_type type, int ring_num, int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	uint16_t entry_sz = qwx_hal_srng_get_entrysize(sc, type);
	uint32_t max_entries = qwx_hal_srng_get_max_entries(sc, type);
	int ret;
	int cached = 0;	/* always 0 until the notyet block below is enabled */

	if (num_entries > max_entries)
		num_entries = max_entries;

	/* Extra bytes allow the HAL to align the ring base address. */
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

#ifdef notyet
	if (sc->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
		if (!ring->vaddr_unaligned)
			return -ENOMEM;
	}
#endif
	if (!cached) {
		ring->mem = qwx_dmamem_alloc(sc->sc_dmat, ring->size,
		    PAGE_SIZE);
		if (ring->mem == NULL) {
			printf("%s: could not allocate DP SRNG DMA memory\n",
			    sc->sc_dev.dv_xname);
			return ENOMEM;

		}
	}

	ring->vaddr = QWX_DMA_KVA(ring->mem);
	ring->paddr = QWX_DMA_DVA(ring->mem);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	qwx_dp_srng_msi_setup(sc, &params, type, ring_num + mac_id);

	/* Pick interrupt batching/timing thresholds per ring type. */
	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		/* Buffer rings interrupt on low-watermark instead of batch. */
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
			    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
			    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* rings >= 3 use the generic thresholds below */
		/* FALLTHROUGH */
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		printf("%s: Not a valid ring type in dp :%d\n",
		    sc->sc_dev.dv_xname, type);
		return EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = qwx_hal_srng_setup(sc, type, ring_num, mac_id, &params);
	if (ret < 0) {
		printf("%s: failed to setup srng: %d ring_id %d\n",
		    sc->sc_dev.dv_xname, ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;
	return 0;
}
9312 
9313 void
qwx_hal_srng_access_begin(struct qwx_softc * sc,struct hal_srng * srng)9314 qwx_hal_srng_access_begin(struct qwx_softc *sc, struct hal_srng *srng)
9315 {
9316 #ifdef notyet
9317 	lockdep_assert_held(&srng->lock);
9318 #endif
9319 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
9320 		srng->u.src_ring.cached_tp =
9321 			*(volatile uint32_t *)srng->u.src_ring.tp_addr;
9322 	} else {
9323 		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
9324 	}
9325 }
9326 
/*
 * End a ring access sequence: publish our updated pointer (head for
 * source rings, tail for destination rings) to the consumer, and record
 * the peer's last-seen pointer.
 *
 * LMAC rings publish through a shared-memory location read by firmware;
 * other rings publish through a device register write.
 */
void
qwx_hal_srng_access_end(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		/* Non-LMAC rings: write the pointer register directly.
		 * The register offset is the pointer address relative to
		 * the start of the mapped device memory. */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			sc->ops.write32(sc,
			    (unsigned long)srng->u.src_ring.hp_addr -
			    (unsigned long)sc->mem, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			sc->ops.write32(sc,
			    (unsigned long)srng->u.dst_ring.tp_addr -
			    (unsigned long)sc->mem, srng->u.dst_ring.tp);
		}
	}
#ifdef notyet
	srng->timestamp = jiffies;
#endif
}
9364 
9365 int
qwx_wbm_idle_ring_setup(struct qwx_softc * sc,uint32_t * n_link_desc)9366 qwx_wbm_idle_ring_setup(struct qwx_softc *sc, uint32_t *n_link_desc)
9367 {
9368 	struct qwx_dp *dp = &sc->dp;
9369 	uint32_t n_mpdu_link_desc, n_mpdu_queue_desc;
9370 	uint32_t n_tx_msdu_link_desc, n_rx_msdu_link_desc;
9371 	int ret = 0;
9372 
9373 	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
9374 			   HAL_NUM_MPDUS_PER_LINK_DESC;
9375 
9376 	n_mpdu_queue_desc = n_mpdu_link_desc /
9377 			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
9378 
9379 	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
9380 			       DP_AVG_MSDUS_PER_FLOW) /
9381 			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
9382 
9383 	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
9384 			       DP_AVG_MSDUS_PER_MPDU) /
9385 			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
9386 
9387 	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
9388 		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
9389 
9390 	if (*n_link_desc & (*n_link_desc - 1))
9391 		*n_link_desc = 1 << fls(*n_link_desc);
9392 
9393 	ret = qwx_dp_srng_setup(sc, &dp->wbm_idle_ring,
9394 	    HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
9395 	if (ret) {
9396 		printf("%s: failed to setup wbm_idle_ring: %d\n",
9397 		    sc->sc_dev.dv_xname, ret);
9398 	}
9399 
9400 	return ret;
9401 }
9402 
9403 void
qwx_dp_link_desc_bank_free(struct qwx_softc * sc,struct dp_link_desc_bank * link_desc_banks)9404 qwx_dp_link_desc_bank_free(struct qwx_softc *sc,
9405     struct dp_link_desc_bank *link_desc_banks)
9406 {
9407 	int i;
9408 
9409 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
9410 		if (link_desc_banks[i].mem) {
9411 			qwx_dmamem_free(sc->sc_dmat, link_desc_banks[i].mem);
9412 			link_desc_banks[i].mem = NULL;
9413 		}
9414 	}
9415 }
9416 
9417 int
qwx_dp_link_desc_bank_alloc(struct qwx_softc * sc,struct dp_link_desc_bank * desc_bank,int n_link_desc_bank,int last_bank_sz)9418 qwx_dp_link_desc_bank_alloc(struct qwx_softc *sc,
9419     struct dp_link_desc_bank *desc_bank, int n_link_desc_bank,
9420     int last_bank_sz)
9421 {
9422 	struct qwx_dp *dp = &sc->dp;
9423 	int i;
9424 	int ret = 0;
9425 	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
9426 
9427 	for (i = 0; i < n_link_desc_bank; i++) {
9428 		if (i == (n_link_desc_bank - 1) && last_bank_sz)
9429 			desc_sz = last_bank_sz;
9430 
9431 		desc_bank[i].mem = qwx_dmamem_alloc(sc->sc_dmat, desc_sz,
9432 		    PAGE_SIZE);
9433 		if (!desc_bank[i].mem) {
9434 			ret = ENOMEM;
9435 			goto err;
9436 		}
9437 
9438 		desc_bank[i].vaddr = QWX_DMA_KVA(desc_bank[i].mem);
9439 		desc_bank[i].paddr = QWX_DMA_DVA(desc_bank[i].mem);
9440 		desc_bank[i].size = desc_sz;
9441 	}
9442 
9443 	return 0;
9444 
9445 err:
9446 	qwx_dp_link_desc_bank_free(sc, dp->link_desc_banks);
9447 
9448 	return ret;
9449 }
9450 
/*
 * Program the WBM idle link descriptor list registers: chain the scatter
 * buffers together, describe the list geometry to the hardware, set the
 * head/tail pointers, and enable the ring.
 *
 * This mirrors the upstream Linux ath11k ath11k_hal_setup_link_idle_list()
 * register programming sequence.
 */
void
qwx_hal_setup_link_idle_list(struct qwx_softc *sc,
    struct hal_wbm_idle_scatter_list *sbuf,
    uint32_t nsbufs, uint32_t tot_link_desc, uint32_t end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	uint32_t reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	/* The link to the next scatter buffer lives just past the
	 * usable area of each buffer. */
	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	/* Chain each scatter buffer to its successor. */
	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
		    HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
		    (uint64_t)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
		    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
		    BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	/* Describe the scatter buffer geometry and list base address. */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
	    FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
	    reg_scatter_buf_sz * nsbufs));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_RING_BASE_MSB,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    (uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
	    BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list.
	 * NOTE(review): HEAD_INFO_IX0 is written twice below, first with
	 * the last buffer and then with the first; this matches the
	 * upstream ath11k sequence -- confirm before changing. */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
	    (end_offset >> 2)));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));

	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
	    2 * tot_link_desc);

	/* Enable the SRNG */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc),
	    0x40);
}
9526 
9527 void
qwx_hal_set_link_desc_addr(struct hal_wbm_link_desc * desc,uint32_t cookie,bus_addr_t paddr)9528 qwx_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, uint32_t cookie,
9529     bus_addr_t paddr)
9530 {
9531 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
9532 	    (paddr & HAL_ADDR_LSB_REG_MASK));
9533 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
9534 	    ((uint64_t)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
9535 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
9536 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
9537 }
9538 
9539 void
qwx_dp_scatter_idle_link_desc_cleanup(struct qwx_softc * sc)9540 qwx_dp_scatter_idle_link_desc_cleanup(struct qwx_softc *sc)
9541 {
9542 	struct qwx_dp *dp = &sc->dp;
9543 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
9544 	int i;
9545 
9546 	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
9547 		if (slist[i].mem == NULL)
9548 			continue;
9549 
9550 		qwx_dmamem_free(sc->sc_dmat, slist[i].mem);
9551 		slist[i].mem = NULL;
9552 		slist[i].vaddr = NULL;
9553 		slist[i].paddr = 0L;
9554 	}
9555 }
9556 
/*
 * Allocate the idle-list scatter buffers, populate them with link
 * descriptors pointing into the link descriptor banks, and hand the
 * resulting list to the hardware via qwx_hal_setup_link_idle_list().
 *
 * 'size' is the total number of bytes of link descriptors to cover;
 * 'n_link_desc' is the (power-of-two) descriptor count programmed into
 * the hardware. Returns 0 on success or a non-zero error code, cleaning
 * up any scatter buffers allocated so far on failure.
 */
int
qwx_dp_scatter_idle_link_desc_setup(struct qwx_softc *sc, int size,
    uint32_t n_link_desc_bank, uint32_t n_link_desc, uint32_t last_bank_sz)
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	uint32_t n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int n_entries;
	bus_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	uint32_t end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
	    qwx_hal_srng_get_entrysize(sc, HAL_WBM_IDLE_LINK);
	num_scatter_buf = howmany(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return EINVAL;

	/* Allocate the scatter buffers. */
	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].mem = qwx_dmamem_alloc(sc->sc_dmat,
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, PAGE_SIZE);
		if (slist[i].mem == NULL) {
			ret = ENOMEM;
			goto err;
		}

		slist[i].vaddr = QWX_DMA_KVA(slist[i].mem);
		slist[i].paddr = QWX_DMA_DVA(slist[i].mem);
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	/* Fill the scatter buffers with one link descriptor per
	 * HAL_LINK_DESC_SIZE chunk of each descriptor bank, moving to
	 * the next scatter buffer whenever the current one is full.
	 * (This loop structure mirrors upstream ath11k dp.c.) */
	for (i = 0; i < n_link_desc_bank; i++) {
		n_entries = DP_LINK_DESC_ALLOC_SIZE_THRESH / HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			qwx_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	/* Byte offset of the first unused entry in the last buffer. */
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
	    sizeof(struct hal_wbm_link_desc);
	qwx_hal_setup_link_idle_list(sc, slist, num_scatter_buf,
	    n_link_desc, end_offset);

	return 0;

err:
	qwx_dp_scatter_idle_link_desc_cleanup(sc);

	return ret;
}
9628 
/*
 * Claim the next free descriptor slot on a source ring and advance the
 * ring's head pointer past it. Returns a pointer to the claimed slot, or
 * NULL when the ring is full (the advanced head would collide with the
 * cached tail pointer). Caller must hold the srng lock (assert stubbed).
 */
uint32_t *
qwx_hal_srng_src_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
	uint32_t next_hp;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	/* Ring is full when the head would catch up with the cached tail. */
	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	/* Hand out the slot at the current head, then advance the head. */
	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}
9662 
/*
 * Advance only the reap head pointer of a source ring and return the
 * descriptor slot it now refers to, or NULL when the ring is full.
 * Unlike qwx_hal_srng_src_get_next_entry(), the real head pointer (hp)
 * is left untouched, so the slot is reserved but not yet exposed to
 * hardware. Caller must hold the srng lock (assert stubbed).
 */
uint32_t *
qwx_hal_srng_src_reap_next(struct qwx_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
	uint32_t next_reap_hp;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
	    srng->ring_size;

	/* Full when the advanced reap head meets the cached tail. */
	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + next_reap_hp;
	srng->u.src_ring.reap_hp = next_reap_hp;

	return desc;
}
9682 
9683 int
qwx_dp_link_desc_setup(struct qwx_softc * sc,struct dp_link_desc_bank * link_desc_banks,uint32_t ring_type,struct hal_srng * srng,uint32_t n_link_desc)9684 qwx_dp_link_desc_setup(struct qwx_softc *sc,
9685     struct dp_link_desc_bank *link_desc_banks, uint32_t ring_type,
9686     struct hal_srng *srng, uint32_t n_link_desc)
9687 {
9688 	uint32_t tot_mem_sz;
9689 	uint32_t n_link_desc_bank, last_bank_sz;
9690 	uint32_t entry_sz, n_entries;
9691 	uint64_t paddr;
9692 	uint32_t *desc;
9693 	int i, ret;
9694 
9695 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
9696 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
9697 
9698 	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
9699 		n_link_desc_bank = 1;
9700 		last_bank_sz = tot_mem_sz;
9701 	} else {
9702 		n_link_desc_bank = tot_mem_sz /
9703 		    (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN);
9704 		last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH -
9705 		    HAL_LINK_DESC_ALIGN);
9706 
9707 		if (last_bank_sz)
9708 			n_link_desc_bank += 1;
9709 	}
9710 
9711 	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
9712 		return EINVAL;
9713 
9714 	ret = qwx_dp_link_desc_bank_alloc(sc, link_desc_banks,
9715 	    n_link_desc_bank, last_bank_sz);
9716 	if (ret)
9717 		return ret;
9718 
9719 	/* Setup link desc idle list for HW internal usage */
9720 	entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);
9721 	tot_mem_sz = entry_sz * n_link_desc;
9722 
9723 	/* Setup scatter desc list when the total memory requirement is more */
9724 	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
9725 	    ring_type != HAL_RXDMA_MONITOR_DESC) {
9726 		ret = qwx_dp_scatter_idle_link_desc_setup(sc, tot_mem_sz,
9727 		    n_link_desc_bank, n_link_desc, last_bank_sz);
9728 		if (ret) {
9729 			printf("%s: failed to setup scatting idle list "
9730 			    "descriptor :%d\n",
9731 			    sc->sc_dev.dv_xname, ret);
9732 			goto fail_desc_bank_free;
9733 		}
9734 
9735 		return 0;
9736 	}
9737 #if 0
9738 	spin_lock_bh(&srng->lock);
9739 #endif
9740 	qwx_hal_srng_access_begin(sc, srng);
9741 
9742 	for (i = 0; i < n_link_desc_bank; i++) {
9743 		n_entries = (link_desc_banks[i].size) / HAL_LINK_DESC_SIZE;
9744 		paddr = link_desc_banks[i].paddr;
9745 		while (n_entries &&
9746 		    (desc = qwx_hal_srng_src_get_next_entry(sc, srng))) {
9747 			qwx_hal_set_link_desc_addr(
9748 			    (struct hal_wbm_link_desc *) desc, i, paddr);
9749 			n_entries--;
9750 			paddr += HAL_LINK_DESC_SIZE;
9751 		}
9752 	}
9753 
9754 	qwx_hal_srng_access_end(sc, srng);
9755 #if 0
9756 	spin_unlock_bh(&srng->lock);
9757 #endif
9758 
9759 	return 0;
9760 
9761 fail_desc_bank_free:
9762 	qwx_dp_link_desc_bank_free(sc, link_desc_banks);
9763 
9764 	return ret;
9765 }
9766 
/*
 * Release the DMA memory backing a DP srng and clear its bookkeeping
 * fields. Safe to call on a ring that was never set up, or twice on the
 * same ring (mem is NULLed after the free).
 */
void
qwx_dp_srng_cleanup(struct qwx_softc *sc, struct dp_srng *ring)
{
	if (ring->mem == NULL)
		return;

	/*
	 * NOTE: the disabled branch mirrors Linux's cached-ring path; when
	 * enabled, the else below applies only to non-cached rings.
	 */
#if 0
	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
#endif
		qwx_dmamem_free(sc->sc_dmat, ring->mem);

	ring->mem = NULL;
	ring->vaddr = NULL;
	ring->paddr = 0;
}
9784 
9785 void
qwx_dp_shadow_stop_timer(struct qwx_softc * sc,struct qwx_hp_update_timer * update_timer)9786 qwx_dp_shadow_stop_timer(struct qwx_softc *sc,
9787     struct qwx_hp_update_timer *update_timer)
9788 {
9789 	if (!sc->hw_params.supports_shadow_regs)
9790 		return;
9791 
9792 	timeout_del(&update_timer->timer);
9793 }
9794 
9795 void
qwx_dp_shadow_start_timer(struct qwx_softc * sc,struct hal_srng * srng,struct qwx_hp_update_timer * update_timer)9796 qwx_dp_shadow_start_timer(struct qwx_softc *sc, struct hal_srng *srng,
9797     struct qwx_hp_update_timer *update_timer)
9798 {
9799 #ifdef notyet
9800 	lockdep_assert_held(&srng->lock);
9801 #endif
9802 	if (!sc->hw_params.supports_shadow_regs)
9803 		return;
9804 
9805 	update_timer->tx_num++;
9806 	if (update_timer->started)
9807 		return;
9808 
9809 	update_timer->started = 1;
9810 	update_timer->timer_tx_num = update_timer->tx_num;
9811 
9812 	timeout_add_msec(&update_timer->timer, update_timer->interval);
9813 }
9814 
/*
 * Timeout callback for a shadow-register head-pointer update timer.
 * If TX activity occurred since the timer was armed, re-arm for another
 * interval; otherwise flush the shadow head/tail pointers to hardware
 * and let the timer lapse (it is re-armed by the next TX via
 * qwx_dp_shadow_start_timer()).
 */
void
qwx_dp_shadow_timer_handler(void *arg)
{
	struct qwx_hp_update_timer *update_timer = arg;
	struct qwx_softc *sc = update_timer->sc;
	struct hal_srng	*srng = &sc->hal.srng_list[update_timer->ring_id];
	int s;

#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	s = splnet();

	/*
	 * Update HP if there were no TX operations during the timeout interval,
	 * and stop the timer. Timer will be restarted if more TX happens.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		timeout_add_msec(&update_timer->timer, update_timer->interval);
	} else {
		update_timer->started = 0;
		qwx_hal_srng_shadow_update_hp_tp(sc, srng);
	}
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	splx(s);
}
9844 
9845 void
qwx_dp_stop_shadow_timers(struct qwx_softc * sc)9846 qwx_dp_stop_shadow_timers(struct qwx_softc *sc)
9847 {
9848 	int i;
9849 
9850 	for (i = 0; i < sc->hw_params.max_tx_ring; i++)
9851 		qwx_dp_shadow_stop_timer(sc, &sc->dp.tx_ring_timer[i]);
9852 
9853 	qwx_dp_shadow_stop_timer(sc, &sc->dp.reo_cmd_timer);
9854 }
9855 
/*
 * Tear down all common DP rings set up by qwx_dp_srng_common_setup().
 * Shadow timers are stopped first so no timeout fires against a ring
 * whose DMA memory has already been released.
 */
void
qwx_dp_srng_common_cleanup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	int i;

	qwx_dp_stop_shadow_timers(sc);
	qwx_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_status_ring);
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
	}
	qwx_dp_srng_cleanup(sc, &dp->reo_reinject_ring);
	qwx_dp_srng_cleanup(sc, &dp->rx_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_except_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_status_ring);
}
9876 
9877 void
qwx_hal_srng_get_params(struct qwx_softc * sc,struct hal_srng * srng,struct hal_srng_params * params)9878 qwx_hal_srng_get_params(struct qwx_softc *sc, struct hal_srng *srng,
9879     struct hal_srng_params *params)
9880 {
9881 	params->ring_base_paddr = srng->ring_base_paddr;
9882 	params->ring_base_vaddr = srng->ring_base_vaddr;
9883 	params->num_entries = srng->num_entries;
9884 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
9885 	params->intr_batch_cntr_thres_entries =
9886 		srng->intr_batch_cntr_thres_entries;
9887 	params->low_threshold = srng->u.src_ring.low_threshold;
9888 	params->msi_addr = srng->msi_addr;
9889 	params->msi_data = srng->msi_data;
9890 	params->flags = srng->flags;
9891 }
9892 
9893 void
qwx_hal_tx_init_data_ring(struct qwx_softc * sc,struct hal_srng * srng)9894 qwx_hal_tx_init_data_ring(struct qwx_softc *sc, struct hal_srng *srng)
9895 {
9896 	struct hal_srng_params params;
9897 	struct hal_tlv_hdr *tlv;
9898 	int i, entry_size;
9899 	uint8_t *desc;
9900 
9901 	memset(&params, 0, sizeof(params));
9902 
9903 	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_TCL_DATA);
9904 	qwx_hal_srng_get_params(sc, srng, &params);
9905 	desc = (uint8_t *)params.ring_base_vaddr;
9906 
9907 	for (i = 0; i < params.num_entries; i++) {
9908 		tlv = (struct hal_tlv_hdr *)desc;
9909 		tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
9910 		    FIELD_PREP(HAL_TLV_HDR_LEN,
9911 		    sizeof(struct hal_tcl_data_cmd));
9912 		desc += entry_size;
9913 	}
9914 }
9915 
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64

/* dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
/* One entry per DSCP code point; TID equals the top three DSCP bits. */
static const uint8_t dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
9940 
/*
 * Program DSCP-to-TID mapping table 'id' into the TCL block. Each DSCP
 * entry takes three bits, so eight mappings are packed into three bytes
 * per loop iteration; the packed table is then written to hardware one
 * 32-bit word at a time. Table programming access is enabled before and
 * disabled after the writes.
 */
void
qwx_hal_tx_set_dscp_tid_map(struct qwx_softc *sc, int id)
{
	uint32_t ctrl_reg_val;
	uint32_t addr;
	uint8_t hw_map_val[HAL_DSCP_TID_TBL_SIZE];
	int i;
	uint32_t value;
	int cnt = 0;

	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);

	/* Enable read/write access */
	ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);

	/* Each map occupies HAL_DSCP_TID_TBL_SIZE bytes of register space. */
	addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
	       (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));

	/* Configure each DSCP-TID mapping in three bits there by configure
	 * three bytes in an iteration.
	 */
	for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
		value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
				   dscp_tid_map[i]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
				   dscp_tid_map[i + 1]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
				   dscp_tid_map[i + 2]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
				   dscp_tid_map[i + 3]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
				   dscp_tid_map[i + 4]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
				   dscp_tid_map[i + 5]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
				   dscp_tid_map[i + 6]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
				   dscp_tid_map[i + 7]);
		/* Only the low 24 bits (8 x 3-bit mappings) are meaningful. */
		memcpy(&hw_map_val[cnt], (uint8_t *)&value, 3);
		cnt += 3;
	}

	for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
		sc->ops.write32(sc, addr, *(uint32_t *)&hw_map_val[i]);
		addr += 4;
	}

	/* Disable read/write access */
	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);
	ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
}
9998 
9999 void
qwx_dp_shadow_init_timer(struct qwx_softc * sc,struct qwx_hp_update_timer * update_timer,uint32_t interval,uint32_t ring_id)10000 qwx_dp_shadow_init_timer(struct qwx_softc *sc,
10001     struct qwx_hp_update_timer *update_timer,
10002     uint32_t interval, uint32_t ring_id)
10003 {
10004 	if (!sc->hw_params.supports_shadow_regs)
10005 		return;
10006 
10007 	update_timer->tx_num = 0;
10008 	update_timer->timer_tx_num = 0;
10009 	update_timer->sc = sc;
10010 	update_timer->ring_id = ring_id;
10011 	update_timer->interval = interval;
10012 	update_timer->init = 1;
10013 	timeout_set(&update_timer->timer, qwx_dp_shadow_timer_handler,
10014 	    update_timer);
10015 }
10016 
/*
 * Pre-assign a unique, monotonically increasing command number to every
 * entry of the REO command ring. The command-send helpers later read
 * this number back out of the descriptor they fill in.
 */
void
qwx_hal_reo_init_cmd_ring(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct hal_srng_params params;
	struct hal_tlv_hdr *tlv;
	struct hal_reo_get_queue_stats *desc;
	int i, cmd_num = 1;
	int entry_size;
	uint8_t *entry;

	memset(&params, 0, sizeof(params));

	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_REO_CMD);
	qwx_hal_srng_get_params(sc, srng, &params);
	entry = (uint8_t *)params.ring_base_vaddr;

	for (i = 0; i < params.num_entries; i++) {
		tlv = (struct hal_tlv_hdr *)entry;
		/* All REO commands share the same header layout; the
		 * queue-stats struct is used just to reach cmd.info0. */
		desc = (struct hal_reo_get_queue_stats *)tlv->value;
		desc->cmd.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER,
		    cmd_num++);
		entry += entry_size;
	}
}
10041 
10042 int
qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr * tlv,struct ath11k_hal_reo_cmd * cmd)10043 qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath11k_hal_reo_cmd *cmd)
10044 {
10045 	struct hal_reo_get_queue_stats *desc;
10046 
10047 	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
10048 	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
10049 
10050 	desc = (struct hal_reo_get_queue_stats *)tlv->value;
10051 
10052 	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
10053 	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
10054 		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
10055 
10056 	desc->queue_addr_lo = cmd->addr_lo;
10057 	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
10058 	    cmd->addr_hi);
10059 	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
10060 		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
10061 
10062 	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
10063 }
10064 
/*
 * Fill a REO FLUSH_CACHE command into the given ring entry and return
 * the command number pre-assigned to that entry, or ENOSPC if a cache
 * block slot was requested but none is free. When BLOCK_LATER is set
 * this also claims a block-resource slot in the hal state.
 */
int
qwx_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_flush_cache *desc;
	/* ffz() finds the first free (zero) block-resource slot. */
	uint8_t avail_slot = ffz(hal->avail_blk_resource);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
			return ENOSPC;

		hal->current_blk_index = avail_slot;
	}

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_flush_cache *)tlv->value;

	/* Preserve the pre-assigned command number, set status flag. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->cache_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
	    cmd->addr_hi);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
		desc->info0 |=
		    FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
		    avail_slot);
	}

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
10110 
/*
 * Fill a REO UPDATE_RX_REO_QUEUE command into the given ring entry and
 * return the command number pre-assigned to that entry. cmd->upd0 marks
 * which queue fields are to be updated, cmd->upd1/upd2 carry the new
 * values. NOTE: cmd->pn_size and cmd->ba_window_size are normalized in
 * place, so the caller's cmd structure is modified.
 */
int
qwx_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_update_rx_queue *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_update_rx_queue *)tlv->value;

	/* Preserve the pre-assigned command number, set status flag. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	/* info0: one "update this field" bit per upd0 flag. */
	desc->queue_addr_lo = cmd->addr_lo;
	desc->info0 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
		    cmd->addr_hi) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));

	/* info1: the new values themselves, taken from upd1. */
	desc->info1 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
		    cmd->rx_queue_num) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
		    FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
		    FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));

	/* Translate PN size in bits to the hardware enum. */
	if (cmd->pn_size == 24)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
	else if (cmd->pn_size == 48)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
	else if (cmd->pn_size == 128)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;

	/* Hardware encodes the BA window as size - 1, minimum 2. */
	if (cmd->ba_window_size < 1)
		cmd->ba_window_size = 1;

	if (cmd->ba_window_size == 1)
		cmd->ba_window_size++;

	desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
	    cmd->ba_window_size - 1) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
	        FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
10234 
/*
 * Enqueue a REO command of the given type on the REO command ring and
 * arm the shadow-register update timer. Returns the command number that
 * the hardware will report status against, ENOBUFS if the ring is full,
 * or ENOTSUP/EINVAL for unhandled command types.
 * NOTE(review): the errno values share the positive integer range with
 * command numbers -- confirm callers can distinguish the two.
 */
int
qwx_hal_reo_cmd_send(struct qwx_softc *sc, struct hal_srng *srng,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_tlv_hdr *reo_desc;
	int ret;
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	reo_desc = (struct hal_tlv_hdr *)qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!reo_desc) {
		ret = ENOBUFS;
		goto out;
	}

	switch (type) {
	case HAL_REO_CMD_GET_QUEUE_STATS:
		ret = qwx_hal_reo_cmd_queue_stats(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_CACHE:
		ret = qwx_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd);
		break;
	case HAL_REO_CMD_UPDATE_RX_QUEUE:
		ret = qwx_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_QUEUE:
	case HAL_REO_CMD_UNBLOCK_CACHE:
	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
		printf("%s: unsupported reo command %d\n",
		   sc->sc_dev.dv_xname, type);
		ret = ENOTSUP;
		break;
	default:
		printf("%s: unknown reo command %d\n",
		    sc->sc_dev.dv_xname, type);
		ret = EINVAL;
		break;
	}

	/* Arm the HP update timer for the REO command ring. */
	qwx_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer);
out:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
/*
 * Set up all common (non-per-pdev) DP rings: WBM release, TCL command/
 * status/data rings, and the REO reinject/release/exception/command/
 * status rings. On any failure, everything set up so far is torn down
 * via qwx_dp_srng_common_cleanup(). Returns 0 or an errno-style error.
 */
int
qwx_dp_srng_common_setup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	int i, ret;
	uint8_t tcl_num, wbm_num;

	ret = qwx_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
	    0, 0, DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up wbm2sw_release ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_cmd_ring, HAL_TCL_CMD,
	    0, 0, DP_TCL_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_status_ring, HAL_TCL_STATUS,
	    0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* One TCL data + WBM completion ring pair per hardware TX ring,
	 * using the chip-specific TCL-to-WBM ring number mapping. */
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_hal_params *hal_params;

		hal_params = sc->hw_params.hal_params;
		tcl_num = hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
		    HAL_TCL_DATA, tcl_num, 0, sc->hw_params.tx_ring_size);
		if (ret) {
			printf("%s: failed to set up tcl_data ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
		    HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE);
		if (ret) {
			printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		qwx_hal_tx_init_data_ring(sc, srng);

		qwx_dp_shadow_init_timer(sc, &dp->tx_ring_timer[i],
		    ATH11K_SHADOW_DP_TIMER_INTERVAL,
		    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_reinject_ring, HAL_REO_REINJECT,
	    0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_reinject ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
	    DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up rx_rel ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_except_ring, HAL_REO_EXCEPTION,
	    0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_exception ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0,
	    DP_REO_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	srng = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	qwx_hal_reo_init_cmd_ring(sc, srng);

	qwx_dp_shadow_init_timer(sc, &dp->reo_cmd_timer,
	     ATH11K_SHADOW_CTRL_TIMER_INTERVAL, dp->reo_cmd_ring.ring_id);

	ret = qwx_dp_srng_setup(sc, &dp->reo_status_ring, HAL_REO_STATUS,
	    0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	sc->hw_params.hw_ops->reo_setup(sc);
	return 0;

err:
	qwx_dp_srng_common_cleanup(sc);

	return ret;
}
10403 
10404 void
qwx_dp_link_desc_cleanup(struct qwx_softc * sc,struct dp_link_desc_bank * desc_bank,uint32_t ring_type,struct dp_srng * ring)10405 qwx_dp_link_desc_cleanup(struct qwx_softc *sc,
10406     struct dp_link_desc_bank *desc_bank, uint32_t ring_type,
10407     struct dp_srng *ring)
10408 {
10409 	qwx_dp_link_desc_bank_free(sc, desc_bank);
10410 
10411 	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
10412 		qwx_dp_srng_cleanup(sc, ring);
10413 		qwx_dp_scatter_idle_link_desc_cleanup(sc);
10414 	}
10415 }
10416 
10417 void
qwx_dp_tx_ring_free_tx_data(struct qwx_softc * sc,struct dp_tx_ring * tx_ring)10418 qwx_dp_tx_ring_free_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
10419 {
10420 	int i;
10421 
10422 	if (tx_ring->data == NULL)
10423 		return;
10424 
10425 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
10426 		struct qwx_tx_data *tx_data = &tx_ring->data[i];
10427 
10428 		if (tx_data->map) {
10429 			bus_dmamap_unload(sc->sc_dmat, tx_data->map);
10430 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
10431 		}
10432 
10433 		m_freem(tx_data->m);
10434 	}
10435 
10436 	free(tx_ring->data, M_DEVBUF,
10437 	    sc->hw_params.tx_ring_size * sizeof(struct qwx_tx_data));
10438 	tx_ring->data = NULL;
10439 }
10440 
10441 int
qwx_dp_tx_ring_alloc_tx_data(struct qwx_softc * sc,struct dp_tx_ring * tx_ring)10442 qwx_dp_tx_ring_alloc_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
10443 {
10444 	int i, ret;
10445 
10446 	tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
10447 	   sizeof(struct qwx_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
10448 	if (tx_ring->data == NULL)
10449 		return ENOMEM;
10450 
10451 	for (i = 0; i < sc->hw_params.tx_ring_size; i++) {
10452 		struct qwx_tx_data *tx_data = &tx_ring->data[i];
10453 
10454 		ret = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
10455 		    BUS_DMA_NOWAIT, &tx_data->map);
10456 		if (ret)
10457 			return ret;
10458 	}
10459 
10460 	return 0;
10461 }
10462 
/*
 * Allocate and initialize all SOC-level data path state: the WBM idle
 * ring and link descriptors, the common DP rings, per-TX-ring buffer
 * bookkeeping and completion status arrays, and the default DSCP-TID
 * maps. Returns 0 or an errno-style error; on failure, everything
 * allocated here is released again.
 */
int
qwx_dp_alloc(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	uint32_t n_link_desc = 0;
	int ret;
	int i;

	dp->sc = sc;

	TAILQ_INIT(&dp->reo_cmd_list);
	TAILQ_INIT(&dp->reo_cmd_cache_flush_list);
#if 0
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);
#endif

	dp->reo_cmd_cache_flush_count = 0;

	ret = qwx_wbm_idle_ring_setup(sc, &n_link_desc);
	if (ret) {
		printf("%s: failed to setup wbm_idle_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	srng = &sc->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = qwx_dp_link_desc_setup(sc, dp->link_desc_banks,
	    HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		printf("%s: failed to setup link desc: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_dp_srng_common_setup(sc);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	/* Per-TX-ring buffer tracking and completion status storage. */
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
#if 0
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
#endif
		ret = qwx_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
		if (ret)
			goto fail_cmn_srng_cleanup;

		dp->tx_ring[i].cur = 0;
		dp->tx_ring[i].queued = 0;
		dp->tx_ring[i].tcl_data_ring_id = i;
		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (!dp->tx_ring[i].tx_status) {
			ret = ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		qwx_hal_tx_set_dscp_tid_map(sc, i);

	/* Init any SOC level resource for DP */

	return 0;
fail_cmn_srng_cleanup:
	qwx_dp_srng_common_cleanup(sc);
fail_link_desc_cleanup:
	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks, HAL_WBM_IDLE_LINK,
	    &dp->wbm_idle_ring);

	return ret;
}
10543 
10544 void
qwx_dp_reo_cmd_list_cleanup(struct qwx_softc * sc)10545 qwx_dp_reo_cmd_list_cleanup(struct qwx_softc *sc)
10546 {
10547 	struct qwx_dp *dp = &sc->dp;
10548 	struct dp_reo_cmd *cmd, *tmp;
10549 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
10550 	struct dp_rx_tid *rx_tid;
10551 #ifdef notyet
10552 	spin_lock_bh(&dp->reo_cmd_lock);
10553 #endif
10554 	TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
10555 		TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
10556 		rx_tid = &cmd->data;
10557 		if (rx_tid->mem) {
10558 			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
10559 			rx_tid->mem = NULL;
10560 			rx_tid->vaddr = NULL;
10561 			rx_tid->paddr = 0ULL;
10562 			rx_tid->size = 0;
10563 		}
10564 		free(cmd, M_DEVBUF, sizeof(*cmd));
10565 	}
10566 
10567 	TAILQ_FOREACH_SAFE(cmd_cache, &dp->reo_cmd_cache_flush_list,
10568 	    entry, tmp_cache) {
10569 		TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, cmd_cache, entry);
10570 		dp->reo_cmd_cache_flush_count--;
10571 		rx_tid = &cmd_cache->data;
10572 		if (rx_tid->mem) {
10573 			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
10574 			rx_tid->mem = NULL;
10575 			rx_tid->vaddr = NULL;
10576 			rx_tid->paddr = 0ULL;
10577 			rx_tid->size = 0;
10578 		}
10579 		free(cmd_cache, M_DEVBUF, sizeof(*cmd_cache));
10580 	}
10581 #ifdef notyet
10582 	spin_unlock_bh(&dp->reo_cmd_lock);
10583 #endif
10584 }
10585 
10586 void
qwx_dp_free(struct qwx_softc * sc)10587 qwx_dp_free(struct qwx_softc *sc)
10588 {
10589 	struct qwx_dp *dp = &sc->dp;
10590 	int i;
10591 
10592 	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks,
10593 	    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
10594 
10595 	qwx_dp_srng_common_cleanup(sc);
10596 	qwx_dp_reo_cmd_list_cleanup(sc);
10597 	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
10598 #if 0
10599 		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
10600 		idr_for_each(&dp->tx_ring[i].txbuf_idr,
10601 			     ath11k_dp_tx_pending_cleanup, ab);
10602 		idr_destroy(&dp->tx_ring[i].txbuf_idr);
10603 		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
10604 #endif
10605 		qwx_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
10606 		free(dp->tx_ring[i].tx_status, M_DEVBUF,
10607 		    sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
10608 		dp->tx_ring[i].tx_status = NULL;
10609 	}
10610 
10611 	/* Deinit any SOC level resource */
10612 }
10613 
/* Stub: QMI cold-boot calibration is not yet implemented. */
void
qwx_qmi_process_coldboot_calibration(struct qwx_softc *sc)
{
	printf("%s not implemented\n", __func__);
}
10619 
10620 int
qwx_qmi_wlanfw_wlan_ini_send(struct qwx_softc * sc,int enable)10621 qwx_qmi_wlanfw_wlan_ini_send(struct qwx_softc *sc, int enable)
10622 {
10623 	int ret;
10624 	struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
10625 
10626 	req.enablefwlog_valid = 1;
10627 	req.enablefwlog = enable ? 1 : 0;
10628 
10629 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_INI_REQ_V01,
10630 	    QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
10631 	    qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req, sizeof(req));
10632 	if (ret) {
10633 		printf("%s: failed to send wlan ini request, err = %d\n",
10634 		    sc->sc_dev.dv_xname, ret);
10635 		return ret;
10636 	}
10637 
10638 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10639 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10640 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxini",
10641 		    SEC_TO_NSEC(1));
10642 		if (ret) {
10643 			printf("%s: wlan ini request timeout\n",
10644 			    sc->sc_dev.dv_xname);
10645 			return ret;
10646 		}
10647 	}
10648 
10649 	return 0;
10650 }
10651 
/*
 * Send the QMI "WLAN config" request, describing the host's
 * copy-engine layout to firmware: per-pipe CE configuration, the
 * service-to-pipe map, and optionally the shadow register v2 table.
 * Blocks until firmware acknowledges or a 1-second sleep times out.
 * Returns 0 on success or a non-zero errno.
 */
int
qwx_qmi_wlanfw_wlan_cfg_send(struct qwx_softc *sc)
{
	struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
	const struct ce_pipe_config *ce_cfg;
	const struct service_to_pipe *svc_cfg;
	int ret = 0, pipe_num;

	ce_cfg	= sc->hw_params.target_ce_config;
	svc_cfg	= sc->hw_params.svc_to_ce_map;

	/* The request structure is large; heap-allocate it zeroed. */
	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req)
		return ENOMEM;

	req->host_version_valid = 1;
	strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
	    sizeof(req->host_version));

	req->tgt_cfg_valid = 1;
	/* This is number of CE configs */
	req->tgt_cfg_len = sc->hw_params.target_ce_count;
	for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
		req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
		req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
		req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
		req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
		req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
	}

	req->svc_cfg_valid = 1;
	/* This is number of Service/CE configs */
	req->svc_cfg_len = sc->hw_params.svc_to_ce_map_len;
	for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
		req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
		req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
		req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
	}
	req->shadow_reg_valid = 0;

	/* set shadow v2 configuration */
	if (sc->hw_params.supports_shadow_regs) {
		req->shadow_reg_v2_valid = 1;
		/* Clamp to what the QMI message can carry. */
		req->shadow_reg_v2_len = MIN(sc->qmi_ce_cfg.shadow_reg_v2_len,
		    QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
		memcpy(&req->shadow_reg_v2, sc->qmi_ce_cfg.shadow_reg_v2,
		       sizeof(uint32_t) * req->shadow_reg_v2_len);
	} else {
		req->shadow_reg_v2_valid = 0;
	}

	DNPRINTF(QWX_D_QMI, "%s: wlan cfg req\n", __func__);

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_CFG_REQ_V01,
	    QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req, sizeof(*req));
	if (ret) {
		printf("%s: failed to send wlan config request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for the QMI response handler to flag success. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxwlancfg",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: wlan config request failed\n",
			    sc->sc_dev.dv_xname);
			goto out;
		}
	}
out:
	free(req, M_DEVBUF, sizeof(*req));
	return ret;
}
10728 
10729 int
qwx_qmi_wlanfw_mode_send(struct qwx_softc * sc,enum ath11k_firmware_mode mode)10730 qwx_qmi_wlanfw_mode_send(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10731 {
10732 	int ret;
10733 	struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
10734 
10735 	req.mode = mode;
10736 	req.hw_debug_valid = 1;
10737 	req.hw_debug = 0;
10738 
10739 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_MODE_REQ_V01,
10740 	    QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
10741 	    qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req, sizeof(req));
10742 	if (ret) {
10743 		printf("%s: failed to send wlan mode request, err = %d\n",
10744 		    sc->sc_dev.dv_xname, ret);
10745 		return ret;
10746 	}
10747 
10748 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
10749 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
10750 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwmode",
10751 		    SEC_TO_NSEC(1));
10752 		if (ret) {
10753 			if (mode == ATH11K_FIRMWARE_MODE_OFF)
10754 				return 0;
10755 			printf("%s: wlan mode request timeout\n",
10756 			    sc->sc_dev.dv_xname);
10757 			return ret;
10758 		}
10759 	}
10760 
10761 	return 0;
10762 }
10763 
10764 int
qwx_qmi_firmware_start(struct qwx_softc * sc,enum ath11k_firmware_mode mode)10765 qwx_qmi_firmware_start(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10766 {
10767 	int ret;
10768 
10769 	DPRINTF("%s: firmware start\n", sc->sc_dev.dv_xname);
10770 
10771 	if (sc->hw_params.fw_wmi_diag_event) {
10772 		ret = qwx_qmi_wlanfw_wlan_ini_send(sc, 1);
10773 		if (ret < 0) {
10774 			printf("%s: qmi failed to send wlan fw ini: %d\n",
10775 			    sc->sc_dev.dv_xname, ret);
10776 			return ret;
10777 		}
10778 	}
10779 
10780 	ret = qwx_qmi_wlanfw_wlan_cfg_send(sc);
10781 	if (ret) {
10782 		printf("%s: qmi failed to send wlan cfg: %d\n",
10783 		    sc->sc_dev.dv_xname, ret);
10784 		return ret;
10785 	}
10786 
10787 	ret = qwx_qmi_wlanfw_mode_send(sc, mode);
10788 	if (ret) {
10789 		printf("%s: qmi failed to send wlan fw mode: %d\n",
10790 		    sc->sc_dev.dv_xname, ret);
10791 		return ret;
10792 	}
10793 
10794 	return 0;
10795 }
10796 
10797 void
qwx_qmi_firmware_stop(struct qwx_softc * sc)10798 qwx_qmi_firmware_stop(struct qwx_softc *sc)
10799 {
10800 	int ret;
10801 
10802 	ret = qwx_qmi_wlanfw_mode_send(sc, ATH11K_FIRMWARE_MODE_OFF);
10803 	if (ret) {
10804 		printf("%s: qmi failed to send wlan mode off: %d\n",
10805 		    sc->sc_dev.dv_xname, ret);
10806 	}
10807 }
10808 
10809 int
qwx_core_start_firmware(struct qwx_softc * sc,enum ath11k_firmware_mode mode)10810 qwx_core_start_firmware(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
10811 {
10812 	int ret;
10813 
10814 	qwx_ce_get_shadow_config(sc, &sc->qmi_ce_cfg.shadow_reg_v2,
10815 	    &sc->qmi_ce_cfg.shadow_reg_v2_len);
10816 
10817 	ret = qwx_qmi_firmware_start(sc, mode);
10818 	if (ret) {
10819 		printf("%s: failed to send firmware start: %d\n",
10820 		    sc->sc_dev.dv_xname, ret);
10821 		return ret;
10822 	}
10823 
10824 	return ret;
10825 }
10826 
10827 int
qwx_wmi_pdev_attach(struct qwx_softc * sc,uint8_t pdev_id)10828 qwx_wmi_pdev_attach(struct qwx_softc *sc, uint8_t pdev_id)
10829 {
10830 	struct qwx_pdev_wmi *wmi_handle;
10831 
10832 	if (pdev_id >= sc->hw_params.max_radios)
10833 		return EINVAL;
10834 
10835 	wmi_handle = &sc->wmi.wmi[pdev_id];
10836 	wmi_handle->wmi = &sc->wmi;
10837 
10838 	wmi_handle->tx_ce_desc = 1;
10839 
10840 	return 0;
10841 }
10842 
/*
 * Tear down WMI state.  The only dynamically allocated WMI resource
 * released here is the cached dbring capability list.
 */
void
qwx_wmi_detach(struct qwx_softc *sc)
{
	qwx_wmi_free_dbring_caps(sc);
}
10848 
10849 int
qwx_wmi_attach(struct qwx_softc * sc)10850 qwx_wmi_attach(struct qwx_softc *sc)
10851 {
10852 	int ret;
10853 
10854 	ret = qwx_wmi_pdev_attach(sc, 0);
10855 	if (ret)
10856 		return ret;
10857 
10858 	sc->wmi.sc = sc;
10859 	sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
10860 	sc->wmi.tx_credits = 1;
10861 
10862 	/* It's overwritten when service_ext_ready is handled */
10863 	if (sc->hw_params.single_pdev_only &&
10864 	    sc->hw_params.num_rxmda_per_pdev > 1)
10865 		sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
10866 
10867 	return 0;
10868 }
10869 
10870 void
qwx_wmi_htc_tx_complete(struct qwx_softc * sc,struct mbuf * m)10871 qwx_wmi_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
10872 {
10873 	struct qwx_pdev_wmi *wmi = NULL;
10874 	uint32_t i;
10875 	uint8_t wmi_ep_count;
10876 	uint8_t eid;
10877 
10878 	eid = (uintptr_t)m->m_pkthdr.ph_cookie;
10879 	m_freem(m);
10880 
10881 	if (eid >= ATH11K_HTC_EP_COUNT)
10882 		return;
10883 
10884 	wmi_ep_count = sc->htc.wmi_ep_count;
10885 	if (wmi_ep_count > sc->hw_params.max_radios)
10886 		return;
10887 
10888 	for (i = 0; i < sc->htc.wmi_ep_count; i++) {
10889 		if (sc->wmi.wmi[i].eid == eid) {
10890 			wmi = &sc->wmi.wmi[i];
10891 			break;
10892 		}
10893 	}
10894 
10895 	if (wmi)
10896 		wakeup(&wmi->tx_ce_desc);
10897 }
10898 
10899 int
qwx_wmi_tlv_services_parser(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)10900 qwx_wmi_tlv_services_parser(struct qwx_softc *sc, uint16_t tag, uint16_t len,
10901     const void *ptr, void *data)
10902 {
10903 	const struct wmi_service_available_event *ev;
10904 	uint32_t *wmi_ext2_service_bitmap;
10905 	int i, j;
10906 
10907 	switch (tag) {
10908 	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
10909 		ev = (struct wmi_service_available_event *)ptr;
10910 		for (i = 0, j = WMI_MAX_SERVICE;
10911 		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
10912 		    j < WMI_MAX_EXT_SERVICE;
10913 		    i++) {
10914 			do {
10915 				if (ev->wmi_service_segment_bitmap[i] &
10916 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
10917 					setbit(sc->wmi.svc_map, j);
10918 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
10919 		}
10920 
10921 		DNPRINTF(QWX_D_WMI,
10922 		    "%s: wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, "
10923 		    "2:0x%04x, 3:0x%04x\n", __func__,
10924 		    ev->wmi_service_segment_bitmap[0],
10925 		    ev->wmi_service_segment_bitmap[1],
10926 		    ev->wmi_service_segment_bitmap[2],
10927 		    ev->wmi_service_segment_bitmap[3]);
10928 		break;
10929 	case WMI_TAG_ARRAY_UINT32:
10930 		wmi_ext2_service_bitmap = (uint32_t *)ptr;
10931 		for (i = 0, j = WMI_MAX_EXT_SERVICE;
10932 		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
10933 		    j < WMI_MAX_EXT2_SERVICE;
10934 		    i++) {
10935 			do {
10936 				if (wmi_ext2_service_bitmap[i] &
10937 				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
10938 					setbit(sc->wmi.svc_map, j);
10939 			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
10940 		}
10941 
10942 		DNPRINTF(QWX_D_WMI,
10943 		    "%s: wmi_ext2_service__bitmap  0:0x%04x, 1:0x%04x, "
10944 		    "2:0x%04x, 3:0x%04x\n", __func__,
10945 		    wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
10946 		    wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
10947 		break;
10948 	}
10949 
10950 	return 0;
10951 }
10952 
/*
 * Minimum payload length for each known WMI TLV event tag, enforced by
 * qwx_wmi_tlv_iter() before the per-event parser callback runs.  A
 * min_len of zero (or an absent entry) disables the check for that tag.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= {.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_ev) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
11019 
/*
 * Generic WMI TLV walker: iterate over the TLV-encoded buffer `ptr' of
 * `len' bytes and invoke `iter' once per TLV with its tag, payload
 * length, and payload pointer.  Known tags are length-checked against
 * wmi_tlv_policies[] first.  Returns 0 on success, EINVAL for a
 * truncated or malformed buffer, or the first non-zero value returned
 * by the callback.
 */
int
qwx_wmi_tlv_iter(struct qwx_softc *sc, const void *ptr, size_t len,
    int (*iter)(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data), void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	uint16_t tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Need at least a complete TLV header. */
		if (len < sizeof(*tlv)) {
			printf("%s: wmi tlv parse failure at byte %zd "
			    "(%zu bytes left, %zu expected)\n", __func__,
			    ptr - begin, len, sizeof(*tlv));
			return EINVAL;
		}

		tlv = ptr;
		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
		/* Step past the header; `ptr' now points at the payload.
		 * (void * arithmetic relies on the gcc/clang extension.) */
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* The payload must fit in what remains of the buffer. */
		if (tlv_len > len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%zu bytes left, %u expected)\n",
			    __func__, tlv_tag, ptr - begin, len, tlv_len);
			return EINVAL;
		}

		/* Enforce the per-tag minimum length, if one is declared. */
		if (tlv_tag < nitems(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%u bytes is less than "
			    "min length %zu)\n", __func__,
			    tlv_tag, ptr - begin, tlv_len,
			    wmi_tlv_policies[tlv_tag].min_len);
			return EINVAL;
		}

		ret = iter(sc, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
11072 
11073 int
qwx_pull_service_ready_tlv(struct qwx_softc * sc,const void * evt_buf,struct ath11k_targ_cap * cap)11074 qwx_pull_service_ready_tlv(struct qwx_softc *sc, const void *evt_buf,
11075     struct ath11k_targ_cap *cap)
11076 {
11077 	const struct wmi_service_ready_event *ev = evt_buf;
11078 
11079 	if (!ev)
11080 		return EINVAL;
11081 
11082 	cap->phy_capability = ev->phy_capability;
11083 	cap->max_frag_entry = ev->max_frag_entry;
11084 	cap->num_rf_chains = ev->num_rf_chains;
11085 	cap->ht_cap_info = ev->ht_cap_info;
11086 	cap->vht_cap_info = ev->vht_cap_info;
11087 	cap->vht_supp_mcs = ev->vht_supp_mcs;
11088 	cap->hw_min_tx_power = ev->hw_min_tx_power;
11089 	cap->hw_max_tx_power = ev->hw_max_tx_power;
11090 	cap->sys_cap_info = ev->sys_cap_info;
11091 	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
11092 	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
11093 	cap->max_num_scan_channels = ev->max_num_scan_channels;
11094 	cap->max_supported_macs = ev->max_supported_macs;
11095 	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
11096 	cap->txrx_chainmask = ev->txrx_chainmask;
11097 	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
11098 	cap->num_msdu_desc = ev->num_msdu_desc;
11099 
11100 	return 0;
11101 }
11102 
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
void
qwx_wmi_service_bitmap_copy(struct qwx_pdev_wmi *wmi,
    const uint32_t *wmi_svc_bm)
{
	int i, j = 0;

	/*
	 * For each source word, the inner do/while examines
	 * WMI_SERVICE_BITS_IN_SIZE32 consecutive service numbers; it
	 * exits when ++j reaches the next multiple of
	 * WMI_SERVICE_BITS_IN_SIZE32 (the expression becomes zero).
	 */
	for (i = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				setbit(wmi->wmi->svc_map, j);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
11120 
11121 int
qwx_wmi_tlv_svc_rdy_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)11122 qwx_wmi_tlv_svc_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11123     const void *ptr, void *data)
11124 {
11125 	struct wmi_tlv_svc_ready_parse *svc_ready = data;
11126 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
11127 	uint16_t expect_len;
11128 
11129 	switch (tag) {
11130 	case WMI_TAG_SERVICE_READY_EVENT:
11131 		if (qwx_pull_service_ready_tlv(sc, ptr, &sc->target_caps))
11132 			return EINVAL;
11133 		break;
11134 
11135 	case WMI_TAG_ARRAY_UINT32:
11136 		if (!svc_ready->wmi_svc_bitmap_done) {
11137 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(uint32_t);
11138 			if (len < expect_len) {
11139 				printf("%s: invalid len %d for the tag 0x%x\n",
11140 				    __func__, len, tag);
11141 				return EINVAL;
11142 			}
11143 
11144 			qwx_wmi_service_bitmap_copy(wmi_handle, ptr);
11145 
11146 			svc_ready->wmi_svc_bitmap_done = 1;
11147 		}
11148 		break;
11149 	default:
11150 		break;
11151 	}
11152 
11153 	return 0;
11154 }
11155 
11156 void
qwx_service_ready_event(struct qwx_softc * sc,struct mbuf * m)11157 qwx_service_ready_event(struct qwx_softc *sc, struct mbuf *m)
11158 {
11159 	struct wmi_tlv_svc_ready_parse svc_ready = { };
11160 	int ret;
11161 
11162 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11163 	    qwx_wmi_tlv_svc_rdy_parse, &svc_ready);
11164 	if (ret) {
11165 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11166 		return;
11167 	}
11168 
11169 	DNPRINTF(QWX_D_WMI, "%s: event service ready\n", __func__);
11170 }
11171 
11172 int
qwx_pull_svc_ready_ext(struct qwx_pdev_wmi * wmi_handle,const void * ptr,struct ath11k_service_ext_param * param)11173 qwx_pull_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle, const void *ptr,
11174     struct ath11k_service_ext_param *param)
11175 {
11176 	const struct wmi_service_ready_ext_event *ev = ptr;
11177 
11178 	if (!ev)
11179 		return EINVAL;
11180 
11181 	/* Move this to host based bitmap */
11182 	param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
11183 	param->default_fw_config_bits =	ev->default_fw_config_bits;
11184 	param->he_cap_info = ev->he_cap_info;
11185 	param->mpdu_density = ev->mpdu_density;
11186 	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
11187 	memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
11188 
11189 	return 0;
11190 }
11191 
/*
 * Extract the MAC/PHY capabilities of one PHY (identified by
 * hw_mode_id and phy_id) from the extended service-ready TLV arrays
 * and record them in *pdev and sc->target_pdev_ids[].  Returns EINVAL
 * when the hw mode or PHY cannot be located, or when the PHY supports
 * neither the 2GHz nor the 5GHz band.
 */
int
qwx_pull_mac_phy_cap_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle,
    struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
    struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
    struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
    struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
    uint8_t hw_mode_id, uint8_t phy_id, struct qwx_pdev *pdev)
{
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct qwx_softc *sc = wmi_handle->wmi->sc;
	struct ath11k_band_cap *cap_band;
	struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
	uint32_t phy_map;
	uint32_t hw_idx, phy_idx = 0;

	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
		return EINVAL;

	/*
	 * Locate the requested hw mode.  PHYs of the preceding hw modes
	 * occupy the low slots of wmi_mac_phy_caps[], so tally how many
	 * to skip while scanning.
	 */
	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
			break;

		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
		while (phy_map) {
			phy_map >>= 1;
			phy_idx++;
		}
	}

	if (hw_idx == hw_caps->num_hw_modes)
		return EINVAL;

	phy_idx += phy_id;
	if (phy_id >= hal_reg_caps->num_phy)
		return EINVAL;

	mac_phy_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = mac_phy_caps->pdev_id;
	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
	sc->target_pdev_ids[sc->target_pdev_count].supported_bands =
	    mac_phy_caps->supported_bands;
	sc->target_pdev_ids[sc->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
	sc->target_pdev_count++;

	if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
	    !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
		return EINVAL;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
		pdev_cap->nss_ratio_enabled =
		    WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
		    WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift = ffs(pdev_cap->tx_chain_mask);
	pdev_cap->rx_chain_mask_shift = ffs(pdev_cap->rx_chain_mask);

	/* Record per-band (2GHz = band[0], 5GHz = band[1]) HT/HE caps. */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[0];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
		memcpy(cap_band->he_cap_phy_info,
		    &mac_phy_caps->he_cap_phy_info_2g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
		    sizeof(struct ath11k_ppe_threshold));
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[1];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		    sizeof(struct ath11k_ppe_threshold));
#if 0
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));
#endif
	}

	return 0;
}
11317 
/*
 * Parse the WMI_TAG_SOC_HAL_REG_CAPABILITIES TLV of the extended
 * service-ready event and derive per-radio (pdev) capabilities for
 * each PHY advertised by the preferred hardware mode's phy_id_map.
 * Sets sc->num_radios and fills sc->pdevs[].  Returns 0 on success or
 * an error from qwx_pull_mac_phy_cap_svc_ready_ext().
 */
int
qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(struct qwx_softc *sc, uint16_t len,
    const void *ptr, void *data)
{
	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	uint8_t hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	uint32_t phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	sc->num_radios = 0;
	sc->target_pdev_count = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	/* One iteration per PHY bit; num_radios doubles as the phy id. */
	while (phy_id_map && sc->num_radios < MAX_RADIOS) {
		ret = qwx_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
		    svc_rdy_ext->hw_caps,
		    svc_rdy_ext->hw_mode_caps,
		    svc_rdy_ext->soc_hal_reg_caps,
		    svc_rdy_ext->mac_phy_caps,
		    hw_mode_id, sc->num_radios, &sc->pdevs[pdev_index]);
		if (ret) {
			printf("%s: failed to extract mac caps, idx: %d\n",
			    __func__, sc->num_radios);
			return ret;
		}

		sc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (sc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = sc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (sc->hw_params.single_pdev_only) {
		sc->num_radios = 1;
		sc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
11372 
/*
 * TLV sub-parser for WMI_TAG_HW_MODE_CAPABILITIES entries inside the
 * extended service-ready event: count the hw-mode entry and tally the
 * PHY ids its phy_id_map advertises into tot_phy_id.  Returns EPROTO
 * for an unexpected tag and ENOBUFS when more entries arrive than
 * param.num_hw_modes announced.
 */
int
qwx_wmi_tlv_hw_mode_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct wmi_hw_mode_capabilities *hw_mode_cap;
	uint32_t phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
		return ENOBUFS;

	/*
	 * NOTE(review): the TLV payload appears to begin at the
	 * hw_mode_id member; container_of() recovers the enclosing
	 * structure from that pointer — confirm against the WMI layout.
	 */
	hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
	    hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	/*
	 * Count shifts until phy_id_map is exhausted; this equals the
	 * PHY count when the map is a contiguous low-bit mask.
	 */
	phy_map = hw_mode_cap->phy_id_map;
	while (phy_map) {
		svc_rdy_ext->tot_phy_id++;
		phy_map = phy_map >> 1;
	}

	return 0;
}
11399 
/*
 * Expand to a designated initializer mapping a hardware mode ID to its
 * priority constant, e.g. PRIMAP(WMI_HOST_HW_MODE_DBS) becomes
 * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI.
 */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

/*
 * Priority table indexed by wmi_host_hw_mode_config_type; a lower value
 * is preferred when qwx_wmi_tlv_hw_mode_caps() selects the hardware mode.
 */
static const int qwx_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
11413 
11414 int
qwx_wmi_tlv_hw_mode_caps(struct qwx_softc * sc,uint16_t len,const void * ptr,void * data)11415 qwx_wmi_tlv_hw_mode_caps(struct qwx_softc *sc, uint16_t len,
11416     const void *ptr, void *data)
11417 {
11418 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11419 	struct wmi_hw_mode_capabilities *hw_mode_caps;
11420 	enum wmi_host_hw_mode_config_type mode, pref;
11421 	uint32_t i;
11422 	int ret;
11423 
11424 	svc_rdy_ext->n_hw_mode_caps = 0;
11425 	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
11426 
11427 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
11428 	    qwx_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext);
11429 	if (ret) {
11430 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11431 		return ret;
11432 	}
11433 
11434 	i = 0;
11435 	while (i < svc_rdy_ext->n_hw_mode_caps) {
11436 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
11437 		mode = hw_mode_caps->hw_mode_id;
11438 		pref = sc->wmi.preferred_hw_mode;
11439 
11440 		if (qwx_hw_mode_pri_map[mode] < qwx_hw_mode_pri_map[pref]) {
11441 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
11442 			sc->wmi.preferred_hw_mode = mode;
11443 		}
11444 		i++;
11445 	}
11446 
11447 	DNPRINTF(QWX_D_WMI, "%s: preferred_hw_mode: %d\n", __func__,
11448 	    sc->wmi.preferred_hw_mode);
11449 	if (sc->wmi.preferred_hw_mode >= WMI_HOST_HW_MODE_MAX)
11450 		return EINVAL;
11451 
11452 	return 0;
11453 }
11454 
/*
 * Collect one WMI_TAG_MAC_PHY_CAPABILITIES TLV into the mac_phy_caps
 * array. On the first entry the array is allocated for all tot_phy_id
 * PHYs counted earlier by qwx_wmi_tlv_hw_mode_caps_parse().
 */
int
qwx_wmi_tlv_mac_phy_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return EPROTO;

	/* Firmware must not deliver more entries than PHYs announced. */
	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return ENOBUFS;

	/* Never copy more than one capability structure per TLV. */
	len = MIN(len, sizeof(struct wmi_mac_phy_capabilities));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = mallocarray(
		    svc_rdy_ext->tot_phy_id,
		    sizeof(struct wmi_mac_phy_capabilities),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!svc_rdy_ext->mac_phy_caps)
			return ENOMEM;
		/*
		 * NOTE(review): if the TLV is shorter than
		 * struct wmi_mac_phy_capabilities, this records a size
		 * smaller than the mallocarray() allocation above and
		 * the eventual free() in qwx_service_ready_ext_event()
		 * is passed that smaller size -- verify len always
		 * matches the structure size in practice.
		 */
		svc_rdy_ext->mac_phy_caps_size = len * svc_rdy_ext->tot_phy_id;
	}

	/* Pointer arithmetic advances in whole-structure strides. */
	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps,
	    ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}
11483 
11484 int
qwx_wmi_tlv_ext_hal_reg_caps_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)11485 qwx_wmi_tlv_ext_hal_reg_caps_parse(struct qwx_softc *sc,
11486     uint16_t tag, uint16_t len, const void *ptr, void *data)
11487 {
11488 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11489 
11490 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
11491 		return EPROTO;
11492 
11493 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
11494 		return ENOBUFS;
11495 
11496 	svc_rdy_ext->n_ext_hal_reg_caps++;
11497 	return 0;
11498 }
11499 
11500 int
qwx_pull_reg_cap_svc_rdy_ext(struct qwx_pdev_wmi * wmi_handle,struct wmi_soc_hal_reg_capabilities * reg_caps,struct wmi_hal_reg_capabilities_ext * wmi_ext_reg_cap,uint8_t phy_idx,struct ath11k_hal_reg_capabilities_ext * param)11501 qwx_pull_reg_cap_svc_rdy_ext(struct qwx_pdev_wmi *wmi_handle,
11502     struct wmi_soc_hal_reg_capabilities *reg_caps,
11503     struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
11504     uint8_t phy_idx, struct ath11k_hal_reg_capabilities_ext *param)
11505 {
11506 	struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
11507 
11508 	if (!reg_caps || !wmi_ext_reg_cap)
11509 		return EINVAL;
11510 
11511 	if (phy_idx >= reg_caps->num_phy)
11512 		return EINVAL;
11513 
11514 	ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
11515 
11516 	param->phy_id = ext_reg_cap->phy_id;
11517 	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
11518 	param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext;
11519 	param->regcap1 = ext_reg_cap->regcap1;
11520 	param->regcap2 = ext_reg_cap->regcap2;
11521 	/* check if param->wireless_mode is needed */
11522 	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
11523 	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
11524 	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
11525 	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
11526 
11527 	return 0;
11528 }
11529 
11530 int
qwx_wmi_tlv_ext_hal_reg_caps(struct qwx_softc * sc,uint16_t len,const void * ptr,void * data)11531 qwx_wmi_tlv_ext_hal_reg_caps(struct qwx_softc *sc, uint16_t len,
11532     const void *ptr, void *data)
11533 {
11534 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
11535 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
11536 	struct ath11k_hal_reg_capabilities_ext reg_cap;
11537 	int ret;
11538 	uint32_t i;
11539 
11540 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
11541 	svc_rdy_ext->ext_hal_reg_caps =
11542 	    (struct wmi_hal_reg_capabilities_ext *)ptr;
11543 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
11544 	    qwx_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext);
11545 	if (ret) {
11546 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11547 		return ret;
11548 	}
11549 
11550 	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
11551 		ret = qwx_pull_reg_cap_svc_rdy_ext(wmi_handle,
11552 		    svc_rdy_ext->soc_hal_reg_caps,
11553 		    svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap);
11554 		if (ret) {
11555 			printf("%s: failed to extract reg cap %d\n",
11556 			    __func__, i);
11557 			return ret;
11558 		}
11559 
11560 		memcpy(&sc->hal_reg_cap[reg_cap.phy_id], &reg_cap,
11561 		    sizeof(sc->hal_reg_cap[0]));
11562 	}
11563 
11564 	return 0;
11565 }
11566 
11567 int
qwx_wmi_tlv_dma_ring_caps_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)11568 qwx_wmi_tlv_dma_ring_caps_parse(struct qwx_softc *sc, uint16_t tag,
11569     uint16_t len, const void *ptr, void *data)
11570 {
11571 	struct wmi_tlv_dma_ring_caps_parse *parse = data;
11572 
11573 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
11574 		return EPROTO;
11575 
11576 	parse->n_dma_ring_caps++;
11577 	return 0;
11578 }
11579 
11580 int
qwx_wmi_alloc_dbring_caps(struct qwx_softc * sc,uint32_t num_cap)11581 qwx_wmi_alloc_dbring_caps(struct qwx_softc *sc, uint32_t num_cap)
11582 {
11583 	void *ptr;
11584 
11585 	ptr = mallocarray(num_cap, sizeof(struct qwx_dbring_cap),
11586 	    M_DEVBUF, M_NOWAIT | M_ZERO);
11587 	if (!ptr)
11588 		return ENOMEM;
11589 
11590 	sc->db_caps = ptr;
11591 	sc->num_db_cap = num_cap;
11592 
11593 	return 0;
11594 }
11595 
11596 void
qwx_wmi_free_dbring_caps(struct qwx_softc * sc)11597 qwx_wmi_free_dbring_caps(struct qwx_softc *sc)
11598 {
11599 	free(sc->db_caps, M_DEVBUF,
11600 	    sc->num_db_cap * sizeof(struct qwx_dbring_cap));
11601 	sc->db_caps = NULL;
11602 	sc->num_db_cap = 0;
11603 }
11604 
/*
 * Parse a WMI_TAG_DMA_RING_CAPABILITIES array and populate sc->db_caps
 * with one qwx_dbring_cap per entry. Called for both the
 * SERVICE_READY_EXT and SERVICE_READY_EXT2 events; only the first
 * occurrence is processed.
 */
int
qwx_wmi_tlv_dma_ring_caps(struct qwx_softc *sc, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
	struct wmi_dma_ring_capabilities *dma_caps;
	struct qwx_dbring_cap *dir_buff_caps;
	int ret;
	uint32_t i;

	/* First pass: count the capability TLVs in this array. */
	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
	ret = qwx_wmi_tlv_iter(sc, ptr, len,
	    qwx_wmi_tlv_dma_ring_caps_parse, dma_caps_parse);
	if (ret) {
		printf("%s: failed to parse dma ring caps tlv %d\n",
		    __func__, ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	/* Keep the set processed by an earlier event, if any. */
	if (sc->num_db_cap) {
		DNPRINTF(QWX_D_WMI,
		    "%s: Already processed, so ignoring dma ring caps\n",
		    __func__);
		return 0;
	}

	ret = qwx_wmi_alloc_dbring_caps(sc, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	/* Second pass: copy entries, validating the firmware module id. */
	dir_buff_caps = sc->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
			printf("%s: Invalid module id %d\n", __func__,
			    dma_caps[i].module_id);
			ret = EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = dma_caps[i].module_id;
		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
	}

	return 0;

free_dir_buff:
	qwx_wmi_free_dbring_caps(sc);
	return ret;
}
11661 
/*
 * Top-level TLV dispatcher for the SERVICE_READY_EXT event. The event
 * carries several WMI_TAG_ARRAY_STRUCT arrays in a fixed order; the
 * *_done flags in svc_rdy_ext track which array the current TLV
 * belongs to, so the order of the else-if chain below must match the
 * order in which firmware emits the arrays.
 */
int
qwx_wmi_tlv_svc_rdy_ext_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* Fixed event header; extracted into svc_rdy_ext->param. */
		ret = qwx_pull_svc_ready_ext(wmi_handle, ptr,
		    &svc_rdy_ext->param);
		if (ret) {
			printf("%s: unable to extract ext params\n", __func__);
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		/* Remember the header of the hw-mode capability array. */
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(sc, len, ptr,
		    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			/* 1st array: hardware mode capabilities. */
			ret = qwx_wmi_tlv_hw_mode_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = 1;
		} else if (!svc_rdy_ext->mac_phy_done) {
			/* 2nd array: per-PHY MAC capabilities. */
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = qwx_wmi_tlv_iter(sc, ptr, len,
			    qwx_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext);
			if (ret) {
				printf("%s: failed to parse tlv %d\n",
				    __func__, ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = 1;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			/* 3rd array: HAL register capabilities. */
			ret = qwx_wmi_tlv_ext_hal_reg_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* 4th-6th arrays are skipped, only marked done. */
			svc_rdy_ext->mac_phy_chainmask_combo_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = 1;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = 1;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			/* 7th array: direct-buffer DMA ring capabilities. */
			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
			    &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = 1;
		}
		break;

	default:
		break;
	}

	return 0;
}
11740 
11741 void
qwx_service_ready_ext_event(struct qwx_softc * sc,struct mbuf * m)11742 qwx_service_ready_ext_event(struct qwx_softc *sc, struct mbuf *m)
11743 {
11744 	struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
11745 	int ret;
11746 
11747 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11748 	    qwx_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext);
11749 	if (ret) {
11750 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11751 		qwx_wmi_free_dbring_caps(sc);
11752 		return;
11753 	}
11754 
11755 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext\n", __func__);
11756 
11757 	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_EXT2_MSG))
11758 		wakeup(&sc->wmi.service_ready);
11759 
11760 	free(svc_rdy_ext.mac_phy_caps, M_DEVBUF,
11761 	    svc_rdy_ext.mac_phy_caps_size);
11762 }
11763 
11764 int
qwx_wmi_tlv_svc_rdy_ext2_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)11765 qwx_wmi_tlv_svc_rdy_ext2_parse(struct qwx_softc *sc,
11766     uint16_t tag, uint16_t len, const void *ptr, void *data)
11767 {
11768 	struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
11769 	int ret;
11770 
11771 	switch (tag) {
11772 	case WMI_TAG_ARRAY_STRUCT:
11773 		if (!parse->dma_ring_cap_done) {
11774 			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
11775 			    &parse->dma_caps_parse);
11776 			if (ret)
11777 				return ret;
11778 
11779 			parse->dma_ring_cap_done = 1;
11780 		}
11781 		break;
11782 	default:
11783 		break;
11784 	}
11785 
11786 	return 0;
11787 }
11788 
11789 void
qwx_service_ready_ext2_event(struct qwx_softc * sc,struct mbuf * m)11790 qwx_service_ready_ext2_event(struct qwx_softc *sc, struct mbuf *m)
11791 {
11792 	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
11793 	int ret;
11794 
11795 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11796 	    qwx_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2);
11797 	if (ret) {
11798 		printf("%s: failed to parse ext2 event tlv %d\n",
11799 		    __func__, ret);
11800 		qwx_wmi_free_dbring_caps(sc);
11801 		return;
11802 	}
11803 
11804 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext2\n", __func__);
11805 
11806 	sc->wmi.service_ready = 1;
11807 	wakeup(&sc->wmi.service_ready);
11808 }
11809 
11810 void
qwx_service_available_event(struct qwx_softc * sc,struct mbuf * m)11811 qwx_service_available_event(struct qwx_softc *sc, struct mbuf *m)
11812 {
11813 	int ret;
11814 
11815 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11816 	    qwx_wmi_tlv_services_parser, NULL);
11817 	if (ret)
11818 		printf("%s: failed to parse services available tlv %d\n",
11819 		    sc->sc_dev.dv_xname, ret);
11820 
11821 	DNPRINTF(QWX_D_WMI, "%s: event service available\n", __func__);
11822 }
11823 
11824 int
qwx_pull_peer_assoc_conf_ev(struct qwx_softc * sc,struct mbuf * m,struct wmi_peer_assoc_conf_arg * peer_assoc_conf)11825 qwx_pull_peer_assoc_conf_ev(struct qwx_softc *sc, struct mbuf *m,
11826     struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
11827 {
11828 	const void **tb;
11829 	const struct wmi_peer_assoc_conf_event *ev;
11830 	int ret;
11831 
11832 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11833 	if (tb == NULL) {
11834 		ret = ENOMEM;
11835 		printf("%s: failed to parse tlv: %d\n",
11836 		    sc->sc_dev.dv_xname, ret);
11837 		return ret;
11838 	}
11839 
11840 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
11841 	if (!ev) {
11842 		printf("%s: failed to fetch peer assoc conf ev\n",
11843 		    sc->sc_dev.dv_xname);
11844 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11845 		return EPROTO;
11846 	}
11847 
11848 	peer_assoc_conf->vdev_id = ev->vdev_id;
11849 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
11850 
11851 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11852 	return 0;
11853 }
11854 
11855 void
qwx_peer_assoc_conf_event(struct qwx_softc * sc,struct mbuf * m)11856 qwx_peer_assoc_conf_event(struct qwx_softc *sc, struct mbuf *m)
11857 {
11858 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
11859 
11860 	if (qwx_pull_peer_assoc_conf_ev(sc, m, &peer_assoc_conf) != 0) {
11861 		printf("%s: failed to extract peer assoc conf event\n",
11862 		   sc->sc_dev.dv_xname);
11863 		return;
11864 	}
11865 
11866 	DNPRINTF(QWX_D_WMI, "%s: event peer assoc conf ev vdev id %d "
11867 	    "macaddr %s\n", __func__, peer_assoc_conf.vdev_id,
11868 	    ether_sprintf((u_char *)peer_assoc_conf.macaddr));
11869 
11870 	sc->peer_assoc_done = 1;
11871 	wakeup(&sc->peer_assoc_done);
11872 }
11873 
/*
 * TLV dispatcher for the WMI "ready" event. Records the wlan init
 * status and SoC MAC address, plus optional per-pdev MAC addresses
 * when the firmware provides extra addresses for multi-radio devices.
 */
int
qwx_wmi_tlv_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct wmi_mac_addr *addr_list;
	struct qwx_pdev *pdev;
	uint32_t num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/* Copy at most sizeof(fixed_param); zero-fill the rest. */
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       MIN(sizeof(fixed_param), len));
		sc->wlan_init_status = fixed_param.ready_event_min.status;
		rdy_parse->num_extra_mac_addr =
			fixed_param.ready_event_min.num_extra_mac_addr;

		IEEE80211_ADDR_COPY(sc->mac_addr,
		    fixed_param.ready_event_min.mac_addr.addr);
		sc->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
		sc->wmi_ready = 1;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		/* Optional list of extra per-pdev MAC addresses. */
		addr_list = (struct wmi_mac_addr *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		/* Only useful with multiple radios and enough addresses. */
		if (!(sc->num_radios > 1 && num_mac_addr >= sc->num_radios))
			break;

		for (i = 0; i < sc->num_radios; i++) {
			pdev = &sc->pdevs[i];
			IEEE80211_ADDR_COPY(pdev->mac_addr, addr_list[i].addr);
		}
		sc->pdevs_macaddr_valid = 1;
		break;
	default:
		break;
	}

	return 0;
}
11918 
11919 void
qwx_ready_event(struct qwx_softc * sc,struct mbuf * m)11920 qwx_ready_event(struct qwx_softc *sc, struct mbuf *m)
11921 {
11922 	struct wmi_tlv_rdy_parse rdy_parse = { };
11923 	int ret;
11924 
11925 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
11926 	    qwx_wmi_tlv_rdy_parse, &rdy_parse);
11927 	if (ret) {
11928 		printf("%s: failed to parse tlv %d\n", __func__, ret);
11929 		return;
11930 	}
11931 
11932 	DNPRINTF(QWX_D_WMI, "%s: event ready", __func__);
11933 
11934 	sc->wmi.unified_ready = 1;
11935 	wakeup(&sc->wmi.unified_ready);
11936 }
11937 
11938 int
qwx_pull_peer_del_resp_ev(struct qwx_softc * sc,struct mbuf * m,struct wmi_peer_delete_resp_event * peer_del_resp)11939 qwx_pull_peer_del_resp_ev(struct qwx_softc *sc, struct mbuf *m,
11940     struct wmi_peer_delete_resp_event *peer_del_resp)
11941 {
11942 	const void **tb;
11943 	const struct wmi_peer_delete_resp_event *ev;
11944 	int ret;
11945 
11946 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11947 	if (tb == NULL) {
11948 		ret = ENOMEM;
11949 		printf("%s: failed to parse tlv: %d\n",
11950 		    sc->sc_dev.dv_xname, ret);
11951 		return ret;
11952 	}
11953 
11954 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
11955 	if (!ev) {
11956 		printf("%s: failed to fetch peer delete resp ev\n",
11957 		    sc->sc_dev.dv_xname);
11958 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11959 		return EPROTO;
11960 	}
11961 
11962 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
11963 
11964 	peer_del_resp->vdev_id = ev->vdev_id;
11965 	IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
11966 	    ev->peer_macaddr.addr);
11967 
11968 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11969 	return 0;
11970 }
11971 
11972 void
qwx_peer_delete_resp_event(struct qwx_softc * sc,struct mbuf * m)11973 qwx_peer_delete_resp_event(struct qwx_softc *sc, struct mbuf *m)
11974 {
11975 	struct wmi_peer_delete_resp_event peer_del_resp;
11976 
11977 	if (qwx_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
11978 		printf("%s: failed to extract peer delete resp",
11979 		    sc->sc_dev.dv_xname);
11980 		return;
11981 	}
11982 
11983 	sc->peer_delete_done = 1;
11984 	wakeup(&sc->peer_delete_done);
11985 
11986 	DNPRINTF(QWX_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
11987 	    __func__, peer_del_resp.vdev_id,
11988 	    ether_sprintf(peer_del_resp.peer_macaddr.addr));
11989 }
11990 
11991 const char *
qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status)11992 qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status)
11993 {
11994 	switch (vdev_resp_status) {
11995 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
11996 		return "invalid vdev id";
11997 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
11998 		return "not supported";
11999 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
12000 		return "dfs violation";
12001 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
12002 		return "invalid regdomain";
12003 	default:
12004 		return "unknown";
12005 	}
12006 }
12007 
12008 int
qwx_pull_vdev_start_resp_tlv(struct qwx_softc * sc,struct mbuf * m,struct wmi_vdev_start_resp_event * vdev_rsp)12009 qwx_pull_vdev_start_resp_tlv(struct qwx_softc *sc, struct mbuf *m,
12010     struct wmi_vdev_start_resp_event *vdev_rsp)
12011 {
12012 	const void **tb;
12013 	const struct wmi_vdev_start_resp_event *ev;
12014 	int ret;
12015 
12016 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12017 	if (tb == NULL) {
12018 		ret = ENOMEM;
12019 		printf("%s: failed to parse tlv: %d\n",
12020 		    sc->sc_dev.dv_xname, ret);
12021 		return ret;
12022 	}
12023 
12024 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
12025 	if (!ev) {
12026 		printf("%s: failed to fetch vdev start resp ev\n",
12027 		    sc->sc_dev.dv_xname);
12028 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12029 		return EPROTO;
12030 	}
12031 
12032 	memset(vdev_rsp, 0, sizeof(*vdev_rsp));
12033 
12034 	vdev_rsp->vdev_id = ev->vdev_id;
12035 	vdev_rsp->requestor_id = ev->requestor_id;
12036 	vdev_rsp->resp_type = ev->resp_type;
12037 	vdev_rsp->status = ev->status;
12038 	vdev_rsp->chain_mask = ev->chain_mask;
12039 	vdev_rsp->smps_mode = ev->smps_mode;
12040 	vdev_rsp->mac_id = ev->mac_id;
12041 	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
12042 	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
12043 
12044 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12045 	return 0;
12046 }
12047 
12048 void
qwx_vdev_start_resp_event(struct qwx_softc * sc,struct mbuf * m)12049 qwx_vdev_start_resp_event(struct qwx_softc *sc, struct mbuf *m)
12050 {
12051 	struct wmi_vdev_start_resp_event vdev_start_resp;
12052 	uint32_t status;
12053 
12054 	if (qwx_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
12055 		printf("%s: failed to extract vdev start resp",
12056 		    sc->sc_dev.dv_xname);
12057 		return;
12058 	}
12059 
12060 	status = vdev_start_resp.status;
12061 	if (status) {
12062 		printf("%s: vdev start resp error status %d (%s)\n",
12063 		    sc->sc_dev.dv_xname, status,
12064 		   qwx_wmi_vdev_resp_print(status));
12065 	}
12066 
12067 	sc->vdev_setup_done = 1;
12068 	wakeup(&sc->vdev_setup_done);
12069 
12070 	DNPRINTF(QWX_D_WMI, "%s: vdev start resp for vdev id %d", __func__,
12071 	    vdev_start_resp.vdev_id);
12072 }
12073 
12074 int
qwx_pull_vdev_stopped_param_tlv(struct qwx_softc * sc,struct mbuf * m,uint32_t * vdev_id)12075 qwx_pull_vdev_stopped_param_tlv(struct qwx_softc *sc, struct mbuf *m,
12076     uint32_t *vdev_id)
12077 {
12078 	const void **tb;
12079 	const struct wmi_vdev_stopped_event *ev;
12080 	int ret;
12081 
12082 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12083 	if (tb == NULL) {
12084 		ret = ENOMEM;
12085 		printf("%s: failed to parse tlv: %d\n",
12086 		    sc->sc_dev.dv_xname, ret);
12087 		return ret;
12088 	}
12089 
12090 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
12091 	if (!ev) {
12092 		printf("%s: failed to fetch vdev stop ev\n",
12093 		    sc->sc_dev.dv_xname);
12094 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12095 		return EPROTO;
12096 	}
12097 
12098 	*vdev_id = ev->vdev_id;
12099 
12100 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12101 	return 0;
12102 }
12103 
12104 void
qwx_vdev_stopped_event(struct qwx_softc * sc,struct mbuf * m)12105 qwx_vdev_stopped_event(struct qwx_softc *sc, struct mbuf *m)
12106 {
12107 	uint32_t vdev_id = 0;
12108 
12109 	if (qwx_pull_vdev_stopped_param_tlv(sc, m, &vdev_id) != 0) {
12110 		printf("%s: failed to extract vdev stopped event\n",
12111 		    sc->sc_dev.dv_xname);
12112 		return;
12113 	}
12114 
12115 	sc->vdev_setup_done = 1;
12116 	wakeup(&sc->vdev_setup_done);
12117 
12118 	DNPRINTF(QWX_D_WMI, "%s: vdev stopped for vdev id %d", __func__,
12119 	    vdev_id);
12120 }
12121 
12122 int
qwx_wmi_tlv_iter_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)12123 qwx_wmi_tlv_iter_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
12124     const void *ptr, void *data)
12125 {
12126 	const void **tb = data;
12127 
12128 	if (tag < WMI_TAG_MAX)
12129 		tb[tag] = ptr;
12130 
12131 	return 0;
12132 }
12133 
/*
 * Walk the TLVs in ptr/len and fill tb[] (WMI_TAG_MAX entries) with a
 * pointer to each TLV payload, indexed by tag.
 */
int
qwx_wmi_tlv_parse(struct qwx_softc *sc, const void **tb,
    const void *ptr, size_t len)
{
	return qwx_wmi_tlv_iter(sc, ptr, len, qwx_wmi_tlv_iter_parse,
	    (void *)tb);
}
12141 
12142 const void **
qwx_wmi_tlv_parse_alloc(struct qwx_softc * sc,const void * ptr,size_t len)12143 qwx_wmi_tlv_parse_alloc(struct qwx_softc *sc, const void *ptr, size_t len)
12144 {
12145 	const void **tb;
12146 	int ret;
12147 
12148 	tb = mallocarray(WMI_TAG_MAX, sizeof(*tb), M_DEVBUF, M_NOWAIT | M_ZERO);
12149 	if (!tb)
12150 		return NULL;
12151 
12152 	ret = qwx_wmi_tlv_parse(sc, tb, ptr, len);
12153 	if (ret) {
12154 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12155 		return NULL;
12156 	}
12157 
12158 	return tb;
12159 }
12160 
12161 static void
qwx_print_reg_rule(struct qwx_softc * sc,const char * band,uint32_t num_reg_rules,struct cur_reg_rule * reg_rule_ptr)12162 qwx_print_reg_rule(struct qwx_softc *sc, const char *band,
12163     uint32_t num_reg_rules, struct cur_reg_rule *reg_rule_ptr)
12164 {
12165 	struct cur_reg_rule *reg_rule = reg_rule_ptr;
12166 	uint32_t count;
12167 
12168 	DNPRINTF(QWX_D_WMI, "%s: number of reg rules in %s band: %d\n",
12169 	    __func__, band, num_reg_rules);
12170 
12171 	for (count = 0; count < num_reg_rules; count++) {
12172 		DNPRINTF(QWX_D_WMI,
12173 		    "%s: reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
12174 		    __func__, count + 1, reg_rule->start_freq,
12175 		    reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain,
12176 		    reg_rule->reg_power, reg_rule->flags);
12177 		reg_rule++;
12178 	}
12179 }
12180 
12181 struct cur_reg_rule *
qwx_create_reg_rules_from_wmi(uint32_t num_reg_rules,struct wmi_regulatory_rule_struct * wmi_reg_rule)12182 qwx_create_reg_rules_from_wmi(uint32_t num_reg_rules,
12183     struct wmi_regulatory_rule_struct *wmi_reg_rule)
12184 {
12185 	struct cur_reg_rule *reg_rule_ptr;
12186 	uint32_t count;
12187 
12188 	reg_rule_ptr = mallocarray(num_reg_rules, sizeof(*reg_rule_ptr),
12189 	    M_DEVBUF, M_NOWAIT | M_ZERO);
12190 	if (!reg_rule_ptr)
12191 		return NULL;
12192 
12193 	for (count = 0; count < num_reg_rules; count++) {
12194 		reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ,
12195 		    wmi_reg_rule[count].freq_info);
12196 		reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ,
12197 		    wmi_reg_rule[count].freq_info);
12198 		reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW,
12199 		    wmi_reg_rule[count].bw_pwr_info);
12200 		reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR,
12201 		    wmi_reg_rule[count].bw_pwr_info);
12202 		reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN,
12203 		    wmi_reg_rule[count].bw_pwr_info);
12204 		reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS,
12205 		    wmi_reg_rule[count].flag_info);
12206 	}
12207 
12208 	return reg_rule_ptr;
12209 }
12210 
12211 int
qwx_pull_reg_chan_list_update_ev(struct qwx_softc * sc,struct mbuf * m,struct cur_regulatory_info * reg_info)12212 qwx_pull_reg_chan_list_update_ev(struct qwx_softc *sc, struct mbuf *m,
12213     struct cur_regulatory_info *reg_info)
12214 {
12215 	const void **tb;
12216 	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
12217 	struct wmi_regulatory_rule_struct *wmi_reg_rule;
12218 	uint32_t num_2ghz_reg_rules, num_5ghz_reg_rules;
12219 	int ret;
12220 
12221 	DNPRINTF(QWX_D_WMI, "%s: processing regulatory channel list\n",
12222 	    __func__);
12223 
12224 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12225 	if (tb == NULL) {
12226 		ret = ENOMEM; /* XXX allocation failure or parsing failure? */
12227 		printf("%s: failed to parse tlv: %d\n", __func__, ret);
12228 		return ENOMEM;
12229 	}
12230 
12231 	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
12232 	if (!chan_list_event_hdr) {
12233 		printf("%s: failed to fetch reg chan list update ev\n",
12234 		    __func__);
12235 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12236 		return EPROTO;
12237 	}
12238 
12239 	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
12240 	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
12241 
12242 	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
12243 		printf("%s: No regulatory rules available in the event info\n",
12244 		    __func__);
12245 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12246 		return EINVAL;
12247 	}
12248 
12249 	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN);
12250 	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
12251 	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
12252 	reg_info->num_phy = chan_list_event_hdr->num_phy;
12253 	reg_info->phy_id = chan_list_event_hdr->phy_id;
12254 	reg_info->ctry_code = chan_list_event_hdr->country_id;
12255 	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
12256 
12257 	DNPRINTF(QWX_D_WMI, "%s: CC status_code %s\n", __func__,
12258 	    qwx_cc_status_to_str(reg_info->status_code));
12259 
12260 	reg_info->status_code =
12261 		qwx_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
12262 
12263 	reg_info->is_ext_reg_event = false;
12264 
12265 	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
12266 	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
12267 	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
12268 	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
12269 
12270 	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
12271 	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
12272 
12273 	DNPRINTF(QWX_D_WMI,
12274 	    "%s: cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d "
12275 	    "max_5ghz %d\n", __func__, reg_info->alpha2, reg_info->dfs_region,
12276 	    reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
12277 	    reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
12278 
12279 	DNPRINTF(QWX_D_WMI,
12280 	    "%s: num_2ghz_reg_rules %d num_5ghz_reg_rules %d\n", __func__,
12281 	    num_2ghz_reg_rules, num_5ghz_reg_rules);
12282 
12283 	wmi_reg_rule = (struct wmi_regulatory_rule_struct *)
12284 	    ((uint8_t *)chan_list_event_hdr + sizeof(*chan_list_event_hdr)
12285 	    + sizeof(struct wmi_tlv));
12286 
12287 	if (num_2ghz_reg_rules) {
12288 		reg_info->reg_rules_2ghz_ptr = qwx_create_reg_rules_from_wmi(
12289 		    num_2ghz_reg_rules, wmi_reg_rule);
12290 		if (!reg_info->reg_rules_2ghz_ptr) {
12291 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12292 			printf("%s: Unable to allocate memory for "
12293 			    "2 GHz rules\n", __func__);
12294 			return ENOMEM;
12295 		}
12296 
12297 		qwx_print_reg_rule(sc, "2 GHz", num_2ghz_reg_rules,
12298 		    reg_info->reg_rules_2ghz_ptr);
12299 	}
12300 
12301 	if (num_5ghz_reg_rules) {
12302 		wmi_reg_rule += num_2ghz_reg_rules;
12303 		reg_info->reg_rules_5ghz_ptr = qwx_create_reg_rules_from_wmi(
12304 		    num_5ghz_reg_rules, wmi_reg_rule);
12305 		if (!reg_info->reg_rules_5ghz_ptr) {
12306 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12307 			printf("%s: Unable to allocate memory for "
12308 			    "5 GHz rules\n", __func__);
12309 			return ENOMEM;
12310 		}
12311 
12312 		qwx_print_reg_rule(sc, "5 GHz", num_5ghz_reg_rules,
12313 		    reg_info->reg_rules_5ghz_ptr);
12314 	}
12315 
12316 	DNPRINTF(QWX_D_WMI, "%s: processed regulatory channel list\n",
12317 	    __func__);
12318 
12319 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12320 	return 0;
12321 }
12322 
int
qwx_pull_reg_chan_list_ext_update_ev(struct qwx_softc *sc, struct mbuf *m,
    struct cur_regulatory_info *reg_info)
{
	/*
	 * Stub: parsing of extended regulatory channel list events is
	 * not implemented yet. qwx_reg_chan_list_event() calls this for
	 * any command type other than WMI_REG_CHAN_LIST_CC_ID and treats
	 * the ENOTSUP return like any other extraction failure.
	 */
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
12330 
/*
 * Populate the net80211 channel table from the regulatory rules the
 * firmware reported. 2 GHz rules map onto channels 1-14; 5 GHz rules
 * map onto channels 36 and up, stepping 4 channel numbers (20 MHz) at
 * a time. Channels covered by a disabled rule are zeroed out.
 */
void
qwx_init_channels(struct qwx_softc *sc, struct cur_regulatory_info *reg_info)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan;
	struct cur_reg_rule *rule;
	int i, chnum;
	uint16_t freq;

	for (i = 0; i < reg_info->num_2ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_2ghz_ptr[i];
		/* Sanity-check rule boundaries against the 2 GHz band. */
		if (rule->start_freq < 2402 ||
		    rule->start_freq > 2500 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		/*
		 * Assumes rule boundaries are channel-edge frequencies;
		 * +10 MHz lands on the first 20 MHz channel's center.
		 */
		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_2GHZ);
		if (chnum < 1 || chnum > 14) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* Walk all 2 GHz channels covered by this rule. */
		while (freq <= rule->end_freq && chnum <= 14) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & REGULATORY_CHAN_DISABLED) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_CCK |
				    IEEE80211_CHAN_OFDM |
				    IEEE80211_CHAN_DYN |
				    IEEE80211_CHAN_2GHZ;
			}
			/* Advance first, then derive the next frequency. */
			chnum++;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_2GHZ);
		}
	}

	for (i = 0; i < reg_info->num_5ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_5ghz_ptr[i];
		/* Sanity-check rule boundaries against the 5 GHz band. */
		if (rule->start_freq < 5170 ||
		    rule->start_freq > 6000 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_5GHZ);
		if (chnum < 36 || chnum > IEEE80211_CHAN_MAX) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/*
		 * Walk the rule's channels in 20 MHz steps. The 5885 MHz
		 * cap bounds the upper end of the 5 GHz band handled here
		 * (NOTE(review): presumably to stay below the 6 GHz range
		 * — confirm against supported channel list).
		 */
		while (freq <= rule->end_freq && freq <= 5885 &&
		    chnum <= IEEE80211_CHAN_MAX) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & (REGULATORY_CHAN_DISABLED |
			    REGULATORY_CHAN_NO_OFDM)) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_A;
				/* Radar/no-IR/indoor channels: listen only. */
				if (rule->flags & (REGULATORY_CHAN_RADAR |
				    REGULATORY_CHAN_NO_IR |
				    REGULATORY_CHAN_INDOOR_ONLY)) {
					chan->ic_flags |=
					    IEEE80211_CHAN_PASSIVE;
				}
			}
			chnum += 4;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_5GHZ);
		}
	}
}
12415 
/*
 * Handle a WMI regulatory channel list event: allocate a regulatory
 * info structure, extract the rules from the event (legacy or extended
 * format depending on 'id'), and apply them to the channel table.
 * The rule arrays and the info structure itself are always freed
 * before returning; 'ret' carries any extraction/allocation error.
 */
int
qwx_reg_chan_list_event(struct qwx_softc *sc, struct mbuf *m,
    enum wmi_reg_chan_list_cmd_type id)
{
	struct cur_regulatory_info *reg_info = NULL;
	int ret = 0;
#if 0
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int pdev_idx, i, j;
	struct ath11k *ar;
#endif

	reg_info = malloc(sizeof(*reg_info), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!reg_info) {
		ret = ENOMEM;
		goto fallback;
	}

	/* Extended events are not implemented yet and fail with ENOTSUP. */
	if (id == WMI_REG_CHAN_LIST_CC_ID)
		ret = qwx_pull_reg_chan_list_update_ev(sc, m, reg_info);
	else
		ret = qwx_pull_reg_chan_list_ext_update_ev(sc, m, reg_info);

	if (ret) {
		printf("%s: failed to extract regulatory info from "
		    "received event\n", sc->sc_dev.dv_xname);
		goto fallback;
	}

	DNPRINTF(QWX_D_WMI, "%s: event reg chan list id %d\n", __func__, id);

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		printf("%s: Failed to set the requested Country "
		    "regulatory setting\n", __func__);
		goto mem_free;
	}

	/* Apply the freshly extracted rules to the channel table. */
	qwx_init_channels(sc, reg_info);
#if 0
	pdev_idx = reg_info->phy_id;

	/* Avoid default reg rule updates sent during FW recovery if
	 * it is already available
	 */
	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
	    ab->default_regd[pdev_idx]) {
		spin_unlock(&ab->base_lock);
		goto mem_free;
	}
	spin_unlock(&ab->base_lock);

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (ab->default_regd[pdev_idx]) {
		/* The initial rules from FW after WMI Init is to build
		 * the default regd. From then on, any rules updated for
		 * the pdev could be due to user reg changes.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* This regd would be applied during mac registration and is
		 * held constant throughout for regd intersection purpose
		 */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);
#endif
	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
mem_free:
	if (reg_info) {
		/* Rule arrays were allocated by the pull function. */
		free(reg_info->reg_rules_2ghz_ptr, M_DEVBUF,
		    reg_info->num_2ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_2ghz_ptr));
		free(reg_info->reg_rules_5ghz_ptr, M_DEVBUF,
		    reg_info->num_5ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_5ghz_ptr));
#if 0
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
		}
#endif
		free(reg_info, M_DEVBUF, sizeof(*reg_info));
	}
	return ret;
}
12564 
12565 const char *
qwx_wmi_event_scan_type_str(enum wmi_scan_event_type type,enum wmi_scan_completion_reason reason)12566 qwx_wmi_event_scan_type_str(enum wmi_scan_event_type type,
12567     enum wmi_scan_completion_reason reason)
12568 {
12569 	switch (type) {
12570 	case WMI_SCAN_EVENT_STARTED:
12571 		return "started";
12572 	case WMI_SCAN_EVENT_COMPLETED:
12573 		switch (reason) {
12574 		case WMI_SCAN_REASON_COMPLETED:
12575 			return "completed";
12576 		case WMI_SCAN_REASON_CANCELLED:
12577 			return "completed [cancelled]";
12578 		case WMI_SCAN_REASON_PREEMPTED:
12579 			return "completed [preempted]";
12580 		case WMI_SCAN_REASON_TIMEDOUT:
12581 			return "completed [timedout]";
12582 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
12583 			return "completed [internal err]";
12584 		case WMI_SCAN_REASON_MAX:
12585 			break;
12586 		}
12587 		return "completed [unknown]";
12588 	case WMI_SCAN_EVENT_BSS_CHANNEL:
12589 		return "bss channel";
12590 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
12591 		return "foreign channel";
12592 	case WMI_SCAN_EVENT_DEQUEUED:
12593 		return "dequeued";
12594 	case WMI_SCAN_EVENT_PREEMPTED:
12595 		return "preempted";
12596 	case WMI_SCAN_EVENT_START_FAILED:
12597 		return "start failed";
12598 	case WMI_SCAN_EVENT_RESTARTED:
12599 		return "restarted";
12600 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12601 		return "foreign channel exit";
12602 	default:
12603 		return "unknown";
12604 	}
12605 }
12606 
12607 const char *
qwx_scan_state_str(enum ath11k_scan_state state)12608 qwx_scan_state_str(enum ath11k_scan_state state)
12609 {
12610 	switch (state) {
12611 	case ATH11K_SCAN_IDLE:
12612 		return "idle";
12613 	case ATH11K_SCAN_STARTING:
12614 		return "starting";
12615 	case ATH11K_SCAN_RUNNING:
12616 		return "running";
12617 	case ATH11K_SCAN_ABORTING:
12618 		return "aborting";
12619 	}
12620 
12621 	return "unknown";
12622 }
12623 
12624 int
qwx_pull_scan_ev(struct qwx_softc * sc,struct mbuf * m,struct wmi_scan_event * scan_evt_param)12625 qwx_pull_scan_ev(struct qwx_softc *sc, struct mbuf *m,
12626     struct wmi_scan_event *scan_evt_param)
12627 {
12628 	const void **tb;
12629 	const struct wmi_scan_event *ev;
12630 
12631 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
12632 	if (tb == NULL) {
12633 		DPRINTF("%s: failed to parse tlv\n", __func__);
12634 		return EINVAL;
12635 	}
12636 
12637 	ev = tb[WMI_TAG_SCAN_EVENT];
12638 	if (!ev) {
12639 		DPRINTF("%s: failed to fetch scan ev\n", __func__);
12640 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12641 		return EPROTO;
12642 	}
12643 
12644 	scan_evt_param->event_type = ev->event_type;
12645 	scan_evt_param->reason = ev->reason;
12646 	scan_evt_param->channel_freq = ev->channel_freq;
12647 	scan_evt_param->scan_req_id = ev->scan_req_id;
12648 	scan_evt_param->scan_id = ev->scan_id;
12649 	scan_evt_param->vdev_id = ev->vdev_id;
12650 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
12651 
12652 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12653 	return 0;
12654 }
12655 
12656 void
qwx_wmi_event_scan_started(struct qwx_softc * sc)12657 qwx_wmi_event_scan_started(struct qwx_softc *sc)
12658 {
12659 #ifdef notyet
12660 	lockdep_assert_held(&ar->data_lock);
12661 #endif
12662 	switch (sc->scan.state) {
12663 	case ATH11K_SCAN_IDLE:
12664 	case ATH11K_SCAN_RUNNING:
12665 	case ATH11K_SCAN_ABORTING:
12666 		printf("%s: received scan started event in an invalid "
12667 		"scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12668 		qwx_scan_state_str(sc->scan.state), sc->scan.state);
12669 		break;
12670 	case ATH11K_SCAN_STARTING:
12671 		sc->scan.state = ATH11K_SCAN_RUNNING;
12672 #if 0
12673 		if (ar->scan.is_roc)
12674 			ieee80211_ready_on_channel(ar->hw);
12675 #endif
12676 		wakeup(&sc->scan.state);
12677 		break;
12678 	}
12679 }
12680 
12681 void
qwx_wmi_event_scan_completed(struct qwx_softc * sc)12682 qwx_wmi_event_scan_completed(struct qwx_softc *sc)
12683 {
12684 #ifdef notyet
12685 	lockdep_assert_held(&ar->data_lock);
12686 #endif
12687 	switch (sc->scan.state) {
12688 	case ATH11K_SCAN_IDLE:
12689 	case ATH11K_SCAN_STARTING:
12690 		/* One suspected reason scan can be completed while starting is
12691 		 * if firmware fails to deliver all scan events to the host,
12692 		 * e.g. when transport pipe is full. This has been observed
12693 		 * with spectral scan phyerr events starving wmi transport
12694 		 * pipe. In such case the "scan completed" event should be (and
12695 		 * is) ignored by the host as it may be just firmware's scan
12696 		 * state machine recovering.
12697 		 */
12698 		printf("%s: received scan completed event in an invalid "
12699 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12700 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12701 		break;
12702 	case ATH11K_SCAN_RUNNING:
12703 	case ATH11K_SCAN_ABORTING:
12704 		qwx_mac_scan_finish(sc);
12705 		break;
12706 	}
12707 }
12708 
12709 void
qwx_wmi_event_scan_bss_chan(struct qwx_softc * sc)12710 qwx_wmi_event_scan_bss_chan(struct qwx_softc *sc)
12711 {
12712 #ifdef notyet
12713 	lockdep_assert_held(&ar->data_lock);
12714 #endif
12715 	switch (sc->scan.state) {
12716 	case ATH11K_SCAN_IDLE:
12717 	case ATH11K_SCAN_STARTING:
12718 		printf("%s: received scan bss chan event in an invalid "
12719 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12720 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12721 		break;
12722 	case ATH11K_SCAN_RUNNING:
12723 	case ATH11K_SCAN_ABORTING:
12724 		sc->scan_channel = 0;
12725 		break;
12726 	}
12727 }
12728 
12729 void
qwx_wmi_event_scan_foreign_chan(struct qwx_softc * sc,uint32_t freq)12730 qwx_wmi_event_scan_foreign_chan(struct qwx_softc *sc, uint32_t freq)
12731 {
12732 #ifdef notyet
12733 	lockdep_assert_held(&ar->data_lock);
12734 #endif
12735 	switch (sc->scan.state) {
12736 	case ATH11K_SCAN_IDLE:
12737 	case ATH11K_SCAN_STARTING:
12738 		printf("%s: received scan foreign chan event in an invalid "
12739 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12740 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12741 		break;
12742 	case ATH11K_SCAN_RUNNING:
12743 	case ATH11K_SCAN_ABORTING:
12744 		sc->scan_channel = ieee80211_mhz2ieee(freq, 0);
12745 #if 0
12746 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
12747 			complete(&ar->scan.on_channel);
12748 #endif
12749 		break;
12750 	}
12751 }
12752 
12753 void
qwx_wmi_event_scan_start_failed(struct qwx_softc * sc)12754 qwx_wmi_event_scan_start_failed(struct qwx_softc *sc)
12755 {
12756 #ifdef notyet
12757 	lockdep_assert_held(&ar->data_lock);
12758 #endif
12759 	switch (sc->scan.state) {
12760 	case ATH11K_SCAN_IDLE:
12761 	case ATH11K_SCAN_RUNNING:
12762 	case ATH11K_SCAN_ABORTING:
12763 		printf("%s: received scan start failed event in an invalid "
12764 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
12765 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12766 		break;
12767 	case ATH11K_SCAN_STARTING:
12768 		wakeup(&sc->scan.state);
12769 		qwx_mac_scan_finish(sc);
12770 		break;
12771 	}
12772 }
12773 
12774 
12775 void
qwx_scan_event(struct qwx_softc * sc,struct mbuf * m)12776 qwx_scan_event(struct qwx_softc *sc, struct mbuf *m)
12777 {
12778 	struct wmi_scan_event scan_ev = { 0 };
12779 	struct qwx_vif *arvif;
12780 
12781 	if (qwx_pull_scan_ev(sc, m, &scan_ev) != 0) {
12782 		printf("%s: failed to extract scan event",
12783 		    sc->sc_dev.dv_xname);
12784 		return;
12785 	}
12786 #ifdef notyet
12787 	rcu_read_lock();
12788 #endif
12789 	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
12790 		if (arvif->vdev_id == scan_ev.vdev_id)
12791 			break;
12792 	}
12793 
12794 	if (!arvif) {
12795 		printf("%s: received scan event for unknown vdev\n",
12796 		    sc->sc_dev.dv_xname);
12797 #if 0
12798 		rcu_read_unlock();
12799 #endif
12800 		return;
12801 	}
12802 #if 0
12803 	spin_lock_bh(&ar->data_lock);
12804 #endif
12805 	DNPRINTF(QWX_D_WMI,
12806 	    "%s: event scan %s type %d reason %d freq %d req_id %d scan_id %d "
12807 	    "vdev_id %d state %s (%d)\n", __func__,
12808 	    qwx_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
12809 	    scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
12810 	    scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
12811 	    qwx_scan_state_str(sc->scan.state), sc->scan.state);
12812 
12813 	switch (scan_ev.event_type) {
12814 	case WMI_SCAN_EVENT_STARTED:
12815 		qwx_wmi_event_scan_started(sc);
12816 		break;
12817 	case WMI_SCAN_EVENT_COMPLETED:
12818 		qwx_wmi_event_scan_completed(sc);
12819 		break;
12820 	case WMI_SCAN_EVENT_BSS_CHANNEL:
12821 		qwx_wmi_event_scan_bss_chan(sc);
12822 		break;
12823 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
12824 		qwx_wmi_event_scan_foreign_chan(sc, scan_ev.channel_freq);
12825 		break;
12826 	case WMI_SCAN_EVENT_START_FAILED:
12827 		printf("%s: received scan start failure event\n",
12828 		    sc->sc_dev.dv_xname);
12829 		qwx_wmi_event_scan_start_failed(sc);
12830 		break;
12831 	case WMI_SCAN_EVENT_DEQUEUED:
12832 		qwx_mac_scan_finish(sc);
12833 		break;
12834 	case WMI_SCAN_EVENT_PREEMPTED:
12835 	case WMI_SCAN_EVENT_RESTARTED:
12836 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
12837 	default:
12838 		break;
12839 	}
12840 #if 0
12841 	spin_unlock_bh(&ar->data_lock);
12842 
12843 	rcu_read_unlock();
12844 #endif
12845 }
12846 
12847 int
qwx_pull_chan_info_ev(struct qwx_softc * sc,uint8_t * evt_buf,uint32_t len,struct wmi_chan_info_event * ch_info_ev)12848 qwx_pull_chan_info_ev(struct qwx_softc *sc, uint8_t *evt_buf, uint32_t len,
12849     struct wmi_chan_info_event *ch_info_ev)
12850 {
12851 	const void **tb;
12852 	const struct wmi_chan_info_event *ev;
12853 
12854 	tb = qwx_wmi_tlv_parse_alloc(sc, evt_buf, len);
12855 	if (tb == NULL) {
12856 		printf("%s: failed to parse tlv\n", sc->sc_dev.dv_xname);
12857 		return EINVAL;
12858 	}
12859 
12860 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
12861 	if (!ev) {
12862 		printf("%s: failed to fetch chan info ev\n",
12863 		    sc->sc_dev.dv_xname);
12864 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12865 		return EPROTO;
12866 	}
12867 
12868 	ch_info_ev->err_code = ev->err_code;
12869 	ch_info_ev->freq = ev->freq;
12870 	ch_info_ev->cmd_flags = ev->cmd_flags;
12871 	ch_info_ev->noise_floor = ev->noise_floor;
12872 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
12873 	ch_info_ev->cycle_count = ev->cycle_count;
12874 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
12875 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
12876 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
12877 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
12878 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
12879 	ch_info_ev->vdev_id = ev->vdev_id;
12880 
12881 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
12882 	return 0;
12883 }
12884 
/*
 * Handle a WMI channel info event: validate the reporting vdev and
 * scan state, then record noise floor and channel busy-time survey
 * data for the reported frequency.
 */
void
qwx_chan_info_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_vif *arvif;
	struct wmi_chan_info_event ch_info_ev = {0};
	struct qwx_survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	uint32_t cc_freq_hz = sc->cc_freq_hz;

	if (qwx_pull_chan_info_ev(sc, mtod(m, void *), m->m_pkthdr.len,
	    &ch_info_ev) != 0) {
		printf("%s: failed to extract chan info event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWX_D_WMI, "%s: event chan info vdev_id %d err_code %d "
	    "freq %d cmd_flags %d noise_floor %d rx_clear_count %d "
	    "cycle_count %d mac_clk_mhz %d\n", __func__,
	    ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
	    ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
	    ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
	    ch_info_ev.mac_clk_mhz);

	/* An end-of-report marker carries no survey data. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		DNPRINTF(QWX_D_WMI, "chan info report completed\n");
		return;
	}
#ifdef notyet
	rcu_read_lock();
#endif
	/* Find the vif matching the event's vdev id. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == ch_info_ev.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in chan info ev %d\n",
		   sc->sc_dev.dv_xname, ch_info_ev.vdev_id);
#ifdef notyet
		rcu_read_unlock();
#endif
		return;
	}
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	/* Channel info is only meaningful while a scan is in flight. */
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		printf("%s: received chan info event without a scan request, "
		    "ignoring\n", sc->sc_dev.dv_xname);
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	/* The survey array is indexed by IEEE channel number. */
	idx = ieee80211_mhz2ieee(ch_info_ev.freq, 0);
	if (idx >= nitems(sc->survey)) {
		printf("%s: invalid frequency %d (idx %d out of bounds)\n",
		    sc->sc_dev.dv_xname, ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	/* Start-of-report: reset and seed this channel's survey entry.
	 * NOTE(review): mac_clk_mhz * 1000 yields kHz, not Hz, despite
	 * the variable's name — matches upstream ath11k; confirm units. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &sc->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->time = ch_info_ev.cycle_count / cc_freq_hz;
		survey->time_busy = ch_info_ev.rx_clear_count / cc_freq_hz;
	}
exit:
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
#else
	return;
#endif
}
12971 
12972 int
qwx_wmi_tlv_mgmt_rx_parse(struct qwx_softc * sc,uint16_t tag,uint16_t len,const void * ptr,void * data)12973 qwx_wmi_tlv_mgmt_rx_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
12974     const void *ptr, void *data)
12975 {
12976 	struct wmi_tlv_mgmt_rx_parse *parse = data;
12977 
12978 	switch (tag) {
12979 	case WMI_TAG_MGMT_RX_HDR:
12980 		parse->fixed = ptr;
12981 		break;
12982 	case WMI_TAG_ARRAY_BYTE:
12983 		if (!parse->frame_buf_done) {
12984 			parse->frame_buf = ptr;
12985 			parse->frame_buf_done = 1;
12986 		}
12987 		break;
12988 	}
12989 	return 0;
12990 }
12991 
/*
 * Extract management rx parameters from a WMI mgmt rx event and trim
 * the mbuf so that it starts at the 802.11 frame and is exactly
 * buf_len bytes long. On success the caller receives the header
 * fields in 'hdr' and an mbuf containing only the frame.
 */
int
qwx_pull_mgmt_rx_params_tlv(struct qwx_softc *sc, struct mbuf *m,
    struct mgmt_rx_event_params *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { 0 };
	const struct wmi_mgmt_rx_hdr *ev;
	const uint8_t *frame;
	int ret;
	size_t totlen, hdrlen;

	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
	    qwx_wmi_tlv_mgmt_rx_parse, &parse);
	if (ret) {
		printf("%s: failed to parse mgmt rx tlv %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	/* Both the fixed header and the frame payload TLV are required. */
	if (!ev || !frame) {
		printf("%s: failed to fetch mgmt rx hdr\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	hdr->pdev_id =  ev->pdev_id;
	hdr->chan_freq = le32toh(ev->chan_freq);
	hdr->channel = le32toh(ev->channel);
	hdr->snr = le32toh(ev->snr);
	hdr->rate = le32toh(ev->rate);
	hdr->phy_mode = le32toh(ev->phy_mode);
	hdr->buf_len = le32toh(ev->buf_len);
	hdr->status = le32toh(ev->status);
	hdr->flags = le32toh(ev->flags);
	hdr->rssi = le32toh(ev->rssi);
	hdr->tsf_delta = le32toh(ev->tsf_delta);
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* The frame pointer must lie within this mbuf's data. */
	if (frame < mtod(m, uint8_t *) ||
	    frame >= mtod(m, uint8_t *) + m->m_pkthdr.len) {
		printf("%s: invalid mgmt rx frame pointer\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	hdrlen = frame - mtod(m, uint8_t *);

	/* Guard against wraparound of hdrlen + buf_len. */
	if (hdrlen + hdr->buf_len < hdr->buf_len) {
		printf("%s: length overflow in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	totlen = hdrlen + hdr->buf_len;
	if (m->m_pkthdr.len < totlen) {
		printf("%s: invalid length in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	/* shift the mbuf to point at `frame` */
	m->m_len = m->m_pkthdr.len = totlen;
	m_adj(m, hdrlen);

#if 0 /* Not needed on OpenBSD? */
	ath11k_ce_byte_swap(skb->data, hdr->buf_len);
#endif
	return 0;
}
13061 
/*
 * Handle a WMI management rx event: extract the rx parameters, trim
 * the mbuf down to the 802.11 frame, filter out frames with rx error
 * status, and hand the frame to net80211. Consumes the mbuf on every
 * path (ieee80211_input() takes ownership on success).
 */
void
qwx_mgmt_rx_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct mgmt_rx_event_params rx_ev = {0};
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	if (qwx_pull_mgmt_rx_params_tlv(sc, m, &rx_ev) != 0) {
		printf("%s: failed to extract mgmt rx event\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return;
	}

	memset(&rxi, 0, sizeof(rxi));

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx status %08x\n", __func__,
	    rx_ev.status);
#ifdef notyet
	rcu_read_lock();
#endif
	if (rx_ev.pdev_id >= nitems(sc->pdevs)) {
		printf("%s: invalid pdev_id %d in mgmt_rx_event\n",
		    sc->sc_dev.dv_xname, rx_ev.pdev_id);
		m_freem(m);
		goto exit;
	}

	/* Drop frames received during CAC or with decrypt/key/CRC errors. */
	if ((test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		m_freem(m);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) {
		ic->ic_stats.is_ccmp_dec_errs++;
		m_freem(m);
		goto exit;
	}

	/* NOTE(review): rssi is derived by offsetting SNR with the
	 * default noise floor — confirm against net80211 expectations. */
	rxi.rxi_chan = rx_ev.channel;
	rxi.rxi_rssi = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
#if 0
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
#endif

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);
#if 0
	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath11k_mac_handle_beacon(ar, skb);
#endif

	DNPRINTF(QWX_D_MGMT,
	    "%s: event mgmt rx skb %p len %d ftype %02x stype %02x\n",
	    __func__, m, m->m_pkthdr.len,
	    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK,
	    wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
	    __func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);

#if NBPFILTER > 0
	/* Pass the frame to any attached radiotap listener. */
	if (sc->sc_drvbpf != NULL) {
		struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    m, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(ifp, m, ni, &rxi);
	ieee80211_release_node(ic, ni);
exit:
#ifdef notyet
	rcu_read_unlock();
#else
	return;
#endif
}
13161 
13162 int
qwx_pull_mgmt_tx_compl_param_tlv(struct qwx_softc * sc,struct mbuf * m,struct wmi_mgmt_tx_compl_event * param)13163 qwx_pull_mgmt_tx_compl_param_tlv(struct qwx_softc *sc, struct mbuf *m,
13164     struct wmi_mgmt_tx_compl_event *param)
13165 {
13166 	const void **tb;
13167 	const struct wmi_mgmt_tx_compl_event *ev;
13168 	int ret = 0;
13169 
13170 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13171 	if (tb == NULL) {
13172 		ret = ENOMEM;
13173 		printf("%s: failed to parse tlv: %d\n",
13174 		    sc->sc_dev.dv_xname, ret);
13175 		return ENOMEM;
13176 	}
13177 
13178 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
13179 	if (!ev) {
13180 		printf("%s: failed to fetch mgmt tx compl ev\n",
13181 		    sc->sc_dev.dv_xname);
13182 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13183 		return EPROTO;
13184 	}
13185 
13186 	param->pdev_id = ev->pdev_id;
13187 	param->desc_id = ev->desc_id;
13188 	param->status = ev->status;
13189 	param->ack_rssi = ev->ack_rssi;
13190 
13191 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13192 	return 0;
13193 }
13194 
/*
 * Complete a previously transmitted management frame: unload the DMA
 * map, free the mbuf, drop the node reference, and restart the send
 * queue if it had been stopped because the management ring was full.
 */
void
qwx_wmi_process_mgmt_tx_comp(struct qwx_softc *sc,
    struct wmi_mgmt_tx_compl_event *tx_compl_param)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_tx_data *tx_data;

	/* desc_id indexes the per-vif management tx buffer array. */
	if (tx_compl_param->desc_id >= nitems(arvif->txmgmt.data)) {
		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
		return;
	}

	tx_data = &arvif->txmgmt.data[tx_compl_param->desc_id];
	if (tx_data->m == NULL) {
		/* No frame pending in this slot; stale or bogus desc_id. */
		printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
		    sc->sc_dev.dv_xname, tx_compl_param->desc_id);
		return;
	}

	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
	m_freem(tx_data->m);
	tx_data->m = NULL;

	ieee80211_release_node(ic, tx_data->ni);
	tx_data->ni = NULL;

	if (arvif->txmgmt.queued > 0)
		arvif->txmgmt.queued--;

	/* Non-zero completion status counts as an output error. */
	if (tx_compl_param->status != 0)
		ifp->if_oerrors++;

	/* Resume transmission if the management queue has room again. */
	if (arvif->txmgmt.queued < nitems(arvif->txmgmt.data) - 1) {
		sc->qfullmsk &= ~(1U << QWX_MGMT_QUEUE_ID);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			(*ifp->if_start)(ifp);
		}
	}
}
13238 
13239 void
qwx_mgmt_tx_compl_event(struct qwx_softc * sc,struct mbuf * m)13240 qwx_mgmt_tx_compl_event(struct qwx_softc *sc, struct mbuf *m)
13241 {
13242 	struct wmi_mgmt_tx_compl_event tx_compl_param = { 0 };
13243 
13244 	if (qwx_pull_mgmt_tx_compl_param_tlv(sc, m, &tx_compl_param) != 0) {
13245 		printf("%s: failed to extract mgmt tx compl event\n",
13246 		    sc->sc_dev.dv_xname);
13247 		return;
13248 	}
13249 
13250 	qwx_wmi_process_mgmt_tx_comp(sc, &tx_compl_param);
13251 
13252 	DNPRINTF(QWX_D_MGMT, "%s: event mgmt tx compl ev pdev_id %d, "
13253 	    "desc_id %d, status %d ack_rssi %d", __func__,
13254 	    tx_compl_param.pdev_id, tx_compl_param.desc_id,
13255 	    tx_compl_param.status, tx_compl_param.ack_rssi);
13256 }
13257 
13258 int
qwx_pull_roam_ev(struct qwx_softc * sc,struct mbuf * m,struct wmi_roam_event * roam_ev)13259 qwx_pull_roam_ev(struct qwx_softc *sc, struct mbuf *m,
13260     struct wmi_roam_event *roam_ev)
13261 {
13262 	const void **tb;
13263 	const struct wmi_roam_event *ev;
13264 	int ret;
13265 
13266 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13267 	if (tb == NULL) {
13268 		ret = ENOMEM;
13269 		printf("%s: failed to parse tlv: %d\n",
13270 		    sc->sc_dev.dv_xname, ret);
13271 		return ret;
13272 	}
13273 
13274 	ev = tb[WMI_TAG_ROAM_EVENT];
13275 	if (!ev) {
13276 		printf("%s: failed to fetch roam ev\n",
13277 		    sc->sc_dev.dv_xname);
13278 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13279 		return EPROTO;
13280 	}
13281 
13282 	roam_ev->vdev_id = ev->vdev_id;
13283 	roam_ev->reason = ev->reason;
13284 	roam_ev->rssi = ev->rssi;
13285 
13286 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13287 	return 0;
13288 }
13289 
13290 void
qwx_mac_handle_beacon_miss(struct qwx_softc * sc,uint32_t vdev_id)13291 qwx_mac_handle_beacon_miss(struct qwx_softc *sc, uint32_t vdev_id)
13292 {
13293 	struct ieee80211com *ic = &sc->sc_ic;
13294 
13295 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
13296 	    (ic->ic_state != IEEE80211_S_RUN))
13297 		return;
13298 
13299 	if (ic->ic_mgt_timer == 0) {
13300 		if (ic->ic_if.if_flags & IFF_DEBUG)
13301 			printf("%s: receiving no beacons from %s; checking if "
13302 			    "this AP is still responding to probe requests\n",
13303 			    sc->sc_dev.dv_xname,
13304 			    ether_sprintf(ic->ic_bss->ni_macaddr));
13305 		/*
13306 		 * Rather than go directly to scan state, try to send a
13307 		 * directed probe request first. If that fails then the
13308 		 * state machine will drop us into scanning after timing
13309 		 * out waiting for a probe response.
13310 		 */
13311 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
13312 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
13313 	}
13314 }
13315 
13316 void
qwx_roam_event(struct qwx_softc * sc,struct mbuf * m)13317 qwx_roam_event(struct qwx_softc *sc, struct mbuf *m)
13318 {
13319 	struct wmi_roam_event roam_ev = {};
13320 
13321 	if (qwx_pull_roam_ev(sc, m, &roam_ev) != 0) {
13322 		printf("%s: failed to extract roam event\n",
13323 		    sc->sc_dev.dv_xname);
13324 		return;
13325 	}
13326 
13327 	DNPRINTF(QWX_D_WMI, "%s: event roam vdev %u reason 0x%08x rssi %d\n",
13328 	    __func__, roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
13329 
13330 	if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
13331 		return;
13332 
13333 	switch (roam_ev.reason) {
13334 	case WMI_ROAM_REASON_BEACON_MISS:
13335 		qwx_mac_handle_beacon_miss(sc, roam_ev.vdev_id);
13336 		break;
13337 	case WMI_ROAM_REASON_BETTER_AP:
13338 	case WMI_ROAM_REASON_LOW_RSSI:
13339 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
13340 	case WMI_ROAM_REASON_HO_FAILED:
13341 		break;
13342 	}
13343 }
13344 
13345 int
qwx_pull_vdev_install_key_compl_ev(struct qwx_softc * sc,struct mbuf * m,struct wmi_vdev_install_key_complete_arg * arg)13346 qwx_pull_vdev_install_key_compl_ev(struct qwx_softc *sc, struct mbuf *m,
13347     struct wmi_vdev_install_key_complete_arg *arg)
13348 {
13349 	const void **tb;
13350 	const struct wmi_vdev_install_key_compl_event *ev;
13351 	int ret;
13352 
13353 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
13354 	if (tb == NULL) {
13355 		ret = ENOMEM;
13356 		printf("%s: failed to parse tlv: %d\n",
13357 		    sc->sc_dev.dv_xname, ret);
13358 		return ret;
13359 	}
13360 
13361 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
13362 	if (!ev) {
13363 		printf("%s: failed to fetch vdev install key compl ev\n",
13364 		    sc->sc_dev.dv_xname);
13365 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13366 		return EPROTO;
13367 	}
13368 
13369 	arg->vdev_id = ev->vdev_id;
13370 	arg->macaddr = ev->peer_macaddr.addr;
13371 	arg->key_idx = ev->key_idx;
13372 	arg->key_flags = ev->key_flags;
13373 	arg->status = ev->status;
13374 
13375 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
13376 	return 0;
13377 }
13378 
/*
 * Handle a WMI "vdev install key complete" event: record the firmware's
 * status in the softc and wake the thread sleeping on install_key_done.
 */
void
qwx_vdev_install_key_compl_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = { 0 };
	struct qwx_vif *arvif;

	if (qwx_pull_vdev_install_key_compl_ev(sc, m,
	    &install_key_compl) != 0) {
		printf("%s: failed to extract install key compl event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWX_D_WMI, "%s: event vdev install key ev idx %d flags %08x "
	    "macaddr %s status %d\n", __func__, install_key_compl.key_idx,
	    install_key_compl.key_flags,
	    ether_sprintf((u_char *)install_key_compl.macaddr),
	    install_key_compl.status);

	/* Find the vif this event is for; arvif is NULL if none matches. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == install_key_compl.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in install key compl ev %d\n",
		    sc->sc_dev.dv_xname, install_key_compl.vdev_id);
		return;
	}

	sc->install_key_status = 0;

	if (install_key_compl.status !=
	    WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		printf("%s: install key failed for %s status %d\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf((u_char *)install_key_compl.macaddr),
		    install_key_compl.status);
		sc->install_key_status = install_key_compl.status;
	}

	/* Set the completion flag before waking the waiter. */
	sc->install_key_done = 1;
	wakeup(&sc->install_key_done);
}
13422 
/*
 * Main WMI event dispatcher: strip the WMI command header from the mbuf
 * and hand the payload to the handler matching the event id. The mbuf is
 * freed here after dispatch, except for WMI_MGMT_RX_EVENTID whose handler
 * takes ownership of it. The #if 0 cases mirror Linux ath11k handlers
 * that have not been ported yet.
 */
void
qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = mtod(m, struct wmi_cmd_hdr *);
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	/* Strip the command header; handlers see only the event payload. */
	m_adj(m, sizeof(struct wmi_cmd_hdr));

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		qwx_service_ready_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		qwx_service_ready_ext_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		qwx_service_ready_ext2_event(sc, m);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_ID);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_EXT_ID);
		break;
	case WMI_READY_EVENTID:
		qwx_ready_event(sc, m);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		qwx_peer_delete_resp_event(sc, m);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		qwx_vdev_start_resp_event(sc, m);
		break;
#if 0
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
#endif
	case WMI_VDEV_STOPPED_EVENTID:
		qwx_vdev_stopped_event(sc, m);
		break;
	case WMI_MGMT_RX_EVENTID:
		qwx_mgmt_rx_event(sc, m);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		qwx_mgmt_tx_compl_event(sc, m);
		break;
	case WMI_SCAN_EVENTID:
		qwx_scan_event(sc, m);
		break;
#if 0
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
#endif
	case WMI_ROAM_EVENTID:
		qwx_roam_event(sc, m);
		break;
	case WMI_CHAN_INFO_EVENTID:
		qwx_chan_info_event(sc, m);
		break;
#if 0
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
#endif
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		qwx_vdev_install_key_compl_event(sc, m);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		qwx_service_available_event(sc, m);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		qwx_peer_assoc_conf_event(sc, m);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		/* ignore */
		break;
#if 0
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_UTF_EVENTID:
		ath11k_tm_wmi_event(ab, id, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath11k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath11k_wmi_twt_add_dialog_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath11k_reg_11d_new_cc_event(ab, skb);
		break;
#endif
	case WMI_DIAG_EVENTID:
		/* Ignore. These events trigger tracepoints in Linux. */
		break;
#if 0
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath11k_wmi_gtk_offload_status_event(ab, skb);
		break;
#endif
	case WMI_UPDATE_FW_MEM_DUMP_EVENTID:
		DPRINTF("%s: 0x%x: update fw mem dump\n", __func__, id);
		break;
	case WMI_PDEV_SET_HW_MODE_RESP_EVENTID:
		DPRINTF("%s: 0x%x: set HW mode response event\n", __func__, id);
		break;
	case WMI_WLAN_FREQ_AVOID_EVENTID:
		DPRINTF("%s: 0x%x: wlan freq avoid event\n", __func__, id);
		break;
	default:
		DPRINTF("%s: unsupported event id 0x%x\n", __func__, id);
		break;
	}

	m_freem(m);
}
13574 
13575 void
qwx_wmi_op_ep_tx_credits(struct qwx_softc * sc)13576 qwx_wmi_op_ep_tx_credits(struct qwx_softc *sc)
13577 {
13578 	struct qwx_htc *htc = &sc->htc;
13579 	int i;
13580 
13581 	/* try to send pending beacons first. they take priority */
13582 	sc->wmi.tx_credits = 1;
13583 	wakeup(&sc->wmi.tx_credits);
13584 
13585 	if (!sc->hw_params.credit_flow)
13586 		return;
13587 
13588 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
13589 		struct qwx_htc_ep *ep = &htc->endpoint[i];
13590 		if (ep->tx_credit_flow_enabled && ep->tx_credits > 0)
13591 			wakeup(&ep->tx_credits);
13592 	}
13593 }
13594 
13595 int
qwx_connect_pdev_htc_service(struct qwx_softc * sc,uint32_t pdev_idx)13596 qwx_connect_pdev_htc_service(struct qwx_softc *sc, uint32_t pdev_idx)
13597 {
13598 	int status;
13599 	uint32_t svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
13600 	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
13601 	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
13602 	struct qwx_htc_svc_conn_req conn_req;
13603 	struct qwx_htc_svc_conn_resp conn_resp;
13604 
13605 	memset(&conn_req, 0, sizeof(conn_req));
13606 	memset(&conn_resp, 0, sizeof(conn_resp));
13607 
13608 	/* these fields are the same for all service endpoints */
13609 	conn_req.ep_ops.ep_tx_complete = qwx_wmi_htc_tx_complete;
13610 	conn_req.ep_ops.ep_rx_complete = qwx_wmi_tlv_op_rx;
13611 	conn_req.ep_ops.ep_tx_credits = qwx_wmi_op_ep_tx_credits;
13612 
13613 	/* connect to control service */
13614 	conn_req.service_id = svc_id[pdev_idx];
13615 
13616 	status = qwx_htc_connect_service(&sc->htc, &conn_req, &conn_resp);
13617 	if (status) {
13618 		printf("%s: failed to connect to WMI CONTROL service "
13619 		    "status: %d\n", sc->sc_dev.dv_xname, status);
13620 		return status;
13621 	}
13622 
13623 	sc->wmi.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
13624 	sc->wmi.wmi[pdev_idx].eid = conn_resp.eid;
13625 	sc->wmi.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
13626 	sc->wmi.wmi[pdev_idx].tx_ce_desc = 0;
13627 
13628 	return 0;
13629 }
13630 
13631 int
qwx_wmi_connect(struct qwx_softc * sc)13632 qwx_wmi_connect(struct qwx_softc *sc)
13633 {
13634 	uint32_t i;
13635 	uint8_t wmi_ep_count;
13636 
13637 	wmi_ep_count = sc->htc.wmi_ep_count;
13638 	if (wmi_ep_count > sc->hw_params.max_radios)
13639 		return -1;
13640 
13641 	for (i = 0; i < wmi_ep_count; i++)
13642 		qwx_connect_pdev_htc_service(sc, i);
13643 
13644 	return 0;
13645 }
13646 
13647 void
qwx_htc_reset_endpoint_states(struct qwx_htc * htc)13648 qwx_htc_reset_endpoint_states(struct qwx_htc *htc)
13649 {
13650 	struct qwx_htc_ep *ep;
13651 	int i;
13652 
13653 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
13654 		ep = &htc->endpoint[i];
13655 		ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
13656 		ep->max_ep_message_len = 0;
13657 		ep->max_tx_queue_depth = 0;
13658 		ep->eid = i;
13659 		ep->htc = htc;
13660 		ep->tx_credit_flow_enabled = 1;
13661 	}
13662 }
13663 
/*
 * TX-completion stub for the pseudo HTC control endpoint: logs that no
 * handling is implemented and frees the mbuf.
 */
void
qwx_htc_control_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);

	m_freem(m);
}
13671 
/*
 * RX-completion stub for the pseudo HTC control endpoint: logs that no
 * handling is implemented and frees the mbuf.
 */
void
qwx_htc_control_rx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);

	m_freem(m);
}
13679 
13680 uint8_t
qwx_htc_get_credit_allocation(struct qwx_htc * htc,uint16_t service_id)13681 qwx_htc_get_credit_allocation(struct qwx_htc *htc, uint16_t service_id)
13682 {
13683 	uint8_t i, allocation = 0;
13684 
13685 	for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
13686 		if (htc->service_alloc_table[i].service_id == service_id) {
13687 			allocation =
13688 			    htc->service_alloc_table[i].credit_allocation;
13689 		}
13690 	}
13691 
13692 	return allocation;
13693 }
13694 
/*
 * Map an HTC service id to a human-readable name for debug output.
 * Returns "Unknown" for ids not covered by the enum.
 */
const char *
qwx_htc_service_name(enum ath11k_htc_svc_id id)
{
	switch (id) {
	case ATH11K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH11K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
		return "WMI MAC1";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
		return "WMI MAC2";
	case ATH11K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH11K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH11K_HTC_SVC_ID_IPA_TX:
		return "IPA TX";
	case ATH11K_HTC_SVC_ID_PKT_LOG:
		return "PKT LOG";
	}

	return "Unknown";
}
13733 
/*
 * Allocate a zeroed mbuf large enough for an HTC header plus
 * payload_size bytes of payload. Returns NULL on allocation failure.
 */
struct mbuf *
qwx_htc_alloc_mbuf(size_t payload_size)
{
	struct mbuf *m;
	size_t size = sizeof(struct ath11k_htc_hdr) + payload_size;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	/* Standard cluster if it fits, otherwise a larger external buffer. */
	if (size <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, size);
	/* M_EXT unset means cluster allocation failed. */
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}

	m->m_len = m->m_pkthdr.len = size;
	memset(mtod(m, void *), 0, size);

	return m;
}
13758 
13759 struct mbuf *
qwx_htc_build_tx_ctrl_mbuf(void)13760 qwx_htc_build_tx_ctrl_mbuf(void)
13761 {
13762 	size_t size;
13763 
13764 	size = ATH11K_HTC_CONTROL_BUFFER_SIZE - sizeof(struct ath11k_htc_hdr);
13765 
13766 	return qwx_htc_alloc_mbuf(size);
13767 }
13768 
/*
 * Fill in the HTC frame header at the front of the mbuf: destination
 * endpoint id, payload length, optional credit-update flag, and a
 * per-endpoint sequence number.
 */
void
qwx_htc_prepare_tx_mbuf(struct qwx_htc_ep *ep, struct mbuf *m)
{
	struct ath11k_htc_hdr *hdr;

	hdr = mtod(m, struct ath11k_htc_hdr *);

	memset(hdr, 0, sizeof(*hdr));
	hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
	    FIELD_PREP(HTC_HDR_PAYLOADLEN, (m->m_pkthdr.len - sizeof(*hdr)));

	/* Ask the target to piggy-back credit reports on this endpoint. */
	if (ep->tx_credit_flow_enabled)
		hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
		    ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
#ifdef notyet
	spin_lock_bh(&ep->htc->tx_lock);
#endif
	/* Per-endpoint sequence number, incremented for every frame. */
	hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
#ifdef notyet
	spin_unlock_bh(&ep->htc->tx_lock);
#endif
}
13791 
13792 int
qwx_htc_send(struct qwx_htc * htc,enum ath11k_htc_ep_id eid,struct mbuf * m)13793 qwx_htc_send(struct qwx_htc *htc, enum ath11k_htc_ep_id eid, struct mbuf *m)
13794 {
13795 	struct qwx_htc_ep *ep = &htc->endpoint[eid];
13796 	struct qwx_softc *sc = htc->sc;
13797 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
13798 	void *ctx;
13799 	struct qwx_tx_data *tx_data;
13800 	int credits = 0;
13801 	int ret;
13802 	int credit_flow_enabled = (sc->hw_params.credit_flow &&
13803 	    ep->tx_credit_flow_enabled);
13804 
13805 	if (eid >= ATH11K_HTC_EP_COUNT) {
13806 		printf("%s: Invalid endpoint id: %d\n", __func__, eid);
13807 		return ENOENT;
13808 	}
13809 
13810 	if (credit_flow_enabled) {
13811 		credits = howmany(m->m_pkthdr.len, htc->target_credit_size);
13812 #ifdef notyet
13813 		spin_lock_bh(&htc->tx_lock);
13814 #endif
13815 		if (ep->tx_credits < credits) {
13816 			DNPRINTF(QWX_D_HTC,
13817 			    "%s: ep %d insufficient credits required %d "
13818 			    "total %d\n", __func__, eid, credits,
13819 			    ep->tx_credits);
13820 #ifdef notyet
13821 			spin_unlock_bh(&htc->tx_lock);
13822 #endif
13823 			return EAGAIN;
13824 		}
13825 		ep->tx_credits -= credits;
13826 		DNPRINTF(QWX_D_HTC, "%s: ep %d credits consumed %d total %d\n",
13827 		    __func__, eid, credits, ep->tx_credits);
13828 #ifdef notyet
13829 		spin_unlock_bh(&htc->tx_lock);
13830 #endif
13831 	}
13832 
13833 	qwx_htc_prepare_tx_mbuf(ep, m);
13834 
13835 	ctx = pipe->src_ring->per_transfer_context[pipe->src_ring->write_index];
13836 	tx_data = (struct qwx_tx_data *)ctx;
13837 
13838 	tx_data->eid = eid;
13839 	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
13840 	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
13841 	if (ret) {
13842 		printf("%s: can't map mbuf (error %d)\n",
13843 		    sc->sc_dev.dv_xname, ret);
13844 		if (ret != ENOBUFS)
13845 			m_freem(m);
13846 		goto err_credits;
13847 	}
13848 
13849 	DNPRINTF(QWX_D_HTC, "%s: tx mbuf %p eid %d paddr %lx\n",
13850 	    __func__, m, tx_data->eid, tx_data->map->dm_segs[0].ds_addr);
13851 #ifdef QWX_DEBUG
13852 	{
13853 		int i;
13854 		uint8_t *p = mtod(m, uint8_t *);
13855 		DNPRINTF(QWX_D_HTC, "%s message buffer:", __func__);
13856 		for (i = 0; i < m->m_pkthdr.len; i++) {
13857 			DNPRINTF(QWX_D_HTC, "%s %.2x",
13858 			    i % 16 == 0 ? "\n" : "", p[i]);
13859 		}
13860 		if (i % 16)
13861 			DNPRINTF(QWX_D_HTC, "\n");
13862 	}
13863 #endif
13864 	ret = qwx_ce_send(htc->sc, m, ep->ul_pipe_id, ep->eid);
13865 	if (ret)
13866 		goto err_unmap;
13867 
13868 	return 0;
13869 
13870 err_unmap:
13871 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
13872 err_credits:
13873 	if (credit_flow_enabled) {
13874 #ifdef notyet
13875 		spin_lock_bh(&htc->tx_lock);
13876 #endif
13877 		ep->tx_credits += credits;
13878 		DNPRINTF(QWX_D_HTC, "%s: ep %d credits reverted %d total %d\n",
13879 		    __func__, eid, credits, ep->tx_credits);
13880 #ifdef notyet
13881 		spin_unlock_bh(&htc->tx_lock);
13882 #endif
13883 
13884 		if (ep->ep_ops.ep_tx_credits)
13885 			ep->ep_ops.ep_tx_credits(htc->sc);
13886 	}
13887 	return ret;
13888 }
13889 
/*
 * Perform the HTC service-connect handshake: send a connect-service
 * request for conn_req->service_id, wait for the target's response, and
 * set up the assigned endpoint. The pseudo control service is assigned
 * endpoint 0 without any message exchange. Returns 0 or a non-zero errno.
 *
 * NOTE(review): on the error paths after a successful qwx_htc_send()
 * the request mbuf is presumably owned and freed by the CE completion
 * path — TODO confirm there is no leak on timeout/EPROTO.
 */
int
qwx_htc_connect_service(struct qwx_htc *htc,
    struct qwx_htc_svc_conn_req *conn_req,
    struct qwx_htc_svc_conn_resp *conn_resp)
{
	struct qwx_softc *sc = htc->sc;
	struct ath11k_htc_conn_svc *req_msg;
	struct ath11k_htc_conn_svc_resp resp_msg_dummy;
	struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
	enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
	struct qwx_htc_ep *ep;
	struct mbuf *m;
	unsigned int max_msg_size = 0;
	int length, status = 0;
	int disable_credit_flow_ctrl = 0;
	uint16_t flags = 0;
	uint16_t message_id, service_id;
	uint8_t tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = 1;
		assigned_eid = ATH11K_HTC_EP_0;
		max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = qwx_htc_get_credit_allocation(htc, conn_req->service_id);
	if (!tx_alloc)
		DNPRINTF(QWX_D_HTC,
		    "%s: htc service %s does not allocate target credits\n",
		    sc->sc_dev.dv_xname,
		    qwx_htc_service_name(conn_req->service_id));

	m = qwx_htc_build_tx_ctrl_mbuf();
	if (!m) {
		printf("%s: Failed to allocate HTC packet\n",
		    sc->sc_dev.dv_xname);
		return ENOMEM;
	}

	/* Build the connect-service request after the HTC header. */
	length = sizeof(*req_msg);
	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) + length;

	req_msg = (struct ath11k_htc_conn_svc *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	memset(req_msg, 0, length);
	req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
	    ATH11K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);

	/* Only enable credit flow control for WMI ctrl service */
	if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = 1;
	}

	if (!sc->hw_params.credit_flow) {
		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = 1;
	}

	req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
	req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
	    conn_req->service_id);

	sc->ctl_resp = 0;

	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
	if (status) {
		/* On ENOBUFS the mbuf is still owned by the TX path. */
		if (status != ENOBUFS)
			m_freem(m);
		return status;
	}

	/* Wait (up to 1s) for the control response from the target. */
	while (!sc->ctl_resp) {
		int ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: Service connect timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	/* we controlled the buffer creation, it's aligned */
	resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
	message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
	service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
			       resp_msg->msg_svc_id);
	if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(*resp_msg))) {
		printf("%s: Invalid resp message ID 0x%x", __func__,
		    message_id);
		return EPROTO;
	}

	DNPRINTF(QWX_D_HTC, "%s: service %s connect response status 0x%lx "
	    "assigned ep 0x%lx\n", __func__, qwx_htc_service_name(service_id),
	    FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
	    FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));

	conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
	    resp_msg->flags_len);

	/* check response status */
	if (conn_resp->connect_resp_code !=
	    ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
		printf("%s: HTC Service %s connect request failed: 0x%x)\n",
		    __func__, qwx_htc_service_name(service_id),
		    conn_resp->connect_resp_code);
		return EPROTO;
	}

	assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
	    HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len);

	max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
	    resp_msg->flags_len);
setup:
	/* Sanity-check what the target (or the pseudo path) assigned. */
	if (assigned_eid >= ATH11K_HTC_EP_COUNT)
		return EPROTO;

	if (max_msg_size == 0)
		return EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* The endpoint must not already be bound to another service. */
	if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
		return EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
	    resp_msg->flags_len);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
	    resp_msg->flags_len);
	ep->tx_credits = tx_alloc;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = sc->ops.map_service_to_pipe(htc->sc, ep->service_id,
	    &ep->ul_pipe_id, &ep->dl_pipe_id);
	if (status)
		return status;

	DNPRINTF(QWX_D_HTC,
	    "%s: htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
	    __func__, qwx_htc_service_name(ep->service_id), ep->ul_pipe_id,
	    ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = 0;
		DNPRINTF(QWX_D_HTC,
		    "%s: htc service '%s' eid %d tx flow control disabled\n",
		    __func__, qwx_htc_service_name(ep->service_id),
		    assigned_eid);
	}

	return status;
}
14061 
14062 int
qwx_htc_start(struct qwx_htc * htc)14063 qwx_htc_start(struct qwx_htc *htc)
14064 {
14065 	struct mbuf *m;
14066 	int status = 0;
14067 	struct qwx_softc *sc = htc->sc;
14068 	struct ath11k_htc_setup_complete_extended *msg;
14069 
14070 	m = qwx_htc_build_tx_ctrl_mbuf();
14071 	if (!m)
14072 		return ENOMEM;
14073 
14074 	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) +
14075 	    sizeof(*msg);
14076 
14077 	msg = (struct ath11k_htc_setup_complete_extended *)(mtod(m, uint8_t *) +
14078 	    sizeof(struct ath11k_htc_hdr));
14079 	msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
14080 	    ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
14081 
14082 	if (sc->hw_params.credit_flow)
14083 		DNPRINTF(QWX_D_HTC, "%s: using tx credit flow control\n",
14084 		    __func__);
14085 	else
14086 		msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
14087 
14088 	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
14089 	if (status) {
14090 		m_freem(m);
14091 		return status;
14092 	}
14093 
14094 	return 0;
14095 }
14096 
14097 int
qwx_htc_init(struct qwx_softc * sc)14098 qwx_htc_init(struct qwx_softc *sc)
14099 {
14100 	struct qwx_htc *htc = &sc->htc;
14101 	struct qwx_htc_svc_conn_req conn_req;
14102 	struct qwx_htc_svc_conn_resp conn_resp;
14103 	int ret;
14104 #ifdef notyet
14105 	spin_lock_init(&htc->tx_lock);
14106 #endif
14107 	qwx_htc_reset_endpoint_states(htc);
14108 
14109 	htc->sc = sc;
14110 
14111 	switch (sc->wmi.preferred_hw_mode) {
14112 	case WMI_HOST_HW_MODE_SINGLE:
14113 		htc->wmi_ep_count = 1;
14114 		break;
14115 	case WMI_HOST_HW_MODE_DBS:
14116 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
14117 		htc->wmi_ep_count = 2;
14118 		break;
14119 	case WMI_HOST_HW_MODE_DBS_SBS:
14120 		htc->wmi_ep_count = 3;
14121 		break;
14122 	default:
14123 		htc->wmi_ep_count = sc->hw_params.max_radios;
14124 		break;
14125 	}
14126 
14127 	/* setup our pseudo HTC control endpoint connection */
14128 	memset(&conn_req, 0, sizeof(conn_req));
14129 	memset(&conn_resp, 0, sizeof(conn_resp));
14130 	conn_req.ep_ops.ep_tx_complete = qwx_htc_control_tx_complete;
14131 	conn_req.ep_ops.ep_rx_complete = qwx_htc_control_rx_complete;
14132 	conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
14133 	conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
14134 
14135 	/* connect fake service */
14136 	ret = qwx_htc_connect_service(htc, &conn_req, &conn_resp);
14137 	if (ret) {
14138 		printf("%s: could not connect to htc service (%d)\n",
14139 		    sc->sc_dev.dv_xname, ret);
14140 		return ret;
14141 	}
14142 
14143 	return 0;
14144 }
14145 
14146 int
qwx_htc_setup_target_buffer_assignments(struct qwx_htc * htc)14147 qwx_htc_setup_target_buffer_assignments(struct qwx_htc *htc)
14148 {
14149 	struct qwx_htc_svc_tx_credits *serv_entry;
14150 	uint32_t svc_id[] = {
14151 		ATH11K_HTC_SVC_ID_WMI_CONTROL,
14152 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
14153 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
14154 	};
14155 	int i, credits;
14156 
14157 	credits =  htc->total_transmit_credits;
14158 	serv_entry = htc->service_alloc_table;
14159 
14160 	if ((htc->wmi_ep_count == 0) ||
14161 	    (htc->wmi_ep_count > nitems(svc_id)))
14162 		return EINVAL;
14163 
14164 	/* Divide credits among number of endpoints for WMI */
14165 	credits = credits / htc->wmi_ep_count;
14166 	for (i = 0; i < htc->wmi_ep_count; i++) {
14167 		serv_entry[i].service_id = svc_id[i];
14168 		serv_entry[i].credit_allocation = credits;
14169 	}
14170 
14171 	return 0;
14172 }
14173 
int
qwx_htc_wait_target(struct qwx_softc *sc)
{
	struct qwx_htc *htc = &sc->htc;
	int polling = 0, ret;
	uint16_t i;
	struct ath11k_htc_ready *ready;
	uint16_t message_id;
	uint16_t credit_count;
	uint16_t credit_size;

	/*
	 * Wait for the HTC "ready" control message from the target.
	 * The interrupt path is expected to wake us via sc->ctl_resp.
	 * If no wakeup arrives within one second, service the copy
	 * engines by hand once (in case a completion interrupt was
	 * lost) and give up if the second timeout also expires.
	 */
	sc->ctl_resp = 0;
	while (!sc->ctl_resp) {
		ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
		    SEC_TO_NSEC(1));
		if (ret) {
			if (ret != EWOULDBLOCK)
				return ret;

			/* Second timeout: polling did not help either. */
			if (polling) {
				printf("%s: failed to receive control response "
				    "completion\n", sc->sc_dev.dv_xname);
				return ret;
			}

			printf("%s: failed to receive control response "
			    "completion, polling...\n", sc->sc_dev.dv_xname);
			polling = 1;

			/* Poll all copy engines once. */
			for (i = 0; i < sc->hw_params.ce_count; i++)
				qwx_ce_per_engine_service(sc, i);
		}
	}

	if (htc->control_resp_len < sizeof(*ready)) {
		printf("%s: Invalid HTC ready msg len:%d\n", __func__,
		    htc->control_resp_len);
		return EINVAL;
	}

	/* Extract credit count and size from the ready message. */
	ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
	message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
	credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
	    ready->id_credit_count);
	credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);

	if (message_id != ATH11K_HTC_MSG_READY_ID) {
		printf("%s: Invalid HTC ready msg: 0x%x\n", __func__,
		    message_id);
		return EINVAL;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	DNPRINTF(QWX_D_HTC, "%s: target ready total_transmit_credits %d "
	    "target_credit_size %d\n", __func__,
	    htc->total_transmit_credits, htc->target_credit_size);

	/* The target must advertise at least one credit of nonzero size. */
	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		printf("%s: Invalid credit size received\n", __func__);
		return EINVAL;
	}

	/* For QCA6390, wmi endpoint uses 1 credit to avoid
	 * back-to-back write.
	 */
	if (sc->hw_params.supports_shadow_regs)
		htc->total_transmit_credits = 1;

	qwx_htc_setup_target_buffer_assignments(htc);

	return 0;
}
14249 
void
qwx_dp_htt_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	/*
	 * HTT command transmission is fire-and-forget; once HTC has
	 * finished with the mbuf we simply release it.
	 */
	m_freem(m);
}
14256 
/*
 * Reassemble a 6-byte MAC address from the low 32 bits and high 16 bits
 * carried in HTT peer map events. The le32toh()/le16toh() calls mirror
 * the swab32()/swab16() that Linux ath11k performs on big-endian hosts
 * (see the disabled block below); on little-endian they are no-ops.
 * NOTE(review): big-endian correctness matches upstream but has
 * presumably never been exercised here — confirm if such a port happens.
 */
static inline void
qwx_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
{
#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		addr_l32 = swab32(addr_l32);
		addr_h16 = swab16(addr_h16);
	}
#endif
	uint32_t val32;
	uint16_t val16;

	/* First four bytes of the MAC address. */
	val32 = le32toh(addr_l32);
	memcpy(addr, &val32, 4);
	/* Remaining two bytes. */
	val16 = le16toh(addr_h16);
	memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4);
}
14274 
/*
 * Handle an HTT "peer map" event: store the firmware-assigned peer IDs
 * in the net80211 node matching mac_addr and wake up any thread waiting
 * on sc->peer_mapped for the association to complete.
 */
void
qwx_peer_map_event(struct qwx_softc *sc, uint8_t vdev_id, uint16_t peer_id,
    uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct qwx_node *nq;
	struct ath11k_peer *peer;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	/* Events for unknown nodes are silently ignored. */
	ni = ieee80211_find_node(ic, mac_addr);
	if (ni == NULL)
		return;
	nq = (struct qwx_node *)ni;
	peer = &nq->peer;

	/* Record the IDs firmware will use to refer to this peer. */
	peer->vdev_id = vdev_id;
	peer->peer_id = peer_id;
	peer->ast_hash = ast_hash;
	peer->hw_peer_id = hw_peer_id;
#if 0
	ether_addr_copy(peer->addr, mac_addr);
	list_add(&peer->list, &ab->peers);
#endif
	sc->peer_mapped = 1;
	wakeup(&sc->peer_mapped);

	DNPRINTF(QWX_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
}
14309 
14310 struct ieee80211_node *
qwx_peer_find_by_id(struct qwx_softc * sc,uint16_t peer_id)14311 qwx_peer_find_by_id(struct qwx_softc *sc, uint16_t peer_id)
14312 {
14313 	struct ieee80211com *ic = &sc->sc_ic;
14314 	struct ieee80211_node *ni = NULL;
14315 	int s;
14316 
14317 	s = splnet();
14318 	RBT_FOREACH(ni, ieee80211_tree, &ic->ic_tree) {
14319 		struct qwx_node *nq = (struct qwx_node *)ni;
14320 		if (nq->peer.peer_id == peer_id)
14321 			break;
14322 	}
14323 	splx(s);
14324 
14325 	return ni;
14326 }
14327 
/*
 * Handle an HTT "peer unmap" event: notify any thread waiting on
 * sc->peer_mapped that firmware has released this peer ID.
 */
void
qwx_peer_unmap_event(struct qwx_softc *sc, uint16_t peer_id)
{
	struct ieee80211_node *ni;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	ni = qwx_peer_find_by_id(sc, peer_id);
	if (!ni) {
		/* Unknown ID: log it, but do not wake waiters. */
		printf("%s: peer-unmap-event: unknown peer id %d\n",
		    sc->sc_dev.dv_xname, peer_id);
		goto exit;
	}

	DNPRINTF(QWX_D_HTT, "%s: peer unmap peer %s id %d\n",
	    __func__, ether_sprintf(ni->ni_macaddr), peer_id);
#if 0
	list_del(&peer->list);
	kfree(peer);
#endif
	sc->peer_mapped = 1;
	wakeup(&sc->peer_mapped);
exit:
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return;
}
14356 
/*
 * Dispatch target-to-host (T2H) HTT messages received on the HTT data
 * endpoint. The mbuf is consumed in all cases.
 */
void
qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_dp *dp = &sc->dp;
	struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
	/* The message type lives in the first 32-bit word of every message. */
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
	    *(uint32_t *)resp);
	uint16_t peer_id;
	uint8_t vdev_id;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
	uint16_t peer_mac_h16;
	uint16_t ast_hash;
	uint16_t hw_peer_id;

	DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* Record the target's HTT version and wake up the thread
		 * waiting on dp->htt_tgt_version_received. */
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
		    resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
		    resp->version_msg.version);
		dp->htt_tgt_version_received = 1;
		wakeup(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* Version 1 peer map: no AST hash or hardware peer ID. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		/* Version 2 peer map additionally carries the AST hash
		 * and the hardware peer ID. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
		    resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
		    hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
		    resp->peer_unmap_ev.info);
		qwx_peer_unmap_event(sc, peer_id);
		break;
#if 0
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
#endif
	default:
		printf("%s: htt event %d not handled\n", __func__, type);
		break;
	}

	m_freem(m);
}
14436 
14437 int
qwx_dp_htt_connect(struct qwx_dp * dp)14438 qwx_dp_htt_connect(struct qwx_dp *dp)
14439 {
14440 	struct qwx_htc_svc_conn_req conn_req;
14441 	struct qwx_htc_svc_conn_resp conn_resp;
14442 	int status;
14443 
14444 	memset(&conn_req, 0, sizeof(conn_req));
14445 	memset(&conn_resp, 0, sizeof(conn_resp));
14446 
14447 	conn_req.ep_ops.ep_tx_complete = qwx_dp_htt_htc_tx_complete;
14448 	conn_req.ep_ops.ep_rx_complete = qwx_dp_htt_htc_t2h_msg_handler;
14449 
14450 	/* connect to control service */
14451 	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
14452 
14453 	status = qwx_htc_connect_service(&dp->sc->htc, &conn_req, &conn_resp);
14454 
14455 	if (status)
14456 		return status;
14457 
14458 	dp->eid = conn_resp.eid;
14459 
14460 	return 0;
14461 }
14462 
14463 void
qwx_dp_pdev_reo_cleanup(struct qwx_softc * sc)14464 qwx_dp_pdev_reo_cleanup(struct qwx_softc *sc)
14465 {
14466 	struct qwx_dp *dp = &sc->dp;
14467 	int i;
14468 
14469 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
14470 		qwx_dp_srng_cleanup(sc, &dp->reo_dst_ring[i]);
14471 }
14472 
14473 int
qwx_dp_pdev_reo_setup(struct qwx_softc * sc)14474 qwx_dp_pdev_reo_setup(struct qwx_softc *sc)
14475 {
14476 	struct qwx_dp *dp = &sc->dp;
14477 	int ret;
14478 	int i;
14479 
14480 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
14481 		ret = qwx_dp_srng_setup(sc, &dp->reo_dst_ring[i],
14482 		    HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE);
14483 		if (ret) {
14484 			printf("%s: failed to setup reo_dst_ring\n", __func__);
14485 			qwx_dp_pdev_reo_cleanup(sc);
14486 			return ret;
14487 		}
14488 	}
14489 
14490 	return 0;
14491 }
14492 
14493 void
qwx_dp_rx_pdev_srng_free(struct qwx_softc * sc,int mac_id)14494 qwx_dp_rx_pdev_srng_free(struct qwx_softc *sc, int mac_id)
14495 {
14496 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14497 	int i;
14498 
14499 	qwx_dp_srng_cleanup(sc, &dp->rx_refill_buf_ring.refill_buf_ring);
14500 
14501 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14502 		if (sc->hw_params.rx_mac_buf_ring)
14503 			qwx_dp_srng_cleanup(sc, &dp->rx_mac_buf_ring[i]);
14504 
14505 		qwx_dp_srng_cleanup(sc, &dp->rxdma_err_dst_ring[i]);
14506 		qwx_dp_srng_cleanup(sc,
14507 		    &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
14508 	}
14509 
14510 	qwx_dp_srng_cleanup(sc, &dp->rxdma_mon_buf_ring.refill_buf_ring);
14511 }
14512 
/*
 * Allocate all rx SRNG rings for this pdev: the common refill ring,
 * optional per-rxdma mac buffer rings, and per-rxdma error destination
 * rings. Monitor rings are mostly still disabled (see #if 0 blocks).
 */
int
qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
#if 0
	struct dp_srng *srng = NULL;
#endif
	int i;
	int ret;

	ret = qwx_dp_srng_setup(sc, &dp->rx_refill_buf_ring.refill_buf_ring,
	    HAL_RXDMA_BUF, 0, dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		printf("%s: failed to setup rx_refill_buf_ring\n",
		    sc->sc_dev.dv_xname);
		return ret;
	}

	/* Chips where the host fills rx buffers to firmware need one
	 * mac buffer ring per rxdma instance. */
	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ret = qwx_dp_srng_setup(sc, &dp->rx_mac_buf_ring[i],
			    HAL_RXDMA_BUF, 1, dp->mac_id + i, 1024);
			if (ret) {
				printf("%s: failed to setup "
				    "rx_mac_buf_ring %d\n",
				    sc->sc_dev.dv_xname, i);
				return ret;
			}
		}
	}

	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_srng_setup(sc, &dp->rxdma_err_dst_ring[i],
		    HAL_RXDMA_DST, 0, dp->mac_id + i,
		    DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup rxdma_err_dst_ring %d\n",
			   sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = qwx_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0,
		    dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "rx_mon_status_refill_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#endif
	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!sc->hw_params.rxdma1_enable) {
		timeout_set(&sc->mon_reap_timer, qwx_dp_service_mon_ring, sc);
		return 0;
	}
#if 0
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}
#endif
	return 0;
}
14607 
14608 void
qwx_dp_rxdma_buf_ring_free(struct qwx_softc * sc,struct dp_rxdma_ring * rx_ring)14609 qwx_dp_rxdma_buf_ring_free(struct qwx_softc *sc, struct dp_rxdma_ring *rx_ring)
14610 {
14611 	int i;
14612 
14613 	for (i = 0; i < rx_ring->bufs_max; i++) {
14614 		struct qwx_rx_data *rx_data = &rx_ring->rx_data[i];
14615 
14616 		if (rx_data->map == NULL)
14617 			continue;
14618 
14619 		if (rx_data->m) {
14620 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
14621 			m_free(rx_data->m);
14622 			rx_data->m = NULL;
14623 		}
14624 
14625 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
14626 		rx_data->map = NULL;
14627 	}
14628 
14629 	free(rx_ring->rx_data, M_DEVBUF,
14630 	    sizeof(rx_ring->rx_data[0]) * rx_ring->bufs_max);
14631 	rx_ring->rx_data = NULL;
14632 	rx_ring->bufs_max = 0;
14633 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14634 }
14635 
14636 void
qwx_dp_rxdma_pdev_buf_free(struct qwx_softc * sc,int mac_id)14637 qwx_dp_rxdma_pdev_buf_free(struct qwx_softc *sc, int mac_id)
14638 {
14639 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14640 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
14641 	int i;
14642 
14643 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14644 
14645 	rx_ring = &dp->rxdma_mon_buf_ring;
14646 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14647 
14648 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
14649 		rx_ring = &dp->rx_mon_status_refill_ring[i];
14650 		qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
14651 	}
14652 }
14653 
14654 void
qwx_hal_rx_buf_addr_info_set(void * desc,uint64_t paddr,uint32_t cookie,uint8_t manager)14655 qwx_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie,
14656     uint8_t manager)
14657 {
14658 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
14659 	uint32_t paddr_lo, paddr_hi;
14660 
14661 	paddr_lo = paddr & 0xffffffff;
14662 	paddr_hi = paddr >> 32;
14663 	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
14664 	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
14665 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
14666 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
14667 }
14668 
14669 void
qwx_hal_rx_buf_addr_info_get(void * desc,uint64_t * paddr,uint32_t * cookie,uint8_t * rbm)14670 qwx_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie,
14671     uint8_t *rbm)
14672 {
14673 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
14674 
14675 	*paddr = (((uint64_t)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
14676 	    binfo->info1)) << 32) |
14677 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
14678 	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
14679 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
14680 }
14681 
14682 int
qwx_next_free_rxbuf_idx(struct dp_rxdma_ring * rx_ring)14683 qwx_next_free_rxbuf_idx(struct dp_rxdma_ring *rx_ring)
14684 {
14685 	int i, idx;
14686 
14687 	for (i = 0; i < nitems(rx_ring->freemap); i++) {
14688 		idx = ffs(rx_ring->freemap[i]);
14689 		if (idx > 0)
14690 			return ((idx - 1) + (i * 8));
14691 	}
14692 
14693 	return -1;
14694 }
14695 
/*
 * Post fresh rx buffers to a RXDMA refill ring. Allocates cluster
 * mbufs, maps them for DMA, and hands their physical addresses to the
 * hardware via the ring's source descriptors. Each buffer's slot index
 * is encoded, together with mac_id, into a cookie so completions can be
 * matched back to the slot.
 */
int
qwx_dp_rxbufs_replenish(struct qwx_softc *sc, int mac_id,
    struct dp_rxdma_ring *rx_ring, int req_entries,
    enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	uint32_t *desc;
	struct mbuf *m;
	int num_free;
	int num_remain;
	int ret, idx;
	uint32_t cookie;
	uint64_t paddr;
	struct qwx_rx_data *rx_data;

	req_entries = MIN(req_entries, rx_ring->bufs_max);

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* With no explicit request, top up once the ring runs low. */
	num_free = qwx_hal_srng_src_num_free(sc, srng, 1);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = MIN(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		const size_t size = DP_RX_BUFFER_SIZE;

		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto fail_free_mbuf;

		/* Attach cluster storage large enough for one rx buffer. */
		if (size <= MCLBYTES)
			MCLGET(m, M_DONTWAIT);
		else
			MCLGETL(m, M_DONTWAIT, size);
		if ((m->m_flags & M_EXT) == 0)
			goto fail_free_mbuf;

		m->m_len = m->m_pkthdr.len = size;

		/* Claim a free buffer slot. */
		idx = qwx_next_free_rxbuf_idx(rx_ring);
		if (idx == -1)
			goto fail_free_mbuf;

		/* DMA maps are created lazily and reused across refills. */
		rx_data = &rx_ring->rx_data[idx];
		if (rx_data->map == NULL) {
			ret = bus_dmamap_create(sc->sc_dmat, size, 1,
			    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
			if (ret)
				goto fail_free_mbuf;
		}

		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (ret) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, ret);
			goto fail_free_mbuf;
		}

		desc = qwx_hal_srng_src_get_next_entry(sc, srng);
		if (!desc)
			goto fail_dma_unmap;

		/* Slot now owns the mbuf. */
		rx_data->m = m;
		m = NULL;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, idx);

		/* Mark the slot as in use only after the descriptor
		 * was obtained; failed iterations leave it free. */
		clrbit(rx_ring->freemap, idx);
		num_remain--;

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwx_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return 0;

fail_dma_unmap:
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
fail_free_mbuf:
	m_free(m);

	/* Commit whatever was successfully posted before the failure. */
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ENOBUFS;
}
14796 
14797 int
qwx_dp_rxdma_ring_buf_setup(struct qwx_softc * sc,struct dp_rxdma_ring * rx_ring,uint32_t ringtype)14798 qwx_dp_rxdma_ring_buf_setup(struct qwx_softc *sc,
14799     struct dp_rxdma_ring *rx_ring, uint32_t ringtype)
14800 {
14801 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
14802 	int num_entries;
14803 
14804 	num_entries = rx_ring->refill_buf_ring.size /
14805 	    qwx_hal_srng_get_entrysize(sc, ringtype);
14806 
14807 	KASSERT(rx_ring->rx_data == NULL);
14808 	rx_ring->rx_data = mallocarray(num_entries, sizeof(rx_ring->rx_data[0]),
14809 	    M_DEVBUF, M_NOWAIT | M_ZERO);
14810 	if (rx_ring->rx_data == NULL)
14811 		return ENOMEM;
14812 
14813 	rx_ring->bufs_max = num_entries;
14814 	memset(rx_ring->freemap, 0xff, sizeof(rx_ring->freemap));
14815 
14816 	return qwx_dp_rxbufs_replenish(sc, dp->mac_id, rx_ring, num_entries,
14817 	    sc->hw_params.hal_params->rx_buf_rbm);
14818 }
14819 
/*
 * Populate this pdev's RXDMA rings with buffers: always the common
 * refill ring, plus the monitor buffer ring when rxdma1 is present.
 * Monitor status rings remain disabled (see #if 0 block).
 */
int
qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int ret;
#if 0
	int i;
#endif

	rx_ring = &dp->rx_refill_buf_ring;
	ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF);
	if (ret)
		return ret;

	if (sc->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
		    HAL_RXDMA_MONITOR_BUF);
		if (ret)
			return ret;
	}
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
		    HAL_RXDMA_MONITOR_STATUS);
		if (ret)
			return ret;
	}
#endif
	return 0;
}
14853 
void
qwx_dp_rx_pdev_free(struct qwx_softc *sc, int mac_id)
{
	/* Tear down the pdev's rx rings, then release their buffers. */
	qwx_dp_rx_pdev_srng_free(sc, mac_id);
	qwx_dp_rxdma_pdev_buf_free(sc, mac_id);
}
14860 
14861 bus_addr_t
qwx_hal_srng_get_hp_addr(struct qwx_softc * sc,struct hal_srng * srng)14862 qwx_hal_srng_get_hp_addr(struct qwx_softc *sc, struct hal_srng *srng)
14863 {
14864 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14865 		return 0;
14866 
14867 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14868 		return sc->hal.wrp.paddr +
14869 		    ((unsigned long)srng->u.src_ring.hp_addr -
14870 		    (unsigned long)sc->hal.wrp.vaddr);
14871 	} else {
14872 		return sc->hal.rdp.paddr +
14873 		    ((unsigned long)srng->u.dst_ring.hp_addr -
14874 		    (unsigned long)sc->hal.rdp.vaddr);
14875 	}
14876 }
14877 
14878 bus_addr_t
qwx_hal_srng_get_tp_addr(struct qwx_softc * sc,struct hal_srng * srng)14879 qwx_hal_srng_get_tp_addr(struct qwx_softc *sc, struct hal_srng *srng)
14880 {
14881 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
14882 		return 0;
14883 
14884 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
14885 		return sc->hal.rdp.paddr +
14886 		    ((unsigned long)srng->u.src_ring.tp_addr -
14887 		    (unsigned long)sc->hal.rdp.vaddr);
14888 	} else {
14889 		return sc->hal.wrp.paddr +
14890 		    ((unsigned long)srng->u.dst_ring.tp_addr -
14891 		    (unsigned long)sc->hal.wrp.vaddr);
14892 	}
14893 }
14894 
/*
 * Map a HAL ring type (and ring ID) to the HTT ring ID and HTT ring
 * type that firmware expects in SRNG setup commands. Returns EINVAL
 * for ring types/IDs that have no HTT equivalent.
 */
int
qwx_dp_tx_get_ring_id_type(struct qwx_softc *sc, int mac_id, uint32_t ring_id,
    enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type,
    enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!sc->hw_params.rx_mac_buf_ring) {
			/* Only the two per-LMAC SW2RXDMA buffer rings
			 * are valid here. */
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
			    lmac_ring_id_offset) ||
			    ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
			    lmac_ring_id_offset)))
				return EINVAL;
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		printf("%s: Unsupported ring type in DP :%d\n",
		    sc->sc_dev.dv_xname, ring_type);
		return EINVAL;
	}

	return 0;
}
14955 
/*
 * Send an HTT SRING_SETUP command describing one SRNG ring (base
 * address, entry size, head/tail pointer locations, MSI target and
 * interrupt thresholds) to firmware so it can attach to the ring.
 * Returns 0 on success or an errno; the command mbuf is freed on error.
 */
int
qwx_dp_tx_htt_srng_setup(struct qwx_softc *sc, uint32_t ring_id, int mac_id,
    enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	uint32_t ring_entry_sz;
	uint64_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwx_htc_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	hp_addr = qwx_hal_srng_get_hp_addr(sc, srng);
	tp_addr = qwx_hal_srng_get_tp_addr(sc, srng);

	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload follows the HTC header in the mbuf. */
	cmd = (struct htt_srng_setup_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_SRING_SETUP);
	/* Rings firmware touches directly carry a hardware mac ID. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
	    htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (uint64_t)params.ring_base_paddr >>
	    HAL_ADDR_MSB_REG_SHIFT;

	ring_entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);

	/* Entry and ring sizes are expressed in 32-bit words. */
	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
	    ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
	    params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	/* Physical locations of the shared head/tail pointers. */
	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = hp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = tp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
	cmd->ring_msi_addr_hi = 0;
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
	    params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
	    params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
		    HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
		    params.low_threshold);
	}

	DNPRINTF(QWX_D_HTT, "%s: htt srng setup msi_addr_lo 0x%x "
	    "msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d "
	    "intr_info 0x%x flags 0x%x\n", __func__, cmd->ring_msi_addr_lo,
	    cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type,
	    cmd->intr_info, cmd->info2);

	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
15063 
15064 int
qwx_dp_tx_htt_h2t_ppdu_stats_req(struct qwx_softc * sc,uint32_t mask,uint8_t pdev_id)15065 qwx_dp_tx_htt_h2t_ppdu_stats_req(struct qwx_softc *sc, uint32_t mask,
15066     uint8_t pdev_id)
15067 {
15068 	struct qwx_dp *dp = &sc->dp;
15069 	struct mbuf *m;
15070 	struct htt_ppdu_stats_cfg_cmd *cmd;
15071 	int len = sizeof(*cmd);
15072 	uint8_t pdev_mask;
15073 	int ret;
15074 	int i;
15075 
15076 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
15077 		m = qwx_htc_alloc_mbuf(len);
15078 		if (!m)
15079 			return ENOMEM;
15080 
15081 		cmd = (struct htt_ppdu_stats_cfg_cmd *)(mtod(m, uint8_t *) +
15082 		    sizeof(struct ath11k_htc_hdr));
15083 		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
15084 				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
15085 
15086 		pdev_mask = 1 << (pdev_id + i);
15087 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
15088 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
15089 		    mask);
15090 
15091 		ret = qwx_htc_send(&sc->htc, dp->eid, m);
15092 		if (ret) {
15093 			m_freem(m);
15094 			return ret;
15095 		}
15096 	}
15097 
15098 	return 0;
15099 }
15100 
int
qwx_dp_tx_htt_rx_filter_setup(struct qwx_softc *sc, uint32_t ring_id,
    int mac_id, enum hal_ring_type ring_type, size_t rx_buf_size,
    struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	/*
	 * Configure which rx packet types and TLVs the firmware should
	 * deliver on the given SRNG by sending an HTT "rx ring selection"
	 * command. Returns 0 on success or an errno value.
	 */
	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	/* Translate the HAL ring into the HTT ring id/type namespace. */
	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload begins right after the HTC header. */
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	/* Rings shared with hardware use the SW2HW MAC id encoding. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING) {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	} else {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    mac_id);
	}
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
	    htt_ring_id);
	/* Mirror the ring's status/data swap flags into the command. */
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
	    rx_buf_size);
	/* Copy the caller-provided packet-type and TLV filter settings. */
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
15167 
int
qwx_dp_rx_pdev_alloc(struct qwx_softc *sc, int mac_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	uint32_t ring_id;
	int i;
	int ret;

	/*
	 * Allocate per-pdev rx data path state: SRNGs, rxdma buffers,
	 * and firmware-side ring configuration. Order matters: rings
	 * must exist and be populated before the firmware is told
	 * about them via HTT srng setup commands.
	 */
	ret = qwx_dp_rx_pdev_srng_alloc(sc);
	if (ret) {
		printf("%s: failed to setup rx srngs: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_dp_rxdma_pdev_buf_setup(sc);
	if (ret) {
		printf("%s: failed to setup rxdma ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Announce the refill buffer ring to the firmware. */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		printf("%s: failed to configure rx_refill_buf_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Some chips have a separate rx buffer ring per MAC. */
	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = qwx_dp_tx_htt_srng_setup(sc, ring_id,
			    mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				printf("%s: failed to configure "
				    "rx_mac_buf_ring%d: %d\n",
				    sc->sc_dev.dv_xname, i, ret);
				return ret;
			}
		}
	}

	/* One rxdma error destination ring per MAC. */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id + i,
		    HAL_RXDMA_DST);
		if (ret) {
			printf("%s: failed to configure "
			    "rxdma_err_dest_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}

	/* Monitor-mode rings are only relevant with rxdma1. */
	if (!sc->hw_params.rxdma1_enable)
		goto config_refill_ring;
#if 0
	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
#endif
config_refill_ring:
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_tx_htt_srng_setup(sc,
		    dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id,
		    mac_id + i, HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			printf("%s: failed to configure "
			    "mon_status_refill_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
15268 
15269 void
qwx_dp_pdev_free(struct qwx_softc * sc)15270 qwx_dp_pdev_free(struct qwx_softc *sc)
15271 {
15272 	int i;
15273 
15274 	timeout_del(&sc->mon_reap_timer);
15275 
15276 	for (i = 0; i < sc->num_radios; i++)
15277 		qwx_dp_rx_pdev_free(sc, i);
15278 }
15279 
15280 int
qwx_dp_pdev_alloc(struct qwx_softc * sc)15281 qwx_dp_pdev_alloc(struct qwx_softc *sc)
15282 {
15283 	int ret;
15284 	int i;
15285 
15286 	for (i = 0; i < sc->num_radios; i++) {
15287 		ret = qwx_dp_rx_pdev_alloc(sc, i);
15288 		if (ret) {
15289 			printf("%s: failed to allocate pdev rx "
15290 			    "for pdev_id %d\n", sc->sc_dev.dv_xname, i);
15291 			goto err;
15292 		}
15293 	}
15294 
15295 	return 0;
15296 
15297 err:
15298 	qwx_dp_pdev_free(sc);
15299 
15300 	return ret;
15301 }
15302 
int
qwx_dp_tx_htt_h2t_ver_req_msg(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct mbuf *m;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	/*
	 * Ask the firmware for its HTT version and wait for the reply.
	 * The HTT event handler sets dp->htt_tgt_version_received and
	 * wakes us up. Returns 0 on success, ENOMEM/ETIMEDOUT/ENOTSUP
	 * or an HTC send error otherwise.
	 */
	dp->htt_tgt_version_received = 0;

	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	/* The command payload begins right after the HTC header. */
	cmd = (struct htt_ver_req_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
	    HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = qwx_htc_send(&sc->htc, dp->eid, m);
	if (ret) {
		m_freem(m);
		return ret;
	}

	/* Wait up to 3 seconds for the version response. */
	while (!dp->htt_tgt_version_received) {
		ret = tsleep_nsec(&dp->htt_tgt_version_received, 0,
		    "qwxtgtver", SEC_TO_NSEC(3));
		if (ret)
			return ETIMEDOUT;
	}

	/* Only one major HTT version is supported by this driver. */
	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		printf("%s: unsupported htt major version %d "
		    "supported version is %d\n", __func__,
		    dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return ENOTSUP;
	}

	return 0;
}
15345 
15346 void
qwx_dp_update_vdev_search(struct qwx_softc * sc,struct qwx_vif * arvif)15347 qwx_dp_update_vdev_search(struct qwx_softc *sc, struct qwx_vif *arvif)
15348 {
15349 	 /* When v2_map_support is true:for STA mode, enable address
15350 	  * search index, tcl uses ast_hash value in the descriptor.
15351 	  * When v2_map_support is false: for STA mode, don't enable
15352 	  * address search index.
15353 	  */
15354 	switch (arvif->vdev_type) {
15355 	case WMI_VDEV_TYPE_STA:
15356 		if (sc->hw_params.htt_peer_map_v2) {
15357 			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
15358 			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
15359 		} else {
15360 			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
15361 			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
15362 		}
15363 		break;
15364 	case WMI_VDEV_TYPE_AP:
15365 	case WMI_VDEV_TYPE_IBSS:
15366 		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
15367 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
15368 		break;
15369 	case WMI_VDEV_TYPE_MONITOR:
15370 	default:
15371 		return;
15372 	}
15373 }
15374 
15375 void
qwx_dp_vdev_tx_attach(struct qwx_softc * sc,struct qwx_pdev * pdev,struct qwx_vif * arvif)15376 qwx_dp_vdev_tx_attach(struct qwx_softc *sc, struct qwx_pdev *pdev,
15377     struct qwx_vif *arvif)
15378 {
15379 	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
15380 	    FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID, arvif->vdev_id) |
15381 	    FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID, pdev->pdev_id);
15382 
15383 	/* set HTT extension valid bit to 0 by default */
15384 	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
15385 
15386 	qwx_dp_update_vdev_search(sc, arvif);
15387 }
15388 
15389 void
qwx_dp_tx_status_parse(struct qwx_softc * sc,struct hal_wbm_release_ring * desc,struct hal_tx_status * ts)15390 qwx_dp_tx_status_parse(struct qwx_softc *sc, struct hal_wbm_release_ring *desc,
15391     struct hal_tx_status *ts)
15392 {
15393 	ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15394 	    desc->info0);
15395 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
15396 	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
15397 		return;
15398 
15399 	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
15400 		return;
15401 
15402 	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
15403 	    desc->info0);
15404 	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
15405 	    desc->info1);
15406 	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
15407 	    desc->info1);
15408 	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
15409 	    desc->info2);
15410 	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
15411 	    ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
15412 	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
15413 	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
15414 	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
15415 		ts->rate_stats = desc->rate_stats.info0;
15416 	else
15417 		ts->rate_stats = 0;
15418 }
15419 
15420 void
qwx_dp_tx_free_txbuf(struct qwx_softc * sc,int msdu_id,struct dp_tx_ring * tx_ring)15421 qwx_dp_tx_free_txbuf(struct qwx_softc *sc, int msdu_id,
15422     struct dp_tx_ring *tx_ring)
15423 {
15424 	struct qwx_tx_data *tx_data;
15425 
15426 	if (msdu_id >= sc->hw_params.tx_ring_size)
15427 		return;
15428 
15429 	tx_data = &tx_ring->data[msdu_id];
15430 
15431 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
15432 	m_freem(tx_data->m);
15433 	tx_data->m = NULL;
15434 
15435 	if (tx_ring->queued > 0)
15436 		tx_ring->queued--;
15437 }
15438 
15439 void
qwx_dp_tx_htt_tx_complete_buf(struct qwx_softc * sc,struct dp_tx_ring * tx_ring,struct qwx_dp_htt_wbm_tx_status * ts)15440 qwx_dp_tx_htt_tx_complete_buf(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
15441     struct qwx_dp_htt_wbm_tx_status *ts)
15442 {
15443 	/* Not using Tx status info for now. Just free the buffer. */
15444 	qwx_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
15445 }
15446 
15447 void
qwx_dp_tx_process_htt_tx_complete(struct qwx_softc * sc,void * desc,uint8_t mac_id,uint32_t msdu_id,struct dp_tx_ring * tx_ring)15448 qwx_dp_tx_process_htt_tx_complete(struct qwx_softc *sc, void *desc,
15449     uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
15450 {
15451 	struct htt_tx_wbm_completion *status_desc;
15452 	struct qwx_dp_htt_wbm_tx_status ts = {0};
15453 	enum hal_wbm_htt_tx_comp_status wbm_status;
15454 
15455 	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
15456 
15457 	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
15458 	    status_desc->info0);
15459 
15460 	switch (wbm_status) {
15461 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
15462 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
15463 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
15464 		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
15465 		ts.msdu_id = msdu_id;
15466 		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
15467 		    status_desc->info1);
15468 
15469 		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
15470 			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
15471 			    status_desc->info2);
15472 		else
15473 			ts.peer_id = HTT_INVALID_PEER_ID;
15474 
15475 		qwx_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
15476 		break;
15477 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
15478 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
15479 		qwx_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
15480 		break;
15481 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
15482 		/* This event is to be handled only when the driver decides to
15483 		 * use WDS offload functionality.
15484 		 */
15485 		break;
15486 	default:
15487 		printf("%s: Unknown htt tx status %d\n",
15488 		    sc->sc_dev.dv_xname, wbm_status);
15489 		break;
15490 	}
15491 }
15492 
15493 int
qwx_mac_hw_ratecode_to_legacy_rate(struct ieee80211_node * ni,uint8_t hw_rc,uint8_t preamble,uint8_t * rateidx,uint16_t * rate)15494 qwx_mac_hw_ratecode_to_legacy_rate(struct ieee80211_node *ni, uint8_t hw_rc,
15495     uint8_t preamble, uint8_t *rateidx, uint16_t *rate)
15496 {
15497 	struct ieee80211_rateset *rs = &ni->ni_rates;
15498 	int i;
15499 
15500 	if (preamble == WMI_RATE_PREAMBLE_CCK) {
15501 		hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
15502 		switch (hw_rc) {
15503 			case ATH11K_HW_RATE_CCK_LP_1M:
15504 				*rate = 2;
15505 				break;
15506 			case ATH11K_HW_RATE_CCK_LP_2M:
15507 			case ATH11K_HW_RATE_CCK_SP_2M:
15508 				*rate = 4;
15509 				break;
15510 			case ATH11K_HW_RATE_CCK_LP_5_5M:
15511 			case ATH11K_HW_RATE_CCK_SP_5_5M:
15512 				*rate = 11;
15513 				break;
15514 			case ATH11K_HW_RATE_CCK_LP_11M:
15515 			case ATH11K_HW_RATE_CCK_SP_11M:
15516 				*rate = 22;
15517 				break;
15518 			default:
15519 				return EINVAL;
15520 		}
15521 	} else {
15522 		switch (hw_rc) {
15523 			case ATH11K_HW_RATE_OFDM_6M:
15524 				*rate = 12;
15525 				break;
15526 			case ATH11K_HW_RATE_OFDM_9M:
15527 				*rate = 18;
15528 				break;
15529 			case ATH11K_HW_RATE_OFDM_12M:
15530 				*rate = 24;
15531 				break;
15532 			case ATH11K_HW_RATE_OFDM_18M:
15533 				*rate = 36;
15534 				break;
15535 			case ATH11K_HW_RATE_OFDM_24M:
15536 				*rate = 48;
15537 				break;
15538 			case ATH11K_HW_RATE_OFDM_36M:
15539 				*rate = 72;
15540 				break;
15541 			case ATH11K_HW_RATE_OFDM_48M:
15542 				*rate = 96;
15543 				break;
15544 			case ATH11K_HW_RATE_OFDM_54M:
15545 				*rate = 104;
15546 				break;
15547 			default:
15548 				return EINVAL;
15549 		}
15550 	}
15551 
15552 	for (i = 0; i < rs->rs_nrates; i++) {
15553 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
15554 		if (rval == *rate) {
15555 			*rateidx = i;
15556 			return 0;
15557 		}
15558 	}
15559 
15560 	return EINVAL;
15561 }
15562 
15563 void
qwx_dp_tx_complete_msdu(struct qwx_softc * sc,struct dp_tx_ring * tx_ring,uint32_t msdu_id,struct hal_tx_status * ts)15564 qwx_dp_tx_complete_msdu(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
15565     uint32_t msdu_id, struct hal_tx_status *ts)
15566 {
15567 	struct ieee80211com *ic = &sc->sc_ic;
15568 	struct qwx_tx_data *tx_data = &tx_ring->data[msdu_id];
15569 	uint8_t pkt_type, mcs, rateidx;
15570 	uint16_t rate;
15571 
15572 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) {
15573 		/* Must not happen */
15574 		return;
15575 	}
15576 
15577 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
15578 	m_freem(tx_data->m);
15579 	tx_data->m = NULL;
15580 
15581 	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, ts->rate_stats);
15582 	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats);
15583 	if (qwx_mac_hw_ratecode_to_legacy_rate(tx_data->ni, mcs, pkt_type,
15584 	    &rateidx, &rate) == 0)
15585 		tx_data->ni->ni_txrate = rateidx;
15586 
15587 	ieee80211_release_node(ic, tx_data->ni);
15588 	tx_data->ni = NULL;
15589 
15590 	if (tx_ring->queued > 0)
15591 		tx_ring->queued--;
15592 }
15593 
/* Advance an index in the tx completion FIFO, wrapping at ring size. */
#define QWX_TX_COMPL_NEXT(x)	(((x) + 1) % DP_TX_COMP_RING_SIZE)

int
qwx_dp_tx_completion_handler(struct qwx_softc *sc, int ring_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_dp *dp = &sc->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &sc->hal.srng_list[hal_ring_id];
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	uint32_t *desc;
	uint32_t msdu_id;
	uint8_t mac_id;
#ifdef notyet
	spin_lock_bh(&status_ring->lock);
#endif
	/*
	 * Phase 1: copy completion descriptors out of the hardware
	 * status ring into the driver's local head/tail FIFO while the
	 * ring is being accessed. The FIFO is full when advancing head
	 * would reach tail.
	 */
	qwx_hal_srng_access_begin(sc, status_ring);

	while ((QWX_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = qwx_hal_srng_dst_get_next_entry(sc, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
		    sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
		    QWX_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}
#if 0
	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}
#endif
	qwx_hal_srng_access_end(sc, status_ring);
#ifdef notyet
	spin_unlock_bh(&status_ring->lock);
#endif
	/*
	 * Phase 2: drain the local FIFO, parsing each status and
	 * completing the corresponding msdu. Entries with out-of-range
	 * mac or msdu ids are skipped; firmware-released buffers take
	 * the HTT completion path instead.
	 */
	while (QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
	    tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		uint32_t desc_id;

		tx_ring->tx_status_tail =
		   QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		qwx_dp_tx_status_parse(sc, tx_status, &ts);

		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		if (mac_id >= MAX_RADIOS)
			continue;
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
		if (msdu_id >= sc->hw_params.tx_ring_size)
			continue;

		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			qwx_dp_tx_process_htt_tx_complete(sc,
			    (void *)tx_status, mac_id, msdu_id, tx_ring);
			continue;
		}
#if 0
		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);
		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);
#endif
		qwx_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
	}

	/* Restart transmission if the ring drained below the full mark. */
	if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
		sc->qfullmsk &= ~(1 << ring_id);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			(*ifp->if_start)(ifp);
		}
	}

	return 0;
}
15687 
15688 void
qwx_hal_rx_reo_ent_paddr_get(struct qwx_softc * sc,void * desc,uint64_t * paddr,uint32_t * desc_bank)15689 qwx_hal_rx_reo_ent_paddr_get(struct qwx_softc *sc, void *desc, uint64_t *paddr,
15690     uint32_t *desc_bank)
15691 {
15692 	struct ath11k_buffer_addr *buff_addr = desc;
15693 
15694 	*paddr = ((uint64_t)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
15695 	    buff_addr->info1)) << 32) |
15696 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
15697 
15698 	*desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
15699 }
15700 
15701 int
qwx_hal_desc_reo_parse_err(struct qwx_softc * sc,uint32_t * rx_desc,uint64_t * paddr,uint32_t * desc_bank)15702 qwx_hal_desc_reo_parse_err(struct qwx_softc *sc, uint32_t *rx_desc,
15703     uint64_t *paddr, uint32_t *desc_bank)
15704 {
15705 	struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
15706 	enum hal_reo_dest_ring_push_reason push_reason;
15707 	enum hal_reo_dest_ring_error_code err_code;
15708 
15709 	push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
15710 	    desc->info0);
15711 	err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
15712 	    desc->info0);
15713 #if 0
15714 	ab->soc_stats.reo_error[err_code]++;
15715 #endif
15716 	if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
15717 	    push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
15718 		printf("%s: expected error push reason code, received %d\n",
15719 		    sc->sc_dev.dv_xname, push_reason);
15720 		return EINVAL;
15721 	}
15722 
15723 	if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
15724 	    HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
15725 		printf("%s: expected buffer type link_desc",
15726 		    sc->sc_dev.dv_xname);
15727 		return EINVAL;
15728 	}
15729 
15730 	qwx_hal_rx_reo_ent_paddr_get(sc, rx_desc, paddr, desc_bank);
15731 
15732 	return 0;
15733 }
15734 
15735 void
qwx_hal_rx_msdu_link_info_get(void * link_desc,uint32_t * num_msdus,uint32_t * msdu_cookies,enum hal_rx_buf_return_buf_manager * rbm)15736 qwx_hal_rx_msdu_link_info_get(void *link_desc, uint32_t *num_msdus,
15737     uint32_t *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm)
15738 {
15739 	struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
15740 	struct hal_rx_msdu_details *msdu;
15741 	int i;
15742 
15743 	*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
15744 
15745 	msdu = &link->msdu_link[0];
15746 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
15747 	    msdu->buf_addr_info.info1);
15748 
15749 	for (i = 0; i < *num_msdus; i++) {
15750 		msdu = &link->msdu_link[i];
15751 
15752 		if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
15753 		    msdu->buf_addr_info.info0)) {
15754 			*num_msdus = i;
15755 			break;
15756 		}
15757 		*msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
15758 		    msdu->buf_addr_info.info1);
15759 		msdu_cookies++;
15760 	}
15761 }
15762 
15763 void
qwx_hal_rx_msdu_link_desc_set(struct qwx_softc * sc,void * desc,void * link_desc,enum hal_wbm_rel_bm_act action)15764 qwx_hal_rx_msdu_link_desc_set(struct qwx_softc *sc, void *desc,
15765     void *link_desc, enum hal_wbm_rel_bm_act action)
15766 {
15767 	struct hal_wbm_release_ring *dst_desc = desc;
15768 	struct hal_wbm_release_ring *src_desc = link_desc;
15769 
15770 	dst_desc->buf_addr_info = src_desc->buf_addr_info;
15771 	dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15772 	    HAL_WBM_REL_SRC_MODULE_SW) |
15773 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
15774 	    FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
15775 	    HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
15776 }
15777 
int
qwx_dp_rx_link_desc_return(struct qwx_softc *sc, uint32_t *link_desc,
    enum hal_wbm_rel_bm_act action)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	uint32_t *desc;
	int ret = 0;

	/*
	 * Return an rx MSDU link descriptor to the WBM descriptor
	 * release ring with the given buffer-manager action.
	 * Returns 0 on success or ENOBUFS if the ring is full.
	 */
	srng = &sc->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	desc = qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOBUFS;
		goto exit;
	}

	qwx_hal_rx_msdu_link_desc_set(sc, (void *)desc, (void *)link_desc,
	    action);

exit:
	/* access_end commits the ring pointer update to hardware. */
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
15809 
15810 int
qwx_dp_rx_frag_h_mpdu(struct qwx_softc * sc,struct mbuf * m,uint32_t * ring_desc)15811 qwx_dp_rx_frag_h_mpdu(struct qwx_softc *sc, struct mbuf *m,
15812     uint32_t *ring_desc)
15813 {
15814 	printf("%s: not implemented\n", __func__);
15815 	return ENOTSUP;
15816 }
15817 
/* Return the MSDU length via the chip-specific rx descriptor accessor. */
static inline uint16_t
qwx_dp_rx_h_msdu_start_msdu_len(struct qwx_softc *sc, struct hal_rx_desc *desc)
{
	return sc->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
15823 
void
qwx_dp_process_rx_err_buf(struct qwx_softc *sc, uint32_t *ring_desc,
    int buf_id, int drop)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	struct hal_rx_desc *rx_desc;
	uint16_t msdu_len;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/*
	 * Reclaim the rx buffer identified by buf_id from the refill
	 * ring after an rx error. 'drop' requests unconditional disposal.
	 */
	if (buf_id >= rx_ring->bufs_max || isset(rx_ring->freemap, buf_id))
		return;

	/* Take ownership of the mbuf and mark the ring slot free. */
	rx_data = &rx_ring->rx_data[buf_id];
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
	m = rx_data->m;
	rx_data->m = NULL;
	setbit(rx_ring->freemap, buf_id);

	if (drop) {
		m_freem(m);
		return;
	}

	/* Sanity-check the MSDU length reported in the rx descriptor. */
	rx_desc = mtod(m, struct hal_rx_desc *);
	msdu_len = qwx_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
		uint8_t *hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
#endif
		m_freem(m);
		return;
	}

	/*
	 * Fragment handling is a stub (always fails), so the link desc
	 * is returned to the idle list and the mbuf freed below.
	 * NOTE(review): if qwx_dp_rx_frag_h_mpdu() is ever implemented
	 * to consume the mbuf on success, the unconditional m_freem()
	 * below would become a double free — revisit then.
	 */
	if (qwx_dp_rx_frag_h_mpdu(sc, m, ring_desc)) {
		qwx_dp_rx_link_desc_return(sc, ring_desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	m_freem(m);
}
15872 
int
qwx_dp_process_rx_err(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	uint32_t desc_bank, num_msdus;
	struct hal_srng *srng;
	struct qwx_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	uint64_t paddr;
	uint32_t *desc;
	int is_frag;
	uint8_t drop = 0;

	/*
	 * Drain the REO exception ring: for each error descriptor,
	 * locate the MSDU link descriptor it points at, reclaim the
	 * associated rx buffers, and replenish the refill rings.
	 * Returns the total number of buffers reaped.
	 */
	tot_n_bufs_reaped = 0;

	dp = &sc->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &sc->hal.srng_list[reo_except->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		struct hal_reo_dest_ring *reo_desc =
		    (struct hal_reo_dest_ring *)desc;
#if 0
		ab->soc_stats.err_ring_pkts++;
#endif
		ret = qwx_hal_desc_reo_parse_err(sc, desc, &paddr, &desc_bank);
		if (ret) {
			printf("%s: failed to parse error reo desc %d\n",
			    sc->sc_dev.dv_xname, ret);
			continue;
		}
		/* Translate the link descriptor's bus address to a VA. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		    (paddr - link_desc_banks[desc_bank].paddr);
		qwx_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
#if 0
			ab->soc_stats.invalid_rbm++;
#endif
			printf("%s: invalid return buffer manager %d\n",
			    sc->sc_dev.dv_xname, rbm);
			qwx_dp_rx_link_desc_return(sc, desc,
			    HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 &
		    RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below,
		 * and drop msdu's indicated due to error reasons.
		 * NOTE(review): 'drop' is never reset to 0 between ring
		 * entries, so once set it also drops every subsequent
		 * fragment in this invocation — confirm this is intended.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = 1;
			/* Return the link desc back to wbm idle list */
			qwx_dp_rx_link_desc_return(sc, desc,
			   HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);

			/* NOTE(review): mac_id indexes n_bufs_reaped[]
			 * (MAX_RADIOS entries) without a visible bounds
			 * check here; presumably the cookie field width
			 * guarantees the range — verify. */
			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
			    msdu_cookies[i]);

			qwx_dp_process_rx_err_buf(sc, desc, buf_id, drop);
			n_bufs_reaped[mac_id]++;
			tot_n_bufs_reaped++;
		}
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	/* Give reaped buffers back to the refill ring per radio. */
	for (i = 0; i < sc->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwx_dp_rxbufs_replenish(sc, i, rx_ring, n_bufs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}

	ifp->if_ierrors += tot_n_bufs_reaped;

	return tot_n_bufs_reaped;
}
15979 
15980 int
qwx_hal_wbm_desc_parse_err(void * desc,struct hal_rx_wbm_rel_info * rel_info)15981 qwx_hal_wbm_desc_parse_err(void *desc, struct hal_rx_wbm_rel_info *rel_info)
15982 {
15983 	struct hal_wbm_release_ring *wbm_desc = desc;
15984 	enum hal_wbm_rel_desc_type type;
15985 	enum hal_wbm_rel_src_module rel_src;
15986 	enum hal_rx_buf_return_buf_manager ret_buf_mgr;
15987 
15988 	type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE, wbm_desc->info0);
15989 
15990 	/* We expect only WBM_REL buffer type */
15991 	if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU)
15992 		return -EINVAL;
15993 
15994 	rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
15995 	    wbm_desc->info0);
15996 	if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
15997 	    rel_src != HAL_WBM_REL_SRC_MODULE_REO)
15998 		return EINVAL;
15999 
16000 	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
16001 	    wbm_desc->buf_addr_info.info1);
16002 	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
16003 #if 0
16004 		ab->soc_stats.invalid_rbm++;
16005 #endif
16006 		return EINVAL;
16007 	}
16008 
16009 	rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
16010 	    wbm_desc->buf_addr_info.info1);
16011 	rel_info->err_rel_src = rel_src;
16012 	if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
16013 		rel_info->push_reason = FIELD_GET(
16014 		    HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON, wbm_desc->info0);
16015 		rel_info->err_code = FIELD_GET(
16016 		    HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE, wbm_desc->info0);
16017 	} else {
16018 		rel_info->push_reason = FIELD_GET(
16019 		    HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON, wbm_desc->info0);
16020 		rel_info->err_code = FIELD_GET(
16021 		    HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE, wbm_desc->info0);
16022 	}
16023 
16024 	rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
16025 	    wbm_desc->info2);
16026 	rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
16027 	    wbm_desc->info2);
16028 
16029 	return 0;
16030 }
16031 
/*
 * Handle an MSDU released with a REO "queue descriptor address zero"
 * error.  Not yet implemented; the non-zero return makes the caller
 * (qwx_dp_rx_h_reo_err) drop the frame.
 */
int
qwx_dp_rx_h_null_q_desc(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct qwx_rx_msdu_list *msdu_list)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
16039 
16040 int
qwx_dp_rx_h_reo_err(struct qwx_softc * sc,struct qwx_rx_msdu * msdu,struct qwx_rx_msdu_list * msdu_list)16041 qwx_dp_rx_h_reo_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16042     struct qwx_rx_msdu_list *msdu_list)
16043 {
16044 	int drop = 0;
16045 #if 0
16046 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
16047 #endif
16048 	switch (msdu->err_code) {
16049 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
16050 		if (qwx_dp_rx_h_null_q_desc(sc, msdu, msdu_list))
16051 			drop = 1;
16052 		break;
16053 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
16054 		/* TODO: Do not drop PN failed packets in the driver;
16055 		 * instead, it is good to drop such packets in mac80211
16056 		 * after incrementing the replay counters.
16057 		 */
16058 		/* fallthrough */
16059 	default:
16060 		/* TODO: Review other errors and process them to mac80211
16061 		 * as appropriate.
16062 		 */
16063 		drop = 1;
16064 		break;
16065 	}
16066 
16067 	return drop;
16068 }
16069 
16070 int
qwx_dp_rx_h_rxdma_err(struct qwx_softc * sc,struct qwx_rx_msdu * msdu)16071 qwx_dp_rx_h_rxdma_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu)
16072 {
16073 	struct ieee80211com *ic = &sc->sc_ic;
16074 	int drop = 0;
16075 #if 0
16076 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
16077 #endif
16078 	switch (msdu->err_code) {
16079 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
16080 		ic->ic_stats.is_rx_locmicfail++;
16081 		drop = 1;
16082 		break;
16083 	default:
16084 		/* TODO: Review other rxdma error code to check if anything is
16085 		 * worth reporting to mac80211
16086 		 */
16087 		drop = 1;
16088 		break;
16089 	}
16090 
16091 	return drop;
16092 }
16093 
16094 void
qwx_dp_rx_wbm_err(struct qwx_softc * sc,struct qwx_rx_msdu * msdu,struct qwx_rx_msdu_list * msdu_list)16095 qwx_dp_rx_wbm_err(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16096     struct qwx_rx_msdu_list *msdu_list)
16097 {
16098 	int drop = 1;
16099 
16100 	switch (msdu->err_rel_src) {
16101 	case HAL_WBM_REL_SRC_MODULE_REO:
16102 		drop = qwx_dp_rx_h_reo_err(sc, msdu, msdu_list);
16103 		break;
16104 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
16105 		drop = qwx_dp_rx_h_rxdma_err(sc, msdu);
16106 		break;
16107 	default:
16108 		/* msdu will get freed */
16109 		break;
16110 	}
16111 
16112 	if (drop) {
16113 		m_freem(msdu->m);
16114 		msdu->m = NULL;
16115 		return;
16116 	}
16117 
16118 	qwx_dp_rx_deliver_msdu(sc, msdu);
16119 }
16120 
16121 int
qwx_dp_rx_process_wbm_err(struct qwx_softc * sc)16122 qwx_dp_rx_process_wbm_err(struct qwx_softc *sc)
16123 {
16124 	struct ieee80211com *ic = &sc->sc_ic;
16125 	struct ifnet *ifp = &ic->ic_if;
16126 	struct qwx_dp *dp = &sc->dp;
16127 	struct dp_rxdma_ring *rx_ring;
16128 	struct hal_rx_wbm_rel_info err_info;
16129 	struct hal_srng *srng;
16130 	struct qwx_rx_msdu_list msdu_list[MAX_RADIOS];
16131 	struct qwx_rx_msdu *msdu;
16132 	struct mbuf *m;
16133 	struct qwx_rx_data *rx_data;
16134 	uint32_t *rx_desc;
16135 	int idx, mac_id;
16136 	int num_buffs_reaped[MAX_RADIOS] = {0};
16137 	int total_num_buffs_reaped = 0;
16138 	int ret, i;
16139 
16140 	for (i = 0; i < sc->num_radios; i++)
16141 		TAILQ_INIT(&msdu_list[i]);
16142 
16143 	srng = &sc->hal.srng_list[dp->rx_rel_ring.ring_id];
16144 #ifdef notyet
16145 	spin_lock_bh(&srng->lock);
16146 #endif
16147 	qwx_hal_srng_access_begin(sc, srng);
16148 
16149 	while ((rx_desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
16150 		ret = qwx_hal_wbm_desc_parse_err(rx_desc, &err_info);
16151 		if (ret) {
16152 			printf("%s: failed to parse rx error in wbm_rel "
16153 			    "ring desc %d\n", sc->sc_dev.dv_xname, ret);
16154 			continue;
16155 		}
16156 
16157 		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
16158 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
16159 
16160 		if (mac_id >= MAX_RADIOS)
16161 			continue;
16162 
16163 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
16164 		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
16165 			continue;
16166 
16167 		rx_data = &rx_ring->rx_data[idx];
16168 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
16169 		m = rx_data->m;
16170 		rx_data->m = NULL;
16171 		setbit(rx_ring->freemap, idx);
16172 
16173 		num_buffs_reaped[mac_id]++;
16174 		total_num_buffs_reaped++;
16175 
16176 		if (err_info.push_reason !=
16177 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
16178 			m_freem(m);
16179 			continue;
16180 		}
16181 
16182 		msdu = &rx_data->rx_msdu;
16183 		memset(&msdu->rxi, 0, sizeof(msdu->rxi));
16184 		msdu->m = m;
16185 		msdu->err_rel_src = err_info.err_rel_src;
16186 		msdu->err_code = err_info.err_code;
16187 		msdu->rx_desc = mtod(m, struct hal_rx_desc *);
16188 		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);
16189 	}
16190 
16191 	qwx_hal_srng_access_end(sc, srng);
16192 #ifdef notyet
16193 	spin_unlock_bh(&srng->lock);
16194 #endif
16195 	if (!total_num_buffs_reaped)
16196 		goto done;
16197 
16198 	for (i = 0; i < sc->num_radios; i++) {
16199 		if (!num_buffs_reaped[i])
16200 			continue;
16201 
16202 		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;
16203 		qwx_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
16204 		    sc->hw_params.hal_params->rx_buf_rbm);
16205 	}
16206 
16207 	for (i = 0; i < sc->num_radios; i++) {
16208 		while ((msdu = TAILQ_FIRST(msdu_list))) {
16209 			TAILQ_REMOVE(msdu_list, msdu, entry);
16210 			if (test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) {
16211 				m_freem(msdu->m);
16212 				msdu->m = NULL;
16213 				continue;
16214 			}
16215 			qwx_dp_rx_wbm_err(sc, msdu, &msdu_list[i]);
16216 			msdu->m = NULL;
16217 		}
16218 	}
16219 done:
16220 	ifp->if_ierrors += total_num_buffs_reaped;
16221 
16222 	return total_num_buffs_reaped;
16223 }
16224 
16225 struct qwx_rx_msdu *
qwx_dp_rx_get_msdu_last_buf(struct qwx_rx_msdu_list * msdu_list,struct qwx_rx_msdu * first)16226 qwx_dp_rx_get_msdu_last_buf(struct qwx_rx_msdu_list *msdu_list,
16227     struct qwx_rx_msdu *first)
16228 {
16229 	struct qwx_rx_msdu *msdu;
16230 
16231 	if (!first->is_continuation)
16232 		return first;
16233 
16234 	TAILQ_FOREACH(msdu, msdu_list, entry) {
16235 		if (!msdu->is_continuation)
16236 			return msdu;
16237 	}
16238 
16239 	return NULL;
16240 }
16241 
16242 static inline void *
qwx_dp_rx_get_attention(struct qwx_softc * sc,struct hal_rx_desc * desc)16243 qwx_dp_rx_get_attention(struct qwx_softc *sc, struct hal_rx_desc *desc)
16244 {
16245 	return sc->hw_params.hw_ops->rx_desc_get_attention(desc);
16246 }
16247 
16248 int
qwx_dp_rx_h_attn_is_mcbc(struct qwx_softc * sc,struct hal_rx_desc * desc)16249 qwx_dp_rx_h_attn_is_mcbc(struct qwx_softc *sc, struct hal_rx_desc *desc)
16250 {
16251 	struct rx_attention *attn = qwx_dp_rx_get_attention(sc, desc);
16252 
16253 	return qwx_dp_rx_h_msdu_end_first_msdu(sc, desc) &&
16254 		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
16255 		 le32toh(attn->info1)));
16256 }
16257 
16258 static inline uint8_t
qwx_dp_rx_h_msdu_end_l3pad(struct qwx_softc * sc,struct hal_rx_desc * desc)16259 qwx_dp_rx_h_msdu_end_l3pad(struct qwx_softc *sc, struct hal_rx_desc *desc)
16260 {
16261 	return sc->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
16262 }
16263 
16264 static inline int
qwx_dp_rx_h_attn_msdu_done(struct rx_attention * attn)16265 qwx_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
16266 {
16267 	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, le32toh(attn->info2));
16268 }
16269 
16270 static inline uint32_t
qwx_dp_rx_h_msdu_start_freq(struct qwx_softc * sc,struct hal_rx_desc * desc)16271 qwx_dp_rx_h_msdu_start_freq(struct qwx_softc *sc, struct hal_rx_desc *desc)
16272 {
16273 	return sc->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
16274 }
16275 
16276 uint32_t
qwx_dp_rx_h_attn_mpdu_err(struct rx_attention * attn)16277 qwx_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
16278 {
16279 	uint32_t info = le32toh(attn->info1);
16280 	uint32_t errmap = 0;
16281 
16282 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
16283 		errmap |= DP_RX_MPDU_ERR_FCS;
16284 
16285 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
16286 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
16287 
16288 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
16289 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
16290 
16291 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
16292 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
16293 
16294 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
16295 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
16296 
16297 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
16298 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
16299 
16300 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
16301 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
16302 
16303 	return errmap;
16304 }
16305 
16306 int
qwx_dp_rx_h_attn_msdu_len_err(struct qwx_softc * sc,struct hal_rx_desc * desc)16307 qwx_dp_rx_h_attn_msdu_len_err(struct qwx_softc *sc, struct hal_rx_desc *desc)
16308 {
16309 	struct rx_attention *rx_attention;
16310 	uint32_t errmap;
16311 
16312 	rx_attention = qwx_dp_rx_get_attention(sc, desc);
16313 	errmap = qwx_dp_rx_h_attn_mpdu_err(rx_attention);
16314 
16315 	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
16316 }
16317 
16318 int
qwx_dp_rx_h_attn_is_decrypted(struct rx_attention * attn)16319 qwx_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
16320 {
16321 	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
16322 	    le32toh(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK);
16323 }
16324 
/*
 * Merge an MSDU which spans several RX buffers into a single mbuf.
 * Not yet implemented; the caller treats a non-zero return as an
 * error and drops the MSDU.
 */
int
qwx_dp_rx_msdu_coalesce(struct qwx_softc *sc, struct qwx_rx_msdu_list *msdu_list,
    struct qwx_rx_msdu *first, struct qwx_rx_msdu *last, uint8_t l3pad_bytes,
    int msdu_len)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
16333 
/*
 * Extract rate information from the RX descriptor into rxi.
 * Not yet implemented; rxi is left unmodified.
 */
void
qwx_dp_rx_h_rate(struct qwx_softc *sc, struct hal_rx_desc *rx_desc,
    struct ieee80211_rxinfo *rxi)
{
	/* TODO */
}
16340 
16341 void
qwx_dp_rx_h_ppdu(struct qwx_softc * sc,struct hal_rx_desc * rx_desc,struct ieee80211_rxinfo * rxi)16342 qwx_dp_rx_h_ppdu(struct qwx_softc *sc, struct hal_rx_desc *rx_desc,
16343     struct ieee80211_rxinfo *rxi)
16344 {
16345 	uint8_t channel_num;
16346 	uint32_t meta_data;
16347 
16348 	meta_data = qwx_dp_rx_h_msdu_start_freq(sc, rx_desc);
16349 	channel_num = meta_data & 0xff;
16350 
16351 	rxi->rxi_chan = channel_num;
16352 
16353 	qwx_dp_rx_h_rate(sc, rx_desc, rxi);
16354 }
16355 
16356 void
qwx_dp_rx_h_undecap_nwifi(struct qwx_softc * sc,struct qwx_rx_msdu * msdu,uint8_t * first_hdr,enum hal_encrypt_type enctype)16357 qwx_dp_rx_h_undecap_nwifi(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
16358     uint8_t *first_hdr, enum hal_encrypt_type enctype)
16359 {
16360 	/*
16361 	* This function will need to do some work once we are receiving
16362 	* aggregated frames. For now, it needs to do nothing.
16363 	*/
16364 
16365 	if (!msdu->is_first_msdu)
16366 		printf("%s: not implemented\n", __func__);
16367 }
16368 
/*
 * Undo "raw" decapsulation: trim the trailing FCS from complete,
 * single-buffer MSDUs.  The crypto tail/head stripping done by the
 * Linux driver (kept below under #if 0) is not ported yet.
 */
void
qwx_dp_rx_h_undecap_raw(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    enum hal_encrypt_type enctype, int decrypted)
{
#if 0
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
#endif

	/* Only a complete MSDU contained in one buffer is handled. */
	if (!msdu->is_first_msdu ||
	    !(msdu->is_first_msdu && msdu->is_last_msdu))
		return;

	/* Trim the trailing FCS. */
	m_adj(msdu->m, -IEEE80211_CRC_LEN);
#if 0
	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
#endif
}
16426 
16427 static inline uint8_t *
qwx_dp_rx_h_80211_hdr(struct qwx_softc * sc,struct hal_rx_desc * desc)16428 qwx_dp_rx_h_80211_hdr(struct qwx_softc *sc, struct hal_rx_desc *desc)
16429 {
16430 	return sc->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
16431 }
16432 
16433 static inline enum hal_encrypt_type
qwx_dp_rx_h_mpdu_start_enctype(struct qwx_softc * sc,struct hal_rx_desc * desc)16434 qwx_dp_rx_h_mpdu_start_enctype(struct qwx_softc *sc, struct hal_rx_desc *desc)
16435 {
16436 	if (!sc->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
16437 		return HAL_ENCRYPT_TYPE_OPEN;
16438 
16439 	return sc->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
16440 }
16441 
16442 static inline uint8_t
qwx_dp_rx_h_msdu_start_decap_type(struct qwx_softc * sc,struct hal_rx_desc * desc)16443 qwx_dp_rx_h_msdu_start_decap_type(struct qwx_softc *sc, struct hal_rx_desc *desc)
16444 {
16445 	return sc->hw_params.hw_ops->rx_desc_get_decap_type(desc);
16446 }
16447 
/*
 * Undo the hardware's decapsulation of a received MSDU so net80211
 * sees the frame format it expects.  Only native-wifi and raw decap
 * are handled so far; Ethernet/802.3 decap handling from the Linux
 * driver is kept below under #if 0 until it is ported.
 */
void
qwx_dp_rx_h_undecap(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype,
    int decrypted)
{
	uint8_t *first_hdr;
	uint8_t decap;

	first_hdr = qwx_dp_rx_h_80211_hdr(sc, rx_desc);
	decap = qwx_dp_rx_h_msdu_start_decap_type(sc, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		qwx_dp_rx_h_undecap_nwifi(sc, msdu, first_hdr, enctype);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		qwx_dp_rx_h_undecap_raw(sc, msdu, enctype, decrypted);
		break;
#if 0
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
						   enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
#endif
	}
}
16491 
/*
 * Per-MPDU receive processing: determine encryption type, collect the
 * hardware error bitmap, bump net80211 error counters, mark frames the
 * hardware decrypted (IEEE80211_RXI_HWDEC), undo decapsulation, and
 * clear the Protected bit when the hardware stripped the IV.
 * Returns 0 if the frame is clean, EIO if any hardware error bit is set
 * (the caller then drops the frame).  Per-peer key state tracking from
 * the Linux driver remains unported under #if 0.
 */
int
qwx_dp_rx_h_mpdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct hal_rx_desc *rx_desc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int fill_crypto_hdr = 0;
	enum hal_encrypt_type enctype;
	int is_decrypted = 0;
#if 0
	struct ath11k_skb_rxcb *rxcb;
#endif
	struct ieee80211_frame *wh;
#if 0
	struct ath11k_peer *peer;
#endif
	struct rx_attention *rx_attention;
	uint32_t err_bitmap;

	/* PN for multicast packets will be checked in net80211 */
	fill_crypto_hdr = qwx_dp_rx_h_attn_is_mcbc(sc, rx_desc);
	msdu->is_mcbc = fill_crypto_hdr;
#if 0
	if (rxcb->is_mcbc) {
		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
#endif
		enctype = qwx_dp_rx_h_mpdu_start_enctype(sc, rx_desc);
#if 0
	}
	spin_unlock_bh(&ar->ab->base_lock);
#endif
	rx_attention = qwx_dp_rx_get_attention(sc, rx_desc);
	err_bitmap = qwx_dp_rx_h_attn_mpdu_err(rx_attention);
	/* Only trust the "decrypted" status on error-free encrypted frames. */
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = qwx_dp_rx_h_attn_is_decrypted(rx_attention);
#if 0
	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

#endif
	if (err_bitmap & DP_RX_MPDU_ERR_FCS) {
		if (ic->ic_flags & IEEE80211_F_RSNON)
			ic->ic_stats.is_rx_decryptcrc++;
		else
			ic->ic_stats.is_rx_decap++;
	}

	/* XXX Trusting firmware to handle Michael MIC counter-measures... */
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		ic->ic_stats.is_rx_locmicfail++;

	if (err_bitmap & DP_RX_MPDU_ERR_DECRYPT)
		ic->ic_stats.is_rx_wepfail++;

	if (is_decrypted) {
#if 0
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
#endif
		/* Tell net80211 the hardware already decrypted this frame. */
		msdu->rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
	}
#if 0
	ath11k_dp_rx_h_csum_offload(ar, msdu);
#endif
	qwx_dp_rx_h_undecap(sc, msdu, rx_desc, enctype, is_decrypted);

	if (is_decrypted && !fill_crypto_hdr &&
	    qwx_dp_rx_h_msdu_start_decap_type(sc, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		/* Hardware has stripped the IV. */
		wh = mtod(msdu->m, struct ieee80211_frame *);
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
	}

	return err_bitmap ? EIO : 0;
}
16588 
/*
 * Validate and prepare one received MSDU for delivery: verify the
 * length and msdu_done bits in the RX descriptor TLVs, strip the
 * descriptor (and L3 padding) from the mbuf, set the frame length,
 * and run per-MPDU processing.  Returns 0 on success or an errno;
 * on failure the caller frees msdu->m.
 */
int
qwx_dp_rx_process_msdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu,
    struct qwx_rx_msdu_list *msdu_list)
{
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct qwx_rx_msdu *last_buf;
	uint8_t l3_pad_bytes;
	uint16_t msdu_len;
	int ret;
	uint32_t hal_rx_desc_sz = sc->hw_params.hal_desc_sz;

	/* The Atten/MSDU_END/MPDU_END TLVs live in the MSDU's last buffer. */
	last_buf = qwx_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		DPRINTF("%s: No valid Rx buffer to access "
		    "Atten/MSDU_END/MPDU_END tlvs\n", __func__);
		return EIO;
	}

	rx_desc = mtod(msdu->m, struct hal_rx_desc *);
	if (qwx_dp_rx_h_attn_msdu_len_err(sc, rx_desc)) {
		DPRINTF("%s: msdu len not valid\n", __func__);
		return EIO;
	}

	lrx_desc = mtod(last_buf->m, struct hal_rx_desc *);
	rx_attention = qwx_dp_rx_get_attention(sc, lrx_desc);
	if (!qwx_dp_rx_h_attn_msdu_done(rx_attention)) {
		DPRINTF("%s: msdu_done bit in attention is not set\n",
		    __func__);
		return EIO;
	}

	msdu->rx_desc = rx_desc;
	msdu_len = qwx_dp_rx_h_msdu_start_msdu_len(sc, rx_desc);
	l3_pad_bytes = qwx_dp_rx_h_msdu_end_l3pad(sc, lrx_desc);

	if (msdu->is_frag) {
		/* Strip the HAL RX descriptor; no L3 pad for fragments. */
		m_adj(msdu->m, hal_rx_desc_sz);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else if (!msdu->is_continuation) {
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
#if 0
			uint8_t *hdr_status;

			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
#endif
			DPRINTF("%s: invalid msdu len %u\n",
			    __func__, msdu_len);
#if 0
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
#endif
			return EINVAL;
		}
		/* Strip descriptor plus L3 padding; frame fits one buffer. */
		m_adj(msdu->m, hal_rx_desc_sz + l3_pad_bytes);
		msdu->m->m_len = msdu->m->m_pkthdr.len = msdu_len;
	} else {
		/* MSDU spans multiple buffers; merge them into one mbuf. */
		ret = qwx_dp_rx_msdu_coalesce(sc, msdu_list, msdu, last_buf,
		    l3_pad_bytes, msdu_len);
		if (ret) {
			DPRINTF("%s: failed to coalesce msdu rx buffer%d\n",
			    __func__, ret);
			return ret;
		}
	}

	memset(&msdu->rxi, 0, sizeof(msdu->rxi));
	qwx_dp_rx_h_ppdu(sc, rx_desc, &msdu->rxi);

	return qwx_dp_rx_h_mpdu(sc, msdu, rx_desc);
}
16663 
/*
 * Hand a prepared MSDU to net80211.  Consumes msdu->m: the mbuf is
 * passed to ieee80211_input() (callers clear msdu->m afterwards).
 */
void
qwx_dp_rx_deliver_msdu(struct qwx_softc *sc, struct qwx_rx_msdu *msdu)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = mtod(msdu->m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

#if NBPFILTER > 0
	/* Feed radiotap listeners before the frame goes up the stack. */
	if (sc->sc_drvbpf != NULL) {
		struct qwx_rx_radiotap_header *tap = &sc->sc_rxtap;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
		    msdu->m, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(ifp, msdu->m, ni, &msdu->rxi);
	ieee80211_release_node(ic, ni);
}
16686 
16687 void
qwx_dp_rx_process_received_packets(struct qwx_softc * sc,struct qwx_rx_msdu_list * msdu_list,int mac_id)16688 qwx_dp_rx_process_received_packets(struct qwx_softc *sc,
16689     struct qwx_rx_msdu_list *msdu_list, int mac_id)
16690 {
16691 	struct qwx_rx_msdu *msdu;
16692 	int ret;
16693 
16694 	while ((msdu = TAILQ_FIRST(msdu_list))) {
16695 		TAILQ_REMOVE(msdu_list, msdu, entry);
16696 		ret = qwx_dp_rx_process_msdu(sc, msdu, msdu_list);
16697 		if (ret) {
16698 			DNPRINTF(QWX_D_MAC, "Unable to process msdu: %d", ret);
16699 			m_freem(msdu->m);
16700 			msdu->m = NULL;
16701 			continue;
16702 		}
16703 
16704 		qwx_dp_rx_deliver_msdu(sc, msdu);
16705 		msdu->m = NULL;
16706 	}
16707 }
16708 
/*
 * Reap one REO destination ring: collect completed RX buffers into
 * per-radio MSDU lists, then process and deliver them and replenish
 * the refill rings.  Returns the number of complete MSDUs reaped,
 * which the interrupt path uses as a "did work" indication.
 */
int
qwx_dp_process_rx(struct qwx_softc *sc, int ring_id)
{
	struct qwx_dp *dp = &sc->dp;
	struct qwx_pdev_dp *pdev_dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct qwx_rx_msdu_list msdu_list[MAX_RADIOS];
	struct qwx_rx_msdu *msdu;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	int done = 0;
	int idx;
	unsigned int mac_id;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	uint32_t cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		TAILQ_INIT(&msdu_list[i]);

	srng = &sc->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
try_again:
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = (struct hal_reo_dest_ring *)
	    qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		/* The cookie encodes the radio and the buffer slot index. */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    desc->buf_addr_info.info1);
		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		if (mac_id >= MAX_RADIOS)
			continue;

		/* Ignore cookies which do not match an in-use buffer slot. */
		rx_ring = &pdev_dp->rx_refill_buf_ring;
		if (idx >= rx_ring->bufs_max || isset(rx_ring->freemap, idx))
			continue;

		rx_data = &rx_ring->rx_data[idx];
		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
		m = rx_data->m;
		rx_data->m = NULL;
		setbit(rx_ring->freemap, idx);

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
		    desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			m_freem(m);
#if 0
			sc->soc_stats.hal_reo_error[
			    dp->reo_dst_ring[ring_id].ring_id]++;
#endif
			continue;
		}

		/* Record per-MSDU metadata from the REO descriptor. */
		msdu = &rx_data->rx_msdu;
		msdu->m = m;
		msdu->is_first_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		msdu->is_last_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		msdu->is_continuation = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		msdu->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
		    desc->rx_mpdu_info.meta_data);
		msdu->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
		    desc->rx_mpdu_info.info0);
		msdu->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
		    desc->info0);

		msdu->mac_id = mac_id;
		TAILQ_INSERT_TAIL(&msdu_list[mac_id], msdu, entry);

		/* A continuation buffer means the current MSDU is not
		 * complete yet; only count MSDUs once their final buffer
		 * has been seen. */
		if (msdu->is_continuation) {
			done = 0;
		} else {
			total_msdu_reaped++;
			done = 1;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && qwx_hal_srng_dst_num_free(sc, srng, 1)) {
		qwx_hal_srng_access_end(sc, srng);
		goto try_again;
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (!total_msdu_reaped)
		goto exit;

	for (i = 0; i < sc->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		qwx_dp_rx_process_received_packets(sc, &msdu_list[i], i);

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwx_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}
16832 
/*
 * Allocate and DMA-map a fresh monitor status buffer and store it in
 * a free slot of rx_ring.  On success returns the mbuf and sets
 * *buf_idx to the chosen slot; returns NULL on failure (the slot, if
 * any, remains marked free).
 */
struct mbuf *
qwx_dp_rx_alloc_mon_status_buf(struct qwx_softc *sc,
    struct dp_rxdma_ring *rx_ring, int *buf_idx)
{
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	const size_t size = DP_RX_BUFFER_SIZE;
	int ret, idx;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	/* Attach a cluster large enough for DP_RX_BUFFER_SIZE. */
	if (size <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, size);
	if ((m->m_flags & M_EXT) == 0)
		goto fail_free_mbuf;

	m->m_len = m->m_pkthdr.len = size;
	idx = qwx_next_free_rxbuf_idx(rx_ring);
	if (idx == -1)
		goto fail_free_mbuf;

	rx_data = &rx_ring->rx_data[idx];
	if (rx_data->m != NULL)
		goto fail_free_mbuf;

	/* DMA maps are created lazily, then kept around for reuse. */
	if (rx_data->map == NULL) {
		ret = bus_dmamap_create(sc->sc_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
		if (ret)
			goto fail_free_mbuf;
	}

	ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (ret) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail_free_mbuf;
	}

	*buf_idx = idx;
	rx_data->m = m;
	clrbit(rx_ring->freemap, idx);
	return m;

fail_free_mbuf:
	m_freem(m);
	return NULL;
}
16886 
/*
 * Reap the monitor status refill ring for one radio: collect status
 * buffers whose DMA has completed onto ml, and replace each reaped
 * (or invalid) entry with a freshly allocated buffer.  Returns the
 * number of ring entries consumed.
 */
int
qwx_dp_rx_reap_mon_status_ring(struct qwx_softc *sc, int mac_id,
    struct mbuf_list *ml)
{
	const struct ath11k_hw_hal_params *hal_params;
	struct qwx_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct qwx_mon_data *pmon;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	struct hal_tlv_hdr *tlv;
	uint32_t cookie;
	int buf_idx, srng_id;
	uint64_t paddr;
	uint8_t rbm;
	int num_buffs_reaped = 0;

	dp = &sc->pdev_dp;
	pmon = &dp->mon_data;

	/* Map the radio to its status ring; mapping is chip-specific. */
	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	while (1) {
		rx_mon_status_desc = qwx_hal_srng_src_peek(sc, srng);
		if (!rx_mon_status_desc) {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
			break;
		}

		qwx_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
		    &cookie, &rbm);
		if (paddr) {
			buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
			/* Skip cookies which do not name an in-use slot. */
			if (buf_idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, buf_idx)) {
				pmon->buf_state = DP_MON_STATUS_REPLINISH;
				goto move_next;
			}

			rx_data = &rx_ring->rx_data[buf_idx];

			bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0,
			    rx_data->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);

			tlv = mtod(rx_data->m, struct hal_tlv_hdr *);
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				/* If done status is missing, hold onto status
				 * ring until status is done for this status
				 * ring buffer.
				 * Keep HP in mon_status_ring unchanged,
				 * and break from here.
				 * Check status for same buffer for next time
				 */
				pmon->buf_state = DP_MON_STATUS_NO_DMA;
				break;
			}

			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m = rx_data->m;
			rx_data->m = NULL;
			setbit(rx_ring->freemap, buf_idx);
#if 0
			if (ab->hw_params.full_monitor_mode) {
				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
				if (paddr == pmon->mon_status_paddr)
					pmon->buf_state = DP_MON_STATUS_MATCH;
			}
#endif
			ml_enqueue(ml, m);
		} else {
			pmon->buf_state = DP_MON_STATUS_REPLINISH;
		}
move_next:
		/* Re-arm this ring entry with a fresh buffer. */
		m = qwx_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx);
		if (!m) {
			/* No buffer available: park a null address here. */
			hal_params = sc->hw_params.hal_params;
			qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
			    hal_params->rx_buf_rbm);
			num_buffs_reaped++;
			break;
		}
		rx_data = &rx_ring->rx_data[buf_idx];

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx);

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr,
		    cookie, sc->hw_params.hal_params->rx_buf_rbm);
		qwx_hal_srng_src_get_next_entry(sc, srng);
		num_buffs_reaped++;
	}
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return num_buffs_reaped;
}
16995 
enum hal_rx_mon_status
qwx_hal_rx_parse_mon_status(struct qwx_softc *sc,
    struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m)
{
	/*
	 * Parse a monitor-mode status buffer into ppdu_info.
	 * Not yet implemented; always reports that the PPDU is not
	 * complete, so callers drop the buffer without using it.
	 */
	/* TODO */
	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
17003 
/*
 * Reap completed monitor status buffers for the given MAC id and run
 * each one through the status parser.  Buffers which do not complete
 * a PPDU for a known peer are freed.  Returns the number of status
 * buffers reaped from the ring (zero means there was nothing to do).
 * The disabled #if 0 sections preserve the Linux ath11k code paths
 * (pktlog tracing and per-peer statistics) not yet ported.
 */
int
qwx_dp_rx_process_mon_status(struct qwx_softc *sc, int mac_id)
{
	enum hal_rx_mon_status hal_status;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
#if 0
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
#endif
	int num_buffs_reaped = 0;
#if 0
	uint32_t rx_buf_sz;
	uint16_t log_type;
#endif
	struct qwx_mon_data *pmon = (struct qwx_mon_data *)&sc->pdev_dp.mon_data;
#if  0
	struct qwx_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
#endif
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	num_buffs_reaped = qwx_dp_rx_reap_mon_status_ring(sc, mac_id, &ml);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((m = ml_dequeue(&ml))) {
#if 0
		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH11K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
#endif

		/* Start each status buffer from a clean PPDU info. */
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = qwx_hal_rx_parse_mon_status(sc, ppdu_info, m);
#if 0
		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
#endif
		/* Drop buffers without a complete PPDU for a known peer. */
		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			m_freem(m);
			continue;
		}
#if 0
		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
#endif
	}
exit:
	return num_buffs_reaped;
}
17097 
/*
 * Service the monitor-mode rings for the given MAC id.
 * Full-monitor mode is not yet supported (see the disabled branch
 * kept from Linux ath11k); only the status ring is processed.
 * Returns non-zero when any work was done.
 */
int
qwx_dp_rx_process_mon_rings(struct qwx_softc *sc, int mac_id)
{
#if 0
	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
#endif
	return qwx_dp_rx_process_mon_status(sc, mac_id);
}
17112 
17113 void
qwx_dp_service_mon_ring(void * arg)17114 qwx_dp_service_mon_ring(void *arg)
17115 {
17116 	struct qwx_softc *sc = arg;
17117 	int i;
17118 
17119 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++)
17120 		qwx_dp_rx_process_mon_rings(sc, i);
17121 
17122 	timeout_add(&sc->mon_reap_timer, ATH11K_MON_TIMER_INTERVAL);
17123 }
17124 
/*
 * Drain the RXDMA error destination ring for the given MAC id.
 * For each entry, free the mbufs of all MSDUs referenced by the
 * associated link descriptor, return the link descriptor to the
 * idle list, and finally replenish the RX refill ring.  The number
 * of freed buffers is accounted as input errors on the interface.
 * Returns the number of rx buffers freed.
 */
int
qwx_dp_process_rxdma_err(struct qwx_softc *sc, int mac_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct dp_srng *err_ring;
	struct dp_rxdma_ring *rx_ring;
	struct dp_link_desc_bank *link_desc_banks = sc->dp.link_desc_banks;
	struct hal_srng *srng;
	uint32_t msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct qwx_rx_data *rx_data;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	uint64_t paddr;
	uint32_t desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i, idx, srng_id;

	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
	    mac_id);
	err_ring = &sc->pdev_dp.rxdma_err_dst_ring[srng_id];
	rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

	srng = &sc->hal.srng_list[err_ring->ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		qwx_hal_rx_reo_ent_paddr_get(sc, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code = FIELD_GET(
		    HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
		    entr_ring->info1);
#if 0
		ab->soc_stats.rxdma_error[rxdma_err_code]++;
#endif
		/* Translate the link descriptor's bus address to a VA. */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
		     (paddr - link_desc_banks[desc_bank].paddr);
		qwx_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
		    msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
			    msdu_cookies[i]);
			/* Skip cookies which reference no loaded buffer. */
			if (idx >= rx_ring->bufs_max ||
			    isset(rx_ring->freemap, idx))
				continue;

			rx_data = &rx_ring->rx_data[idx];

			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
			setbit(rx_ring->freemap, idx);

			num_buf_freed++;
		}

		qwx_dp_rx_link_desc_return(sc, desc,
		    HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (num_buf_freed)
		qwx_dp_rxbufs_replenish(sc, mac_id, rx_ring, num_buf_freed,
		    sc->hw_params.hal_params->rx_buf_rbm);

	ifp->if_ierrors += num_buf_freed;

	return num_buf_freed;
}
17206 
/*
 * Decode a REO "get queue stats" status TLV into the generic
 * hal_reo_status header.  Only the command number and execution
 * status are extracted; the detailed statistics dump from Linux
 * ath11k is kept below, disabled, as a porting reference.
 */
void
qwx_hal_reo_status_queue_stats(struct qwx_softc *sc, uint32_t *reo_desc,
    struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_get_queue_stats_status *desc =
	    (struct hal_reo_get_queue_stats_status *)tlv->value;

	status->uniform_hdr.cmd_num =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
	status->uniform_hdr.cmd_status =
	    FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
#if 0
	ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
	ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
		   status->uniform_hdr.cmd_num,
		   status->uniform_hdr.cmd_status);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
			     desc->info0),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
			     desc->info0));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
		   desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
		   desc->last_rx_enqueue_timestamp,
		   desc->last_rx_dequeue_timestamp);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
		   desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
		   desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
		   desc->rx_bitmap[6], desc->rx_bitmap[7]);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
			     desc->info1),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
			     desc->info1));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
			     desc->info2));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
			     desc->info3),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
			     desc->info3));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
		   desc->num_mpdu_frames, desc->num_msdu_frames,
		   desc->total_bytes);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
			     desc->info4));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
			     desc->info5));
#endif
}
17272 
17273 void
qwx_hal_reo_flush_queue_status(struct qwx_softc * sc,uint32_t * reo_desc,struct hal_reo_status * status)17274 qwx_hal_reo_flush_queue_status(struct qwx_softc *sc, uint32_t *reo_desc,
17275     struct hal_reo_status *status)
17276 {
17277 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17278 	struct hal_reo_flush_queue_status *desc =
17279 	    (struct hal_reo_flush_queue_status *)tlv->value;
17280 
17281 	status->uniform_hdr.cmd_num = FIELD_GET(
17282 	   HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17283 	status->uniform_hdr.cmd_status = FIELD_GET(
17284 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17285 	status->u.flush_queue.err_detected = FIELD_GET(
17286 	    HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED, desc->info0);
17287 }
17288 
17289 void
qwx_hal_reo_flush_cache_status(struct qwx_softc * sc,uint32_t * reo_desc,struct hal_reo_status * status)17290 qwx_hal_reo_flush_cache_status(struct qwx_softc *sc, uint32_t *reo_desc,
17291     struct hal_reo_status *status)
17292 {
17293 	struct ath11k_hal *hal = &sc->hal;
17294 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17295 	struct hal_reo_flush_cache_status *desc =
17296 	    (struct hal_reo_flush_cache_status *)tlv->value;
17297 
17298 	status->uniform_hdr.cmd_num = FIELD_GET(
17299 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17300 	status->uniform_hdr.cmd_status = FIELD_GET(
17301 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17302 
17303 	status->u.flush_cache.err_detected = FIELD_GET(
17304 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
17305 	status->u.flush_cache.err_code = FIELD_GET(
17306 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE, desc->info0);
17307 	if (!status->u.flush_cache.err_code)
17308 		hal->avail_blk_resource |= BIT(hal->current_blk_index);
17309 
17310 	status->u.flush_cache.cache_controller_flush_status_hit = FIELD_GET(
17311 	    HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT, desc->info0);
17312 
17313 	status->u.flush_cache.cache_controller_flush_status_desc_type =
17314 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
17315 	    desc->info0);
17316 	status->u.flush_cache.cache_controller_flush_status_client_id =
17317 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
17318 	    desc->info0);
17319 	status->u.flush_cache.cache_controller_flush_status_err =
17320 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
17321 	    desc->info0);
17322 	status->u.flush_cache.cache_controller_flush_status_cnt =
17323 	    FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
17324 	    desc->info0);
17325 }
17326 
17327 void
qwx_hal_reo_unblk_cache_status(struct qwx_softc * sc,uint32_t * reo_desc,struct hal_reo_status * status)17328 qwx_hal_reo_unblk_cache_status(struct qwx_softc *sc, uint32_t *reo_desc,
17329     struct hal_reo_status *status)
17330 {
17331 	struct ath11k_hal *hal = &sc->hal;
17332 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17333 	struct hal_reo_unblock_cache_status *desc =
17334 	   (struct hal_reo_unblock_cache_status *)tlv->value;
17335 
17336 	status->uniform_hdr.cmd_num = FIELD_GET(
17337 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17338 	status->uniform_hdr.cmd_status = FIELD_GET(
17339 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17340 
17341 	status->u.unblock_cache.err_detected = FIELD_GET(
17342 	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR, desc->info0);
17343 	status->u.unblock_cache.unblock_type = FIELD_GET(
17344 	    HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE, desc->info0);
17345 
17346 	if (!status->u.unblock_cache.err_detected &&
17347 	    status->u.unblock_cache.unblock_type ==
17348 	    HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
17349 		hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
17350 }
17351 
17352 void
qwx_hal_reo_flush_timeout_list_status(struct qwx_softc * ab,uint32_t * reo_desc,struct hal_reo_status * status)17353 qwx_hal_reo_flush_timeout_list_status(struct qwx_softc *ab, uint32_t *reo_desc,
17354     struct hal_reo_status *status)
17355 {
17356 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17357 	struct hal_reo_flush_timeout_list_status *desc =
17358 	    (struct hal_reo_flush_timeout_list_status *)tlv->value;
17359 
17360 	status->uniform_hdr.cmd_num = FIELD_GET(
17361 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17362 	status->uniform_hdr.cmd_status = FIELD_GET(
17363 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17364 
17365 	status->u.timeout_list.err_detected = FIELD_GET(
17366 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR, desc->info0);
17367 	status->u.timeout_list.list_empty = FIELD_GET(
17368 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY, desc->info0);
17369 
17370 	status->u.timeout_list.release_desc_cnt = FIELD_GET(
17371 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT, desc->info1);
17372 	status->u.timeout_list.fwd_buf_cnt = FIELD_GET(
17373 	    HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT, desc->info1);
17374 }
17375 
17376 void
qwx_hal_reo_desc_thresh_reached_status(struct qwx_softc * sc,uint32_t * reo_desc,struct hal_reo_status * status)17377 qwx_hal_reo_desc_thresh_reached_status(struct qwx_softc *sc, uint32_t *reo_desc,
17378     struct hal_reo_status *status)
17379 {
17380 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17381 	struct hal_reo_desc_thresh_reached_status *desc =
17382 	    (struct hal_reo_desc_thresh_reached_status *)tlv->value;
17383 
17384 	status->uniform_hdr.cmd_num = FIELD_GET(
17385 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->hdr.info0);
17386 	status->uniform_hdr.cmd_status = FIELD_GET(
17387 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->hdr.info0);
17388 
17389 	status->u.desc_thresh_reached.threshold_idx = FIELD_GET(
17390 	    HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX, desc->info0);
17391 
17392 	status->u.desc_thresh_reached.link_desc_counter0 = FIELD_GET(
17393 	    HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0, desc->info1);
17394 
17395 	status->u.desc_thresh_reached.link_desc_counter1 = FIELD_GET(
17396 	    HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1, desc->info2);
17397 
17398 	status->u.desc_thresh_reached.link_desc_counter2 = FIELD_GET(
17399 	    HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2, desc->info3);
17400 
17401 	status->u.desc_thresh_reached.link_desc_counter_sum = FIELD_GET(
17402 	    HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
17403 	    desc->info4);
17404 }
17405 
17406 void
qwx_hal_reo_update_rx_reo_queue_status(struct qwx_softc * ab,uint32_t * reo_desc,struct hal_reo_status * status)17407 qwx_hal_reo_update_rx_reo_queue_status(struct qwx_softc *ab, uint32_t *reo_desc,
17408     struct hal_reo_status *status)
17409 {
17410 	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
17411 	struct hal_reo_status_hdr *desc =
17412 	    (struct hal_reo_status_hdr *)tlv->value;
17413 
17414 	status->uniform_hdr.cmd_num = FIELD_GET(
17415 	    HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, desc->info0);
17416 	status->uniform_hdr.cmd_status = FIELD_GET(
17417 	    HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, desc->info0);
17418 }
17419 
/*
 * Drain the REO status ring.  Each status TLV is decoded by tag into
 * a generic hal_reo_status, then matched by command number against
 * the list of pending REO commands; a matching command's completion
 * handler is invoked and the command entry freed.
 * Returns non-zero if any ring entry was processed.
 */
int
qwx_dp_process_reo_status(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	int found = 0, ret = 0;
	uint32_t *reo_desc;
	uint16_t tag;
	struct hal_reo_status reo_status;

	srng = &sc->hal.srng_list[dp->reo_status_ring.ring_id];
	memset(&reo_status, 0, sizeof(reo_status));
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	while ((reo_desc = qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		ret = 1;

		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			qwx_hal_reo_status_queue_stats(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			qwx_hal_reo_flush_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			qwx_hal_reo_flush_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			qwx_hal_reo_unblk_cache_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			qwx_hal_reo_flush_timeout_list_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			qwx_hal_reo_desc_thresh_reached_status(sc, reo_desc,
			    &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			qwx_hal_reo_update_rx_reo_queue_status(sc, reo_desc,
			    &reo_status);
			break;
		default:
			/* Unrecognized tag: skip this ring entry. */
			printf("%s: Unknown reo status type %d\n",
			    sc->sc_dev.dv_xname, tag);
			continue;
		}
#ifdef notyet
		spin_lock_bh(&dp->reo_cmd_lock);
#endif
		/* Find the pending command this status completes. */
		TAILQ_FOREACH_SAFE(cmd, &dp->reo_cmd_list, entry, tmp) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = 1;
				TAILQ_REMOVE(&dp->reo_cmd_list, cmd, entry);
				break;
			}
		}
#ifdef notyet
		spin_unlock_bh(&dp->reo_cmd_lock);
#endif
		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
			    reo_status.uniform_hdr.cmd_status);
			free(cmd, M_DEVBUF, sizeof(*cmd));
		}
		found = 0;
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
17503 
/*
 * Service all data-path rings assigned to the given interrupt group:
 * TX completions, RX error and WBM release rings, the RX ring,
 * per-radio monitor status rings, the REO status ring, and the
 * per-radio rxdma2host/host2rxdma rings.  Which rings belong to a
 * group is determined by the hw_params ring masks.
 * Returns non-zero if any ring had work.
 */
int
qwx_dp_service_srng(struct qwx_softc *sc, int grp_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	int i, j, ret = 0;

	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_tcl2wbm_rbm_map *map;

		/* The TX mask is keyed by WBM ring number, not TCL index. */
		map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i];
		if ((sc->hw_params.ring_mask->tx[grp_id]) &
		    (1 << (map->wbm_ring_num)) &&
		    qwx_dp_tx_completion_handler(sc, i))
			ret = 1;
	}

	if (sc->hw_params.ring_mask->rx_err[grp_id] &&
	    qwx_dp_process_rx_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] &&
	    qwx_dp_rx_process_wbm_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx[grp_id]) {
		/* Process the highest ring id set in the RX mask. */
		i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1;
		if (qwx_dp_process_rx(sc, i))
			ret = 1;
	}

	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
			   (1 << id)) == 0)
				continue;

			if (qwx_dp_rx_process_mon_rings(sc, id))
				ret = 1;
		}
	}

	if (sc->hw_params.ring_mask->reo_status[grp_id] &&
	    qwx_dp_process_reo_status(sc))
		ret = 1;

	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if (sc->hw_params.ring_mask->rxdma2host[grp_id] &
			   (1 << (id))) {
				if (qwx_dp_process_rxdma_err(sc, id))
					ret = 1;
			}

			if (sc->hw_params.ring_mask->host2rxdma[grp_id] &
			    (1 << id)) {
				/* Top up the refill ring opportunistically. */
				qwx_dp_rxbufs_replenish(sc, id,
				    &dp->rx_refill_buf_ring, 0,
				    sc->hw_params.hal_params->rx_buf_rbm);
			}
		}
	}

	return ret;
}
17572 
17573 int
qwx_wmi_wait_for_service_ready(struct qwx_softc * sc)17574 qwx_wmi_wait_for_service_ready(struct qwx_softc *sc)
17575 {
17576 	int ret;
17577 
17578 	while (!sc->wmi.service_ready) {
17579 		ret = tsleep_nsec(&sc->wmi.service_ready, 0, "qwxwmirdy",
17580 		    SEC_TO_NSEC(5));
17581 		if (ret)
17582 			return -1;
17583 	}
17584 
17585 	return 0;
17586 }
17587 
17588 void
qwx_fill_band_to_mac_param(struct qwx_softc * sc,struct wmi_host_pdev_band_to_mac * band_to_mac)17589 qwx_fill_band_to_mac_param(struct qwx_softc *sc,
17590     struct wmi_host_pdev_band_to_mac *band_to_mac)
17591 {
17592 	uint8_t i;
17593 	struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
17594 	struct qwx_pdev *pdev;
17595 
17596 	for (i = 0; i < sc->num_radios; i++) {
17597 		pdev = &sc->pdevs[i];
17598 		hal_reg_cap = &sc->hal_reg_cap[i];
17599 		band_to_mac[i].pdev_id = pdev->pdev_id;
17600 
17601 		switch (pdev->cap.supported_bands) {
17602 		case WMI_HOST_WLAN_2G_5G_CAP:
17603 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
17604 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
17605 			break;
17606 		case WMI_HOST_WLAN_2G_CAP:
17607 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
17608 			band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
17609 			break;
17610 		case WMI_HOST_WLAN_5G_CAP:
17611 			band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
17612 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
17613 			break;
17614 		default:
17615 			break;
17616 		}
17617 	}
17618 }
17619 
17620 struct mbuf *
qwx_wmi_alloc_mbuf(size_t len)17621 qwx_wmi_alloc_mbuf(size_t len)
17622 {
17623 	struct mbuf *m;
17624 	uint32_t round_len = roundup(len, 4);
17625 
17626 	m = qwx_htc_alloc_mbuf(sizeof(struct wmi_cmd_hdr) + round_len);
17627 	if (!m)
17628 		return NULL;
17629 
17630 	return m;
17631 }
17632 
17633 int
qwx_wmi_cmd_send_nowait(struct qwx_pdev_wmi * wmi,struct mbuf * m,uint32_t cmd_id)17634 qwx_wmi_cmd_send_nowait(struct qwx_pdev_wmi *wmi, struct mbuf *m,
17635     uint32_t cmd_id)
17636 {
17637 	struct qwx_softc *sc = wmi->wmi->sc;
17638 	struct wmi_cmd_hdr *cmd_hdr;
17639 	uint32_t cmd = 0;
17640 
17641 	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
17642 
17643 	cmd_hdr = (struct wmi_cmd_hdr *)(mtod(m, uint8_t *) +
17644 	    sizeof(struct ath11k_htc_hdr));
17645 	cmd_hdr->cmd_id = htole32(cmd);
17646 
17647 	DNPRINTF(QWX_D_WMI, "%s: sending WMI command 0x%u\n", __func__, cmd);
17648 	return qwx_htc_send(&sc->htc, wmi->eid, m);
17649 }
17650 
/*
 * Send a WMI command, blocking until transmit resources are
 * available.  With firmware credit-flow enabled we wait for HTC TX
 * credits on the endpoint; otherwise we wait for a free copy-engine
 * descriptor.  Each wait sleeps up to 3 seconds per wakeup.
 * Returns 0 on success or an errno-style value (ESHUTDOWN once a
 * firmware crash flush is in progress, EAGAIN on timeout).
 */
int
qwx_wmi_cmd_send(struct qwx_pdev_wmi *wmi, struct mbuf *m, uint32_t cmd_id)
{
	struct qwx_wmi_base *wmi_sc = wmi->wmi;
	int ret = EOPNOTSUPP;
	struct qwx_softc *sc = wmi_sc->sc;
#ifdef notyet
	might_sleep();
#endif
	if (sc->hw_params.credit_flow) {
		struct qwx_htc *htc = &sc->htc;
		struct qwx_htc_ep *ep = &htc->endpoint[wmi->eid];

		/* Wait for HTC TX credits on this endpoint. */
		while (!ep->tx_credits) {
			ret = tsleep_nsec(&ep->tx_credits, 0, "qwxtxcrd",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx credits timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	} else {
		/* Wait for a free copy-engine descriptor. */
		while (!wmi->tx_ce_desc) {
			ret = tsleep_nsec(&wmi->tx_ce_desc, 0, "qwxtxce",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx ce desc timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	}

	ret = qwx_wmi_cmd_send_nowait(wmi, m, cmd_id);

	if (ret == EAGAIN)
		printf("%s: wmi command %d timeout\n",
		    sc->sc_dev.dv_xname, cmd_id);

	if (ret == ENOBUFS)
		printf("%s: ce desc not available for wmi command %d\n",
		    sc->sc_dev.dv_xname, cmd_id);

	return ret;
}
17705 
17706 int
qwx_wmi_pdev_set_param(struct qwx_softc * sc,uint32_t param_id,uint32_t param_value,uint8_t pdev_id)17707 qwx_wmi_pdev_set_param(struct qwx_softc *sc, uint32_t param_id,
17708     uint32_t param_value, uint8_t pdev_id)
17709 {
17710 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17711 	struct wmi_pdev_set_param_cmd *cmd;
17712 	struct mbuf *m;
17713 	int ret;
17714 
17715 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17716 	if (!m)
17717 		return ENOMEM;
17718 
17719 	cmd = (struct wmi_pdev_set_param_cmd *)(mtod(m, uint8_t *) +
17720 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17721 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
17722 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17723 	cmd->pdev_id = pdev_id;
17724 	cmd->param_id = param_id;
17725 	cmd->param_value = param_value;
17726 
17727 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PDEV_SET_PARAM_CMDID);
17728 	if (ret) {
17729 		if (ret != ESHUTDOWN) {
17730 			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
17731 			    sc->sc_dev.dv_xname);
17732 		}
17733 		m_freem(m);
17734 		return ret;
17735 	}
17736 
17737 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set param %d pdev id %d value %d\n",
17738 	    __func__, param_id, pdev_id, param_value);
17739 
17740 	return 0;
17741 }
17742 
17743 int
qwx_wmi_pdev_lro_cfg(struct qwx_softc * sc,uint8_t pdev_id)17744 qwx_wmi_pdev_lro_cfg(struct qwx_softc *sc, uint8_t pdev_id)
17745 {
17746 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17747 	struct ath11k_wmi_pdev_lro_config_cmd *cmd;
17748 	struct mbuf *m;
17749 	int ret;
17750 
17751 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17752 	if (!m)
17753 		return ENOMEM;
17754 
17755 	cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)(mtod(m, uint8_t *) +
17756 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17757 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
17758 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17759 
17760 	arc4random_buf(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
17761 	arc4random_buf(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
17762 
17763 	cmd->pdev_id = pdev_id;
17764 
17765 	ret = qwx_wmi_cmd_send(wmi, m, WMI_LRO_CONFIG_CMDID);
17766 	if (ret) {
17767 		if (ret != ESHUTDOWN) {
17768 			printf("%s: failed to send lro cfg req wmi cmd\n",
17769 			    sc->sc_dev.dv_xname);
17770 		}
17771 		m_freem(m);
17772 		return ret;
17773 	}
17774 
17775 	DNPRINTF(QWX_D_WMI, "%s: cmd lro config pdev_id 0x%x\n",
17776 	    __func__, pdev_id);
17777 
17778 	return 0;
17779 }
17780 
17781 int
qwx_wmi_pdev_set_ps_mode(struct qwx_softc * sc,int vdev_id,uint8_t pdev_id,enum wmi_sta_ps_mode psmode)17782 qwx_wmi_pdev_set_ps_mode(struct qwx_softc *sc, int vdev_id, uint8_t pdev_id,
17783     enum wmi_sta_ps_mode psmode)
17784 {
17785 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17786 	struct wmi_pdev_set_ps_mode_cmd *cmd;
17787 	struct mbuf *m;
17788 	int ret;
17789 
17790 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17791 	if (!m)
17792 		return ENOMEM;
17793 
17794 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)(mtod(m, uint8_t *) +
17795 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17796 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17797 	    WMI_TAG_STA_POWERSAVE_MODE_CMD) |
17798 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17799 	cmd->vdev_id = vdev_id;
17800 	cmd->sta_ps_mode = psmode;
17801 
17802 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_MODE_CMDID);
17803 	if (ret) {
17804 		if (ret != ESHUTDOWN) {
17805 			printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
17806 			    sc->sc_dev.dv_xname);
17807 		}
17808 		m_freem(m);
17809 		return ret;
17810 	}
17811 
17812 	DNPRINTF(QWX_D_WMI, "%s: cmd sta powersave mode psmode %d vdev id %d\n",
17813 	    __func__, psmode, vdev_id);
17814 
17815 	return 0;
17816 }
17817 
17818 int
qwx_wmi_scan_prob_req_oui(struct qwx_softc * sc,const uint8_t * mac_addr,uint8_t pdev_id)17819 qwx_wmi_scan_prob_req_oui(struct qwx_softc *sc, const uint8_t *mac_addr,
17820     uint8_t pdev_id)
17821 {
17822 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17823 	struct mbuf *m;
17824 	struct wmi_scan_prob_req_oui_cmd *cmd;
17825 	uint32_t prob_req_oui;
17826 	int len, ret;
17827 
17828 	prob_req_oui = (((uint32_t)mac_addr[0]) << 16) |
17829 		       (((uint32_t)mac_addr[1]) << 8) | mac_addr[2];
17830 
17831 	len = sizeof(*cmd);
17832 	m = qwx_wmi_alloc_mbuf(len);
17833 	if (!m)
17834 		return ENOMEM;
17835 
17836 	cmd = (struct wmi_scan_prob_req_oui_cmd *)(mtod(m, uint8_t *) +
17837 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17838 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17839 	    WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
17840 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17841 	cmd->prob_req_oui = prob_req_oui;
17842 
17843 	DNPRINTF(QWX_D_WMI, "%s: scan prob req oui %d\n", __func__,
17844 	    prob_req_oui);
17845 
17846 	ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_PROB_REQ_OUI_CMDID);
17847 	if (ret) {
17848 		if (ret != ESHUTDOWN) {
17849 			printf("%s: failed to send WMI_SCAN_PROB_REQ_OUI cmd\n",
17850 			    sc->sc_dev.dv_xname);
17851 		}
17852 		m_freem(m);
17853 		return ret;
17854 	}
17855 
17856 	return 0;
17857 }
17858 
17859 int
qwx_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwx_softc * sc,uint32_t pdev_id)17860 qwx_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwx_softc *sc, uint32_t pdev_id)
17861 {
17862 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
17863 	struct wmi_dfs_phyerr_offload_cmd *cmd;
17864 	struct mbuf *m;
17865 	int ret;
17866 
17867 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
17868 	if (!m)
17869 		return ENOMEM;
17870 
17871 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)(mtod(m, uint8_t *) +
17872 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
17873 
17874 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
17875 	    WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
17876 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
17877 
17878 	cmd->pdev_id = pdev_id;
17879 
17880 	ret = qwx_wmi_cmd_send(wmi, m,
17881 	    WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
17882 	if (ret) {
17883 		if (ret != ESHUTDOWN) {
17884 			printf("%s: failed to send "
17885 			    "WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n",
17886 			    sc->sc_dev.dv_xname);
17887 		}
17888 		m_free(m);
17889 		return ret;
17890 	}
17891 
17892 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev dfs phyerr offload enable "
17893 	    "pdev id %d\n", __func__, pdev_id);
17894 
17895 	return 0;
17896 }
17897 
/*
 * Push the driver's channel list to firmware.
 *
 * A single WMI message may be too small to hold all channels, so the
 * list is sent in chunks: each iteration builds one SCAN_CHAN_LIST
 * command holding as many wmi_channel TLVs as fit into the maximum
 * message length for this pdev.  Chunks after the first carry the
 * "append" flag so firmware extends rather than replaces its list.
 *
 * Returns 0 on success or an errno; on error any partially sent
 * chunks remain installed in firmware.
 */
int
qwx_wmi_send_scan_chan_list_cmd(struct qwx_softc *sc, uint8_t pdev_id,
    struct scan_chan_list_params *chan_list)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_scan_chan_list_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	uint16_t num_send_chans, num_sends = 0, max_chan_limit = 0;
	uint32_t *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	/* nallchans is consumed as channels are handed to firmware. */
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* How many channel TLVs fit into one WMI message? */
		max_chan_limit = (wmi->wmi->max_msg_len[pdev_id] - len) /
		    sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		m = qwx_wmi_alloc_mbuf(len);
		if (!m)
			return ENOMEM;

		/* The command TLV sits behind the HTC and WMI headers. */
		cmd = (struct wmi_scan_chan_list_cmd *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_SCAN_CHAN_LIST_CMD) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		/* Later chunks extend the list already sent. */
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		DNPRINTF(QWX_D_WMI, "%s: no.of chan = %d len = %d "
		    "pdev_id = %d num_sends = %d\n", __func__, num_send_chans,
		    len, cmd->pdev_id, num_sends);

		ptr = (void *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
		    sizeof(*cmd));

		/* Array-of-struct TLV header covering this chunk's channels. */
		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
			    WMI_TAG_CHANNEL) |
			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

			/* Translate channel_param into the wire format. */
			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			/* Pick the highest supported phy capability flag. */
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
			    tchan_info->phy_mode);
			/* Regulatory power limits and class. */
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
			    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
			    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
			    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
			    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
			    tchan_info->antennamax);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
			    tchan_info->maxregpower);

			DNPRINTF(QWX_D_WMI, "%s: chan scan list "
			    "chan[%d] = %u, chan_info->info %8x\n",
			    __func__, i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			if (ret != ESHUTDOWN) {
				printf("%s: failed to send WMI_SCAN_CHAN_LIST "
				    "cmd\n", sc->sc_dev.dv_xname);
			}
			m_freem(m);
			return ret;
		}

		DNPRINTF(QWX_D_WMI, "%s: cmd scan chan list channels %d\n",
		    __func__, num_send_chans);

		num_sends++;
	}

	return 0;
}
18028 
18029 int
qwx_wmi_send_11d_scan_start_cmd(struct qwx_softc * sc,struct wmi_11d_scan_start_params * param,uint8_t pdev_id)18030 qwx_wmi_send_11d_scan_start_cmd(struct qwx_softc *sc,
18031     struct wmi_11d_scan_start_params *param, uint8_t pdev_id)
18032 {
18033 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18034 	struct wmi_11d_scan_start_cmd *cmd;
18035 	struct mbuf *m;
18036 	int ret;
18037 
18038 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18039 	if (!m)
18040 		return ENOMEM;
18041 
18042 	cmd = (struct wmi_11d_scan_start_cmd *)(mtod(m, uint8_t *) +
18043 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18044 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
18045 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18046 
18047 	cmd->vdev_id = param->vdev_id;
18048 	cmd->scan_period_msec = param->scan_period_msec;
18049 	cmd->start_interval_msec = param->start_interval_msec;
18050 
18051 	ret = qwx_wmi_cmd_send(wmi, m, WMI_11D_SCAN_START_CMDID);
18052 	if (ret) {
18053 		if (ret != ESHUTDOWN) {
18054 			printf("%s: failed to send WMI_11D_SCAN_START_CMDID: "
18055 			    "%d\n", sc->sc_dev.dv_xname, ret);
18056 		}
18057 		m_freem(m);
18058 		return ret;
18059 	}
18060 
18061 	DNPRINTF(QWX_D_WMI, "%s: cmd 11d scan start vdev id %d period %d "
18062 	    "ms internal %d ms\n", __func__, cmd->vdev_id,
18063 	    cmd->scan_period_msec, cmd->start_interval_msec);
18064 
18065 	return 0;
18066 }
18067 
/*
 * Translate the driver-side scan_req_params booleans into the two
 * bitmask fields of the firmware start-scan command: the set of scan
 * events to be notified about, and the scan control flags.
 */
static inline void
qwx_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
    struct scan_req_params *param)
{
	/* Scan events subscription */
	if (param->scan_ev_started)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
	if (param->scan_ev_completed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
	if (param->scan_ev_bss_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
	if (param->scan_ev_foreign_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
	if (param->scan_ev_dequeued)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
	if (param->scan_ev_preempted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
	if (param->scan_ev_start_failed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
	if (param->scan_ev_restarted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
	if (param->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
	if (param->scan_ev_suspended)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
	if (param->scan_ev_resumed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;

	/** Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (param->scan_f_passive)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
	if (param->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
	if (param->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
	if (param->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
	if (param->scan_f_half_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
	if (param->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
	if (param->scan_f_cck_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
	if (param->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
	if (param->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
	if (param->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
	if (param->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
	if (param->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
	if (param->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
	if (param->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
	if (param->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
	if (param->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
	if (param->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
	if (param->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
	if (param->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;

	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
	    param->adaptive_dwell_time_mode);

	cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
}
18144 
18145 int
qwx_wmi_send_scan_start_cmd(struct qwx_softc * sc,struct scan_req_params * params)18146 qwx_wmi_send_scan_start_cmd(struct qwx_softc *sc,
18147     struct scan_req_params *params)
18148 {
18149 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[params->pdev_id];
18150 	struct wmi_start_scan_cmd *cmd;
18151 	struct wmi_ssid *ssid = NULL;
18152 	struct wmi_mac_addr *bssid;
18153 	struct mbuf *m;
18154 	struct wmi_tlv *tlv;
18155 	void *ptr;
18156 	int i, ret, len;
18157 	uint32_t *tmp_ptr;
18158 	uint16_t extraie_len_with_pad = 0;
18159 	struct hint_short_ssid *s_ssid = NULL;
18160 	struct hint_bssid *hint_bssid = NULL;
18161 
18162 	len = sizeof(*cmd);
18163 
18164 	len += TLV_HDR_SIZE;
18165 	if (params->num_chan)
18166 		len += params->num_chan * sizeof(uint32_t);
18167 
18168 	len += TLV_HDR_SIZE;
18169 	if (params->num_ssids)
18170 		len += params->num_ssids * sizeof(*ssid);
18171 
18172 	len += TLV_HDR_SIZE;
18173 	if (params->num_bssid)
18174 		len += sizeof(*bssid) * params->num_bssid;
18175 
18176 	len += TLV_HDR_SIZE;
18177 	if (params->extraie.len && params->extraie.len <= 0xFFFF) {
18178 		extraie_len_with_pad = roundup(params->extraie.len,
18179 		    sizeof(uint32_t));
18180 	}
18181 	len += extraie_len_with_pad;
18182 
18183 	if (params->num_hint_bssid) {
18184 		len += TLV_HDR_SIZE +
18185 		    params->num_hint_bssid * sizeof(struct hint_bssid);
18186 	}
18187 
18188 	if (params->num_hint_s_ssid) {
18189 		len += TLV_HDR_SIZE +
18190 		    params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
18191 	}
18192 
18193 	m = qwx_wmi_alloc_mbuf(len);
18194 	if (!m)
18195 		return ENOMEM;
18196 
18197 	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
18198 	    sizeof(struct wmi_cmd_hdr));
18199 
18200 	cmd = ptr;
18201 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
18202 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18203 
18204 	cmd->scan_id = params->scan_id;
18205 	cmd->scan_req_id = params->scan_req_id;
18206 	cmd->vdev_id = params->vdev_id;
18207 	cmd->scan_priority = params->scan_priority;
18208 	cmd->notify_scan_events = params->notify_scan_events;
18209 
18210 	qwx_wmi_copy_scan_event_cntrl_flags(cmd, params);
18211 
18212 	cmd->dwell_time_active = params->dwell_time_active;
18213 	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
18214 	cmd->dwell_time_passive = params->dwell_time_passive;
18215 	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
18216 	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
18217 	cmd->min_rest_time = params->min_rest_time;
18218 	cmd->max_rest_time = params->max_rest_time;
18219 	cmd->repeat_probe_time = params->repeat_probe_time;
18220 	cmd->probe_spacing_time = params->probe_spacing_time;
18221 	cmd->idle_time = params->idle_time;
18222 	cmd->max_scan_time = params->max_scan_time;
18223 	cmd->probe_delay = params->probe_delay;
18224 	cmd->burst_duration = params->burst_duration;
18225 	cmd->num_chan = params->num_chan;
18226 	cmd->num_bssid = params->num_bssid;
18227 	cmd->num_ssids = params->num_ssids;
18228 	cmd->ie_len = params->extraie.len;
18229 	cmd->n_probes = params->n_probes;
18230 	IEEE80211_ADDR_COPY(cmd->mac_addr.addr, params->mac_addr.addr);
18231 	IEEE80211_ADDR_COPY(cmd->mac_mask.addr, params->mac_mask.addr);
18232 
18233 	ptr += sizeof(*cmd);
18234 
18235 	len = params->num_chan * sizeof(uint32_t);
18236 
18237 	tlv = ptr;
18238 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
18239 	    FIELD_PREP(WMI_TLV_LEN, len);
18240 	ptr += TLV_HDR_SIZE;
18241 	tmp_ptr = (uint32_t *)ptr;
18242 
18243 	for (i = 0; i < params->num_chan; ++i)
18244 		tmp_ptr[i] = params->chan_list[i];
18245 
18246 	ptr += len;
18247 
18248 	len = params->num_ssids * sizeof(*ssid);
18249 	tlv = ptr;
18250 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
18251 	    FIELD_PREP(WMI_TLV_LEN, len);
18252 
18253 	ptr += TLV_HDR_SIZE;
18254 
18255 	if (params->num_ssids) {
18256 		ssid = ptr;
18257 		for (i = 0; i < params->num_ssids; ++i) {
18258 			ssid->ssid_len = params->ssid[i].length;
18259 			memcpy(ssid->ssid, params->ssid[i].ssid,
18260 			       params->ssid[i].length);
18261 			ssid++;
18262 		}
18263 	}
18264 
18265 	ptr += (params->num_ssids * sizeof(*ssid));
18266 	len = params->num_bssid * sizeof(*bssid);
18267 	tlv = ptr;
18268 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
18269 	    FIELD_PREP(WMI_TLV_LEN, len);
18270 
18271 	ptr += TLV_HDR_SIZE;
18272 	bssid = ptr;
18273 
18274 	if (params->num_bssid) {
18275 		for (i = 0; i < params->num_bssid; ++i) {
18276 			IEEE80211_ADDR_COPY(bssid->addr,
18277 			    params->bssid_list[i].addr);
18278 			bssid++;
18279 		}
18280 	}
18281 
18282 	ptr += params->num_bssid * sizeof(*bssid);
18283 
18284 	len = extraie_len_with_pad;
18285 	tlv = ptr;
18286 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18287 	    FIELD_PREP(WMI_TLV_LEN, len);
18288 	ptr += TLV_HDR_SIZE;
18289 
18290 	if (extraie_len_with_pad)
18291 		memcpy(ptr, params->extraie.ptr, params->extraie.len);
18292 
18293 	ptr += extraie_len_with_pad;
18294 
18295 	if (params->num_hint_s_ssid) {
18296 		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
18297 		tlv = ptr;
18298 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
18299 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
18300 		    FIELD_PREP(WMI_TLV_LEN, len);
18301 		ptr += TLV_HDR_SIZE;
18302 		s_ssid = ptr;
18303 		for (i = 0; i < params->num_hint_s_ssid; ++i) {
18304 			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
18305 			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
18306 			s_ssid++;
18307 		}
18308 		ptr += len;
18309 	}
18310 
18311 	if (params->num_hint_bssid) {
18312 		len = params->num_hint_bssid * sizeof(struct hint_bssid);
18313 		tlv = ptr;
18314 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
18315 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
18316 		    FIELD_PREP(WMI_TLV_LEN, len);
18317 		ptr += TLV_HDR_SIZE;
18318 		hint_bssid = ptr;
18319 		for (i = 0; i < params->num_hint_bssid; ++i) {
18320 			hint_bssid->freq_flags =
18321 				params->hint_bssid[i].freq_flags;
18322 			IEEE80211_ADDR_COPY(
18323 			    &params->hint_bssid[i].bssid.addr[0],
18324 			    &hint_bssid->bssid.addr[0]);
18325 			hint_bssid++;
18326 		}
18327 	}
18328 
18329 	ret = qwx_wmi_cmd_send(wmi, m, WMI_START_SCAN_CMDID);
18330 	if (ret) {
18331 		if (ret != ESHUTDOWN) {
18332 			printf("%s: failed to send WMI_START_SCAN_CMDID\n",
18333 			    sc->sc_dev.dv_xname);
18334 		}
18335 		m_freem(m);
18336 		return ret;
18337 	}
18338 
18339 	DNPRINTF(QWX_D_WMI, "%s: cmd start scan", __func__);
18340 
18341 	return 0;
18342 }
18343 
18344 int
qwx_wmi_send_scan_stop_cmd(struct qwx_softc * sc,struct scan_cancel_param * param)18345 qwx_wmi_send_scan_stop_cmd(struct qwx_softc *sc,
18346     struct scan_cancel_param *param)
18347 {
18348 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
18349 	struct wmi_stop_scan_cmd *cmd;
18350 	struct mbuf *m;
18351 	int ret;
18352 
18353 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18354 	if (!m)
18355 		return ENOMEM;
18356 
18357 	cmd = (struct wmi_stop_scan_cmd *)(mtod(m, uint8_t *) +
18358 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18359 
18360 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
18361 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18362 
18363 	cmd->vdev_id = param->vdev_id;
18364 	cmd->requestor = param->requester;
18365 	cmd->scan_id = param->scan_id;
18366 	cmd->pdev_id = param->pdev_id;
18367 	/* stop the scan with the corresponding scan_id */
18368 	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
18369 		/* Cancelling all scans */
18370 		cmd->req_type =  WMI_SCAN_STOP_ALL;
18371 	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
18372 		/* Cancelling VAP scans */
18373 		cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
18374 	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
18375 		/* Cancelling specific scan */
18376 		cmd->req_type =  WMI_SCAN_STOP_ONE;
18377 	} else {
18378 		printf("%s: invalid scan cancel param %d\n",
18379 		    sc->sc_dev.dv_xname, param->req_type);
18380 		m_freem(m);
18381 		return EINVAL;
18382 	}
18383 
18384 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STOP_SCAN_CMDID);
18385 	if (ret) {
18386 		if (ret != ESHUTDOWN) {
18387 			printf("%s: failed to send WMI_STOP_SCAN_CMDID\n",
18388 			    sc->sc_dev.dv_xname);
18389 		}
18390 		m_freem(m);
18391 		return ret;
18392 	}
18393 
18394 	DNPRINTF(QWX_D_WMI, "%s: cmd stop scan\n", __func__);
18395 	return ret;
18396 }
18397 
18398 int
qwx_wmi_send_peer_create_cmd(struct qwx_softc * sc,uint8_t pdev_id,struct peer_create_params * param)18399 qwx_wmi_send_peer_create_cmd(struct qwx_softc *sc, uint8_t pdev_id,
18400     struct peer_create_params *param)
18401 {
18402 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18403 	struct wmi_peer_create_cmd *cmd;
18404 	struct mbuf *m;
18405 	int ret;
18406 
18407 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18408 	if (!m)
18409 		return ENOMEM;
18410 
18411 	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
18412 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18413 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
18414 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18415 
18416 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
18417 	cmd->peer_type = param->peer_type;
18418 	cmd->vdev_id = param->vdev_id;
18419 
18420 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
18421 	if (ret) {
18422 		if (ret != ESHUTDOWN) {
18423 			printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
18424 			    sc->sc_dev.dv_xname);
18425 		}
18426 		m_freem(m);
18427 		return ret;
18428 	}
18429 
18430 	DNPRINTF(QWX_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
18431 	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
18432 
18433 	return ret;
18434 }
18435 
18436 int
qwx_wmi_send_peer_delete_cmd(struct qwx_softc * sc,const uint8_t * peer_addr,uint8_t vdev_id,uint8_t pdev_id)18437 qwx_wmi_send_peer_delete_cmd(struct qwx_softc *sc, const uint8_t *peer_addr,
18438     uint8_t vdev_id, uint8_t pdev_id)
18439 {
18440 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18441 	struct wmi_peer_delete_cmd *cmd;
18442 	struct mbuf *m;
18443 	int ret;
18444 
18445 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
18446 	if (!m)
18447 		return ENOMEM;
18448 
18449 	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
18450 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18451 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
18452 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18453 
18454 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
18455 	cmd->vdev_id = vdev_id;
18456 
18457 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
18458 	if (ret) {
18459 		if (ret != ESHUTDOWN) {
18460 			printf("%s: failed to send WMI_PEER_DELETE cmd\n",
18461 			    sc->sc_dev.dv_xname);
18462 		}
18463 		m_freem(m);
18464 		return ret;
18465 	}
18466 
18467 	DNPRINTF(QWX_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %pM\n",
18468 	    __func__, vdev_id, peer_addr);
18469 
18470 	return 0;
18471 }
18472 
18473 int
qwx_wmi_vdev_install_key(struct qwx_softc * sc,struct wmi_vdev_install_key_arg * arg,uint8_t pdev_id)18474 qwx_wmi_vdev_install_key(struct qwx_softc *sc,
18475     struct wmi_vdev_install_key_arg *arg, uint8_t pdev_id)
18476 {
18477 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18478 	struct wmi_vdev_install_key_cmd *cmd;
18479 	struct wmi_tlv *tlv;
18480 	struct mbuf *m;
18481 	int ret, len;
18482 	int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
18483 
18484 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
18485 
18486 	m = qwx_wmi_alloc_mbuf(len);
18487 	if (m == NULL)
18488 		return -ENOMEM;
18489 
18490 	cmd = (struct wmi_vdev_install_key_cmd *)(mtod(m, uint8_t *) +
18491 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18492 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18493 	    WMI_TAG_VDEV_INSTALL_KEY_CMD) |
18494 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18495 	cmd->vdev_id = arg->vdev_id;
18496 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, arg->macaddr);
18497 	cmd->key_idx = arg->key_idx;
18498 	cmd->key_flags = arg->key_flags;
18499 	cmd->key_cipher = arg->key_cipher;
18500 	cmd->key_len = arg->key_len;
18501 	cmd->key_txmic_len = arg->key_txmic_len;
18502 	cmd->key_rxmic_len = arg->key_rxmic_len;
18503 
18504 	if (arg->key_rsc_counter)
18505 		memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
18506 		       sizeof(struct wmi_key_seq_counter));
18507 
18508 	tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
18509 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
18510 	    sizeof(*cmd));
18511 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18512 	    FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
18513 	if (arg->key_data)
18514 		memcpy(tlv->value, (uint8_t *)arg->key_data,
18515 		    key_len_aligned);
18516 
18517 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_INSTALL_KEY_CMDID);
18518 	if (ret) {
18519 		printf("%s: failed to send WMI_VDEV_INSTALL_KEY cmd\n",
18520 		    sc->sc_dev.dv_xname);
18521 		m_freem(m);
18522 		return ret;
18523 	}
18524 
18525 	DNPRINTF(QWX_D_WMI,
18526 	    "%s: cmd vdev install key idx %d cipher %d len %d\n",
18527 	    __func__, arg->key_idx, arg->key_cipher, arg->key_len);
18528 
18529 	return ret;
18530 }
18531 
/*
 * Translate driver-side peer association parameters into the
 * peer_flags bitmask of the peer-assoc command.  WME-dependent
 * capability flags are only set when WME is in use; authorization is
 * suppressed for modes that require a 4-way handshake (it is granted
 * later, on key installation).
 */
void
qwx_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
    struct peer_assoc_params *param, int hw_crypto_disabled)
{
	cmd->peer_flags = 0;

	if (param->is_wme_set) {
		if (param->qos_flag)
			cmd->peer_flags |= WMI_PEER_QOS;
		if (param->apsd_flag)
			cmd->peer_flags |= WMI_PEER_APSD;
		if (param->ht_flag)
			cmd->peer_flags |= WMI_PEER_HT;
		if (param->bw_40)
			cmd->peer_flags |= WMI_PEER_40MHZ;
		if (param->bw_80)
			cmd->peer_flags |= WMI_PEER_80MHZ;
		if (param->bw_160)
			cmd->peer_flags |= WMI_PEER_160MHZ;

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->stbc_flag)
			cmd->peer_flags |= WMI_PEER_STBC;

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (param->ldpc_flag)
			cmd->peer_flags |= WMI_PEER_LDPC;

		if (param->static_mimops_flag)
			cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
		if (param->dynamic_mimops_flag)
			cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
		if (param->spatial_mux_flag)
			cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
		if (param->vht_flag)
			cmd->peer_flags |= WMI_PEER_VHT;
		if (param->he_flag)
			cmd->peer_flags |= WMI_PEER_HE;
		if (param->twt_requester)
			cmd->peer_flags |= WMI_PEER_TWT_REQ;
		if (param->twt_responder)
			cmd->peer_flags |= WMI_PEER_TWT_RESP;
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (param->auth_flag)
		cmd->peer_flags |= WMI_PEER_AUTH;
	if (param->need_ptk_4_way) {
		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
		/* With hardware crypto, stay unauthorized until keys are in. */
		if (!hw_crypto_disabled && param->is_assoc)
			cmd->peer_flags &= ~WMI_PEER_AUTH;
	}
	if (param->need_gtk_2_way)
		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	/* safe mode bypass the 4-way handshake */
	if (param->safe_mode_enabled)
		cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
				     WMI_PEER_NEED_GTK_2_WAY);

	if (param->is_pmf_enabled)
		cmd->peer_flags |= WMI_PEER_PMF;

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (param->amsdu_disable) Add after FW support
	 **/

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 **/
	if (param->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= ~WMI_PEER_HT;
}
18614 
18615 int
qwx_wmi_send_peer_assoc_cmd(struct qwx_softc * sc,uint8_t pdev_id,struct peer_assoc_params * param)18616 qwx_wmi_send_peer_assoc_cmd(struct qwx_softc *sc, uint8_t pdev_id,
18617     struct peer_assoc_params *param)
18618 {
18619 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
18620 	struct wmi_peer_assoc_complete_cmd *cmd;
18621 	struct wmi_vht_rate_set *mcs;
18622 	struct wmi_he_rate_set *he_mcs;
18623 	struct mbuf *m;
18624 	struct wmi_tlv *tlv;
18625 	void *ptr;
18626 	uint32_t peer_legacy_rates_align;
18627 	uint32_t peer_ht_rates_align;
18628 	int i, ret, len;
18629 
18630 	peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
18631 	    sizeof(uint32_t));
18632 	peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
18633 	    sizeof(uint32_t));
18634 
18635 	len = sizeof(*cmd) +
18636 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(uint8_t)) +
18637 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(uint8_t)) +
18638 	      sizeof(*mcs) + TLV_HDR_SIZE +
18639 	      (sizeof(*he_mcs) * param->peer_he_mcs_count);
18640 
18641 	m = qwx_wmi_alloc_mbuf(len);
18642 	if (!m)
18643 		return ENOMEM;
18644 
18645 	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
18646 	    sizeof(struct wmi_cmd_hdr));
18647 
18648 	cmd = ptr;
18649 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18650 	    WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
18651 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18652 
18653 	cmd->vdev_id = param->vdev_id;
18654 
18655 	cmd->peer_new_assoc = param->peer_new_assoc;
18656 	cmd->peer_associd = param->peer_associd;
18657 
18658 	qwx_wmi_copy_peer_flags(cmd, param,
18659 	    test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags));
18660 
18661 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_mac);
18662 
18663 	cmd->peer_rate_caps = param->peer_rate_caps;
18664 	cmd->peer_caps = param->peer_caps;
18665 	cmd->peer_listen_intval = param->peer_listen_intval;
18666 	cmd->peer_ht_caps = param->peer_ht_caps;
18667 	cmd->peer_max_mpdu = param->peer_max_mpdu;
18668 	cmd->peer_mpdu_density = param->peer_mpdu_density;
18669 	cmd->peer_vht_caps = param->peer_vht_caps;
18670 	cmd->peer_phymode = param->peer_phymode;
18671 
18672 	/* Update 11ax capabilities */
18673 	cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
18674 	cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
18675 	cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
18676 	cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
18677 	cmd->peer_he_ops = param->peer_he_ops;
18678 	memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
18679 	       sizeof(param->peer_he_cap_phyinfo));
18680 	memcpy(&cmd->peer_ppet, &param->peer_ppet,
18681 	       sizeof(param->peer_ppet));
18682 
18683 	/* Update peer legacy rate information */
18684 	ptr += sizeof(*cmd);
18685 
18686 	tlv = ptr;
18687 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18688 	    FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
18689 
18690 	ptr += TLV_HDR_SIZE;
18691 
18692 	cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
18693 	memcpy(ptr, param->peer_legacy_rates.rates,
18694 	    param->peer_legacy_rates.num_rates);
18695 
18696 	/* Update peer HT rate information */
18697 	ptr += peer_legacy_rates_align;
18698 
18699 	tlv = ptr;
18700 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
18701 	    FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
18702 	ptr += TLV_HDR_SIZE;
18703 	cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
18704 	memcpy(ptr, param->peer_ht_rates.rates,
18705 	    param->peer_ht_rates.num_rates);
18706 
18707 	/* VHT Rates */
18708 	ptr += peer_ht_rates_align;
18709 
18710 	mcs = ptr;
18711 
18712 	mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
18713 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
18714 
18715 	cmd->peer_nss = param->peer_nss;
18716 
18717 	/* Update bandwidth-NSS mapping */
18718 	cmd->peer_bw_rxnss_override = 0;
18719 	cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
18720 
18721 	if (param->vht_capable) {
18722 		mcs->rx_max_rate = param->rx_max_rate;
18723 		mcs->rx_mcs_set = param->rx_mcs_set;
18724 		mcs->tx_max_rate = param->tx_max_rate;
18725 		mcs->tx_mcs_set = param->tx_mcs_set;
18726 	}
18727 
18728 	/* HE Rates */
18729 	cmd->peer_he_mcs = param->peer_he_mcs_count;
18730 	cmd->min_data_rate = param->min_data_rate;
18731 
18732 	ptr += sizeof(*mcs);
18733 
18734 	len = param->peer_he_mcs_count * sizeof(*he_mcs);
18735 
18736 	tlv = ptr;
18737 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18738 	    FIELD_PREP(WMI_TLV_LEN, len);
18739 	ptr += TLV_HDR_SIZE;
18740 
18741 	/* Loop through the HE rate set */
18742 	for (i = 0; i < param->peer_he_mcs_count; i++) {
18743 		he_mcs = ptr;
18744 		he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18745 		    WMI_TAG_HE_RATE_SET) |
18746 		    FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE);
18747 
18748 		he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
18749 		he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
18750 		ptr += sizeof(*he_mcs);
18751 	}
18752 
18753 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_ASSOC_CMDID);
18754 	if (ret) {
18755 		if (ret != ESHUTDOWN) {
18756 			printf("%s: failed to send WMI_PEER_ASSOC_CMDID\n",
18757 			    sc->sc_dev.dv_xname);
18758 		}
18759 		m_freem(m);
18760 		return ret;
18761 	}
18762 
18763 	DNPRINTF(QWX_D_WMI, "%s: cmd peer assoc vdev id %d assoc id %d "
18764 	    "peer mac %s peer_flags %x rate_caps %x peer_caps %x "
18765 	    "listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d "
18766 	    "peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x "
18767 	    "he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
18768 	    __func__, cmd->vdev_id, cmd->peer_associd,
18769 	    ether_sprintf(param->peer_mac),
18770 	    cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
18771 	    cmd->peer_listen_intval, cmd->peer_ht_caps,
18772 	    cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
18773 	    cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info,
18774 	    cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
18775 	    cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
18776 	    cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override);
18777 
18778 	return 0;
18779 }
18780 
/*
 * Copy the host-side target resource configuration into the wire-format
 * WMI resource config structure, field by field.  Used while building
 * the WMI_INIT_CMDID message; the caller sets wmi_cfg->tlv_header
 * afterwards (see qwx_init_cmd_send()).
 */
void
qwx_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
    struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
	    tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
	    tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
	    tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
	    tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
	    tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
	    tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
	    tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->flag1;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
#ifdef notyet /* 6 GHz support */
	wmi_cfg->host_service_flags &=
	    ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
	    WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
#endif
}
18853 
18854 int
qwx_init_cmd_send(struct qwx_pdev_wmi * wmi,struct wmi_init_cmd_param * param)18855 qwx_init_cmd_send(struct qwx_pdev_wmi *wmi, struct wmi_init_cmd_param *param)
18856 {
18857 	struct mbuf *m;
18858 	struct wmi_init_cmd *cmd;
18859 	struct wmi_resource_config *cfg;
18860 	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
18861 	struct wmi_pdev_band_to_mac *band_to_mac;
18862 	struct wlan_host_mem_chunk *host_mem_chunks;
18863 	struct wmi_tlv *tlv;
18864 	size_t ret, len;
18865 	void *ptr;
18866 	uint32_t hw_mode_len = 0;
18867 	uint16_t idx;
18868 
18869 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
18870 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
18871 		    (param->num_band_to_mac * sizeof(*band_to_mac));
18872 
18873 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
18874 	    (param->num_mem_chunks ?
18875 	    (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
18876 
18877 	m = qwx_wmi_alloc_mbuf(len);
18878 	if (!m)
18879 		return ENOMEM;
18880 
18881 	cmd = (struct wmi_init_cmd *)(mtod(m, uint8_t *) +
18882 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
18883 
18884 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
18885 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
18886 
18887 	ptr = mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
18888 	   sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
18889 	cfg = ptr;
18890 
18891 	qwx_wmi_copy_resource_config(cfg, param->res_cfg);
18892 
18893 	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
18894 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
18895 
18896 	ptr += sizeof(*cfg);
18897 	host_mem_chunks = ptr + TLV_HDR_SIZE;
18898 	len = sizeof(struct wlan_host_mem_chunk);
18899 
18900 	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
18901 		host_mem_chunks[idx].tlv_header =
18902 		    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
18903 		    FIELD_PREP(WMI_TLV_LEN, len);
18904 
18905 		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
18906 		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
18907 		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
18908 
18909 		DNPRINTF(QWX_D_WMI,
18910 		    "%s: host mem chunk req_id %d paddr 0x%llx len %d\n",
18911 		    __func__, param->mem_chunks[idx].req_id,
18912 		    (uint64_t)param->mem_chunks[idx].paddr,
18913 		    param->mem_chunks[idx].len);
18914 	}
18915 	cmd->num_host_mem_chunks = param->num_mem_chunks;
18916 	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
18917 
18918 	/* num_mem_chunks is zero */
18919 	tlv = ptr;
18920 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18921 	    FIELD_PREP(WMI_TLV_LEN, len);
18922 	ptr += TLV_HDR_SIZE + len;
18923 
18924 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
18925 		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
18926 		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18927 		    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
18928 		    FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE);
18929 
18930 		hw_mode->hw_mode_index = param->hw_mode_id;
18931 		hw_mode->num_band_to_mac = param->num_band_to_mac;
18932 
18933 		ptr += sizeof(*hw_mode);
18934 
18935 		len = param->num_band_to_mac * sizeof(*band_to_mac);
18936 		tlv = ptr;
18937 		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
18938 		    FIELD_PREP(WMI_TLV_LEN, len);
18939 
18940 		ptr += TLV_HDR_SIZE;
18941 		len = sizeof(*band_to_mac);
18942 
18943 		for (idx = 0; idx < param->num_band_to_mac; idx++) {
18944 			band_to_mac = (void *)ptr;
18945 
18946 			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
18947 			    WMI_TAG_PDEV_BAND_TO_MAC) |
18948 			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
18949 			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
18950 			band_to_mac->start_freq =
18951 			    param->band_to_mac[idx].start_freq;
18952 			band_to_mac->end_freq =
18953 			    param->band_to_mac[idx].end_freq;
18954 			ptr += sizeof(*band_to_mac);
18955 		}
18956 	}
18957 
18958 	ret = qwx_wmi_cmd_send(wmi, m, WMI_INIT_CMDID);
18959 	if (ret) {
18960 		if (ret != ESHUTDOWN)
18961 			printf("%s: failed to send WMI_INIT_CMDID\n", __func__);
18962 		m_freem(m);
18963 		return ret;
18964 	}
18965 
18966 	DNPRINTF(QWX_D_WMI, "%s: cmd wmi init\n", __func__);
18967 
18968 	return 0;
18969 }
18970 
18971 int
qwx_wmi_cmd_init(struct qwx_softc * sc)18972 qwx_wmi_cmd_init(struct qwx_softc *sc)
18973 {
18974 	struct qwx_wmi_base *wmi_sc = &sc->wmi;
18975 	struct wmi_init_cmd_param init_param;
18976 	struct target_resource_config  config;
18977 
18978 	memset(&init_param, 0, sizeof(init_param));
18979 	memset(&config, 0, sizeof(config));
18980 
18981 	sc->hw_params.hw_ops->wmi_init_config(sc, &config);
18982 
18983 	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT))
18984 		config.is_reg_cc_ext_event_supported = 1;
18985 
18986 	memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
18987 
18988 	init_param.res_cfg = &wmi_sc->wlan_resource_config;
18989 	init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
18990 	init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
18991 	init_param.mem_chunks = wmi_sc->mem_chunks;
18992 
18993 	if (sc->hw_params.single_pdev_only)
18994 		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
18995 
18996 	init_param.num_band_to_mac = sc->num_radios;
18997 	qwx_fill_band_to_mac_param(sc, init_param.band_to_mac);
18998 
18999 	return qwx_init_cmd_send(&wmi_sc->wmi[0], &init_param);
19000 }
19001 
19002 int
qwx_wmi_wait_for_unified_ready(struct qwx_softc * sc)19003 qwx_wmi_wait_for_unified_ready(struct qwx_softc *sc)
19004 {
19005 	int ret;
19006 
19007 	while (!sc->wmi.unified_ready) {
19008 		ret = tsleep_nsec(&sc->wmi.unified_ready, 0, "qwxunfrdy",
19009 		    SEC_TO_NSEC(5));
19010 		if (ret)
19011 			return -1;
19012 	}
19013 
19014 	return 0;
19015 }
19016 
19017 int
qwx_wmi_set_hw_mode(struct qwx_softc * sc,enum wmi_host_hw_mode_config_type mode)19018 qwx_wmi_set_hw_mode(struct qwx_softc *sc,
19019     enum wmi_host_hw_mode_config_type mode)
19020 {
19021 	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
19022 	struct mbuf *m;
19023 	struct qwx_wmi_base *wmi = &sc->wmi;
19024 	int len;
19025 	int ret;
19026 
19027 	len = sizeof(*cmd);
19028 
19029 	m = qwx_wmi_alloc_mbuf(len);
19030 	if (!m)
19031 		return ENOMEM;
19032 
19033 	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)(mtod(m, uint8_t *) +
19034 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19035 
19036 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
19037 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19038 
19039 	cmd->pdev_id = WMI_PDEV_ID_SOC;
19040 	cmd->hw_mode_index = mode;
19041 
19042 	ret = qwx_wmi_cmd_send(&wmi->wmi[0], m, WMI_PDEV_SET_HW_MODE_CMDID);
19043 	if (ret) {
19044 		if (ret != ESHUTDOWN) {
19045 			printf("%s: failed to send "
19046 			    "WMI_PDEV_SET_HW_MODE_CMDID\n", __func__);
19047 		}
19048 		m_freem(m);
19049 		return ret;
19050 	}
19051 
19052 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set hw mode %d\n", __func__,
19053 	    cmd->hw_mode_index);
19054 
19055 	return 0;
19056 }
19057 
19058 int
qwx_wmi_set_sta_ps_param(struct qwx_softc * sc,uint32_t vdev_id,uint8_t pdev_id,uint32_t param,uint32_t param_value)19059 qwx_wmi_set_sta_ps_param(struct qwx_softc *sc, uint32_t vdev_id,
19060      uint8_t pdev_id, uint32_t param, uint32_t param_value)
19061 {
19062 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19063 	struct wmi_sta_powersave_param_cmd *cmd;
19064 	struct mbuf *m;
19065 	int ret;
19066 
19067 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19068 	if (!m)
19069 		return ENOMEM;
19070 
19071 	cmd = (struct wmi_sta_powersave_param_cmd *)(mtod(m, uint8_t *) +
19072 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19073 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
19074 	    WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
19075 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19076 
19077 	cmd->vdev_id = vdev_id;
19078 	cmd->param = param;
19079 	cmd->value = param_value;
19080 
19081 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_PARAM_CMDID);
19082 	if (ret) {
19083 		if (ret != ESHUTDOWN) {
19084 			printf("%s: failed to send "
19085 			    "WMI_STA_POWERSAVE_PARAM_CMDID",
19086 			    sc->sc_dev.dv_xname);
19087 		}
19088 		m_freem(m);
19089 		return ret;
19090 	}
19091 
19092 	DNPRINTF(QWX_D_WMI, "%s: cmd set powersave param vdev_id %d param %d "
19093 	    "value %d\n", __func__, vdev_id, param, param_value);
19094 
19095 	return 0;
19096 }
19097 
/*
 * Submit a management frame for transmission via
 * WMI_MGMT_TX_SEND_CMDID.  The caller must already have loaded
 * tx_data->map with a DMA mapping of 'frame'; the firmware fetches the
 * full frame from that physical address, while only the first
 * WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline into the command.
 * On success, ownership of 'frame' moves to tx_data->m until the tx
 * completion arrives.  Returns 0 on success or an errno value.
 */
int
qwx_wmi_mgmt_send(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
    uint32_t buf_id, struct mbuf *frame, struct qwx_tx_data *tx_data)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct mbuf *m;
	uint32_t buf_len;
	int ret, len;
	uint64_t paddr;

	/* Physical address of the (already DMA-mapped) frame. */
	paddr = tx_data->map->dm_segs[0].ds_addr;

	/* Inline at most WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame. */
	buf_len = frame->m_pkthdr.len < WMI_MGMT_SEND_DOWNLD_LEN ?
	    frame->m_pkthdr.len : WMI_MGMT_SEND_DOWNLD_LEN;

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arvif->vdev_id;
	cmd->desc_id = buf_id;	/* echoed back in the tx completion event */
	cmd->chanfreq = 0;
	cmd->paddr_lo = paddr & 0xffffffff;
	cmd->paddr_hi = paddr >> 32;
	cmd->frame_len = frame->m_pkthdr.len;
	cmd->buf_len = buf_len;
	cmd->tx_params_valid = 0;

	/* Byte-array TLV carrying the inlined head of the frame. */
	frame_tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
	    FIELD_PREP(WMI_TLV_LEN, buf_len);

	memcpy(frame_tlv->value, mtod(frame, void *), buf_len);
#if 0 /* Not needed on OpenBSD? */
	ath11k_ce_byte_swap(frame_tlv->value, buf_len);
#endif
	ret = qwx_wmi_cmd_send(wmi, m, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit "
			    "WMI_MGMT_TX_SEND_CMDID cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd mgmt tx send", __func__);

	/* Keep the frame around until the tx completion event. */
	tx_data->m = frame;
	return 0;
}
19160 
/*
 * Create a virtual device (vdev) in firmware by sending
 * WMI_VDEV_CREATE_CMDID.  The command carries the vdev MAC address and
 * a tx/rx stream configuration TLV for both the 2 GHz and 5 GHz bands.
 * Returns 0 on success or an errno value.
 */
int
qwx_wmi_vdev_create(struct qwx_softc *sc, uint8_t *macaddr,
    struct vdev_create_params *param)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
	struct wmi_vdev_create_cmd *cmd;
	struct mbuf *m;
	struct wmi_vdev_txrx_streams *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	cmd->mbssid_flags = param->mbssid_flags;
	cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;

	IEEE80211_ADDR_COPY(cmd->vdev_macaddr.addr, macaddr);

	/* Array TLV holding one txrx_streams entry per band. */
	ptr = (void *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	/* 2 GHz band stream configuration (chains[0]). */
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams = param->chains[0].tx;
	txrx_streams->supported_rx_streams = param->chains[0].rx;

	/* 5 GHz band stream configuration (chains[1]). */
	txrx_streams++;
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams = param->chains[1].tx;
	txrx_streams->supported_rx_streams = param->chains[1].rx;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_CREATE_CMDID\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev create id %d type %d subtype %d "
	    "macaddr %s pdevid %d\n", __func__, param->if_id, param->type,
	    param->subtype, ether_sprintf(macaddr), param->pdev_id);

	return ret;
}
19242 
19243 int
qwx_wmi_vdev_set_param_cmd(struct qwx_softc * sc,uint32_t vdev_id,uint8_t pdev_id,uint32_t param_id,uint32_t param_value)19244 qwx_wmi_vdev_set_param_cmd(struct qwx_softc *sc, uint32_t vdev_id,
19245     uint8_t pdev_id, uint32_t param_id, uint32_t param_value)
19246 {
19247 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19248 	struct wmi_vdev_set_param_cmd *cmd;
19249 	struct mbuf *m;
19250 	int ret;
19251 
19252 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19253 	if (!m)
19254 		return ENOMEM;
19255 
19256 	cmd = (struct wmi_vdev_set_param_cmd *)(mtod(m, uint8_t *) +
19257 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19258 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
19259 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19260 
19261 	cmd->vdev_id = vdev_id;
19262 	cmd->param_id = param_id;
19263 	cmd->param_value = param_value;
19264 
19265 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_SET_PARAM_CMDID);
19266 	if (ret) {
19267 		if (ret != ESHUTDOWN) {
19268 			printf("%s: failed to send WMI_VDEV_SET_PARAM_CMDID\n",
19269 			    sc->sc_dev.dv_xname);
19270 		}
19271 		m_freem(m);
19272 		return ret;
19273 	}
19274 
19275 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev set param vdev 0x%x param %d "
19276 	    "value %d\n", __func__, vdev_id, param_id, param_value);
19277 
19278 	return 0;
19279 }
19280 
/*
 * Bring a vdev up by sending WMI_VDEV_UP_CMDID after association.
 * 'aid' is the association ID, 'bssid' the BSS to join.  For multiple
 * BSSID (MBSSID) setups, 'tx_bssid' (may be NULL), the non-transmitting
 * profile index and profile count describe the transmitting BSS.
 * Returns 0 on success or an errno value.
 */
int
qwx_wmi_vdev_up(struct qwx_softc *sc, uint32_t vdev_id, uint32_t pdev_id,
    uint32_t aid, const uint8_t *bssid, uint8_t *tx_bssid,
    uint32_t nontx_profile_idx, uint32_t nontx_profile_cnt)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_up_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->vdev_assoc_id = aid;

	IEEE80211_ADDR_COPY(cmd->vdev_bssid.addr, bssid);

	cmd->nontx_profile_idx = nontx_profile_idx;
	cmd->nontx_profile_cnt = nontx_profile_cnt;
	if (tx_bssid)
		IEEE80211_ADDR_COPY(cmd->tx_vdev_bssid.addr, tx_bssid);
#if 0
	if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
		bss_conf = &arvif->vif->bss_conf;

		if (bss_conf->nontransmitted) {
			ether_addr_copy(cmd->tx_vdev_bssid.addr,
					bss_conf->transmitter_bssid);
			cmd->nontx_profile_idx = bss_conf->bssid_index;
			cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
		}
	}
#endif
	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_UP_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit WMI_VDEV_UP cmd\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev up id 0x%x assoc id %d bssid %s\n",
	    __func__, vdev_id, aid, ether_sprintf((u_char *)bssid));

	return 0;
}
19336 
19337 int
qwx_wmi_vdev_down(struct qwx_softc * sc,uint32_t vdev_id,uint8_t pdev_id)19338 qwx_wmi_vdev_down(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id)
19339 {
19340 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19341 	struct wmi_vdev_down_cmd *cmd;
19342 	struct mbuf *m;
19343 	int ret;
19344 
19345 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19346 	if (!m)
19347 		return ENOMEM;
19348 
19349 	cmd = (struct wmi_vdev_down_cmd *)(mtod(m, uint8_t *) +
19350 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19351 
19352 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
19353 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19354 	cmd->vdev_id = vdev_id;
19355 
19356 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_DOWN_CMDID);
19357 	if (ret) {
19358 		if (ret != ESHUTDOWN) {
19359 			printf("%s: failed to submit WMI_VDEV_DOWN cmd\n",
19360 			    sc->sc_dev.dv_xname);
19361 		}
19362 		m_freem(m);
19363 		return ret;
19364 	}
19365 
19366 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev down id 0x%x\n", __func__, vdev_id);
19367 
19368 	return 0;
19369 }
19370 
19371 void
qwx_wmi_put_wmi_channel(struct wmi_channel * chan,struct wmi_vdev_start_req_arg * arg)19372 qwx_wmi_put_wmi_channel(struct wmi_channel *chan,
19373     struct wmi_vdev_start_req_arg *arg)
19374 {
19375 	uint32_t center_freq1 = arg->channel.band_center_freq1;
19376 
19377 	memset(chan, 0, sizeof(*chan));
19378 
19379 	chan->mhz = arg->channel.freq;
19380 	chan->band_center_freq1 = arg->channel.band_center_freq1;
19381 
19382 	if (arg->channel.mode == MODE_11AX_HE160) {
19383 		if (arg->channel.freq > arg->channel.band_center_freq1)
19384 			chan->band_center_freq1 = center_freq1 + 40;
19385 		else
19386 			chan->band_center_freq1 = center_freq1 - 40;
19387 
19388 		chan->band_center_freq2 = arg->channel.band_center_freq1;
19389 	} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
19390 	    (arg->channel.mode == MODE_11AX_HE80_80)) {
19391 		chan->band_center_freq2 = arg->channel.band_center_freq2;
19392 	} else
19393 		chan->band_center_freq2 = 0;
19394 
19395 	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
19396 	if (arg->channel.passive)
19397 		chan->info |= WMI_CHAN_INFO_PASSIVE;
19398 	if (arg->channel.allow_ibss)
19399 		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
19400 	if (arg->channel.allow_ht)
19401 		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
19402 	if (arg->channel.allow_vht)
19403 		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
19404 	if (arg->channel.allow_he)
19405 		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
19406 	if (arg->channel.ht40plus)
19407 		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
19408 	if (arg->channel.chan_radar)
19409 		chan->info |= WMI_CHAN_INFO_DFS;
19410 	if (arg->channel.freq2_radar)
19411 		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
19412 
19413 	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
19414 	    arg->channel.max_power) |
19415 	    FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
19416 	    arg->channel.max_reg_power);
19417 
19418 	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
19419 	    arg->channel.max_antenna_gain) |
19420 	    FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
19421 	    arg->channel.max_power);
19422 }
19423 
19424 int
qwx_wmi_vdev_stop(struct qwx_softc * sc,uint8_t vdev_id,uint8_t pdev_id)19425 qwx_wmi_vdev_stop(struct qwx_softc *sc, uint8_t vdev_id, uint8_t pdev_id)
19426 {
19427 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19428 	struct wmi_vdev_stop_cmd *cmd;
19429 	struct mbuf *m;
19430 	int ret;
19431 
19432 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19433 	if (!m)
19434 		return ENOMEM;
19435 
19436 	cmd = (struct wmi_vdev_stop_cmd *)(mtod(m, uint8_t *) +
19437 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19438 
19439 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
19440 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19441 	cmd->vdev_id = vdev_id;
19442 
19443 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_STOP_CMDID);
19444 	if (ret) {
19445 		if (ret != ESHUTDOWN) {
19446 			printf("%s: failed to submit WMI_VDEV_STOP cmd\n",
19447 			    sc->sc_dev.dv_xname);
19448 		}
19449 		m_freem(m);
19450 		return ret;
19451 	}
19452 
19453 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev stop id 0x%x\n", __func__, vdev_id);
19454 
19455 	return ret;
19456 }
19457 
/*
 * Start (or restart) a vdev by sending WMI_VDEV_START_REQUEST_CMDID or
 * WMI_VDEV_RESTART_REQUEST_CMDID.  The command carries beacon/DTIM
 * parameters, the SSID (start only), a channel TLV and an empty NoA
 * descriptor array TLV.  Returns 0 on success or an errno value.
 */
int
qwx_wmi_vdev_start(struct qwx_softc *sc, struct wmi_vdev_start_req_arg *arg,
    int pdev_id, int restart)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_start_request_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	/* The SSID must fit into the fixed-size field in the command. */
	if (arg->ssid_len > sizeof(cmd->ssid.ssid))
		return EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_VDEV_START_REQUEST_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;
	cmd->mbssid_flags = arg->mbssid_flags;
	cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;

	/* SSID and hidden/PMF flags only apply to a fresh start. */
	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = arg->ssid_len;
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
		if (arg->pmf_enabled)
			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
	}

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;

	/* Channel TLV follows the fixed command structure. */
	ptr = mtod(m, void *) + sizeof(struct ath11k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
	chan = ptr;

	qwx_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	/* Empty NoA descriptor array TLV (length 0). */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ret = qwx_wmi_cmd_send(wmi, m, restart ?
	    WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to submit vdev_%s cmd\n",
			    sc->sc_dev.dv_xname, restart ? "restart" : "start");
		}
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
	   __func__, restart ? "restart" : "start", arg->vdev_id,
	   arg->channel.freq, arg->channel.mode);

	return ret;
}
19549 
int
qwx_core_start(struct qwx_softc *sc)
{
	int ret;

	/*
	 * Bring up the control path: WMI, HTC, the host interface, the
	 * HTT datapath connection, and finally the firmware init command
	 * sequence. The steps are order-sensitive; errors unwind through
	 * the goto labels at the bottom in reverse order of setup.
	 */
	ret = qwx_wmi_attach(sc);
	if (ret) {
		printf("%s: failed to attach wmi: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_htc_init(sc);
	if (ret) {
		printf("%s: failed to init htc: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_wmi_detach;
	}

	/* Start the bus-specific transport (e.g. PCI/MHI). */
	ret = sc->ops.start(sc);
	if (ret) {
		printf("%s: failed to start host interface: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_wmi_detach;
	}

	ret = qwx_htc_wait_target(sc);
	if (ret) {
		printf("%s: failed to connect to HTC: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwx_dp_htt_connect(&sc->dp);
	if (ret) {
		printf("%s: failed to connect to HTT: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwx_wmi_connect(sc);
	if (ret) {
		printf("%s: failed to connect wmi: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	/* Reset before starting HTC so the service-ready wait below
	 * observes only events generated after this point. */
	sc->wmi.service_ready = 0;

	ret = qwx_htc_start(&sc->htc);
	if (ret) {
		printf("%s: failed to start HTC: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}

	ret = qwx_wmi_wait_for_service_ready(sc);
	if (ret) {
		printf("%s: failed to receive wmi service ready event: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err_hif_stop;
	}
#if 0
	ret = ath11k_mac_allocate(ab);
	if (ret) {
		ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
			   ret);
		goto err_hif_stop;
	}
	ath11k_dp_pdev_pre_alloc(sc);
#endif
	ret = qwx_dp_pdev_reo_setup(sc);
	if (ret) {
		printf("%s: failed to initialize reo destination rings: %d\n",
		    __func__, ret);
		goto err_mac_destroy;
	}

	ret = qwx_wmi_cmd_init(sc);
	if (ret) {
		printf("%s: failed to send wmi init cmd: %d\n", __func__, ret);
		goto err_reo_cleanup;
	}

	ret = qwx_wmi_wait_for_unified_ready(sc);
	if (ret) {
		printf("%s: failed to receive wmi unified ready event: %d\n",
		    __func__, ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (sc->hw_params.single_pdev_only &&
	    sc->hw_params.num_rxmda_per_pdev > 1) {
		ret = qwx_wmi_set_hw_mode(sc, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			printf("%s: failed to send dbs mode: %d\n",
			    __func__, ret);
			/* NOTE(review): jumps past err_reo_cleanup even
			 * though REO setup already succeeded; this mirrors
			 * the Linux ath11k code — confirm intentional. */
			goto err_hif_stop;
		}
	}

	ret = qwx_dp_tx_htt_h2t_ver_req_msg(sc);
	if (ret) {
		/* ENOTSUP is tolerated silently but still fails bring-up. */
		if (ret != ENOTSUP) {
			printf("%s: failed to send htt version "
			    "request message: %d\n", __func__, ret);
		}
		goto err_reo_cleanup;
	}

	return 0;
err_reo_cleanup:
	qwx_dp_pdev_reo_cleanup(sc);
err_mac_destroy:
#if 0
	ath11k_mac_destroy(ab);
#endif
err_hif_stop:
	sc->ops.stop(sc);
err_wmi_detach:
	qwx_wmi_detach(sc);
	return ret;
}
19674 
19675 void
qwx_core_stop(struct qwx_softc * sc)19676 qwx_core_stop(struct qwx_softc *sc)
19677 {
19678 	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
19679 		qwx_qmi_firmware_stop(sc);
19680 
19681 	sc->ops.stop(sc);
19682 	qwx_wmi_detach(sc);
19683 	qwx_dp_pdev_reo_cleanup(sc);
19684 }
19685 
void
qwx_core_pdev_destroy(struct qwx_softc *sc)
{
	/* Counterpart of qwx_core_pdev_create(): release per-pdev DP state. */
	qwx_dp_pdev_free(sc);
}
19691 
19692 int
qwx_core_pdev_create(struct qwx_softc * sc)19693 qwx_core_pdev_create(struct qwx_softc *sc)
19694 {
19695 	int ret;
19696 
19697 	ret = qwx_dp_pdev_alloc(sc);
19698 	if (ret) {
19699 		printf("%s: failed to attach DP pdev: %d\n",
19700 		    sc->sc_dev.dv_xname, ret);
19701 		return ret;
19702 	}
19703 
19704 	ret = qwx_mac_register(sc);
19705 	if (ret) {
19706 		printf("%s: failed register the radio with mac80211: %d\n",
19707 		    sc->sc_dev.dv_xname, ret);
19708 		goto err_dp_pdev_free;
19709 	}
19710 #if 0
19711 
19712 	ret = ath11k_thermal_register(ab);
19713 	if (ret) {
19714 		ath11k_err(ab, "could not register thermal device: %d\n",
19715 			   ret);
19716 		goto err_mac_unregister;
19717 	}
19718 
19719 	ret = ath11k_spectral_init(ab);
19720 	if (ret) {
19721 		ath11k_err(ab, "failed to init spectral %d\n", ret);
19722 		goto err_thermal_unregister;
19723 	}
19724 #endif
19725 	return 0;
19726 #if 0
19727 err_thermal_unregister:
19728 	ath11k_thermal_unregister(ab);
19729 err_mac_unregister:
19730 	ath11k_mac_unregister(ab);
19731 #endif
19732 err_dp_pdev_free:
19733 	qwx_dp_pdev_free(sc);
19734 #if 0
19735 err_pdev_debug:
19736 	ath11k_debugfs_pdev_destroy(ab);
19737 #endif
19738 	return ret;
19739 }
19740 
void
qwx_core_deinit(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	/* Block network interrupts for the duration of the teardown. */
	int s = splnet();

#ifdef notyet
	mutex_lock(&ab->core_lock);
#endif
	/* Quiesce the device before dismantling software state. */
	sc->ops.irq_disable(sc);

	qwx_core_stop(sc);
	qwx_core_pdev_destroy(sc);
#ifdef notyet
	mutex_unlock(&ab->core_lock);
#endif
	sc->ops.power_down(sc);
#if 0
	ath11k_mac_destroy(ab);
	ath11k_debugfs_soc_destroy(ab);
#endif
	qwx_dp_free(sc);
#if 0
	ath11k_reg_free(ab);
#endif
	qwx_qmi_deinit_service(sc);

	/* Shadow register slots are reassigned on the next init. */
	hal->num_shadow_reg_configured = 0;

	splx(s);
}
19772 
19773 int
qwx_core_qmi_firmware_ready(struct qwx_softc * sc)19774 qwx_core_qmi_firmware_ready(struct qwx_softc *sc)
19775 {
19776 	int ret;
19777 
19778 	ret = qwx_core_start_firmware(sc, sc->fw_mode);
19779 	if (ret) {
19780 		printf("%s: failed to start firmware: %d\n",
19781 		    sc->sc_dev.dv_xname, ret);
19782 		return ret;
19783 	}
19784 
19785 	ret = qwx_ce_init_pipes(sc);
19786 	if (ret) {
19787 		printf("%s: failed to initialize CE: %d\n",
19788 		    sc->sc_dev.dv_xname, ret);
19789 		goto err_firmware_stop;
19790 	}
19791 
19792 	ret = qwx_dp_alloc(sc);
19793 	if (ret) {
19794 		printf("%s: failed to init DP: %d\n",
19795 		    sc->sc_dev.dv_xname, ret);
19796 		goto err_firmware_stop;
19797 	}
19798 
19799 	switch (sc->crypto_mode) {
19800 	case ATH11K_CRYPT_MODE_SW:
19801 		set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19802 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19803 		break;
19804 	case ATH11K_CRYPT_MODE_HW:
19805 		clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
19806 		clear_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19807 		break;
19808 	default:
19809 		printf("%s: invalid crypto_mode: %d\n",
19810 		    sc->sc_dev.dv_xname, sc->crypto_mode);
19811 		return EINVAL;
19812 	}
19813 
19814 	if (sc->frame_mode == ATH11K_HW_TXRX_RAW)
19815 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
19816 #if 0
19817 	mutex_lock(&ab->core_lock);
19818 #endif
19819 	ret = qwx_core_start(sc);
19820 	if (ret) {
19821 		printf("%s: failed to start core: %d\n",
19822 		    sc->sc_dev.dv_xname, ret);
19823 		goto err_dp_free;
19824 	}
19825 
19826 	if (!sc->attached) {
19827 		printf("%s: %s fw 0x%x address %s\n", sc->sc_dev.dv_xname,
19828 		    sc->hw_params.name, sc->qmi_target.fw_version,
19829 		    ether_sprintf(sc->mac_addr));
19830 	}
19831 
19832 	ret = qwx_core_pdev_create(sc);
19833 	if (ret) {
19834 		printf("%s: failed to create pdev core: %d\n",
19835 		    sc->sc_dev.dv_xname, ret);
19836 		goto err_core_stop;
19837 	}
19838 
19839 #if 0 /* TODO: Is this in the right spot for OpenBSD? */
19840 	sc->ops.irq_enable(sc);
19841 #endif
19842 
19843 #if 0
19844 	mutex_unlock(&ab->core_lock);
19845 #endif
19846 
19847 	return 0;
19848 err_core_stop:
19849 	qwx_core_stop(sc);
19850 #if 0
19851 	ath11k_mac_destroy(ab);
19852 #endif
19853 err_dp_free:
19854 	qwx_dp_free(sc);
19855 #if 0
19856 	mutex_unlock(&ab->core_lock);
19857 #endif
19858 err_firmware_stop:
19859 	qwx_qmi_firmware_stop(sc);
19860 
19861 	return ret;
19862 }
19863 
19864 void
qwx_qmi_fw_init_done(struct qwx_softc * sc)19865 qwx_qmi_fw_init_done(struct qwx_softc *sc)
19866 {
19867 	int ret = 0;
19868 
19869 	clear_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
19870 
19871 	if (sc->qmi_cal_done == 0 && sc->hw_params.cold_boot_calib) {
19872 		qwx_qmi_process_coldboot_calibration(sc);
19873 	} else {
19874 		clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);
19875 		clear_bit(ATH11K_FLAG_RECOVERY, sc->sc_flags);
19876 		ret = qwx_core_qmi_firmware_ready(sc);
19877 		if (ret) {
19878 			set_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
19879 			return;
19880 		}
19881 	}
19882 }
19883 
int
qwx_qmi_event_server_arrive(struct qwx_softc *sc)
{
	int ret;

	/*
	 * The QMI server has shown up: walk the firmware bring-up
	 * handshake (indication register, host caps, memory segments,
	 * BDF, M3 image) and then wait for the firmware-init-done event.
	 */
	sc->fw_init_done = 0;
	/* The firmware may request memory at any point during the
	 * handshake below; flag that such requests are expected. */
	sc->expect_fwmem_req = 1;

	ret = qwx_qmi_fw_ind_register_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi firmware indication: %d\n",
		    sc->sc_dev.dv_xname, ret);
		sc->expect_fwmem_req = 0;
		return ret;
	}

	ret = qwx_qmi_host_cap_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi host cap: %d\n",
		    sc->sc_dev.dv_xname, ret);
		sc->expect_fwmem_req = 0;
		return ret;
	}

	/* EBUSY indicates the firmware rejected the first attempt;
	 * one retry is given before giving up. */
	ret = qwx_qmi_mem_seg_send(sc);
	if (ret == EBUSY)
		ret = qwx_qmi_mem_seg_send(sc);
	sc->expect_fwmem_req = 0;
	if (ret) {
		printf("%s: failed to send qmi memory segments: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_event_load_bdf(sc);
	if (ret < 0) {
		printf("%s: qmi failed to download BDF:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_wlanfw_m3_info_send(sc);
	if (ret) {
		printf("%s: qmi m3 info send failed:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Sleep until the fw-init-done interrupt wakes us, up to 10s per
	 * wait. NOTE(review): returns -1 here while other paths return
	 * positive errnos — confirm callers only test for non-zero. */
	while (!sc->fw_init_done) {
		ret = tsleep_nsec(&sc->fw_init_done, 0, "qwxfwinit",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw init timeout\n", sc->sc_dev.dv_xname);
			return -1;
		}
	}

	qwx_qmi_fw_init_done(sc);
	return 0;
}
19944 
19945 int
qwx_core_init(struct qwx_softc * sc)19946 qwx_core_init(struct qwx_softc *sc)
19947 {
19948 	int error;
19949 
19950 	error = qwx_qmi_init_service(sc);
19951 	if (error) {
19952 		printf("failed to initialize qmi :%d\n", error);
19953 		return error;
19954 	}
19955 
19956 	error = sc->ops.power_up(sc);
19957 	if (error)
19958 		qwx_qmi_deinit_service(sc);
19959 
19960 	return error;
19961 }
19962 
19963 int
qwx_init_hw_params(struct qwx_softc * sc)19964 qwx_init_hw_params(struct qwx_softc *sc)
19965 {
19966 	const struct ath11k_hw_params *hw_params = NULL;
19967 	int i;
19968 
19969 	for (i = 0; i < nitems(ath11k_hw_params); i++) {
19970 		hw_params = &ath11k_hw_params[i];
19971 
19972 		if (hw_params->hw_rev == sc->sc_hw_rev)
19973 			break;
19974 	}
19975 
19976 	if (i == nitems(ath11k_hw_params)) {
19977 		printf("%s: Unsupported hardware version: 0x%x\n",
19978 		    sc->sc_dev.dv_xname, sc->sc_hw_rev);
19979 		return EINVAL;
19980 	}
19981 
19982 	sc->hw_params = *hw_params;
19983 
19984 	DPRINTF("%s: %s\n", sc->sc_dev.dv_xname, sc->hw_params.name);
19985 
19986 	return 0;
19987 }
19988 
/*
 * SRNG configuration template. This table is copied into hal->srng_config
 * at init time and then patched with chip-specific register addresses by
 * qwx_hal_srng_create_config(). Entry sizes are in 32-bit words
 * (hence the ">> 2"); lmac_ring marks rings replicated per MAC.
 */
static const struct hal_srng_config hw_srng_config_templ[QWX_NUM_SRNG_CFG] = {
	/* TODO: max_rings can populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},

	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of theREO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring =  false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 5,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
20170 
int
qwx_hal_srng_create_config(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *s;

	/*
	 * Copy the shared template and fill in the chip-specific register
	 * addresses. reg_start[0] is the ring base (R0 group), reg_start[1]
	 * the head pointer (R2 group); reg_size[] gives the stride between
	 * consecutive rings of a multi-ring type.
	 */
	memcpy(hal->srng_config, hw_srng_config_templ,
	    sizeof(hal->srng_config));

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(sc);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_REO2_RING_HP(sc) - HAL_REO1_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(sc);

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(sc);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	/* NOTE(review): CE_SRC uses the HAL_CE_DST_* offset macros below;
	 * this mirrors the Linux ath11k HAL code (macro naming oddity),
	 * not a copy/paste error. */
	s = &hal->srng_config[HAL_CE_SRC];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);

	s = &hal->srng_config[HAL_CE_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_CE_DST_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) +
		HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_STATUS_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;

	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;

	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(sc) -
		HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;

	return 0;
}
20263 
20264 int
qwx_hal_srng_get_ring_id(struct qwx_softc * sc,enum hal_ring_type type,int ring_num,int mac_id)20265 qwx_hal_srng_get_ring_id(struct qwx_softc *sc,
20266     enum hal_ring_type type, int ring_num, int mac_id)
20267 {
20268 	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
20269 	int ring_id;
20270 
20271 	if (ring_num >= srng_config->max_rings) {
20272 		printf("%s: invalid ring number :%d\n", __func__, ring_num);
20273 		return -1;
20274 	}
20275 
20276 	ring_id = srng_config->start_ring_id + ring_num;
20277 	if (srng_config->lmac_ring)
20278 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
20279 
20280 	if (ring_id >= HAL_SRNG_RING_ID_MAX) {
20281 		printf("%s: invalid ring ID :%d\n", __func__, ring_id);
20282 		return -1;
20283 	}
20284 
20285 	return ring_id;
20286 }
20287 
20288 void
qwx_hal_srng_update_hp_tp_addr(struct qwx_softc * sc,int shadow_cfg_idx,enum hal_ring_type ring_type,int ring_num)20289 qwx_hal_srng_update_hp_tp_addr(struct qwx_softc *sc, int shadow_cfg_idx,
20290     enum hal_ring_type ring_type, int ring_num)
20291 {
20292 	struct hal_srng *srng;
20293 	struct ath11k_hal *hal = &sc->hal;
20294 	int ring_id;
20295 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
20296 
20297 	ring_id = qwx_hal_srng_get_ring_id(sc, ring_type, ring_num, 0);
20298 	if (ring_id < 0)
20299 		return;
20300 
20301 	srng = &hal->srng_list[ring_id];
20302 
20303 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
20304 		srng->u.dst_ring.tp_addr = (uint32_t *)(
20305 		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
20306 		    (unsigned long)sc->mem);
20307 	else
20308 		srng->u.src_ring.hp_addr = (uint32_t *)(
20309 		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
20310 		    (unsigned long)sc->mem);
20311 }
20312 
20313 void
qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc * sc,struct hal_srng * srng)20314 qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *sc, struct hal_srng *srng)
20315 {
20316 #ifdef notyet
20317 	lockdep_assert_held(&srng->lock);
20318 #endif
20319 	/* Update the shadow HP if the ring isn't empty. */
20320 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
20321 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
20322 		qwx_hal_srng_access_end(sc, srng);
20323 }
20324 
int
qwx_hal_srng_update_shadow_config(struct qwx_softc *sc,
    enum hal_ring_type ring_type, int ring_num)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
	int shadow_cfg_idx = hal->num_shadow_reg_configured;
	uint32_t target_reg;

	/* Shadow register slots are a fixed-size resource. */
	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
		return EINVAL;

	hal->num_shadow_reg_configured++;

	/* Compute this ring's HP register from the type's base address
	 * plus the per-ring stride. */
	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
		ring_num;

	/* For destination ring, shadow the TP */
	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		target_reg += HAL_OFFSET_FROM_HP_TO_TP;

	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;

	/* update hp/tp addr to hal structure*/
	qwx_hal_srng_update_hp_tp_addr(sc, shadow_cfg_idx, ring_type, ring_num);

	DPRINTF("%s: target_reg %x, shadow reg 0x%x shadow_idx 0x%x, "
	    "ring_type %d, ring num %d\n", __func__, target_reg,
	     HAL_SHADOW_REG(sc, shadow_cfg_idx), shadow_cfg_idx,
	     ring_type, ring_num);

	return 0;
}
20359 
20360 void
qwx_hal_srng_shadow_config(struct qwx_softc * sc)20361 qwx_hal_srng_shadow_config(struct qwx_softc *sc)
20362 {
20363 	struct ath11k_hal *hal = &sc->hal;
20364 	int ring_type, ring_num;
20365 	struct hal_srng_config *cfg;
20366 
20367 	/* update all the non-CE srngs. */
20368 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
20369 		cfg = &hal->srng_config[ring_type];
20370 
20371 		if (ring_type == HAL_CE_SRC ||
20372 		    ring_type == HAL_CE_DST ||
20373 			ring_type == HAL_CE_DST_STATUS)
20374 			continue;
20375 
20376 		if (cfg->lmac_ring)
20377 			continue;
20378 
20379 		for (ring_num = 0; ring_num < cfg->max_rings; ring_num++) {
20380 			qwx_hal_srng_update_shadow_config(sc, ring_type,
20381 			    ring_num);
20382 		}
20383 	}
20384 }
20385 
20386 void
qwx_hal_srng_get_shadow_config(struct qwx_softc * sc,uint32_t ** cfg,uint32_t * len)20387 qwx_hal_srng_get_shadow_config(struct qwx_softc *sc, uint32_t **cfg,
20388     uint32_t *len)
20389 {
20390 	struct ath11k_hal *hal = &sc->hal;
20391 
20392 	*len = hal->num_shadow_reg_configured;
20393 	*cfg = hal->shadow_reg_addr;
20394 }
20395 
20396 int
qwx_hal_alloc_cont_rdp(struct qwx_softc * sc)20397 qwx_hal_alloc_cont_rdp(struct qwx_softc *sc)
20398 {
20399 	struct ath11k_hal *hal = &sc->hal;
20400 	size_t size = sizeof(uint32_t) * HAL_SRNG_RING_ID_MAX;
20401 
20402 	if (hal->rdpmem == NULL) {
20403 		hal->rdpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
20404 		if (hal->rdpmem == NULL) {
20405 			printf("%s: could not allocate RDP DMA memory\n",
20406 			    sc->sc_dev.dv_xname);
20407 			return ENOMEM;
20408 
20409 		}
20410 	}
20411 
20412 	hal->rdp.vaddr = QWX_DMA_KVA(hal->rdpmem);
20413 	hal->rdp.paddr = QWX_DMA_DVA(hal->rdpmem);
20414 	return 0;
20415 }
20416 
20417 void
qwx_hal_free_cont_rdp(struct qwx_softc * sc)20418 qwx_hal_free_cont_rdp(struct qwx_softc *sc)
20419 {
20420 	struct ath11k_hal *hal = &sc->hal;
20421 
20422 	if (hal->rdpmem == NULL)
20423 		return;
20424 
20425 	hal->rdp.vaddr = NULL;
20426 	hal->rdp.paddr = 0L;
20427 	qwx_dmamem_free(sc->sc_dmat, hal->rdpmem);
20428 	hal->rdpmem = NULL;
20429 }
20430 
20431 int
qwx_hal_alloc_cont_wrp(struct qwx_softc * sc)20432 qwx_hal_alloc_cont_wrp(struct qwx_softc *sc)
20433 {
20434 	struct ath11k_hal *hal = &sc->hal;
20435 	size_t size = sizeof(uint32_t) * HAL_SRNG_NUM_LMAC_RINGS;
20436 
20437 	if (hal->wrpmem == NULL) {
20438 		hal->wrpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
20439 		if (hal->wrpmem == NULL) {
20440 			printf("%s: could not allocate WDP DMA memory\n",
20441 			    sc->sc_dev.dv_xname);
20442 			return ENOMEM;
20443 
20444 		}
20445 	}
20446 
20447 	hal->wrp.vaddr = QWX_DMA_KVA(hal->wrpmem);
20448 	hal->wrp.paddr = QWX_DMA_DVA(hal->wrpmem);
20449 	return 0;
20450 }
20451 
20452 void
qwx_hal_free_cont_wrp(struct qwx_softc * sc)20453 qwx_hal_free_cont_wrp(struct qwx_softc *sc)
20454 {
20455 	struct ath11k_hal *hal = &sc->hal;
20456 
20457 	if (hal->wrpmem == NULL)
20458 		return;
20459 
20460 	hal->wrp.vaddr = NULL;
20461 	hal->wrp.paddr = 0L;
20462 	qwx_dmamem_free(sc->sc_dmat, hal->wrpmem);
20463 	hal->wrpmem = NULL;
20464 }
20465 
20466 int
qwx_hal_srng_init(struct qwx_softc * sc)20467 qwx_hal_srng_init(struct qwx_softc *sc)
20468 {
20469 	struct ath11k_hal *hal = &sc->hal;
20470 	int ret;
20471 
20472 	memset(hal, 0, sizeof(*hal));
20473 
20474 	ret = qwx_hal_srng_create_config(sc);
20475 	if (ret)
20476 		goto err_hal;
20477 
20478 	ret = qwx_hal_alloc_cont_rdp(sc);
20479 	if (ret)
20480 		goto err_hal;
20481 
20482 	ret = qwx_hal_alloc_cont_wrp(sc);
20483 	if (ret)
20484 		goto err_free_cont_rdp;
20485 
20486 #ifdef notyet
20487 	qwx_hal_register_srng_key(sc);
20488 #endif
20489 
20490 	return 0;
20491 err_free_cont_rdp:
20492 	qwx_hal_free_cont_rdp(sc);
20493 
20494 err_hal:
20495 	return ret;
20496 }
20497 
/*
 * Program a destination-direction SRNG into hardware (REO register
 * layout): MSI parameters, ring base address and size, interrupt
 * thresholds, the DMA address of this ring's head-pointer slot in the
 * host RDP area, and finally the MISC register which enables the ring.
 */
void
qwx_hal_srng_dst_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t hp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* MSI address/data so this ring can raise its own interrupt. */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		    HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc), val);

		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base address, low 32 bits. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	/* High address bits plus ring size (in 32-bit words). */
	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(sc), val);

	/* Ring id and per-entry size in 32-bit words. */
	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	    FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_ID_OFFSET(sc), val);

	/* interrupt setup */
	/* NOTE(review): the >> 3 presumably converts to 8-usec units,
	 * matching the comment in qwx_hal_srng_src_hw_init() — confirm. */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
	    (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc), val);

	/*
	 * Tell hardware where to DMA the head pointer: this ring's slot
	 * inside the host's RDP area.
	 */
	hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr -
	    (unsigned long)hal->rdp.vaddr);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc),
	    hp_addr & HAL_ADDR_LSB_REG_MASK);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc),
	    hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_TP_OFFSET(sc), 0);
	*srng->u.dst_ring.hp_addr = 0;

	/* Swap settings and the ring-enable bit live in MISC. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_REO1_RING_MISC_OFFSET(sc), val);
}
20572 
/*
 * Program a source-direction SRNG into hardware (TCL register layout):
 * MSI parameters, ring base address and size, interrupt thresholds,
 * the optional low-threshold interrupt, the DMA address of this ring's
 * tail-pointer slot in the host RDP area, and finally the MISC
 * register which enables the ring.
 */
void
qwx_hal_srng_src_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t tp_addr;
	uint32_t reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* MSI address/data so this ring can raise its own interrupt. */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc),
		    val);

		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base address, low 32 bits. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	/* High address bits plus ring size (in 32-bit words). */
	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);

	/* NOTE(review): a REO field macro is used for a TCL register here;
	 * presumably the bit layout is identical — confirm against the
	 * register definitions. */
	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_ID_OFFSET(sc), val);

	/* The WBM idle link ring gets its base/size re-written here. */
	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		sc->ops.write32(sc, reg_base, (uint32_t)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
		    ((uint64_t)srng->ring_base_paddr >>
		    HAL_ADDR_MSB_REG_SHIFT)) |
		    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
		    (srng->entry_size * srng->num_entries));
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
	    srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc), val);

	/* Optional low-threshold interrupt for the source ring. */
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
		    srng->u.src_ring.low_threshold);
	}
	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc), val);

	/*
	 * Tell hardware where to DMA the tail pointer: this ring's slot
	 * inside the host's RDP area.  Not done for the WBM idle link ring.
	 */
	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
		    ((unsigned long)srng->u.src_ring.tp_addr -
		    (unsigned long)hal->rdp.vaddr);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc),
		    tp_addr & HAL_ADDR_LSB_REG_MASK);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc),
		    tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	/* Swap settings and the ring-enable bit live in MISC. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_MISC_OFFSET(sc), val);
}
20677 
20678 void
qwx_hal_srng_hw_init(struct qwx_softc * sc,struct hal_srng * srng)20679 qwx_hal_srng_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
20680 {
20681 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
20682 		qwx_hal_srng_src_hw_init(sc, srng);
20683 	else
20684 		qwx_hal_srng_dst_hw_init(sc, srng);
20685 }
20686 
20687 void
qwx_hal_ce_dst_setup(struct qwx_softc * sc,struct hal_srng * srng,int ring_num)20688 qwx_hal_ce_dst_setup(struct qwx_softc *sc, struct hal_srng *srng, int ring_num)
20689 {
20690 	struct hal_srng_config *srng_config = &sc->hal.srng_config[HAL_CE_DST];
20691 	uint32_t addr;
20692 	uint32_t val;
20693 
20694 	addr = HAL_CE_DST_RING_CTRL +
20695 	    srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
20696 	    ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
20697 
20698 	val = sc->ops.read32(sc, addr);
20699 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
20700 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
20701 	    srng->u.dst_ring.max_buffer_length);
20702 	sc->ops.write32(sc, addr, val);
20703 }
20704 
20705 void
qwx_hal_ce_src_set_desc(void * buf,uint64_t paddr,uint32_t len,uint32_t id,uint8_t byte_swap_data)20706 qwx_hal_ce_src_set_desc(void *buf, uint64_t paddr, uint32_t len, uint32_t id,
20707     uint8_t byte_swap_data)
20708 {
20709 	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
20710 
20711 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
20712 	desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
20713 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
20714 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
20715 	    byte_swap_data) |
20716 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
20717 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
20718 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
20719 }
20720 
20721 void
qwx_hal_ce_dst_set_desc(void * buf,uint64_t paddr)20722 qwx_hal_ce_dst_set_desc(void *buf, uint64_t paddr)
20723 {
20724 	struct hal_ce_srng_dest_desc *desc =
20725 	    (struct hal_ce_srng_dest_desc *)buf;
20726 
20727 	desc->buffer_addr_low = htole32(paddr & HAL_ADDR_LSB_REG_MASK);
20728 	desc->buffer_addr_info = htole32(FIELD_PREP(
20729 	    HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
20730 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)));
20731 }
20732 
20733 uint32_t
qwx_hal_ce_dst_status_get_length(void * buf)20734 qwx_hal_ce_dst_status_get_length(void *buf)
20735 {
20736 	struct hal_ce_srng_dst_status_desc *desc =
20737 		(struct hal_ce_srng_dst_status_desc *)buf;
20738 	uint32_t len;
20739 
20740 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
20741 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
20742 
20743 	return len;
20744 }
20745 
20746 
/*
 * Generic SRNG setup: look up the hal_srng slot for the given ring
 * type/number/mac, populate its software state from 'params', wire up
 * the head/tail pointer locations (in the host RDP/WRP areas or
 * directly in device register space), and, for non-LMAC rings, program
 * the ring into hardware.  Returns the ring id on success, or the
 * negative value returned by qwx_hal_srng_get_ring_id() on failure.
 */
int
qwx_hal_srng_setup(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num, int mac_id, struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	uint32_t lmac_idx;
	int i;
	uint32_t reg_base;

	ring_id = qwx_hal_srng_get_ring_id(sc, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	/* Copy caller-supplied parameters into the ring state. */
	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
	    params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
#if 0
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
#endif

	/* Per-register-group base addresses for this ring instance. */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
		    (ring_num * srng_config->reg_size[i]);
	}

	/* Clear the descriptor ring; sizes are in 32-bit words (<< 2). */
	memset(srng->ring_base_vaddr, 0,
	    (srng->entry_size * srng->num_entries) << 2);

#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
#endif
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		/* Tail pointer lives in this ring's slot of the RDP area. */
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
		    srng->entry_size;
		if (srng_config->lmac_ring) {
			/* LMAC rings: head pointer is shared with firmware
			 * via the WRP area. */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			/* Non-LMAC: head pointer is the device register
			 * itself, unless shadow registers are in use. */
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base);
			else
				DPRINTF("%s: type %d ring_num %d reg_base "
				    "0x%x shadow 0x%lx\n",
				    sc->sc_dev.dv_xname, type, ring_num, reg_base,
				   (unsigned long)srng->u.src_ring.hp_addr -
				   (unsigned long)sc->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		/* Head pointer lives in this ring's slot of the RDP area. */
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			/* Non-LMAC: tail pointer register, offset from the
			 * head pointer register. */
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)));
			else
				DPRINTF("%s: type %d ring_num %d target_reg "
				    "0x%x shadow 0x%lx\n", sc->sc_dev.dv_xname,
				    type, ring_num,
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)),
				    (unsigned long)srng->u.dst_ring.tp_addr -
				    (unsigned long)sc->mem);
		}
	}

	/* LMAC rings are programmed by firmware, not by the host. */
	if (srng_config->lmac_ring)
		return ring_id;

	qwx_hal_srng_hw_init(sc, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		qwx_hal_ce_dst_setup(sc, srng, ring_num);
	}

	return ring_id;
}
20874 
20875 size_t
qwx_hal_ce_get_desc_size(enum hal_ce_desc type)20876 qwx_hal_ce_get_desc_size(enum hal_ce_desc type)
20877 {
20878 	switch (type) {
20879 	case HAL_CE_DESC_SRC:
20880 		return sizeof(struct hal_ce_srng_src_desc);
20881 	case HAL_CE_DESC_DST:
20882 		return sizeof(struct hal_ce_srng_dest_desc);
20883 	case HAL_CE_DESC_DST_STATUS:
20884 		return sizeof(struct hal_ce_srng_dst_status_desc);
20885 	}
20886 
20887 	return 0;
20888 }
20889 
/*
 * HTC transmit-completion callback; not yet implemented.
 * NOTE(review): 'm' is neither consumed nor freed here — confirm the
 * caller retains ownership of the mbuf.
 */
void
qwx_htc_tx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);
}
20895 
/*
 * Reap the next completed send descriptor from a CE pipe's source ring
 * and return the associated per-transfer context, advancing the
 * software index.  Returns NULL if no completed descriptor is
 * available.
 */
struct qwx_tx_data *
qwx_ce_completed_send_next(struct qwx_ce_pipe *pipe)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	void *ctx;
	struct qwx_tx_data *tx_data = NULL;
	uint32_t *desc;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* Nothing completed yet: return NULL. */
	desc = qwx_hal_srng_src_reap_next(sc, srng);
	if (!desc)
		goto err_unlock;

	ctx = pipe->src_ring->per_transfer_context[sw_index];
	tx_data = (struct qwx_tx_data *)ctx;

	/* Advance the software index, wrapping around the ring. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return tx_data;
}
20936 
/*
 * Process completed transmissions on a CE pipe: unload each finished
 * DMA map, then either free the mbuf (when there is no send callback
 * or credit flow is in use) or queue it for the pipe's send callback.
 * Returns 1 if at least one mbuf was handed to the callback, else 0.
 */
int
qwx_ce_tx_process_cb(struct qwx_ce_pipe *pipe)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_tx_data *tx_data;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int ret = 0;

	while ((tx_data = qwx_ce_completed_send_next(pipe)) != NULL) {
		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
		m = tx_data->m;
		tx_data->m = NULL;

		/* No callback, or credit flow in use: just release. */
		if ((!pipe->send_cb) || sc->hw_params.credit_flow) {
			m_freem(m);
			continue;
		}

		ml_enqueue(&ml, m);
		ret = 1;
	}

	/* Invoke callbacks outside the reap loop. */
	while ((m = ml_dequeue(&ml))) {
		DNPRINTF(QWX_D_CE, "%s: tx ce pipe %d len %d\n", __func__,
		    pipe->pipe_num, m->m_len);
		pipe->send_cb(sc, m);
	}

	return ret;
}
20968 
20969 void
qwx_ce_poll_send_completed(struct qwx_softc * sc,uint8_t pipe_id)20970 qwx_ce_poll_send_completed(struct qwx_softc *sc, uint8_t pipe_id)
20971 {
20972 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
20973 	const struct ce_attr *attr =  &sc->hw_params.host_ce_config[pipe_id];
20974 
20975 	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
20976 		qwx_ce_tx_process_cb(pipe);
20977 }
20978 
/*
 * Apply an array of HTC credit reports from the target: add the
 * granted credits to each endpoint and invoke its credit callback,
 * if any.  Reports for out-of-range endpoint ids abort the loop.
 */
void
qwx_htc_process_credit_report(struct qwx_htc *htc,
    const struct ath11k_htc_credit_report *report, int len,
    enum ath11k_htc_ep_id eid)
{
	struct qwx_softc *sc = htc->sc;
	struct qwx_htc_ep *ep;
	int i, n_reports;

	/* A partial trailing report is ignored below; just warn. */
	if (len % sizeof(*report))
		printf("%s: Uneven credit report len %d", __func__, len);

	n_reports = len / sizeof(*report);
#ifdef notyet
	spin_lock_bh(&htc->tx_lock);
#endif
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH11K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		DNPRINTF(QWX_D_HTC, "%s: ep %d credits got %d total %d\n",
		    __func__, report->eid, report->credits, ep->tx_credits);

		/* The callback runs without the tx lock held. */
		if (ep->ep_ops.ep_tx_credits) {
#ifdef notyet
			spin_unlock_bh(&htc->tx_lock);
#endif
			ep->ep_ops.ep_tx_credits(sc);
#ifdef notyet
			spin_lock_bh(&htc->tx_lock);
#endif
		}
	}
#ifdef notyet
	spin_unlock_bh(&htc->tx_lock);
#endif
}
21019 
/*
 * Walk the records in an HTC trailer.  Each record is a header plus
 * payload; currently only credit reports are handled (and only when
 * credit flow is enabled).  Returns 0 on success or EINVAL when a
 * record is truncated or malformed.
 */
int
qwx_htc_process_trailer(struct qwx_htc *htc, uint8_t *buffer, int length,
    enum ath11k_htc_ep_id src_eid)
{
	struct qwx_softc *sc = htc->sc;
	int status = 0;
	struct ath11k_htc_record *record;
	size_t len;

	while (length > 0) {
		record = (struct ath11k_htc_record *)buffer;

		/* Not enough bytes left for even a record header. */
		if (length < sizeof(record->hdr)) {
			status = EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			printf("%s: Invalid record length: %d\n",
			    __func__, record->hdr.len);
			status = EINVAL;
			break;
		}

		if (sc->hw_params.credit_flow) {
			switch (record->hdr.id) {
			case ATH11K_HTC_RECORD_CREDITS:
				/* Payload must hold at least one report. */
				len = sizeof(struct ath11k_htc_credit_report);
				if (record->hdr.len < len) {
					printf("%s: Credit report too long\n",
					    __func__);
					status = EINVAL;
					break;
				}
				qwx_htc_process_credit_report(htc,
				    record->credit_report,
				    record->hdr.len, src_eid);
				break;
			default:
				printf("%s: unhandled record: id:%d length:%d\n",
				    __func__, record->hdr.id, record->hdr.len);
				break;
			}
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	return status;
}
21076 
/*
 * Handle an HTC suspend-complete (ack != 0) or nack-suspend (ack == 0)
 * message from the target; not yet implemented.
 */
void
qwx_htc_suspend_complete(struct qwx_softc *sc, int ack)
{
	printf("%s: not implemented\n", __func__);
}
21082 
/* Handle an HTC wakeup-from-suspend message; nothing to do but log. */
void
qwx_htc_wakeup_from_suspend(struct qwx_softc *sc)
{
	/* TODO This is really all the Linux driver does here... silence it? */
	printf("%s: wakeup from suspend received\n", __func__);
}
21089 
/*
 * HTC receive path: validate the HTC header, process an optional
 * trailer (credit reports etc.), strip header and trailer, then either
 * handle endpoint-0 control messages locally or hand the payload to
 * the endpoint's rx-complete callback.  The mbuf is consumed: it is
 * either passed on (and ownership transferred) or freed at 'out'.
 */
void
qwx_htc_rx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_htc *htc = &sc->htc;
	struct ath11k_htc_hdr *hdr;
	struct qwx_htc_ep *ep;
	uint16_t payload_len;
	uint32_t message_id, trailer_len = 0;
	uint8_t eid;
	int trailer_present;

	/* Make the HTC header contiguous. */
	m = m_pullup(m, sizeof(struct ath11k_htc_hdr));
	if (m == NULL) {
		printf("%s: m_pullup failed\n", __func__);
		m = NULL; /* already freed */
		goto out;
	}

	hdr = mtod(m, struct ath11k_htc_hdr *);

	eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);

	if (eid >= ATH11K_HTC_EP_COUNT) {
		printf("%s: HTC Rx: invalid eid %d\n", __func__, eid);
		printf("%s: HTC info: 0x%x\n", __func__, hdr->htc_info);
		printf("%s: CTRL info: 0x%x\n", __func__, hdr->ctrl_info);
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);

	if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
		printf("%s: HTC rx frame too long, len: %zu\n", __func__,
		    payload_len + sizeof(*hdr));
		goto out;
	}

	if (m->m_pkthdr.len < payload_len) {
		printf("%s: HTC Rx: insufficient length, got %d, "
		    "expected %d\n", __func__, m->m_pkthdr.len, payload_len);
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
	    ATH11K_HTC_FLAG_TRAILER_PRESENT;

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p trailer_present %d\n",
	    __func__, eid, m, trailer_present);

	if (trailer_present) {
		int status = 0;
		uint8_t *trailer;
		int trim;
		size_t min_len;

		trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
		min_len = sizeof(struct ath11k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			printf("%s: Invalid trailer length: %d\n", __func__,
			    trailer_len);
			goto out;
		}

		/* The trailer occupies the last trailer_len payload bytes. */
		trailer = (uint8_t *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = qwx_htc_process_trailer(htc, trailer,
		    trailer_len, eid);
		if (status)
			goto out;

		/* Trim the trailer off the end of the mbuf chain. */
		trim = trailer_len;
		m_adj(m, -trim);
	}

	if (trailer_len >= payload_len)
		/* zero length packet with trailer data, just drop these */
		goto out;

	/* Strip the HTC header; the payload starts here. */
	m_adj(m, sizeof(*hdr));

	if (eid == ATH11K_HTC_EP_0) {
		/* Endpoint 0 carries HTC control messages. */
		struct ath11k_htc_msg *msg;

		msg = mtod(m, struct ath11k_htc_msg *);
		message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);

		DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p message_id %d\n",
		    __func__, eid, m, message_id);

		switch (message_id) {
		case ATH11K_HTC_MSG_READY_ID:
		case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (sc->ctl_resp) {
				/* this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				printf("%s: HTC rx ctrl still processing\n",
				    __func__);
				goto out;
			}

			htc->control_resp_len =
			    MIN(m->m_pkthdr.len, ATH11K_HTC_MAX_CTRL_MSG_LEN);

			m_copydata(m, 0, htc->control_resp_len,
			    htc->control_resp_buffer);

			/* Wake up the thread waiting for this response. */
			sc->ctl_resp = 1;
			wakeup(&sc->ctl_resp);
			break;
		case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			qwx_htc_suspend_complete(sc, 1);
			break;
		case ATH11K_HTC_MSG_NACK_SUSPEND:
			qwx_htc_suspend_complete(sc, 0);
			break;
		case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
			qwx_htc_wakeup_from_suspend(sc);
			break;
		default:
			printf("%s: ignoring unsolicited htc ep0 event %ld\n",
			    __func__,
			    FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
			break;
		}
		goto out;
	}

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p\n", __func__, eid, m);

	ep->ep_ops.ep_rx_complete(sc, m);

	/* poll tx completion for interrupt disabled CE's */
	qwx_ce_poll_send_completed(sc, ep->ul_pipe_id);

	/* mbuf is now owned by the rx completion handler */
	m = NULL;
out:
	m_freem(m);
}
21238 
21239 void
qwx_ce_free_ring(struct qwx_softc * sc,struct qwx_ce_ring * ring)21240 qwx_ce_free_ring(struct qwx_softc *sc, struct qwx_ce_ring *ring)
21241 {
21242 	bus_size_t dsize;
21243 	size_t size;
21244 
21245 	if (ring == NULL)
21246 		return;
21247 
21248 	if (ring->base_addr) {
21249 		dsize = ring->nentries * ring->desc_sz;
21250 		bus_dmamem_unmap(sc->sc_dmat, ring->base_addr, dsize);
21251 	}
21252 	if (ring->nsegs)
21253 		bus_dmamem_free(sc->sc_dmat, &ring->dsegs, ring->nsegs);
21254 	if (ring->dmap)
21255 		bus_dmamap_destroy(sc->sc_dmat, ring->dmap);
21256 
21257 	size = sizeof(*ring) + (ring->nentries *
21258 	    sizeof(ring->per_transfer_context[0]));
21259 	free(ring, M_DEVBUF, size);
21260 }
21261 
/* Only CE4 requires the shadow register workaround. */
static inline int
qwx_ce_need_shadow_fix(int ce_id)
{
	return (ce_id == 4) ? 1 : 0;
}
21268 
21269 void
qwx_ce_stop_shadow_timers(struct qwx_softc * sc)21270 qwx_ce_stop_shadow_timers(struct qwx_softc *sc)
21271 {
21272 	int i;
21273 
21274 	if (!sc->hw_params.supports_shadow_regs)
21275 		return;
21276 
21277 	for (i = 0; i < sc->hw_params.ce_count; i++)
21278 		if (qwx_ce_need_shadow_fix(i))
21279 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
21280 }
21281 
21282 void
qwx_ce_free_pipes(struct qwx_softc * sc)21283 qwx_ce_free_pipes(struct qwx_softc *sc)
21284 {
21285 	struct qwx_ce_pipe *pipe;
21286 	int i;
21287 
21288 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21289 		pipe = &sc->ce.ce_pipe[i];
21290 		if (qwx_ce_need_shadow_fix(i))
21291 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
21292 		if (pipe->src_ring) {
21293 			qwx_ce_free_ring(sc, pipe->src_ring);
21294 			pipe->src_ring = NULL;
21295 		}
21296 
21297 		if (pipe->dest_ring) {
21298 			qwx_ce_free_ring(sc, pipe->dest_ring);
21299 			pipe->dest_ring = NULL;
21300 		}
21301 
21302 		if (pipe->status_ring) {
21303 			qwx_ce_free_ring(sc, pipe->status_ring);
21304 			pipe->status_ring = NULL;
21305 		}
21306 	}
21307 }
21308 
21309 int
qwx_ce_alloc_src_ring_transfer_contexts(struct qwx_ce_pipe * pipe,const struct ce_attr * attr)21310 qwx_ce_alloc_src_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
21311     const struct ce_attr *attr)
21312 {
21313 	struct qwx_softc *sc = pipe->sc;
21314 	struct qwx_tx_data *txdata;
21315 	size_t size;
21316 	int ret, i;
21317 
21318 	/* Allocate an array of qwx_tx_data structures. */
21319 	txdata = mallocarray(pipe->src_ring->nentries, sizeof(*txdata),
21320 	    M_DEVBUF, M_NOWAIT | M_ZERO);
21321 	if (txdata == NULL)
21322 		return ENOMEM;
21323 
21324 	size = sizeof(*txdata) * pipe->src_ring->nentries;
21325 
21326 	/* Create per-transfer DMA maps. */
21327 	for (i = 0; i < pipe->src_ring->nentries; i++) {
21328 		struct qwx_tx_data *ctx = &txdata[i];
21329 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
21330 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
21331 		if (ret) {
21332 			int j;
21333 			for (j = 0; j < i; j++) {
21334 				struct qwx_tx_data *ctx = &txdata[j];
21335 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
21336 			}
21337 			free(txdata, M_DEVBUF, size);
21338 			return ret;
21339 		}
21340 		pipe->src_ring->per_transfer_context[i] = ctx;
21341 	}
21342 
21343 	return 0;
21344 }
21345 
21346 int
qwx_ce_alloc_dest_ring_transfer_contexts(struct qwx_ce_pipe * pipe,const struct ce_attr * attr)21347 qwx_ce_alloc_dest_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
21348     const struct ce_attr *attr)
21349 {
21350 	struct qwx_softc *sc = pipe->sc;
21351 	struct qwx_rx_data *rxdata;
21352 	size_t size;
21353 	int ret, i;
21354 
21355 	/* Allocate an array of qwx_rx_data structures. */
21356 	rxdata = mallocarray(pipe->dest_ring->nentries, sizeof(*rxdata),
21357 	    M_DEVBUF, M_NOWAIT | M_ZERO);
21358 	if (rxdata == NULL)
21359 		return ENOMEM;
21360 
21361 	size = sizeof(*rxdata) * pipe->dest_ring->nentries;
21362 
21363 	/* Create per-transfer DMA maps. */
21364 	for (i = 0; i < pipe->dest_ring->nentries; i++) {
21365 		struct qwx_rx_data *ctx = &rxdata[i];
21366 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
21367 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
21368 		if (ret) {
21369 			int j;
21370 			for (j = 0; j < i; j++) {
21371 				struct qwx_rx_data *ctx = &rxdata[j];
21372 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
21373 			}
21374 			free(rxdata, M_DEVBUF, size);
21375 			return ret;
21376 		}
21377 		pipe->dest_ring->per_transfer_context[i] = ctx;
21378 	}
21379 
21380 	return 0;
21381 }
21382 
/*
 * Allocate a CE ring: the bookkeeping structure (with one
 * per-transfer-context slot per entry) plus a contiguous,
 * coherently-mapped DMA buffer for 'nentries' descriptors of
 * 'desc_sz' bytes each.  Callers pass a power-of-two nentries
 * (nentries_mask is set to nentries - 1).  Returns NULL on failure;
 * partially created DMA resources are released via qwx_ce_free_ring().
 */
struct qwx_ce_ring *
qwx_ce_alloc_ring(struct qwx_softc *sc, int nentries, size_t desc_sz)
{
	struct qwx_ce_ring *ce_ring;
	size_t size = sizeof(*ce_ring) +
	    (nentries * sizeof(ce_ring->per_transfer_context[0]));
	bus_size_t dsize;

	ce_ring = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce_ring == NULL)
		return NULL;

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;
	ce_ring->desc_sz = desc_sz;

	/* Descriptor buffer: map, physical segment, kernel mapping, load. */
	dsize = nentries * desc_sz;
	if (bus_dmamap_create(sc->sc_dmat, dsize, 1, dsize, 0, BUS_DMA_NOWAIT,
	    &ce_ring->dmap)) {
		/* No DMA resources created yet; free the struct directly. */
		free(ce_ring, M_DEVBUF, size);
		return NULL;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, dsize, CE_DESC_RING_ALIGN, 0,
	    &ce_ring->dsegs, 1, &ce_ring->nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamem_map(sc->sc_dmat, &ce_ring->dsegs, 1, dsize,
	    &ce_ring->base_addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamap_load(sc->sc_dmat, ce_ring->dmap, ce_ring->base_addr,
	    dsize, NULL, BUS_DMA_NOWAIT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	return ce_ring;
}
21427 
/*
 * Allocate rings and transfer contexts for one copy-engine pipe
 * according to its host CE configuration. On failure, partial
 * allocations are released by the caller (qwx_ce_alloc_pipes)
 * via qwx_ce_free_pipes().
 */
int
qwx_ce_alloc_pipe(struct qwx_softc *sc, int ce_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
	struct qwx_ce_ring *ring;
	int nentries;
	size_t desc_sz;

	pipe->attr_flags = attr->flags;

	/* Pipes which transmit (see qwx_ce_send()) need a source ring. */
	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = qwx_roundup_pow_of_two(attr->src_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->src_ring = ring;
		if (qwx_ce_alloc_src_ring_transfer_contexts(pipe, attr))
			return ENOMEM;
	}

	/* Receiving pipes need a destination ring and a status ring. */
	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = qwx_roundup_pow_of_two(attr->dest_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->dest_ring = ring;
		if (qwx_ce_alloc_dest_ring_transfer_contexts(pipe, attr))
			return ENOMEM;

		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->status_ring = ring;
	}

	return 0;
}
21471 
21472 void
qwx_ce_rx_pipe_cleanup(struct qwx_ce_pipe * pipe)21473 qwx_ce_rx_pipe_cleanup(struct qwx_ce_pipe *pipe)
21474 {
21475 	struct qwx_softc *sc = pipe->sc;
21476 	struct qwx_ce_ring *ring = pipe->dest_ring;
21477 	void *ctx;
21478 	struct qwx_rx_data *rx_data;
21479 	int i;
21480 
21481 	if (!(ring && pipe->buf_sz))
21482 		return;
21483 
21484 	for (i = 0; i < ring->nentries; i++) {
21485 		ctx = ring->per_transfer_context[i];
21486 		if (!ctx)
21487 			continue;
21488 
21489 		rx_data = (struct qwx_rx_data *)ctx;
21490 		if (rx_data->m) {
21491 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21492 			m_freem(rx_data->m);
21493 			rx_data->m = NULL;
21494 		}
21495 	}
21496 }
21497 
21498 void
qwx_ce_shadow_config(struct qwx_softc * sc)21499 qwx_ce_shadow_config(struct qwx_softc *sc)
21500 {
21501 	int i;
21502 
21503 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21504 		if (sc->hw_params.host_ce_config[i].src_nentries)
21505 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_SRC, i);
21506 
21507 		if (sc->hw_params.host_ce_config[i].dest_nentries) {
21508 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_DST, i);
21509 
21510 			qwx_hal_srng_update_shadow_config(sc,
21511 			    HAL_CE_DST_STATUS, i);
21512 		}
21513 	}
21514 }
21515 
21516 void
qwx_ce_get_shadow_config(struct qwx_softc * sc,uint32_t ** shadow_cfg,uint32_t * shadow_cfg_len)21517 qwx_ce_get_shadow_config(struct qwx_softc *sc, uint32_t **shadow_cfg,
21518     uint32_t *shadow_cfg_len)
21519 {
21520 	if (!sc->hw_params.supports_shadow_regs)
21521 		return;
21522 
21523 	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
21524 
21525 	/* shadow is already configured */
21526 	if (*shadow_cfg_len)
21527 		return;
21528 
21529 	/* shadow isn't configured yet, configure now.
21530 	 * non-CE srngs are configured firstly, then
21531 	 * all CE srngs.
21532 	 */
21533 	qwx_hal_srng_shadow_config(sc);
21534 	qwx_ce_shadow_config(sc);
21535 
21536 	/* get the shadow configuration */
21537 	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
21538 }
21539 
21540 void
qwx_ce_cleanup_pipes(struct qwx_softc * sc)21541 qwx_ce_cleanup_pipes(struct qwx_softc *sc)
21542 {
21543 	struct qwx_ce_pipe *pipe;
21544 	int pipe_num;
21545 
21546 	qwx_ce_stop_shadow_timers(sc);
21547 
21548 	for (pipe_num = 0; pipe_num < sc->hw_params.ce_count; pipe_num++) {
21549 		pipe = &sc->ce.ce_pipe[pipe_num];
21550 		qwx_ce_rx_pipe_cleanup(pipe);
21551 
21552 		/* Cleanup any src CE's which have interrupts disabled */
21553 		qwx_ce_poll_send_completed(sc, pipe_num);
21554 	}
21555 }
21556 
21557 int
qwx_ce_alloc_pipes(struct qwx_softc * sc)21558 qwx_ce_alloc_pipes(struct qwx_softc *sc)
21559 {
21560 	struct qwx_ce_pipe *pipe;
21561 	int i;
21562 	int ret;
21563 	const struct ce_attr *attr;
21564 
21565 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21566 		attr = &sc->hw_params.host_ce_config[i];
21567 		pipe = &sc->ce.ce_pipe[i];
21568 		pipe->pipe_num = i;
21569 		pipe->sc = sc;
21570 		pipe->buf_sz = attr->src_sz_max;
21571 
21572 		ret = qwx_ce_alloc_pipe(sc, i);
21573 		if (ret) {
21574 			/* Free any partial successful allocation */
21575 			qwx_ce_free_pipes(sc);
21576 			return ret;
21577 		}
21578 	}
21579 
21580 	return 0;
21581 }
21582 
/* Map a CE ID to its MSI data index; the assignment is 1:1. */
void
qwx_get_ce_msi_idx(struct qwx_softc *sc, uint32_t ce_id,
    uint32_t *msi_data_idx)
{
	*msi_data_idx = ce_id;
}
21589 
/*
 * Fill in the MSI address and data value for a CE ring so the
 * hardware can signal completions via MSI. Leaves ring_params
 * untouched if no MSI vector is available for "CE".
 */
void
qwx_ce_srng_msi_ring_params_setup(struct qwx_softc *sc, uint32_t ce_id,
    struct hal_srng_params *ring_params)
{
	uint32_t msi_data_start = 0;
	uint32_t msi_data_count = 1, msi_data_idx;
	uint32_t msi_irq_start = 0;
	uint32_t addr_lo;
	uint32_t addr_hi;
	int ret;

	ret = sc->ops.get_user_msi_vector(sc, "CE",
	    &msi_data_count, &msi_data_start, &msi_irq_start);
	if (ret)
		return;

	qwx_get_msi_address(sc, &addr_lo, &addr_hi);
	qwx_get_ce_msi_idx(sc, ce_id, &msi_data_idx);

	/* Combine the 32-bit halves into the 64-bit MSI address. */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
	/* Spread CE indices across the available MSI data values. */
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
21614 
21615 int
qwx_ce_init_ring(struct qwx_softc * sc,struct qwx_ce_ring * ce_ring,int ce_id,enum hal_ring_type type)21616 qwx_ce_init_ring(struct qwx_softc *sc, struct qwx_ce_ring *ce_ring,
21617     int ce_id, enum hal_ring_type type)
21618 {
21619 	struct hal_srng_params params = { 0 };
21620 	int ret;
21621 
21622 	params.ring_base_paddr = ce_ring->dmap->dm_segs[0].ds_addr;
21623 	params.ring_base_vaddr = (uint32_t *)ce_ring->base_addr;
21624 	params.num_entries = ce_ring->nentries;
21625 
21626 	if (!(CE_ATTR_DIS_INTR & sc->hw_params.host_ce_config[ce_id].flags))
21627 		qwx_ce_srng_msi_ring_params_setup(sc, ce_id, &params);
21628 
21629 	switch (type) {
21630 	case HAL_CE_SRC:
21631 		if (!(CE_ATTR_DIS_INTR &
21632 		    sc->hw_params.host_ce_config[ce_id].flags))
21633 			params.intr_batch_cntr_thres_entries = 1;
21634 		break;
21635 	case HAL_CE_DST:
21636 		params.max_buffer_len =
21637 		    sc->hw_params.host_ce_config[ce_id].src_sz_max;
21638 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21639 		    CE_ATTR_DIS_INTR)) {
21640 			params.intr_timer_thres_us = 1024;
21641 			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
21642 			params.low_threshold = ce_ring->nentries - 3;
21643 		}
21644 		break;
21645 	case HAL_CE_DST_STATUS:
21646 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
21647 		    CE_ATTR_DIS_INTR)) {
21648 			params.intr_batch_cntr_thres_entries = 1;
21649 			params.intr_timer_thres_us = 0x1000;
21650 		}
21651 		break;
21652 	default:
21653 		printf("%s: Invalid CE ring type %d\n",
21654 		    sc->sc_dev.dv_xname, type);
21655 		return EINVAL;
21656 	}
21657 
21658 	/* TODO: Init other params needed by HAL to init the ring */
21659 
21660 	ret = qwx_hal_srng_setup(sc, type, ce_id, 0, &params);
21661 	if (ret < 0) {
21662 		printf("%s: failed to setup srng: ring_id %d ce_id %d\n",
21663 		    sc->sc_dev.dv_xname, ret, ce_id);
21664 		return ret;
21665 	}
21666 
21667 	ce_ring->hal_ring_id = ret;
21668 
21669 	if (sc->hw_params.supports_shadow_regs &&
21670 	    qwx_ce_need_shadow_fix(ce_id))
21671 		qwx_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
21672 		    ATH11K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id);
21673 
21674 	return 0;
21675 }
21676 
/*
 * Initialize the HAL srngs for all rings of all allocated CE pipes
 * and reset their software ring indices.
 */
int
qwx_ce_init_pipes(struct qwx_softc *sc)
{
	struct qwx_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < sc->hw_params.ce_count; i++) {
		pipe = &sc->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = qwx_ce_init_ring(sc, pipe->src_ring, i,
			    HAL_CE_SRC);
			if (ret) {
				printf("%s: failed to init src ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = qwx_ce_init_ring(sc, pipe->dest_ring, i,
			    HAL_CE_DST);
			if (ret) {
				printf("%s: failed to init dest ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			/* Post all but two entries; see qwx_ce_rx_post_pipe(). */
			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
			    pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = qwx_ce_init_ring(sc, pipe->status_ring, i,
			    HAL_CE_DST_STATUS);
			if (ret) {
				printf("%s: failed to init status ring: %d\n",
				    sc->sc_dev.dv_xname, ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
21735 
21736 int
qwx_hal_srng_src_num_free(struct qwx_softc * sc,struct hal_srng * srng,int sync_hw_ptr)21737 qwx_hal_srng_src_num_free(struct qwx_softc *sc, struct hal_srng *srng,
21738     int sync_hw_ptr)
21739 {
21740 	uint32_t tp, hp;
21741 #ifdef notyet
21742 	lockdep_assert_held(&srng->lock);
21743 #endif
21744 	hp = srng->u.src_ring.hp;
21745 
21746 	if (sync_hw_ptr) {
21747 		tp = *srng->u.src_ring.tp_addr;
21748 		srng->u.src_ring.cached_tp = tp;
21749 	} else {
21750 		tp = srng->u.src_ring.cached_tp;
21751 	}
21752 
21753 	if (tp > hp)
21754 		return ((tp - hp) / srng->entry_size) - 1;
21755 	else
21756 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
21757 }
21758 
/*
 * Post one DMA-mapped Rx buffer to the pipe's destination ring.
 * Returns 0 on success or ENOSPC if no ring entry is available.
 */
int
qwx_ce_rx_buf_enqueue_pipe(struct qwx_ce_pipe *pipe, bus_dmamap_t map)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	uint32_t *desc;
	uint64_t paddr;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ab->ce.ce_lock);
#endif
	write_index = ring->write_index;

	srng = &sc->hal.srng_list[ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	/*
	 * NOTE(review): this syncs the buffer map, yet the length is
	 * derived from the srng entry size — confirm this is intended.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_POSTREAD);

	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		ret = ENOSPC;
		goto exit;
	}

	desc = qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOSPC;
		goto exit;
	}

	/* Point the descriptor at the buffer's DMA address. */
	paddr = map->dm_segs[0].ds_addr;
	qwx_hal_ce_dst_set_desc(desc, paddr);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	/* One fewer buffer owed to the ring. */
	pipe->rx_buf_needed--;

	ret = 0;
exit:
	qwx_hal_srng_access_end(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
21812 
21813 int
qwx_ce_rx_post_pipe(struct qwx_ce_pipe * pipe)21814 qwx_ce_rx_post_pipe(struct qwx_ce_pipe *pipe)
21815 {
21816 	struct qwx_softc *sc = pipe->sc;
21817 	int ret = 0;
21818 	unsigned int idx;
21819 	void *ctx;
21820 	struct qwx_rx_data *rx_data;
21821 	struct mbuf *m;
21822 
21823 	if (!pipe->dest_ring)
21824 		return 0;
21825 
21826 #ifdef notyet
21827 	spin_lock_bh(&ab->ce.ce_lock);
21828 #endif
21829 	while (pipe->rx_buf_needed) {
21830 		m = m_gethdr(M_DONTWAIT, MT_DATA);
21831 		if (m == NULL) {
21832 			ret = ENOBUFS;
21833 			goto done;
21834 		}
21835 
21836 		if (pipe->buf_sz <= MCLBYTES)
21837 			MCLGET(m, M_DONTWAIT);
21838 		else
21839 			MCLGETL(m, M_DONTWAIT, pipe->buf_sz);
21840 		if ((m->m_flags & M_EXT) == 0) {
21841 			ret = ENOBUFS;
21842 			goto done;
21843 		}
21844 
21845 		idx = pipe->dest_ring->write_index;
21846 		ctx = pipe->dest_ring->per_transfer_context[idx];
21847 		rx_data = (struct qwx_rx_data *)ctx;
21848 
21849 		m->m_len = m->m_pkthdr.len = pipe->buf_sz;
21850 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
21851 		    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
21852 		if (ret) {
21853 			printf("%s: can't map mbuf (error %d)\n",
21854 			    sc->sc_dev.dv_xname, ret);
21855 			m_freem(m);
21856 			goto done;
21857 		}
21858 
21859 		ret = qwx_ce_rx_buf_enqueue_pipe(pipe, rx_data->map);
21860 		if (ret) {
21861 			printf("%s: failed to enqueue rx buf: %d\n",
21862 			    sc->sc_dev.dv_xname, ret);
21863 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21864 			m_freem(m);
21865 			break;
21866 		} else
21867 			rx_data->m = m;
21868 	}
21869 
21870 done:
21871 #ifdef notyet
21872 	spin_unlock_bh(&ab->ce.ce_lock);
21873 #endif
21874 	return ret;
21875 }
21876 
21877 void
qwx_ce_rx_post_buf(struct qwx_softc * sc)21878 qwx_ce_rx_post_buf(struct qwx_softc *sc)
21879 {
21880 	struct qwx_ce_pipe *pipe;
21881 	int i;
21882 	int ret;
21883 
21884 	for (i = 0; i < sc->hw_params.ce_count; i++) {
21885 		pipe = &sc->ce.ce_pipe[i];
21886 		ret = qwx_ce_rx_post_pipe(pipe);
21887 		if (ret) {
21888 			if (ret == ENOSPC)
21889 				continue;
21890 
21891 			printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21892 			    sc->sc_dev.dv_xname, i, ret);
21893 #ifdef notyet
21894 			mod_timer(&ab->rx_replenish_retry,
21895 				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
21896 #endif
21897 
21898 			return;
21899 		}
21900 	}
21901 }
21902 
/*
 * Fetch the next completed Rx transfer for a pipe. On success
 * returns 0, stores the transfer context (if requested) and the
 * received byte count. Returns EIO when no completion is available
 * or the reported length is zero.
 */
int
qwx_ce_completed_recv_next(struct qwx_ce_pipe *pipe,
    void **per_transfer_contextp, int *nbytes)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	uint32_t *desc;
	int ret = 0;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	/* Completions are reported through the status ring. */
	srng = &sc->hal.srng_list[pipe->status_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	desc = qwx_hal_srng_dst_get_next_entry(sc, srng);
	if (!desc) {
		ret = EIO;
		goto err;
	}

	/* A zero length is treated as an invalid completion. */
	*nbytes = qwx_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = EIO;
		goto err;
	}

	if (per_transfer_contextp) {
		*per_transfer_contextp =
		    pipe->dest_ring->per_transfer_context[sw_index];
	}

	/* Consume the destination entry and note that a buffer is owed. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
21954 
21955 int
qwx_ce_recv_process_cb(struct qwx_ce_pipe * pipe)21956 qwx_ce_recv_process_cb(struct qwx_ce_pipe *pipe)
21957 {
21958 	struct qwx_softc *sc = pipe->sc;
21959 	struct mbuf *m;
21960 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
21961 	void *transfer_context;
21962 	unsigned int nbytes, max_nbytes;
21963 	int ret = 0, err;
21964 
21965 	while (qwx_ce_completed_recv_next(pipe, &transfer_context,
21966 	    &nbytes) == 0) {
21967 		struct qwx_rx_data *rx_data = transfer_context;
21968 
21969 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
21970 		m = rx_data->m;
21971 		rx_data->m = NULL;
21972 
21973 		max_nbytes = m->m_pkthdr.len;
21974 		if (max_nbytes < nbytes) {
21975 			printf("%s: received more than expected (nbytes %d, "
21976 			    "max %d)", __func__, nbytes, max_nbytes);
21977 			m_freem(m);
21978 			continue;
21979 		}
21980 		m->m_len = m->m_pkthdr.len = nbytes;
21981 		ml_enqueue(&ml, m);
21982 		ret = 1;
21983 	}
21984 
21985 	while ((m = ml_dequeue(&ml))) {
21986 		DNPRINTF(QWX_D_CE, "%s: rx ce pipe %d len %d\n", __func__,
21987 		    pipe->pipe_num, m->m_len);
21988 		pipe->recv_cb(sc, m);
21989 	}
21990 
21991 	err = qwx_ce_rx_post_pipe(pipe);
21992 	if (err && err != ENOSPC) {
21993 		printf("%s: failed to post rx buf to pipe: %d err: %d\n",
21994 		    __func__, pipe->pipe_num, err);
21995 #ifdef notyet
21996 		mod_timer(&ab->rx_replenish_retry,
21997 			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
21998 #endif
21999 	}
22000 
22001 	return ret;
22002 }
22003 
22004 int
qwx_ce_per_engine_service(struct qwx_softc * sc,uint16_t ce_id)22005 qwx_ce_per_engine_service(struct qwx_softc *sc, uint16_t ce_id)
22006 {
22007 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
22008 	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
22009 	int ret = 0;
22010 
22011 	if (attr->src_nentries) {
22012 		if (qwx_ce_tx_process_cb(pipe))
22013 			ret = 1;
22014 	}
22015 
22016 	if (pipe->recv_cb) {
22017 		if (qwx_ce_recv_process_cb(pipe))
22018 			ret = 1;
22019 	}
22020 
22021 	return ret;
22022 }
22023 
/*
 * Enqueue an already DMA-mapped mbuf on a CE source ring for
 * transmission to the target. Returns 0 on success, ENOBUFS if the
 * ring is full, or ESHUTDOWN after a firmware crash.
 */
int
qwx_ce_send(struct qwx_softc *sc, struct mbuf *m, uint8_t pipe_id,
    uint16_t transfer_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	uint32_t *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	uint8_t byte_swap_data = 0;
	int num_used;
	uint64_t paddr;
	void *ctx;
	struct qwx_tx_data *tx_data;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
#ifdef notyet
		spin_lock_bh(&ab->ce.ce_lock);
#endif
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Used entries, accounting for index wrap-around. */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
			    write_index;
#ifdef notyet
		spin_unlock_bh(&ab->ce.ce_lock);
#endif
		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			qwx_ce_poll_send_completed(sc, pipe->pipe_num);
	}

	/* Refuse new work while a crashed device is being flushed. */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	desc = qwx_hal_srng_src_get_next_reaped(sc, srng);
	if (!desc) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	/*
	 * The Tx context holds the DMA map for this entry —
	 * presumably loaded by our caller; confirm against callers.
	 */
	ctx = pipe->src_ring->per_transfer_context[write_index];
	tx_data = (struct qwx_tx_data *)ctx;

	paddr = tx_data->map->dm_segs[0].ds_addr;
	qwx_hal_ce_src_set_desc(desc, paddr, m->m_pkthdr.len,
	    transfer_id, byte_swap_data);

	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
	    write_index);

	qwx_hal_srng_access_end(sc, srng);

	/* Arm the head-pointer update timer where shadow regs need it. */
	if (qwx_ce_need_shadow_fix(pipe_id))
		qwx_dp_shadow_start_timer(sc, srng, &sc->ce.hp_timer[pipe_id]);

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
22117 
/* Count the number of bits set in an antenna chain mask. */
int
qwx_get_num_chains(uint32_t mask)
{
	int n;

	/* Kernighan's method: each step clears the lowest set bit. */
	for (n = 0; mask != 0; n++)
		mask &= mask - 1;

	return n;
}
22131 
/*
 * Program the Tx and Rx chain masks into firmware via WMI pdev
 * parameters and cache the resulting chain counts.
 * Returns 0 or the WMI error.
 */
int
qwx_set_antenna(struct qwx_pdev *pdev, uint32_t tx_ant, uint32_t rx_ant)
{
	struct qwx_softc *sc = pdev->sc;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Remember the requested masks regardless of the WMI outcome. */
	sc->cfg_tx_chainmask = tx_ant;
	sc->cfg_rx_chainmask = rx_ant;
#if 0
	if (ar->state != ATH11K_STATE_ON &&
	    ar->state != ATH11K_STATE_RESTARTED)
		return 0;
#endif
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_TX_CHAIN_MASK,
	    tx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set tx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, tx_ant);
		return ret;
	}

	sc->num_tx_chains = qwx_get_num_chains(tx_ant);

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_RX_CHAIN_MASK,
	    rx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set rx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, rx_ant);
		return ret;
	}

	sc->num_rx_chains = qwx_get_num_chains(rx_ant);
#if 0
	/* Reload HT/VHT/HE capability */
	ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
	ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
#endif
	return 0;
}
22173 
/*
 * Build a WMI scan-channel-list from the net80211 channel table and
 * send it to firmware for the given pdev. Returns EINVAL if no
 * channels are configured, ENOMEM, or the WMI result.
 */
int
qwx_reg_update_chan_list(struct qwx_softc *sc, uint8_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct scan_chan_list_params *params;
	struct ieee80211_channel *channel, *lastc;
	struct channel_param *ch;
	int num_channels = 0;
	size_t params_size;
	int ret;
#if 0
	if (ar->state == ATH11K_STATE_RESTARTING)
		return 0;
#endif
	/* Count configured channels; ic_flags == 0 marks unused slots. */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
		num_channels++;
	}

	if (!num_channels)
		return EINVAL;

	params_size = sizeof(*params) +
	    num_channels * sizeof(*params->ch_param);

	/*
	 * TODO: This is a temporary list for qwx_wmi_send_scan_chan_list_cmd
	 * to loop over. Could that function loop over ic_channels directly?
	 */
	params = malloc(params_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!params)
		return ENOMEM;

	params->pdev_id = pdev_id;
	params->nallchans = num_channels;

	/* Translate each configured channel into a WMI channel_param. */
	ch = params->ch_param;
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
#ifdef notyet
		/* TODO: Set to true/false based on some condition? */
		ch->allow_ht = true;
		ch->allow_vht = true;
		ch->allow_he = true;
#endif
		/* Passive 5 GHz channels are flagged as DFS channels. */
		ch->dfs_set = !!(IEEE80211_IS_CHAN_5GHZ(channel) &&
		    (channel->ic_flags & IEEE80211_CHAN_PASSIVE));
		ch->is_chan_passive = !!(channel->ic_flags &
		    IEEE80211_CHAN_PASSIVE);
		ch->is_chan_passive |= ch->dfs_set;
		ch->mhz = ieee80211_ieee2mhz(ieee80211_chan2ieee(ic, channel),
		    channel->ic_flags);
		ch->cfreq1 = ch->mhz;
		ch->minpower = 0;
		ch->maxpower = 40; /* XXX from Linux debug trace */
		ch->maxregpower = ch->maxpower;
		ch->antennamax = 0;

		/* TODO: Use appropriate phymodes */
		if (IEEE80211_IS_CHAN_A(channel))
			ch->phy_mode = MODE_11A;
		else if (IEEE80211_IS_CHAN_G(channel))
			ch->phy_mode = MODE_11G;
		else
			ch->phy_mode = MODE_11B;
#ifdef notyet
		if (channel->band == NL80211_BAND_6GHZ &&
		    cfg80211_channel_is_psc(channel))
			ch->psc_channel = true;
#endif
		DNPRINTF(QWX_D_WMI, "%s: mac channel freq %d maxpower %d "
		    "regpower %d antenna %d mode %d\n", __func__,
		    ch->mhz, ch->maxpower, ch->maxregpower,
		    ch->antennamax, ch->phy_mode);

		ch++;
		/* TODO: use quarrter/half rate, cfreq12, dfs_cfreq2
		 * set_agile, reg_class_idx
		 */
	}

	ret = qwx_wmi_send_scan_chan_list_cmd(sc, pdev_id, params);
	free(params, M_DEVBUF, params_size);

	return ret;
}
22264 
/*
 * Default monitor-status ring filter: request MPDU-start and
 * PPDU-end/status-done TLVs plus the filter-pass (FP) management,
 * control and data frame classes.
 */
static const struct htt_rx_ring_tlv_filter qwx_mac_mon_status_filter_default = {
	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
	    HTT_RX_FP_CTRL_FILTER_FLASG3
};
22275 
22276 int
qwx_mac_register(struct qwx_softc * sc)22277 qwx_mac_register(struct qwx_softc *sc)
22278 {
22279 	/* Initialize channel counters frequency value in hertz */
22280 	sc->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
22281 
22282 	sc->free_vdev_map = (1U << (sc->num_radios * TARGET_NUM_VDEVS(sc))) - 1;
22283 
22284 	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
22285 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, sc->mac_addr);
22286 
22287 	return 0;
22288 }
22289 
/*
 * Enable or disable the default monitor-status Rx filter. The
 * actual filter programming is currently compiled out (see the
 * #if 0 below), so this prepares the filter and returns 0.
 */
int
qwx_mac_config_mon_status_default(struct qwx_softc *sc, int enable)
{
	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
	int ret = 0;
#if 0
	int i;
	struct dp_rxdma_ring *ring;
#endif

	if (enable)
		tlv_filter = qwx_mac_mon_status_filter_default;
#if 0 /* mon status info is not useful and the code triggers mbuf corruption */
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring = &sc->pdev_dp.rx_mon_status_refill_ring[i];
		ret = qwx_dp_tx_htt_rx_filter_setup(sc,
		    ring->refill_buf_ring.ring_id, sc->pdev_dp.mac_id + i,
		    HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter);
		if (ret)
			return ret;
	}

	if (enable && !sc->hw_params.rxdma1_enable) {
		timeout_add_msec(&sc->mon_reap_timer,
		    ATH11K_MON_TIMER_INTERVAL);
	}
#endif
	return ret;
}
22319 
/*
 * Recompute the pdev transmit power limit as the minimum of all
 * interface txpower settings, clamp it to the hardware range, and
 * push it to firmware for each supported band.
 */
int
qwx_mac_txpower_recalc(struct qwx_softc *sc, struct qwx_pdev *pdev)
{
	struct qwx_vif *arvif;
	int ret, txpower = -1;
	uint32_t param;
	uint32_t min_tx_power = sc->target_caps.hw_min_tx_power;
	uint32_t max_tx_power = sc->target_caps.hw_max_tx_power;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Find the smallest positive txpower across all interfaces. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->txpower <= 0)
			continue;

		if (txpower == -1)
			txpower = arvif->txpower;
		else
			txpower = MIN(txpower, arvif->txpower);
	}

	/* No interface has a txpower set; leave firmware state alone. */
	if (txpower == -1)
		return 0;

	/* txpwr is set as 2 units per dBm in FW*/
	txpower = MIN(MAX(min_tx_power, txpower), max_tx_power) * 2;
	DNPRINTF(QWX_D_MAC, "txpower to set in hw %d\n", txpower / 2);

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	DNPRINTF(QWX_D_MAC, "%s: failed to recalc txpower limit %d "
	    "using pdev param %d: %d\n", sc->sc_dev.dv_xname, txpower / 2,
	    param, ret);

	return ret;
}
22373 
/*
 * Per-pdev startup: push initial WMI configuration to firmware, reset
 * vdev/peer bookkeeping, and set up the monitor status ring.
 * Returns 0 on success or the error from the first failing step.
 */
int
qwx_mac_op_start(struct qwx_pdev *pdev)
{
	struct qwx_softc *sc = pdev->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	int ret;

	/* Enable PMF (protected management frame) QoS handling. */
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_PMF_QOS, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable PMF QOS for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Enable dynamic bandwidth. */
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable dynamic bw for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/*
	 * If firmware supports MAC address spoofing, set the OUI used
	 * in probe requests to our own MAC address.
	 */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT)) {
		ret = qwx_wmi_scan_prob_req_oui(sc, ic->ic_myaddr,
		    pdev->pdev_id);
		if (ret) {
			printf("%s: failed to set prob req oui for "
			    "pdev %d: %i\n", sc->sc_dev.dv_xname,
			    pdev->pdev_id, ret);
			goto err;
		}
	}

	/* Disable the access-category override for ARP frames. */
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set ac override for ARP for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Let firmware handle DFS radar detection (phyerr offload). */
	ret = qwx_wmi_send_dfs_phyerr_offload_enable_cmd(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to offload radar detection for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Request PPDU stats from firmware via HTT. */
	ret = qwx_dp_tx_htt_h2t_ppdu_stats_req(sc, HTT_PPDU_STATS_TAG_DEFAULT,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to req ppdu stats for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable MESH MCAST ENABLE for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	qwx_set_antenna(pdev, pdev->cap.tx_chain_mask, pdev->cap.rx_chain_mask);

	/* TODO: Do we need to enable ANI? */

	ret = qwx_reg_update_chan_list(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to update channel list for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* Reset vdev and peer accounting for a fresh start. */
	sc->num_started_vdevs = 0;
	sc->num_created_vdevs = 0;
	sc->num_peers = 0;
	sc->allocated_vdev_map = 0;

	/* Configure monitor status ring with default rx_filter to get rx status
	 * such as rssi, rx_duration.
	 */
	ret = qwx_mac_config_mon_status_default(sc, 1);
	if (ret) {
		printf("%s: failed to configure monitor status ring "
		    "with default rx_filter: (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* Configure the hash seed for hash based reo dest ring selection */
	qwx_wmi_pdev_lro_cfg(sc, pdev->pdev_id);

	/* allow device to enter IMPS */
	if (sc->hw_params.idle_ps) {
		ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
		    1, pdev->pdev_id);
		if (ret) {
			printf("%s: failed to enable idle ps: %d\n",
			    sc->sc_dev.dv_xname, ret);
			goto err;
		}
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	/* Mark this pdev as active. */
	sc->pdevs_active |= (1 << pdev->pdev_id);
	return 0;
err:
#ifdef notyet
	ar->state = ATH11K_STATE_OFF;
	mutex_unlock(&ar->conf_mutex);
#endif
	return ret;
}
22491 
22492 int
qwx_mac_setup_vdev_params_mbssid(struct qwx_vif * arvif,uint32_t * flags,uint32_t * tx_vdev_id)22493 qwx_mac_setup_vdev_params_mbssid(struct qwx_vif *arvif,
22494     uint32_t *flags, uint32_t *tx_vdev_id)
22495 {
22496 	*tx_vdev_id = 0;
22497 	*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
22498 	return 0;
22499 }
22500 
22501 int
qwx_mac_setup_vdev_create_params(struct qwx_vif * arvif,struct qwx_pdev * pdev,struct vdev_create_params * params)22502 qwx_mac_setup_vdev_create_params(struct qwx_vif *arvif, struct qwx_pdev *pdev,
22503     struct vdev_create_params *params)
22504 {
22505 	struct qwx_softc *sc = arvif->sc;
22506 	int ret;
22507 
22508 	params->if_id = arvif->vdev_id;
22509 	params->type = arvif->vdev_type;
22510 	params->subtype = arvif->vdev_subtype;
22511 	params->pdev_id = pdev->pdev_id;
22512 	params->mbssid_flags = 0;
22513 	params->mbssid_tx_vdev_id = 0;
22514 
22515 	if (!isset(sc->wmi.svc_map,
22516 	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
22517 		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
22518 		    &params->mbssid_flags, &params->mbssid_tx_vdev_id);
22519 		if (ret)
22520 			return ret;
22521 	}
22522 
22523 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
22524 		params->chains[0].tx = sc->num_tx_chains;
22525 		params->chains[0].rx = sc->num_rx_chains;
22526 	}
22527 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
22528 		params->chains[1].tx = sc->num_tx_chains;
22529 		params->chains[1].rx = sc->num_rx_chains;
22530 	}
22531 #if 0
22532 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
22533 	    ar->supports_6ghz) {
22534 		params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
22535 		params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
22536 	}
22537 #endif
22538 	return 0;
22539 }
22540 
22541 int
qwx_mac_op_update_vif_offload(struct qwx_softc * sc,struct qwx_pdev * pdev,struct qwx_vif * arvif)22542 qwx_mac_op_update_vif_offload(struct qwx_softc *sc, struct qwx_pdev *pdev,
22543     struct qwx_vif *arvif)
22544 {
22545 	uint32_t param_id, param_value;
22546 	int ret;
22547 
22548 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
22549 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
22550 		param_value = ATH11K_HW_TXRX_RAW;
22551 	else
22552 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
22553 
22554 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22555 	    param_id, param_value);
22556 	if (ret) {
22557 		printf("%s: failed to set vdev %d tx encap mode: %d\n",
22558 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22559 		return ret;
22560 	}
22561 
22562 	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
22563 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
22564 		param_value = ATH11K_HW_TXRX_RAW;
22565 	else
22566 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
22567 
22568 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22569 	    param_id, param_value);
22570 	if (ret) {
22571 		printf("%s: failed to set vdev %d rx decap mode: %d\n",
22572 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22573 		return ret;
22574 	}
22575 
22576 	return 0;
22577 }
22578 
/*
 * Tear down a firmware vdev.  Not implemented yet; currently only
 * logs a message.  Callers treat this as best-effort cleanup.
 */
void
qwx_mac_vdev_delete(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	printf("%s: not implemented\n", __func__);
}
22584 
/*
 * Wait for firmware to acknowledge the most recent vdev start/restart/
 * stop command.  Callers clear sc->vdev_setup_done before issuing the
 * command; this sleeps until that flag is set again (by whichever event
 * handler processes the firmware completion) or a one-second timeout
 * expires.  Returns 0 on success, ESHUTDOWN while a crash flush is in
 * progress, or the tsleep error on timeout.
 */
int
qwx_mac_vdev_setup_sync(struct qwx_softc *sc)
{
	int ret;

#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	while (!sc->vdev_setup_done) {
		ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwxvdev",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: vdev start timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	return 0;
}
22608 
/*
 * Configure TX beamforming for a vif.
 * TX beamforming is not yet supported; this stub always succeeds so
 * callers can treat beamforming setup as a no-op.
 */
int
qwx_mac_set_txbf_conf(struct qwx_vif *arvif)
{
	/* TX beamforming is not yet supported. */
	return 0;
}
22615 
/*
 * Stop a running vdev: issue the WMI stop command, wait for firmware
 * to acknowledge, and update local bookkeeping (started-vdev count,
 * CAC state).  Returns 0 on success or an error from the WMI command
 * or the synchronization wait.
 */
int
qwx_mac_vdev_stop(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	/* Cleared here; set again when the stop completion arrives. */
	sc->vdev_setup_done = 0;
	ret = qwx_wmi_vdev_stop(sc, arvif->vdev_id, pdev_id);
	if (ret) {
		printf("%s: failed to stop WMI vdev %i: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	/* Block until firmware confirms the stop (or time out). */
	ret = qwx_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	if (sc->num_started_vdevs > 0)
		sc->num_started_vdevs--;

	DNPRINTF(QWX_D_MAC, "%s: vdev vdev_id %d stopped\n", __func__,
	    arvif->vdev_id);

	/* Stopping the vdev also ends any channel availability check. */
	if (test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) {
		clear_bit(ATH11K_CAC_RUNNING, sc->sc_flags);
		DNPRINTF(QWX_D_MAC, "%s: CAC Stopped for vdev %d\n", __func__,
		    arvif->vdev_id);
	}

	return 0;
}
22655 
/*
 * Start (restart == 0) or restart (restart == 1) a vdev on the current
 * BSS channel and wait for firmware to acknowledge.  Channel, beacon
 * and power parameters are derived from the ic's current state.
 * Returns 0 on success, ENOTSUP for unsupported PHY modes, or an error
 * from the WMI command or synchronization wait.
 */
int
qwx_mac_vdev_start_restart(struct qwx_softc *sc, struct qwx_vif *arvif,
    int pdev_id, int restart)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
	struct wmi_vdev_start_req_arg arg = {};
	int ret = 0;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = ic->ic_dtim_period;
	arg.bcn_intval = ic->ic_lintval;

	/*
	 * All three frequencies are set to the primary channel frequency;
	 * no wide (40/80/160 MHz) channel configuration happens here.
	 */
	arg.channel.freq = chan->ic_freq;
	arg.channel.band_center_freq1 = chan->ic_freq;
	arg.channel.band_center_freq2 = chan->ic_freq;

	/* Only legacy a/b/g PHY modes are supported so far. */
	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		arg.channel.mode = MODE_11A;
		break;
	case IEEE80211_MODE_11B:
		arg.channel.mode = MODE_11B;
		break;
	case IEEE80211_MODE_11G:
		arg.channel.mode = MODE_11G;
		break;
	default:
		printf("%s: unsupported phy mode %d\n",
		    sc->sc_dev.dv_xname, ic->ic_curmode);
		return ENOTSUP;
	}

	arg.channel.min_power = 0;
	arg.channel.max_power = 20; /* XXX */
	arg.channel.max_reg_power = 20; /* XXX */
	arg.channel.max_antenna_gain = 0; /* XXX */

	arg.pref_tx_streams = 1;
	arg.pref_rx_streams = 1;

	arg.mbssid_flags = 0;
	arg.mbssid_tx_vdev_id = 0;
	if (isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);

		arg.channel.freq2_radar = ctx->radar_enabled;

		arg.channel.passive = arg.channel.chan_radar;

		spin_lock_bh(&ab->base_lock);
		arg.regdomain = ar->ab->dfs_region;
		spin_unlock_bh(&ab->base_lock);
	}
#endif
	/* XXX Mark channel numbers 52 and above as passive. */
	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);

	DNPRINTF(QWX_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
	    __func__, arg.vdev_id, arg.channel.freq,
	    qwx_wmi_phymode_str(arg.channel.mode));

	/* Cleared here; set again when the start completion arrives. */
	sc->vdev_setup_done = 0;
	ret = qwx_wmi_vdev_start(sc, &arg, pdev_id, restart);
	if (ret) {
		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
		    restart ? "restart" : "start", arg.vdev_id);
		return ret;
	}

	ret = qwx_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
		    sc->sc_dev.dv_xname, arg.vdev_id,
		    restart ? "restart" : "start", ret);
		return ret;
	}

	if (!restart)
		sc->num_started_vdevs++;

	DNPRINTF(QWX_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);

	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
	 * i.e dfs_cac_ms value which will be valid only for radar channels
	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
	 * done before channel usage. This flags is used to drop rx packets.
	 * during CAC.
	 */
	/* TODO Set the flag for other interface types as required */
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
	    chandef->chan->dfs_cac_ms &&
	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
		set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
		ath11k_dbg(ab, ATH11K_DBG_MAC,
			   "CAC Started in chan_freq %d for vdev %d\n",
			   arg.channel.freq, arg.vdev_id);
	}
#endif
	/* Beamforming failure is logged but does not fail the start. */
	ret = qwx_mac_set_txbf_conf(arvif);
	if (ret)
		printf("%s: failed to set txbf conf for vdev %d: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);

	return 0;
}
22782 
/*
 * Restart an already-created vdev (restart == 1 variant of
 * qwx_mac_vdev_start_restart()).
 */
int
qwx_mac_vdev_restart(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 1);
}
22788 
/*
 * Start a vdev for the first time (restart == 0 variant of
 * qwx_mac_vdev_start_restart()).
 */
int
qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 0);
}
22794 
22795 void
qwx_vif_free(struct qwx_softc * sc,struct qwx_vif * arvif)22796 qwx_vif_free(struct qwx_softc *sc, struct qwx_vif *arvif)
22797 {
22798 	struct qwx_txmgmt_queue *txmgmt;
22799 	int i;
22800 
22801 	if (arvif == NULL)
22802 		return;
22803 
22804 	txmgmt = &arvif->txmgmt;
22805 	for (i = 0; i < nitems(txmgmt->data); i++) {
22806 		struct qwx_tx_data *tx_data = &txmgmt->data[i];
22807 
22808 		if (tx_data->m) {
22809 			m_freem(tx_data->m);
22810 			tx_data->m = NULL;
22811 		}
22812 		if (tx_data->map) {
22813 			bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
22814 			tx_data->map = NULL;
22815 		}
22816 	}
22817 
22818 	free(arvif, M_DEVBUF, sizeof(*arvif));
22819 }
22820 
22821 struct qwx_vif *
qwx_vif_alloc(struct qwx_softc * sc)22822 qwx_vif_alloc(struct qwx_softc *sc)
22823 {
22824 	struct qwx_vif *arvif;
22825 	struct qwx_txmgmt_queue *txmgmt;
22826 	int i, ret = 0;
22827 	const bus_size_t size = IEEE80211_MAX_LEN;
22828 
22829 	arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
22830 	if (arvif == NULL)
22831 		return NULL;
22832 
22833 	txmgmt = &arvif->txmgmt;
22834 	for (i = 0; i < nitems(txmgmt->data); i++) {
22835 		struct qwx_tx_data *tx_data = &txmgmt->data[i];
22836 
22837 		ret = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
22838 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &tx_data->map);
22839 		if (ret) {
22840 			qwx_vif_free(sc, arvif);
22841 			return NULL;
22842 		}
22843 	}
22844 
22845 	arvif->sc = sc;
22846 
22847 	return arvif;
22848 }
22849 
22850 int
qwx_mac_op_add_interface(struct qwx_pdev * pdev)22851 qwx_mac_op_add_interface(struct qwx_pdev *pdev)
22852 {
22853 	struct qwx_softc *sc = pdev->sc;
22854 	struct ieee80211com *ic = &sc->sc_ic;
22855 	struct qwx_vif *arvif = NULL;
22856 	struct vdev_create_params vdev_param = { 0 };
22857 #if 0
22858 	struct peer_create_params peer_param;
22859 #endif
22860 	uint32_t param_id, param_value;
22861 	uint16_t nss;
22862 #if 0
22863 	int i;
22864 	int fbret;
22865 #endif
22866 	int ret, bit;
22867 #ifdef notyet
22868 	mutex_lock(&ar->conf_mutex);
22869 #endif
22870 #if 0
22871 	if (vif->type == NL80211_IFTYPE_AP &&
22872 	    ar->num_peers > (ar->max_num_peers - 1)) {
22873 		ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
22874 		ret = -ENOBUFS;
22875 		goto err;
22876 	}
22877 #endif
22878 	if (sc->num_created_vdevs > (TARGET_NUM_VDEVS(sc) - 1)) {
22879 		printf("%s: failed to create vdev %u, reached vdev limit %d\n",
22880 		    sc->sc_dev.dv_xname, sc->num_created_vdevs,
22881 		    TARGET_NUM_VDEVS(sc));
22882 		ret = EBUSY;
22883 		goto err;
22884 	}
22885 
22886 	arvif = qwx_vif_alloc(sc);
22887 	if (arvif == NULL) {
22888 		ret = ENOMEM;
22889 		goto err;
22890 	}
22891 #if 0
22892 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
22893 			  ath11k_mac_vif_sta_connection_loss_work);
22894 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
22895 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
22896 		arvif->bitrate_mask.control[i].gi = 0;
22897 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
22898 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
22899 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
22900 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
22901 		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
22902 		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
22903 	}
22904 #endif
22905 
22906 	if (sc->free_vdev_map == 0) {
22907 		printf("%s: cannot add interface; all vdevs are busy\n",
22908 		    sc->sc_dev.dv_xname);
22909 		ret = EBUSY;
22910 		goto err;
22911 	}
22912 	bit = ffs(sc->free_vdev_map) - 1;
22913 
22914 	arvif->vdev_id = bit;
22915 	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
22916 
22917 	switch (ic->ic_opmode) {
22918 	case IEEE80211_M_STA:
22919 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
22920 		break;
22921 #if 0
22922 	case NL80211_IFTYPE_MESH_POINT:
22923 		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
22924 		fallthrough;
22925 	case NL80211_IFTYPE_AP:
22926 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
22927 		break;
22928 	case NL80211_IFTYPE_MONITOR:
22929 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
22930 		ar->monitor_vdev_id = bit;
22931 		break;
22932 #endif
22933 	default:
22934 		printf("%s: invalid operating mode %d\n",
22935 		    sc->sc_dev.dv_xname, ic->ic_opmode);
22936 		ret = EINVAL;
22937 		goto err;
22938 	}
22939 
22940 	DNPRINTF(QWX_D_MAC,
22941 	    "%s: add interface id %d type %d subtype %d map 0x%x\n",
22942 	    __func__, arvif->vdev_id, arvif->vdev_type,
22943 	    arvif->vdev_subtype, sc->free_vdev_map);
22944 
22945 	ret = qwx_mac_setup_vdev_create_params(arvif, pdev, &vdev_param);
22946 	if (ret) {
22947 		printf("%s: failed to create vdev parameters %d: %d\n",
22948 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
22949 		goto err;
22950 	}
22951 
22952 	ret = qwx_wmi_vdev_create(sc, ic->ic_myaddr, &vdev_param);
22953 	if (ret) {
22954 		printf("%s: failed to create WMI vdev %d %s: %d\n",
22955 		    sc->sc_dev.dv_xname, arvif->vdev_id,
22956 		    ether_sprintf(ic->ic_myaddr), ret);
22957 		goto err;
22958 	}
22959 
22960 	sc->num_created_vdevs++;
22961 	DNPRINTF(QWX_D_MAC, "%s: vdev %s created, vdev_id %d\n", __func__,
22962 	    ether_sprintf(ic->ic_myaddr), arvif->vdev_id);
22963 	sc->allocated_vdev_map |= 1U << arvif->vdev_id;
22964 	sc->free_vdev_map &= ~(1U << arvif->vdev_id);
22965 #ifdef notyet
22966 	spin_lock_bh(&ar->data_lock);
22967 #endif
22968 	TAILQ_INSERT_TAIL(&sc->vif_list, arvif, entry);
22969 #ifdef notyet
22970 	spin_unlock_bh(&ar->data_lock);
22971 #endif
22972 	ret = qwx_mac_op_update_vif_offload(sc, pdev, arvif);
22973 	if (ret)
22974 		goto err_vdev_del;
22975 
22976 	nss = qwx_get_num_chains(sc->cfg_tx_chainmask) ? : 1;
22977 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
22978 	    WMI_VDEV_PARAM_NSS, nss);
22979 	if (ret) {
22980 		printf("%s: failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
22981 		    sc->sc_dev.dv_xname, arvif->vdev_id, sc->cfg_tx_chainmask,
22982 		    nss, ret);
22983 		goto err_vdev_del;
22984 	}
22985 
22986 	switch (arvif->vdev_type) {
22987 #if 0
22988 	case WMI_VDEV_TYPE_AP:
22989 		peer_param.vdev_id = arvif->vdev_id;
22990 		peer_param.peer_addr = vif->addr;
22991 		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
22992 		ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
22993 		if (ret) {
22994 			ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
22995 				    arvif->vdev_id, ret);
22996 			goto err_vdev_del;
22997 		}
22998 
22999 		ret = ath11k_mac_set_kickout(arvif);
23000 		if (ret) {
23001 			ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
23002 				    arvif->vdev_id, ret);
23003 			goto err_peer_del;
23004 		}
23005 
23006 		ath11k_mac_11d_scan_stop_all(ar->ab);
23007 		break;
23008 #endif
23009 	case WMI_VDEV_TYPE_STA:
23010 		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
23011 		param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
23012 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23013 		    pdev->pdev_id, param_id, param_value);
23014 		if (ret) {
23015 			printf("%s: failed to set vdev %d RX wake policy: %d\n",
23016 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23017 			goto err_peer_del;
23018 		}
23019 
23020 		param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
23021 		param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
23022 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23023 		    pdev->pdev_id, param_id, param_value);
23024 		if (ret) {
23025 			printf("%s: failed to set vdev %d "
23026 			    "TX wake threshold: %d\n",
23027 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23028 			goto err_peer_del;
23029 		}
23030 
23031 		param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
23032 		param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
23033 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
23034 		    pdev->pdev_id, param_id, param_value);
23035 		if (ret) {
23036 			printf("%s: failed to set vdev %d pspoll count: %d\n",
23037 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23038 			goto err_peer_del;
23039 		}
23040 
23041 		ret = qwx_wmi_pdev_set_ps_mode(sc, arvif->vdev_id,
23042 		    pdev->pdev_id, WMI_STA_PS_MODE_DISABLED);
23043 		if (ret) {
23044 			printf("%s: failed to disable vdev %d ps mode: %d\n",
23045 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23046 			goto err_peer_del;
23047 		}
23048 
23049 		if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
23050 			sc->completed_11d_scan = 0;
23051 			sc->state_11d = ATH11K_11D_PREPARING;
23052 		}
23053 		break;
23054 #if 0
23055 	case WMI_VDEV_TYPE_MONITOR:
23056 		set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
23057 		break;
23058 #endif
23059 	default:
23060 		printf("%s: invalid vdev type %d\n",
23061 		    sc->sc_dev.dv_xname, arvif->vdev_type);
23062 		ret = EINVAL;
23063 		goto err;
23064 	}
23065 
23066 	arvif->txpower = 40;
23067 	ret = qwx_mac_txpower_recalc(sc, pdev);
23068 	if (ret)
23069 		goto err_peer_del;
23070 
23071 	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
23072 	param_value = ic->ic_rtsthreshold;
23073 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
23074 	    param_id, param_value);
23075 	if (ret) {
23076 		printf("%s: failed to set rts threshold for vdev %d: %d\n",
23077 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
23078 		goto err_peer_del;
23079 	}
23080 
23081 	qwx_dp_vdev_tx_attach(sc, pdev, arvif);
23082 #if 0
23083 	if (vif->type != NL80211_IFTYPE_MONITOR &&
23084 	    test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
23085 		ret = ath11k_mac_monitor_vdev_create(ar);
23086 		if (ret)
23087 			ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
23088 				    ret);
23089 	}
23090 
23091 	mutex_unlock(&ar->conf_mutex);
23092 #endif
23093 	return 0;
23094 
23095 err_peer_del:
23096 #if 0
23097 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
23098 		fbret = qwx_peer_delete(sc, arvif->vdev_id, vif->addr);
23099 		if (fbret) {
23100 			printf("%s: fallback fail to delete peer addr %pM "
23101 			    "vdev_id %d ret %d\n", sc->sc_dev.dv_xname,
23102 			    vif->addr, arvif->vdev_id, fbret);
23103 			goto err;
23104 		}
23105 	}
23106 #endif
23107 err_vdev_del:
23108 	qwx_mac_vdev_delete(sc, arvif);
23109 #ifdef notyet
23110 	spin_lock_bh(&ar->data_lock);
23111 #endif
23112 	TAILQ_REMOVE(&sc->vif_list, arvif, entry);
23113 #ifdef notyet
23114 	spin_unlock_bh(&ar->data_lock);
23115 #endif
23116 
23117 err:
23118 #ifdef notyet
23119 	mutex_unlock(&ar->conf_mutex);
23120 #endif
23121 	qwx_vif_free(sc, arvif);
23122 	return ret;
23123 }
23124 
23125 int
qwx_mac_start(struct qwx_softc * sc)23126 qwx_mac_start(struct qwx_softc *sc)
23127 {
23128 	struct qwx_pdev *pdev;
23129 	int i, error;
23130 
23131 	for (i = 0; i < sc->num_radios; i++) {
23132 		pdev = &sc->pdevs[i];
23133 		error = qwx_mac_op_start(pdev);
23134 		if (error)
23135 			return error;
23136 
23137 		error = qwx_mac_op_add_interface(pdev);
23138 		if (error)
23139 			return error;
23140 	}
23141 
23142 	return 0;
23143 }
23144 
/*
 * Reinitialization task: stop the interface if it is running and
 * start it again if it is administratively up.  Serialized against
 * ioctl handlers via ioctl_rwl while network interrupts are blocked
 * at splnet.
 */
void
qwx_init_task(void *arg)
{
	struct qwx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	rw_enter_write(&sc->ioctl_rwl);

	if (ifp->if_flags & IFF_RUNNING)
		qwx_stop(ifp);

	/* Restart only if the interface is up but no longer running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		qwx_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
23162 
/*
 * Start a firmware-offloaded 11d (country code detection) scan on the
 * given vif.  This is a no-op when a scan is already associated with a
 * vdev, when firmware lacks 11d offload support, or when not operating
 * in station mode.  On any bail-out while still in the PREPARING
 * state, 11d state is reset to IDLE.
 */
void
qwx_mac_11d_scan_start(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct wmi_11d_scan_start_params param;
	int ret;
#ifdef notyet
	mutex_lock(&ar->ab->vdev_id_11d_lock);
#endif
	DNPRINTF(QWX_D_MAC, "%s: vdev id for 11d scan %d\n", __func__,
	    sc->vdev_id_11d_scan);
#if 0
	if (ar->regdom_set_by_user)
		goto fin;
#endif
	/* A vdev is already running an 11d scan. */
	if (sc->vdev_id_11d_scan != QWX_11D_INVALID_VDEV_ID)
		goto fin;

	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD))
		goto fin;

	if (ic->ic_opmode != IEEE80211_M_STA)
		goto fin;

	param.vdev_id = arvif->vdev_id;
	param.start_interval_msec = 0;
	param.scan_period_msec = QWX_SCAN_11D_INTERVAL;

	DNPRINTF(QWX_D_MAC, "%s: start 11d scan\n", __func__);

	ret = qwx_wmi_send_11d_scan_start_cmd(sc, &param,
	   0 /* TODO: derive pdev ID from arvif somehow? */);
	if (ret) {
		/* ESHUTDOWN is expected during detach/crash; stay quiet. */
		if (ret != ESHUTDOWN) {
			printf("%s: failed to start 11d scan; vdev: %d "
			    "ret: %d\n", sc->sc_dev.dv_xname,
			    arvif->vdev_id, ret);
		}
	} else {
		sc->vdev_id_11d_scan = arvif->vdev_id;
		if (sc->state_11d == ATH11K_11D_PREPARING)
			sc->state_11d = ATH11K_11D_RUNNING;
	}
fin:
	/* Never leave the 11d state machine stuck in PREPARING. */
	if (sc->state_11d == ATH11K_11D_PREPARING) {
		sc->state_11d = ATH11K_11D_IDLE;
		sc->completed_11d_scan = 0;
	}
#ifdef notyet
	mutex_unlock(&ar->ab->vdev_id_11d_lock);
#endif
}
23215 
/*
 * Finalize a scan: reset scan state to idle, cancel the scan timeout,
 * and notify net80211 that scanning has ended (unless this was a
 * remain-on-channel operation).  Does nothing when already idle.
 */
void
qwx_mac_scan_finish(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ath11k_scan_state ostate;

#ifdef notyet
	lockdep_assert_held(&ar->data_lock);
#endif
	ostate = sc->scan.state;
	switch (ostate) {
	case ATH11K_SCAN_IDLE:
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
#if 0
		if (ar->scan.is_roc && ar->scan.roc_notify)
			ieee80211_remain_on_channel_expired(ar->hw);
		fallthrough;
#endif
		/* FALLTHROUGH: RUNNING/ABORTING share STARTING's cleanup. */
	case ATH11K_SCAN_STARTING:
		sc->scan.state = ATH11K_SCAN_IDLE;
		sc->scan_channel = 0;
		sc->scan.roc_freq = 0;

		timeout_del(&sc->scan.timeout);
		if (!sc->scan.is_roc)
			ieee80211_end_scan(ifp);
#if 0
		complete_all(&ar->scan.completed);
#endif
		break;
	}
}
23251 
23252 int
qwx_mac_get_rate_hw_value(struct ieee80211com * ic,struct ieee80211_node * ni,int bitrate)23253 qwx_mac_get_rate_hw_value(struct ieee80211com *ic,
23254     struct ieee80211_node *ni, int bitrate)
23255 {
23256 	uint32_t preamble;
23257 	uint16_t hw_value;
23258 	int shortpre = 0;
23259 
23260 	if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
23261 		preamble = WMI_RATE_PREAMBLE_CCK;
23262 	else
23263 		preamble = WMI_RATE_PREAMBLE_OFDM;
23264 
23265 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
23266 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
23267 		shortpre = 1;
23268 
23269 	switch (bitrate) {
23270 	case 2:
23271 		hw_value = ATH11K_HW_RATE_CCK_LP_1M;
23272 		break;
23273 	case 4:
23274 		if (shortpre)
23275 			hw_value = ATH11K_HW_RATE_CCK_SP_2M;
23276 		else
23277 			hw_value = ATH11K_HW_RATE_CCK_LP_2M;
23278 		break;
23279 	case 11:
23280 		if (shortpre)
23281 			hw_value = ATH11K_HW_RATE_CCK_SP_5_5M;
23282 		else
23283 			hw_value = ATH11K_HW_RATE_CCK_LP_5_5M;
23284 		break;
23285 	case 22:
23286 		if (shortpre)
23287 			hw_value = ATH11K_HW_RATE_CCK_SP_11M;
23288 		else
23289 			hw_value = ATH11K_HW_RATE_CCK_LP_11M;
23290 		break;
23291 	case 12:
23292 		hw_value = ATH11K_HW_RATE_OFDM_6M;
23293 		break;
23294 	case 18:
23295 		hw_value = ATH11K_HW_RATE_OFDM_9M;
23296 		break;
23297 	case 24:
23298 		hw_value = ATH11K_HW_RATE_OFDM_12M;
23299 		break;
23300 	case 36:
23301 		hw_value = ATH11K_HW_RATE_OFDM_18M;
23302 		break;
23303 	case 48:
23304 		hw_value = ATH11K_HW_RATE_OFDM_24M;
23305 		break;
23306 	case 72:
23307 		hw_value = ATH11K_HW_RATE_OFDM_36M;
23308 		break;
23309 	case 96:
23310 		hw_value = ATH11K_HW_RATE_OFDM_48M;
23311 		break;
23312 	case 108:
23313 		hw_value = ATH11K_HW_RATE_OFDM_54M;
23314 		break;
23315 	default:
23316 		return -1;
23317 	}
23318 
23319 	return ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
23320 }
23321 
/*
 * Delete a firmware peer entry.  Issues the WMI peer-delete command
 * and then waits (up to 3 seconds each) for two firmware events: the
 * peer unmap notification (sets sc->peer_mapped) and the delete
 * completion (sets sc->peer_delete_done).  Decrements the peer count
 * on success; returns the tsleep error on timeout.
 */
int
qwx_peer_delete(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
    uint8_t *addr)
{
	int ret;

	/* Cleared here; set again by the corresponding firmware events. */
	sc->peer_mapped = 0;
	sc->peer_delete_done = 0;

	ret = qwx_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
	if (ret) {
		printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
		    sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
		return ret;
	}

	/* Wait for the peer unmap event. */
	while (!sc->peer_mapped) {
		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer delete unmap timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	/* Wait for the delete command completion. */
	while (!sc->peer_delete_done) {
		ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwxpeerd",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer delete command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	sc->num_peers--;
	return 0;
}
23361 
/*
 * Create a firmware peer entry for the given node and wait for the
 * firmware to confirm the peer mapping.  Returns 0 on success or an
 * errno-style error (ENOBUFS if the firmware peer table is full,
 * EINVAL if the peer already exists on this vdev, or the tsleep
 * error on confirmation timeout).
 */
int
qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
    struct ieee80211_node *ni, struct peer_create_params *param)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Do not exceed the firmware's per-pdev peer table capacity. */
	if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
		DPRINTF("%s: failed to create peer due to insufficient "
		    "peer entry resource in firmware\n", __func__);
		return ENOBUFS;
	}
#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
	/*
	 * Peer state is embedded in our node structure, so "peer" can
	 * never be NULL here; the check below is inherited from the
	 * Linux ath11k driver where peers live in a separate table.
	 */
	peer = &nq->peer;
	if (peer) {
		/* Refuse to re-create a peer already mapped on this vdev. */
		if (peer->peer_id != HAL_INVALID_PEERID &&
		    peer->vdev_id == param->vdev_id) {
#ifdef notyet
			spin_unlock_bh(&ar->ab->base_lock);
			mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
			return EINVAL;
		}
#if 0
		/* Assume sta is transitioning to another band.
		 * Remove here the peer from rhash.
		 */
		ath11k_peer_rhash_delete(ar->ab, peer);
#endif
	}
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	/*
	 * Cleared before sending the command; set again elsewhere when
	 * the firmware reports the peer mapping (we are woken up on
	 * &sc->peer_mapped below).
	 */
	sc->peer_mapped = 0;

	ret = qwx_wmi_send_peer_create_cmd(sc, pdev_id, param);
	if (ret) {
		printf("%s: failed to send peer create vdev_id %d ret %d\n",
		    sc->sc_dev.dv_xname, param->vdev_id, ret);
		return ret;
	}

	/*
	 * Wait for the firmware's confirmation; tsleep_nsec() returns
	 * non-zero after 3 seconds without a wakeup.
	 */
	while (!sc->peer_mapped) {
		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer create command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}
#endif
	peer->pdev_id = pdev_id;
#if 0
	peer->sta = sta;
#endif
	/*
	 * In station mode, remember the AST index/hash of the peer;
	 * the TX path stamps these into TCL descriptors (see
	 * qwx_dp_tx()).
	 */
	if (ic->ic_opmode == IEEE80211_M_STA) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}
#if 0
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}
#endif
	sc->num_peers++;
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	return 0;
#if 0
cleanup:
	int fbret = qwx_peer_delete(sc, param->vdev_id, param->peer_addr);
	if (fbret) {
		printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
		    param->vdev_id, fbret);
	}

	return ret;
#endif
}
23485 
/*
 * Post a REO (rx re-order) command for the given rx TID to the REO
 * command ring.  If a completion callback is given, the command is
 * remembered on dp->reo_cmd_list together with a private snapshot of
 * the TID state so the status event handler can invoke the callback
 * later.  Returns 0 on success or an errno-style error.
 */
int
qwx_dp_tx_send_reo_cmd(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd,
    void (*cb)(struct qwx_dp *, void *, enum hal_reo_cmd_status))
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	/* Device is being torn down after a firmware crash. */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = qwx_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return EINVAL;

	/* Without a callback there is nothing to track. */
	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dp_cmd)
		return ENOMEM;

	/* Snapshot the TID state; the caller's copy may change meanwhile. */
	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;
#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return 0;
}
23532 
23533 uint32_t
qwx_hal_reo_qdesc_size(uint32_t ba_window_size,uint8_t tid)23534 qwx_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
23535 {
23536 	uint32_t num_ext_desc;
23537 
23538 	if (ba_window_size <= 1) {
23539 		if (tid != HAL_DESC_REO_NON_QOS_TID)
23540 			num_ext_desc = 1;
23541 		else
23542 			num_ext_desc = 0;
23543 	} else if (ba_window_size <= 105) {
23544 		num_ext_desc = 1;
23545 	} else if (ba_window_size <= 210) {
23546 		num_ext_desc = 2;
23547 	} else {
23548 		num_ext_desc = 3;
23549 	}
23550 
23551 	return sizeof(struct hal_rx_reo_queue) +
23552 		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
23553 }
23554 
23555 void
qwx_hal_reo_set_desc_hdr(struct hal_desc_header * hdr,uint8_t owner,uint8_t buffer_type,uint32_t magic)23556 qwx_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
23557 {
23558 	hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
23559 		     FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
23560 
23561 	/* Magic pattern in reserved bits for debugging */
23562 	hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
23563 }
23564 
/*
 * Initialize the REO hardware rx queue descriptor for one TID in the
 * DMA memory at vaddr.  The descriptor consists of a base structure
 * followed by extension descriptors (cf. qwx_hal_reo_qdesc_size()).
 */
void
qwx_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
    uint32_t start_seq, enum hal_pn_type type)
{
	struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
	struct hal_rx_reo_queue_ext *ext_desc;

	memset(qdesc, 0, sizeof(*qdesc));

	/* Mark the descriptor REO-owned and tag it for debugging. */
	qwx_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);

	/* Queues are numbered by TID. */
	qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);

	qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwx_tid_to_ac(tid));

	if (ba_window_size < 1)
		ba_window_size = 1;

	/*
	 * QoS TIDs get a minimum window of 2; presumably a hardware
	 * requirement inherited from ath11k -- TODO confirm.
	 */
	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
		ba_window_size++;

	if (ba_window_size == 1)
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);

	/* The hardware field encodes the window size minus one. */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
				   ba_window_size - 1);
	switch (type) {
	case HAL_PN_TYPE_NONE:
	case HAL_PN_TYPE_WAPI_EVEN:
	case HAL_PN_TYPE_WAPI_UNEVEN:
		break;
	case HAL_PN_TYPE_WPA:
		/* Enable hardware PN replay checking with 48-bit PNs. */
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
		    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
		    HAL_RX_REO_QUEUE_PN_SIZE_48);
		break;
	}

	/* TODO: Set Ignore ampdu flags based on BA window size and/or
	 * AMPDU capabilities
	 */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);

	qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);

	/* NB: this assignment overwrites the SVLD field set just above. */
	if (start_seq <= 0xfff)
		qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
		    start_seq);

	/* The non-QoS TID needs no extension descriptors. */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		return;

	ext_desc = qdesc->ext_desc;

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
23642 
23643 void
qwx_dp_reo_cmd_free(struct qwx_dp * dp,void * ctx,enum hal_reo_cmd_status status)23644 qwx_dp_reo_cmd_free(struct qwx_dp *dp, void *ctx,
23645     enum hal_reo_cmd_status status)
23646 {
23647 	struct qwx_softc *sc = dp->sc;
23648 	struct dp_rx_tid *rx_tid = ctx;
23649 
23650 	if (status != HAL_REO_CMD_SUCCESS)
23651 		printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
23652 		    sc->sc_dev.dv_xname, rx_tid->tid, status);
23653 
23654 	if (rx_tid->mem) {
23655 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
23656 		rx_tid->mem = NULL;
23657 		rx_tid->vaddr = NULL;
23658 		rx_tid->paddr = 0ULL;
23659 		rx_tid->size = 0;
23660 	}
23661 }
23662 
/*
 * Flush an rx TID's (no longer used) hardware queue descriptor from
 * the REO cache, one descriptor-sized chunk at a time, working from
 * the end of the allocation towards its base.  The final flush of the
 * base chunk requests a status event so qwx_dp_reo_cmd_free() can
 * release the DMA memory afterwards; if that command cannot be sent
 * the memory is freed right here.
 */
void
qwx_dp_reo_cache_flush(struct qwx_softc *sc, struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = qwx_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush trailing chunks without waiting for status. */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
		cmd.addr_hi = rx_tid->paddr >> 32;
		ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
			    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
			    ret);
		}
	}

	/* Flush the base chunk and ask for a completion callback. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
	    &cmd, qwx_dp_reo_cmd_free);
	if (ret) {
		printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
		    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
		/* The callback will not run; free the memory now. */
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
	}
}
23704 
/*
 * Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent
 * by qwx_peer_rx_tid_delete().  On success the TID's DMA memory is not
 * freed immediately: ownership moves to an element parked on
 * dp->reo_cmd_cache_flush_list so the hardware cache can be flushed
 * first.  Aged or excess elements on that list are flushed and freed
 * from here as well.
 */
void
qwx_dp_rx_tid_del_func(struct qwx_dp *dp, void *ctx,
    enum hal_reo_cmd_status status)
{
	struct qwx_softc *sc = dp->sc;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;
	uint64_t now;

	if (status == HAL_REO_CMD_DRAIN) {
		/* Command was drained; just release the memory. */
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		printf("%s: failed to delete rx tid %d hw descriptor %d\n",
		    sc->sc_dev.dv_xname, rx_tid->tid, status);
		return;
	}

	elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!elem)
		goto free_desc;

	now = getnsecuptime();
	elem->ts = now;
	/* The list element takes over ownership of rx_tid->mem. */
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	rx_tid->mem = NULL;
	rx_tid->vaddr = NULL;
	rx_tid->paddr = 0ULL;
	rx_tid->size = 0;

#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    now >= elem->ts + MSEC_TO_NSEC(DP_REO_DESC_FREE_TIMEOUT_MS)) {
			TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
			dp->reo_cmd_cache_flush_count--;
#ifdef notyet
			spin_unlock_bh(&dp->reo_cmd_lock);
#endif
			qwx_dp_reo_cache_flush(sc, &elem->data);
			free(elem, M_DEVBUF, sizeof(*elem));
#ifdef notyet
			spin_lock_bh(&dp->reo_cmd_lock);
#endif
		}
	}
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return;
free_desc:
	if (rx_tid->mem) {
		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
		rx_tid->mem = NULL;
		rx_tid->vaddr = NULL;
		rx_tid->paddr = 0ULL;
		rx_tid->size = 0;
	}
}
23771 
/*
 * Tear down an active rx TID: clear the valid bit of its hardware REO
 * queue via an UPDATE_RX_QUEUE command and let the completion handler
 * qwx_dp_rx_tid_del_func() flush the cache and release the queue
 * descriptor's DMA memory.  If the command cannot be issued, the
 * memory is freed immediately instead.
 */
void
qwx_peer_rx_tid_delete(struct qwx_softc *sc, struct ath11k_peer *peer,
    uint8_t tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	/* Only the VLD field is updated (it was zeroed in cmd above). */
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
	    &cmd, qwx_dp_rx_tid_del_func);
	if (ret) {
		/* ESHUTDOWN is expected during crash teardown; stay quiet. */
		if (ret != ESHUTDOWN) {
			printf("%s: failed to send "
			    "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			    sc->sc_dev.dv_xname, tid, ret);
		}

		/* No callback will run; free the DMA memory here. */
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
	}
}
23807 
/*
 * Reset an rx TID's fragment reassembly state.  Returning the
 * destination ring descriptor and purging the pending fragment queue
 * (see the disabled Linux code below) are not implemented yet, so
 * rel_link_desc is currently unused.
 */
void
qwx_dp_rx_frags_cleanup(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
    int rel_link_desc)
{
#ifdef notyet
	lockdep_assert_held(&ab->base_lock);
#endif
#if 0
	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}
#endif
	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
#if 0
	__skb_queue_purge(&rx_tid->rx_frags);
#endif
}
23831 
23832 void
qwx_peer_frags_flush(struct qwx_softc * sc,struct ath11k_peer * peer)23833 qwx_peer_frags_flush(struct qwx_softc *sc, struct ath11k_peer *peer)
23834 {
23835 	struct dp_rx_tid *rx_tid;
23836 	int i;
23837 #ifdef notyet
23838 	lockdep_assert_held(&ar->ab->base_lock);
23839 #endif
23840 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23841 		rx_tid = &peer->rx_tid[i];
23842 
23843 		qwx_dp_rx_frags_cleanup(sc, rx_tid, 1);
23844 #if 0
23845 		spin_unlock_bh(&ar->ab->base_lock);
23846 		del_timer_sync(&rx_tid->frag_timer);
23847 		spin_lock_bh(&ar->ab->base_lock);
23848 #endif
23849 	}
23850 }
23851 
23852 void
qwx_peer_rx_tid_cleanup(struct qwx_softc * sc,struct ath11k_peer * peer)23853 qwx_peer_rx_tid_cleanup(struct qwx_softc *sc, struct ath11k_peer *peer)
23854 {
23855 	struct dp_rx_tid *rx_tid;
23856 	int i;
23857 #ifdef notyet
23858 	lockdep_assert_held(&ar->ab->base_lock);
23859 #endif
23860 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
23861 		rx_tid = &peer->rx_tid[i];
23862 
23863 		qwx_peer_rx_tid_delete(sc, peer, i);
23864 		qwx_dp_rx_frags_cleanup(sc, rx_tid, 1);
23865 #if 0
23866 		spin_unlock_bh(&ar->ab->base_lock);
23867 		del_timer_sync(&rx_tid->frag_timer);
23868 		spin_lock_bh(&ar->ab->base_lock);
23869 #endif
23870 	}
23871 }
23872 
23873 int
qwx_peer_rx_tid_reo_update(struct qwx_softc * sc,struct ath11k_peer * peer,struct dp_rx_tid * rx_tid,uint32_t ba_win_sz,uint16_t ssn,int update_ssn)23874 qwx_peer_rx_tid_reo_update(struct qwx_softc *sc, struct ath11k_peer *peer,
23875     struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
23876     int update_ssn)
23877 {
23878 	struct ath11k_hal_reo_cmd cmd = {0};
23879 	int ret;
23880 
23881 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
23882 	cmd.addr_hi = rx_tid->paddr >> 32;
23883 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
23884 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
23885 	cmd.ba_window_size = ba_win_sz;
23886 
23887 	if (update_ssn) {
23888 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
23889 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
23890 	}
23891 
23892 	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
23893 	    &cmd, NULL);
23894 	if (ret) {
23895 		printf("%s: failed to update rx tid queue, tid %d (%d)\n",
23896 		    sc->sc_dev.dv_xname, rx_tid->tid, ret);
23897 		return ret;
23898 	}
23899 
23900 	rx_tid->ba_win_sz = ba_win_sz;
23901 
23902 	return 0;
23903 }
23904 
23905 void
qwx_dp_rx_tid_mem_free(struct qwx_softc * sc,struct ieee80211_node * ni,int vdev_id,uint8_t tid)23906 qwx_dp_rx_tid_mem_free(struct qwx_softc *sc, struct ieee80211_node *ni,
23907     int vdev_id, uint8_t tid)
23908 {
23909 	struct qwx_node *nq = (struct qwx_node *)ni;
23910 	struct ath11k_peer *peer = &nq->peer;
23911 	struct dp_rx_tid *rx_tid;
23912 #ifdef notyet
23913 	spin_lock_bh(&ab->base_lock);
23914 #endif
23915 	rx_tid = &peer->rx_tid[tid];
23916 
23917 	if (rx_tid->mem) {
23918 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
23919 		rx_tid->mem = NULL;
23920 		rx_tid->vaddr = NULL;
23921 		rx_tid->paddr = 0ULL;
23922 		rx_tid->size = 0;
23923 	}
23924 
23925 	rx_tid->active = 0;
23926 #ifdef notyet
23927 	spin_unlock_bh(&ab->base_lock);
23928 #endif
23929 }
23930 
/*
 * Set up (or update) the hardware REO rx re-ordering queue of one TID
 * for a peer: allocate a DMA-able queue descriptor, initialize it, and
 * tell the firmware about it via WMI.  If the TID is already active
 * only the window size/SSN is updated.  Returns 0 on success or an
 * errno-style error.
 */
int
qwx_peer_rx_tid_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
    int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
    enum hal_pn_type pn_type)
{
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	struct dp_rx_tid *rx_tid;
	uint32_t hw_desc_sz;
	void *vaddr;
	uint64_t paddr;
	int ret;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = qwx_peer_rx_tid_reo_update(sc, peer, rx_tid,
		    ba_win_sz, ssn, 1);
#ifdef notyet
		spin_unlock_bh(&ab->base_lock);
#endif
		if (ret) {
			printf("%s: failed to update reo for peer %s "
			    "rx tid %d\n: %d", sc->sc_dev.dv_xname,
			    ether_sprintf(ni->ni_macaddr), tid, ret);
			return ret;
		}

		/* Let the firmware know about the updated queue. */
		ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
		    pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
		if (ret)
			printf("%s: failed to send wmi rx reorder queue "
			    "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
			    ether_sprintf(ni->ni_macaddr), tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = qwx_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = qwx_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	rx_tid->mem = qwx_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
	    HAL_LINK_DESC_ALIGN);
	if (rx_tid->mem == NULL) {
#ifdef notyet
		spin_unlock_bh(&ab->base_lock);
#endif
		return ENOMEM;
	}

	vaddr = QWX_DMA_KVA(rx_tid->mem);

	/* Initialize the queue descriptor in the new DMA memory. */
	qwx_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);

	paddr = QWX_DMA_DVA(rx_tid->mem);

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = 1;
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	/* Point the firmware at the new queue descriptor. */
	ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
	    ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
	if (ret) {
		printf("%s: failed to setup rx reorder queue for peer %s "
		    "tid %d: %d\n", sc->sc_dev.dv_xname,
		    ether_sprintf(ni->ni_macaddr), tid, ret);
		/* Undo the allocation and deactivate the TID. */
		qwx_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
	}

	return ret;
}
24016 
24017 int
qwx_peer_rx_frag_setup(struct qwx_softc * sc,struct ieee80211_node * ni,int vdev_id)24018 qwx_peer_rx_frag_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
24019     int vdev_id)
24020 {
24021 	struct qwx_node *nq = (struct qwx_node *)ni;
24022 	struct ath11k_peer *peer = &nq->peer;
24023 	struct dp_rx_tid *rx_tid;
24024 	int i;
24025 #ifdef notyet
24026 	spin_lock_bh(&ab->base_lock);
24027 #endif
24028 	for (i = 0; i <= nitems(peer->rx_tid); i++) {
24029 		rx_tid = &peer->rx_tid[i];
24030 #if 0
24031 		rx_tid->ab = ab;
24032 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
24033 #endif
24034 	}
24035 #if 0
24036 	peer->dp_setup_done = true;
24037 #endif
24038 #ifdef notyet
24039 	spin_unlock_bh(&ab->base_lock);
24040 #endif
24041 	return 0;
24042 }
24043 
/*
 * Set up data-path state for a new peer: program the default rx
 * routing in firmware, create a REO rx queue for every TID, and
 * initialize rx fragment reassembly state.  On failure every TID
 * queue created so far is deleted again.  Returns 0 on success or an
 * errno-style error.
 */
int
qwx_dp_peer_setup(struct qwx_softc *sc, int vdev_id, int pdev_id,
    struct ieee80211_node *ni)
{
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	uint32_t reo_dest;
	int ret = 0, tid;

	/* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = sc->pdev_dp.mac_id + 1;
	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
	    WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		printf("%s: failed to set default routing %d peer %s "
		    "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
		    ether_sprintf(ni->ni_macaddr), vdev_id);
		return ret;
	}

	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		ret = qwx_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
		    tid, 1, 0, HAL_PN_TYPE_NONE);
		if (ret) {
			printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			/* Clean up this and all previously set up TIDs. */
			goto peer_clean;
		}
	}

	ret = qwx_peer_rx_frag_setup(sc, ni, vdev_id);
	if (ret) {
		printf("%s: failed to setup rx defrag context\n",
		    sc->sc_dev.dv_xname);
		/* All TIDs were set up; step back to the last valid one. */
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}
#endif
	/* Tear down TID queues in reverse order of creation. */
	for (; tid >= 0; tid--)
		qwx_peer_rx_tid_delete(sc, peer, tid);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return ret;
}
24105 
/*
 * Enable (or, on key deletion, disable) hardware PN/TSC replay
 * checking on all active rx TID queues of a peer for a pairwise key.
 * Returns 0 on success, EOPNOTSUPP for unsupported ciphers, or an
 * errno-style error from the REO command submission.
 */
int
qwx_dp_peer_rx_pn_replay_config(struct qwx_softc *sc, struct qwx_vif *arvif,
    struct ieee80211_node *ni, struct ieee80211_key *k, int delete_key)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	struct dp_rx_tid *rx_tid;
	uint8_t tid;
	int ret = 0;

	/*
	 * NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use net80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (k->k_flags & IEEE80211_KEY_GROUP)
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (k->k_cipher) {
	case IEEE80211_CIPHER_TKIP:
	case IEEE80211_CIPHER_CCMP:
#if 0
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
#endif
		/*
		 * When deleting the key, leave the PN-check fields at
		 * zero, which disables the hardware check.
		 */
		if (!delete_key) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		printf("%s: cipher %u is not supported\n",
		    sc->sc_dev.dv_xname, k->k_cipher);
		return EOPNOTSUPP;
	}

	/* Apply the same update to every active rx TID queue. */
	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = rx_tid->paddr & 0xffffffff;
		cmd.addr_hi = (rx_tid->paddr >> 32);
		ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to configure rx tid %d queue "
			    "for pn replay detection %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			break;
		}
	}

	return ret;
}
24169 
24170 enum hal_tcl_encap_type
qwx_dp_tx_get_encap_type(struct qwx_softc * sc)24171 qwx_dp_tx_get_encap_type(struct qwx_softc *sc)
24172 {
24173 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
24174 		return HAL_TCL_ENCAP_TYPE_RAW;
24175 #if 0
24176 	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
24177 		return HAL_TCL_ENCAP_TYPE_ETHERNET;
24178 #endif
24179 	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
24180 }
24181 
24182 uint8_t
qwx_dp_tx_get_tid(struct mbuf * m)24183 qwx_dp_tx_get_tid(struct mbuf *m)
24184 {
24185 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
24186 	uint16_t qos = ieee80211_get_qos(wh);
24187 	uint8_t tid = qos & IEEE80211_QOS_TID;
24188 
24189 	return tid;
24190 }
24191 
/*
 * Fill a TCL data command descriptor (the hardware TX descriptor in
 * the TCL ring) from the given hal_tx_info.
 */
void
qwx_hal_tx_cmd_desc_setup(struct qwx_softc *sc, void *cmd,
    struct hal_tx_info *ti)
{
	struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;

	/* Buffer address is split: low 32 bits in info0, MSBs in info1. */
	tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    ti->paddr);
	tcl_cmd->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
	    ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
	/* Return-buffer manager and sw cookie identify the buffer later. */
	tcl_cmd->buf_addr_info.info1 |= FIELD_PREP(
	    BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);

	tcl_cmd->info0 =
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE, ti->encrypt_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE, ti->search_type) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN, ti->addr_search_flags) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM, ti->meta_data_flags);

	/* ti->flags0/flags1 carry pre-shifted bits merged in here. */
	tcl_cmd->info1 = ti->flags0 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);

	tcl_cmd->info2 = ti->flags1 |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);

	tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
	    ti->dscp_tid_tbl_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX, ti->bss_ast_idx) |
	    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM, ti->bss_ast_hash);
	tcl_cmd->info4 = 0;
#ifdef notyet
	if (ti->enable_mesh)
		ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
#endif
}
24232 
24233 int
qwx_dp_tx(struct qwx_softc * sc,struct qwx_vif * arvif,uint8_t pdev_id,struct ieee80211_node * ni,struct mbuf * m)24234 qwx_dp_tx(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
24235     struct ieee80211_node *ni, struct mbuf *m)
24236 {
24237 	struct ieee80211com *ic = &sc->sc_ic;
24238 	struct qwx_dp *dp = &sc->dp;
24239 	struct hal_tx_info ti = {0};
24240 	struct qwx_tx_data *tx_data;
24241 	struct hal_srng *tcl_ring;
24242 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
24243 	struct ieee80211_key *k = NULL;
24244 	struct dp_tx_ring *tx_ring;
24245 	void *hal_tcl_desc;
24246 	uint8_t pool_id;
24247 	uint8_t hal_ring_id;
24248 	int ret, msdu_id, off;
24249 	uint32_t ring_selector = 0;
24250 	uint8_t ring_map = 0;
24251 
24252 	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
24253 		m_freem(m);
24254 		return ESHUTDOWN;
24255 	}
24256 #if 0
24257 	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
24258 		     !ieee80211_is_data(hdr->frame_control)))
24259 		return -ENOTSUPP;
24260 #endif
24261 	pool_id = 0;
24262 	ring_selector = 0;
24263 
24264 	ti.ring_id = ring_selector % sc->hw_params.max_tx_ring;
24265 	ti.rbm_id = sc->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
24266 
24267 	ring_map |= (1 << ti.ring_id);
24268 
24269 	tx_ring = &dp->tx_ring[ti.ring_id];
24270 
24271 	if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
24272 		m_freem(m);
24273 		return ENOSPC;
24274 	}
24275 
24276 	msdu_id = tx_ring->cur;
24277 	tx_data = &tx_ring->data[msdu_id];
24278 	if (tx_data->m != NULL) {
24279 		m_freem(m);
24280 		return ENOSPC;
24281 	}
24282 
24283 	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, pdev_id) |
24284 	    FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
24285 	    FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
24286 	ti.encap_type = qwx_dp_tx_get_encap_type(sc);
24287 
24288 	ti.meta_data_flags = arvif->tcl_metadata;
24289 
24290 	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
24291 	    ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
24292 		k = ieee80211_get_txkey(ic, wh, ni);
24293 		if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags)) {
24294 			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
24295 		} else {
24296 			switch (k->k_cipher) {
24297 			case IEEE80211_CIPHER_CCMP:
24298 				ti.encrypt_type = HAL_ENCRYPT_TYPE_CCMP_128;
24299 				if (m_makespace(m, m->m_pkthdr.len,
24300 				    IEEE80211_CCMP_MICLEN, &off) == NULL) {
24301 					m_freem(m);
24302 					return ENOSPC;
24303 				}
24304 				break;
24305 			case IEEE80211_CIPHER_TKIP:
24306 				ti.encrypt_type = HAL_ENCRYPT_TYPE_TKIP_MIC;
24307 				if (m_makespace(m, m->m_pkthdr.len,
24308 				    IEEE80211_TKIP_MICLEN, &off) == NULL) {
24309 					m_freem(m);
24310 					return ENOSPC;
24311 				}
24312 				break;
24313 			default:
24314 				ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
24315 				break;
24316 			}
24317 		}
24318 
24319 		if (ti.encrypt_type == HAL_ENCRYPT_TYPE_OPEN) {
24320 			/* Using software crypto. */
24321 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
24322 				return ENOBUFS;
24323 			/* 802.11 header may have moved. */
24324 			wh = mtod(m, struct ieee80211_frame *);
24325 		}
24326 	}
24327 
24328 	ti.addr_search_flags = arvif->hal_addr_search_flags;
24329 	ti.search_type = arvif->search_type;
24330 	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
24331 	ti.pkt_offset = 0;
24332 	ti.lmac_id = qwx_hw_get_mac_from_pdev_id(sc, pdev_id);
24333 	ti.bss_ast_hash = arvif->ast_hash;
24334 	ti.bss_ast_idx = arvif->ast_idx;
24335 	ti.dscp_tid_tbl_idx = 0;
24336 #if 0
24337 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
24338 		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
24339 		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
24340 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
24341 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
24342 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
24343 			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
24344 	}
24345 
24346 	if (ieee80211_vif_is_mesh(arvif->vif))
24347 		ti.enable_mesh = true;
24348 #endif
24349 	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
24350 
24351 	ti.tid = qwx_dp_tx_get_tid(m);
24352 #if 0
24353 	switch (ti.encap_type) {
24354 	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
24355 		ath11k_dp_tx_encap_nwifi(skb);
24356 		break;
24357 	case HAL_TCL_ENCAP_TYPE_RAW:
24358 		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
24359 			ret = -EINVAL;
24360 			goto fail_remove_idr;
24361 		}
24362 		break;
24363 	case HAL_TCL_ENCAP_TYPE_ETHERNET:
24364 		/* no need to encap */
24365 		break;
24366 	case HAL_TCL_ENCAP_TYPE_802_3:
24367 	default:
24368 		/* TODO: Take care of other encap modes as well */
24369 		ret = -EINVAL;
24370 		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
24371 		goto fail_remove_idr;
24372 	}
24373 #endif
24374 	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
24375 	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
24376 	if (ret && ret != EFBIG) {
24377 		printf("%s: failed to map Tx buffer: %d\n",
24378 		    sc->sc_dev.dv_xname, ret);
24379 		m_freem(m);
24380 		return ret;
24381 	}
24382 	if (ret) {
24383 		/* Too many DMA segments, linearize mbuf. */
24384 		if (m_defrag(m, M_DONTWAIT)) {
24385 			m_freem(m);
24386 			return ENOBUFS;
24387 		}
24388 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
24389 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
24390 		if (ret) {
24391 			printf("%s: failed to map Tx buffer: %d\n",
24392 			    sc->sc_dev.dv_xname, ret);
24393 			m_freem(m);
24394 			return ret;
24395 		}
24396 	}
24397 	ti.paddr = tx_data->map->dm_segs[0].ds_addr;
24398 
24399 	ti.data_len = m->m_pkthdr.len;
24400 
24401 	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
24402 	tcl_ring = &sc->hal.srng_list[hal_ring_id];
24403 #ifdef notyet
24404 	spin_lock_bh(&tcl_ring->lock);
24405 #endif
24406 	qwx_hal_srng_access_begin(sc, tcl_ring);
24407 
24408 	hal_tcl_desc = (void *)qwx_hal_srng_src_get_next_entry(sc, tcl_ring);
24409 	if (!hal_tcl_desc) {
24410 		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
24411 		 * desc because the desc is directly enqueued onto hw queue.
24412 		 */
24413 		qwx_hal_srng_access_end(sc, tcl_ring);
24414 #if 0
24415 		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
24416 #endif
24417 #ifdef notyet
24418 		spin_unlock_bh(&tcl_ring->lock);
24419 #endif
24420 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
24421 		m_freem(m);
24422 		return ENOMEM;
24423 	}
24424 
24425 	tx_data->m = m;
24426 	tx_data->ni = ni;
24427 
24428 	qwx_hal_tx_cmd_desc_setup(sc,
24429 	    hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti);
24430 
24431 	qwx_hal_srng_access_end(sc, tcl_ring);
24432 
24433 	qwx_dp_shadow_start_timer(sc, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
24434 #ifdef notyet
24435 	spin_unlock_bh(&tcl_ring->lock);
24436 #endif
24437 	tx_ring->queued++;
24438 	tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;
24439 
24440 	if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
24441 		sc->qfullmsk |= (1 << ti.ring_id);
24442 
24443 	return 0;
24444 }
24445 
24446 int
qwx_mac_station_remove(struct qwx_softc * sc,struct qwx_vif * arvif,uint8_t pdev_id,struct ieee80211_node * ni)24447 qwx_mac_station_remove(struct qwx_softc *sc, struct qwx_vif *arvif,
24448     uint8_t pdev_id, struct ieee80211_node *ni)
24449 {
24450 	struct qwx_node *nq = (struct qwx_node *)ni;
24451 	struct ath11k_peer *peer = &nq->peer;
24452 	int ret;
24453 
24454 	qwx_peer_rx_tid_cleanup(sc, peer);
24455 
24456 	ret = qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
24457 	if (ret) {
24458 		printf("%s: unable to delete BSS peer: %d\n",
24459 		   sc->sc_dev.dv_xname, ret);
24460 		return ret;
24461 	}
24462 
24463 	return 0;
24464 }
24465 
24466 int
qwx_mac_station_add(struct qwx_softc * sc,struct qwx_vif * arvif,uint8_t pdev_id,struct ieee80211_node * ni)24467 qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif,
24468     uint8_t pdev_id, struct ieee80211_node *ni)
24469 {
24470 	struct peer_create_params peer_param;
24471 	int ret;
24472 #ifdef notyet
24473 	lockdep_assert_held(&ar->conf_mutex);
24474 #endif
24475 	peer_param.vdev_id = arvif->vdev_id;
24476 	peer_param.peer_addr = ni->ni_macaddr;
24477 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
24478 
24479 	ret = qwx_peer_create(sc, arvif, pdev_id, ni, &peer_param);
24480 	if (ret) {
24481 		printf("%s: Failed to add peer: %s for VDEV: %d\n",
24482 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
24483 		    arvif->vdev_id);
24484 		return ret;
24485 	}
24486 
24487 	DNPRINTF(QWX_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
24488 	    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
24489 
24490 	ret = qwx_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
24491 	if (ret) {
24492 		printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
24493 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
24494 		    arvif->vdev_id, ret);
24495 		goto free_peer;
24496 	}
24497 
24498 	return 0;
24499 
24500 free_peer:
24501 	qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
24502 	return ret;
24503 }
24504 
24505 int
qwx_mac_mgmt_tx_wmi(struct qwx_softc * sc,struct qwx_vif * arvif,uint8_t pdev_id,struct ieee80211_node * ni,struct mbuf * m)24506 qwx_mac_mgmt_tx_wmi(struct qwx_softc *sc, struct qwx_vif *arvif,
24507     uint8_t pdev_id, struct ieee80211_node *ni, struct mbuf *m)
24508 {
24509 	struct qwx_txmgmt_queue *txmgmt = &arvif->txmgmt;
24510 	struct qwx_tx_data *tx_data;
24511 	int buf_id;
24512 	int ret;
24513 
24514 	buf_id = txmgmt->cur;
24515 
24516 	DNPRINTF(QWX_D_MAC, "%s: tx mgmt frame, buf id %d\n", __func__, buf_id);
24517 
24518 	if (txmgmt->queued >= nitems(txmgmt->data))
24519 		return ENOSPC;
24520 
24521 	tx_data = &txmgmt->data[buf_id];
24522 #if 0
24523 	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
24524 		if ((ieee80211_is_action(hdr->frame_control) ||
24525 		     ieee80211_is_deauth(hdr->frame_control) ||
24526 		     ieee80211_is_disassoc(hdr->frame_control)) &&
24527 		     ieee80211_has_protected(hdr->frame_control)) {
24528 			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
24529 		}
24530 	}
24531 #endif
24532 	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
24533 	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
24534 	if (ret && ret != EFBIG) {
24535 		printf("%s: failed to map mgmt Tx buffer: %d\n",
24536 		    sc->sc_dev.dv_xname, ret);
24537 		return ret;
24538 	}
24539 	if (ret) {
24540 		/* Too many DMA segments, linearize mbuf. */
24541 		if (m_defrag(m, M_DONTWAIT)) {
24542 			m_freem(m);
24543 			return ENOBUFS;
24544 		}
24545 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map, m,
24546 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
24547 		if (ret) {
24548 			printf("%s: failed to map mgmt Tx buffer: %d\n",
24549 			    sc->sc_dev.dv_xname, ret);
24550 			m_freem(m);
24551 			return ret;
24552 		}
24553 	}
24554 
24555 	ret = qwx_wmi_mgmt_send(sc, arvif, pdev_id, buf_id, m, tx_data);
24556 	if (ret) {
24557 		printf("%s: failed to send mgmt frame: %d\n",
24558 		    sc->sc_dev.dv_xname, ret);
24559 		goto err_unmap_buf;
24560 	}
24561 	tx_data->ni = ni;
24562 
24563 	txmgmt->cur = (txmgmt->cur + 1) % nitems(txmgmt->data);
24564 	txmgmt->queued++;
24565 
24566 	if (txmgmt->queued >= nitems(txmgmt->data) - 1)
24567 		sc->qfullmsk |= (1U << QWX_MGMT_QUEUE_ID);
24568 
24569 	return 0;
24570 
24571 err_unmap_buf:
24572 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
24573 	return ret;
24574 }
24575 
24576 void
qwx_wmi_start_scan_init(struct qwx_softc * sc,struct scan_req_params * arg)24577 qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
24578 {
24579 	/* setup commonly used values */
24580 	arg->scan_req_id = 1;
24581 	if (sc->state_11d == ATH11K_11D_PREPARING)
24582 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
24583 	else
24584 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
24585 	arg->dwell_time_active = 50;
24586 	arg->dwell_time_active_2g = 0;
24587 	arg->dwell_time_passive = 150;
24588 	arg->dwell_time_active_6g = 40;
24589 	arg->dwell_time_passive_6g = 30;
24590 	arg->min_rest_time = 50;
24591 	arg->max_rest_time = 500;
24592 	arg->repeat_probe_time = 0;
24593 	arg->probe_spacing_time = 0;
24594 	arg->idle_time = 0;
24595 	arg->max_scan_time = 20000;
24596 	arg->probe_delay = 5;
24597 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
24598 	    WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
24599 	    WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
24600 	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
24601 
24602 	if (isset(sc->wmi.svc_map,
24603 	    WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
24604 		arg->scan_ctrl_flags_ext |=
24605 		    WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
24606 
24607 	arg->num_bssid = 1;
24608 
24609 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
24610 	 * ZEROs in probe request
24611 	 */
24612 	IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
24613 }
24614 
24615 int
qwx_wmi_set_peer_param(struct qwx_softc * sc,uint8_t * peer_addr,uint32_t vdev_id,uint32_t pdev_id,uint32_t param_id,uint32_t param_val)24616 qwx_wmi_set_peer_param(struct qwx_softc *sc, uint8_t *peer_addr,
24617     uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
24618 {
24619 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
24620 	struct wmi_peer_set_param_cmd *cmd;
24621 	struct mbuf *m;
24622 	int ret;
24623 
24624 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
24625 	if (!m)
24626 		return ENOMEM;
24627 
24628 	cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
24629 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
24630 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
24631 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
24632 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
24633 	cmd->vdev_id = vdev_id;
24634 	cmd->param_id = param_id;
24635 	cmd->param_value = param_val;
24636 
24637 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
24638 	if (ret) {
24639 		if (ret != ESHUTDOWN) {
24640 			printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
24641 			    sc->sc_dev.dv_xname);
24642 		}
24643 		m_freem(m);
24644 		return ret;
24645 	}
24646 
24647 	DNPRINTF(QWX_D_WMI, "%s: cmd peer set param vdev %d peer %s "
24648 	    "set param %d value %d\n", __func__, vdev_id,
24649 	    ether_sprintf(peer_addr), param_id, param_val);
24650 
24651 	return 0;
24652 }
24653 
24654 int
qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc * sc,int vdev_id,int pdev_id,uint8_t * addr,uint64_t paddr,uint8_t tid,uint8_t ba_window_size_valid,uint32_t ba_window_size)24655 qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *sc, int vdev_id,
24656     int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
24657     uint8_t ba_window_size_valid, uint32_t ba_window_size)
24658 {
24659 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
24660 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
24661 	struct mbuf *m;
24662 	int ret;
24663 
24664 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
24665 	if (!m)
24666 		return ENOMEM;
24667 
24668 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
24669 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
24670 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
24671 	    WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
24672 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
24673 
24674 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
24675 	cmd->vdev_id = vdev_id;
24676 	cmd->tid = tid;
24677 	cmd->queue_ptr_lo = paddr & 0xffffffff;
24678 	cmd->queue_ptr_hi = paddr >> 32;
24679 	cmd->queue_no = tid;
24680 	cmd->ba_window_size_valid = ba_window_size_valid;
24681 	cmd->ba_window_size = ba_window_size;
24682 
24683 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
24684 	if (ret) {
24685 		if (ret != ESHUTDOWN) {
24686 			printf("%s: failed to send "
24687 			    "WMI_PEER_REORDER_QUEUE_SETUP\n",
24688 			    sc->sc_dev.dv_xname);
24689 		}
24690 		m_freem(m);
24691 	}
24692 
24693 	DNPRINTF(QWX_D_WMI, "%s: cmd peer reorder queue setup addr %s "
24694 	    "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);
24695 
24696 	return ret;
24697 }
24698 
/*
 * Report the current spectral-scan mode. Spectral scan is not
 * implemented in this driver (the corresponding Linux code is kept
 * under #if 0 for reference), so this always reports "disabled".
 */
enum ath11k_spectral_mode
qwx_spectral_get_mode(struct qwx_softc *sc)
{
#if 0
	if (sc->spectral.enabled)
		return ar->spectral.mode;
	else
#endif
		return ATH11K_SPECTRAL_DISABLED;
}
24709 
/* Stub: spectral-scan buffer handling is not implemented yet. */
void
qwx_spectral_reset_buffer(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
}
24715 
/*
 * Stop a running firmware scan: send a WMI scan-cancel command for the
 * current scan ID, then sleep (up to 3 seconds per iteration) until the
 * scan-event handler moves sc->scan.state back to IDLE and wakes us.
 * If the command fails or the wait times out, scan state is cleaned up
 * locally anyway. Returns 0 on success or an errno.
 */
int
qwx_scan_stop(struct qwx_softc *sc)
{
	struct scan_cancel_param arg = {
		.req_type = WLAN_SCAN_CANCEL_SINGLE,
		.scan_id = ATH11K_SCAN_ID,
	};
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* TODO: Fill other STOP Params */
	arg.pdev_id = 0; /* TODO: derive pdev ID somehow? */
	arg.vdev_id = sc->scan.vdev_id;

	ret = qwx_wmi_send_scan_stop_cmd(sc, &arg);
	if (ret) {
		printf("%s: failed to stop wmi scan: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for the scan-completion event handler to wake us. */
	while (sc->scan.state != ATH11K_SCAN_IDLE) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscstop",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: scan stop timeout\n", sc->sc_dev.dv_xname);
			break;
		}
	}
out:
	/* Scan state should be updated upon scan completion but in case
	 * firmware fails to deliver the event (for whatever reason) it is
	 * desired to clean up scan state anyway. Firmware may have just
	 * dropped the scan completion event delivery due to transport pipe
	 * being overflown with data and/or it can recover on its own before
	 * next scan request is submitted.
	 */
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	if (sc->scan.state != ATH11K_SCAN_IDLE)
		qwx_mac_scan_finish(sc);
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24764 
/*
 * timeout(9)-style callback fired when a scan takes too long;
 * aborts the scan. 'arg' is the softc.
 */
void
qwx_scan_timeout(void *arg)
{
	struct qwx_softc *sc = arg;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ar->conf_mutex);
#endif
	/* NOTE(review): looks like leftover debug output -- consider
	 * removing or converting to DNPRINTF. */
	printf("%s\n", __func__);
	qwx_scan_abort(sc);
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	splx(s);
}
24781 
/*
 * Submit a prepared scan request to firmware and wait until the scan
 * leaves the STARTING state (the scan-event handler wakes us). A longer
 * wait (5s instead of 1s) is used when firmware performs 11d offload.
 * On timeout the scan is stopped again. Returns 0 on success or an
 * errno from the WMI command or tsleep_nsec().
 */
int
qwx_start_scan(struct qwx_softc *sc, struct scan_req_params *arg)
{
	int ret;
	unsigned long timeout = 1;	/* seconds */
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (qwx_spectral_get_mode(sc) == ATH11K_SPECTRAL_BACKGROUND)
		qwx_spectral_reset_buffer(sc);

	ret = qwx_wmi_send_scan_start_cmd(sc, arg);
	if (ret)
		return ret;

	/* 11d offload delays the scan-started event; wait longer. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
		timeout = 5;
#if 0
		if (ar->supports_6ghz)
			timeout += 5 * HZ;
#endif
	}

	while (sc->scan.state == ATH11K_SCAN_STARTING) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscan",
		    SEC_TO_NSEC(timeout));
		if (ret) {
			printf("%s: scan start timeout\n", sc->sc_dev.dv_xname);
			qwx_scan_stop(sc);
			break;
		}
	}

#ifdef notyet
	spin_lock_bh(&ar->data_lock);
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
24821 
24822 #define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD		200 /* in msecs */
24823 
/*
 * Start a network scan on the first (and, for now, only) vdev.
 * Builds a scan request covering all configured channels (active scan if
 * a desired ESSID is set, passive otherwise), transitions the driver's
 * scan state machine from IDLE to STARTING, and submits the request to
 * firmware via qwx_start_scan(). Returns 0 on success, EBUSY if a scan
 * is already in progress, or another errno on failure.
 */
int
qwx_scan(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list);
	struct scan_req_params *arg = NULL;
	struct ieee80211_channel *chan, *lastc;
	int ret = 0, num_channels, i;
	uint32_t scan_timeout;

	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/*
	 * TODO Will we need separate scan iterations on devices with
	 * multiple radios?
	 */
	if (sc->num_radios > 1)
		printf("%s: TODO: only scanning with first vdev\n", __func__);

	/* Firmwares advertising the support of triggering 11D algorithm
	 * on the scan results of a regular scan expects driver to send
	 * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
	 * With this feature, separate 11D scan can be avoided since
	 * regdomain can be determined with the scan results of the
	 * regular scan.
	 */
	if (sc->state_11d == ATH11K_11D_PREPARING &&
	    isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN))
		qwx_mac_11d_scan_start(sc, arvif);
#ifdef notyet
	mutex_lock(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	/* Only one scan at a time: claim the state machine or bail out. */
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
		sc->scan.started = 0;
		sc->scan.completed = 0;
		sc->scan.state = ATH11K_SCAN_STARTING;
		sc->scan.is_roc = 0;
		sc->scan.vdev_id = arvif->vdev_id;
		ret = 0;
		break;
	case ATH11K_SCAN_STARTING:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ret = EBUSY;
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	if (ret)
		goto exit;

	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!arg) {
		ret = ENOMEM;
		goto exit;
	}

	qwx_wmi_start_scan_init(sc, arg);
	arg->vdev_id = arvif->vdev_id;
	arg->scan_id = ATH11K_SCAN_ID;

	/* Active scan for a desired ESSID, otherwise passive scan. */
	if (ic->ic_des_esslen != 0) {
		arg->num_ssids = 1;
		arg->ssid[0].length  = ic->ic_des_esslen;
		memcpy(&arg->ssid[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	} else
		arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;

	/* First pass: count configured channels (ic_flags != 0). */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	num_channels = 0;
	for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
		if (chan->ic_flags == 0)
			continue;
		num_channels++;
	}
	if (num_channels) {
		arg->num_chan = num_channels;
		arg->chan_list = mallocarray(arg->num_chan,
		    sizeof(*arg->chan_list), M_DEVBUF, M_NOWAIT | M_ZERO);

		if (!arg->chan_list) {
			ret = ENOMEM;
			goto exit;
		}

		/* Second pass: record channel frequencies (MHz). */
		i = 0;
		for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
			if (chan->ic_flags == 0)
				continue;
			if (isset(sc->wmi.svc_map,
			    WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL)) {
				arg->chan_list[i++] = chan->ic_freq &
				    WMI_SCAN_CONFIG_PER_CHANNEL_MASK;
#if 0
				/* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
				 * flags, then scan all PSC channels in 6 GHz band and
				 * those non-PSC channels where RNR IE is found during
				 * the legacy 2.4/5 GHz scan.
				 * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
				 * then all channels in 6 GHz will be scanned.
				 */
				if (req->channels[i]->band == NL80211_BAND_6GHZ &&
				    req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
				    !cfg80211_channel_is_psc(req->channels[i]))
					arg->chan_list[i] |=
						WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
#endif
			} else {
				arg->chan_list[i++] = chan->ic_freq;
			}
		}
	}
#if 0
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		arg->scan_f_add_spoofed_mac_in_probe = 1;
		ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
		ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
	}
#endif
	/* NOTE(review): scan_timeout is computed but only used by the
	 * timeout_add_msec() call below which is under #if 0. */
	scan_timeout = 5000;

	/* Add a margin to account for event/command processing */
	scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;

	ret = qwx_start_scan(sc, arg);
	if (ret) {
		if (ret != ESHUTDOWN) {
			printf("%s: failed to start hw scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
		}
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		sc->scan.state = ATH11K_SCAN_IDLE;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
	} else {
		/*
		 * The current mode might have been fixed during association.
		 * Ensure all channels get scanned.
		 */
		if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
			ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
	}
#if 0
	timeout_add_msec(&sc->scan.timeout, scan_timeout);
#endif
exit:
	/* The request struct is copied by WMI; free it in all cases. */
	if (arg) {
		free(arg->chan_list, M_DEVBUF,
		    arg->num_chan * sizeof(*arg->chan_list));
#if 0
		kfree(arg->extraie.ptr);
#endif
		free(arg, M_DEVBUF, sizeof(*arg));
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	if (sc->state_11d == ATH11K_11D_PREPARING)
		qwx_mac_11d_scan_start(sc, arvif);

	return ret;
}
24997 
/*
 * Abort a running scan. Only valid while the scan state machine is in
 * RUNNING; aborting in STARTING/ABORTING is refused, and IDLE is a
 * benign race with scan completion.
 */
void
qwx_scan_abort(struct qwx_softc *sc)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
		/* This can happen if timeout worker kicked in and called
		 * abortion while scan completion was being processed.
		 */
		break;
	case ATH11K_SCAN_STARTING:
	case ATH11K_SCAN_ABORTING:
		printf("%s: refusing scan abortion due to invalid "
		    "scan state: %d\n", sc->sc_dev.dv_xname, sc->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
		/* Mark as aborting, then ask firmware to stop the scan. */
		sc->scan.state = ATH11K_SCAN_ABORTING;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
		ret = qwx_scan_stop(sc);
		if (ret)
			printf("%s: failed to abort scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
}
25036 
25037 /*
25038  * Find a pdev which corresponds to a given channel.
25039  * This doesn't exactly match the semantics of the Linux driver
25040  * but because OpenBSD does not (yet) implement multi-bss mode
25041  * we can assume that only one PHY will be active in either the
25042  * 2 GHz or the 5 GHz band.
25043  */
25044 struct qwx_pdev *
qwx_get_pdev_for_chan(struct qwx_softc * sc,struct ieee80211_channel * chan)25045 qwx_get_pdev_for_chan(struct qwx_softc *sc, struct ieee80211_channel *chan)
25046 {
25047 	struct qwx_pdev *pdev;
25048 	int i;
25049 
25050 	for (i = 0; i < sc->num_radios; i++) {
25051 		if ((sc->pdevs_active & (1 << i)) == 0)
25052 			continue;
25053 
25054 		pdev = &sc->pdevs[i];
25055 		if (IEEE80211_IS_CHAN_2GHZ(chan) &&
25056 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
25057 			return pdev;
25058 		if (IEEE80211_IS_CHAN_5GHZ(chan) &&
25059 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
25060 			return pdev;
25061 	}
25062 
25063 	return NULL;
25064 }
25065 
/*
 * Program the management and beacon Tx rate for the given vdev, derived
 * from the lowest basic rate of the current BSS. If the rate has no
 * hardware rate code the vdev parameters are left unchanged.
 */
void
qwx_recalculate_mgmt_rate(struct qwx_softc *sc, struct ieee80211_node *ni,
    uint32_t vdev_id, uint32_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int hw_rate_code;
	uint32_t vdev_param;
	int bitrate;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	bitrate = ieee80211_min_basic_rate(ic);
	hw_rate_code = qwx_mac_get_rate_hw_value(ic, ni, bitrate);
	if (hw_rate_code < 0) {
		DPRINTF("%s: bitrate not supported %d\n",
		    sc->sc_dev.dv_xname, bitrate);
		return;
	}

	vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
	    vdev_param, hw_rate_code);
	if (ret)
		printf("%s: failed to set mgmt tx rate\n",
		    sc->sc_dev.dv_xname);
#if 0
	/* For WCN6855, firmware will clear this param when vdev starts, hence
	 * cache it here so that we can reconfigure it once vdev starts.
	 */
	ab->hw_rate_code = hw_rate_code;
#endif
	/* Use the same rate for beacons. */
	vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
	    hw_rate_code);
	if (ret)
		printf("%s: failed to set beacon tx rate\n",
		    sc->sc_dev.dv_xname);
}
25105 
/*
 * Transition to AUTH state: configure the beacon interval and management
 * rates on the vdev, create firmware/datapath state for the AP peer, and
 * start the vdev. Returns 0 on success or an errno.
 */
int
qwx_auth(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	uint32_t param_id;
	struct qwx_vif *arvif;
	struct qwx_pdev *pdev;
	int ret;

	arvif = TAILQ_FIRST(&sc->vif_list);
	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Pick the pdev which serves the band of the BSS channel. */
	pdev = qwx_get_pdev_for_chan(sc, ni->ni_chan);
	if (pdev == NULL) {
		printf("%s: no pdev found for channel %d\n",
		    sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
		return EINVAL;
	}

	param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
	    param_id, ni->ni_intval);
	if (ret) {
		printf("%s: failed to set beacon interval for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);
	/* Start at the first (lowest) rate in ni_rates. */
	ni->ni_txrate = 0;

	ret = qwx_mac_station_add(sc, arvif, pdev->pdev_id, ni);
	if (ret)
		return ret;

	/* Start vdev. */
	ret = qwx_mac_vdev_start(sc, arvif, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to start MAC for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	/*
	 * WCN6855 firmware clears basic-rate parameters when vdev starts.
	 * Set it once more.
	 */
	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);

	return ret;
}
25161 
/*
 * Leave the current BSS: stop the vdev, revoke firmware authorization
 * for the BSS peer, and remove the peer's firmware/datapath state.
 * Returns 0 on success or an errno.
 */
int
qwx_deauth(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	int ret;

	ret = qwx_mac_vdev_stop(sc, arvif, pdev_id);
	if (ret) {
		printf("%s: unable to stop vdev vdev_id %d: %d\n",
		   sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
	    pdev_id, WMI_PEER_AUTHORIZE, 0);
	if (ret) {
		printf("%s: unable to deauthorize BSS peer: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_mac_station_remove(sc, arvif, pdev_id, ni);
	if (ret)
		return ret;

	DNPRINTF(QWX_D_MAC, "%s: disassociated from bssid %s aid %d\n",
	    __func__, ether_sprintf(ni->ni_bssid), arvif->aid);

	return 0;
}
25195 
25196 void
qwx_peer_assoc_h_basic(struct qwx_softc * sc,struct qwx_vif * arvif,struct ieee80211_node * ni,struct peer_assoc_params * arg)25197 qwx_peer_assoc_h_basic(struct qwx_softc *sc, struct qwx_vif *arvif,
25198     struct ieee80211_node *ni, struct peer_assoc_params *arg)
25199 {
25200 #ifdef notyet
25201 	lockdep_assert_held(&ar->conf_mutex);
25202 #endif
25203 
25204 	IEEE80211_ADDR_COPY(arg->peer_mac, ni->ni_macaddr);
25205 	arg->vdev_id = arvif->vdev_id;
25206 	arg->peer_associd = ni->ni_associd;
25207 	arg->auth_flag = 1;
25208 	arg->peer_listen_intval = ni->ni_intval;
25209 	arg->peer_nss = 1;
25210 	arg->peer_caps = ni->ni_capinfo;
25211 }
25212 
25213 void
qwx_peer_assoc_h_crypto(struct qwx_softc * sc,struct qwx_vif * arvif,struct ieee80211_node * ni,struct peer_assoc_params * arg)25214 qwx_peer_assoc_h_crypto(struct qwx_softc *sc, struct qwx_vif *arvif,
25215     struct ieee80211_node *ni, struct peer_assoc_params *arg)
25216 {
25217 	struct ieee80211com *ic = &sc->sc_ic;
25218 
25219 	if (ic->ic_flags & IEEE80211_F_RSNON) {
25220 		arg->need_ptk_4_way = 1;
25221 		if (ni->ni_rsnprotos == IEEE80211_PROTO_WPA)
25222 			arg->need_gtk_2_way = 1;
25223 	}
25224 #if 0
25225 	if (sta->mfp) {
25226 		/* TODO: Need to check if FW supports PMF? */
25227 		arg->is_pmf_enabled = true;
25228 	}
25229 #endif
25230 }
25231 
/*
 * Return non-zero if the given 802.11 rate (in units of 500 kbit/s)
 * is a CCK rate: 1, 2, 5.5 or 11 Mbit/s.
 */
int
qwx_mac_rate_is_cck(uint8_t rate)
{
	switch (rate) {
	case 2:		/* 1 Mbit/s */
	case 4:		/* 2 Mbit/s */
	case 11:	/* 5.5 Mbit/s */
	case 22:	/* 11 Mbit/s */
		return 1;
	default:
		return 0;
	}
}
25237 
25238 void
qwx_peer_assoc_h_rates(struct ieee80211_node * ni,struct peer_assoc_params * arg)25239 qwx_peer_assoc_h_rates(struct ieee80211_node *ni, struct peer_assoc_params *arg)
25240 {
25241 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
25242 	struct ieee80211_rateset *rs = &ni->ni_rates;
25243 	int i;
25244 
25245 	for (i = 0, rateset->num_rates = 0;
25246 	    i < rs->rs_nrates && rateset->num_rates < nitems(rateset->rates);
25247 	    i++, rateset->num_rates++) {
25248 		uint8_t rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
25249 		if (qwx_mac_rate_is_cck(rate))
25250 			rate |= 0x80;
25251 		rateset->rates[rateset->num_rates] = rate;
25252 	}
25253 }
25254 
25255 void
qwx_peer_assoc_h_phymode(struct qwx_softc * sc,struct ieee80211_node * ni,struct peer_assoc_params * arg)25256 qwx_peer_assoc_h_phymode(struct qwx_softc *sc, struct ieee80211_node *ni,
25257     struct peer_assoc_params *arg)
25258 {
25259 	struct ieee80211com *ic = &sc->sc_ic;
25260 	enum wmi_phy_mode phymode;
25261 
25262 	switch (ic->ic_curmode) {
25263 	case IEEE80211_MODE_11A:
25264 		phymode = MODE_11A;
25265 		break;
25266 	case IEEE80211_MODE_11B:
25267 		phymode = MODE_11B;
25268 		break;
25269 	case IEEE80211_MODE_11G:
25270 		phymode = MODE_11G;
25271 		break;
25272 	default:
25273 		phymode = MODE_UNKNOWN;
25274 		break;
25275 	}
25276 
25277 	DNPRINTF(QWX_D_MAC, "%s: peer %s phymode %s\n", __func__,
25278 	    ether_sprintf(ni->ni_macaddr), qwx_wmi_phymode_str(phymode));
25279 
25280 	arg->peer_phymode = phymode;
25281 }
25282 
/*
 * Zero and populate a peer-assoc WMI request for the given node.
 * Only the basic, crypto, legacy-rate and phymode sections are filled;
 * the HT/VHT/HE/QoS/SMPS helpers from the Linux driver are not yet
 * ported (kept under #if 0). 'reassoc' marks a re-association rather
 * than a fresh one.
 */
void
qwx_peer_assoc_prepare(struct qwx_softc *sc, struct qwx_vif *arvif,
    struct ieee80211_node *ni, struct peer_assoc_params *arg, int reassoc)
{
	memset(arg, 0, sizeof(*arg));

	arg->peer_new_assoc = !reassoc;
	qwx_peer_assoc_h_basic(sc, arvif, ni, arg);
	qwx_peer_assoc_h_crypto(sc, arvif, ni, arg);
	qwx_peer_assoc_h_rates(ni, arg);
	qwx_peer_assoc_h_phymode(sc, ni, arg);
#if 0
	qwx_peer_assoc_h_ht(sc, arvif, ni, arg);
	qwx_peer_assoc_h_vht(sc, arvif, ni, arg);
	qwx_peer_assoc_h_he(sc, arvif, ni, arg);
	qwx_peer_assoc_h_he_6ghz(sc, arvif, ni, arg);
	qwx_peer_assoc_h_qos(sc, arvif, ni, arg);
	qwx_peer_assoc_h_smps(ni, arg);
#endif
#if 0
	arsta->peer_nss = arg->peer_nss;
#endif
	/* TODO: amsdu_disable req? */
}
25307 
/*
 * Transition to RUN state: tell firmware about the association with
 * our BSS, bring the vdev up, authorize the BSS peer, and enable the
 * datapath interrupts.  Called from the newstate task with a single
 * vif and pdev (multi-pdev support is TODO).  Returns 0 on success
 * or an errno-style value on failure.
 */
int
qwx_run(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	struct peer_assoc_params peer_arg;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif

	/*
	 * NOTE(review): %pM is a Linux kernel printf extension; OpenBSD's
	 * kernel printf does not format MAC addresses with it -- confirm
	 * intent, other debug output here uses ether_sprintf().
	 */
	DNPRINTF(QWX_D_MAC, "%s: vdev %i assoc bssid %pM aid %d\n",
	    __func__, arvif->vdev_id, arvif->bssid, arvif->aid);

	qwx_peer_assoc_prepare(sc, arvif, ni, &peer_arg, 0);

	peer_arg.is_assoc = 1;

	/* Send the peer-assoc command and wait for firmware to confirm. */
	sc->peer_assoc_done = 0;
	ret = qwx_wmi_send_peer_assoc_cmd(sc, pdev_id, &peer_arg);
	if (ret) {
		printf("%s: failed to run peer assoc for %s vdev %i: %d\n",
		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
		    arvif->vdev_id, ret);
		return ret;
	}

	/*
	 * The event handler sets peer_assoc_done and wakes us up.
	 * Give firmware one second per attempt before giving up.
	 */
	while (!sc->peer_assoc_done) {
		ret = tsleep_nsec(&sc->peer_assoc_done, 0, "qwxassoc",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: failed to get peer assoc conf event "
			    "for %s vdev %i\n", sc->sc_dev.dv_xname,
			    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
			return ret;
		}
	}
#if 0
	ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
				     &sta->deflink.ht_cap,
				     le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
		ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
			    arvif->vdev_id, bss_conf->bssid);
		return;
	}

	WARN_ON(arvif->is_up);
#endif

	/* Record association state on the vif before bringing the vdev up. */
	arvif->aid = ni->ni_associd;
	IEEE80211_ADDR_COPY(arvif->bssid, ni->ni_bssid);

	ret = qwx_wmi_vdev_up(sc, arvif->vdev_id, pdev_id, arvif->aid,
	    arvif->bssid, NULL, 0, 0);
	if (ret) {
		printf("%s: failed to set vdev %d up: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
		return ret;
	}

	arvif->is_up = 1;
#if 0
	arvif->rekey_data.enable_offload = 0;
#endif

	DNPRINTF(QWX_D_MAC, "%s: vdev %d up (associated) bssid %s aid %d\n",
	    __func__, arvif->vdev_id, ether_sprintf(ni->ni_bssid), arvif->aid);

	/* Allow data frames to/from the BSS peer. */
	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, arvif->vdev_id,
	    pdev_id, WMI_PEER_AUTHORIZE, 1);
	if (ret) {
		printf("%s: unable to authorize BSS peer: %d\n",
		   sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Enable "ext" IRQs for datapath. */
	sc->ops.irq_enable(sc);

	return 0;
}
25398 
/*
 * Undo qwx_run(): disable datapath interrupts, clear STA rate state,
 * and bring the vdev down in firmware.  Returns 0 on success or the
 * error from qwx_wmi_vdev_down().
 */
int
qwx_run_stop(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
	uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
	struct qwx_node *nq = (void *)ic->ic_bss;
	int ret;

	/* Stop datapath interrupts before tearing down the vdev. */
	sc->ops.irq_disable(sc);

	if (ic->ic_opmode == IEEE80211_M_STA) {
		/* Reset rate-selection state for the next association. */
		ic->ic_bss->ni_txrate = 0;
		nq->flags = 0;
	}

	ret = qwx_wmi_vdev_down(sc, arvif->vdev_id, pdev_id);
	if (ret)
		return ret;

	arvif->is_up = 0;

	DNPRINTF(QWX_D_MAC, "%s: vdev %d down\n", __func__, arvif->vdev_id);

	return 0;
}
25425 
#if NBPFILTER > 0
/*
 * Attach the radiotap BPF tap and initialize the rx/tx radiotap
 * header templates used when frames are handed to bpf(4) listeners.
 *
 * NOTE(review): the "it_present" bitmaps reuse the iwx(4) driver's
 * IWX_RX_RADIOTAP_PRESENT/IWX_TX_RADIOTAP_PRESENT definitions --
 * confirm these match the radiotap fields qwx actually fills in.
 */
void
qwx_radiotap_attach(struct qwx_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
#endif
25442 
/*
 * Common attach code shared by bus front-ends.  Sets up tasks and
 * timeouts, attaches radiotap, initializes per-pdev back-pointers,
 * and performs a first qwx_init() to load firmware and probe device
 * capabilities, after which the device is turned off again until the
 * interface is brought up.  Returns 0 on success or the error from
 * qwx_init().
 */
int
qwx_attach(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int error, i;

	task_set(&sc->init_task, qwx_init_task, sc);
	task_set(&sc->newstate_task, qwx_newstate_task, sc);
	task_set(&sc->setkey_task, qwx_setkey_task, sc);
	timeout_set_proc(&sc->scan.timeout, qwx_scan_timeout, sc);
#if NBPFILTER > 0
	qwx_radiotap_attach(sc);
#endif
	for (i = 0; i < nitems(sc->pdevs); i++)
		sc->pdevs[i].sc = sc;

	TAILQ_INIT(&sc->vif_list);

	error = qwx_init(ifp);
	if (error)
		return error;

	/* Turn device off until interface comes up. */
	qwx_core_deinit(sc);

	return 0;
}
25471 
25472 void
qwx_detach(struct qwx_softc * sc)25473 qwx_detach(struct qwx_softc *sc)
25474 {
25475 	if (sc->fwmem) {
25476 		qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
25477 		sc->fwmem = NULL;
25478 	}
25479 
25480 	if (sc->m3_mem) {
25481 		qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
25482 		sc->m3_mem = NULL;
25483 	}
25484 
25485 	qwx_free_firmware(sc);
25486 }
25487 
/*
 * Allocate a single physically-contiguous DMA buffer of 'size' bytes
 * aligned to 'align', map it into kernel virtual memory, and load it
 * into a DMA map.  The memory is restricted to the low 32-bit address
 * range and zeroed.  Returns NULL on failure with all partial
 * allocations released via the goto unwind chain below.
 */
struct qwx_dmamem *
qwx_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
{
	struct qwx_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adm == NULL)
		return NULL;
	adm->size = size;

	/* One map, one segment: the buffer must be contiguous. */
	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &adm->map) != 0)
		goto admfree;

	/* Device requires addresses below 4 GB. */
	if (bus_dmamem_alloc_range(dmat, size, align, 0, &adm->seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 0, 0xffffffff) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &adm->seg, nsegs, size,
	    &adm->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load_raw(dmat, adm->map, &adm->seg, nsegs, size,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	/* NOTE(review): likely redundant with BUS_DMA_ZERO above -- confirm. */
	bzero(adm->kva, size);

	return adm;

unmap:
	bus_dmamem_unmap(dmat, adm->kva, size);
free:
	bus_dmamem_free(dmat, &adm->seg, 1);
destroy:
	bus_dmamap_destroy(dmat, adm->map);
admfree:
	free(adm, M_DEVBUF, sizeof(*adm));

	return NULL;
}
25530 
/*
 * Release a DMA buffer obtained from qwx_dmamem_alloc(): unmap the
 * kva, free the segment, destroy the map, and free the descriptor --
 * the reverse of the allocation order.  'adm' must be non-NULL.
 */
void
qwx_dmamem_free(bus_dma_tag_t dmat, struct qwx_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->kva, adm->size);
	bus_dmamem_free(dmat, &adm->seg, 1);
	bus_dmamap_destroy(dmat, adm->map);
	free(adm, M_DEVBUF, sizeof(*adm));
}
25539 
/*
 * Autoconf power-management hook.  On suspend (DVACT_QUIESCE) stop a
 * running interface; on DVACT_WAKEUP re-initialize the hardware if the
 * interface was up but is no longer running.  Always returns 0; an
 * init failure at wakeup is only reported on the console ('err' is
 * used for the message, not the return value).
 */
int
qwx_activate(struct device *self, int act)
{
	struct qwx_softc *sc = (struct qwx_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			/* Serialize with ioctl paths while stopping. */
			rw_enter_write(&sc->ioctl_rwl);
			qwx_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		/* Nothing to do; the work happens at DVACT_WAKEUP. */
		break;
	case DVACT_WAKEUP:
		/* Interface marked up but not running: bring it back. */
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
			err = qwx_init(ifp);
			if (err)
				printf("%s: could not initialize hardware\n",
				    sc->sc_dev.dv_xname);
		}
		break;
	}

	return 0;
}
25569