xref: /openbsd/sys/dev/ic/qwx.c (revision 3bef86f7)
1 /*	$OpenBSD: qwx.c,v 1.8 2024/01/25 17:00:20 stsp Exp $	*/
2 
3 /*
4  * Copyright 2023 Stefan Sperling <stsp@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Copyright (c) 2018-2019 The Linux Foundation.
21  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc.
22  * All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted (subject to the limitations in the disclaimer
26  * below) provided that the following conditions are met:
27  *
28  *  * Redistributions of source code must retain the above copyright notice,
29  *    this list of conditions and the following disclaimer.
30  *
31  *  * Redistributions in binary form must reproduce the above copyright
32  *    notice, this list of conditions and the following disclaimer in the
33  *    documentation and/or other materials provided with the distribution.
34  *
35  *  * Neither the name of [Owner Organization] nor the names of its
36  *    contributors may be used to endorse or promote products derived from
37  *    this software without specific prior written permission.
38  *
39  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
40  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
41  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
42  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
43  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
44  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
45  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
46  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
47  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
48  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
49  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
50  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52 
53 /*
54  * Driver for Qualcomm Technologies 802.11ax chipset.
55  */
56 
57 #include <sys/types.h>
58 #include <sys/param.h>
59 #include <sys/device.h>
60 #include <sys/rwlock.h>
61 #include <sys/systm.h>
62 #include <sys/socket.h>
63 #include <sys/sockio.h>
64 
65 #include <sys/refcnt.h>
66 #include <sys/task.h>
67 
68 #include <machine/bus.h>
69 #include <machine/intr.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 
77 #include <net80211/ieee80211_var.h>
78 #include <net80211/ieee80211_radiotap.h>
79 
/* XXX linux porting goo */
#ifdef __LP64__
#define BITS_PER_LONG		64
#else
#define BITS_PER_LONG		32
#endif
/* Contiguous bit mask covering bits h down to l, inclusive. */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
/* Shift amount of the lowest set bit in mask x (bit-field offset). */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/* Position of the first zero bit in x (1-based, via ffs of complement). */
#define ffz(x) ffs(~(x))
/* Extract (FIELD_GET) or place (FIELD_PREP) a bit-field given mask _m. */
#define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m)))
#define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m))
#define BIT(x)               (1UL << (x))
/*
 * Non-atomic bit operations on a plain integer flags word.
 * Unlike their Linux namesakes these provide no memory ordering.
 */
#define test_bit(i, a)  ((a) & (1 << (i)))
#define clear_bit(i, a) ((a)) &= ~(1 << (i))
#define set_bit(i, a)   ((a)) |= (1 << (i))
/* Recover a pointer to the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) ({			\
	const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
98 
99 /* #define QWX_DEBUG */
100 
101 #include <dev/ic/qwxreg.h>
102 #include <dev/ic/qwxvar.h>
103 
#ifdef QWX_DEBUG
/*
 * Bitmask of debug message categories printed at runtime.
 * Uncomment QWX_D_* lines below to enable additional categories.
 */
uint32_t	qwx_debug = 0
		    | QWX_D_MISC
/*		    | QWX_D_MHI */
/*		    | QWX_D_QMI */
/*		    | QWX_D_WMI */
/*		    | QWX_D_HTC */
/*		    | QWX_D_HTT */
/*		    | QWX_D_MAC */
/*		    | QWX_D_MGMT */
		;
#endif
116 
117 int qwx_ce_init_pipes(struct qwx_softc *);
118 int qwx_hal_srng_src_num_free(struct qwx_softc *, struct hal_srng *, int);
119 int qwx_ce_per_engine_service(struct qwx_softc *, uint16_t);
120 int qwx_hal_srng_setup(struct qwx_softc *, enum hal_ring_type, int, int,
121     struct hal_srng_params *);
122 int qwx_ce_send(struct qwx_softc *, struct mbuf *, uint8_t, uint16_t);
123 int qwx_htc_connect_service(struct qwx_htc *, struct qwx_htc_svc_conn_req *,
124     struct qwx_htc_svc_conn_resp *);
125 void qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *, struct hal_srng *);
126 void qwx_wmi_free_dbring_caps(struct qwx_softc *);
127 int qwx_wmi_set_peer_param(struct qwx_softc *, uint8_t *, uint32_t,
128     uint32_t, uint32_t, uint32_t);
129 int qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *, int, int,
130     uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t);
131 const void **qwx_wmi_tlv_parse_alloc(struct qwx_softc *, const void *, size_t);
132 int qwx_core_init(struct qwx_softc *);
133 int qwx_qmi_event_server_arrive(struct qwx_softc *);
134 int qwx_mac_register(struct qwx_softc *);
135 int qwx_mac_start(struct qwx_softc *);
136 void qwx_mac_scan_finish(struct qwx_softc *);
137 int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
138     enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
139     void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
140 
141 int qwx_scan(struct qwx_softc *);
142 void qwx_scan_abort(struct qwx_softc *);
143 int qwx_assoc(struct qwx_softc *);
144 int qwx_disassoc(struct qwx_softc *);
145 int qwx_auth(struct qwx_softc *);
146 int qwx_deauth(struct qwx_softc *);
147 int qwx_run(struct qwx_softc *);
148 int qwx_run_stop(struct qwx_softc *);
149 
150 struct ieee80211_node *
151 qwx_node_alloc(struct ieee80211com *ic)
152 {
153 	struct qwx_node *nq;
154 
155 	nq = malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
156 	nq->peer.peer_id = HAL_INVALID_PEERID;
157 	return (struct ieee80211_node *)nq;
158 }
159 
/*
 * Bring the interface up: reset driver state, boot the firmware core,
 * wait for QRTR/QMI to come alive, and hook the device into net80211.
 * Runs with sc->ioctl_rwl held (entered via qwx_ioctl()).
 * Returns 0 on success or an errno value.
 */
int
qwx_init(struct ifnet *ifp)
{
	int error;
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/* Reset firmware operating modes and driver state machines. */
	sc->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	sc->crypto_mode = ATH11K_CRYPT_MODE_SW;
	sc->frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
	ic->ic_state = IEEE80211_S_INIT;
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;

	error = qwx_core_init(sc);
	if (error) {
		printf(": failed to init core: %d\n", error);
		return error;
	}

	memset(&sc->qrtr_server, 0, sizeof(sc->qrtr_server));
	sc->qrtr_server.node = QRTR_NODE_BCAST;

	/*
	 * Wait for QRTR init to be done; another context updates
	 * sc->qrtr_server and wakes us up, 5 seconds at most.
	 */
	while (sc->qrtr_server.node == QRTR_NODE_BCAST) {
		error = tsleep_nsec(&sc->qrtr_server, 0, "qwxqrtr",
		    SEC_TO_NSEC(5));
		if (error) {
			printf("%s: qrtr init timeout\n", sc->sc_dev.dv_xname);
			return error;
		}
	}

	error = qwx_qmi_event_server_arrive(sc);
	if (error)
		return error;

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);
	ieee80211_media_init(ifp, qwx_media_change, ieee80211_media_status);

	if (sc->attached) {
		/*
		 * We are either going up for the first time or qwx_stop() ran
		 * before us and has waited for any stale tasks to finish up.
		 */
		KASSERT(sc->task_refs.r_refs == 0);
		refcnt_init(&sc->task_refs);

		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_flags |= IFF_RUNNING;

		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(ic->ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);

		error = qwx_mac_start(sc);
		if (error)
			return error;

		ieee80211_begin_scan(ifp);
	} else {
		/* First bringup after attach. */
		sc->attached = 1;

		/* Configure MAC address at boot-time. */
		error = if_setlladdr(ifp, ic->ic_myaddr);
		if (error)
			printf("%s: could not set MAC address %s: %d\n",
			    sc->sc_dev.dv_xname, ether_sprintf(ic->ic_myaddr),
			    error);
	}

	return 0;
}
235 
236 void
237 qwx_add_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
238 {
239 	int s = splnet();
240 
241 	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
242 		splx(s);
243 		return;
244 	}
245 
246 	refcnt_take(&sc->task_refs);
247 	if (!task_add(taskq, task))
248 		refcnt_rele_wake(&sc->task_refs);
249 	splx(s);
250 }
251 
252 void
253 qwx_del_task(struct qwx_softc *sc, struct taskq *taskq, struct task *task)
254 {
255 	if (task_del(taskq, task))
256 		refcnt_rele(&sc->task_refs);
257 }
258 
/*
 * Bring the interface down: drain pending tasks, reset driver and
 * net80211 state machines to INIT, and power off the hardware.
 * Caller must hold sc->ioctl_rwl (asserted below).
 */
void
qwx_stop(struct ifnet *ifp)
{
	struct qwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	/* Disallow new tasks. */
	set_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	qwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	refcnt_finalize(&sc->task_refs, "qwxstop");

	/* All tasks drained; allow tasks again for the next qwx_init(). */
	clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);

	ifp->if_timer = sc->sc_tx_timer = 0;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Reset 802.11 and driver state machines back to INIT. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
	sc->ns_nstate = IEEE80211_S_INIT;
	sc->scan.state = ATH11K_SCAN_IDLE;
	sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID;
	sc->pdevs_active = 0;

	/* power off hardware */
	qwx_core_deinit(sc);

	splx(s);
}
294 
/*
 * Interface ioctl handler. Serialized by sc->ioctl_rwl because
 * qwx_init()/qwx_stop() may sleep while holding it.
 */
int
qwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qwx_softc *sc = ifp->if_softc;
	int s, err = 0;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				/* Force reload of firmware image from disk. */
				sc->have_firmware = 0;
				err = qwx_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				qwx_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* ENETRESET means net80211 wants the interface restarted. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			qwx_stop(ifp);
			err = qwx_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
345 
346 void
347 qwx_start(struct ifnet *ifp)
348 {
349 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
350 		return;
351 
352 	printf("%s: not implemented\n", __func__);
353 }
354 
355 void
356 qwx_watchdog(struct ifnet *ifp)
357 {
358 	struct qwx_softc *sc = ifp->if_softc;
359 
360 	ifp->if_timer = 0;
361 
362 	if (sc->sc_tx_timer > 0) {
363 		if (--sc->sc_tx_timer == 0) {
364 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
365 			if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
366 				task_add(systq, &sc->init_task);
367 			ifp->if_oerrors++;
368 			return;
369 		}
370 		ifp->if_timer = 1;
371 	}
372 
373 	ieee80211_watchdog(ifp);
374 }
375 
376 int
377 qwx_media_change(struct ifnet *ifp)
378 {
379 	int err;
380 
381 	err = ieee80211_media_change(ifp);
382 	if (err != ENETRESET)
383 		return err;
384 
385 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
386 	    (IFF_UP | IFF_RUNNING)) {
387 		qwx_stop(ifp);
388 		err = qwx_init(ifp);
389 	}
390 
391 	return err;
392 }
393 
/*
 * net80211 state transition hook. The real work happens in
 * qwx_newstate_task() on sc->sc_nswq because firmware commands
 * may sleep; here we only record the requested transition.
 */
int
qwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = &ic->ic_if;
	struct qwx_softc *sc = ifp->if_softc;

	/*
	 * Prevent attempts to transition towards the same state, unless
	 * we are scanning in which case a SCAN -> SCAN transition
	 * triggers another scan iteration. And AUTH -> AUTH is needed
	 * to support band-steering.
	 */
	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
	    nstate != IEEE80211_S_AUTH)
		return 0;
#if 0
	if (ic->ic_state == IEEE80211_S_RUN) {
		qwx_del_task(sc, systq, &sc->ba_task);
		qwx_del_task(sc, systq, &sc->setkey_task);
		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
		qwx_del_task(sc, systq, &sc->bgscan_done_task);
	}
#endif
	/* Stash the transition for the task to pick up. */
	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	qwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
425 
/*
 * Perform a deferred 802.11 state transition. Walks the state machine
 * down from the current state before walking it up to the requested
 * one, issuing (possibly sleeping) firmware commands at each step.
 * Holds a task reference which must be released on every exit path.
 */
void
qwx_newstate_task(void *arg)
{
	struct qwx_softc *sc = (struct qwx_softc *)arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int err = 0, s = splnet();

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		/* qwx_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			if (sc->scan.state != ATH11K_SCAN_IDLE) {
				/* Scan is still ongoing; nothing to do. */
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	/* Walk the state machine down towards the requested state. */
	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = qwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
			if (nstate <= IEEE80211_S_ASSOC) {
				err = qwx_disassoc(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = qwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if qwx_stop() was called while we were sleeping. */
		if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	/* Now walk it up to the requested state. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = qwx_scan(sc);
		if (err)
			break;
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: %s -> %s\n", ifp->if_xname,
			    ieee80211_state_name[ic->ic_state],
			    ieee80211_state_name[IEEE80211_S_SCAN]);
		/*
		 * SCAN is completed right here; sc->sc_newstate() is
		 * deliberately not called on this path.
		 */
		ic->ic_state = IEEE80211_S_SCAN;
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = qwx_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		err = qwx_assoc(sc);
		break;

	case IEEE80211_S_RUN:
		err = qwx_run(sc);
		break;
	}
out:
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) {
		/* On error, schedule a full device re-init. */
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, sc->ns_arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
529 
/* Autoconf glue: driver class definition for qwx(4) network devices. */
struct cfdriver qwx_cd = {
	NULL, "qwx", DV_IFNET
};
533 
534 void qwx_init_wmi_config_qca6390(struct qwx_softc *sc,
535     struct target_resource_config *config)
536 {
537 	config->num_vdevs = 4;
538 	config->num_peers = 16;
539 	config->num_tids = 32;
540 
541 	config->num_offload_peers = 3;
542 	config->num_offload_reorder_buffs = 3;
543 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
544 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
545 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
546 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
547 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
548 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
549 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
550 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
551 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
552 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
553 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
554 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
555 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
556 	config->num_mcast_groups = 0;
557 	config->num_mcast_table_elems = 0;
558 	config->mcast2ucast_mode = 0;
559 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
560 	config->num_wds_entries = 0;
561 	config->dma_burst_size = 0;
562 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
563 	config->vow_config = TARGET_VOW_CONFIG;
564 	config->gtk_offload_max_vdev = 2;
565 	config->num_msdu_desc = 0x400;
566 	config->beacon_tx_offload_max_vdev = 2;
567 	config->rx_batchmode = TARGET_RX_BATCHMODE;
568 
569 	config->peer_map_unmap_v2_support = 0;
570 	config->use_pdev_id = 1;
571 	config->max_frag_entries = 0xa;
572 	config->num_tdls_vdevs = 0x1;
573 	config->num_tdls_conn_table_entries = 8;
574 	config->beacon_tx_offload_max_vdev = 0x2;
575 	config->num_multicast_filter_entries = 0x20;
576 	config->num_wow_filters = 0x16;
577 	config->num_keep_alive_pattern = 0;
578 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
579 }
580 
581 void qwx_init_wmi_config_ipq8074(struct qwx_softc *sc,
582     struct target_resource_config *config)
583 {
584 	config->num_vdevs = sc->num_radios * TARGET_NUM_VDEVS(sc);
585 
586 	if (sc->num_radios == 2) {
587 		config->num_peers = TARGET_NUM_PEERS(sc, DBS);
588 		config->num_tids = TARGET_NUM_TIDS(sc, DBS);
589 	} else if (sc->num_radios == 3) {
590 		config->num_peers = TARGET_NUM_PEERS(sc, DBS_SBS);
591 		config->num_tids = TARGET_NUM_TIDS(sc, DBS_SBS);
592 	} else {
593 		/* Control should not reach here */
594 		config->num_peers = TARGET_NUM_PEERS(sc, SINGLE);
595 		config->num_tids = TARGET_NUM_TIDS(sc, SINGLE);
596 	}
597 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
598 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
599 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
600 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
601 	config->tx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
602 	config->rx_chain_mask = (1 << sc->target_caps.num_rf_chains) - 1;
603 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
604 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
605 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
606 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
607 
608 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
609 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
610 	else
611 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
612 
613 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
614 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
615 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
616 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
617 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
618 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
619 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
620 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
621 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
622 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
623 	config->rx_skip_defrag_timeout_dup_detection_check =
624 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
625 	config->vow_config = TARGET_VOW_CONFIG;
626 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
627 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
628 	config->beacon_tx_offload_max_vdev = sc->num_radios * TARGET_MAX_BCN_OFFLD;
629 	config->rx_batchmode = TARGET_RX_BATCHMODE;
630 	config->peer_map_unmap_v2_support = 1;
631 	config->twt_ap_pdev_count = sc->num_radios;
632 	config->twt_ap_sta_count = 1000;
633 	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
634 	config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
635 	config->ema_max_vap_cnt = sc->num_radios;
636 	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
637 	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
638 }
639 
/*
 * On IPQ8074-family hardware, MAC IDs map one-to-one to pdev IDs.
 */
int
qwx_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	return (mac_id);
}
645 
/*
 * On IPQ8074-family hardware, all MACs use SRNG ring id 0.
 */
int
qwx_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
	return (0);
}
651 
/*
 * On QCA6390-family hardware, every MAC maps to pdev id 0.
 * (Definition reformatted to KNF — return type on its own line —
 * for consistency with the sibling mapping functions.)
 */
int
qwx_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	return 0;
}
656 
/*
 * On QCA6390-family hardware, the MAC ID selects the SRNG ring id
 * directly.
 */
int
qwx_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, int mac_id)
{
	return (mac_id);
}
662 
/*
 * hw_ops vtable for IPQ8074. Only the WMI config and MAC-id mapping
 * hooks are ported so far; the rx descriptor accessors remain
 * disabled under "#if notyet".
 */
const struct ath11k_hw_ops ipq8074_ops = {
#if notyet
	.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
	.wmi_init_config = qwx_init_wmi_config_ipq8074,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#if notyet
	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
	.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
	.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
	.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
	.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
	.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
	.reo_setup = ath11k_hw_ipq8074_reo_setup,
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
706 
/*
 * hw_ops vtable for IPQ6018. Shares the IPQ8074 implementations for
 * all hooks ported so far.
 */
const struct ath11k_hw_ops ipq6018_ops = {
#if notyet
	.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
#endif
	.wmi_init_config = qwx_init_wmi_config_ipq8074,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#if notyet
	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
	.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
	.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
	.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
	.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
	.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
	.reo_setup = ath11k_hw_ipq8074_reo_setup,
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
750 
/*
 * hw_ops vtable for QCA6390. Uses the QCA6390 WMI config and MAC-id
 * mappings; rx descriptor accessors are not yet ported.
 */
const struct ath11k_hw_ops qca6390_ops = {
#if notyet
	.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
#endif
	.wmi_init_config = qwx_init_wmi_config_qca6390,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
#if notyet
	.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
	.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
	.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
	.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
	.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
	.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
	.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
	.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
	.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
	.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
	.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
	.reo_setup = ath11k_hw_ipq8074_reo_setup,
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
794 
/*
 * hw_ops vtable for QCN9074. Shares the IPQ8074 WMI config and MAC-id
 * mappings but has its own rx descriptor layout (QCN9074 accessors,
 * not yet ported).
 */
const struct ath11k_hw_ops qcn9074_ops = {
#if notyet
	.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
#endif
	.wmi_init_config = qwx_init_wmi_config_ipq8074,
	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074,
	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074,
#ifdef notyet
	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
	.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
	.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
	.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
	.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
	.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
	.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
	.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
	.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
	.reo_setup = ath11k_hw_ipq8074_reo_setup,
	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
#endif
};
838 
839 const struct ath11k_hw_ops wcn6855_ops = {
840 #if notyet
841 	.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
842 #endif
843 	.wmi_init_config = qwx_init_wmi_config_qca6390,
844 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
845 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
846 #ifdef notyet
847 	.tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
848 	.rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
849 	.rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
850 	.rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
851 	.rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
852 	.rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
853 	.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
854 	.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
855 	.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
856 	.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
857 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
858 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
859 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
860 	.rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
861 	.rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
862 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
863 	.rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
864 	.rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
865 	.rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
866 	.rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
867 	.rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
868 	.rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
869 	.rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
870 	.rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
871 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
872 	.rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
873 	.rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
874 	.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
875 	.reo_setup = ath11k_hw_wcn6855_reo_setup,
876 	.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
877 	.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
878 	.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
879 	.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
880 #endif
881 };
882 
883 const struct ath11k_hw_ops wcn6750_ops = {
884 #if notyet
885 	.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
886 #endif
887 	.wmi_init_config = qwx_init_wmi_config_qca6390,
888 	.mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390,
889 	.mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390,
890 #if notyet
891 	.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
892 	.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
893 	.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
894 	.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
895 	.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
896 	.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
897 	.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
898 	.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
899 	.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
900 	.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
901 	.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
902 	.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
903 	.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
904 	.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
905 	.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
906 	.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
907 	.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
908 	.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
909 	.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
910 	.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
911 	.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
912 	.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
913 	.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
914 	.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
915 	.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
916 	.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
917 	.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
918 	.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
919 	.reo_setup = ath11k_hw_wcn6855_reo_setup,
920 	.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
921 	.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
922 	.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
923 	.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
924 #endif
925 };
926 
/*
 * Single-bit interrupt mask values for the individual DP ring
 * instances.  These are placed into the per-chip
 * ath11k_hw_ring_mask tables below.  All masks use BIT() for
 * consistency (the values are unchanged from the previous mix of
 * BIT() and hex literals).
 */
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
#define ATH11K_TX_RING_MASK_3 BIT(3)
#define ATH11K_TX_RING_MASK_4 BIT(4)

#define ATH11K_RX_RING_MASK_0 BIT(0)
#define ATH11K_RX_RING_MASK_1 BIT(1)
#define ATH11K_RX_RING_MASK_2 BIT(2)
#define ATH11K_RX_RING_MASK_3 BIT(3)

#define ATH11K_RX_ERR_RING_MASK_0 BIT(0)

#define ATH11K_RX_WBM_REL_RING_MASK_0 BIT(0)

#define ATH11K_REO_STATUS_RING_MASK_0 BIT(0)

#define ATH11K_RXDMA2HOST_RING_MASK_0 BIT(0)
#define ATH11K_RXDMA2HOST_RING_MASK_1 BIT(1)
#define ATH11K_RXDMA2HOST_RING_MASK_2 BIT(2)

#define ATH11K_HOST2RXDMA_RING_MASK_0 BIT(0)
#define ATH11K_HOST2RXDMA_RING_MASK_1 BIT(1)
#define ATH11K_HOST2RXDMA_RING_MASK_2 BIT(2)

#define ATH11K_RX_MON_STATUS_RING_MASK_0 BIT(0)
#define ATH11K_RX_MON_STATUS_RING_MASK_1 BIT(1)
#define ATH11K_RX_MON_STATUS_RING_MASK_2 BIT(2)
955 
/*
 * Ring masks for IPQ8074-family hardware.
 * NOTE(review): each array slot appears to correspond to one
 * interrupt grouping, with the mask bit selecting the ring instance
 * serviced by that group -- confirm against the interrupt setup code.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		ATH11K_TX_RING_MASK_1,
		ATH11K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
		ATH11K_HOST2RXDMA_RING_MASK_0,
		ATH11K_HOST2RXDMA_RING_MASK_1,
		ATH11K_HOST2RXDMA_RING_MASK_2,
	},
};
995 
/*
 * Ring masks for QCA6390-family hardware.  Unlike ipq8074, only a
 * single TX ring is used and the host2rxdma group is left empty.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
	},
	.rx_mon_status = {
		0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
};
1030 
/*
 * Ring masks for QCN9074-family hardware.  The masks sit at different
 * (earlier) slot offsets than on ipq8074/qca6390.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		ATH11K_TX_RING_MASK_1,
		ATH11K_TX_RING_MASK_2,
	},
	.rx_mon_status = {
		0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
		ATH11K_RX_MON_STATUS_RING_MASK_1,
		ATH11K_RX_MON_STATUS_RING_MASK_2,
	},
	.rx = {
		0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, 0, 0,
		ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, 0, 0,
		ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, 0, 0,
		ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		0, 0, 0,
		ATH11K_RXDMA2HOST_RING_MASK_0,
	},
	.host2rxdma = {
		0, 0, 0,
		ATH11K_HOST2RXDMA_RING_MASK_0,
	},
};
1071 
/*
 * Ring masks for WCN6750 hardware.  Note that TX rings 1 and 3 are
 * skipped (only rings 0, 2 and 4 are used) and host2rxdma is empty.
 */
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
	.tx  = {
		ATH11K_TX_RING_MASK_0,
		0,
		ATH11K_TX_RING_MASK_2,
		0,
		ATH11K_TX_RING_MASK_4,
	},
	.rx_mon_status = {
		0, 0, 0, 0, 0, 0,
		ATH11K_RX_MON_STATUS_RING_MASK_0,
	},
	.rx = {
		0, 0, 0, 0, 0, 0, 0,
		ATH11K_RX_RING_MASK_0,
		ATH11K_RX_RING_MASK_1,
		ATH11K_RX_RING_MASK_2,
		ATH11K_RX_RING_MASK_3,
	},
	.rx_err = {
		0, ATH11K_RX_ERR_RING_MASK_0,
	},
	.rx_wbm_rel = {
		0, ATH11K_RX_WBM_REL_RING_MASK_0,
	},
	.reo_status = {
		0, ATH11K_REO_STATUS_RING_MASK_0,
	},
	.rxdma2host = {
		ATH11K_RXDMA2HOST_RING_MASK_0,
		ATH11K_RXDMA2HOST_RING_MASK_1,
		ATH11K_RXDMA2HOST_RING_MASK_2,
	},
	.host2rxdma = {
	},
};
1108 
/*
 * Target firmware's Copy Engine configuration (ipq8074 family).
 * All fields are converted with htole32() because the table is
 * handed to the target, which expects little-endian values.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(0),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(65535),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE9 host->target HTT */
	{
		.pipenum = htole32(9),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = htole32(10),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE11 Not used */
};
1223 
/* Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * The all-zero entry at the end terminates the table.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(9),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
1339 
/* Service-to-CE map for ipq6018; same layout as the ipq8074 map above
 * but without the second-MAC WMI control entries.  Terminated by an
 * all-zero entry.
 */
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(3),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(7),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(2),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(0),
	},
	{ /* not used */
		.service_id = htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
		.pipenum = htole32(4),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(1),
	},
	{
		.service_id = htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
		.pipedir = htole32(PIPEDIR_IN),	/* in = DL = target -> host */
		.pipenum = htole32(5),
	},

	/* (Additions here) */

	{ /* terminator entry */ }
};
1441 
/*
 * Target firmware's Copy Engine configuration (qca6390 family).
 * All fields are converted with htole32() because the table is
 * handed to the target, which expects little-endian values.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
1535 
1536 /* Map from service/endpoint to Copy Engine.
1537  * This table is derived from the CE_PCI TABLE, above.
1538  * It is passed to the Target at startup for use by firmware.
1539  */
1540 const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
1541 	{
1542 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
1543 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1544 		htole32(3),
1545 	},
1546 	{
1547 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
1548 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1549 		htole32(2),
1550 	},
1551 	{
1552 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
1553 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1554 		htole32(3),
1555 	},
1556 	{
1557 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
1558 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1559 		htole32(2),
1560 	},
1561 	{
1562 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
1563 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1564 		htole32(3),
1565 	},
1566 	{
1567 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
1568 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1569 		htole32(2),
1570 	},
1571 	{
1572 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
1573 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1574 		htole32(3),
1575 	},
1576 	{
1577 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
1578 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1579 		htole32(2),
1580 	},
1581 	{
1582 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
1583 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1584 		htole32(3),
1585 	},
1586 	{
1587 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
1588 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1589 		htole32(2),
1590 	},
1591 	{
1592 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
1593 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1594 		htole32(0),
1595 	},
1596 	{
1597 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
1598 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1599 		htole32(2),
1600 	},
1601 	{
1602 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
1603 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1604 		htole32(4),
1605 	},
1606 	{
1607 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
1608 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1609 		htole32(1),
1610 	},
1611 
1612 	/* (Additions here) */
1613 
1614 	{ /* must be last */
1615 		htole32(0),
1616 		htole32(0),
1617 		htole32(0),
1618 	},
1619 };
1620 
/*
 * Target firmware's Copy Engine configuration (qcn9074 family).
 * All fields are converted with htole32() because the table is
 * handed to the target, which expects little-endian values.
 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = htole32(0),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = htole32(1),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = htole32(2),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = htole32(3),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = htole32(4),
		.pipedir = htole32(PIPEDIR_OUT),
		.nentries = htole32(256),
		.nbytes_max = htole32(256),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE5: target->host Pktlog */
	{
		.pipenum = htole32(5),
		.pipedir = htole32(PIPEDIR_IN),
		.nentries = htole32(32),
		.nbytes_max = htole32(2048),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = htole32(6),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = htole32(7),
		.pipedir = htole32(PIPEDIR_INOUT_H2H),
		.nentries = htole32(0),
		.nbytes_max = htole32(0),
		.flags = htole32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = htole32(0),
	},

	/* CE8 target->host used only by IPA */
	{
		.pipenum = htole32(8),
		.pipedir = htole32(PIPEDIR_INOUT),
		.nentries = htole32(32),
		.nbytes_max = htole32(16384),
		.flags = htole32(CE_ATTR_FLAGS),
		.reserved = htole32(0),
	},
	/* CE 9, 10, 11 are used by MHI driver */
};
1714 
1715 /* Map from service/endpoint to Copy Engine.
1716  * This table is derived from the CE_PCI TABLE, above.
1717  * It is passed to the Target at startup for use by firmware.
1718  */
1719 const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
1720 	{
1721 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
1722 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1723 		htole32(3),
1724 	},
1725 	{
1726 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
1727 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1728 		htole32(2),
1729 	},
1730 	{
1731 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
1732 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1733 		htole32(3),
1734 	},
1735 	{
1736 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
1737 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1738 		htole32(2),
1739 	},
1740 	{
1741 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
1742 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1743 		htole32(3),
1744 	},
1745 	{
1746 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
1747 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1748 		htole32(2),
1749 	},
1750 	{
1751 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
1752 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1753 		htole32(3),
1754 	},
1755 	{
1756 		htole32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
1757 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1758 		htole32(2),
1759 	},
1760 	{
1761 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
1762 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1763 		htole32(3),
1764 	},
1765 	{
1766 		htole32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
1767 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1768 		htole32(2),
1769 	},
1770 	{
1771 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
1772 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1773 		htole32(0),
1774 	},
1775 	{
1776 		htole32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
1777 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1778 		htole32(1),
1779 	},
1780 	{
1781 		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
1782 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1783 		htole32(0),
1784 	},
1785 	{
1786 		htole32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
1787 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1788 		htole32(1),
1789 	},
1790 	{
1791 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
1792 		htole32(PIPEDIR_OUT),	/* out = UL = host -> target */
1793 		htole32(4),
1794 	},
1795 	{
1796 		htole32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
1797 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1798 		htole32(1),
1799 	},
1800 	{
1801 		htole32(ATH11K_HTC_SVC_ID_PKT_LOG),
1802 		htole32(PIPEDIR_IN),	/* in = DL = target -> host */
1803 		htole32(5),
1804 	},
1805 
1806 	/* (Additions here) */
1807 
1808 	{ /* must be last */
1809 		htole32(0),
1810 		htole32(0),
1811 		htole32(0),
1812 	},
1813 };
1814 
1815 #define QWX_CE_COUNT_IPQ8074	21
1816 
1817 const struct ce_attr qwx_host_ce_config_ipq8074[QWX_CE_COUNT_IPQ8074] = {
1818 	/* CE0: host->target HTC control and raw streams */
1819 	{
1820 		.flags = CE_ATTR_FLAGS,
1821 		.src_nentries = 16,
1822 		.src_sz_max = 2048,
1823 		.dest_nentries = 0,
1824 		.send_cb = qwx_htc_tx_completion_handler,
1825 	},
1826 
1827 	/* CE1: target->host HTT + HTC control */
1828 	{
1829 		.flags = CE_ATTR_FLAGS,
1830 		.src_nentries = 0,
1831 		.src_sz_max = 2048,
1832 		.dest_nentries = 512,
1833 		.recv_cb = qwx_htc_rx_completion_handler,
1834 	},
1835 
1836 	/* CE2: target->host WMI */
1837 	{
1838 		.flags = CE_ATTR_FLAGS,
1839 		.src_nentries = 0,
1840 		.src_sz_max = 2048,
1841 		.dest_nentries = 512,
1842 		.recv_cb = qwx_htc_rx_completion_handler,
1843 	},
1844 
1845 	/* CE3: host->target WMI (mac0) */
1846 	{
1847 		.flags = CE_ATTR_FLAGS,
1848 		.src_nentries = 32,
1849 		.src_sz_max = 2048,
1850 		.dest_nentries = 0,
1851 		.send_cb = qwx_htc_tx_completion_handler,
1852 	},
1853 
1854 	/* CE4: host->target HTT */
1855 	{
1856 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
1857 		.src_nentries = 2048,
1858 		.src_sz_max = 256,
1859 		.dest_nentries = 0,
1860 	},
1861 
1862 	/* CE5: target->host pktlog */
1863 	{
1864 		.flags = CE_ATTR_FLAGS,
1865 		.src_nentries = 0,
1866 		.src_sz_max = 2048,
1867 		.dest_nentries = 512,
1868 		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
1869 	},
1870 
1871 	/* CE6: target autonomous hif_memcpy */
1872 	{
1873 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
1874 		.src_nentries = 0,
1875 		.src_sz_max = 0,
1876 		.dest_nentries = 0,
1877 	},
1878 
1879 	/* CE7: host->target WMI (mac1) */
1880 	{
1881 		.flags = CE_ATTR_FLAGS,
1882 		.src_nentries = 32,
1883 		.src_sz_max = 2048,
1884 		.dest_nentries = 0,
1885 		.send_cb = qwx_htc_tx_completion_handler,
1886 	},
1887 
1888 	/* CE8: target autonomous hif_memcpy */
1889 	{
1890 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
1891 		.src_nentries = 0,
1892 		.src_sz_max = 0,
1893 		.dest_nentries = 0,
1894 	},
1895 
1896 	/* CE9: host->target WMI (mac2) */
1897 	{
1898 		.flags = CE_ATTR_FLAGS,
1899 		.src_nentries = 32,
1900 		.src_sz_max = 2048,
1901 		.dest_nentries = 0,
1902 		.send_cb = qwx_htc_tx_completion_handler,
1903 	},
1904 
1905 	/* CE10: target->host HTT */
1906 	{
1907 		.flags = CE_ATTR_FLAGS,
1908 		.src_nentries = 0,
1909 		.src_sz_max = 2048,
1910 		.dest_nentries = 512,
1911 		.recv_cb = qwx_htc_rx_completion_handler,
1912 	},
1913 
1914 	/* CE11: Not used */
1915 	{
1916 		.flags = CE_ATTR_FLAGS,
1917 		.src_nentries = 0,
1918 		.src_sz_max = 0,
1919 		.dest_nentries = 0,
1920 	},
1921 };
1922 
/* Number of host-side copy engines on QCA6390-family chips (CE0-CE8). */
#define QWX_CE_COUNT_QCA6390	9

/*
 * Host-side Copy Engine (CE) pipe attributes for QCA6390-family chips
 * (also used for WCN6855 and WCN6750, see ath11k_hw_params below).
 * Direction is implied by which ring is populated: src_nentries > 0
 * means host->target, dest_nentries > 0 means target->host.
 * src_sz_max is the maximum transfer unit in bytes.
 */
const struct ce_attr qwx_host_ce_config_qca6390[QWX_CE_COUNT_QCA6390] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; interrupts disabled, completions polled */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy (not used by the host) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy (not used by the host) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

};
2004 
/* Number of host-side copy engines on QCN9074 (CE0-CE5). */
#define QWX_CE_COUNT_QCN9074	6

/*
 * Host-side Copy Engine (CE) pipe attributes for QCN9074.
 * Direction is implied by which ring is populated: src_nentries > 0
 * means host->target, dest_nentries > 0 means target->host.
 * Note the smaller WMI RX ring (32 entries on CE2) compared to the
 * other chip configurations above.
 */
const struct ce_attr qwx_host_ce_config_qcn9074[QWX_CE_COUNT_QCN9074] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = qwx_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = qwx_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT; interrupts disabled, completions polled */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = qwx_dp_htt_htc_t2h_msg_handler,
	},
};
2060 
/*
 * TCL (TX classifier) ring to WBM (buffer manager) completion ring
 * mapping for IPQ8074-family chips: TCL ring N completes on WBM ring N,
 * using the matching SW return buffer manager (RBM) id.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 1,
		.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
2078 
/*
 * TCL to WBM completion ring mapping for WCN6750.  Unlike the IPQ8074
 * map above, TCL ring 1 completes on WBM ring 4 with the SW4 return
 * buffer manager; rings 0 and 2 map one-to-one.
 */
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
	{
		.tcl_ring_num = 0,
		.wbm_ring_num = 0,
		.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
	},
	{
		.tcl_ring_num = 1,
		.wbm_ring_num = 4,
		.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
	},
	{
		.tcl_ring_num = 2,
		.wbm_ring_num = 2,
		.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
	},
};
2096 
2097 
/*
 * HAL parameters for IPQ8074-family chips: RX buffers are returned via
 * the SW3 buffer manager; TX completions use the 1:1 TCL->WBM map.
 */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
2102 
/*
 * HAL parameters for QCA6390/WCN6855: RX buffers use the SW1 buffer
 * manager but the TCL->WBM map is shared with IPQ8074.
 */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
2107 
/*
 * HAL parameters for WCN6750: SW1 buffer manager for RX, plus the
 * WCN6750-specific TCL->WBM map (TCL ring 1 -> WBM ring 4).
 */
static const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
	.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
	.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
2112 
/*
 * Per-chip configuration table, selected by hardware revision at attach
 * time.  Each entry binds a chip to its firmware directory, register
 * layout (struct ath11k_hw_regs), HAL ops, copy-engine configuration,
 * service-to-CE map, and feature flags.  Fields wrapped in
 * "#if notyet" / "#ifdef notyet" are carried over from Linux ath11k
 * but not used by this driver yet.
 */
static const struct ath11k_hw_params ath11k_hw_params[] = {
	{
		.hw_rev = ATH11K_HW_IPQ8074,
		.name = "ipq8074 hw2.0",
		.fw = {
			.dir = "ipq8074-hw2.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 3,
		.bdf_addr = 0x4B0C0000,
		.hw_ops = &ipq8074_ops,
		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
		.internal_sleep_clock = false,
		.regs = &ipq8074_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
		.host_ce_config = qwx_host_ce_config_ipq8074,
		.ce_count = QWX_CE_COUNT_IPQ8074,
		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
		.target_ce_count = 11,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
		.svc_to_ce_map_len = 21,
		.single_pdev_only = false,
		.rxdma1_enable = true,
		.num_rxmda_per_pdev = 1,
		.rx_mac_buf_ring = false,
		.vdev_start_delay = false,
		.htt_peer_map_v2 = true,
#if notyet
		.spectral = {
			.fft_sz = 2,
			/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes.
			 * so added pad size as 2 bytes to compensate the BIN size
			 */
			.fft_pad_sz = 2,
			.summary_pad_sz = 0,
			.fft_hdr_len = 16,
			.max_fft_bins = 512,
			.fragment_160mhz = true,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP) |
					BIT(NL80211_IFTYPE_MESH_POINT),
		.supports_monitor = true,
		.full_monitor_mode = false,
#endif
		.supports_shadow_regs = false,
		.idle_ps = false,
		.supports_sta_ps = false,
		.cold_boot_calib = true,
		.cbcal_restart_fw = true,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = false,
		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
		.supports_regdb = false,
		.fix_l1ss = true,
		.credit_flow = false,
		.max_tx_ring = DP_TCL_NUM_RING_MAX,
		.hal_params = &ath11k_hw_hal_params_ipq8074,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = true,
		.supports_rssi_stats = false,
#endif
		.fw_wmi_diag_event = false,
		.current_cc_support = false,
		.dbr_debug_support = true,
		.global_reset = false,
#ifdef notyet
		.bios_sar_capa = NULL,
#endif
		.m3_fw_support = false,
		.fixed_bdf_addr = true,
		.fixed_mem_region = true,
		.static_window_map = false,
#if notyet
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
		.support_off_channel_tx = false,
		.supports_multi_bssid = false,

		.sram_dump = {},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.hw_rev = ATH11K_HW_IPQ6018_HW10,
		.name = "ipq6018 hw1.0",
		.fw = {
			.dir = "ipq6018-hw1.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 2,
		.bdf_addr = 0x4ABC0000,
		.hw_ops = &ipq6018_ops,
		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
		.internal_sleep_clock = false,
		.regs = &ipq8074_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
		.host_ce_config = qwx_host_ce_config_ipq8074,
		.ce_count = QWX_CE_COUNT_IPQ8074,
		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
		.target_ce_count = 11,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
		.svc_to_ce_map_len = 19,
		.single_pdev_only = false,
		.rxdma1_enable = true,
		.num_rxmda_per_pdev = 1,
		.rx_mac_buf_ring = false,
		.vdev_start_delay = false,
		.htt_peer_map_v2 = true,
#if notyet
		.spectral = {
			.fft_sz = 4,
			.fft_pad_sz = 0,
			.summary_pad_sz = 0,
			.fft_hdr_len = 16,
			.max_fft_bins = 512,
			.fragment_160mhz = true,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP) |
					BIT(NL80211_IFTYPE_MESH_POINT),
		.supports_monitor = true,
		.full_monitor_mode = false,
#endif
		.supports_shadow_regs = false,
		.idle_ps = false,
		.supports_sta_ps = false,
		.cold_boot_calib = true,
		.cbcal_restart_fw = true,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = false,
		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
		.supports_regdb = false,
		.fix_l1ss = true,
		.credit_flow = false,
		.max_tx_ring = DP_TCL_NUM_RING_MAX,
		.hal_params = &ath11k_hw_hal_params_ipq8074,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = true,
		.supports_rssi_stats = false,
#endif
		.fw_wmi_diag_event = false,
		.current_cc_support = false,
		.dbr_debug_support = true,
		.global_reset = false,
#ifdef notyet
		.bios_sar_capa = NULL,
#endif
		.m3_fw_support = false,
		.fixed_bdf_addr = true,
		.fixed_mem_region = true,
		.static_window_map = false,
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
#if notyet
		.support_off_channel_tx = false,
		.supports_multi_bssid = false,

		.sram_dump = {},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.name = "qca6390 hw2.0",
		.hw_rev = ATH11K_HW_QCA6390_HW20,
		.fw = {
			.dir = "qca6390-hw2.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 3,
		.bdf_addr = 0x4B0C0000,
		.hw_ops = &qca6390_ops,
		.ring_mask = &ath11k_hw_ring_mask_qca6390,
		.internal_sleep_clock = true,
		.regs = &qca6390_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
		.host_ce_config = qwx_host_ce_config_qca6390,
		.ce_count = QWX_CE_COUNT_QCA6390,
		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
		.target_ce_count = 9,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
		.svc_to_ce_map_len = 14,
		.single_pdev_only = true,
		.rxdma1_enable = false,
		.num_rxmda_per_pdev = 2,
		.rx_mac_buf_ring = true,
		.vdev_start_delay = true,
		.htt_peer_map_v2 = false,
#if notyet
		.spectral = {
			.fft_sz = 0,
			.fft_pad_sz = 0,
			.summary_pad_sz = 0,
			.fft_hdr_len = 0,
			.max_fft_bins = 0,
			.fragment_160mhz = false,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP),
		.supports_monitor = false,
		.full_monitor_mode = false,
#endif
		.supports_shadow_regs = true,
		.idle_ps = true,
		.supports_sta_ps = true,
		.cold_boot_calib = false,
		.cbcal_restart_fw = false,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = true,
		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
		.supports_regdb = false,
		.fix_l1ss = true,
		.credit_flow = true,
		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
		.hal_params = &ath11k_hw_hal_params_qca6390,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = false,
		.supports_rssi_stats = true,
#endif
		.fw_wmi_diag_event = true,
		.current_cc_support = true,
		.dbr_debug_support = false,
		.global_reset = true,
#ifdef notyet
		.bios_sar_capa = NULL,
#endif
		.m3_fw_support = true,
		.fixed_bdf_addr = false,
		.fixed_mem_region = false,
		.static_window_map = false,
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
#if notyet
		.support_off_channel_tx = true,
		.supports_multi_bssid = true,

		.sram_dump = {
			.start = 0x01400000,
			.end = 0x0171ffff,
		},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.name = "qcn9074 hw1.0",
		.hw_rev = ATH11K_HW_QCN9074_HW10,
		.fw = {
			.dir = "qcn9074-hw1.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 1,
		/*
		 * NOTE(review): unlike every other entry, qmi_service_ins_id
		 * is left unset here (still under "notyet") -- confirm this
		 * is intended before relying on QMI attach for qcn9074.
		 */
#if notyet
		.single_pdev_only = false,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
#endif
		.hw_ops = &qcn9074_ops,
		.ring_mask = &ath11k_hw_ring_mask_qcn9074,
		.internal_sleep_clock = false,
		.regs = &qcn9074_regs,
		.host_ce_config = qwx_host_ce_config_qcn9074,
		.ce_count = QWX_CE_COUNT_QCN9074,
		.target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
		.target_ce_count = 9,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
		.svc_to_ce_map_len = 18,
		.rxdma1_enable = true,
		.num_rxmda_per_pdev = 1,
		.rx_mac_buf_ring = false,
		.vdev_start_delay = false,
		.htt_peer_map_v2 = true,
#if notyet
		.spectral = {
			.fft_sz = 2,
			.fft_pad_sz = 0,
			.summary_pad_sz = 16,
			.fft_hdr_len = 24,
			.max_fft_bins = 1024,
			.fragment_160mhz = false,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP) |
					BIT(NL80211_IFTYPE_MESH_POINT),
		.supports_monitor = true,
		.full_monitor_mode = true,
#endif
		.supports_shadow_regs = false,
		.idle_ps = false,
		.supports_sta_ps = false,
		.cold_boot_calib = false,
		.cbcal_restart_fw = false,
		.fw_mem_mode = 2,
		.num_vdevs = 8,
		.num_peers = 128,
		.supports_suspend = false,
		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
		.supports_regdb = false,
		.fix_l1ss = true,
		.credit_flow = false,
		.max_tx_ring = DP_TCL_NUM_RING_MAX,
		.hal_params = &ath11k_hw_hal_params_ipq8074,
#if notyet
		.supports_dynamic_smps_6ghz = true,
		.alloc_cacheable_memory = true,
		.supports_rssi_stats = false,
#endif
		.fw_wmi_diag_event = false,
		.current_cc_support = false,
		.dbr_debug_support = true,
		.global_reset = false,
#ifdef notyet
		.bios_sar_capa = NULL,
#endif
		.m3_fw_support = true,
		.fixed_bdf_addr = false,
		.fixed_mem_region = false,
		.static_window_map = true,
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
#if notyet
		.support_off_channel_tx = false,
		.supports_multi_bssid = false,

		.sram_dump = {},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.name = "wcn6855 hw2.0",
		.hw_rev = ATH11K_HW_WCN6855_HW20,
		.fw = {
			.dir = "wcn6855-hw2.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 3,
		.bdf_addr = 0x4B0C0000,
		.hw_ops = &wcn6855_ops,
		.ring_mask = &ath11k_hw_ring_mask_qca6390,
		.internal_sleep_clock = true,
		.regs = &wcn6855_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
		.host_ce_config = qwx_host_ce_config_qca6390,
		.ce_count = QWX_CE_COUNT_QCA6390,
		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
		.target_ce_count = 9,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
		.svc_to_ce_map_len = 14,
		.single_pdev_only = true,
		.rxdma1_enable = false,
		.num_rxmda_per_pdev = 2,
		.rx_mac_buf_ring = true,
		.vdev_start_delay = true,
		.htt_peer_map_v2 = false,
#if notyet
		.spectral = {
			.fft_sz = 0,
			.fft_pad_sz = 0,
			.summary_pad_sz = 0,
			.fft_hdr_len = 0,
			.max_fft_bins = 0,
			.fragment_160mhz = false,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP),
		.supports_monitor = false,
		.full_monitor_mode = false,
#endif
		.supports_shadow_regs = true,
		.idle_ps = true,
		.supports_sta_ps = true,
		.cold_boot_calib = false,
		.cbcal_restart_fw = false,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = true,
		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
		.supports_regdb = true,
		.fix_l1ss = false,
		.credit_flow = true,
		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
		.hal_params = &ath11k_hw_hal_params_qca6390,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = false,
		.supports_rssi_stats = true,
#endif
		.fw_wmi_diag_event = true,
		.current_cc_support = true,
		.dbr_debug_support = false,
		.global_reset = true,
#ifdef notyet
		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
#endif
		.m3_fw_support = true,
		.fixed_bdf_addr = false,
		.fixed_mem_region = false,
		.static_window_map = false,
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
#if notyet
		.support_off_channel_tx = true,
		.supports_multi_bssid = true,

		.sram_dump = {
			.start = 0x01400000,
			.end = 0x0177ffff,
		},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.name = "wcn6855 hw2.1",
		.hw_rev = ATH11K_HW_WCN6855_HW21,
		.fw = {
			.dir = "wcn6855-hw2.1",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 3,
		.bdf_addr = 0x4B0C0000,
		.hw_ops = &wcn6855_ops,
		.ring_mask = &ath11k_hw_ring_mask_qca6390,
		.internal_sleep_clock = true,
		.regs = &wcn6855_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
		.host_ce_config = qwx_host_ce_config_qca6390,
		.ce_count = QWX_CE_COUNT_QCA6390,
		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
		.target_ce_count = 9,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
		.svc_to_ce_map_len = 14,
		.single_pdev_only = true,
		.rxdma1_enable = false,
		.num_rxmda_per_pdev = 2,
		.rx_mac_buf_ring = true,
		.vdev_start_delay = true,
		.htt_peer_map_v2 = false,
#if notyet
		.spectral = {
			.fft_sz = 0,
			.fft_pad_sz = 0,
			.summary_pad_sz = 0,
			.fft_hdr_len = 0,
			.max_fft_bins = 0,
			.fragment_160mhz = false,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP),
		.supports_monitor = false,
#endif
		.supports_shadow_regs = true,
		.idle_ps = true,
		.supports_sta_ps = true,
		.cold_boot_calib = false,
		.cbcal_restart_fw = false,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = true,
		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
		.supports_regdb = true,
		.fix_l1ss = false,
		.credit_flow = true,
		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
		.hal_params = &ath11k_hw_hal_params_qca6390,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = false,
		.supports_rssi_stats = true,
#endif
		.fw_wmi_diag_event = true,
		.current_cc_support = true,
		.dbr_debug_support = false,
		.global_reset = true,
#ifdef notyet
		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
#endif
		.m3_fw_support = true,
		.fixed_bdf_addr = false,
		.fixed_mem_region = false,
		.static_window_map = false,
		.hybrid_bus_type = false,
		.fixed_fw_mem = false,
#if notyet
		.support_off_channel_tx = true,
		.supports_multi_bssid = true,

		.sram_dump = {
			.start = 0x01400000,
			.end = 0x0177ffff,
		},

		.tcl_ring_retry = true,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
		.smp2p_wow_exit = false,
#endif
	},
	{
		.name = "wcn6750 hw1.0",
		.hw_rev = ATH11K_HW_WCN6750_HW10,
		.fw = {
			.dir = "wcn6750-hw1.0",
			.board_size = 256 * 1024,
			.cal_offset = 128 * 1024,
		},
		.max_radios = 1,
		.bdf_addr = 0x4B0C0000,
		.hw_ops = &wcn6750_ops,
		.ring_mask = &ath11k_hw_ring_mask_wcn6750,
		.internal_sleep_clock = false,
		.regs = &wcn6750_regs,
		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
		.host_ce_config = qwx_host_ce_config_qca6390,
		.ce_count = QWX_CE_COUNT_QCA6390,
		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
		.target_ce_count = 9,
		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
		.svc_to_ce_map_len = 14,
		.single_pdev_only = true,
		.rxdma1_enable = false,
		.num_rxmda_per_pdev = 1,
		.rx_mac_buf_ring = true,
		.vdev_start_delay = true,
		.htt_peer_map_v2 = false,
#if notyet
		.spectral = {
			.fft_sz = 0,
			.fft_pad_sz = 0,
			.summary_pad_sz = 0,
			.fft_hdr_len = 0,
			.max_fft_bins = 0,
			.fragment_160mhz = false,
		},

		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
					BIT(NL80211_IFTYPE_AP),
		.supports_monitor = false,
#endif
		.supports_shadow_regs = true,
		.idle_ps = true,
		.supports_sta_ps = true,
		.cold_boot_calib = true,
		.cbcal_restart_fw = false,
		.fw_mem_mode = 0,
		.num_vdevs = 16 + 1,
		.num_peers = 512,
		.supports_suspend = false,
		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
		.supports_regdb = true,
		.fix_l1ss = false,
		.credit_flow = true,
		.max_tx_ring = DP_TCL_NUM_RING_MAX,
		.hal_params = &ath11k_hw_hal_params_wcn6750,
#if notyet
		.supports_dynamic_smps_6ghz = false,
		.alloc_cacheable_memory = false,
		.supports_rssi_stats = true,
#endif
		.fw_wmi_diag_event = false,
		.current_cc_support = true,
		.dbr_debug_support = false,
		.global_reset = false,
#ifdef notyet
		.bios_sar_capa = NULL,
#endif
		.m3_fw_support = false,
		.fixed_bdf_addr = false,
		.fixed_mem_region = false,
		.static_window_map = true,
		.hybrid_bus_type = true,
		.fixed_fw_mem = true,
#if notyet
		.support_off_channel_tx = true,
		.supports_multi_bssid = true,

		.sram_dump = {},

		.tcl_ring_retry = false,
		.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
		.smp2p_wow_exit = true,
#endif
	},
};
2731 
/*
 * HAL register offsets for IPQ8074/IPQ6018.  Offsets are relative to
 * each block's register base as applied by the HAL ring setup code.
 * Registers that do not exist on this chip are set to 0.
 */
const struct ath11k_hw_regs ipq8074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000510,
	.hal_tcl1_ring_base_msb = 0x00000514,
	.hal_tcl1_ring_id = 0x00000518,
	.hal_tcl1_ring_misc = 0x00000520,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000530,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000558,
	.hal_tcl1_ring_msi1_base_msb = 0x0000055c,
	.hal_tcl1_ring_msi1_data = 0x00000560,
	.hal_tcl2_ring_base_lsb = 0x00000568,
	.hal_tcl_ring_base_lsb = 0x00000618,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000720,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address; zero: IPQ8074 is an AHB (SoC) device */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area; zero: no shadow regs on this chip */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in IPQ8074 */
	.hal_reo1_misc_ctl = 0x0,
};
2816 
/*
 * HAL register offsets for QCA6390.  Same layout as ipq8074_regs above
 * but with the PCIe QSERDES/PCS registers and the shadow register base
 * populated, since QCA6390 is a PCIe device with shadow-register
 * support (see .supports_shadow_regs in ath11k_hw_params).
 */
const struct ath11k_hw_regs qca6390_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000684,
	.hal_tcl1_ring_base_msb = 0x00000688,
	.hal_tcl1_ring_id = 0x0000068c,
	.hal_tcl1_ring_misc = 0x00000694,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006a4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006d0,
	.hal_tcl1_ring_msi1_data = 0x000006d4,
	.hal_tcl2_ring_base_lsb = 0x000006dc,
	.hal_tcl_ring_base_lsb = 0x0000078c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000894,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x00000244,
	.hal_reo1_ring_base_msb = 0x00000248,
	.hal_reo1_ring_id = 0x0000024c,
	.hal_reo1_ring_misc = 0x00000254,
	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
	.hal_reo1_ring_producer_int_setup = 0x00000268,
	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
	.hal_reo1_ring_msi1_base_msb = 0x00000290,
	.hal_reo1_ring_msi1_data = 0x00000294,
	.hal_reo2_ring_base_lsb = 0x0000029c,
	.hal_reo1_aging_thresh_ix_0 = 0x0000050c,
	.hal_reo1_aging_thresh_ix_1 = 0x00000510,
	.hal_reo1_aging_thresh_ix_2 = 0x00000514,
	.hal_reo1_aging_thresh_ix_3 = 0x00000518,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003030,
	.hal_reo1_ring_tp = 0x00003034,
	.hal_reo2_ring_hp = 0x00003038,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003a4,
	.hal_reo_tcl_ring_hp = 0x00003050,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x000004ac,
	.hal_reo_status_hp = 0x00003068,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
	.hal_wbm_idle_link_ring_misc = 0x00000870,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001d8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000910,
	.hal_wbm1_release_ring_base_lsb = 0x00000968,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x000008fc,

	/* REO misc control register, not used in QCA6390 */
	.hal_reo1_misc_ctl = 0x0,
};
2901 
/*
 * Hardware register offset map for QCN9074.
 * Same layout as qca6390_regs above; values are chip-specific constants
 * inherited from Linux ath11k. A zero entry means the register is not
 * used on this chip.
 */
const struct ath11k_hw_regs qcn9074_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x000004f0,
	.hal_tcl1_ring_base_msb = 0x000004f4,
	.hal_tcl1_ring_id = 0x000004f8,
	.hal_tcl1_ring_misc = 0x00000500,
	.hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
	.hal_tcl1_ring_tp_addr_msb = 0x00000510,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
	.hal_tcl1_ring_msi1_base_lsb = 0x00000538,
	.hal_tcl1_ring_msi1_base_msb = 0x0000053c,
	.hal_tcl1_ring_msi1_data = 0x00000540,
	.hal_tcl2_ring_base_lsb = 0x00000548,
	.hal_tcl_ring_base_lsb = 0x000005f8,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x00000700,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x0000029c,
	.hal_reo1_ring_base_msb = 0x000002a0,
	.hal_reo1_ring_id = 0x000002a4,
	.hal_reo1_ring_misc = 0x000002ac,
	.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
	.hal_reo1_ring_hp_addr_msb = 0x000002b4,
	.hal_reo1_ring_producer_int_setup = 0x000002c0,
	.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
	.hal_reo1_ring_msi1_base_msb = 0x000002e8,
	.hal_reo1_ring_msi1_data = 0x000002ec,
	.hal_reo2_ring_base_lsb = 0x000002f4,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003038,
	.hal_reo1_ring_tp = 0x0000303c,
	.hal_reo2_ring_hp = 0x00003040,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
	.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x0,

	/* REO misc control register, not used in QCN9074 */
	.hal_reo1_misc_ctl = 0x0,
};
2986 
/*
 * Hardware register offset map for WCN6855.
 * Same layout as the other *_regs tables; values are chip-specific
 * constants inherited from Linux ath11k. Unlike earlier chips, WCN6855
 * uses hal_reo1_misc_ctl for fragment destination ring configuration.
 */
const struct ath11k_hw_regs wcn6855_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000690,
	.hal_tcl1_ring_base_msb = 0x00000694,
	.hal_tcl1_ring_id = 0x00000698,
	.hal_tcl1_ring_misc = 0x000006a0,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
	.hal_tcl1_ring_tp_addr_msb = 0x000006b0,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
	.hal_tcl1_ring_msi1_base_msb = 0x000006dc,
	.hal_tcl1_ring_msi1_data = 0x000006e0,
	.hal_tcl2_ring_base_lsb = 0x000006e8,
	.hal_tcl_ring_base_lsb = 0x00000798,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x000008a0,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x00000244,
	.hal_reo1_ring_base_msb = 0x00000248,
	.hal_reo1_ring_id = 0x0000024c,
	.hal_reo1_ring_misc = 0x00000254,
	.hal_reo1_ring_hp_addr_lsb = 0x00000258,
	.hal_reo1_ring_hp_addr_msb = 0x0000025c,
	.hal_reo1_ring_producer_int_setup = 0x00000268,
	.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
	.hal_reo1_ring_msi1_base_msb = 0x00000290,
	.hal_reo1_ring_msi1_data = 0x00000294,
	.hal_reo2_ring_base_lsb = 0x0000029c,
	.hal_reo1_aging_thresh_ix_0 = 0x000005bc,
	.hal_reo1_aging_thresh_ix_1 = 0x000005c0,
	.hal_reo1_aging_thresh_ix_2 = 0x000005c4,
	.hal_reo1_aging_thresh_ix_3 = 0x000005c8,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003030,
	.hal_reo1_ring_tp = 0x00003034,
	.hal_reo2_ring_hp = 0x00003038,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x00000454,
	.hal_reo_tcl_ring_hp = 0x00003060,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x00000194,
	.hal_reo_cmd_ring_hp = 0x00003020,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x0000055c,
	.hal_reo_status_hp = 0x00003078,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x000001ec,
	.hal_sw2reo_ring_hp = 0x00003028,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000870,
	.hal_wbm_idle_link_ring_misc = 0x00000880,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001e8,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000920,
	.hal_wbm1_release_ring_base_lsb = 0x00000978,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
	.pcie_pcs_osc_dtct_config_base = 0x01e0c628,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x000008fc,

	/* REO misc control register, used for fragment
	 * destination ring config in WCN6855.
	 */
	.hal_reo1_misc_ctl = 0x00000630,
};
3073 
/*
 * Hardware register offset map for WCN6750.
 * Same layout as the other *_regs tables; values are chip-specific
 * constants inherited from Linux ath11k. The PCIe PHY entries are zero
 * (not used on this chip); hal_reo1_misc_ctl is used for fragment
 * destination ring configuration, as on WCN6855.
 */
const struct ath11k_hw_regs wcn6750_regs = {
	/* SW2TCL(x) R0 ring configuration address */
	.hal_tcl1_ring_base_lsb = 0x00000694,
	.hal_tcl1_ring_base_msb = 0x00000698,
	.hal_tcl1_ring_id = 0x0000069c,
	.hal_tcl1_ring_misc = 0x000006a4,
	.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
	.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
	.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
	.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
	.hal_tcl1_ring_msi1_data = 0x000006e4,
	.hal_tcl2_ring_base_lsb = 0x000006ec,
	.hal_tcl_ring_base_lsb = 0x0000079c,

	/* TCL STATUS ring address */
	.hal_tcl_status_ring_base_lsb = 0x000008a4,

	/* REO2SW(x) R0 ring configuration address */
	.hal_reo1_ring_base_lsb = 0x000001ec,
	.hal_reo1_ring_base_msb = 0x000001f0,
	.hal_reo1_ring_id = 0x000001f4,
	.hal_reo1_ring_misc = 0x000001fc,
	.hal_reo1_ring_hp_addr_lsb = 0x00000200,
	.hal_reo1_ring_hp_addr_msb = 0x00000204,
	.hal_reo1_ring_producer_int_setup = 0x00000210,
	.hal_reo1_ring_msi1_base_lsb = 0x00000234,
	.hal_reo1_ring_msi1_base_msb = 0x00000238,
	.hal_reo1_ring_msi1_data = 0x0000023c,
	.hal_reo2_ring_base_lsb = 0x00000244,
	.hal_reo1_aging_thresh_ix_0 = 0x00000564,
	.hal_reo1_aging_thresh_ix_1 = 0x00000568,
	.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
	.hal_reo1_aging_thresh_ix_3 = 0x00000570,

	/* REO2SW(x) R2 ring pointers (head/tail) address */
	.hal_reo1_ring_hp = 0x00003028,
	.hal_reo1_ring_tp = 0x0000302c,
	.hal_reo2_ring_hp = 0x00003030,

	/* REO2TCL R0 ring configuration address */
	.hal_reo_tcl_ring_base_lsb = 0x000003fc,
	.hal_reo_tcl_ring_hp = 0x00003058,

	/* REO CMD ring address */
	.hal_reo_cmd_ring_base_lsb = 0x000000e4,
	.hal_reo_cmd_ring_hp = 0x00003010,

	/* REO status address */
	.hal_reo_status_ring_base_lsb = 0x00000504,
	.hal_reo_status_hp = 0x00003070,

	/* SW2REO ring address */
	.hal_sw2reo_ring_base_lsb = 0x0000013c,
	.hal_sw2reo_ring_hp = 0x00003018,

	/* WCSS relative address */
	.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
	.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
	.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
	.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,

	/* WBM Idle address */
	.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
	.hal_wbm_idle_link_ring_misc = 0x00000884,

	/* SW2WBM release address */
	.hal_wbm_release_ring_base_lsb = 0x000001ec,

	/* WBM2SW release address */
	.hal_wbm0_release_ring_base_lsb = 0x00000924,
	.hal_wbm1_release_ring_base_lsb = 0x0000097c,

	/* PCIe base address */
	.pcie_qserdes_sysclk_en_sel = 0x0,
	.pcie_pcs_osc_dtct_config_base = 0x0,

	/* Shadow register area */
	.hal_shadow_base_addr = 0x00000504,

	/* REO misc control register, used for fragment
	 * destination ring config in WCN6750.
	 */
	.hal_reo1_misc_ctl = 0x000005d8,
};
3160 
/*
 * Platform capability / host state bit flags.
 * NOTE(review): usage is not visible in this chunk — presumably these
 * are reported to firmware during QMI host capability exchange, as in
 * Linux ath11k; confirm against the qwx QMI host-cap send path.
 */
#define QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
#define QWX_HOST_CSTATE_BIT			0x04
#define QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
#define QWX_PLATFORM_CAP_PCIE_PME_D3COLD	0x10
3165 
/*
 * QMI wire-format descriptor (TLV element info) for the common
 * qmi_response_type_v01 structure: two signed 16-bit enums, "result"
 * followed by "error". Arrays of qmi_elem_info are terminated by a
 * QMI_EOTI entry and drive the generic QMI encoder/decoder.
 */
const struct qmi_elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, result),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct qmi_response_type_v01, error),
		.ei_array	= NULL,
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.elem_len	= 0,
		.elem_size	= 0,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= 0,
		.ei_array	= NULL,
	},
};
3195 
/*
 * QMI wire-format descriptor for the WLAN firmware "indication register"
 * request message. Each optional field is encoded as a pair of TLV
 * elements sharing a tlv_type: a QMI_OPT_FLAG presence byte
 * ("..._valid") followed by the value itself.
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   msa_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   pin_connect_result_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id_valid),
	},
	{
		/* client_id is the only 32-bit field in this message */
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   client_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   request_mem_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_mem_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   fw_init_done_enable),
	},

	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   rejuvenate_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   xo_cal_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
					   cal_done_enable),
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3420 
/*
 * QMI wire-format descriptor for the "indication register" response:
 * the mandatory common response struct (tlv 0x02) plus an optional
 * 64-bit firmware status field (tlv 0x10, flag + value pair).
 */
const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
	{
		/* nested struct decoded via qmi_response_type_v01_ei */
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
					   fw_status),
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3456 
/*
 * QMI wire-format descriptor for the host capability request message.
 * All fields are optional flag/value pairs; the gpios field (tlv 0x12)
 * is a variable-length uint32 array encoded as flag + length + data.
 */
const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   num_clients_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   num_clients),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   wake_msi_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   wake_msi),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios_valid),
	},
	{
		/* length prefix for the variable-length gpios array below */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios_len),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= QMI_WLFW_MAX_NUM_GPIO_V01,
		.elem_size	= sizeof(uint32_t),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   gpios),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   nm_modem_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   nm_modem),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   bdf_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   m3_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_filesys_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_filesys_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_cache_support_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_cache_support),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_done_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   cal_done),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_bucket_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1B,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_bucket),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_cfg_mode_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1C,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
					   mem_cfg_mode),
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3707 
/*
 * QMI wire-format descriptor for the host capability response: only the
 * mandatory common response struct (tlv 0x02).
 */
const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3724 
/*
 * QMI wire-format descriptor for one firmware memory configuration
 * entry (offset, size, secure flag). Used as a nested array element by
 * qmi_wlanfw_mem_seg_s_v01_ei below; tlv_type 0 because nested struct
 * members are encoded positionally, without their own TLV headers.
 */
const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3756 
/*
 * QMI wire-format descriptor for one firmware memory segment request:
 * size, type enum, and a variable-length array of nested memory
 * configuration entries (decoded via qmi_wlanfw_mem_cfg_s_v01_ei).
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01,
				  size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
	},
	{
		/* length prefix for the variable-length mem_cfg array below */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
		.ei_array	= qmi_wlanfw_mem_cfg_s_v01_ei,
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3798 
/*
 * QMI wire-format descriptor for the firmware "request memory"
 * indication: a variable-length array of memory segment requests
 * (length prefix + array share tlv 0x01).
 */
const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_s_v01_ei,
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3825 
/*
 * QMI wire-format descriptor for one memory segment in the host's
 * "respond memory" reply: DMA address, size, type enum, and restore
 * flag. Nested struct members use tlv_type 0 (positional encoding).
 */
const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
	},
	{
		/* end-of-type-info terminator */
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3865 
/*
 * Encode schema for the host's "respond memory" request: TLV 0x01 is a
 * length byte plus a variable-length array of allocated segment
 * descriptors (qmi_wlanfw_mem_seg_resp_s_v01).
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
	{
		/* TLV 0x01: number of valid entries in mem_seg[] */
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
					   mem_seg),
		.ei_array	= qmi_wlanfw_mem_seg_resp_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3892 
/*
 * Decode schema for the firmware's "respond memory" reply: just the
 * standard QMI result TLV (0x02).
 */
const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3910 
/* The capability request message carries no payload; terminator only. */
const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3918 
/*
 * Nested struct layout for RF chip information (chip id and family),
 * embedded in the capability response.
 */
const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
					   chip_family),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3944 
/*
 * Nested struct layout for RF board information (board id), embedded in
 * the capability response.
 */
const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
					   board_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3961 
/*
 * Nested struct layout for SoC information (soc id), embedded in the
 * capability response.
 */
const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
3977 
/*
 * Nested struct layout for firmware version information: a version word
 * plus a NUL-terminated build timestamp string.
 */
const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_version),
	},
	{
		/* elem_len includes room for the terminating NUL */
		.data_type	= QMI_STRING,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
					   fw_build_timestamp),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4003 
/*
 * Decode schema for the firmware capability response. TLV 0x02 is the
 * standard QMI result; TLVs 0x10-0x19 are optional elements, each
 * consisting of a QMI_OPT_FLAG "valid" marker followed by the payload
 * element with the same tlv_type.
 */
const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		/* TLV 0x10: optional RF chip info */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   chip_info),
		.ei_array	= qmi_wlanfw_rf_chip_info_s_v01_ei,
	},
	{
		/* TLV 0x11: optional RF board info */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   board_info),
		.ei_array	= qmi_wlanfw_rf_board_info_s_v01_ei,
	},
	{
		/* TLV 0x12: optional SoC info */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_soc_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   soc_info),
		.ei_array	= qmi_wlanfw_soc_info_s_v01_ei,
	},
	{
		/* TLV 0x13: optional firmware version info */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info_valid),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_version_info),
		.ei_array	= qmi_wlanfw_fw_version_info_s_v01_ei,
	},
	{
		/* TLV 0x14: optional firmware build id string */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   fw_build_id),
	},
	{
		/* TLV 0x15: optional number of MACs */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   num_macs),
	},
	{
		/* TLV 0x16: optional voltage (mV) */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x16,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   voltage_mv),
	},
	{
		/* TLV 0x17: optional time frequency (Hz) */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x17,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   time_freq_hz),
	},
	{
		/* TLV 0x18: optional OTP version */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x18,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   otp_version),
	},
	{
		/* TLV 0x19: optional EEPROM read timeout */
		.data_type      = QMI_OPT_FLAG,
		.elem_len       = 1,
		.elem_size      = sizeof(uint8_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout_valid),
	},
	{
		.data_type      = QMI_UNSIGNED_4_BYTE,
		.elem_len       = 1,
		.elem_size      = sizeof(uint32_t),
		.array_type     = NO_ARRAY,
		.tlv_type       = 0x19,
		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
					   eeprom_read_timeout),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4204 
/*
 * Encode schema for a board data file (BDF) download request. The file
 * is transferred in segments: TLV 0x01 is a mandatory validity byte;
 * TLVs 0x10-0x15 are optional (flag + payload pairs) carrying file id,
 * total size, segment id, segment data, end-of-file marker, and BDF
 * type.
 */
const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
	{
		/* TLV 0x01: mandatory validity marker */
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   valid),
	},
	{
		/* TLV 0x10: optional file id */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id_valid),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   file_id),
	},
	{
		/* TLV 0x11: optional total file size */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   total_size),
	},
	{
		/* TLV 0x12: optional segment id */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   seg_id),
	},
	{
		/* TLV 0x13: optional segment payload (16-bit length) */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint16_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data_len),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= QMI_WLANFW_MAX_DATA_SIZE_V01,
		.elem_size	= sizeof(uint8_t),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   data),
	},
	{
		/* TLV 0x14: optional end-of-file marker */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   end),
	},
	{
		/* TLV 0x15: optional BDF type */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
					   bdf_type),
	},

	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4339 
/*
 * Decode schema for the BDF download reply: standard QMI result TLV
 * (0x02) only.
 */
const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4357 
/*
 * Encode schema for the M3 firmware image info request: DMA address
 * (TLV 0x01) and size (TLV 0x02) of the M3 image buffer.
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint64_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4381 
/*
 * Decode schema for the M3 info reply: standard QMI result TLV (0x02)
 * only.
 */
const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4398 
/*
 * Encode schema for the WLAN INI request: an optional (flag + value)
 * pair in TLV 0x10 controlling firmware logging.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
					   enablefwlog),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4424 
/*
 * Decode schema for the WLAN INI reply: standard QMI result TLV (0x02)
 * only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4442 
/*
 * Nested struct layout for one copy-engine target pipe configuration
 * entry: pipe number, direction, ring entry count, max transfer size,
 * and flags. Embedded in the WLAN config request.
 */
const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nentries),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   nbytes_max),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
					   flags),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4495 
/*
 * Nested struct layout for one copy-engine service-to-pipe mapping
 * entry: service id, direction, and pipe number. Embedded in the WLAN
 * config request.
 */
const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   service_id),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum qmi_wlanfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4530 
4531 const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
4532 	{
4533 		.data_type	= QMI_UNSIGNED_2_BYTE,
4534 		.elem_len	= 1,
4535 		.elem_size	= sizeof(uint16_t),
4536 		.array_type	= NO_ARRAY,
4537 		.tlv_type	= 0,
4538 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
4539 	},
4540 	{
4541 		.data_type	= QMI_UNSIGNED_2_BYTE,
4542 		.elem_len	= 1,
4543 		.elem_size	= sizeof(uint16_t),
4544 		.array_type	= NO_ARRAY,
4545 		.tlv_type	= 0,
4546 		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
4547 					   offset),
4548 	},
4549 	{
4550 		.data_type	= QMI_EOTI,
4551 		.array_type	= QMI_COMMON_TLV_TYPE,
4552 	},
4553 };
4554 
/*
 * Nested struct layout for one (v2) shadow register configuration
 * entry: a single register address. Embedded in the WLAN config
 * request.
 */
const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
					   addr),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4571 
/*
 * Encode schema for the WLAN mode request: mandatory mode word
 * (TLV 0x01) plus an optional hardware-debug flag pair (TLV 0x10).
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint32_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   mode),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
					   hw_debug),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4606 
/*
 * Decode schema for the WLAN mode reply: standard QMI result TLV
 * (0x02) only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4624 
/*
 * Encode schema for the WLAN config request. All elements are optional
 * (flag + payload pairs): host version string (TLV 0x10), CE target
 * pipe config array (0x11), service-to-pipe map array (0x12), shadow
 * register config array (0x13), and v2 shadow register array (0x14).
 * Each variable-length array is preceded by a one-byte length element
 * with the same tlv_type.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
	{
		/* TLV 0x10: optional host version string */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version_valid),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLANFW_MAX_STR_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   host_version),
	},
	{
		/* TLV 0x11: optional CE target pipe configuration */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_CE_V01,
		.elem_size	= sizeof(
				struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   tgt_cfg),
		.ei_array	= qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
	},
	{
		/* TLV 0x12: optional service-to-pipe map */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SVC_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   svc_cfg),
		.ei_array	= qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
	},
	{
		/* TLV 0x13: optional shadow register configuration (v1) */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg),
		.ei_array	= qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
	},
	{
		/* TLV 0x14: optional shadow register configuration (v2) */
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_valid),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(uint8_t),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
		.elem_size	= sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
					   shadow_reg_v2),
		.ei_array	= qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4763 
/*
 * Decode schema for the WLAN config reply: standard QMI result TLV
 * (0x02) only.
 */
const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};
4780 
4781 int
4782 qwx_ce_intr(void *arg)
4783 {
4784 	struct qwx_ce_pipe *pipe = arg;
4785 	struct qwx_softc *sc = pipe->sc;
4786 
4787 	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) ||
4788 	    ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) {
4789 		DPRINTF("%s: unexpected interrupt on pipe %d\n",
4790 		    __func__, pipe->pipe_num);
4791 		return 1;
4792 	}
4793 
4794 	return qwx_ce_per_engine_service(sc, pipe->pipe_num);
4795 }
4796 
4797 int
4798 qwx_ext_intr(void *arg)
4799 {
4800 	struct qwx_ext_irq_grp *irq_grp = arg;
4801 	struct qwx_softc *sc = irq_grp->sc;
4802 
4803 	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) {
4804 		DPRINTF("%s: unexpected interrupt for ext group %d\n",
4805 		    __func__, irq_grp->grp_id);
4806 		return 1;
4807 	}
4808 
4809 	return qwx_dp_service_srng(sc, irq_grp->grp_id);
4810 }
4811 
/*
 * Human-readable names for QMI element data types, indexed by the
 * data_type value (used in debug and error messages below); the entry
 * order must therefore match the QMI data type enum.
 */
const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = {
	"EOTI",
	"OPT_FLAG",
	"DATA_LEN",
	"UNSIGNED_1_BYTE",
	"UNSIGNED_2_BYTE",
	"UNSIGNED_4_BYTE",
	"UNSIGNED_8_BYTE",
	"SIGNED_2_BYTE_ENUM",
	"SIGNED_4_BYTE_ENUM",
	"STRUCT",
	"STRING"
};
4825 
4826 const struct qmi_elem_info *
4827 qwx_qmi_decode_get_elem(const struct qmi_elem_info *ei, uint8_t elem_type)
4828 {
4829 	while (ei->data_type != QMI_EOTI && ei->tlv_type != elem_type)
4830 		ei++;
4831 
4832 	DNPRINTF(QWX_D_QMI, "%s: found elem 0x%x data type 0x%x\n", __func__,
4833 	    ei->tlv_type, ei->data_type);
4834 	return ei;
4835 }
4836 
4837 size_t
4838 qwx_qmi_decode_min_elem_size(const struct qmi_elem_info *ei, int nested)
4839 {
4840 	size_t min_size = 0;
4841 
4842 	switch (ei->data_type) {
4843 	case QMI_EOTI:
4844 	case QMI_OPT_FLAG:
4845 		break;
4846 	case QMI_DATA_LEN:
4847 		if (ei->elem_len == 1)
4848 			min_size += sizeof(uint8_t);
4849 		else
4850 			min_size += sizeof(uint16_t);
4851 		break;
4852 	case QMI_UNSIGNED_1_BYTE:
4853 	case QMI_UNSIGNED_2_BYTE:
4854 	case QMI_UNSIGNED_4_BYTE:
4855 	case QMI_UNSIGNED_8_BYTE:
4856 	case QMI_SIGNED_2_BYTE_ENUM:
4857 	case QMI_SIGNED_4_BYTE_ENUM:
4858 		min_size += ei->elem_len * ei->elem_size;
4859 		break;
4860 	case QMI_STRUCT:
4861 		if (nested > 2) {
4862 			printf("%s: QMI struct element 0x%x with "
4863 			    "data type %s (0x%x) is nested too "
4864 			    "deeply\n", __func__,
4865 			    ei->tlv_type,
4866 			    qmi_data_type_name[ei->data_type],
4867 			    ei->data_type);
4868 		}
4869 		ei = ei->ei_array;
4870 		while (ei->data_type != QMI_EOTI) {
4871 			min_size += qwx_qmi_decode_min_elem_size(ei,
4872 			    nested + 1);
4873 			ei++;
4874 		}
4875 		break;
4876 	case QMI_STRING:
4877 		min_size += 1;
4878 		/* Strings nested in structs use an in-band length field. */
4879 		if (nested) {
4880 			if (ei->elem_len <= 0xff)
4881 				min_size += sizeof(uint8_t);
4882 			else
4883 				min_size += sizeof(uint16_t);
4884 		}
4885 		break;
4886 	default:
4887 		printf("%s: unhandled data type 0x%x\n", __func__,
4888 		    ei->data_type);
4889 		break;
4890 	}
4891 
4892 	return min_size;
4893 }
4894 
/*
 * Parse one QMI TLV header (1-byte element type followed by a length
 * field) from 'input' and look up the matching element descriptor in
 * the ei0 array.  On success, *next_ei points at the descriptor (or at
 * a QMI_EOTI entry if the element type is unrecognized) and
 * *actual_size holds the element length taken from the header.
 * The advertised size is validated against the descriptor and against
 * the output buffer size.  Returns 0 on success, -1 on malformed input.
 */
int
qwx_qmi_decode_tlv_hdr(struct qwx_softc *sc,
    const struct qmi_elem_info **next_ei, uint16_t *actual_size,
    size_t output_len, const struct qmi_elem_info *ei0,
    uint8_t *input, size_t input_len)
{
	uint8_t *p = input;
	size_t remain = input_len;
	uint8_t elem_type;
	uint16_t elem_size = 0;
	const struct qmi_elem_info *ei;

	*next_ei = NULL;
	*actual_size = 0;

	/* Smallest possible header: 1-byte type + 2-byte length. */
	if (remain < 3) {
		printf("%s: QMI message TLV header too short\n",
		   sc->sc_dev.dv_xname);
		return -1;
	}
	elem_type = *p;
	p++;
	remain--;

	/*
	 * By relying on TLV type information we can skip over EIs which
	 * describe optional elements that have not been encoded.
	 * Such elements will be left at their default value (zero) in
	 * the decoded output struct.
	 * XXX We currently allow elements to appear in any order and
	 * we do not detect duplicates.
	 */
	ei = qwx_qmi_decode_get_elem(ei0, elem_type);

	DNPRINTF(QWX_D_QMI,
	    "%s: decoding element 0x%x with data type %s (0x%x)\n",
	    __func__, elem_type, qmi_data_type_name[ei->data_type],
	    ei->data_type);

	if (remain < 2) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/*
	 * DATALEN elements with a one-byte length descriptor carry a
	 * one-byte size field; all other elements use two bytes.
	 * NOTE(review): the caller qwx_qmi_decode_msg() always advances
	 * past a 3-byte header regardless of which branch was taken here
	 * — presumably the wire format pads accordingly; verify against
	 * the QMI encoder.
	 */
	if (ei->data_type == QMI_DATA_LEN && ei->elem_len == 1) {
		elem_size = p[0];
		p++;
		remain--;
	} else {
		elem_size = (p[0] | (p[1] << 8));
		p += 2;
		remain -= 2;
	}

	*next_ei = ei;
	*actual_size = elem_size;

	/* Unknown element: report it to the caller, which skips it. */
	if (ei->data_type == QMI_EOTI) {
		DNPRINTF(QWX_D_QMI,
		    "%s: unrecognized QMI element type 0x%x size %u\n",
		    sc->sc_dev.dv_xname, elem_type, elem_size);
		return 0;
	}

	/*
	 * Is this an optional element which has been encoded?
	 * If so, use info about this optional element for verification.
	 */
	if (ei->data_type == QMI_OPT_FLAG)
		ei++;

	DNPRINTF(QWX_D_QMI, "%s: ei->size %u, actual size %u\n", __func__,
	    ei->elem_size, *actual_size);

	/* Sanity-check the advertised size against the descriptor. */
	switch (ei->data_type) {
	case QMI_UNSIGNED_1_BYTE:
	case QMI_UNSIGNED_2_BYTE:
	case QMI_UNSIGNED_4_BYTE:
	case QMI_UNSIGNED_8_BYTE:
	case QMI_SIGNED_2_BYTE_ENUM:
	case QMI_SIGNED_4_BYTE_ENUM:
		/* Fixed-size elements must match exactly. */
		if (elem_size != ei->elem_size) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	case QMI_DATA_LEN:
		break;
	case QMI_STRING:
	case QMI_STRUCT:
		/* Variable-size elements must meet their minimum size. */
		if (elem_size < qwx_qmi_decode_min_elem_size(ei, 0)) {
			printf("%s: QMI message element 0x%x "
			    "data type %s (0x%x) with bad size: %u\n",
			    sc->sc_dev.dv_xname, elem_type,
			    qmi_data_type_name[ei->data_type],
			    ei->data_type, elem_size);
			return -1;
		}
		break;
	default:
		printf("%s: unexpected QMI message element "
		    "data type 0x%x\n", sc->sc_dev.dv_xname,
		    ei->data_type);
		return -1;
	}

	if (remain < elem_size) {
		printf("%s: QMI message too short\n", sc->sc_dev.dv_xname);
		return -1;
	}

	/* The decoded element must fit into the output struct. */
	if (ei->offset + ei->elem_size > output_len) {
		printf("%s: QMI message element type 0x%x too large: %u\n",
		    sc->sc_dev.dv_xname, elem_type, ei->elem_size);
		return -1;
	}

	return 0;
}
5018 
5019 int
5020 qwx_qmi_decode_byte(void *output, const struct qmi_elem_info *ei, void *input)
5021 {
5022 	if (ei->elem_size != sizeof(uint8_t)) {
5023 		printf("%s: bad element size\n", __func__);
5024 		return -1;
5025 	}
5026 
5027 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5028 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5029 	memcpy(output, input, ei->elem_size);
5030 	return 0;
5031 }
5032 
5033 int
5034 qwx_qmi_decode_word(void *output, const struct qmi_elem_info *ei, void *input)
5035 {
5036 	if (ei->elem_size != sizeof(uint16_t)) {
5037 		printf("%s: bad element size\n", __func__);
5038 		return -1;
5039 	}
5040 
5041 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5042 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5043 	memcpy(output, input, ei->elem_size);
5044 	return 0;
5045 }
5046 
5047 int
5048 qwx_qmi_decode_dword(void *output, const struct qmi_elem_info *ei, void *input)
5049 {
5050 	if (ei->elem_size != sizeof(uint32_t)) {
5051 		printf("%s: bad element size\n", __func__);
5052 		return -1;
5053 	}
5054 
5055 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5056 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5057 	memcpy(output, input, ei->elem_size);
5058 	return 0;
5059 }
5060 
5061 int
5062 qwx_qmi_decode_qword(void *output, const struct qmi_elem_info *ei, void *input)
5063 {
5064 	if (ei->elem_size != sizeof(uint64_t)) {
5065 		printf("%s: bad element size\n", __func__);
5066 		return -1;
5067 	}
5068 
5069 	DNPRINTF(QWX_D_QMI, "%s: element 0x%x data type 0x%x size %u\n",
5070 	    __func__, ei->tlv_type, ei->data_type, ei->elem_size);
5071 	memcpy(output, input, ei->elem_size);
5072 	return 0;
5073 }
5074 
5075 int
5076 qwx_qmi_decode_datalen(struct qwx_softc *sc, size_t *used, uint32_t *datalen,
5077     void *output, size_t output_len, const struct qmi_elem_info *ei,
5078     uint8_t *input, uint16_t input_len)
5079 {
5080 	uint8_t *p = input;
5081 	size_t remain = input_len;
5082 
5083 	*datalen = 0;
5084 
5085 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
5086 	for (int i = 0; i < input_len; i++) {
5087 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
5088 	}
5089 	DNPRINTF(QWX_D_QMI, "\n");
5090 
5091 	if (remain < ei->elem_size) {
5092 		printf("%s: QMI message too short: remain=%zu elem_size=%u\n", __func__, remain, ei->elem_size);
5093 		return -1;
5094 	}
5095 
5096 	switch (ei->elem_size) {
5097 	case sizeof(uint8_t):
5098 		*datalen = p[0];
5099 		break;
5100 	case sizeof(uint16_t):
5101 		*datalen = p[0] | (p[1] << 8);
5102 		break;
5103 	default:
5104 		printf("%s: bad datalen element size %u\n",
5105 		    sc->sc_dev.dv_xname, ei->elem_size);
5106 		return -1;
5107 
5108 	}
5109 	*used = ei->elem_size;
5110 
5111 	if (ei->offset + sizeof(*datalen) > output_len) {
5112 		printf("%s: QMI message element type 0x%x too large\n",
5113 		    sc->sc_dev.dv_xname, ei->tlv_type);
5114 		return -1;
5115 	}
5116 	memcpy(output + ei->offset, datalen, sizeof(*datalen));
5117 	return 0;
5118 }
5119 
5120 int
5121 qwx_qmi_decode_string(struct qwx_softc *sc, size_t *used_total,
5122     void *output, size_t output_len, const struct qmi_elem_info *ei,
5123     uint8_t *input, uint16_t input_len, uint16_t elem_size, int nested)
5124 {
5125 	uint8_t *p = input;
5126 	uint16_t len;
5127 	size_t remain = input_len;
5128 
5129 	*used_total = 0;
5130 
5131 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
5132 	for (int i = 0; i < input_len; i++) {
5133 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
5134 	}
5135 	DNPRINTF(QWX_D_QMI, "\n");
5136 
5137 	if (nested) {
5138 		/* Strings nested in structs use an in-band length field. */
5139 		if (ei->elem_len <= 0xff) {
5140 			if (remain == 0) {
5141 				printf("%s: QMI string length header exceeds "
5142 				    "input buffer size\n", __func__);
5143 				return -1;
5144 			}
5145 			len = p[0];
5146 			p++;
5147 			(*used_total)++;
5148 			remain--;
5149 		} else {
5150 			if (remain < 2) {
5151 				printf("%s: QMI string length header exceeds "
5152 				    "input buffer size\n", __func__);
5153 				return -1;
5154 			}
5155 			len = p[0] | (p[1] << 8);
5156 			p += 2;
5157 			*used_total += 2;
5158 			remain -= 2;
5159 		}
5160 	} else
5161 		len = elem_size;
5162 
5163 	if (len > ei->elem_len) {
5164 		printf("%s: QMI string element of length %u exceeds "
5165 		    "maximum length %u\n", __func__, len, ei->elem_len);
5166 		return -1;
5167 	}
5168 	if (len > remain) {
5169 		printf("%s: QMI string element of length %u exceeds "
5170 		    "input buffer size %zu\n", __func__, len, remain);
5171 		return -1;
5172 	}
5173 	if (len > output_len) {
5174 		printf("%s: QMI string element of length %u exceeds "
5175 		    "output buffer size %zu\n", __func__, len, output_len);
5176 		return -1;
5177 	}
5178 
5179 	memcpy(output, p, len);
5180 
5181 	p = output;
5182 	p[len] = '\0';
5183 	DNPRINTF(QWX_D_QMI, "%s: string (len %u): %s\n", __func__, len, p);
5184 
5185 	*used_total += len;
5186 	return 0;
5187 }
5188 
/*
 * Decode a QMI struct element: walk the member descriptors in
 * struct_ei->ei_array and decode each member from 'input' into
 * 'output', until at least the struct's minimum encoded size has been
 * consumed or the descriptor array ends.  QMI_DATA_LEN members are
 * decoded first and announce the size of the variable-length member
 * which follows; nested strings carry in-band length fields.
 * *used_total receives the number of input bytes consumed.
 * 'nested' counts struct recursion depth; decoding fails beyond
 * three levels.  Returns 0 on success, -1 on malformed input.
 */
int
qwx_qmi_decode_struct(struct qwx_softc *sc, size_t *used_total,
    void *output, size_t output_len,
    const struct qmi_elem_info *struct_ei,
    uint8_t *input, uint16_t input_len,
    int nested)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	uint32_t min_size;
	uint8_t *p = input;
	size_t remain = input_len;
	size_t used = 0;

	*used_total = 0;

	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
	for (int i = 0; i < input_len; i++) {
		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
	}
	DNPRINTF(QWX_D_QMI, "\n");

	min_size = qwx_qmi_decode_min_elem_size(struct_ei, 0);
	DNPRINTF(QWX_D_QMI, "%s: minimum struct size: %u\n", __func__, min_size);
	while (*used_total < min_size && ei->data_type != QMI_EOTI) {
		if (remain == 0) {
			printf("%s: QMI message too short\n", __func__);
			return -1;
		}

		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen;

			/* Read the length of the member which follows. */
			used = 0;
			if (qwx_qmi_decode_datalen(sc, &used, &datalen,
			    output, output_len, ei, p, remain))
				return -1;
			DNPRINTF(QWX_D_QMI, "%s: datalen %u used %zu bytes\n",
			    __func__, datalen, used);
			p += used;
			remain -= used;
			*used_total += used;
			if (remain < datalen) {
				printf("%s: QMI message too short\n", __func__);
				return -1;
			}
			/* Advance to the member this length applies to. */
			ei++;
			DNPRINTF(QWX_D_QMI, "%s: datalen is for data_type=0x%x "
			    "tlv_type=0x%x elem_size=%u(0x%x) remain=%zu\n",
			    __func__, ei->data_type, ei->tlv_type,
			    ei->elem_size, ei->elem_size, remain);
			if (datalen == 0) {
				/* Empty member: skip past it entirely. */
				ei++;
				DNPRINTF(QWX_D_QMI,
				    "%s: skipped to data_type=0x%x "
				    "tlv_type=0x%x elem_size=%u(0x%x) "
				    "remain=%zu\n", __func__,
				    ei->data_type, ei->tlv_type,
				    ei->elem_size, ei->elem_size, remain);
				continue;
			}
		} else {
			if (remain < ei->elem_size) {
				printf("%s: QMI message too short\n",
				    __func__);
				return -1;
			}
		}

		/* The decoded member must fit into the output struct. */
		if (ei->offset + ei->elem_size > output_len) {
			printf("%s: QMI message struct member element "
			    "type 0x%x too large: %u\n", sc->sc_dev.dv_xname,
			    ei->tlv_type, ei->elem_size);
			return -1;
		}

		DNPRINTF(QWX_D_QMI,
		    "%s: decoding struct member element 0x%x with "
		    "data type %s (0x%x) size=%u(0x%x) remain=%zu\n", __func__,
		    ei->tlv_type, qmi_data_type_name[ei->data_type],
		    ei->data_type, ei->elem_size, ei->elem_size, remain);
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_decode_byte(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_2_BYTE:
		case QMI_SIGNED_2_BYTE_ENUM:
			if (qwx_qmi_decode_word(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_decode_dword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_decode_qword(output + ei->offset, ei, p))
				return -1;
			remain -= ei->elem_size;
			p += ei->elem_size;
			*used_total += ei->elem_size;
			break;
		case QMI_STRUCT:
			/* Structs may nest, but only up to three levels. */
			if (nested > 2) {
				printf("%s: QMI struct element data type 0x%x "
				    "is nested too deeply\n",
				    sc->sc_dev.dv_xname, ei->data_type);
				return -1;
			}
			used = 0;
			if (qwx_qmi_decode_struct(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, nested + 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		case QMI_STRING:
			/* Nested string: length is in-band (nested=1). */
			used = 0;
			if (qwx_qmi_decode_string(sc, &used,
			    output + ei->offset, output_len - ei->offset,
			    ei, p, remain, 0, 1))
				return -1;
			remain -= used;
			p += used;
			*used_total += used;
			break;
		default:
			printf("%s: unhandled QMI struct element "
			    "data type 0x%x\n", sc->sc_dev.dv_xname,
			    ei->data_type);
			return -1;
		}

		ei++;
		DNPRINTF(QWX_D_QMI, "%s: next ei 0x%x ei->data_type=0x%x\n",
		    __func__, ei->tlv_type, ei->data_type);
	}

	DNPRINTF(QWX_D_QMI, "%s: used_total=%zu ei->data_type=0x%x\n",
	    __func__, *used_total, ei->data_type);

	return 0;
}
5343 
5344 int
5345 qwx_qmi_decode_msg(struct qwx_softc *sc, void *output, size_t output_len,
5346     const struct qmi_elem_info *ei0, uint8_t *input, uint16_t input_len)
5347 {
5348 	uint8_t *p = input;
5349 	size_t remain = input_len, used;
5350 	const struct qmi_elem_info *ei = ei0;
5351 
5352 	memset(output, 0, output_len);
5353 
5354 	DNPRINTF(QWX_D_QMI, "%s: input: ", __func__);
5355 	for (int i = 0; i < input_len; i++) {
5356 		DNPRINTF(QWX_D_QMI, " %02x", input[i]);
5357 	}
5358 	DNPRINTF(QWX_D_QMI, "\n");
5359 
5360 	while (remain > 0 && ei->data_type != QMI_EOTI) {
5361 		uint32_t nelem = 1, i;
5362 		uint16_t datalen;
5363 
5364 		if (qwx_qmi_decode_tlv_hdr(sc, &ei, &datalen, output_len,
5365 		    ei0, p, remain))
5366 			return -1;
5367 
5368 		/* Skip unrecognized elements. */
5369 		if (ei->data_type == QMI_EOTI) {
5370 			p += 3 + datalen;
5371 			remain -= 3 + datalen;
5372 			ei = ei0;
5373 			continue;
5374 		}
5375 
5376 		/* Set 'valid' flag for optional fields in output struct. */
5377 		if (ei->data_type == QMI_OPT_FLAG) {
5378 			uint8_t *pvalid;
5379 
5380 			if (ei->offset + ei->elem_size > output_len) {
5381 				printf("%s: QMI message element type 0x%x "
5382 				    "too large: %u\n", sc->sc_dev.dv_xname,
5383 				    ei->tlv_type, ei->elem_size);
5384 			}
5385 
5386 			pvalid = (uint8_t *)output + ei->offset;
5387 			*pvalid = 1;
5388 
5389 			ei++;
5390 		}
5391 
5392 		p += 3;
5393 		remain -= 3;
5394 
5395 		if (ei->data_type == QMI_DATA_LEN) {
5396 			const struct qmi_elem_info *datalen_ei = ei;
5397 			uint8_t elem_type = ei->tlv_type;
5398 
5399 			/*
5400 			 * Size info in TLV header indicates the
5401 			 * total length of element data that follows.
5402 			 */
5403 			if (remain < datalen) {
5404 				printf("%s:%d QMI message too short\n",
5405 				    __func__, __LINE__);
5406 				return -1;
5407 			}
5408 
5409 			ei++;
5410 			DNPRINTF(QWX_D_QMI,
5411 			    "%s: next ei data_type=0x%x tlv_type=0x%x "
5412 			    "dst elem_size=%u(0x%x) src total size=%u "
5413 			    "remain=%zu\n", __func__, ei->data_type,
5414 			    ei->tlv_type, ei->elem_size, ei->elem_size,
5415 			    datalen, remain);
5416 
5417 			/* Related EIs must have the same type. */
5418 			if (ei->tlv_type != elem_type) {
5419 				printf("%s: unexepected element type 0x%x; "
5420 				    "expected 0x%x\n", __func__,
5421 				    ei->tlv_type, elem_type);
5422 				return -1;
5423 			}
5424 
5425 			if (datalen == 0) {
5426 				if (ei->data_type != QMI_EOTI)
5427 					ei++;
5428 				continue;
5429 			}
5430 
5431 			/*
5432 			 * For variable length arrays a one- or two-byte
5433 			 * value follows the header, indicating the number
5434 			 * of elements in the array.
5435 			 */
5436 			if (ei->array_type == VAR_LEN_ARRAY) {
5437 				DNPRINTF(QWX_D_QMI,
5438 				    "%s: variable length array\n", __func__);
5439 				used = 0;
5440 				if (qwx_qmi_decode_datalen(sc, &used, &nelem,
5441 				    output, output_len, datalen_ei, p, remain))
5442 					return -1;
5443 				p += used;
5444 				remain -= used;
5445 				/*
5446 				 * Previous datalen value included the total
5447 				 * amount of bytes following the DATALEN TLV
5448 				 * header.
5449 				 */
5450 				datalen -= used;
5451 
5452 				if (nelem == 0) {
5453 					if (ei->data_type != QMI_EOTI)
5454 						ei++;
5455 					continue;
5456 				}
5457 
5458 				DNPRINTF(QWX_D_QMI,
5459 				    "%s: datalen %u used %zu bytes\n",
5460 				    __func__, nelem, used);
5461 
5462 				DNPRINTF(QWX_D_QMI,
5463 				    "%s: decoding %u array elements with "
5464 				    "src size %u dest size %u\n", __func__,
5465 				    nelem, datalen / nelem, ei->elem_size);
5466 			}
5467 		}
5468 
5469 		if (remain < datalen) {
5470 			printf("%s:%d QMI message too short: remain=%zu, "
5471 			    "datalen=%u\n", __func__, __LINE__, remain,
5472 			    datalen);
5473 			return -1;
5474 		}
5475 		if (output_len < nelem * ei->elem_size) {
5476 			printf("%s: QMI output buffer too short: remain=%zu "
5477 			    "nelem=%u ei->elem_size=%u\n", __func__, remain,
5478 			    nelem, ei->elem_size);
5479 			return -1;
5480 		}
5481 
5482 		for (i = 0; i < nelem && remain > 0; i++) {
5483 			size_t outoff;
5484 
5485 			outoff = ei->offset + (ei->elem_size * i);
5486 			switch (ei->data_type) {
5487 			case QMI_STRUCT:
5488 				used = 0;
5489 				if (qwx_qmi_decode_struct(sc, &used,
5490 				    output + outoff, output_len - outoff,
5491 				    ei, p, remain, 0))
5492 					return -1;
5493 				remain -= used;
5494 				p += used;
5495 				if (used != datalen) {
5496 					DNPRINTF(QWX_D_QMI,
5497 					    "%s struct used only %zu bytes "
5498 					    "of %u input bytes\n", __func__,
5499 					    used, datalen);
5500 				} else {
5501 					DNPRINTF(QWX_D_QMI,
5502 					    "%s: struct used %zu bytes "
5503 					    "of input\n", __func__, used);
5504 				}
5505 				break;
5506 			case QMI_STRING:
5507 				used = 0;
5508 				if (qwx_qmi_decode_string(sc, &used,
5509 				    output + outoff, output_len - outoff,
5510 				    ei, p, remain, datalen, 0))
5511 					return -1;
5512 				remain -= used;
5513 				p += used;
5514 				if (used != datalen) {
5515 					DNPRINTF(QWX_D_QMI,
5516 					    "%s: string used only %zu bytes "
5517 					    "of %u input bytes\n", __func__,
5518 					    used, datalen);
5519 				} else {
5520 					DNPRINTF(QWX_D_QMI,
5521 					    "%s: string used %zu bytes "
5522 					    "of input\n", __func__, used);
5523 				}
5524 				break;
5525 			case QMI_UNSIGNED_1_BYTE:
5526 				if (remain < ei->elem_size) {
5527 					printf("%s: QMI message too "
5528 					    "short\n", __func__);
5529 					return -1;
5530 				}
5531 				if (qwx_qmi_decode_byte(output + outoff,
5532 				    ei, p))
5533 					return -1;
5534 				remain -= ei->elem_size;
5535 				p += ei->elem_size;
5536 				break;
5537 			case QMI_UNSIGNED_2_BYTE:
5538 			case QMI_SIGNED_2_BYTE_ENUM:
5539 				if (remain < ei->elem_size) {
5540 					printf("%s: QMI message too "
5541 					    "short\n", __func__);
5542 					return -1;
5543 				}
5544 				if (qwx_qmi_decode_word(output + outoff,
5545 				    ei, p))
5546 					return -1;
5547 				remain -= ei->elem_size;
5548 				p += ei->elem_size;
5549 				break;
5550 			case QMI_UNSIGNED_4_BYTE:
5551 			case QMI_SIGNED_4_BYTE_ENUM:
5552 				if (remain < ei->elem_size) {
5553 					printf("%s: QMI message too "
5554 					    "short\n", __func__);
5555 					return -1;
5556 				}
5557 				if (qwx_qmi_decode_dword(output + outoff,
5558 				    ei, p))
5559 					return -1;
5560 				remain -= ei->elem_size;
5561 				p += ei->elem_size;
5562 				break;
5563 			case QMI_UNSIGNED_8_BYTE:
5564 				if (remain < ei->elem_size) {
5565 					printf("%s: QMI message too "
5566 					    "short 4\n", __func__);
5567 					return -1;
5568 				}
5569 				if (qwx_qmi_decode_qword(output + outoff,
5570 				    ei, p))
5571 					return -1;
5572 				remain -= ei->elem_size;
5573 				p += ei->elem_size;
5574 				break;
5575 			default:
5576 				printf("%s: unhandled QMI message element "
5577 				    "data type 0x%x\n",
5578 				    sc->sc_dev.dv_xname, ei->data_type);
5579 				return -1;
5580 			}
5581 		}
5582 
5583 		ei++;
5584 		DNPRINTF(QWX_D_QMI,
5585 		    "%s: next ei 0x%x ei->data_type=0x%x remain=%zu\n",
5586 		    __func__, ei->tlv_type, ei->data_type, remain);
5587 
5588 		DNPRINTF(QWX_D_QMI, "%s: remaining input: ", __func__);
5589 		for (int i = 0; i < remain; i++)
5590 			DNPRINTF(QWX_D_QMI, " %02x", p[i]);
5591 		DNPRINTF(QWX_D_QMI, "\n");
5592 	}
5593 
5594 	return 0;
5595 }
5596 
5597 void
5598 qwx_qmi_recv_wlanfw_ind_register_req_v1(struct qwx_softc *sc, struct mbuf *m,
5599     uint16_t txn_id, uint16_t msg_len)
5600 {
5601 	struct qmi_wlanfw_ind_register_resp_msg_v01 resp;
5602 	const struct qmi_elem_info *ei;
5603 	uint8_t *msg = mtod(m, uint8_t *);
5604 
5605 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5606 
5607 	ei = qmi_wlanfw_ind_register_resp_msg_v01_ei;
5608 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5609 		return;
5610 
5611 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5612 	    __func__, le16toh(resp.resp.result));
5613 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5614 	    __func__, le16toh(resp.resp.error));
5615 	DNPRINTF(QWX_D_QMI, "%s: resp.fw_status=0x%llx\n",
5616 	   __func__, le64toh(resp.fw_status));
5617 
5618 	sc->qmi_resp.result = le16toh(resp.resp.result);
5619 	sc->qmi_resp.error = le16toh(resp.resp.error);
5620 	wakeup(&sc->qmi_resp);
5621 }
5622 
5623 void
5624 qwx_qmi_recv_wlanfw_host_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5625     uint16_t txn_id, uint16_t msg_len)
5626 {
5627 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
5628 	const struct qmi_elem_info *ei;
5629 	uint8_t *msg = mtod(m, uint8_t *);
5630 
5631 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5632 
5633 	ei = qmi_wlanfw_host_cap_resp_msg_v01_ei;
5634 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5635 		return;
5636 
5637 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5638 	    __func__, le16toh(resp.resp.result));
5639 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5640 	    __func__, le16toh(resp.resp.error));
5641 
5642 	sc->qmi_resp.result = le16toh(resp.resp.result);
5643 	sc->qmi_resp.error = le16toh(resp.resp.error);
5644 	wakeup(&sc->qmi_resp);
5645 }
5646 
5647 void
5648 qwx_qmi_recv_wlanfw_respond_mem_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5649     uint16_t txn_id, uint16_t msg_len)
5650 {
5651 	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
5652 	const struct qmi_elem_info *ei;
5653 	uint8_t *msg = mtod(m, uint8_t *);
5654 
5655 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5656 
5657 	ei = qmi_wlanfw_respond_mem_resp_msg_v01_ei;
5658 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5659 		return;
5660 
5661 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5662 	    __func__, le16toh(resp.resp.result));
5663 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5664 	    __func__, le16toh(resp.resp.error));
5665 
5666 	sc->qmi_resp.result = le16toh(resp.resp.result);
5667 	sc->qmi_resp.error = le16toh(resp.resp.error);
5668 	wakeup(&sc->qmi_resp);
5669 }
5670 
5671 void
5672 qwx_qmi_recv_wlanfw_cap_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5673     uint16_t txn_id, uint16_t msg_len)
5674 {
5675 	struct qmi_wlanfw_cap_resp_msg_v01 resp;
5676 	const struct qmi_elem_info *ei;
5677 	uint8_t *msg = mtod(m, uint8_t *);
5678 
5679 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5680 
5681 	memset(&resp, 0, sizeof(resp));
5682 
5683 	ei = qmi_wlanfw_cap_resp_msg_v01_ei;
5684 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5685 		return;
5686 
5687 	if (resp.chip_info_valid) {
5688 		sc->qmi_target.chip_id = resp.chip_info.chip_id;
5689 		sc->qmi_target.chip_family = resp.chip_info.chip_family;
5690 	}
5691 
5692 	if (resp.board_info_valid)
5693 		sc->qmi_target.board_id = resp.board_info.board_id;
5694 	else
5695 		sc->qmi_target.board_id = 0xFF;
5696 
5697 	if (resp.soc_info_valid)
5698 		sc->qmi_target.soc_id = resp.soc_info.soc_id;
5699 
5700 	if (resp.fw_version_info_valid) {
5701 		sc->qmi_target.fw_version = resp.fw_version_info.fw_version;
5702 		strlcpy(sc->qmi_target.fw_build_timestamp,
5703 			resp.fw_version_info.fw_build_timestamp,
5704 			sizeof(sc->qmi_target.fw_build_timestamp));
5705 	}
5706 
5707 	if (resp.fw_build_id_valid)
5708 		strlcpy(sc->qmi_target.fw_build_id, resp.fw_build_id,
5709 			sizeof(sc->qmi_target.fw_build_id));
5710 
5711 	if (resp.eeprom_read_timeout_valid) {
5712 		sc->qmi_target.eeprom_caldata = resp.eeprom_read_timeout;
5713 		DNPRINTF(QWX_D_QMI,
5714 		    "%s: qmi cal data supported from eeprom\n", __func__);
5715 	}
5716 
5717 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5718 	    __func__, le16toh(resp.resp.result));
5719 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5720 	    __func__, le16toh(resp.resp.error));
5721 
5722 	sc->qmi_resp.result = le16toh(resp.resp.result);
5723 	sc->qmi_resp.error = le16toh(resp.resp.error);
5724 	wakeup(&sc->qmi_resp);
5725 }
5726 
5727 void
5728 qwx_qmi_recv_wlanfw_bdf_download_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5729     uint16_t txn_id, uint16_t msg_len)
5730 {
5731 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
5732 	const struct qmi_elem_info *ei;
5733 	uint8_t *msg = mtod(m, uint8_t *);
5734 
5735 	memset(&resp, 0, sizeof(resp));
5736 
5737 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5738 
5739 	ei = qmi_wlanfw_bdf_download_resp_msg_v01_ei;
5740 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5741 		return;
5742 
5743 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5744 	    __func__, le16toh(resp.resp.result));
5745 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5746 	    __func__, le16toh(resp.resp.error));
5747 
5748 	sc->qmi_resp.result = le16toh(resp.resp.result);
5749 	sc->qmi_resp.error = le16toh(resp.resp.error);
5750 	wakeup(&sc->qmi_resp);
5751 }
5752 
5753 void
5754 qwx_qmi_recv_wlanfw_m3_info_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5755     uint16_t txn_id, uint16_t msg_len)
5756 {
5757 	struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
5758 	const struct qmi_elem_info *ei;
5759 	uint8_t *msg = mtod(m, uint8_t *);
5760 
5761 	memset(&resp, 0, sizeof(resp));
5762 
5763 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5764 
5765 	ei = qmi_wlanfw_m3_info_resp_msg_v01_ei;
5766 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5767 		return;
5768 
5769 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5770 	    __func__, le16toh(resp.resp.result));
5771 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5772 	    __func__, le16toh(resp.resp.error));
5773 
5774 	sc->qmi_resp.result = le16toh(resp.resp.result);
5775 	sc->qmi_resp.error = le16toh(resp.resp.error);
5776 	wakeup(&sc->qmi_resp);
5777 }
5778 
5779 void
5780 qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5781     uint16_t txn_id, uint16_t msg_len)
5782 {
5783 	struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp;
5784 	const struct qmi_elem_info *ei;
5785 	uint8_t *msg = mtod(m, uint8_t *);
5786 
5787 	memset(&resp, 0, sizeof(resp));
5788 
5789 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5790 
5791 	ei = qmi_wlanfw_wlan_ini_resp_msg_v01_ei;
5792 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5793 		return;
5794 
5795 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5796 	    __func__, le16toh(resp.resp.result));
5797 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5798 	    __func__, le16toh(resp.resp.error));
5799 
5800 	sc->qmi_resp.result = le16toh(resp.resp.result);
5801 	sc->qmi_resp.error = le16toh(resp.resp.error);
5802 	wakeup(&sc->qmi_resp);
5803 }
5804 
5805 void
5806 qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5807     uint16_t txn_id, uint16_t msg_len)
5808 {
5809 	struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
5810 	const struct qmi_elem_info *ei;
5811 	uint8_t *msg = mtod(m, uint8_t *);
5812 
5813 	memset(&resp, 0, sizeof(resp));
5814 
5815 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5816 
5817 	ei = qmi_wlanfw_wlan_cfg_resp_msg_v01_ei;
5818 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5819 		return;
5820 
5821 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5822 	    __func__, le16toh(resp.resp.result));
5823 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5824 	    __func__, le16toh(resp.resp.error));
5825 
5826 	sc->qmi_resp.result = le16toh(resp.resp.result);
5827 	sc->qmi_resp.error = le16toh(resp.resp.error);
5828 	wakeup(&sc->qmi_resp);
5829 }
5830 
5831 void
5832 qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(struct qwx_softc *sc, struct mbuf *m,
5833     uint16_t txn_id, uint16_t msg_len)
5834 {
5835 	struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
5836 	const struct qmi_elem_info *ei;
5837 	uint8_t *msg = mtod(m, uint8_t *);
5838 
5839 	memset(&resp, 0, sizeof(resp));
5840 
5841 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5842 
5843 	ei = qmi_wlanfw_wlan_mode_resp_msg_v01_ei;
5844 	if (qwx_qmi_decode_msg(sc, &resp, sizeof(resp), ei, msg, msg_len))
5845 		return;
5846 
5847 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.result=0x%x\n",
5848 	    __func__, le16toh(resp.resp.result));
5849 	DNPRINTF(QWX_D_QMI, "%s: resp.resp.error=0x%x\n",
5850 	    __func__, le16toh(resp.resp.error));
5851 
5852 	sc->qmi_resp.result = le16toh(resp.resp.result);
5853 	sc->qmi_resp.error = le16toh(resp.resp.error);
5854 	wakeup(&sc->qmi_resp);
5855 }
5856 
5857 void
5858 qwx_qmi_recv_response(struct qwx_softc *sc, struct mbuf *m,
5859     uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
5860 {
5861 	switch (msg_id) {
5862 	case QMI_WLANFW_IND_REGISTER_REQ_V01:
5863 		qwx_qmi_recv_wlanfw_ind_register_req_v1(sc, m, txn_id, msg_len);
5864 		break;
5865 	case QMI_WLFW_HOST_CAP_RESP_V01:
5866 		qwx_qmi_recv_wlanfw_host_cap_resp_v1(sc, m, txn_id, msg_len);
5867 		break;
5868 	case QMI_WLFW_RESPOND_MEM_RESP_V01:
5869 		qwx_qmi_recv_wlanfw_respond_mem_resp_v1(sc, m, txn_id, msg_len);
5870 		break;
5871 	case QMI_WLANFW_CAP_RESP_V01:
5872 		qwx_qmi_recv_wlanfw_cap_resp_v1(sc, m, txn_id, msg_len);
5873 		break;
5874 	case QMI_WLANFW_BDF_DOWNLOAD_RESP_V01:
5875 		qwx_qmi_recv_wlanfw_bdf_download_resp_v1(sc, m, txn_id,
5876 		    msg_len);
5877 		break;
5878 	case QMI_WLANFW_M3_INFO_RESP_V01:
5879 		qwx_qmi_recv_wlanfw_m3_info_resp_v1(sc, m, txn_id, msg_len);
5880 		break;
5881 	case QMI_WLANFW_WLAN_INI_RESP_V01:
5882 		qwx_qmi_recv_wlanfw_wlan_ini_resp_v1(sc, m, txn_id, msg_len);
5883 		break;
5884 	case QMI_WLANFW_WLAN_CFG_RESP_V01:
5885 		qwx_qmi_recv_wlanfw_wlan_cfg_resp_v1(sc, m, txn_id, msg_len);
5886 		break;
5887 	case QMI_WLANFW_WLAN_MODE_RESP_V01:
5888 		qwx_qmi_recv_wlanfw_wlan_mode_resp_v1(sc, m, txn_id, msg_len);
5889 		break;
5890 	default:
5891 		printf("%s: unhandled QMI response 0x%x\n",
5892 		    sc->sc_dev.dv_xname, msg_id);
5893 		break;
5894 	}
5895 }
5896 
5897 void
5898 qwx_qmi_recv_wlanfw_request_mem_indication(struct qwx_softc *sc, struct mbuf *m,
5899     uint16_t txn_id, uint16_t msg_len)
5900 {
5901 	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind = NULL;
5902 	const struct qmi_elem_info *ei;
5903 	uint8_t *msg = mtod(m, uint8_t *);
5904 
5905 	DNPRINTF(QWX_D_QMI, "%s\n", __func__);
5906 
5907 	/* This structure is too large for the stack. */
5908 	ind = malloc(sizeof(*ind), M_DEVBUF, M_NOWAIT | M_ZERO);
5909 	if (ind == NULL)
5910 		return;
5911 
5912 	ei = qmi_wlanfw_request_mem_ind_msg_v01_ei;
5913 	if (qwx_qmi_decode_msg(sc, ind, sizeof(*ind), ei, msg, msg_len)) {
5914 		free(ind, M_DEVBUF, sizeof(*ind));
5915 		return;
5916 	}
5917 
5918 	/* Handled by qwx_qmi_mem_seg_send() in process context */
5919 	sc->sc_req_mem_ind = ind;
5920 	wakeup(&sc->sc_req_mem_ind);
5921 }
5922 
5923 void
5924 qwx_qmi_recv_indication(struct qwx_softc *sc, struct mbuf *m,
5925     uint16_t txn_id, uint16_t msg_id, uint16_t msg_len)
5926 {
5927 	switch (msg_id) {
5928 	case QMI_WLFW_REQUEST_MEM_IND_V01:
5929 		qwx_qmi_recv_wlanfw_request_mem_indication(sc, m,
5930 		    txn_id, msg_len);
5931 		break;
5932 	case QMI_WLFW_FW_MEM_READY_IND_V01:
5933 		sc->fwmem_ready = 1;
5934 		wakeup(&sc->fwmem_ready);
5935 		break;
5936 	case QMI_WLFW_FW_INIT_DONE_IND_V01:
5937 		sc->fw_init_done = 1;
5938 		wakeup(&sc->fw_init_done);
5939 		break;
5940 	default:
5941 		printf("%s: unhandled QMI indication 0x%x\n",
5942 		    sc->sc_dev.dv_xname, msg_id);
5943 		break;
5944 	}
5945 }
5946 
5947 void
5948 qwx_qrtr_recv_data(struct qwx_softc *sc, struct mbuf *m, size_t size)
5949 {
5950 	struct qmi_header hdr;
5951 	uint16_t txn_id, msg_id, msg_len;
5952 
5953 	if (size < sizeof(hdr)) {
5954 		printf("%s: QMI message too short: %zu bytes\n",
5955 		    sc->sc_dev.dv_xname, size);
5956 		return;
5957 	}
5958 
5959 	memcpy(&hdr, mtod(m, void *), sizeof(hdr));
5960 
5961 	DNPRINTF(QWX_D_QMI,
5962 	    "%s: QMI message type=0x%x txn=0x%x id=0x%x len=%u\n",
5963 	    __func__, hdr.type, le16toh(hdr.txn_id),
5964 	    le16toh(hdr.msg_id), le16toh(hdr.msg_len));
5965 
5966 	txn_id = le16toh(hdr.txn_id);
5967 	msg_id = le16toh(hdr.msg_id);
5968 	msg_len = le16toh(hdr.msg_len);
5969 	if (sizeof(hdr) + msg_len != size) {
5970 		printf("%s: bad length in QMI message header: %u\n",
5971 		    sc->sc_dev.dv_xname, msg_len);
5972 		return;
5973 	}
5974 
5975 	switch (hdr.type) {
5976 	case QMI_RESPONSE:
5977 		m_adj(m, sizeof(hdr));
5978 		qwx_qmi_recv_response(sc, m, txn_id, msg_id, msg_len);
5979 		break;
5980 	case QMI_INDICATION:
5981 		m_adj(m, sizeof(hdr));
5982 		qwx_qmi_recv_indication(sc, m, txn_id, msg_id, msg_len);
5983 		break;
5984 	default:
5985 		printf("%s: unhandled QMI message type %u\n",
5986 		    sc->sc_dev.dv_xname, hdr.type);
5987 		break;
5988 	}
5989 }
5990 
/*
 * Send a QRTR HELLO control message to the firmware, answering the
 * HELLO the firmware sent us (see qwx_qrtr_recv_msg()).
 * Returns 0 on success or an errno value.
 */
int
qwx_qrtr_say_hello(struct qwx_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	/* QRTR transfers are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_HELLO);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0xfffffffeU); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_HELLO);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* On success the transport takes ownership of the mbuf;
	 * it is only freed here on error. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
6054 
/*
 * Send a QRTR RESUME_TX control message, confirming reception of a
 * message on which the peer had set the confirm_rx flag (flow control).
 * Returns 0 on success or an errno value.
 */
int
qwx_qrtr_resume_tx(struct qwx_softc *sc)
{
	struct qrtr_hdr_v1 hdr;
	struct qrtr_ctrl_pkt pkt;
	struct mbuf *m;
	size_t totlen, padlen;
	int err;

	/* QRTR transfers are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + sizeof(pkt);
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_RESUME_TX);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
	hdr.size = htole32(sizeof(pkt));

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	/* The control packet identifies which client may resume. */
	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
	pkt.client.node = htole32(0x01);
	pkt.client.port = htole32(0x4000);

	err = m_copyback(m, sizeof(hdr), sizeof(pkt), &pkt, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* On success the transport takes ownership of the mbuf;
	 * it is only freed here on error. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	return err;
}
6120 
6121 void
6122 qwx_qrtr_recv_msg(struct qwx_softc *sc, struct mbuf *m)
6123 {
6124 	struct qrtr_hdr_v1 *v1 = mtod(m, struct qrtr_hdr_v1 *);
6125 	struct qrtr_hdr_v2 *v2 = mtod(m, struct qrtr_hdr_v2 *);
6126 	struct qrtr_ctrl_pkt *pkt;
6127 	uint32_t type, size, hdrsize;
6128 	uint8_t ver, confirm_rx;
6129 
6130 	ver = *mtod(m, uint8_t *);
6131 	switch (ver) {
6132 	case QRTR_PROTO_VER_1:
6133 		DNPRINTF(QWX_D_QMI,
6134 		    "%s: type %u size %u confirm_rx %u\n", __func__,
6135 		    letoh32(v1->type), letoh32(v1->size),
6136 		    letoh32(v1->confirm_rx));
6137 		type = letoh32(v1->type);
6138 		size = letoh32(v1->size);
6139 		confirm_rx = !!letoh32(v1->confirm_rx);
6140 		hdrsize = sizeof(*v1);
6141 		break;
6142 	case QRTR_PROTO_VER_2:
6143 		DNPRINTF(QWX_D_QMI,
6144 		    "%s: type %u size %u confirm_rx %u\n", __func__,
6145 		    v2->type, letoh32(v2->size),
6146 		    !!(v2->flags & QRTR_FLAGS_CONFIRM_RX));
6147 		type = v2->type;
6148 		size = letoh32(v2->size);
6149 		confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
6150 		hdrsize = sizeof(*v2);
6151 		break;
6152 	default:
6153 		printf("%s: unsupported qrtr version %u\n",
6154 		    sc->sc_dev.dv_xname, ver);
6155 		return;
6156 	}
6157 
6158 	if (size > m->m_pkthdr.len) {
6159 		printf("%s: bad size in qrtr message header: %u\n",
6160 		    sc->sc_dev.dv_xname, size);
6161 		return;
6162 	}
6163 
6164 	switch (type) {
6165 	case QRTR_TYPE_DATA:
6166 		m_adj(m, hdrsize);
6167 		qwx_qrtr_recv_data(sc, m, size);
6168 		break;
6169 	case QRTR_TYPE_HELLO:
6170 		qwx_qrtr_say_hello(sc);
6171 		break;
6172 	case QRTR_TYPE_NEW_SERVER:
6173 		m_adj(m, hdrsize);
6174 		pkt = mtod(m, struct qrtr_ctrl_pkt *);
6175 		sc->qrtr_server.service = le32toh(pkt->server.service);
6176 		sc->qrtr_server.instance = le32toh(pkt->server.instance);
6177 		sc->qrtr_server.node = le32toh(pkt->server.node);
6178 		sc->qrtr_server.port = le32toh(pkt->server.port);
6179 		DNPRINTF(QWX_D_QMI,
6180 		    "%s: new server: service=0x%x instance=0x%x node=0x%x "
6181 		    "port=0x%x\n", __func__, sc->qrtr_server.service,
6182 		    sc->qrtr_server.instance,
6183 		    sc->qrtr_server.node, sc->qrtr_server.port);
6184 		wakeup(&sc->qrtr_server);
6185 		break;
6186 	default:
6187 		printf("%s: unhandled qrtr type %u\n",
6188 		    sc->sc_dev.dv_xname, type);
6189 		return;
6190 	}
6191 
6192 	if (confirm_rx)
6193 		qwx_qrtr_resume_tx(sc);
6194 }
6195 
// Not needed because we don't implement QMI as a network service.
6197 #define qwx_qmi_init_service(sc)	(0)
6198 #define qwx_qmi_deinit_service(sc)	(0)
6199 
6200 int
6201 qwx_qmi_encode_datalen(uint8_t *p, uint32_t *datalen,
6202     const struct qmi_elem_info *ei, void *input)
6203 {
6204 	memcpy(datalen, input + ei->offset, sizeof(uint32_t));
6205 
6206 	if (ei->elem_size == sizeof(uint8_t)) {
6207 		p[0] = (*datalen & 0xff);
6208 	} else if (ei->elem_size == sizeof(uint16_t)) {
6209 		p[0] = (*datalen & 0xff);
6210 		p[1] = (*datalen >> 8) & 0xff;
6211 	} else {
6212 		printf("%s: bad element size\n", __func__);
6213 		return -1;
6214 	}
6215 
6216 	return 0;
6217 }
6218 
6219 int
6220 qwx_qmi_encode_byte(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6221     int i)
6222 {
6223 	if (ei->elem_size != sizeof(uint8_t)) {
6224 		printf("%s: bad element size\n", __func__);
6225 		return -1;
6226 	}
6227 
6228 	if (p == NULL)
6229 		return 0;
6230 
6231 	memcpy(p, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6232 	return 0;
6233 }
6234 
6235 int
6236 qwx_qmi_encode_word(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6237     int i)
6238 {
6239 	uint16_t val;
6240 
6241 	if (ei->elem_size != sizeof(val)) {
6242 		printf("%s: bad element size\n", __func__);
6243 		return -1;
6244 	}
6245 
6246 	if (p == NULL)
6247 		return 0;
6248 
6249 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6250 	val = htole16(val);
6251 	memcpy(p, &val, sizeof(val));
6252 	return 0;
6253 }
6254 
6255 int
6256 qwx_qmi_encode_dword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6257     int i)
6258 {
6259 	uint32_t val;
6260 
6261 	if (ei->elem_size != sizeof(val)) {
6262 		printf("%s: bad element size\n", __func__);
6263 		return -1;
6264 	}
6265 
6266 	if (p == NULL)
6267 		return 0;
6268 
6269 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6270 	val = htole32(val);
6271 	memcpy(p, &val, sizeof(val));
6272 	return 0;
6273 }
6274 
6275 int
6276 qwx_qmi_encode_qword(uint8_t *p, const struct qmi_elem_info *ei, void *input,
6277     int i)
6278 {
6279 	uint64_t val;
6280 
6281 	if (ei->elem_size != sizeof(val)) {
6282 		printf("%s: bad element size\n", __func__);
6283 		return -1;
6284 	}
6285 
6286 	if (p == NULL)
6287 		return 0;
6288 
6289 	memcpy(&val, input + ei->offset + (i * ei->elem_size), ei->elem_size);
6290 	val = htole64(val);
6291 	memcpy(p, &val, sizeof(val));
6292 	return 0;
6293 }
6294 
/*
 * Encode the members of a QMI struct element described by struct_ei
 * into "p", walking the nested element-info array until QMI_EOTI.
 * If p is NULL, only *encoded_len is computed (size-only pass).
 * Returns 0 on success, -1 on error.
 */
int
qwx_qmi_encode_struct(uint8_t *p, size_t *encoded_len,
    const struct qmi_elem_info *struct_ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei = struct_ei->ei_array;
	size_t remain = input_len;

	*encoded_len = 0;

	while (ei->data_type != QMI_EOTI) {
		/* An OPT_FLAG byte in "input" gates the next element. */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				return -1;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			return -1;
		}

		/* The helpers tolerate p == NULL for the size-only pass. */
		switch (ei->data_type) {
		case QMI_UNSIGNED_1_BYTE:
			if (qwx_qmi_encode_byte(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_2_BYTE:
			if (qwx_qmi_encode_word(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_4_BYTE:
		case QMI_SIGNED_4_BYTE_ENUM:
			if (qwx_qmi_encode_dword(p, ei, input, 0))
				return -1;
			break;
		case QMI_UNSIGNED_8_BYTE:
			if (qwx_qmi_encode_qword(p, ei, input, 0))
				return -1;
			break;
		default:
			printf("%s: unhandled QMI struct element type %d\n",
			    __func__, ei->data_type);
			return -1;
		}

		remain -= ei->elem_size;
		if (p != NULL)
			p += ei->elem_size;
		*encoded_len += ei->elem_size;
		ei++;
	}

	return 0;
}
6364 
6365 int
6366 qwx_qmi_encode_string(uint8_t *p, size_t *encoded_len,
6367     const struct qmi_elem_info *string_ei, void *input, size_t input_len)
6368 {
6369 	*encoded_len = strnlen(input, input_len);
6370 	if (*encoded_len > string_ei->elem_len) {
6371 		printf("%s: QMI message buffer too short\n", __func__);
6372 		return -1;
6373 	}
6374 
6375 	if (p)
6376 		memcpy(p, input, *encoded_len);
6377 
6378 	return 0;
6379 }
6380 
/*
 * Encode a QMI message into a newly malloc'd buffer.
 *
 * The message is encoded in two passes over the element-info array:
 * the first pass computes the exact encoded size, the second pass
 * fills in the buffer. Each encoded element is a TLV: 1 byte of
 * tlv_type plus a 2-byte little-endian length, followed by the value.
 * A qmi_header is prepended. On success *encoded_msg/*encoded_len are
 * set (caller frees with free(9)) and *txn_id is incremented.
 * Returns 0 on success, ENOMEM or -1 on failure.
 *
 * NOTE(review): the msg_len argument (the per-message _MAX_LEN) is
 * currently unused here; sizes derive from ei and input_len — confirm
 * this is intentional.
 */
int
qwx_qmi_encode_msg(uint8_t **encoded_msg, size_t *encoded_len, int type,
    uint16_t *txn_id, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *input, size_t input_len)
{
	const struct qmi_elem_info *ei0 = ei;
	struct qmi_header hdr;
	size_t remain;
	uint8_t *p, *op;

	*encoded_msg = NULL;
	*encoded_len = 0;

	/* First pass: Determine length of encoded message. */
	while (ei->data_type != QMI_EOTI) {
		int nelem = 1, i;

		if (ei->offset + ei->elem_size > input_len) {
			printf("%s: bad input buffer offset at element 0x%x "
			    "data type 0x%x\n",
			    __func__, ei->tlv_type, ei->data_type);
			goto err;
		}

		/*
		 * OPT_FLAG determines whether the next element
		 * should be considered for encoding.
		 */
		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional element\n", __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		*encoded_len += 3; /* type, length */
		if (ei->data_type == QMI_DATA_LEN) {
			uint32_t datalen = 0;
			uint8_t dummy[2];

			/* Size-only: write the length into a scratch buffer. */
			if (qwx_qmi_encode_datalen(dummy, &datalen, ei, input))
				goto err;
			*encoded_len += ei->elem_size;
			ei++;
			if (ei->array_type != VAR_LEN_ARRAY) {
				printf("%s: data len not for a var array\n",
				    __func__);
				goto err;
			}
			nelem = datalen;
			if (ei->data_type == QMI_STRUCT) {
				/* Sum the encoded size of each array member. */
				for (i = 0; i < nelem; i++) {
					size_t encoded_struct_len = 0;
					size_t inoff = ei->offset + (i * ei->elem_size);

					if (qwx_qmi_encode_struct(NULL,
					    &encoded_struct_len, ei,
					    input + inoff, input_len - inoff))
						goto err;

					*encoded_len += encoded_struct_len;
				}
			} else
				*encoded_len += nelem * ei->elem_size;
			ei++;
		} else if (ei->data_type == QMI_STRING) {
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset;

			if (qwx_qmi_encode_string(NULL,
			    &encoded_string_len, ei,
			    input + inoff, input_len - inoff))
				goto err;
			*encoded_len += encoded_string_len;
			ei++;
		} else {
			*encoded_len += ei->elem_size;
			ei++;
		}
	}

	*encoded_len += sizeof(hdr);
	*encoded_msg = malloc(*encoded_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*encoded_msg == NULL)
		return ENOMEM;

	/* Header fields are little-endian on the wire. */
	hdr.type = type;
	hdr.txn_id = htole16(*txn_id);
	hdr.msg_id = htole16(msg_id);
	hdr.msg_len = htole16(*encoded_len - sizeof(hdr));
	memcpy(*encoded_msg, &hdr, sizeof(hdr));

	/* Second pass: Encode the message. */
	ei = ei0;
	p = *encoded_msg + sizeof(hdr);
	remain = *encoded_len - sizeof(hdr);
	while (ei->data_type != QMI_EOTI) {
		uint32_t datalen = 0;
		int nelem = 1, i;

		if (ei->data_type == QMI_OPT_FLAG) {
			uint8_t do_encode, tlv_type;

			memcpy(&do_encode, input + ei->offset, sizeof(uint8_t));
			ei++; /* Advance to element we might have to encode. */
			if (ei->data_type == QMI_OPT_FLAG ||
			    ei->data_type == QMI_EOTI) {
				printf("%s: bad optional flag element\n",
				    __func__);
				goto err;
			}
			if (!do_encode) {
				/* The element will not be encoded. Skip it. */
				tlv_type = ei->tlv_type;
				while (ei->data_type != QMI_EOTI &&
				    ei->tlv_type == tlv_type)
					ei++;
				continue;
			}
		}

		if (ei->elem_size + 3 > remain) {
			printf("%s: QMI message buffer too short\n", __func__);
			goto err;
		}

		/* 3 bytes of type-length-value header, remember for later */
		op = p;
		p += 3;

		if (ei->data_type == QMI_DATA_LEN) {
			if (qwx_qmi_encode_datalen(p, &datalen, ei, input))
				goto err;
			/* NOTE(review): remain is not decremented for these
			 * length bytes; the first pass computed an exact
			 * buffer size, so writes still stay in bounds. */
			p += ei->elem_size;
			ei++;
			if (ei->array_type == VAR_LEN_ARRAY)
				nelem = datalen;
		}

		for (i = 0; i < nelem; i++) {
			size_t encoded_struct_len = 0;
			size_t encoded_string_len = 0;
			size_t inoff = ei->offset + (i * ei->elem_size);

			switch (ei->data_type) {
			case QMI_UNSIGNED_1_BYTE:
				if (qwx_qmi_encode_byte(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_2_BYTE:
			case QMI_SIGNED_2_BYTE_ENUM:
				if (qwx_qmi_encode_word(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_4_BYTE:
			case QMI_SIGNED_4_BYTE_ENUM:
				if (qwx_qmi_encode_dword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_UNSIGNED_8_BYTE:
				if (qwx_qmi_encode_qword(p, ei, input, i))
					goto err;
				remain -= ei->elem_size;
				p += ei->elem_size;
				break;
			case QMI_STRUCT:
				if (qwx_qmi_encode_struct(p,
				    &encoded_struct_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_struct_len;
				p += encoded_struct_len;
				break;
			case QMI_STRING:
				if (qwx_qmi_encode_string(p,
				    &encoded_string_len, ei,
				    input + inoff, input_len - inoff))
					goto err;
				remain -= encoded_string_len;
				p += encoded_string_len;
				break;
			default:
				printf("%s: unhandled QMI message element type %d\n",
				    __func__, ei->data_type);
				goto err;
			}
		}

		/* Backfill the TLV header now that the value length is known. */
		op[0] = ei->tlv_type;
		op[1] = (p - (op + 3)) & 0xff;
		op[2] = ((p - (op + 3)) >> 8) & 0xff;

		ei++;
	}

	/* Debug dump of the encoded message; disabled by default. */
	if (0) {
		int i;
		DNPRINTF(QWX_D_QMI,
		   "%s: message type 0x%x txnid 0x%x msgid 0x%x "
		    "msglen %zu encoded:", __func__,
		    type, *txn_id, msg_id, *encoded_len - sizeof(hdr));
		for (i = 0; i < *encoded_len; i++) {
			DNPRINTF(QWX_D_QMI, "%s %.2x", i % 16 == 0 ? "\n" : "",
			    (*encoded_msg)[i]);
		}
		if (i % 16)
			DNPRINTF(QWX_D_QMI, "\n");
	}

	(*txn_id)++; /* wrap-around is fine */
	return 0;
err:
	free(*encoded_msg, M_DEVBUF, *encoded_len);
	*encoded_msg = NULL;
	*encoded_len = 0;
	return -1;
}
6617 
/*
 * Encode a QMI request message, wrap it in a QRTR DATA packet, and
 * hand it to the transport for transmission. qwx_qmi_encode_msg()
 * advances sc->qmi_txn_id; the encoded buffer is freed here in all
 * cases. Returns 0 on success, -1 on encode failure, or an errno.
 */
int
qwx_qmi_send_request(struct qwx_softc *sc, uint16_t msg_id, size_t msg_len,
    const struct qmi_elem_info *ei, void *req, size_t req_len)
{
	struct qrtr_hdr_v1 hdr;
	struct mbuf *m;
	uint8_t *encoded_msg;
	size_t encoded_len;
	size_t totlen, padlen;
	int err;

	if (qwx_qmi_encode_msg(&encoded_msg, &encoded_len, QMI_REQUEST,
	    &sc->qmi_txn_id, msg_id, msg_len, ei, req, req_len))
		return -1;

	/* QRTR transfers are padded to a multiple of 4 bytes. */
	totlen = sizeof(hdr) + encoded_len;
	padlen = roundup(totlen, 4);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		err = ENOBUFS;
		goto done;
	}

	if (padlen <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, padlen);
	if ((m->m_flags & M_EXT) == 0) {
		err = ENOBUFS;
		goto done;
	}

	m->m_len = m->m_pkthdr.len = padlen;

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = htole32(QRTR_PROTO_VER_1);
	hdr.type = htole32(QRTR_TYPE_DATA);
	hdr.src_node_id = htole32(0x01); /* TODO make human-readable */
	hdr.src_port_id = htole32(0x4000); /* TODO make human-readable */
	hdr.dst_node_id = htole32(0x07); /* TODO make human-readable */
	hdr.dst_port_id = htole32(0x01); /* TODO make human-readable */
	hdr.size = htole32(encoded_len);

	err = m_copyback(m, 0, sizeof(hdr), &hdr, M_NOWAIT);
	if (err)
		goto done;

	err = m_copyback(m, sizeof(hdr), encoded_len, encoded_msg, M_NOWAIT);
	if (err)
		goto done;

	/* Zero-pad the mbuf */
	if (padlen != totlen) {
		uint32_t pad = 0;
		err = m_copyback(m, totlen, padlen - totlen, &pad, M_NOWAIT);
		if (err)
			goto done;
	}

	/* On success the transport takes ownership of the mbuf;
	 * it is only freed here on error. */
	err = sc->ops.submit_xfer(sc, m);
done:
	if (err)
		m_freem(m);
	free(encoded_msg, M_DEVBUF, encoded_len);
	return err;
}
6685 
6686 int
6687 qwx_qmi_fw_ind_register_send(struct qwx_softc *sc)
6688 {
6689 	struct qmi_wlanfw_ind_register_req_msg_v01 req;
6690 	int ret;
6691 
6692 	memset(&req, 0, sizeof(req));
6693 
6694 	req.client_id_valid = 1;
6695 	req.client_id = QMI_WLANFW_CLIENT_ID;
6696 	req.fw_ready_enable_valid = 1;
6697 	req.fw_ready_enable = 1;
6698 	req.cal_done_enable_valid = 1;
6699 	req.cal_done_enable = 1;
6700 	req.fw_init_done_enable_valid = 1;
6701 	req.fw_init_done_enable = 1;
6702 
6703 	req.pin_connect_result_enable_valid = 0;
6704 	req.pin_connect_result_enable = 0;
6705 
6706 	/*
6707 	 * WCN6750 doesn't request for DDR memory via QMI,
6708 	 * instead it uses a fixed 12MB reserved memory region in DDR.
6709 	 */
6710 	if (!sc->hw_params.fixed_fw_mem) {
6711 		req.request_mem_enable_valid = 1;
6712 		req.request_mem_enable = 1;
6713 		req.fw_mem_ready_enable_valid = 1;
6714 		req.fw_mem_ready_enable = 1;
6715 	}
6716 
6717 	DNPRINTF(QWX_D_QMI, "%s: qmi indication register request\n", __func__);
6718 
6719 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_IND_REGISTER_REQ_V01,
6720 			       QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
6721 			       qmi_wlanfw_ind_register_req_msg_v01_ei,
6722 			       &req, sizeof(req));
6723 	if (ret) {
6724 		printf("%s: failed to send indication register request: %d\n",
6725 		    sc->sc_dev.dv_xname, ret);
6726 		return -1;
6727 	}
6728 
6729 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
6730 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
6731 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwind",
6732 		    SEC_TO_NSEC(1));
6733 		if (ret) {
6734 			printf("%s: fw indication register request timeout\n",
6735 			    sc->sc_dev.dv_xname);
6736 			return ret;
6737 		}
6738 	}
6739 
6740 	return 0;
6741 }
6742 
6743 int
6744 qwx_qmi_host_cap_send(struct qwx_softc *sc)
6745 {
6746 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
6747 	int ret;
6748 
6749 	memset(&req, 0, sizeof(req));
6750 	req.num_clients_valid = 1;
6751 	req.num_clients = 1;
6752 	req.mem_cfg_mode = sc->hw_params.fw_mem_mode;
6753 	req.mem_cfg_mode_valid = 1;
6754 	req.bdf_support_valid = 1;
6755 	req.bdf_support = 1;
6756 
6757 	if (sc->hw_params.m3_fw_support) {
6758 		req.m3_support_valid = 1;
6759 		req.m3_support = 1;
6760 		req.m3_cache_support_valid = 1;
6761 		req.m3_cache_support = 1;
6762 	} else {
6763 		req.m3_support_valid = 0;
6764 		req.m3_support = 0;
6765 		req.m3_cache_support_valid = 0;
6766 		req.m3_cache_support = 0;
6767 	}
6768 
6769 	req.cal_done_valid = 1;
6770 	req.cal_done = sc->qmi_cal_done;
6771 
6772 	if (sc->hw_params.internal_sleep_clock) {
6773 		req.nm_modem_valid = 1;
6774 
6775 		/* Notify firmware that this is non-qualcomm platform. */
6776 		req.nm_modem |= QWX_HOST_CSTATE_BIT;
6777 
6778 		/* Notify firmware about the sleep clock selection,
6779 		 * nm_modem_bit[1] is used for this purpose. Host driver on
6780 		 * non-qualcomm platforms should select internal sleep
6781 		 * clock.
6782 		 */
6783 		req.nm_modem |= QWX_SLEEP_CLOCK_SELECT_INTERNAL_BIT;
6784 	}
6785 
6786 	if (sc->hw_params.global_reset)
6787 		req.nm_modem |= QWX_PLATFORM_CAP_PCIE_GLOBAL_RESET;
6788 
6789 	req.nm_modem |= QWX_PLATFORM_CAP_PCIE_PME_D3COLD;
6790 
6791 	DNPRINTF(QWX_D_QMI, "%s: qmi host cap request\n", __func__);
6792 
6793 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_HOST_CAP_REQ_V01,
6794 			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
6795 			       qmi_wlanfw_host_cap_req_msg_v01_ei,
6796 			       &req, sizeof(req));
6797 	if (ret) {
6798 		printf("%s: failed to send host cap request: %d\n",
6799 		    sc->sc_dev.dv_xname, ret);
6800 		return -1;
6801 	}
6802 
6803 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
6804 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
6805 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwhcap",
6806 		    SEC_TO_NSEC(1));
6807 		if (ret) {
6808 			printf("%s: fw host cap request timeout\n",
6809 			    sc->sc_dev.dv_xname);
6810 			return ret;
6811 		}
6812 	}
6813 
6814 	return 0;
6815 }
6816 
6817 int
6818 qwx_qmi_mem_seg_send(struct qwx_softc *sc)
6819 {
6820 	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
6821 	struct qmi_wlanfw_request_mem_ind_msg_v01 *ind;
6822 	uint32_t mem_seg_len;
6823 	const uint32_t mem_seg_len_max = 64; /* bump if needed by future fw */
6824 	uint16_t expected_result;
6825 	size_t total_size;
6826 	int i, ret;
6827 
6828 	sc->fwmem_ready = 0;
6829 
6830 	while (sc->sc_req_mem_ind == NULL) {
6831 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwmem",
6832 		    SEC_TO_NSEC(10));
6833 		if (ret) {
6834 			printf("%s: fw memory request timeout\n",
6835 			    sc->sc_dev.dv_xname);
6836 			return -1;
6837 		}
6838 	}
6839 
6840 	ind = sc->sc_req_mem_ind;
6841 	mem_seg_len = le32toh(ind->mem_seg_len);
6842 	if (mem_seg_len > mem_seg_len_max) {
6843 		printf("%s: firmware requested too many memory segments: %u\n",
6844 		    sc->sc_dev.dv_xname, mem_seg_len);
6845 		return -1;
6846 	}
6847 
6848 	total_size = 0;
6849 	for (i = 0; i < mem_seg_len; i++) {
6850 		if (ind->mem_seg[i].size == 0) {
6851 			printf("%s: firmware requested zero-sized "
6852 			    "memory segment %u\n", sc->sc_dev.dv_xname, i);
6853 			return -1;
6854 		}
6855 		total_size += le32toh(ind->mem_seg[i].size);
6856 	}
6857 
6858 	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
6859 	if (req == NULL) {
6860 		printf("%s: failed to allocate respond memory request\n",
6861 		    sc->sc_dev.dv_xname);
6862 		free(sc->sc_req_mem_ind, M_DEVBUF, sizeof(*sc->sc_req_mem_ind));
6863 		sc->sc_req_mem_ind = NULL;
6864 		return -1;
6865 	}
6866 
6867 	if (total_size == 0) {
6868 		/* Should not happen. Send back an empty allocation. */
6869 		printf("%s: firmware has requested no memory\n",
6870 		    sc->sc_dev.dv_xname);
6871 		mem_seg_len = 0;
6872 	} else if (sc->fwmem == NULL || QWX_DMA_LEN(sc->fwmem) < total_size) {
6873 		if (sc->fwmem != NULL)
6874 			qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
6875 		sc->fwmem = qwx_dmamem_alloc(sc->sc_dmat, total_size, 65536);
6876 		if (sc->fwmem == NULL) {
6877 			printf("%s: failed to allocate %zu bytes of DMA "
6878 			    "memory for firmware\n", sc->sc_dev.dv_xname,
6879 			    total_size);
6880 			/* Send back an empty allocation. */
6881 			mem_seg_len = 0;
6882 		} else
6883 			DPRINTF("%s: allocated %zu bytes of DMA memory for "
6884 			    "firmware\n", sc->sc_dev.dv_xname, total_size);
6885 	}
6886 
6887 	/* Chunk DMA memory block into segments as requested by firmware. */
6888 	req->mem_seg_len = htole32(mem_seg_len);
6889 	if (sc->fwmem) {
6890 		uint64_t paddr = QWX_DMA_DVA(sc->fwmem);
6891 
6892 		for (i = 0; i < mem_seg_len; i++) {
6893 			DPRINTF("%s: mem seg[%d] addr=%llx size=%u type=%u\n",
6894 			    __func__, i, paddr, le32toh(ind->mem_seg[i].size),
6895 			    le32toh(ind->mem_seg[i].type));
6896 			req->mem_seg[i].addr = htole64(paddr);
6897 			paddr += le32toh(ind->mem_seg[i].size);
6898 
6899 			/* Values in 'ind' are in little-endian format. */
6900 			req->mem_seg[i].size = ind->mem_seg[i].size;
6901 			req->mem_seg[i].type = ind->mem_seg[i].type;
6902 		}
6903 	}
6904 
6905 	free(ind, M_DEVBUF, sizeof(*ind));
6906 	sc->sc_req_mem_ind = NULL;
6907 
6908 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_RESPOND_MEM_REQ_V01,
6909 			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
6910 			       qmi_wlanfw_respond_mem_req_msg_v01_ei,
6911 			       req, sizeof(*req));
6912 	free(req, M_DEVBUF, sizeof(*req));
6913 	if (ret) {
6914 		printf("%s: failed to send respond memory request: %d\n",
6915 		    sc->sc_dev.dv_xname, ret);
6916 		return -1;
6917 	}
6918 
6919 	if (mem_seg_len == 0) {
6920 		expected_result = QMI_RESULT_FAILURE_V01;
6921 		sc->qmi_resp.result = QMI_RESULT_SUCCESS_V01;
6922 	} else {
6923 		expected_result = QMI_RESULT_SUCCESS_V01;
6924 		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
6925 	}
6926 	while (sc->qmi_resp.result != expected_result) {
6927 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwrespmem",
6928 		    SEC_TO_NSEC(1));
6929 		if (ret) {
6930 			printf("%s: fw respond memory request timeout\n",
6931 			    sc->sc_dev.dv_xname);
6932 			return -1;
6933 		}
6934 	}
6935 
6936 	if (mem_seg_len == 0)
6937 		return EBUSY;
6938 
6939 	if (!sc->hw_params.fixed_fw_mem) {
6940 		while (!sc->fwmem_ready) {
6941 			ret = tsleep_nsec(&sc->fwmem_ready, 0, "qwxfwrdy",
6942 			    SEC_TO_NSEC(10));
6943 			if (ret) {
6944 				printf("%s: fw memory ready timeout\n",
6945 				    sc->sc_dev.dv_xname);
6946 				return -1;
6947 			}
6948 		}
6949 	}
6950 
6951 	return 0;
6952 }
6953 
/*
 * Check SMBIOS for a board-data-file (bdf) variant name.
 * Stub: always returns 0 for now. TODO: port the SMBIOS lookup.
 */
int
qwx_core_check_smbios(struct qwx_softc *sc)
{
	return 0; /* TODO */
}
6959 
/*
 * Check the device tree for a board-data-file (bdf) variant name.
 * Stub: always returns 0 for now. TODO: port the device-tree lookup.
 */
int
qwx_core_check_dt(struct qwx_softc *sc)
{
	return 0; /* TODO */
}
6965 
/*
 * Send a QMI capability request to the firmware and sleep until the
 * response has been processed (the QMI receive path fills in
 * sc->qmi_target and wakes sc->qmi_resp).  Logs chip/board/firmware
 * identification on success.  Returns 0 on success, errno on failure.
 */
int
qwx_qmi_request_target_cap(struct qwx_softc *sc)
{
	struct qmi_wlanfw_cap_req_msg_v01 req;
	int ret = 0;
	int r;
	char *fw_build_id;
	int fw_build_id_mask_len;

	memset(&req, 0, sizeof(req));

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_CAP_REQ_V01,
	    QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_cap_req_msg_v01_ei, &req, sizeof(req));
	if (ret) {
		printf("%s: failed to send qmi cap request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Wait for the response; each sleep times out after one second. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwcap",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: qmi cap request failed\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	/* Strip the common build-id prefix for display, if present. */
	fw_build_id = sc->qmi_target.fw_build_id;
	fw_build_id_mask_len = strlen(QWX_FW_BUILD_ID_MASK);
	if (!strncmp(fw_build_id, QWX_FW_BUILD_ID_MASK, fw_build_id_mask_len))
		fw_build_id = fw_build_id + fw_build_id_mask_len;

	DPRINTF("%s: chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
	    sc->sc_dev.dv_xname,
	    sc->qmi_target.chip_id, sc->qmi_target.chip_family,
	    sc->qmi_target.board_id, sc->qmi_target.soc_id);

	DPRINTF("%s: fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
	    sc->sc_dev.dv_xname, sc->qmi_target.fw_version,
	    sc->qmi_target.fw_build_timestamp, fw_build_id);

	/* Both checks are stubs for now; failure only logs a message. */
	r = qwx_core_check_smbios(sc);
	if (r)
		DPRINTF("%s: SMBIOS bdf variant name not set\n", __func__);

	r = qwx_core_check_dt(sc);
	if (r)
		DPRINTF("%s: DT bdf variant name not set\n", __func__);

out:
	return ret;
}
7022 
7023 int
7024 qwx_qmi_request_device_info(struct qwx_softc *sc)
7025 {
7026 	/* device info message req is only sent for hybrid bus devices */
7027 	if (!sc->hw_params.hybrid_bus_type)
7028 		return 0;
7029 
7030 	/* TODO */
7031 	return -1;
7032 }
7033 
/*
 * Compose the board name string used to look up board data for this
 * device.  With 'with_variant' set, a non-empty sc->qmi_target.bdf_ext
 * is appended as ",variant=...".  With 'bus_type_mode' set (and the
 * bus-and-board search strategy) only "bus=<bus>" is emitted.
 * Always returns 0.
 */
int
_qwx_core_create_board_name(struct qwx_softc *sc, char *name,
    size_t name_len, int with_variant, int bus_type_mode)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && sc->qmi_target.bdf_ext[0] != '\0')
		snprintf(variant, sizeof(variant), ",variant=%s",
		    sc->qmi_target.bdf_ext);

	switch (sc->id.bdf_search) {
	case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			snprintf(name, name_len, "bus=%s", sc->sc_bus_str);
		else
			snprintf(name, name_len,
			    "bus=%s,vendor=%04x,device=%04x,"
			    "subsystem-vendor=%04x,subsystem-device=%04x,"
			    "qmi-chip-id=%d,qmi-board-id=%d%s",
			    sc->sc_bus_str, sc->id.vendor, sc->id.device,
			    sc->id.subsystem_vendor, sc->id.subsystem_device,
			    sc->qmi_target.chip_id, sc->qmi_target.board_id,
			    variant);
		break;
	default:
		snprintf(name, name_len,
		    "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		    sc->sc_bus_str, sc->qmi_target.chip_id,
		    sc->qmi_target.board_id, variant);
		break;
	}

	DPRINTF("%s: using board name '%s'\n", __func__, name);

	return 0;
}
7071 
7072 int
7073 qwx_core_create_board_name(struct qwx_softc *sc, char *name, size_t name_len)
7074 {
7075 	return _qwx_core_create_board_name(sc, name, name_len, 1, 0);
7076 }
7077 
7078 int
7079 qwx_core_create_fallback_board_name(struct qwx_softc *sc, char *name,
7080     size_t name_len)
7081 {
7082 	return _qwx_core_create_board_name(sc, name, name_len, 0, 0);
7083 }
7084 
7085 int
7086 qwx_core_create_bus_type_board_name(struct qwx_softc *sc, char *name,
7087     size_t name_len)
7088 {
7089 	return _qwx_core_create_board_name(sc, name, name_len, 0, 1);
7090 }
7091 
/*
 * On-disk IE header used in board data files: little-endian 32-bit id
 * and payload length, followed by the payload itself.
 */
struct ath11k_fw_ie {
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};

/* Sub-IE ids found inside an ATH11K_BD_IE_BOARD container. */
enum ath11k_bd_ie_board_type {
	ATH11K_BD_IE_BOARD_NAME = 0,
	ATH11K_BD_IE_BOARD_DATA = 1,
};

/* Sub-IE ids found inside an ATH11K_BD_IE_REGDB container. */
enum ath11k_bd_ie_regdb_type {
	ATH11K_BD_IE_REGDB_NAME = 0,
	ATH11K_BD_IE_REGDB_DATA = 1,
};

enum ath11k_bd_ie_type {
	/* contains sub IEs of enum ath11k_bd_ie_board_type */
	ATH11K_BD_IE_BOARD = 0,
	/* contains sub IEs of enum ath11k_bd_ie_regdb_type */
	ATH11K_BD_IE_REGDB = 1,
};

/* Human-readable name of a top-level board-file IE, for diagnostics. */
static inline const char *
qwx_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
	if (type == ATH11K_BD_IE_BOARD)
		return "board data";
	if (type == ATH11K_BD_IE_REGDB)
		return "regdb data";
	return "unknown";
}
7127 
/*
 * Walk the sub-IEs of a board data/regdb container ('buf') and find the
 * data element whose preceding name element matches 'boardname'.
 * On success, *boardfw/*boardfw_len point into 'buf'; no copy is made,
 * so the caller must keep the underlying buffer alive while using it.
 * Returns 0 on success, ENOENT if no match, EINVAL if the IE structure
 * is malformed.
 */
int
qwx_core_parse_bd_ie_board(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    const void *buf, size_t buf_len,
    const char *boardname, int ie_id, int name_id, int data_id)
{
	const struct ath11k_fw_ie *hdr;
	int name_match_found = 0;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	*boardfw = NULL;
	*boardfw_len = 0;

	/* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath11k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32toh(hdr->id);
		board_ie_len = le32toh(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* IE payloads are padded to 4-byte boundaries. */
		if (buf_len < roundup(board_ie_len, 4)) {
			printf("%s: invalid %s length: %zu < %zu\n",
			    sc->sc_dev.dv_xname, qwx_bd_ie_type_str(ie_id),
			    buf_len, roundup(board_ie_len, 4));
			return EINVAL;
		}

		if (board_ie_id == name_id) {
			/* A name element; compare against our board name. */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, board_ie_len);
			if (ret)
				goto next;

			name_match_found = 1;
			   DPRINTF("%s: found match %s for name '%s'", __func__,
			       qwx_bd_ie_type_str(ie_id), boardname);
		} else if (board_ie_id == data_id) {
			/* Data only counts if a preceding name matched. */
			if (!name_match_found)
				/* no match found */
				goto next;

			DPRINTF("%s: found %s for '%s'", __func__,
			    qwx_bd_ie_type_str(ie_id), boardname);

			*boardfw = board_ie_data;
			*boardfw_len = board_ie_len;
			return 0;
		} else {
			printf("%s: unknown %s id found: %d\n", __func__,
			    qwx_bd_ie_type_str(ie_id), board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = roundup(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	return ENOENT;
}
7197 
/*
 * Parse an ath11k board data blob ('fwdata', board-2.bin format):
 * verify the magic string, then walk the top-level IEs looking for
 * 'ie_id_match' containers and search each one for 'boardname' via
 * qwx_core_parse_bd_ie_board().  On success *boardfw/*boardfw_len
 * point into 'fwdata' (no copy is made).  Returns 0 on success,
 * EINVAL on corrupt data, ENOENT if no matching entry exists.
 * NOTE(review): 'filename' is only used in diagnostics and is always
 * ATH11K_BOARD_API2_FILE, even when the caller fetched the regdb file.
 */
int
qwx_core_fetch_board_data_api_n(struct qwx_softc *sc,
    const u_char **boardfw, size_t *boardfw_len,
    u_char *fwdata, size_t fwdata_len,
    const char *boardname, int ie_id_match, int name_id, int data_id)
{
	size_t len, magic_len;
	const uint8_t *data;
	char *filename;
	size_t ie_len;
	struct ath11k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH11K_BOARD_API2_FILE;

	*boardfw = NULL;
	*boardfw_len = 0;

	data = fwdata;
	len = fwdata_len;

	/* magic has extra null byte padded */
	magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		printf("%s: failed to find magic value in %s, "
		    "file too short: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
		DPRINTF("%s: found invalid board magic\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* magic is padded to 4 bytes */
	magic_len = roundup(magic_len, 4);
	if (len < magic_len) {
		printf("%s: %s too small to contain board data, len: %zu\n",
		    sc->sc_dev.dv_xname, filename, len);
		return EINVAL;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level IEs. */
	while (len > sizeof(struct ath11k_fw_ie)) {
		hdr = (struct ath11k_fw_ie *)data;
		ie_id = le32toh(hdr->id);
		ie_len = le32toh(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < roundup(ie_len, 4)) {
			printf("%s: invalid length for board ie_id %d "
			    "ie_len %zu len %zu\n",
			    sc->sc_dev.dv_xname, ie_id, ie_len, len);
			return EINVAL;
		}

		if (ie_id == ie_id_match) {
			ret = qwx_core_parse_bd_ie_board(sc,
			    boardfw, boardfw_len, data, ie_len,
			    boardname, ie_id_match, name_id, data_id);
			if (ret == ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				return ret;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = roundup(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Reached via fallthrough, too, when nothing matched. */
	if (!*boardfw || !*boardfw_len) {
		printf("%s: failed to fetch %s for %s from %s\n",
		    __func__, qwx_bd_ie_type_str(ie_id_match),
		    boardname, filename);
		return ENOENT;
	}

	return 0;
}
7290 
/*
 * Read a board data file with loadfirmware() and locate the board data
 * section matching this device's generated board name.
 * On success, *data/*len hold the loadfirmware() buffer (which
 * *boardfw points into) and the caller must free it with free(9).
 * Note that *data stays allocated even when the board-data lookup step
 * fails after the file was read; the caller frees it in that case too.
 */
int
qwx_core_fetch_bdf(struct qwx_softc *sc, u_char **data, size_t *len,
    const u_char **boardfw, size_t *boardfw_len, const char *filename)
{
	char path[PATH_MAX];
	char boardname[200];
	int ret;

	ret = snprintf(path, sizeof(path), "%s-%s-%s",
	    ATH11K_FW_DIR, sc->hw_params.fw.dir, filename);
	if (ret < 0 || ret >= sizeof(path))
		return ENOSPC;

	ret = qwx_core_create_board_name(sc, boardname, sizeof(boardname));
	if (ret) {
		DPRINTF("%s: failed to create board name: %d",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = loadfirmware(path, data, len);
	if (ret) {
		printf("%s: could not read %s (error %d)\n",
		    sc->sc_dev.dv_xname, path, ret);
		return ret;
	}

	ret = qwx_core_fetch_board_data_api_n(sc, boardfw, boardfw_len,
	    *data, *len, boardname, ATH11K_BD_IE_BOARD,
	    ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA);
	if (ret) {
		DPRINTF("%s: failed to fetch board data for %s from %s\n",
		    sc->sc_dev.dv_xname, boardname, path);
		return ret;
	}

	return 0;
}
7329 
7330 int
7331 qwx_qmi_load_file_target_mem(struct qwx_softc *sc, const u_char *data,
7332     size_t len, int type)
7333 {
7334 	struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
7335 	const uint8_t *p = data;
7336 #ifdef notyet
7337 	void *bdf_addr = NULL;
7338 #endif
7339 	int ret;
7340 	uint32_t remaining = len;
7341 
7342 	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
7343 	if (!req) {
7344 		printf("%s: failed to allocate bfd download request\n",
7345 		    sc->sc_dev.dv_xname);
7346 		return ENOMEM;
7347 	}
7348 
7349 	if (sc->hw_params.fixed_bdf_addr) {
7350 #ifdef notyet
7351 		bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
7352 		if (!bdf_addr) {
7353 			ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
7354 			ret = -EIO;
7355 			goto err_free_req;
7356 		}
7357 #else
7358 		printf("%s: fixed bdf address not yet supported\n",
7359 		    sc->sc_dev.dv_xname);
7360 		ret = EIO;
7361 		goto err_free_req;
7362 #endif
7363 	}
7364 
7365 	while (remaining) {
7366 		req->valid = 1;
7367 		req->file_id_valid = 1;
7368 		req->file_id = sc->qmi_target.board_id;
7369 		req->total_size_valid = 1;
7370 		req->total_size = remaining;
7371 		req->seg_id_valid = 1;
7372 		req->data_valid = 1;
7373 		req->bdf_type = type;
7374 		req->bdf_type_valid = 1;
7375 		req->end_valid = 1;
7376 		req->end = 0;
7377 
7378 		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
7379 			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
7380 		} else {
7381 			req->data_len = remaining;
7382 			req->end = 1;
7383 		}
7384 
7385 		if (sc->hw_params.fixed_bdf_addr ||
7386 		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
7387 			req->data_valid = 0;
7388 			req->end = 1;
7389 			req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
7390 		} else {
7391 			memcpy(req->data, p, req->data_len);
7392 		}
7393 #ifdef notyet
7394 		if (ab->hw_params.fixed_bdf_addr) {
7395 			if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
7396 				bdf_addr += ab->hw_params.fw.cal_offset;
7397 
7398 			memcpy_toio(bdf_addr, p, len);
7399 		}
7400 #endif
7401 		DPRINTF("%s: bdf download req fixed addr type %d\n",
7402 		    __func__, type);
7403 
7404 		ret = qwx_qmi_send_request(sc,
7405 		    QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
7406 		    QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
7407 		    qmi_wlanfw_bdf_download_req_msg_v01_ei,
7408 		    req, sizeof(*req));
7409 		if (ret) {
7410 			printf("%s: failed to send bdf download request\n",
7411 			    sc->sc_dev.dv_xname);
7412 			goto err_iounmap;
7413 		}
7414 
7415 		sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
7416 		while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
7417 			ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxbdf",
7418 			    SEC_TO_NSEC(1));
7419 			if (ret) {
7420 				printf("%s: bdf download request timeout\n",
7421 				    sc->sc_dev.dv_xname);
7422 				goto err_iounmap;
7423 			}
7424 		}
7425 
7426 		if (sc->hw_params.fixed_bdf_addr ||
7427 		    type == ATH11K_QMI_FILE_TYPE_EEPROM) {
7428 			remaining = 0;
7429 		} else {
7430 			remaining -= req->data_len;
7431 			p += req->data_len;
7432 			req->seg_id++;
7433 			DPRINTF("%s: bdf download request remaining %i\n",
7434 			    __func__, remaining);
7435 		}
7436 	}
7437 
7438 err_iounmap:
7439 #ifdef notyet
7440 	if (ab->hw_params.fixed_bdf_addr)
7441 		iounmap(bdf_addr);
7442 #endif
7443 err_free_req:
7444 	free(req, M_DEVBUF, sizeof(*req));
7445 
7446 	return ret;
7447 }
7448 
/* ELF magic number, used to detect ELF-formatted board files. */
#define QWX_ELFMAG	"\177ELF"
#define QWX_SELFMAG	4
7451 
7452 int
7453 qwx_qmi_load_bdf_qmi(struct qwx_softc *sc, int regdb)
7454 {
7455 	u_char *data;
7456 	const u_char *boardfw;
7457 	size_t len, boardfw_len;
7458 	uint32_t fw_size;
7459 	int ret = 0, bdf_type;
7460 #ifdef notyet
7461 	const uint8_t *tmp;
7462 	uint32_t file_type;
7463 #endif
7464 
7465 	ret = qwx_core_fetch_bdf(sc, &data, &len, &boardfw, &boardfw_len,
7466 	    regdb ? ATH11K_REGDB_FILE : ATH11K_BOARD_API2_FILE);
7467 	if (ret)
7468 		goto out;
7469 
7470 	if (regdb)
7471 		bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
7472 	else if (len >= QWX_SELFMAG &&
7473 	    memcmp(boardfw, QWX_ELFMAG, QWX_SELFMAG) == 0)
7474 		bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
7475 	else
7476 		bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
7477 
7478 	DPRINTF("%s: bdf_type %d\n", __func__, bdf_type);
7479 
7480 	fw_size = MIN(sc->hw_params.fw.board_size, len);
7481 
7482 	ret = qwx_qmi_load_file_target_mem(sc, boardfw, boardfw_len, bdf_type);
7483 	if (ret < 0) {
7484 		printf("%s: failed to load bdf file\n", __func__);
7485 		goto out;
7486 	}
7487 
7488 	/* QCA6390/WCN6855 does not support cal data, skip it */
7489 	if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
7490 		goto out;
7491 #ifdef notyet
7492 	if (ab->qmi.target.eeprom_caldata) {
7493 		file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
7494 		tmp = filename;
7495 		fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
7496 	} else {
7497 		file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
7498 
7499 		/* cal-<bus>-<id>.bin */
7500 		snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
7501 			 ath11k_bus_str(ab->hif.bus), dev_name(dev));
7502 		fw_entry = ath11k_core_firmware_request(ab, filename);
7503 		if (!IS_ERR(fw_entry))
7504 			goto success;
7505 
7506 		fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
7507 		if (IS_ERR(fw_entry)) {
7508 			/* Caldata may not be present during first time calibration in
7509 			 * factory hence allow to boot without loading caldata in ftm mode
7510 			 */
7511 			if (ath11k_ftm_mode) {
7512 				ath11k_info(ab,
7513 					    "Booting without cal data file in factory test mode\n");
7514 				return 0;
7515 			}
7516 			ret = PTR_ERR(fw_entry);
7517 			ath11k_warn(ab,
7518 				    "qmi failed to load CAL data file:%s\n",
7519 				    filename);
7520 			goto out;
7521 		}
7522 success:
7523 		fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
7524 		tmp = fw_entry->data;
7525 	}
7526 
7527 	ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
7528 	if (ret < 0) {
7529 		ath11k_warn(ab, "qmi failed to load caldata\n");
7530 		goto out_qmi_cal;
7531 	}
7532 
7533 	ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);
7534 
7535 out_qmi_cal:
7536 	if (!ab->qmi.target.eeprom_caldata)
7537 		release_firmware(fw_entry);
7538 #endif
7539 out:
7540 	free(data, M_DEVBUF, len);
7541 	if (ret == 0)
7542 		DPRINTF("%s: BDF download sequence completed\n", __func__);
7543 
7544 	return ret;
7545 }
7546 
7547 int
7548 qwx_qmi_event_load_bdf(struct qwx_softc *sc)
7549 {
7550 	int ret;
7551 
7552 	ret = qwx_qmi_request_target_cap(sc);
7553 	if (ret < 0) {
7554 		printf("%s: failed to request qmi target capabilities: %d\n",
7555 		    sc->sc_dev.dv_xname, ret);
7556 		return ret;
7557 	}
7558 
7559 	ret = qwx_qmi_request_device_info(sc);
7560 	if (ret < 0) {
7561 		printf("%s: failed to request qmi device info: %d\n",
7562 		    sc->sc_dev.dv_xname, ret);
7563 		return ret;
7564 	}
7565 
7566 	if (sc->hw_params.supports_regdb)
7567 		qwx_qmi_load_bdf_qmi(sc, 1);
7568 
7569 	ret = qwx_qmi_load_bdf_qmi(sc, 0);
7570 	if (ret < 0) {
7571 		printf("%s: failed to load board data file: %d\n",
7572 		    sc->sc_dev.dv_xname, ret);
7573 		return ret;
7574 	}
7575 
7576 	return 0;
7577 }
7578 
7579 int
7580 qwx_qmi_m3_load(struct qwx_softc *sc)
7581 {
7582 	u_char *data;
7583 	size_t len;
7584 	char path[PATH_MAX];
7585 	int ret;
7586 
7587 	ret = snprintf(path, sizeof(path), "%s-%s-%s",
7588 	    ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_M3_FILE);
7589 	if (ret < 0 || ret >= sizeof(path))
7590 		return ENOSPC;
7591 
7592 	ret = loadfirmware(path, &data, &len);
7593 	if (ret) {
7594 		printf("%s: could not read %s (error %d)\n",
7595 		    sc->sc_dev.dv_xname, path, ret);
7596 		return ret;
7597 	}
7598 
7599 	if (sc->m3_mem == NULL || QWX_DMA_LEN(sc->m3_mem) < len) {
7600 		if (sc->m3_mem)
7601 			qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
7602 		sc->m3_mem = qwx_dmamem_alloc(sc->sc_dmat, len, 65536);
7603 		if (sc->m3_mem == NULL) {
7604 			printf("%s: failed to allocate %zu bytes of DMA "
7605 			    "memory for M3 firmware\n", sc->sc_dev.dv_xname,
7606 			    len);
7607 			return ENOMEM;
7608 		}
7609 	}
7610 
7611 	memcpy(QWX_DMA_KVA(sc->m3_mem), data, len);
7612 	free(data, M_DEVBUF, len);
7613 	return 0;
7614 }
7615 
7616 int
7617 qwx_qmi_wlanfw_m3_info_send(struct qwx_softc *sc)
7618 {
7619 	struct qmi_wlanfw_m3_info_req_msg_v01 req;
7620 	int ret = 0;
7621 	uint64_t paddr;
7622 	uint32_t size;
7623 
7624 	memset(&req, 0, sizeof(req));
7625 
7626 	if (sc->hw_params.m3_fw_support) {
7627 		ret = qwx_qmi_m3_load(sc);
7628 		if (ret) {
7629 			printf("%s: failed to load m3 firmware: %d",
7630 			    sc->sc_dev.dv_xname, ret);
7631 			return ret;
7632 		}
7633 
7634 		paddr = QWX_DMA_DVA(sc->m3_mem);
7635 		size = QWX_DMA_LEN(sc->m3_mem);
7636 		req.addr = htole64(paddr);
7637 		req.size = htole32(size);
7638 	} else {
7639 		req.addr = 0;
7640 		req.size = 0;
7641 	}
7642 
7643 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_M3_INFO_REQ_V01,
7644 	    QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
7645 	    qmi_wlanfw_m3_info_req_msg_v01_ei, &req, sizeof(req));
7646 	if (ret) {
7647 		printf("%s: failed to send m3 information request: %d\n",
7648 		    sc->sc_dev.dv_xname, ret);
7649 		return ret;
7650 	}
7651 
7652 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
7653 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
7654 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwm3",
7655 		    SEC_TO_NSEC(1));
7656 		if (ret) {
7657 			printf("%s: m3 information request timeout\n",
7658 			    sc->sc_dev.dv_xname);
7659 			return ret;
7660 		}
7661 	}
7662 
7663 	return 0;
7664 }
7665 
/* Debug helper: dump SRNG statistics.  Not implemented yet. */
void
qwx_hal_dump_srng_stats(struct qwx_softc *sc)
{
	DPRINTF("%s not implemented\n", __func__);
}
7671 
7672 uint16_t
7673 qwx_hal_srng_get_entrysize(struct qwx_softc *sc, uint32_t ring_type)
7674 {
7675 	struct hal_srng_config *srng_config;
7676 
7677 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
7678 
7679 	srng_config = &sc->hal.srng_config[ring_type];
7680 	return (srng_config->entry_size << 2);
7681 }
7682 
7683 uint32_t
7684 qwx_hal_srng_get_max_entries(struct qwx_softc *sc, uint32_t ring_type)
7685 {
7686 	struct hal_srng_config *srng_config;
7687 
7688 	KASSERT(ring_type < HAL_MAX_RING_TYPES);
7689 
7690 	srng_config = &sc->hal.srng_config[ring_type];
7691 	return (srng_config->max_size / srng_config->entry_size);
7692 }
7693 
7694 uint32_t *
7695 qwx_hal_srng_dst_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
7696 {
7697 	uint32_t *desc;
7698 #ifdef notyet
7699 	lockdep_assert_held(&srng->lock);
7700 #endif
7701 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
7702 		return NULL;
7703 
7704 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
7705 
7706 	srng->u.dst_ring.tp += srng->entry_size;
7707 
7708 	/* wrap around to start of ring*/
7709 	if (srng->u.dst_ring.tp == srng->ring_size)
7710 		srng->u.dst_ring.tp = 0;
7711 #ifdef notyet
7712 	/* Try to prefetch the next descriptor in the ring */
7713 	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
7714 		ath11k_hal_srng_prefetch_desc(ab, srng);
7715 #endif
7716 	return desc;
7717 }
7718 
7719 int
7720 qwx_hal_srng_dst_num_free(struct qwx_softc *sc, struct hal_srng *srng,
7721     int sync_hw_ptr)
7722 {
7723 	uint32_t tp, hp;
7724 #ifdef notyet
7725 	lockdep_assert_held(&srng->lock);
7726 #endif
7727 	tp = srng->u.dst_ring.tp;
7728 
7729 	if (sync_hw_ptr) {
7730 		hp = *srng->u.dst_ring.hp_addr;
7731 		srng->u.dst_ring.cached_hp = hp;
7732 	} else {
7733 		hp = srng->u.dst_ring.cached_hp;
7734 	}
7735 
7736 	if (hp >= tp)
7737 		return (hp - tp) / srng->entry_size;
7738 	else
7739 		return (srng->ring_size - tp + hp) / srng->entry_size;
7740 }
7741 
7742 uint32_t *
7743 qwx_hal_srng_src_get_next_reaped(struct qwx_softc *sc, struct hal_srng *srng)
7744 {
7745 	uint32_t *desc;
7746 #ifdef notyet
7747 	lockdep_assert_held(&srng->lock);
7748 #endif
7749 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
7750 		return NULL;
7751 
7752 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
7753 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
7754 			      srng->ring_size;
7755 
7756 	return desc;
7757 }
7758 
7759 uint32_t *
7760 qwx_hal_srng_src_peek(struct qwx_softc *sc, struct hal_srng *srng)
7761 {
7762 #ifdef notyet
7763 	lockdep_assert_held(&srng->lock);
7764 #endif
7765 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
7766 	    srng->u.src_ring.cached_tp)
7767 		return NULL;
7768 
7769 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
7770 }
7771 
7772 void
7773 qwx_get_msi_address(struct qwx_softc *sc, uint32_t *addr_lo,
7774     uint32_t *addr_hi)
7775 {
7776 	*addr_lo = sc->msi_addr_lo;
7777 	*addr_hi = sc->msi_addr_hi;
7778 }
7779 
7780 int
7781 qwx_dp_srng_find_ring_in_mask(int ring_num, const uint8_t *grp_mask)
7782 {
7783 	int ext_group_num;
7784 	uint8_t mask = 1 << ring_num;
7785 
7786 	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
7787 	     ext_group_num++) {
7788 		if (mask & grp_mask[ext_group_num])
7789 			return ext_group_num;
7790 	}
7791 
7792 	return -1;
7793 }
7794 
/*
 * Map a (ring type, ring number) pair to the external interrupt group
 * that services it, using the per-ring-type masks from hw_params.
 * Returns the group number, or -1 if the ring type has no interrupt
 * group (e.g. rings serviced through other mechanisms).
 */
int
qwx_dp_srng_calculate_msi_group(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num)
{
	const uint8_t *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		/* The RX release ring uses its own mask; others are TX. */
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &sc->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &sc->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &sc->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &sc->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &sc->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &sc->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &sc->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &sc->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		/* These ring types are not mapped to any ext group. */
		return -1;
	}

	return qwx_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
7846 
/*
 * Fill in the MSI address and data value for a DP ring, based on which
 * external interrupt group services it.  If MSI vectors cannot be
 * queried, ring_params is left untouched; if the ring maps to no
 * interrupt group, msi_addr/msi_data are zeroed and MSI stays off.
 */
void
qwx_dp_srng_msi_setup(struct qwx_softc *sc, struct hal_srng_params *ring_params,
    enum hal_ring_type type, int ring_num)
{
	int msi_group_number;
	uint32_t msi_data_start = 0;
	uint32_t msi_data_count = 1;
	uint32_t msi_irq_start = 0;
	uint32_t addr_lo;
	uint32_t addr_hi;
	int ret;

	/* Ask the bus layer for the MSI vector range assigned to "DP". */
	ret = sc->ops.get_user_msi_vector(sc, "DP",
	    &msi_data_count, &msi_data_start, &msi_irq_start);
	if (ret)
		return;

	msi_group_number = qwx_dp_srng_calculate_msi_group(sc, type,
	    ring_num);
	if (msi_group_number < 0) {
		DPRINTF("%s ring not part of an ext_group; ring_type %d,"
		    "ring_num %d\n", __func__, type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	qwx_get_msi_address(sc, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
	/* Groups share vectors round-robin when fewer vectors exist. */
	ring_params->msi_data = (msi_group_number % msi_data_count) +
	    msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
7882 
/*
 * Allocate DMA memory for a DP service ring, configure MSI and
 * interrupt-threshold parameters appropriate for the ring type, and
 * register the ring with the HAL.  On success, stores the assigned
 * ring id in ring->ring_id and returns 0; otherwise returns an errno.
 */
int
qwx_dp_srng_setup(struct qwx_softc *sc, struct dp_srng *ring,
    enum hal_ring_type type, int ring_num, int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	uint16_t entry_sz = qwx_hal_srng_get_entrysize(sc, type);
	uint32_t max_entries = qwx_hal_srng_get_max_entries(sc, type);
	int ret;
	int cached = 0;

	if (num_entries > max_entries)
		num_entries = max_entries;

	/* Extra bytes allow the base address to be aligned later. */
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

#ifdef notyet
	if (sc->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
		if (!ring->vaddr_unaligned)
			return -ENOMEM;
	}
#endif
	if (!cached) {
		ring->mem = qwx_dmamem_alloc(sc->sc_dmat, ring->size,
		    PAGE_SIZE);
		if (ring->mem == NULL) {
			printf("%s: could not allocate DP SRNG DMA memory\n",
			    sc->sc_dev.dv_xname);
			return ENOMEM;

		}
	}

	ring->vaddr = QWX_DMA_KVA(ring->mem);
	ring->paddr = QWX_DMA_DVA(ring->mem);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	qwx_dp_srng_msi_setup(sc, &params, type, ring_num + mac_id);

	/* Choose interrupt mitigation thresholds per ring type. */
	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
			    HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
			    HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* rings >= 3 use the generic thresholds below */
		/* FALLTHROUGH */
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
		    HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		printf("%s: Not a valid ring type in dp :%d\n",
		    sc->sc_dev.dv_xname, type);
		return EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = qwx_hal_srng_setup(sc, type, ring_num, mac_id, &params);
	if (ret < 0) {
		printf("%s: failed to setup srng: %d ring_id %d\n",
		    sc->sc_dev.dv_xname, ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;
	return 0;
}
8000 
8001 void
8002 qwx_hal_srng_access_begin(struct qwx_softc *sc, struct hal_srng *srng)
8003 {
8004 #ifdef notyet
8005 	lockdep_assert_held(&srng->lock);
8006 #endif
8007 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
8008 		srng->u.src_ring.cached_tp =
8009 			*(volatile uint32_t *)srng->u.src_ring.tp_addr;
8010 	} else {
8011 		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
8012 	}
8013 }
8014 
/*
 * Finish a ring access sequence: publish our updated pointer (head for
 * source rings, tail for destination rings) to the consumer, and record
 * the peer's last-seen pointer.  LMAC rings publish through shared
 * memory read by the firmware; other rings write a device register.
 */
void
qwx_hal_srng_access_end(struct qwx_softc *sc, struct hal_srng *srng)
{
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif
	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
			    *(volatile uint32_t *)srng->u.src_ring.tp_addr;
			/* Register offset is relative to the mapped BAR. */
			sc->ops.write32(sc,
			    (unsigned long)srng->u.src_ring.hp_addr -
			    (unsigned long)sc->mem, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			sc->ops.write32(sc,
			    (unsigned long)srng->u.dst_ring.tp_addr -
			    (unsigned long)sc->mem, srng->u.dst_ring.tp);
		}
	}
#ifdef notyet
	srng->timestamp = jiffies;
#endif
}
8052 
/*
 * Compute how many link descriptors are needed for MPDU/MSDU queues,
 * round the total up to a power of two, and size the WBM idle link
 * ring accordingly.  Stores the rounded count in *n_link_desc.
 * Returns 0 on success.
 */
int
qwx_wbm_idle_ring_setup(struct qwx_softc *sc, uint32_t *n_link_desc)
{
	struct qwx_dp *dp = &sc->dp;
	uint32_t n_mpdu_link_desc, n_mpdu_queue_desc;
	uint32_t n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	/* Round up to the next power of two if not one already. */
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = qwx_dp_srng_setup(sc, &dp->wbm_idle_ring,
	    HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		printf("%s: failed to setup wbm_idle_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
	}

	return ret;
}
8090 
8091 void
8092 qwx_dp_link_desc_bank_free(struct qwx_softc *sc,
8093     struct dp_link_desc_bank *link_desc_banks)
8094 {
8095 	int i;
8096 
8097 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
8098 		if (link_desc_banks[i].mem) {
8099 			qwx_dmamem_free(sc->sc_dmat, link_desc_banks[i].mem);
8100 			link_desc_banks[i].mem = NULL;
8101 		}
8102 	}
8103 }
8104 
8105 int
8106 qwx_dp_link_desc_bank_alloc(struct qwx_softc *sc,
8107     struct dp_link_desc_bank *desc_bank, int n_link_desc_bank,
8108     int last_bank_sz)
8109 {
8110 	struct qwx_dp *dp = &sc->dp;
8111 	int i;
8112 	int ret = 0;
8113 	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
8114 
8115 	for (i = 0; i < n_link_desc_bank; i++) {
8116 		if (i == (n_link_desc_bank - 1) && last_bank_sz)
8117 			desc_sz = last_bank_sz;
8118 
8119 		desc_bank[i].mem = qwx_dmamem_alloc(sc->sc_dmat, desc_sz,
8120 		    PAGE_SIZE);
8121 		if (!desc_bank[i].mem) {
8122 			ret = ENOMEM;
8123 			goto err;
8124 		}
8125 
8126 		desc_bank[i].vaddr = QWX_DMA_KVA(desc_bank[i].mem);
8127 		desc_bank[i].paddr = QWX_DMA_DVA(desc_bank[i].mem);
8128 		desc_bank[i].size = desc_sz;
8129 	}
8130 
8131 	return 0;
8132 
8133 err:
8134 	qwx_dp_link_desc_bank_free(sc, dp->link_desc_banks);
8135 
8136 	return ret;
8137 }
8138 
/*
 * Program the WBM idle-link scatter list into hardware.
 *
 * First chains the scatter buffers together in memory: each buffer's
 * DMA address is written into a link entry placed at the end of the
 * previous buffer.  Then the list geometry (buffer size, length) and
 * the head/tail pointers of the idle list are written to the WBM
 * registers, and finally the ring is enabled.
 */
void
qwx_hal_setup_link_idle_list(struct qwx_softc *sc,
    struct hal_wbm_idle_scatter_list *sbuf,
    uint32_t nsbufs, uint32_t tot_link_desc, uint32_t end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	/* Hardware expects the scatter buffer size in units of 64 bytes. */
	uint32_t reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	/* Link entry lives just past the usable area of buffer 0. */
	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	/* Chain buffer i into the tail of buffer i - 1. */
	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
		    HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
		    (uint64_t)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
		    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
		    BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	/* Describe the scatter list (buffer size, idle-list mode). */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
	    FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
	    FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
	    reg_scatter_buf_sz * nsbufs));
	/* Base address of the first scatter buffer (LSB + MSB parts). */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
	    sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_RING_BASE_MSB,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    (uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
	    BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	/*
	 * NOTE(review): HEAD_INFO_IX0 is deliberately written twice (last
	 * buffer first, then the first buffer); this mirrors the upstream
	 * ath11k programming sequence — presumably the second write latches
	 * the pointer.  Do not "simplify" this without hardware docs.
	 */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
	    (end_offset >> 2)));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG +
	    HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));

	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
	    FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
	    ((uint64_t)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0));
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
	    2 * tot_link_desc);

	/* Enable the SRNG */
	sc->ops.write32(sc,
	    HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(sc),
	    0x40);
}
8214 
8215 void
8216 qwx_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, uint32_t cookie,
8217     bus_addr_t paddr)
8218 {
8219 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
8220 	    (paddr & HAL_ADDR_LSB_REG_MASK));
8221 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
8222 	    ((uint64_t)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
8223 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
8224 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
8225 }
8226 
8227 void
8228 qwx_dp_scatter_idle_link_desc_cleanup(struct qwx_softc *sc)
8229 {
8230 	struct qwx_dp *dp = &sc->dp;
8231 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
8232 	int i;
8233 
8234 	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
8235 		if (slist[i].mem == NULL)
8236 			continue;
8237 
8238 		qwx_dmamem_free(sc->sc_dmat, slist[i].mem);
8239 		slist[i].mem = NULL;
8240 		slist[i].vaddr = NULL;
8241 		slist[i].paddr = 0L;
8242 	}
8243 }
8244 
/*
 * Allocate the WBM idle-list scatter buffers and populate them with one
 * link descriptor entry per HAL_LINK_DESC_SIZE chunk of the link
 * descriptor banks, then hand the resulting list to hardware via
 * qwx_hal_setup_link_idle_list().
 * Returns 0 on success, EINVAL if too many scatter buffers would be
 * needed, or ENOMEM (with all partial allocations freed).
 */
int
qwx_dp_scatter_idle_link_desc_setup(struct qwx_softc *sc, int size,
    uint32_t n_link_desc_bank, uint32_t n_link_desc, uint32_t last_bank_sz)
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	uint32_t n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int n_entries;
	bus_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	uint32_t end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
	    qwx_hal_srng_get_entrysize(sc, HAL_WBM_IDLE_LINK);
	num_scatter_buf = howmany(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return EINVAL;

	/* Allocate all scatter buffers up front. */
	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].mem = qwx_dmamem_alloc(sc->sc_dmat,
		    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, PAGE_SIZE);
		if (slist[i].mem == NULL) {
			ret = ENOMEM;
			goto err;
		}

		slist[i].vaddr = QWX_DMA_KVA(slist[i].mem);
		slist[i].paddr = QWX_DMA_DVA(slist[i].mem);
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	/*
	 * Walk every link descriptor bank in HAL_LINK_DESC_SIZE steps,
	 * writing one idle-list entry per chunk.  When the current
	 * scatter buffer has no room left, continue in the next one.
	 * The bank index i doubles as the cookie stored in each entry.
	 */
	for (i = 0; i < n_link_desc_bank; i++) {
		n_entries = DP_LINK_DESC_ALLOC_SIZE_THRESH / HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			qwx_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	/* Byte offset of the last written entry within its buffer. */
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
	    sizeof(struct hal_wbm_link_desc);
	qwx_hal_setup_link_idle_list(sc, slist, num_scatter_buf,
	    n_link_desc, end_offset);

	return 0;

err:
	qwx_dp_scatter_idle_link_desc_cleanup(sc);

	return ret;
}
8316 
/*
 * Claim the next free entry of a source ring.
 * Returns a pointer to the entry at the current head pointer and
 * advances both hp and reap_hp past it, or NULL if the ring is full
 * (i.e. advancing would collide with the cached tail pointer).
 * Caller must hold the ring lock.
 */
uint32_t *
qwx_hal_srng_src_get_next_entry(struct qwx_softc *sc, struct hal_srng *srng)
{
	uint32_t *desc;
	uint32_t next_hp;
#ifdef notyet
	lockdep_assert_held(&srng->lock);
#endif

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}
8350 
8351 uint32_t *
8352 qwx_hal_srng_src_reap_next(struct qwx_softc *sc, struct hal_srng *srng)
8353 {
8354 	uint32_t *desc;
8355 	uint32_t next_reap_hp;
8356 #ifdef notyet
8357 	lockdep_assert_held(&srng->lock);
8358 #endif
8359 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
8360 	    srng->ring_size;
8361 
8362 	if (next_reap_hp == srng->u.src_ring.cached_tp)
8363 		return NULL;
8364 
8365 	desc = srng->ring_base_vaddr + next_reap_hp;
8366 	srng->u.src_ring.reap_hp = next_reap_hp;
8367 
8368 	return desc;
8369 }
8370 
8371 int
8372 qwx_dp_link_desc_setup(struct qwx_softc *sc,
8373     struct dp_link_desc_bank *link_desc_banks, uint32_t ring_type,
8374     struct hal_srng *srng, uint32_t n_link_desc)
8375 {
8376 	uint32_t tot_mem_sz;
8377 	uint32_t n_link_desc_bank, last_bank_sz;
8378 	uint32_t entry_sz, n_entries;
8379 	uint64_t paddr;
8380 	uint32_t *desc;
8381 	int i, ret;
8382 
8383 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
8384 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
8385 
8386 	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
8387 		n_link_desc_bank = 1;
8388 		last_bank_sz = tot_mem_sz;
8389 	} else {
8390 		n_link_desc_bank = tot_mem_sz /
8391 		    (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN);
8392 		last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH -
8393 		    HAL_LINK_DESC_ALIGN);
8394 
8395 		if (last_bank_sz)
8396 			n_link_desc_bank += 1;
8397 	}
8398 
8399 	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
8400 		return EINVAL;
8401 
8402 	ret = qwx_dp_link_desc_bank_alloc(sc, link_desc_banks,
8403 	    n_link_desc_bank, last_bank_sz);
8404 	if (ret)
8405 		return ret;
8406 
8407 	/* Setup link desc idle list for HW internal usage */
8408 	entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);
8409 	tot_mem_sz = entry_sz * n_link_desc;
8410 
8411 	/* Setup scatter desc list when the total memory requirement is more */
8412 	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
8413 	    ring_type != HAL_RXDMA_MONITOR_DESC) {
8414 		ret = qwx_dp_scatter_idle_link_desc_setup(sc, tot_mem_sz,
8415 		    n_link_desc_bank, n_link_desc, last_bank_sz);
8416 		if (ret) {
8417 			printf("%s: failed to setup scatting idle list "
8418 			    "descriptor :%d\n",
8419 			    sc->sc_dev.dv_xname, ret);
8420 			goto fail_desc_bank_free;
8421 		}
8422 
8423 		return 0;
8424 	}
8425 #if 0
8426 	spin_lock_bh(&srng->lock);
8427 #endif
8428 	qwx_hal_srng_access_begin(sc, srng);
8429 
8430 	for (i = 0; i < n_link_desc_bank; i++) {
8431 		n_entries = (link_desc_banks[i].size) / HAL_LINK_DESC_SIZE;
8432 		paddr = link_desc_banks[i].paddr;
8433 		while (n_entries &&
8434 		    (desc = qwx_hal_srng_src_get_next_entry(sc, srng))) {
8435 			qwx_hal_set_link_desc_addr(
8436 			    (struct hal_wbm_link_desc *) desc, i, paddr);
8437 			n_entries--;
8438 			paddr += HAL_LINK_DESC_SIZE;
8439 		}
8440 	}
8441 
8442 	qwx_hal_srng_access_end(sc, srng);
8443 #if 0
8444 	spin_unlock_bh(&srng->lock);
8445 #endif
8446 
8447 	return 0;
8448 
8449 fail_desc_bank_free:
8450 	qwx_dp_link_desc_bank_free(sc, link_desc_banks);
8451 
8452 	return ret;
8453 }
8454 
/*
 * Free the DMA memory behind a data-path SRNG and clear its pointers.
 * Safe to call on a ring that was never set up.
 */
void
qwx_dp_srng_cleanup(struct qwx_softc *sc, struct dp_srng *ring)
{
	if (ring->mem == NULL)
		return;

#if 0
	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
#endif
		qwx_dmamem_free(sc->sc_dmat, ring->mem);

	ring->mem = NULL;
	ring->vaddr = NULL;
	ring->paddr = 0;
}
8472 
8473 void
8474 qwx_dp_shadow_stop_timer(struct qwx_softc *sc,
8475     struct qwx_hp_update_timer *update_timer)
8476 {
8477 	if (!sc->hw_params.supports_shadow_regs)
8478 		return;
8479 
8480 	timeout_del(&update_timer->timer);
8481 }
8482 
8483 void
8484 qwx_dp_shadow_start_timer(struct qwx_softc *sc, struct hal_srng *srng,
8485     struct qwx_hp_update_timer *update_timer)
8486 {
8487 #ifdef notyet
8488 	lockdep_assert_held(&srng->lock);
8489 #endif
8490 	if (!sc->hw_params.supports_shadow_regs)
8491 		return;
8492 
8493 	update_timer->started = 1;
8494 	update_timer->timer_tx_num = update_timer->tx_num;
8495 
8496 	timeout_add_msec(&update_timer->timer, update_timer->interval);
8497 }
8498 
8499 void
8500 qwx_dp_shadow_timer_handler(void *arg)
8501 {
8502 	struct qwx_hp_update_timer *update_timer = arg;
8503 	struct qwx_softc *sc = update_timer->sc;
8504 	struct hal_srng	*srng = &sc->hal.srng_list[update_timer->ring_id];
8505 	int s;
8506 
8507 #ifdef notyet
8508 	spin_lock_bh(&srng->lock);
8509 #endif
8510 	s = splnet();
8511 
8512 	/*
8513 	 * Update HP if there were no TX operations during the timeout interval,
8514 	 * and stop the timer. Timer will be restarted if more TX happens.
8515 	 */
8516 	if (update_timer->timer_tx_num != update_timer->tx_num) {
8517 		update_timer->timer_tx_num = update_timer->tx_num;
8518 		timeout_add_msec(&update_timer->timer, update_timer->interval);
8519 	} else {
8520 		update_timer->started = 0;
8521 		qwx_hal_srng_shadow_update_hp_tp(sc, srng);
8522 	}
8523 #ifdef notyet
8524 	spin_unlock_bh(&srng->lock);
8525 #endif
8526 	splx(s);
8527 }
8528 
8529 void
8530 qwx_dp_stop_shadow_timers(struct qwx_softc *sc)
8531 {
8532 	int i;
8533 
8534 	for (i = 0; i < sc->hw_params.max_tx_ring; i++)
8535 		qwx_dp_shadow_stop_timer(sc, &sc->dp.tx_ring_timer[i]);
8536 
8537 	qwx_dp_shadow_stop_timer(sc, &sc->dp.reo_cmd_timer);
8538 }
8539 
/*
 * Tear down all common data-path rings set up by
 * qwx_dp_srng_common_setup().  Timers are stopped first so no callback
 * can touch a ring while (or after) its memory is freed.
 */
void
qwx_dp_srng_common_cleanup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	int i;

	qwx_dp_stop_shadow_timers(sc);
	qwx_dp_srng_cleanup(sc, &dp->wbm_desc_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->tcl_status_ring);
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
		qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
	}
	qwx_dp_srng_cleanup(sc, &dp->reo_reinject_ring);
	qwx_dp_srng_cleanup(sc, &dp->rx_rel_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_except_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_cmd_ring);
	qwx_dp_srng_cleanup(sc, &dp->reo_status_ring);
}
8560 
8561 void
8562 qwx_hal_srng_get_params(struct qwx_softc *sc, struct hal_srng *srng,
8563     struct hal_srng_params *params)
8564 {
8565 	params->ring_base_paddr = srng->ring_base_paddr;
8566 	params->ring_base_vaddr = srng->ring_base_vaddr;
8567 	params->num_entries = srng->num_entries;
8568 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
8569 	params->intr_batch_cntr_thres_entries =
8570 		srng->intr_batch_cntr_thres_entries;
8571 	params->low_threshold = srng->u.src_ring.low_threshold;
8572 	params->msi_addr = srng->msi_addr;
8573 	params->msi_data = srng->msi_data;
8574 	params->flags = srng->flags;
8575 }
8576 
8577 void
8578 qwx_hal_tx_init_data_ring(struct qwx_softc *sc, struct hal_srng *srng)
8579 {
8580 	struct hal_srng_params params;
8581 	struct hal_tlv_hdr *tlv;
8582 	int i, entry_size;
8583 	uint8_t *desc;
8584 
8585 	memset(&params, 0, sizeof(params));
8586 
8587 	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_TCL_DATA);
8588 	qwx_hal_srng_get_params(sc, srng, &params);
8589 	desc = (uint8_t *)params.ring_base_vaddr;
8590 
8591 	for (i = 0; i < params.num_entries; i++) {
8592 		tlv = (struct hal_tlv_hdr *)desc;
8593 		tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
8594 		    FIELD_PREP(HAL_TLV_HDR_LEN,
8595 		    sizeof(struct hal_tcl_data_cmd));
8596 		desc += entry_size;
8597 	}
8598 }
8599 
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64

/* dscp_tid_map - Default DSCP-TID mapping
 *
 * Each group of eight consecutive DSCP values maps to one TID:
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static const uint8_t dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
8624 
/*
 * Program DSCP-to-TID mapping table 'id' into the TCL hardware.
 * The table must be unlocked for writing via the common control
 * register first, and locked again afterwards.
 */
void
qwx_hal_tx_set_dscp_tid_map(struct qwx_softc *sc, int id)
{
	uint32_t ctrl_reg_val;
	uint32_t addr;
	uint8_t hw_map_val[HAL_DSCP_TID_TBL_SIZE];
	int i;
	uint32_t value;
	int cnt = 0;

	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);

	/* Enable read/write access */
	ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);

	/* Register offset of table 'id' within the DSCP-TID map region. */
	addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
	       (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));

	/* Configure each DSCP-TID mapping in three bits there by configure
	 * three bytes in an iteration.
	 */
	for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
		/* Eight 3-bit TID fields = 24 bits = 3 bytes per step. */
		value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
				   dscp_tid_map[i]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
				   dscp_tid_map[i + 1]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
				   dscp_tid_map[i + 2]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
				   dscp_tid_map[i + 3]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
				   dscp_tid_map[i + 4]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
				   dscp_tid_map[i + 5]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
				   dscp_tid_map[i + 6]) |
			FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
				   dscp_tid_map[i + 7]);
		memcpy(&hw_map_val[cnt], (uint8_t *)&value, 3);
		cnt += 3;
	}

	/*
	 * NOTE(review): hw_map_val is a uint8_t array, so the uint32_t
	 * loads below are potentially unaligned; this matches upstream
	 * ath11k but assumes the platform tolerates it — verify on
	 * strict-alignment architectures.
	 */
	for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
		sc->ops.write32(sc, addr, *(uint32_t *)&hw_map_val[i]);
		addr += 4;
	}

	/* Disable read/write access */
	ctrl_reg_val = sc->ops.read32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG);
	ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
	sc->ops.write32(sc, HAL_SEQ_WCSS_UMAC_TCL_REG +
	    HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
}
8682 
8683 void
8684 qwx_dp_shadow_init_timer(struct qwx_softc *sc,
8685     struct qwx_hp_update_timer *update_timer,
8686     uint32_t interval, uint32_t ring_id)
8687 {
8688 	if (!sc->hw_params.supports_shadow_regs)
8689 		return;
8690 
8691 	update_timer->tx_num = 0;
8692 	update_timer->timer_tx_num = 0;
8693 	update_timer->sc = sc;
8694 	update_timer->ring_id = ring_id;
8695 	update_timer->interval = interval;
8696 	update_timer->init = 1;
8697 	timeout_set(&update_timer->timer, qwx_dp_shadow_timer_handler,
8698 	    update_timer);
8699 }
8700 
8701 void
8702 qwx_hal_reo_init_cmd_ring(struct qwx_softc *sc, struct hal_srng *srng)
8703 {
8704 	struct hal_srng_params params;
8705 	struct hal_tlv_hdr *tlv;
8706 	struct hal_reo_get_queue_stats *desc;
8707 	int i, cmd_num = 1;
8708 	int entry_size;
8709 	uint8_t *entry;
8710 
8711 	memset(&params, 0, sizeof(params));
8712 
8713 	entry_size = qwx_hal_srng_get_entrysize(sc, HAL_REO_CMD);
8714 	qwx_hal_srng_get_params(sc, srng, &params);
8715 	entry = (uint8_t *)params.ring_base_vaddr;
8716 
8717 	for (i = 0; i < params.num_entries; i++) {
8718 		tlv = (struct hal_tlv_hdr *)entry;
8719 		desc = (struct hal_reo_get_queue_stats *)tlv->value;
8720 		desc->cmd.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER,
8721 		    cmd_num++);
8722 		entry += entry_size;
8723 	}
8724 }
8725 
8726 int
8727 qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath11k_hal_reo_cmd *cmd)
8728 {
8729 	struct hal_reo_get_queue_stats *desc;
8730 
8731 	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
8732 	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
8733 
8734 	desc = (struct hal_reo_get_queue_stats *)tlv->value;
8735 
8736 	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
8737 	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
8738 		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
8739 
8740 	desc->queue_addr_lo = cmd->addr_lo;
8741 	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
8742 	    cmd->addr_hi);
8743 	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
8744 		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
8745 
8746 	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
8747 }
8748 
/*
 * Encode a REO FLUSH_CACHE command into the given TLV slot.
 * If the caller wants blocking behavior (FLUSH_BLOCK_LATER), a free
 * block-resource slot is claimed (ffz finds the first zero bit in the
 * in-use bitmap); ENOSPC is returned when none is free.
 * Otherwise returns the command number pre-assigned to this ring entry.
 */
int
qwx_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_flush_cache *desc;
	uint8_t avail_slot = ffz(hal->avail_blk_resource);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
			return ENOSPC;

		/* Remember which slot this blocking flush will occupy. */
		hal->current_blk_index = avail_slot;
	}

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_flush_cache *)tlv->value;

	/* Request a completion status only if the caller asked for one. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	/* Cache line address to flush, split into low word and high bits. */
	desc->cache_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
	    cmd->addr_hi);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
		desc->info0 |=
		    FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
		    avail_slot);
	}

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
8794 
/*
 * Encode a REO UPDATE_RX_REO_QUEUE command into the given TLV slot.
 * info0 carries one "update this field" enable bit per queue attribute
 * (from cmd->upd0); info1/info2 carry the new attribute values (from
 * cmd->upd1/upd2).  Returns the command number pre-assigned to this
 * ring entry.
 */
int
qwx_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_update_rx_queue *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
	    FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_update_rx_queue *)tlv->value;

	/* Request a completion status only if the caller asked for one. */
	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	/* Target REO queue address and per-field update-enable bits. */
	desc->queue_addr_lo = cmd->addr_lo;
	desc->info0 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
		    cmd->addr_hi) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
		    !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));

	/* New values for the flag-type attributes selected above. */
	desc->info1 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
		    cmd->rx_queue_num) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
		    FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
		    FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
		    !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));

	/* Translate PN size in bits to the hardware enum. */
	if (cmd->pn_size == 24)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
	else if (cmd->pn_size == 48)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
	else if (cmd->pn_size == 128)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;

	/* Hardware encodes the BA window as size - 1; minimum size is 2. */
	if (cmd->ba_window_size < 1)
		cmd->ba_window_size = 1;

	if (cmd->ba_window_size == 1)
		cmd->ba_window_size++;

	desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
	    cmd->ba_window_size - 1) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
	        FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
	    FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
	        !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
8918 
/*
 * Enqueue a REO command onto the REO command ring and kick the shadow
 * update timer.  On success the (positive) command number of the ring
 * entry is returned; on failure an errno (ENOBUFS if the ring is full,
 * ENOTSUP/EINVAL for unsupported or unknown command types).
 *
 * NOTE(review): command numbers and positive errnos share the same
 * value space here, so a caller cannot reliably distinguish them —
 * verify how callers interpret this return value.
 */
int
qwx_hal_reo_cmd_send(struct qwx_softc *sc, struct hal_srng *srng,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_tlv_hdr *reo_desc;
	int ret;
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	reo_desc = (struct hal_tlv_hdr *)qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!reo_desc) {
		ret = ENOBUFS;
		goto out;
	}

	/* Encode the command in place; each encoder returns its number. */
	switch (type) {
	case HAL_REO_CMD_GET_QUEUE_STATS:
		ret = qwx_hal_reo_cmd_queue_stats(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_CACHE:
		ret = qwx_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd);
		break;
	case HAL_REO_CMD_UPDATE_RX_QUEUE:
		ret = qwx_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_QUEUE:
	case HAL_REO_CMD_UNBLOCK_CACHE:
	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
		printf("%s: unsupported reo command %d\n",
		   sc->sc_dev.dv_xname, type);
		ret = ENOTSUP;
		break;
	default:
		printf("%s: unknown reo command %d\n",
		    sc->sc_dev.dv_xname, type);
		ret = EINVAL;
		break;
	}

	qwx_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer);
out:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
/*
 * Set up the SRNG rings shared by host and firmware for the data path:
 * the WBM release ring, TCL command/status rings, per-TX-ring TCL
 * data/completion rings, and the various REO rings.  On any failure
 * every ring set up so far is torn down via qwx_dp_srng_common_cleanup().
 * Returns 0 on success or a non-zero error code.
 */
int
qwx_dp_srng_common_setup(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct hal_srng *srng;
	int i, ret;
	uint8_t tcl_num, wbm_num;

	ret = qwx_dp_srng_setup(sc, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE,
	    0, 0, DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up wbm2sw_release ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_cmd_ring, HAL_TCL_CMD,
	    0, 0, DP_TCL_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->tcl_status_ring, HAL_TCL_STATUS,
	    0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up tcl_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/*
	 * One TCL data ring and one WBM completion ring per TX ring;
	 * the TCL/WBM ring numbers come from the hw-specific map.
	 */
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_hal_params *hal_params;

		hal_params = sc->hw_params.hal_params;
		tcl_num = hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
		    HAL_TCL_DATA, tcl_num, 0, sc->hw_params.tx_ring_size);
		if (ret) {
			printf("%s: failed to set up tcl_data ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
		    HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE);
		if (ret) {
			printf("%s: failed to set up tcl_comp ring (%d) :%d\n",
			    sc->sc_dev.dv_xname, i, ret);
			goto err;
		}

		srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		qwx_hal_tx_init_data_ring(sc, srng);

		qwx_dp_shadow_init_timer(sc, &dp->tx_ring_timer[i],
		    ATH11K_SHADOW_DP_TIMER_INTERVAL,
		    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_reinject_ring, HAL_REO_REINJECT,
	    0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_reinject ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
	    DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up rx_rel ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_except_ring, HAL_REO_EXCEPTION,
	    0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_exception ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	ret = qwx_dp_srng_setup(sc, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0,
	    DP_REO_CMD_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_cmd ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	srng = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	qwx_hal_reo_init_cmd_ring(sc, srng);

	qwx_dp_shadow_init_timer(sc, &dp->reo_cmd_timer,
	     ATH11K_SHADOW_CTRL_TIMER_INTERVAL, dp->reo_cmd_ring.ring_id);

	ret = qwx_dp_srng_setup(sc, &dp->reo_status_ring, HAL_REO_STATUS,
	    0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		printf("%s: failed to set up reo_status ring :%d\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}
#ifdef notyet
	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	sc->hw_params.hw_ops->reo_setup(sc);
#endif
	return 0;

err:
	qwx_dp_srng_common_cleanup(sc);

	return ret;
}
9088 
9089 void
9090 qwx_dp_link_desc_cleanup(struct qwx_softc *sc,
9091     struct dp_link_desc_bank *desc_bank, uint32_t ring_type,
9092     struct dp_srng *ring)
9093 {
9094 	qwx_dp_link_desc_bank_free(sc, desc_bank);
9095 
9096 	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
9097 		qwx_dp_srng_cleanup(sc, ring);
9098 		qwx_dp_scatter_idle_link_desc_cleanup(sc);
9099 	}
9100 }
9101 
9102 
9103 int
9104 qwx_dp_alloc(struct qwx_softc *sc)
9105 {
9106 	struct qwx_dp *dp = &sc->dp;
9107 	struct hal_srng *srng = NULL;
9108 	size_t size = 0;
9109 	uint32_t n_link_desc = 0;
9110 	int ret;
9111 	int i;
9112 
9113 	dp->sc = sc;
9114 
9115 	TAILQ_INIT(&dp->reo_cmd_list);
9116 	TAILQ_INIT(&dp->reo_cmd_cache_flush_list);
9117 #if 0
9118 	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
9119 	spin_lock_init(&dp->reo_cmd_lock);
9120 #endif
9121 
9122 	dp->reo_cmd_cache_flush_count = 0;
9123 
9124 	ret = qwx_wbm_idle_ring_setup(sc, &n_link_desc);
9125 	if (ret) {
9126 		printf("%s: failed to setup wbm_idle_ring: %d\n",
9127 		    sc->sc_dev.dv_xname, ret);
9128 		return ret;
9129 	}
9130 
9131 	srng = &sc->hal.srng_list[dp->wbm_idle_ring.ring_id];
9132 
9133 	ret = qwx_dp_link_desc_setup(sc, dp->link_desc_banks,
9134 	    HAL_WBM_IDLE_LINK, srng, n_link_desc);
9135 	if (ret) {
9136 		printf("%s: failed to setup link desc: %d\n",
9137 		   sc->sc_dev.dv_xname, ret);
9138 		return ret;
9139 	}
9140 
9141 	ret = qwx_dp_srng_common_setup(sc);
9142 	if (ret)
9143 		goto fail_link_desc_cleanup;
9144 
9145 	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
9146 
9147 	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
9148 #if 0
9149 		idr_init(&dp->tx_ring[i].txbuf_idr);
9150 		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
9151 #endif
9152 		dp->tx_ring[i].tcl_data_ring_id = i;
9153 
9154 		dp->tx_ring[i].tx_status_head = 0;
9155 		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
9156 		dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
9157 		    M_NOWAIT | M_ZERO);
9158 		if (!dp->tx_ring[i].tx_status) {
9159 			ret = ENOMEM;
9160 			goto fail_cmn_srng_cleanup;
9161 		}
9162 	}
9163 
9164 	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
9165 		qwx_hal_tx_set_dscp_tid_map(sc, i);
9166 
9167 	/* Init any SOC level resource for DP */
9168 
9169 	return 0;
9170 fail_cmn_srng_cleanup:
9171 	qwx_dp_srng_common_cleanup(sc);
9172 fail_link_desc_cleanup:
9173 	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks, HAL_WBM_IDLE_LINK,
9174 	    &dp->wbm_idle_ring);
9175 
9176 	return ret;
9177 }
9178 
9179 void
9180 qwx_dp_free(struct qwx_softc *sc)
9181 {
9182 	struct qwx_dp *dp = &sc->dp;
9183 	int i;
9184 
9185 	qwx_dp_link_desc_cleanup(sc, dp->link_desc_banks,
9186 	    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
9187 
9188 	qwx_dp_srng_common_cleanup(sc);
9189 #ifdef notyet
9190 	ath11k_dp_reo_cmd_list_cleanup(ab);
9191 #endif
9192 	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
9193 #if 0
9194 		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
9195 		idr_for_each(&dp->tx_ring[i].txbuf_idr,
9196 			     ath11k_dp_tx_pending_cleanup, ab);
9197 		idr_destroy(&dp->tx_ring[i].txbuf_idr);
9198 		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
9199 #endif
9200 		free(dp->tx_ring[i].tx_status, M_DEVBUF,
9201 		    sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE);
9202 		dp->tx_ring[i].tx_status = NULL;
9203 	}
9204 
9205 	/* Deinit any SOC level resource */
9206 }
9207 
/*
 * Placeholder for QMI cold-boot calibration handling; not yet
 * implemented in this driver.
 */
void
qwx_qmi_process_coldboot_calibration(struct qwx_softc *sc)
{
	printf("%s not implemented\n", __func__);
}
9213 
9214 int
9215 qwx_qmi_wlanfw_wlan_ini_send(struct qwx_softc *sc, int enable)
9216 {
9217 	int ret;
9218 	struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
9219 
9220 	req.enablefwlog_valid = 1;
9221 	req.enablefwlog = enable ? 1 : 0;
9222 
9223 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_INI_REQ_V01,
9224 	    QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
9225 	    qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req, sizeof(req));
9226 	if (ret) {
9227 		printf("%s: failed to send wlan ini request, err = %d\n",
9228 		    sc->sc_dev.dv_xname, ret);
9229 		return ret;
9230 	}
9231 
9232 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
9233 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
9234 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxini",
9235 		    SEC_TO_NSEC(1));
9236 		if (ret) {
9237 			printf("%s: wlan ini request timeout\n",
9238 			    sc->sc_dev.dv_xname);
9239 			return ret;
9240 		}
9241 	}
9242 
9243 	return 0;
9244 }
9245 
/*
 * Send the WLAN config request to firmware via QMI and wait for the
 * response.  The request carries the host's copy engine (CE) pipe
 * configuration, the service-to-CE mapping, and (when the hardware
 * supports it) the shadow register v2 table.
 * Returns 0 on success or a non-zero error code.
 */
int
qwx_qmi_wlanfw_wlan_cfg_send(struct qwx_softc *sc)
{
	struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
	const struct ce_pipe_config *ce_cfg;
	const struct service_to_pipe *svc_cfg;
	int ret = 0, pipe_num;

	ce_cfg	= sc->hw_params.target_ce_config;
	svc_cfg	= sc->hw_params.svc_to_ce_map;

	/* Heap-allocated; the request struct is presumably too large
	 * for the kernel stack. */
	req = malloc(sizeof(*req), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!req)
		return ENOMEM;

	req->host_version_valid = 1;
	strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
	    sizeof(req->host_version));

	req->tgt_cfg_valid = 1;
	/* This is number of CE configs */
	req->tgt_cfg_len = sc->hw_params.target_ce_count;
	for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
		req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
		req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
		req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
		req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
		req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
	}

	req->svc_cfg_valid = 1;
	/* This is number of Service/CE configs */
	req->svc_cfg_len = sc->hw_params.svc_to_ce_map_len;
	for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
		req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
		req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
		req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
	}
	req->shadow_reg_valid = 0;

	/* set shadow v2 configuration */
	if (sc->hw_params.supports_shadow_regs) {
		req->shadow_reg_v2_valid = 1;
		/* Clamp to the maximum number of entries the QMI
		 * message can carry. */
		req->shadow_reg_v2_len = MIN(sc->qmi_ce_cfg.shadow_reg_v2_len,
		    QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
		memcpy(&req->shadow_reg_v2, sc->qmi_ce_cfg.shadow_reg_v2,
		       sizeof(uint32_t) * req->shadow_reg_v2_len);
	} else {
		req->shadow_reg_v2_valid = 0;
	}

	DNPRINTF(QWX_D_QMI, "%s: wlan cfg req\n", __func__);

	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_CFG_REQ_V01,
	    QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
	    qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req, sizeof(*req));
	if (ret) {
		printf("%s: failed to send wlan config request: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/* Sleep until another context posts the QMI response into
	 * sc->qmi_resp and wakes us; time out after one second. */
	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxwlancfg",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: wlan config request failed\n",
			    sc->sc_dev.dv_xname);
			goto out;
		}
	}
out:
	free(req, M_DEVBUF, sizeof(*req));
	return ret;
}
9322 
9323 int
9324 qwx_qmi_wlanfw_mode_send(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
9325 {
9326 	int ret;
9327 	struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
9328 
9329 	req.mode = mode;
9330 	req.hw_debug_valid = 1;
9331 	req.hw_debug = 0;
9332 
9333 	ret = qwx_qmi_send_request(sc, QMI_WLANFW_WLAN_MODE_REQ_V01,
9334 	    QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
9335 	    qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req, sizeof(req));
9336 	if (ret) {
9337 		printf("%s: failed to send wlan mode request, err = %d\n",
9338 		    sc->sc_dev.dv_xname, ret);
9339 		return ret;
9340 	}
9341 
9342 	sc->qmi_resp.result = QMI_RESULT_FAILURE_V01;
9343 	while (sc->qmi_resp.result != QMI_RESULT_SUCCESS_V01) {
9344 		ret = tsleep_nsec(&sc->qmi_resp, 0, "qwxfwmode",
9345 		    SEC_TO_NSEC(1));
9346 		if (ret) {
9347 			if (mode == ATH11K_FIRMWARE_MODE_OFF)
9348 				return 0;
9349 			printf("%s: wlan mode request timeout\n",
9350 			    sc->sc_dev.dv_xname);
9351 			return ret;
9352 		}
9353 	}
9354 
9355 	return 0;
9356 }
9357 
9358 int
9359 qwx_qmi_firmware_start(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
9360 {
9361 	int ret;
9362 
9363 	DPRINTF("%s: firmware start\n", sc->sc_dev.dv_xname);
9364 
9365 	if (sc->hw_params.fw_wmi_diag_event) {
9366 		ret = qwx_qmi_wlanfw_wlan_ini_send(sc, 1);
9367 		if (ret < 0) {
9368 			printf("%s: qmi failed to send wlan fw ini: %d\n",
9369 			    sc->sc_dev.dv_xname, ret);
9370 			return ret;
9371 		}
9372 	}
9373 
9374 	ret = qwx_qmi_wlanfw_wlan_cfg_send(sc);
9375 	if (ret) {
9376 		printf("%s: qmi failed to send wlan cfg: %d\n",
9377 		    sc->sc_dev.dv_xname, ret);
9378 		return ret;
9379 	}
9380 
9381 	ret = qwx_qmi_wlanfw_mode_send(sc, mode);
9382 	if (ret) {
9383 		printf("%s: qmi failed to send wlan fw mode: %d\n",
9384 		    sc->sc_dev.dv_xname, ret);
9385 		return ret;
9386 	}
9387 
9388 	return 0;
9389 }
9390 
9391 void
9392 qwx_qmi_firmware_stop(struct qwx_softc *sc)
9393 {
9394 	int ret;
9395 
9396 	ret = qwx_qmi_wlanfw_mode_send(sc, ATH11K_FIRMWARE_MODE_OFF);
9397 	if (ret) {
9398 		printf("%s: qmi failed to send wlan mode off: %d\n",
9399 		    sc->sc_dev.dv_xname, ret);
9400 	}
9401 }
9402 
9403 int
9404 qwx_core_start_firmware(struct qwx_softc *sc, enum ath11k_firmware_mode mode)
9405 {
9406 	int ret;
9407 
9408 	qwx_ce_get_shadow_config(sc, &sc->qmi_ce_cfg.shadow_reg_v2,
9409 	    &sc->qmi_ce_cfg.shadow_reg_v2_len);
9410 
9411 	ret = qwx_qmi_firmware_start(sc, mode);
9412 	if (ret) {
9413 		printf("%s: failed to send firmware start: %d\n",
9414 		    sc->sc_dev.dv_xname, ret);
9415 		return ret;
9416 	}
9417 
9418 	return ret;
9419 }
9420 
9421 int
9422 qwx_wmi_pdev_attach(struct qwx_softc *sc, uint8_t pdev_id)
9423 {
9424 	struct qwx_pdev_wmi *wmi_handle;
9425 
9426 	if (pdev_id >= sc->hw_params.max_radios)
9427 		return EINVAL;
9428 
9429 	wmi_handle = &sc->wmi.wmi[pdev_id];
9430 	wmi_handle->wmi = &sc->wmi;
9431 
9432 	wmi_handle->tx_ce_desc = 1;
9433 
9434 	return 0;
9435 }
9436 
/*
 * Tear down WMI state; currently only the DBring capability list
 * needs freeing.
 */
void
qwx_wmi_detach(struct qwx_softc *sc)
{
	qwx_wmi_free_dbring_caps(sc);
}
9442 
9443 int
9444 qwx_wmi_attach(struct qwx_softc *sc)
9445 {
9446 	int ret;
9447 
9448 	ret = qwx_wmi_pdev_attach(sc, 0);
9449 	if (ret)
9450 		return ret;
9451 
9452 	sc->wmi.sc = sc;
9453 	sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
9454 	sc->wmi.tx_credits = 1;
9455 
9456 	/* It's overwritten when service_ext_ready is handled */
9457 	if (sc->hw_params.single_pdev_only &&
9458 	    sc->hw_params.num_rxmda_per_pdev > 1)
9459 		sc->wmi.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
9460 
9461 	return 0;
9462 }
9463 
9464 void
9465 qwx_wmi_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
9466 {
9467 	struct qwx_pdev_wmi *wmi = NULL;
9468 	uint32_t i;
9469 	uint8_t wmi_ep_count;
9470 	uint8_t eid;
9471 
9472 	eid = (uintptr_t)m->m_pkthdr.ph_cookie;
9473 	m_freem(m);
9474 
9475 	if (eid >= ATH11K_HTC_EP_COUNT)
9476 		return;
9477 
9478 	wmi_ep_count = sc->htc.wmi_ep_count;
9479 	if (wmi_ep_count > sc->hw_params.max_radios)
9480 		return;
9481 
9482 	for (i = 0; i < sc->htc.wmi_ep_count; i++) {
9483 		if (sc->wmi.wmi[i].eid == eid) {
9484 			wmi = &sc->wmi.wmi[i];
9485 			break;
9486 		}
9487 	}
9488 
9489 	if (wmi)
9490 		wakeup(&wmi->tx_ce_desc);
9491 }
9492 
/*
 * TLV iterator callback which records firmware-advertised WMI services
 * in sc->wmi.svc_map.  Extended services (ids starting at
 * WMI_MAX_SERVICE) arrive inside a WMI_TAG_SERVICE_AVAILABLE_EVENT;
 * ext2 services (ids starting at WMI_MAX_EXT_SERVICE) arrive as a raw
 * WMI_TAG_ARRAY_UINT32 payload.  In both cases only the low
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 bits of each 32-bit word carry
 * service bits.  Always returns 0.
 */
int
qwx_wmi_tlv_services_parser(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	const struct wmi_service_available_event *ev;
	uint32_t *wmi_ext2_service_bitmap;
	int i, j;

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		/* j counts service ids; the inner loop consumes one
		 * 32-bit bitmap word per iteration of i. */
		for (i = 0, j = WMI_MAX_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT_SERVICE;
		    i++) {
			do {
				if (ev->wmi_service_segment_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWX_D_WMI,
		    "%s: wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    ev->wmi_service_segment_bitmap[0],
		    ev->wmi_service_segment_bitmap[1],
		    ev->wmi_service_segment_bitmap[2],
		    ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (uint32_t *)ptr;
		/* Same decoding as above for the ext2 service range. */
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		    i < WMI_SERVICE_SEGMENT_BM_SIZE32 &&
		    j < WMI_MAX_EXT2_SERVICE;
		    i++) {
			do {
				if (wmi_ext2_service_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					setbit(sc->wmi.svc_map, j);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		DNPRINTF(QWX_D_WMI,
		    "%s: wmi_ext2_service__bitmap  0:0x%04x, 1:0x%04x, "
		    "2:0x%04x, 3:0x%04x\n", __func__,
		    wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
		    wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
		break;
	}

	return 0;
}
9546 
/*
 * Minimum payload length per WMI TLV tag, enforced by
 * qwx_wmi_tlv_iter() before a TLV payload is handed to a parser.
 * A min_len of zero (or an absent table entry) disables the check
 * for that tag.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= {.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_ev) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
9613 
/*
 * Walk a buffer of WMI TLVs, validating each TLV header and the
 * per-tag minimum payload length (wmi_tlv_policies) before passing
 * the payload to the caller's iterator callback.  Returns 0 when the
 * buffer was fully consumed, EINVAL on malformed input, or the first
 * non-zero value returned by the iterator.
 * Note: the pointer arithmetic on 'ptr' (a void *) relies on the GCC
 * extension that treats sizeof(void) as 1.
 */
int
qwx_wmi_tlv_iter(struct qwx_softc *sc, const void *ptr, size_t len,
    int (*iter)(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data), void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	uint16_t tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Need at least a full TLV header. */
		if (len < sizeof(*tlv)) {
			printf("%s: wmi tlv parse failure at byte %zd "
			    "(%zu bytes left, %zu expected)\n", __func__,
			    ptr - begin, len, sizeof(*tlv));
			return EINVAL;
		}

		tlv = ptr;
		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* The claimed payload must fit in the remaining buffer. */
		if (tlv_len > len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%zu bytes left, %u expected)\n",
			    __func__, tlv_tag, ptr - begin, len, tlv_len);
			return EINVAL;
		}

		/* Enforce the per-tag minimum length, if one is set. */
		if (tlv_tag < nitems(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			printf("%s: wmi tlv parse failure of tag %u "
			    "at byte %zd (%u bytes is less than "
			    "min length %zu)\n", __func__,
			    tlv_tag, ptr - begin, tlv_len,
			    wmi_tlv_policies[tlv_tag].min_len);
			return EINVAL;
		}

		ret = iter(sc, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
9666 
9667 int
9668 qwx_pull_service_ready_tlv(struct qwx_softc *sc, const void *evt_buf,
9669     struct ath11k_targ_cap *cap)
9670 {
9671 	const struct wmi_service_ready_event *ev = evt_buf;
9672 
9673 	if (!ev)
9674 		return EINVAL;
9675 
9676 	cap->phy_capability = ev->phy_capability;
9677 	cap->max_frag_entry = ev->max_frag_entry;
9678 	cap->num_rf_chains = ev->num_rf_chains;
9679 	cap->ht_cap_info = ev->ht_cap_info;
9680 	cap->vht_cap_info = ev->vht_cap_info;
9681 	cap->vht_supp_mcs = ev->vht_supp_mcs;
9682 	cap->hw_min_tx_power = ev->hw_min_tx_power;
9683 	cap->hw_max_tx_power = ev->hw_max_tx_power;
9684 	cap->sys_cap_info = ev->sys_cap_info;
9685 	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
9686 	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
9687 	cap->max_num_scan_channels = ev->max_num_scan_channels;
9688 	cap->max_supported_macs = ev->max_supported_macs;
9689 	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
9690 	cap->txrx_chainmask = ev->txrx_chainmask;
9691 	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
9692 	cap->num_msdu_desc = ev->num_msdu_desc;
9693 
9694 	return 0;
9695 }
9696 
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
void
qwx_wmi_service_bitmap_copy(struct qwx_pdev_wmi *wmi,
    const uint32_t *wmi_svc_bm)
{
	int i, j = 0;

	for (i = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		/* Consume WMI_SERVICE_BITS_IN_SIZE32 service bits from
		 * each bitmap word; j indexes the linear service map. */
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				setbit(wmi->wmi->svc_map, j);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
9714 
9715 int
9716 qwx_wmi_tlv_svc_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
9717     const void *ptr, void *data)
9718 {
9719 	struct wmi_tlv_svc_ready_parse *svc_ready = data;
9720 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
9721 	uint16_t expect_len;
9722 
9723 	switch (tag) {
9724 	case WMI_TAG_SERVICE_READY_EVENT:
9725 		if (qwx_pull_service_ready_tlv(sc, ptr, &sc->target_caps))
9726 			return EINVAL;
9727 		break;
9728 
9729 	case WMI_TAG_ARRAY_UINT32:
9730 		if (!svc_ready->wmi_svc_bitmap_done) {
9731 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(uint32_t);
9732 			if (len < expect_len) {
9733 				printf("%s: invalid len %d for the tag 0x%x\n",
9734 				    __func__, len, tag);
9735 				return EINVAL;
9736 			}
9737 
9738 			qwx_wmi_service_bitmap_copy(wmi_handle, ptr);
9739 
9740 			svc_ready->wmi_svc_bitmap_done = 1;
9741 		}
9742 		break;
9743 	default:
9744 		break;
9745 	}
9746 
9747 	return 0;
9748 }
9749 
9750 void
9751 qwx_service_ready_event(struct qwx_softc *sc, struct mbuf *m)
9752 {
9753 	struct wmi_tlv_svc_ready_parse svc_ready = { };
9754 	int ret;
9755 
9756 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
9757 	    qwx_wmi_tlv_svc_rdy_parse, &svc_ready);
9758 	if (ret) {
9759 		printf("%s: failed to parse tlv %d\n", __func__, ret);
9760 		return;
9761 	}
9762 
9763 	DNPRINTF(QWX_D_WMI, "%s: event service ready\n", __func__);
9764 }
9765 
9766 int
9767 qwx_pull_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle, const void *ptr,
9768     struct ath11k_service_ext_param *param)
9769 {
9770 	const struct wmi_service_ready_ext_event *ev = ptr;
9771 
9772 	if (!ev)
9773 		return EINVAL;
9774 
9775 	/* Move this to host based bitmap */
9776 	param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
9777 	param->default_fw_config_bits =	ev->default_fw_config_bits;
9778 	param->he_cap_info = ev->he_cap_info;
9779 	param->mpdu_density = ev->mpdu_density;
9780 	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
9781 	memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
9782 
9783 	return 0;
9784 }
9785 
/*
 * Extract the MAC/PHY capabilities for one PHY of one hardware mode
 * from the extended service-ready data and record them in the given
 * pdev's capability structure (and in sc->target_pdev_ids).
 * 'phy_idx' is the index into the flat wmi_mac_phy_caps array: the
 * PHYs of all hw modes preceding 'hw_mode_id' are skipped, then
 * 'phy_id' selects the PHY within the matching mode.
 * Returns EINVAL if the hw mode or phy id cannot be resolved or the
 * PHY supports neither the 2GHz nor the 5GHz band; 0 on success.
 */
int
qwx_pull_mac_phy_cap_svc_ready_ext(struct qwx_pdev_wmi *wmi_handle,
    struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
    struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
    struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
    struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
    uint8_t hw_mode_id, uint8_t phy_id, struct qwx_pdev *pdev)
{
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct qwx_softc *sc = wmi_handle->wmi->sc;
	struct ath11k_band_cap *cap_band;
	struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
	uint32_t phy_map;
	uint32_t hw_idx, phy_idx = 0;

	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
		return EINVAL;

	/* Count the PHYs of all hw modes before the requested one. */
	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
			break;

		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
		while (phy_map) {
			phy_map >>= 1;
			phy_idx++;
		}
	}

	if (hw_idx == hw_caps->num_hw_modes)
		return EINVAL;

	phy_idx += phy_id;
	if (phy_id >= hal_reg_caps->num_phy)
		return EINVAL;

	mac_phy_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = mac_phy_caps->pdev_id;
	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
	sc->target_pdev_ids[sc->target_pdev_count].supported_bands =
	    mac_phy_caps->supported_bands;
	sc->target_pdev_ids[sc->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
	sc->target_pdev_count++;

	if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
	    !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
		return EINVAL;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
	}

	/* NOTE: if both bands are supported, the 5GHz chainmasks below
	 * overwrite the 2GHz ones set above. */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
		pdev_cap->nss_ratio_enabled =
		    WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
		    WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift = ffs(pdev_cap->tx_chain_mask);
	pdev_cap->rx_chain_mask_shift = ffs(pdev_cap->rx_chain_mask);

	/* Per-band capability snapshots: band[0] is 2GHz, band[1] is 5GHz. */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[0];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
		memcpy(cap_band->he_cap_phy_info,
		    &mac_phy_caps->he_cap_phy_info_2g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
		    sizeof(struct ath11k_ppe_threshold));
	}

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[1];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		    sizeof(uint32_t) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		    sizeof(struct ath11k_ppe_threshold));
#if 0
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));
#endif
	}

	return 0;
}
9911 
/*
 * Parse the SOC HAL register capabilities TLV: for each PHY of the
 * preferred hardware mode, extract the MAC/PHY capabilities into a
 * pdev.  Sets sc->num_radios (capped at MAX_RADIOS) and resets
 * sc->target_pdev_count before counting.
 * Returns 0 on success or the error from the per-PHY extraction.
 */
int
qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(struct qwx_softc *sc, uint16_t len,
    const void *ptr, void *data)
{
	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	uint8_t hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	uint32_t phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	sc->num_radios = 0;
	sc->target_pdev_count = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	/* One bit per PHY in the preferred hw mode's phy_id_map. */
	while (phy_id_map && sc->num_radios < MAX_RADIOS) {
		ret = qwx_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
		    svc_rdy_ext->hw_caps,
		    svc_rdy_ext->hw_mode_caps,
		    svc_rdy_ext->soc_hal_reg_caps,
		    svc_rdy_ext->mac_phy_caps,
		    hw_mode_id, sc->num_radios, &sc->pdevs[pdev_index]);
		if (ret) {
			printf("%s: failed to extract mac caps, idx: %d\n",
			    __func__, sc->num_radios);
			return ret;
		}

		sc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (sc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = sc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (sc->hw_params.single_pdev_only) {
		sc->num_radios = 1;
		sc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
9966 
9967 int
9968 qwx_wmi_tlv_hw_mode_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
9969     const void *ptr, void *data)
9970 {
9971 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
9972 	struct wmi_hw_mode_capabilities *hw_mode_cap;
9973 	uint32_t phy_map = 0;
9974 
9975 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
9976 		return EPROTO;
9977 
9978 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
9979 		return ENOBUFS;
9980 
9981 	hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
9982 	    hw_mode_id);
9983 	svc_rdy_ext->n_hw_mode_caps++;
9984 
9985 	phy_map = hw_mode_cap->phy_id_map;
9986 	while (phy_map) {
9987 		svc_rdy_ext->tot_phy_id++;
9988 		phy_map = phy_map >> 1;
9989 	}
9990 
9991 	return 0;
9992 }
9993 
/*
 * Priority table for HW mode selection, indexed by HW mode id.
 * PRIMAP(X) expands to [X] = X_PRI via designated initializers, so
 * qwx_hw_mode_pri_map[mode] yields that mode's priority value
 * (lower value wins; see qwx_wmi_tlv_hw_mode_caps()).
 */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int qwx_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
10007 
10008 int
10009 qwx_wmi_tlv_hw_mode_caps(struct qwx_softc *sc, uint16_t len,
10010     const void *ptr, void *data)
10011 {
10012 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10013 	struct wmi_hw_mode_capabilities *hw_mode_caps;
10014 	enum wmi_host_hw_mode_config_type mode, pref;
10015 	uint32_t i;
10016 	int ret;
10017 
10018 	svc_rdy_ext->n_hw_mode_caps = 0;
10019 	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
10020 
10021 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
10022 	    qwx_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext);
10023 	if (ret) {
10024 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10025 		return ret;
10026 	}
10027 
10028 	i = 0;
10029 	while (i < svc_rdy_ext->n_hw_mode_caps) {
10030 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
10031 		mode = hw_mode_caps->hw_mode_id;
10032 		pref = sc->wmi.preferred_hw_mode;
10033 
10034 		if (qwx_hw_mode_pri_map[mode] < qwx_hw_mode_pri_map[pref]) {
10035 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
10036 			sc->wmi.preferred_hw_mode = mode;
10037 		}
10038 		i++;
10039 	}
10040 
10041 	DNPRINTF(QWX_D_WMI, "%s: preferred_hw_mode: %d\n", __func__,
10042 	    sc->wmi.preferred_hw_mode);
10043 	if (sc->wmi.preferred_hw_mode >= WMI_HOST_HW_MODE_MAX)
10044 		return EINVAL;
10045 
10046 	return 0;
10047 }
10048 
10049 int
10050 qwx_wmi_tlv_mac_phy_caps_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
10051     const void *ptr, void *data)
10052 {
10053 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10054 
10055 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
10056 		return EPROTO;
10057 
10058 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
10059 		return ENOBUFS;
10060 
10061 	len = MIN(len, sizeof(struct wmi_mac_phy_capabilities));
10062 	if (!svc_rdy_ext->n_mac_phy_caps) {
10063 		svc_rdy_ext->mac_phy_caps = mallocarray(
10064 		    svc_rdy_ext->tot_phy_id,
10065 		    sizeof(struct wmi_mac_phy_capabilities),
10066 		    M_DEVBUF, M_NOWAIT | M_ZERO);
10067 		if (!svc_rdy_ext->mac_phy_caps)
10068 			return ENOMEM;
10069 		svc_rdy_ext->mac_phy_caps_size = len * svc_rdy_ext->tot_phy_id;
10070 	}
10071 
10072 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps,
10073 	    ptr, len);
10074 	svc_rdy_ext->n_mac_phy_caps++;
10075 	return 0;
10076 }
10077 
10078 int
10079 qwx_wmi_tlv_ext_hal_reg_caps_parse(struct qwx_softc *sc,
10080     uint16_t tag, uint16_t len, const void *ptr, void *data)
10081 {
10082 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10083 
10084 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
10085 		return EPROTO;
10086 
10087 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
10088 		return ENOBUFS;
10089 
10090 	svc_rdy_ext->n_ext_hal_reg_caps++;
10091 	return 0;
10092 }
10093 
10094 int
10095 qwx_pull_reg_cap_svc_rdy_ext(struct qwx_pdev_wmi *wmi_handle,
10096     struct wmi_soc_hal_reg_capabilities *reg_caps,
10097     struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
10098     uint8_t phy_idx, struct ath11k_hal_reg_capabilities_ext *param)
10099 {
10100 	struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
10101 
10102 	if (!reg_caps || !wmi_ext_reg_cap)
10103 		return EINVAL;
10104 
10105 	if (phy_idx >= reg_caps->num_phy)
10106 		return EINVAL;
10107 
10108 	ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
10109 
10110 	param->phy_id = ext_reg_cap->phy_id;
10111 	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
10112 	param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext;
10113 	param->regcap1 = ext_reg_cap->regcap1;
10114 	param->regcap2 = ext_reg_cap->regcap2;
10115 	/* check if param->wireless_mode is needed */
10116 	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
10117 	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
10118 	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
10119 	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
10120 
10121 	return 0;
10122 }
10123 
10124 int
10125 qwx_wmi_tlv_ext_hal_reg_caps(struct qwx_softc *sc, uint16_t len,
10126     const void *ptr, void *data)
10127 {
10128 	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
10129 	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
10130 	struct ath11k_hal_reg_capabilities_ext reg_cap;
10131 	int ret;
10132 	uint32_t i;
10133 
10134 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
10135 	svc_rdy_ext->ext_hal_reg_caps =
10136 	    (struct wmi_hal_reg_capabilities_ext *)ptr;
10137 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
10138 	    qwx_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext);
10139 	if (ret) {
10140 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10141 		return ret;
10142 	}
10143 
10144 	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
10145 		ret = qwx_pull_reg_cap_svc_rdy_ext(wmi_handle,
10146 		    svc_rdy_ext->soc_hal_reg_caps,
10147 		    svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap);
10148 		if (ret) {
10149 			printf("%s: failed to extract reg cap %d\n",
10150 			    __func__, i);
10151 			return ret;
10152 		}
10153 
10154 		memcpy(&sc->hal_reg_cap[reg_cap.phy_id], &reg_cap,
10155 		    sizeof(sc->hal_reg_cap[0]));
10156 	}
10157 
10158 	return 0;
10159 }
10160 
10161 int
10162 qwx_wmi_tlv_dma_ring_caps_parse(struct qwx_softc *sc, uint16_t tag,
10163     uint16_t len, const void *ptr, void *data)
10164 {
10165 	struct wmi_tlv_dma_ring_caps_parse *parse = data;
10166 
10167 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
10168 		return EPROTO;
10169 
10170 	parse->n_dma_ring_caps++;
10171 	return 0;
10172 }
10173 
10174 int
10175 qwx_wmi_alloc_dbring_caps(struct qwx_softc *sc, uint32_t num_cap)
10176 {
10177 	void *ptr;
10178 
10179 	ptr = mallocarray(num_cap, sizeof(struct qwx_dbring_cap),
10180 	    M_DEVBUF, M_NOWAIT | M_ZERO);
10181 	if (!ptr)
10182 		return ENOMEM;
10183 
10184 	sc->db_caps = ptr;
10185 	sc->num_db_cap = num_cap;
10186 
10187 	return 0;
10188 }
10189 
10190 void
10191 qwx_wmi_free_dbring_caps(struct qwx_softc *sc)
10192 {
10193 	free(sc->db_caps, M_DEVBUF,
10194 	    sc->num_db_cap * sizeof(struct qwx_dbring_cap));
10195 	sc->db_caps = NULL;
10196 	sc->num_db_cap = 0;
10197 }
10198 
10199 int
10200 qwx_wmi_tlv_dma_ring_caps(struct qwx_softc *sc, uint16_t len,
10201     const void *ptr, void *data)
10202 {
10203 	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
10204 	struct wmi_dma_ring_capabilities *dma_caps;
10205 	struct qwx_dbring_cap *dir_buff_caps;
10206 	int ret;
10207 	uint32_t i;
10208 
10209 	dma_caps_parse->n_dma_ring_caps = 0;
10210 	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
10211 	ret = qwx_wmi_tlv_iter(sc, ptr, len,
10212 	    qwx_wmi_tlv_dma_ring_caps_parse, dma_caps_parse);
10213 	if (ret) {
10214 		printf("%s: failed to parse dma ring caps tlv %d\n",
10215 		    __func__, ret);
10216 		return ret;
10217 	}
10218 
10219 	if (!dma_caps_parse->n_dma_ring_caps)
10220 		return 0;
10221 
10222 	if (sc->num_db_cap) {
10223 		DNPRINTF(QWX_D_WMI,
10224 		    "%s: Already processed, so ignoring dma ring caps\n",
10225 		    __func__);
10226 		return 0;
10227 	}
10228 
10229 	ret = qwx_wmi_alloc_dbring_caps(sc, dma_caps_parse->n_dma_ring_caps);
10230 	if (ret)
10231 		return ret;
10232 
10233 	dir_buff_caps = sc->db_caps;
10234 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
10235 		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
10236 			printf("%s: Invalid module id %d\n", __func__,
10237 			    dma_caps[i].module_id);
10238 			ret = EINVAL;
10239 			goto free_dir_buff;
10240 		}
10241 
10242 		dir_buff_caps[i].id = dma_caps[i].module_id;
10243 		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
10244 		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
10245 		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
10246 		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
10247 	}
10248 
10249 	return 0;
10250 
10251 free_dir_buff:
10252 	qwx_wmi_free_dbring_caps(sc);
10253 	return ret;
10254 }
10255 
/*
 * Top-level TLV parser for the "service ready ext" event.
 *
 * The event carries several WMI_TAG_ARRAY_STRUCT TLVs whose meaning is
 * determined purely by the order in which they arrive. The *_done flags
 * in svc_rdy_ext implement a simple state machine tracking which array
 * is expected next: hw mode caps, then mac/phy caps, then ext hal reg
 * caps, then three arrays that are skipped (chainmask combo, chainmask
 * cap, oem dma ring cap), then dma ring caps.
 */
int
qwx_wmi_tlv_svc_rdy_ext_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct qwx_pdev_wmi *wmi_handle = &sc->wmi.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* Fixed part of the event: extract the ext parameters. */
		ret = qwx_pull_svc_ready_ext(wmi_handle, ptr,
		    &svc_rdy_ext->param);
		if (ret) {
			printf("%s: unable to extract ext params\n", __func__);
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		/* Remember where the HW mode caps header lives. */
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		/* Pull per-radio MAC/PHY capabilities into sc->pdevs[]. */
		ret = qwx_wmi_tlv_ext_soc_hal_reg_caps_parse(sc, len, ptr,
		    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		/* Order-dependent arrays; see the state machine above. */
		if (!svc_rdy_ext->hw_mode_done) {
			ret = qwx_wmi_tlv_hw_mode_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = 1;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = qwx_wmi_tlv_iter(sc, ptr, len,
			    qwx_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext);
			if (ret) {
				printf("%s: failed to parse tlv %d\n",
				    __func__, ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = 1;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = qwx_wmi_tlv_ext_hal_reg_caps(sc, len, ptr,
			    svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* Not used by this driver; just advance the state. */
			svc_rdy_ext->mac_phy_chainmask_combo_done = 1;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = 1;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = 1;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
			    &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = 1;
		}
		break;

	default:
		break;
	}

	return 0;
}
10334 
10335 void
10336 qwx_service_ready_ext_event(struct qwx_softc *sc, struct mbuf *m)
10337 {
10338 	struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
10339 	int ret;
10340 
10341 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
10342 	    qwx_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext);
10343 	if (ret) {
10344 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10345 		qwx_wmi_free_dbring_caps(sc);
10346 		return;
10347 	}
10348 
10349 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext\n", __func__);
10350 
10351 	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_EXT2_MSG))
10352 		wakeup(&sc->wmi.service_ready);
10353 
10354 	free(svc_rdy_ext.mac_phy_caps, M_DEVBUF,
10355 	    svc_rdy_ext.mac_phy_caps_size);
10356 }
10357 
10358 int
10359 qwx_wmi_tlv_svc_rdy_ext2_parse(struct qwx_softc *sc,
10360     uint16_t tag, uint16_t len, const void *ptr, void *data)
10361 {
10362 	struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
10363 	int ret;
10364 
10365 	switch (tag) {
10366 	case WMI_TAG_ARRAY_STRUCT:
10367 		if (!parse->dma_ring_cap_done) {
10368 			ret = qwx_wmi_tlv_dma_ring_caps(sc, len, ptr,
10369 			    &parse->dma_caps_parse);
10370 			if (ret)
10371 				return ret;
10372 
10373 			parse->dma_ring_cap_done = 1;
10374 		}
10375 		break;
10376 	default:
10377 		break;
10378 	}
10379 
10380 	return 0;
10381 }
10382 
10383 void
10384 qwx_service_ready_ext2_event(struct qwx_softc *sc, struct mbuf *m)
10385 {
10386 	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
10387 	int ret;
10388 
10389 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
10390 	    qwx_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2);
10391 	if (ret) {
10392 		printf("%s: failed to parse ext2 event tlv %d\n",
10393 		    __func__, ret);
10394 		qwx_wmi_free_dbring_caps(sc);
10395 		return;
10396 	}
10397 
10398 	DNPRINTF(QWX_D_WMI, "%s: event service ready ext2\n", __func__);
10399 
10400 	sc->wmi.service_ready = 1;
10401 	wakeup(&sc->wmi.service_ready);
10402 }
10403 
10404 void
10405 qwx_service_available_event(struct qwx_softc *sc, struct mbuf *m)
10406 {
10407 	int ret;
10408 
10409 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
10410 	    qwx_wmi_tlv_services_parser, NULL);
10411 	if (ret)
10412 		printf("%s: failed to parse services available tlv %d\n",
10413 		    sc->sc_dev.dv_xname, ret);
10414 
10415 	DNPRINTF(QWX_D_WMI, "%s: event service available\n", __func__);
10416 }
10417 
/*
 * TLV parser for the WMI "ready" event. The fixed-size part carries
 * the wlan init status and the SoC MAC address; an optional
 * fixed-struct array carries extra per-pdev MAC addresses when the
 * device has multiple radios.
 */
int
qwx_wmi_tlv_rdy_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
    const void *ptr, void *data)
{
	struct wmi_tlv_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct wmi_mac_addr *addr_list;
	struct qwx_pdev *pdev;
	uint32_t num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/*
		 * Copy at most sizeof(fixed_param) bytes; older firmware
		 * may send a shorter event, hence the zero-fill first.
		 */
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       MIN(sizeof(fixed_param), len));
		sc->wlan_init_status = fixed_param.ready_event_min.status;
		rdy_parse->num_extra_mac_addr =
			fixed_param.ready_event_min.num_extra_mac_addr;

		IEEE80211_ADDR_COPY(sc->mac_addr,
		    fixed_param.ready_event_min.mac_addr.addr);
		sc->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
		sc->wmi_ready = 1;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct wmi_mac_addr *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		/* Per-pdev addresses only matter on multi-radio devices,
		 * and only if the firmware supplied enough of them. */
		if (!(sc->num_radios > 1 && num_mac_addr >= sc->num_radios))
			break;

		for (i = 0; i < sc->num_radios; i++) {
			pdev = &sc->pdevs[i];
			IEEE80211_ADDR_COPY(pdev->mac_addr, addr_list[i].addr);
		}
		sc->pdevs_macaddr_valid = 1;
		break;
	default:
		break;
	}

	return 0;
}
10462 
10463 void
10464 qwx_ready_event(struct qwx_softc *sc, struct mbuf *m)
10465 {
10466 	struct wmi_tlv_rdy_parse rdy_parse = { };
10467 	int ret;
10468 
10469 	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
10470 	    qwx_wmi_tlv_rdy_parse, &rdy_parse);
10471 	if (ret) {
10472 		printf("%s: failed to parse tlv %d\n", __func__, ret);
10473 		return;
10474 	}
10475 
10476 	DNPRINTF(QWX_D_WMI, "%s: event ready", __func__);
10477 
10478 	sc->wmi.unified_ready = 1;
10479 	wakeup(&sc->wmi.unified_ready);
10480 }
10481 
10482 int
10483 qwx_pull_peer_del_resp_ev(struct qwx_softc *sc, struct mbuf *m,
10484     struct wmi_peer_delete_resp_event *peer_del_resp)
10485 {
10486 	const void **tb;
10487 	const struct wmi_peer_delete_resp_event *ev;
10488 	int ret;
10489 
10490 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
10491 	if (tb == NULL) {
10492 		ret = ENOMEM;
10493 		printf("%s: failed to parse tlv: %d\n",
10494 		    sc->sc_dev.dv_xname, ret);
10495 		return ret;
10496 	}
10497 
10498 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
10499 	if (!ev) {
10500 		printf("%s: failed to fetch peer delete resp ev\n",
10501 		    sc->sc_dev.dv_xname);
10502 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10503 		return EPROTO;
10504 	}
10505 
10506 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
10507 
10508 	peer_del_resp->vdev_id = ev->vdev_id;
10509 	IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr,
10510 	    ev->peer_macaddr.addr);
10511 
10512 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10513 	return 0;
10514 }
10515 
10516 void
10517 qwx_peer_delete_resp_event(struct qwx_softc *sc, struct mbuf *m)
10518 {
10519 	struct wmi_peer_delete_resp_event peer_del_resp;
10520 
10521 	if (qwx_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) {
10522 		printf("%s: failed to extract peer delete resp",
10523 		    sc->sc_dev.dv_xname);
10524 		return;
10525 	}
10526 
10527 	sc->peer_delete_done = 1;
10528 	wakeup(&sc->peer_delete_done);
10529 
10530 	DNPRINTF(QWX_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n",
10531 	    __func__, peer_del_resp.vdev_id,
10532 	    ether_sprintf(peer_del_resp.peer_macaddr.addr));
10533 }
10534 
10535 const char *
10536 qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status)
10537 {
10538 	switch (vdev_resp_status) {
10539 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
10540 		return "invalid vdev id";
10541 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
10542 		return "not supported";
10543 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
10544 		return "dfs violation";
10545 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
10546 		return "invalid regdomain";
10547 	default:
10548 		return "unknown";
10549 	}
10550 }
10551 
10552 int
10553 qwx_pull_vdev_start_resp_tlv(struct qwx_softc *sc, struct mbuf *m,
10554     struct wmi_vdev_start_resp_event *vdev_rsp)
10555 {
10556 	const void **tb;
10557 	const struct wmi_vdev_start_resp_event *ev;
10558 	int ret;
10559 
10560 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
10561 	if (tb == NULL) {
10562 		ret = ENOMEM;
10563 		printf("%s: failed to parse tlv: %d\n",
10564 		    sc->sc_dev.dv_xname, ret);
10565 		return ret;
10566 	}
10567 
10568 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
10569 	if (!ev) {
10570 		printf("%s: failed to fetch vdev start resp ev\n",
10571 		    sc->sc_dev.dv_xname);
10572 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10573 		return EPROTO;
10574 	}
10575 
10576 	memset(vdev_rsp, 0, sizeof(*vdev_rsp));
10577 
10578 	vdev_rsp->vdev_id = ev->vdev_id;
10579 	vdev_rsp->requestor_id = ev->requestor_id;
10580 	vdev_rsp->resp_type = ev->resp_type;
10581 	vdev_rsp->status = ev->status;
10582 	vdev_rsp->chain_mask = ev->chain_mask;
10583 	vdev_rsp->smps_mode = ev->smps_mode;
10584 	vdev_rsp->mac_id = ev->mac_id;
10585 	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
10586 	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
10587 
10588 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10589 	return 0;
10590 }
10591 
10592 void
10593 qwx_vdev_start_resp_event(struct qwx_softc *sc, struct mbuf *m)
10594 {
10595 	struct wmi_vdev_start_resp_event vdev_start_resp;
10596 	uint32_t status;
10597 
10598 	if (qwx_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) {
10599 		printf("%s: failed to extract vdev start resp",
10600 		    sc->sc_dev.dv_xname);
10601 		return;
10602 	}
10603 
10604 	status = vdev_start_resp.status;
10605 	if (status) {
10606 		printf("%s: vdev start resp error status %d (%s)\n",
10607 		    sc->sc_dev.dv_xname, status,
10608 		   qwx_wmi_vdev_resp_print(status));
10609 	}
10610 
10611 	sc->vdev_setup_done = 1;
10612 	wakeup(&sc->vdev_setup_done);
10613 
10614 	DNPRINTF(QWX_D_WMI, "%s: vdev start resp for vdev id %d", __func__,
10615 	    vdev_start_resp.vdev_id);
10616 }
10617 
10618 int
10619 qwx_wmi_tlv_iter_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
10620     const void *ptr, void *data)
10621 {
10622 	const void **tb = data;
10623 
10624 	if (tag < WMI_TAG_MAX)
10625 		tb[tag] = ptr;
10626 
10627 	return 0;
10628 }
10629 
10630 int
10631 qwx_wmi_tlv_parse(struct qwx_softc *sc, const void **tb,
10632     const void *ptr, size_t len)
10633 {
10634 	return qwx_wmi_tlv_iter(sc, ptr, len, qwx_wmi_tlv_iter_parse,
10635 	    (void *)tb);
10636 }
10637 
10638 const void **
10639 qwx_wmi_tlv_parse_alloc(struct qwx_softc *sc, const void *ptr, size_t len)
10640 {
10641 	const void **tb;
10642 	int ret;
10643 
10644 	tb = mallocarray(WMI_TAG_MAX, sizeof(*tb), M_DEVBUF, M_NOWAIT | M_ZERO);
10645 	if (!tb)
10646 		return NULL;
10647 
10648 	ret = qwx_wmi_tlv_parse(sc, tb, ptr, len);
10649 	if (ret) {
10650 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10651 		return NULL;
10652 	}
10653 
10654 	return tb;
10655 }
10656 
10657 static void
10658 qwx_print_reg_rule(struct qwx_softc *sc, const char *band,
10659     uint32_t num_reg_rules, struct cur_reg_rule *reg_rule_ptr)
10660 {
10661 	struct cur_reg_rule *reg_rule = reg_rule_ptr;
10662 	uint32_t count;
10663 
10664 	DNPRINTF(QWX_D_WMI, "%s: number of reg rules in %s band: %d\n",
10665 	    __func__, band, num_reg_rules);
10666 
10667 	for (count = 0; count < num_reg_rules; count++) {
10668 		DNPRINTF(QWX_D_WMI,
10669 		    "%s: reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
10670 		    __func__, count + 1, reg_rule->start_freq,
10671 		    reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain,
10672 		    reg_rule->reg_power, reg_rule->flags);
10673 		reg_rule++;
10674 	}
10675 }
10676 
10677 struct cur_reg_rule
10678 *qwx_create_reg_rules_from_wmi(uint32_t num_reg_rules,
10679     struct wmi_regulatory_rule_struct *wmi_reg_rule)
10680 {
10681 	struct cur_reg_rule *reg_rule_ptr;
10682 	uint32_t count;
10683 
10684 	reg_rule_ptr = mallocarray(num_reg_rules, sizeof(*reg_rule_ptr),
10685 	    M_DEVBUF, M_NOWAIT | M_ZERO);
10686 	if (!reg_rule_ptr)
10687 		return NULL;
10688 
10689 	for (count = 0; count < num_reg_rules; count++) {
10690 		reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ,
10691 		    wmi_reg_rule[count].freq_info);
10692 		reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ,
10693 		    wmi_reg_rule[count].freq_info);
10694 		reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW,
10695 		    wmi_reg_rule[count].bw_pwr_info);
10696 		reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR,
10697 		    wmi_reg_rule[count].bw_pwr_info);
10698 		reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN,
10699 		    wmi_reg_rule[count].bw_pwr_info);
10700 		reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS,
10701 		    wmi_reg_rule[count].flag_info);
10702 	}
10703 
10704 	return reg_rule_ptr;
10705 }
10706 
10707 int
10708 qwx_pull_reg_chan_list_update_ev(struct qwx_softc *sc, struct mbuf *m,
10709     struct cur_regulatory_info *reg_info)
10710 {
10711 	const void **tb;
10712 	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
10713 	struct wmi_regulatory_rule_struct *wmi_reg_rule;
10714 	uint32_t num_2ghz_reg_rules, num_5ghz_reg_rules;
10715 	int ret;
10716 
10717 	DNPRINTF(QWX_D_WMI, "%s: processing regulatory channel list\n",
10718 	    __func__);
10719 
10720 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
10721 	if (tb == NULL) {
10722 		ret = ENOMEM; /* XXX allocation failure or parsing failure? */
10723 		printf("%s: failed to parse tlv: %d\n", __func__, ret);
10724 		return ENOMEM;
10725 	}
10726 
10727 	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
10728 	if (!chan_list_event_hdr) {
10729 		printf("%s: failed to fetch reg chan list update ev\n",
10730 		    __func__);
10731 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10732 		return EPROTO;
10733 	}
10734 
10735 	reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
10736 	reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
10737 
10738 	if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
10739 		printf("%s: No regulatory rules available in the event info\n",
10740 		    __func__);
10741 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10742 		return EINVAL;
10743 	}
10744 
10745 	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN);
10746 	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
10747 	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
10748 	reg_info->num_phy = chan_list_event_hdr->num_phy;
10749 	reg_info->phy_id = chan_list_event_hdr->phy_id;
10750 	reg_info->ctry_code = chan_list_event_hdr->country_id;
10751 	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
10752 
10753 	DNPRINTF(QWX_D_WMI, "%s: CC status_code %s\n", __func__,
10754 	    qwx_cc_status_to_str(reg_info->status_code));
10755 
10756 	reg_info->status_code =
10757 		qwx_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
10758 
10759 	reg_info->is_ext_reg_event = false;
10760 
10761 	reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
10762 	reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
10763 	reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
10764 	reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
10765 
10766 	num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
10767 	num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
10768 
10769 	DNPRINTF(QWX_D_WMI,
10770 	    "%s: cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d "
10771 	    "max_5ghz %d\n", __func__, reg_info->alpha2, reg_info->dfs_region,
10772 	    reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
10773 	    reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
10774 
10775 	DNPRINTF(QWX_D_WMI,
10776 	    "%s: num_2ghz_reg_rules %d num_5ghz_reg_rules %d\n", __func__,
10777 	    num_2ghz_reg_rules, num_5ghz_reg_rules);
10778 
10779 	wmi_reg_rule = (struct wmi_regulatory_rule_struct *)
10780 	    ((uint8_t *)chan_list_event_hdr + sizeof(*chan_list_event_hdr)
10781 	    + sizeof(struct wmi_tlv));
10782 
10783 	if (num_2ghz_reg_rules) {
10784 		reg_info->reg_rules_2ghz_ptr = qwx_create_reg_rules_from_wmi(
10785 		    num_2ghz_reg_rules, wmi_reg_rule);
10786 		if (!reg_info->reg_rules_2ghz_ptr) {
10787 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10788 			printf("%s: Unable to allocate memory for "
10789 			    "2 GHz rules\n", __func__);
10790 			return ENOMEM;
10791 		}
10792 
10793 		qwx_print_reg_rule(sc, "2 GHz", num_2ghz_reg_rules,
10794 		    reg_info->reg_rules_2ghz_ptr);
10795 	}
10796 
10797 	if (num_5ghz_reg_rules) {
10798 		wmi_reg_rule += num_2ghz_reg_rules;
10799 		reg_info->reg_rules_5ghz_ptr = qwx_create_reg_rules_from_wmi(
10800 		    num_5ghz_reg_rules, wmi_reg_rule);
10801 		if (!reg_info->reg_rules_5ghz_ptr) {
10802 			free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10803 			printf("%s: Unable to allocate memory for "
10804 			    "5 GHz rules\n", __func__);
10805 			return ENOMEM;
10806 		}
10807 
10808 		qwx_print_reg_rule(sc, "5 GHz", num_5ghz_reg_rules,
10809 		    reg_info->reg_rules_5ghz_ptr);
10810 	}
10811 
10812 	DNPRINTF(QWX_D_WMI, "%s: processed regulatory channel list\n",
10813 	    __func__);
10814 
10815 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
10816 	return 0;
10817 }
10818 
/*
 * Extended regulatory channel list events are not implemented yet;
 * always logs a message and fails with ENOTSUP. Called from
 * qwx_reg_chan_list_event() for WMI_REG_CHAN_LIST_CC_EXT_ID events.
 */
int
qwx_pull_reg_chan_list_ext_update_ev(struct qwx_softc *sc, struct mbuf *m,
    struct cur_regulatory_info *reg_info)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
10826 
/*
 * Populate net80211's channel table from the regulatory rules the
 * firmware reported. 2 GHz rules map to channels 1-14 (20 MHz apart in
 * channel numbers, center at start_freq + 10 MHz); 5 GHz rules map to
 * channels 36 and up, stepping 4 channel numbers at a time. Channels
 * covered by a DISABLED rule are cleared; 5 GHz channels flagged
 * RADAR/NO_IR/INDOOR_ONLY become passive-scan only.
 */
void
qwx_init_channels(struct qwx_softc *sc, struct cur_regulatory_info *reg_info)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan;
	struct cur_reg_rule *rule;
	int i, chnum;
	uint16_t freq;

	for (i = 0; i < reg_info->num_2ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_2ghz_ptr[i];
		/* Sanity-check the rule lies in the 2.4 GHz band. */
		if (rule->start_freq < 2402 ||
		    rule->start_freq > 2500 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		/* Rule boundaries are channel edges; center is +10 MHz. */
		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_2GHZ);
		if (chnum < 1 || chnum > 14) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		while (freq <= rule->end_freq && chnum <= 14) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & REGULATORY_CHAN_DISABLED) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_CCK |
				    IEEE80211_CHAN_OFDM |
				    IEEE80211_CHAN_DYN |
				    IEEE80211_CHAN_2GHZ;
			}
			chnum++;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_2GHZ);
		}
	}

	for (i = 0; i < reg_info->num_5ghz_reg_rules; i++) {
		rule = &reg_info->reg_rules_5ghz_ptr[i];
		/* Sanity-check the rule lies in the 5 GHz band. */
		if (rule->start_freq < 5170 ||
		    rule->start_freq > 6000 ||
		    rule->start_freq > rule->end_freq) {
			DPRINTF("%s: bad regulatory rule: start freq %u, "
			    "end freq %u\n", __func__, rule->start_freq,
			    rule->end_freq);
			continue;
		}

		freq = rule->start_freq + 10;
		chnum = ieee80211_mhz2ieee(freq, IEEE80211_CHAN_5GHZ);
		if (chnum < 36 || chnum > IEEE80211_CHAN_MAX) {
			DPRINTF("%s: bad regulatory rule: freq %u, "
			    "channel %u\n", __func__, freq, chnum);
			continue;
		}
		/* 5885 MHz caps the usable 5 GHz range here. */
		while (freq <= rule->end_freq && freq <= 5885 &&
		    chnum <= IEEE80211_CHAN_MAX) {
			chan = &ic->ic_channels[chnum];
			if (rule->flags & (REGULATORY_CHAN_DISABLED |
			    REGULATORY_CHAN_NO_OFDM)) {
				chan->ic_freq = 0;
				chan->ic_flags = 0;
			} else {
				chan->ic_freq = freq;
				chan->ic_flags = IEEE80211_CHAN_A;
				if (rule->flags & (REGULATORY_CHAN_RADAR |
				    REGULATORY_CHAN_NO_IR |
				    REGULATORY_CHAN_INDOOR_ONLY)) {
					chan->ic_flags |=
					    IEEE80211_CHAN_PASSIVE;
				}
			}
			/* 5 GHz channels of interest are 4 numbers apart. */
			chnum += 4;
			freq = ieee80211_ieee2mhz(chnum, IEEE80211_CHAN_5GHZ);
		}
	}
}
10911 
/*
 * Handle a WMI regulatory channel list event (regular or extended).
 *
 * Extracts the firmware-provided regulatory rules from the event mbuf
 * into a freshly allocated cur_regulatory_info structure, and on success
 * applies the rules to the net80211 channel list via qwx_init_channels().
 * The Linux ath11k regdomain intersection logic is retained under #if 0
 * for reference but is not used on OpenBSD.
 *
 * Returns 0 on success or an errno-style error code. In all cases the
 * regulatory info and its rule arrays are freed before returning.
 */
int
qwx_reg_chan_list_event(struct qwx_softc *sc, struct mbuf *m,
    enum wmi_reg_chan_list_cmd_type id)
{
	struct cur_regulatory_info *reg_info = NULL;
	int ret = 0;
#if 0
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int pdev_idx, i, j;
	struct ath11k *ar;
#endif

	reg_info = malloc(sizeof(*reg_info), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!reg_info) {
		ret = ENOMEM;
		goto fallback;
	}

	/* Pick the parser matching the event flavor we were dispatched for. */
	if (id == WMI_REG_CHAN_LIST_CC_ID)
		ret = qwx_pull_reg_chan_list_update_ev(sc, m, reg_info);
	else
		ret = qwx_pull_reg_chan_list_ext_update_ev(sc, m, reg_info);

	if (ret) {
		printf("%s: failed to extract regulatory info from "
		    "received event\n", sc->sc_dev.dv_xname);
		goto fallback;
	}

	DNPRINTF(QWX_D_WMI, "%s: event reg chan list id %d\n", __func__, id);

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		printf("%s: Failed to set the requested Country "
		    "regulatory setting\n", __func__);
		goto mem_free;
	}

	/* Apply the parsed rules to ic->ic_channels. */
	qwx_init_channels(sc, reg_info);
#if 0
	pdev_idx = reg_info->phy_id;

	/* Avoid default reg rule updates sent during FW recovery if
	 * it is already available
	 */
	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
	    ab->default_regd[pdev_idx]) {
		spin_unlock(&ab->base_lock);
		goto mem_free;
	}
	spin_unlock(&ab->base_lock);

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (ab->default_regd[pdev_idx]) {
		/* The initial rules from FW after WMI Init is to build
		 * the default regd. From then on, any rules updated for
		 * the pdev could be due to user reg changes.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* This regd would be applied during mac registration and is
		 * held constant throughout for regd intersection purpose
		 */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);
#endif
	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
mem_free:
	/* Sized free(9): rule array sizes must match what the parser
	 * allocated for these counts. */
	if (reg_info) {
		free(reg_info->reg_rules_2ghz_ptr, M_DEVBUF,
		    reg_info->num_2ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_2ghz_ptr));
		free(reg_info->reg_rules_5ghz_ptr, M_DEVBUF,
		    reg_info->num_5ghz_reg_rules *
		    sizeof(*reg_info->reg_rules_5ghz_ptr));
#if 0
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
		}
#endif
		free(reg_info, M_DEVBUF, sizeof(*reg_info));
	}
	return ret;
}
11060 
11061 const char *
11062 qwx_wmi_event_scan_type_str(enum wmi_scan_event_type type,
11063     enum wmi_scan_completion_reason reason)
11064 {
11065 	switch (type) {
11066 	case WMI_SCAN_EVENT_STARTED:
11067 		return "started";
11068 	case WMI_SCAN_EVENT_COMPLETED:
11069 		switch (reason) {
11070 		case WMI_SCAN_REASON_COMPLETED:
11071 			return "completed";
11072 		case WMI_SCAN_REASON_CANCELLED:
11073 			return "completed [cancelled]";
11074 		case WMI_SCAN_REASON_PREEMPTED:
11075 			return "completed [preempted]";
11076 		case WMI_SCAN_REASON_TIMEDOUT:
11077 			return "completed [timedout]";
11078 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
11079 			return "completed [internal err]";
11080 		case WMI_SCAN_REASON_MAX:
11081 			break;
11082 		}
11083 		return "completed [unknown]";
11084 	case WMI_SCAN_EVENT_BSS_CHANNEL:
11085 		return "bss channel";
11086 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
11087 		return "foreign channel";
11088 	case WMI_SCAN_EVENT_DEQUEUED:
11089 		return "dequeued";
11090 	case WMI_SCAN_EVENT_PREEMPTED:
11091 		return "preempted";
11092 	case WMI_SCAN_EVENT_START_FAILED:
11093 		return "start failed";
11094 	case WMI_SCAN_EVENT_RESTARTED:
11095 		return "restarted";
11096 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
11097 		return "foreign channel exit";
11098 	default:
11099 		return "unknown";
11100 	}
11101 }
11102 
11103 const char *
11104 qwx_scan_state_str(enum ath11k_scan_state state)
11105 {
11106 	switch (state) {
11107 	case ATH11K_SCAN_IDLE:
11108 		return "idle";
11109 	case ATH11K_SCAN_STARTING:
11110 		return "starting";
11111 	case ATH11K_SCAN_RUNNING:
11112 		return "running";
11113 	case ATH11K_SCAN_ABORTING:
11114 		return "aborting";
11115 	}
11116 
11117 	return "unknown";
11118 }
11119 
11120 int
11121 qwx_pull_scan_ev(struct qwx_softc *sc, struct mbuf *m,
11122     struct wmi_scan_event *scan_evt_param)
11123 {
11124 	const void **tb;
11125 	const struct wmi_scan_event *ev;
11126 
11127 	tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
11128 	if (tb == NULL) {
11129 		DPRINTF("%s: failed to parse tlv\n", __func__);
11130 		return EINVAL;
11131 	}
11132 
11133 	ev = tb[WMI_TAG_SCAN_EVENT];
11134 	if (!ev) {
11135 		DPRINTF("%s: failed to fetch scan ev\n", __func__);
11136 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11137 		return EPROTO;
11138 	}
11139 
11140 	scan_evt_param->event_type = ev->event_type;
11141 	scan_evt_param->reason = ev->reason;
11142 	scan_evt_param->channel_freq = ev->channel_freq;
11143 	scan_evt_param->scan_req_id = ev->scan_req_id;
11144 	scan_evt_param->scan_id = ev->scan_id;
11145 	scan_evt_param->vdev_id = ev->vdev_id;
11146 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
11147 
11148 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11149 	return 0;
11150 }
11151 
11152 void
11153 qwx_wmi_event_scan_started(struct qwx_softc *sc)
11154 {
11155 #ifdef notyet
11156 	lockdep_assert_held(&ar->data_lock);
11157 #endif
11158 	switch (sc->scan.state) {
11159 	case ATH11K_SCAN_IDLE:
11160 	case ATH11K_SCAN_RUNNING:
11161 	case ATH11K_SCAN_ABORTING:
11162 		printf("%s: received scan started event in an invalid "
11163 		"scan state: %s (%d)\n", sc->sc_dev.dv_xname,
11164 		qwx_scan_state_str(sc->scan.state), sc->scan.state);
11165 		break;
11166 	case ATH11K_SCAN_STARTING:
11167 		sc->scan.state = ATH11K_SCAN_RUNNING;
11168 #if 0
11169 		if (ar->scan.is_roc)
11170 			ieee80211_ready_on_channel(ar->hw);
11171 #endif
11172 		wakeup(&sc->scan.state);
11173 		break;
11174 	}
11175 }
11176 
11177 void
11178 qwx_wmi_event_scan_completed(struct qwx_softc *sc)
11179 {
11180 #ifdef notyet
11181 	lockdep_assert_held(&ar->data_lock);
11182 #endif
11183 	switch (sc->scan.state) {
11184 	case ATH11K_SCAN_IDLE:
11185 	case ATH11K_SCAN_STARTING:
11186 		/* One suspected reason scan can be completed while starting is
11187 		 * if firmware fails to deliver all scan events to the host,
11188 		 * e.g. when transport pipe is full. This has been observed
11189 		 * with spectral scan phyerr events starving wmi transport
11190 		 * pipe. In such case the "scan completed" event should be (and
11191 		 * is) ignored by the host as it may be just firmware's scan
11192 		 * state machine recovering.
11193 		 */
11194 		printf("%s: received scan completed event in an invalid "
11195 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
11196 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
11197 		break;
11198 	case ATH11K_SCAN_RUNNING:
11199 	case ATH11K_SCAN_ABORTING:
11200 		qwx_mac_scan_finish(sc);
11201 		break;
11202 	}
11203 }
11204 
11205 void
11206 qwx_wmi_event_scan_bss_chan(struct qwx_softc *sc)
11207 {
11208 #ifdef notyet
11209 	lockdep_assert_held(&ar->data_lock);
11210 #endif
11211 	switch (sc->scan.state) {
11212 	case ATH11K_SCAN_IDLE:
11213 	case ATH11K_SCAN_STARTING:
11214 		printf("%s: received scan bss chan event in an invalid "
11215 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
11216 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
11217 		break;
11218 	case ATH11K_SCAN_RUNNING:
11219 	case ATH11K_SCAN_ABORTING:
11220 		sc->scan_channel = 0;
11221 		break;
11222 	}
11223 }
11224 
11225 void
11226 qwx_wmi_event_scan_foreign_chan(struct qwx_softc *sc, uint32_t freq)
11227 {
11228 #ifdef notyet
11229 	lockdep_assert_held(&ar->data_lock);
11230 #endif
11231 	switch (sc->scan.state) {
11232 	case ATH11K_SCAN_IDLE:
11233 	case ATH11K_SCAN_STARTING:
11234 		printf("%s: received scan foreign chan event in an invalid "
11235 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
11236 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
11237 		break;
11238 	case ATH11K_SCAN_RUNNING:
11239 	case ATH11K_SCAN_ABORTING:
11240 		sc->scan_channel = ieee80211_mhz2ieee(freq, 0);
11241 #if 0
11242 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
11243 			complete(&ar->scan.on_channel);
11244 #endif
11245 		break;
11246 	}
11247 }
11248 
11249 void
11250 qwx_wmi_event_scan_start_failed(struct qwx_softc *sc)
11251 {
11252 #ifdef notyet
11253 	lockdep_assert_held(&ar->data_lock);
11254 #endif
11255 	switch (sc->scan.state) {
11256 	case ATH11K_SCAN_IDLE:
11257 	case ATH11K_SCAN_RUNNING:
11258 	case ATH11K_SCAN_ABORTING:
11259 		printf("%s: received scan start failed event in an invalid "
11260 		    "scan state: %s (%d)\n", sc->sc_dev.dv_xname,
11261 		    qwx_scan_state_str(sc->scan.state), sc->scan.state);
11262 		break;
11263 	case ATH11K_SCAN_STARTING:
11264 		wakeup(&sc->scan.state);
11265 		qwx_mac_scan_finish(sc);
11266 		break;
11267 	}
11268 }
11269 
11270 
11271 void
11272 qwx_scan_event(struct qwx_softc *sc, struct mbuf *m)
11273 {
11274 	struct wmi_scan_event scan_ev = { 0 };
11275 	struct qwx_vif *arvif;
11276 
11277 	if (qwx_pull_scan_ev(sc, m, &scan_ev) != 0) {
11278 		printf("%s: failed to extract scan event",
11279 		    sc->sc_dev.dv_xname);
11280 		return;
11281 	}
11282 #ifdef notyet
11283 	rcu_read_lock();
11284 #endif
11285 	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
11286 		if (arvif->vdev_id == scan_ev.vdev_id)
11287 			break;
11288 	}
11289 
11290 	if (!arvif) {
11291 		printf("%s: received scan event for unknown vdev\n",
11292 		    sc->sc_dev.dv_xname);
11293 #if 0
11294 		rcu_read_unlock();
11295 #endif
11296 		return;
11297 	}
11298 #if 0
11299 	spin_lock_bh(&ar->data_lock);
11300 #endif
11301 	DNPRINTF(QWX_D_WMI,
11302 	    "%s: event scan %s type %d reason %d freq %d req_id %d scan_id %d "
11303 	    "vdev_id %d state %s (%d)\n", __func__,
11304 	    qwx_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
11305 	    scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
11306 	    scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
11307 	    qwx_scan_state_str(sc->scan.state), sc->scan.state);
11308 
11309 	switch (scan_ev.event_type) {
11310 	case WMI_SCAN_EVENT_STARTED:
11311 		qwx_wmi_event_scan_started(sc);
11312 		break;
11313 	case WMI_SCAN_EVENT_COMPLETED:
11314 		qwx_wmi_event_scan_completed(sc);
11315 		break;
11316 	case WMI_SCAN_EVENT_BSS_CHANNEL:
11317 		qwx_wmi_event_scan_bss_chan(sc);
11318 		break;
11319 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
11320 		qwx_wmi_event_scan_foreign_chan(sc, scan_ev.channel_freq);
11321 		break;
11322 	case WMI_SCAN_EVENT_START_FAILED:
11323 		printf("%s: received scan start failure event\n",
11324 		    sc->sc_dev.dv_xname);
11325 		qwx_wmi_event_scan_start_failed(sc);
11326 		break;
11327 	case WMI_SCAN_EVENT_DEQUEUED:
11328 		qwx_mac_scan_finish(sc);
11329 		break;
11330 	case WMI_SCAN_EVENT_PREEMPTED:
11331 	case WMI_SCAN_EVENT_RESTARTED:
11332 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
11333 	default:
11334 		break;
11335 	}
11336 #if 0
11337 	spin_unlock_bh(&ar->data_lock);
11338 
11339 	rcu_read_unlock();
11340 #endif
11341 }
11342 
11343 int
11344 qwx_pull_chan_info_ev(struct qwx_softc *sc, uint8_t *evt_buf, uint32_t len,
11345     struct wmi_chan_info_event *ch_info_ev)
11346 {
11347 	const void **tb;
11348 	const struct wmi_chan_info_event *ev;
11349 
11350 	tb = qwx_wmi_tlv_parse_alloc(sc, evt_buf, len);
11351 	if (tb == NULL) {
11352 		printf("%s: failed to parse tlv\n", sc->sc_dev.dv_xname);
11353 		return EINVAL;
11354 	}
11355 
11356 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
11357 	if (!ev) {
11358 		printf("%s: failed to fetch chan info ev\n",
11359 		    sc->sc_dev.dv_xname);
11360 		free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11361 		return EPROTO;
11362 	}
11363 
11364 	ch_info_ev->err_code = ev->err_code;
11365 	ch_info_ev->freq = ev->freq;
11366 	ch_info_ev->cmd_flags = ev->cmd_flags;
11367 	ch_info_ev->noise_floor = ev->noise_floor;
11368 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
11369 	ch_info_ev->cycle_count = ev->cycle_count;
11370 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
11371 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
11372 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
11373 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
11374 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
11375 	ch_info_ev->vdev_id = ev->vdev_id;
11376 
11377 	free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
11378 	return 0;
11379 }
11380 
/*
 * Handle a WMI_CHAN_INFO_EVENTID message: record per-channel survey
 * statistics (noise floor, busy/total time) reported by firmware during
 * a scan. Events outside of an active scan are ignored.
 */
void
qwx_chan_info_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_vif *arvif;
	struct wmi_chan_info_event ch_info_ev = {0};
	struct qwx_survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	uint32_t cc_freq_hz = sc->cc_freq_hz;

	if (qwx_pull_chan_info_ev(sc, mtod(m, void *), m->m_pkthdr.len,
	    &ch_info_ev) != 0) {
		printf("%s: failed to extract chan info event\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	DNPRINTF(QWX_D_WMI, "%s: event chan info vdev_id %d err_code %d "
	    "freq %d cmd_flags %d noise_floor %d rx_clear_count %d "
	    "cycle_count %d mac_clk_mhz %d\n", __func__,
	    ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
	    ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
	    ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
	    ch_info_ev.mac_clk_mhz);

	/* An END_RESP marks the end of a report sequence; nothing to store. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		DNPRINTF(QWX_D_WMI, "chan info report completed\n");
		return;
	}
#ifdef notyet
	rcu_read_lock();
#endif
	/* Validate the vdev id against our list of configured vifs. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->vdev_id == ch_info_ev.vdev_id)
			break;
	}
	if (!arvif) {
		printf("%s: invalid vdev id in chan info ev %d\n",
		   sc->sc_dev.dv_xname, ch_info_ev.vdev_id);
#ifdef notyet
		rcu_read_unlock();
#endif
		return;
	}
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		printf("%s: received chan info event without a scan request, "
		    "ignoring\n", sc->sc_dev.dv_xname);
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	/* The survey array is indexed by IEEE channel number. */
	idx = ieee80211_mhz2ieee(ch_info_ev.freq, 0);
	if (idx >= nitems(sc->survey)) {
		printf("%s: invalid frequency %d (idx %d out of bounds)\n",
		    sc->sc_dev.dv_xname, ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	/* Only a START_RESP resets the survey entry for this channel;
	 * counters are converted from clock ticks to time using cc_freq_hz. */
	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &sc->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->time = ch_info_ev.cycle_count / cc_freq_hz;
		survey->time_busy = ch_info_ev.rx_clear_count / cc_freq_hz;
	}
exit:
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
#else
	return;
#endif
}
11467 
11468 int
11469 qwx_wmi_tlv_mgmt_rx_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len,
11470     const void *ptr, void *data)
11471 {
11472 	struct wmi_tlv_mgmt_rx_parse *parse = data;
11473 
11474 	switch (tag) {
11475 	case WMI_TAG_MGMT_RX_HDR:
11476 		parse->fixed = ptr;
11477 		break;
11478 	case WMI_TAG_ARRAY_BYTE:
11479 		if (!parse->frame_buf_done) {
11480 			parse->frame_buf = ptr;
11481 			parse->frame_buf_done = 1;
11482 		}
11483 		break;
11484 	}
11485 	return 0;
11486 }
11487 
/*
 * Parse a WMI management RX event: extract the RX metadata into *hdr
 * and trim the mbuf so that it begins at the 802.11 frame and is
 * exactly buf_len bytes long. Returns 0 on success or an errno-style
 * error; on success the mbuf has been adjusted in place.
 */
int
qwx_pull_mgmt_rx_params_tlv(struct qwx_softc *sc, struct mbuf *m,
    struct mgmt_rx_event_params *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { 0 };
	const struct wmi_mgmt_rx_hdr *ev;
	const uint8_t *frame;
	int ret;
	size_t totlen, hdrlen;

	ret = qwx_wmi_tlv_iter(sc, mtod(m, void *), m->m_pkthdr.len,
	    qwx_wmi_tlv_mgmt_rx_parse, &parse);
	if (ret) {
		printf("%s: failed to parse mgmt rx tlv %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Both the fixed header TLV and a frame buffer TLV are required. */
	ev = parse.fixed;
	frame = parse.frame_buf;

	if (!ev || !frame) {
		printf("%s: failed to fetch mgmt rx hdr\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	/* NOTE(review): pdev_id is copied without le32toh, unlike the other
	 * fields below; matches Linux ath11k but looks inconsistent — verify. */
	hdr->pdev_id =  ev->pdev_id;
	hdr->chan_freq = le32toh(ev->chan_freq);
	hdr->channel = le32toh(ev->channel);
	hdr->snr = le32toh(ev->snr);
	hdr->rate = le32toh(ev->rate);
	hdr->phy_mode = le32toh(ev->phy_mode);
	hdr->buf_len = le32toh(ev->buf_len);
	hdr->status = le32toh(ev->status);
	hdr->flags = le32toh(ev->flags);
	hdr->rssi = le32toh(ev->rssi);
	hdr->tsf_delta = le32toh(ev->tsf_delta);
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* The frame pointer must lie within the event mbuf data. */
	if (frame < mtod(m, uint8_t *) ||
	    frame >= mtod(m, uint8_t *) + m->m_pkthdr.len) {
		printf("%s: invalid mgmt rx frame pointer\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	hdrlen = frame - mtod(m, uint8_t *);

	/* Guard against integer overflow of hdrlen + buf_len. */
	if (hdrlen + hdr->buf_len < hdr->buf_len) {
		printf("%s: length overflow in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}
	totlen = hdrlen + hdr->buf_len;
	if (m->m_pkthdr.len < totlen) {
		printf("%s: invalid length in mgmt rx hdr ev\n",
		    sc->sc_dev.dv_xname);
		return EPROTO;
	}

	/* shift the mbuf to point at `frame` */
	m->m_len = m->m_pkthdr.len = totlen;
	m_adj(m, hdrlen);

#if 0 /* Not needed on OpenBSD? */
	ath11k_ce_byte_swap(skb->data, hdr->buf_len);
#endif
	return 0;
}
11557 
/*
 * Handle a WMI management RX event: extract the frame and its RX
 * metadata, filter out frames received with errors, and pass the frame
 * up to net80211. This function consumes the mbuf on all paths (either
 * via m_freem() or by handing it to ieee80211_input()).
 */
void
qwx_mgmt_rx_event(struct qwx_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	struct mgmt_rx_event_params rx_ev = {0};
	struct ieee80211_rxinfo rxi;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	if (qwx_pull_mgmt_rx_params_tlv(sc, m, &rx_ev) != 0) {
		printf("%s: failed to extract mgmt rx event\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return;
	}

	memset(&rxi, 0, sizeof(rxi));

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx status %08x\n", __func__,
	    rx_ev.status);
#ifdef notyet
	rcu_read_lock();
#endif
	if (rx_ev.pdev_id >= nitems(sc->pdevs)) {
		printf("%s: invalid pdev_id %d in mgmt_rx_event\n",
		    sc->sc_dev.dv_xname, rx_ev.pdev_id);
		m_freem(m);
		goto exit;
	}

	/* Drop frames received during CAC or flagged with RX errors. */
	if ((test_bit(ATH11K_CAC_RUNNING, sc->sc_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		m_freem(m);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) {
		ic->ic_stats.is_ccmp_dec_errs++;
		m_freem(m);
		goto exit;
	}

	/* Firmware reports SNR; convert to RSSI using the default noise
	 * floor. */
	rxi.rxi_chan = rx_ev.channel;
	rxi.rxi_rssi = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
#if 0
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
#endif

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);
#if 0
	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath11k_mac_handle_beacon(ar, skb);
#endif

	DNPRINTF(QWX_D_MGMT,
	    "%s: event mgmt rx skb %p len %d ftype %02x stype %02x\n",
	    __func__, m, m->m_pkthdr.len,
	    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK,
	    wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);

	DNPRINTF(QWX_D_MGMT, "%s: event mgmt rx freq %d chan %d snr %d\n",
	    __func__, rx_ev.chan_freq, rx_ev.channel, rx_ev.snr);

	/* ieee80211_input() takes ownership of the mbuf and node ref. */
	ieee80211_input(ifp, m, ni, &rxi);
exit:
#ifdef notyet
	rcu_read_unlock();
#else
	return;
#endif
}
11648 
/*
 * Main WMI RX dispatcher: strip the WMI command header from the mbuf
 * and hand the remaining payload to the handler matching its event id.
 * The mbuf is freed here after dispatch, except for management RX
 * events, whose handler takes over ownership.
 */
void
qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = mtod(m, struct wmi_cmd_hdr *);
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	/* Strip the WMI command header; handlers see only the payload. */
	m_adj(m, sizeof(struct wmi_cmd_hdr));

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		qwx_service_ready_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		qwx_service_ready_ext_event(sc, m);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		qwx_service_ready_ext2_event(sc, m);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_ID);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		qwx_reg_chan_list_event(sc, m, WMI_REG_CHAN_LIST_CC_EXT_ID);
		break;
	case WMI_READY_EVENTID:
		qwx_ready_event(sc, m);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		qwx_peer_delete_resp_event(sc, m);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		qwx_vdev_start_resp_event(sc, m);
		break;
#if 0
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath11k_vdev_stopped_event(ab, skb);
		break;
#endif
	case WMI_MGMT_RX_EVENTID:
		DPRINTF("%s: 0x%x: mgmt rx event\n", __func__, id);
		qwx_mgmt_rx_event(sc, m);
		/* mgmt_rx_event() owns the skb now! */
		return;
#if 0
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath11k_mgmt_tx_compl_event(ab, skb);
		break;
#endif
	case WMI_SCAN_EVENTID:
		DPRINTF("%s: 0x%x: scan event\n", __func__, id);
		qwx_scan_event(sc, m);
		break;
#if 0
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath11k_roam_event(ab, skb);
		break;
#endif
	case WMI_CHAN_INFO_EVENTID:
		DPRINTF("%s: 0x%x: chan info event\n", __func__, id);
		qwx_chan_info_event(sc, m);
		break;
#if 0
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath11k_vdev_install_key_compl_event(ab, skb);
		break;
#endif
	case WMI_SERVICE_AVAILABLE_EVENTID:
		qwx_service_available_event(sc, m);
		break;
#if 0
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath11k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath11k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_UTF_EVENTID:
		ath11k_tm_wmi_event(ab, id, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath11k_wmi_obss_color_collision_event(ab, skb);
		break;
	case WMI_TWT_ADD_DIALOG_EVENTID:
		ath11k_wmi_twt_add_dialog_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath11k_reg_11d_new_cc_event(ab, skb);
		break;
#endif
	case WMI_DIAG_EVENTID:
		/* Ignore. These events trigger tracepoints in Linux. */
		break;
#if 0
	case WMI_PEER_STA_PS_STATECHG_EVENTID:
		ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath11k_wmi_gtk_offload_status_event(ab, skb);
		break;
#endif
	case WMI_UPDATE_FW_MEM_DUMP_EVENTID:
		DPRINTF("%s: 0x%x: update fw mem dump\n", __func__, id);
		break;
	case WMI_PDEV_SET_HW_MODE_RESP_EVENTID:
		DPRINTF("%s: 0x%x: set HW mode response event\n", __func__, id);
		break;
	case WMI_WLAN_FREQ_AVOID_EVENTID:
		DPRINTF("%s: 0x%x: wlan freq avoid event\n", __func__, id);
		break;
	default:
		printf("%s: unsupported event id 0x%x\n", __func__, id);
		break;
	}

	/* All handlers above (except mgmt rx) only borrow the mbuf. */
	m_freem(m);
}
11805 
11806 void
11807 qwx_wmi_op_ep_tx_credits(struct qwx_softc *sc)
11808 {
11809 	struct qwx_htc *htc = &sc->htc;
11810 	int i;
11811 
11812 	/* try to send pending beacons first. they take priority */
11813 	sc->wmi.tx_credits = 1;
11814 	wakeup(&sc->wmi.tx_credits);
11815 
11816 	if (!sc->hw_params.credit_flow)
11817 		return;
11818 
11819 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
11820 		struct qwx_htc_ep *ep = &htc->endpoint[i];
11821 		if (ep->tx_credit_flow_enabled && ep->tx_credits > 0)
11822 			wakeup(&ep->tx_credits);
11823 	}
11824 }
11825 
11826 int
11827 qwx_connect_pdev_htc_service(struct qwx_softc *sc, uint32_t pdev_idx)
11828 {
11829 	int status;
11830 	uint32_t svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
11831 	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
11832 	    ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
11833 	struct qwx_htc_svc_conn_req conn_req;
11834 	struct qwx_htc_svc_conn_resp conn_resp;
11835 
11836 	memset(&conn_req, 0, sizeof(conn_req));
11837 	memset(&conn_resp, 0, sizeof(conn_resp));
11838 
11839 	/* these fields are the same for all service endpoints */
11840 	conn_req.ep_ops.ep_tx_complete = qwx_wmi_htc_tx_complete;
11841 	conn_req.ep_ops.ep_rx_complete = qwx_wmi_tlv_op_rx;
11842 	conn_req.ep_ops.ep_tx_credits = qwx_wmi_op_ep_tx_credits;
11843 
11844 	/* connect to control service */
11845 	conn_req.service_id = svc_id[pdev_idx];
11846 
11847 	status = qwx_htc_connect_service(&sc->htc, &conn_req, &conn_resp);
11848 	if (status) {
11849 		printf("%s: failed to connect to WMI CONTROL service "
11850 		    "status: %d\n", sc->sc_dev.dv_xname, status);
11851 		return status;
11852 	}
11853 
11854 	sc->wmi.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
11855 	sc->wmi.wmi[pdev_idx].eid = conn_resp.eid;
11856 	sc->wmi.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
11857 	sc->wmi.wmi[pdev_idx].tx_ce_desc = 0;
11858 
11859 	return 0;
11860 }
11861 
11862 int
11863 qwx_wmi_connect(struct qwx_softc *sc)
11864 {
11865 	uint32_t i;
11866 	uint8_t wmi_ep_count;
11867 
11868 	wmi_ep_count = sc->htc.wmi_ep_count;
11869 	if (wmi_ep_count > sc->hw_params.max_radios)
11870 		return -1;
11871 
11872 	for (i = 0; i < wmi_ep_count; i++)
11873 		qwx_connect_pdev_htc_service(sc, i);
11874 
11875 	return 0;
11876 }
11877 
11878 void
11879 qwx_htc_reset_endpoint_states(struct qwx_htc *htc)
11880 {
11881 	struct qwx_htc_ep *ep;
11882 	int i;
11883 
11884 	for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
11885 		ep = &htc->endpoint[i];
11886 		ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
11887 		ep->max_ep_message_len = 0;
11888 		ep->max_tx_queue_depth = 0;
11889 		ep->eid = i;
11890 		ep->htc = htc;
11891 		ep->tx_credit_flow_enabled = 1;
11892 	}
11893 }
11894 
/*
 * Stub TX-complete handler for the HTC control endpoint; simply
 * releases the mbuf after logging that no handling exists yet.
 */
void
qwx_htc_control_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);
	m_freem(m);
}
11902 
/*
 * Stub RX-complete handler for the HTC control endpoint; simply
 * releases the mbuf after logging that no handling exists yet.
 */
void
qwx_htc_control_rx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);
	m_freem(m);
}
11910 
11911 uint8_t
11912 qwx_htc_get_credit_allocation(struct qwx_htc *htc, uint16_t service_id)
11913 {
11914 	uint8_t i, allocation = 0;
11915 
11916 	for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
11917 		if (htc->service_alloc_table[i].service_id == service_id) {
11918 			allocation =
11919 			    htc->service_alloc_table[i].credit_allocation;
11920 		}
11921 	}
11922 
11923 	return allocation;
11924 }
11925 
/* Map an HTC service id to a human-readable name for diagnostics. */
const char *
qwx_htc_service_name(enum ath11k_htc_svc_id id)
{
	switch (id) {
	case ATH11K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH11K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
		return "WMI MAC1";
	case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
		return "WMI MAC2";
	case ATH11K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH11K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH11K_HTC_SVC_ID_IPA_TX:
		return "IPA TX";
	case ATH11K_HTC_SVC_ID_PKT_LOG:
		return "PKT LOG";
	}

	/* No default case above so the compiler can warn on new enum values. */
	return "Unknown";
}
11964 
11965 struct mbuf *
11966 qwx_htc_alloc_mbuf(size_t payload_size)
11967 {
11968 	struct mbuf *m;
11969 	size_t size = sizeof(struct ath11k_htc_hdr) + payload_size;
11970 
11971 	m = m_gethdr(M_DONTWAIT, MT_DATA);
11972 	if (m == NULL)
11973 		return NULL;
11974 
11975 	if (size <= MCLBYTES)
11976 		MCLGET(m, M_DONTWAIT);
11977 	else
11978 		MCLGETL(m, M_DONTWAIT, size);
11979 	if ((m->m_flags & M_EXT) == 0) {
11980 		m_freem(m);
11981 		return NULL;
11982 	}
11983 
11984 	m->m_len = m->m_pkthdr.len = size;
11985 	memset(mtod(m, void *), 0, size);
11986 
11987 	return m;
11988 }
11989 
11990 struct mbuf *
11991 qwx_htc_build_tx_ctrl_mbuf(void)
11992 {
11993 	size_t size;
11994 
11995 	size = ATH11K_HTC_CONTROL_BUFFER_SIZE - sizeof(struct ath11k_htc_hdr);
11996 
11997 	return qwx_htc_alloc_mbuf(size);
11998 }
11999 
/*
 * Fill in the HTC header at the front of an outgoing mbuf: endpoint id,
 * payload length, optional credit-update flag, and per-endpoint sequence
 * number. The mbuf must start with room for a struct ath11k_htc_hdr.
 */
void
qwx_htc_prepare_tx_mbuf(struct qwx_htc_ep *ep, struct mbuf *m)
{
	struct ath11k_htc_hdr *hdr;

	hdr = mtod(m, struct ath11k_htc_hdr *);

	memset(hdr, 0, sizeof(*hdr));
	/* Payload length excludes the HTC header itself. */
	hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
	    FIELD_PREP(HTC_HDR_PAYLOADLEN, (m->m_pkthdr.len - sizeof(*hdr)));

	if (ep->tx_credit_flow_enabled)
		hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
		    ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
#ifdef notyet
	spin_lock_bh(&ep->htc->tx_lock);
#endif
	/* seq_no increments per endpoint for every prepared frame. */
	hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
#ifdef notyet
	spin_unlock_bh(&ep->htc->tx_lock);
#endif
}
12022 
12023 int
12024 qwx_htc_send(struct qwx_htc *htc, enum ath11k_htc_ep_id eid, struct mbuf *m)
12025 {
12026 	struct qwx_htc_ep *ep = &htc->endpoint[eid];
12027 	struct qwx_softc *sc = htc->sc;
12028 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
12029 	void *ctx;
12030 	struct qwx_tx_data *tx_data;
12031 	int credits = 0;
12032 	int ret;
12033 	int credit_flow_enabled = (sc->hw_params.credit_flow &&
12034 	    ep->tx_credit_flow_enabled);
12035 
12036 	if (eid >= ATH11K_HTC_EP_COUNT) {
12037 		printf("%s: Invalid endpoint id: %d\n", __func__, eid);
12038 		return ENOENT;
12039 	}
12040 
12041 	if (credit_flow_enabled) {
12042 		credits = howmany(m->m_pkthdr.len, htc->target_credit_size);
12043 #ifdef notyet
12044 		spin_lock_bh(&htc->tx_lock);
12045 #endif
12046 		if (ep->tx_credits < credits) {
12047 			DNPRINTF(QWX_D_HTC,
12048 			    "%s: ep %d insufficient credits required %d "
12049 			    "total %d\n", __func__, eid, credits,
12050 			    ep->tx_credits);
12051 #ifdef notyet
12052 			spin_unlock_bh(&htc->tx_lock);
12053 #endif
12054 			return EAGAIN;
12055 		}
12056 		ep->tx_credits -= credits;
12057 		DNPRINTF(QWX_D_HTC, "%s: ep %d credits consumed %d total %d\n",
12058 		    __func__, eid, credits, ep->tx_credits);
12059 #ifdef notyet
12060 		spin_unlock_bh(&htc->tx_lock);
12061 #endif
12062 	}
12063 
12064 	qwx_htc_prepare_tx_mbuf(ep, m);
12065 
12066 	ctx = pipe->src_ring->per_transfer_context[pipe->src_ring->write_index];
12067 	tx_data = (struct qwx_tx_data *)ctx;
12068 
12069 	tx_data->eid = eid;
12070 	ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
12071 	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
12072 	if (ret) {
12073 		printf("%s: can't map mbuf (error %d)\n",
12074 		    sc->sc_dev.dv_xname, ret);
12075 		if (ret != ENOBUFS)
12076 			m_freem(m);
12077 		goto err_credits;
12078 	}
12079 
12080 	DNPRINTF(QWX_D_HTC, "%s: tx mbuf %p eid %d paddr %lx\n",
12081 	    __func__, m, tx_data->eid, tx_data->map->dm_segs[0].ds_addr);
12082 #ifdef QWX_DEBUG
12083 	{
12084 		int i;
12085 		uint8_t *p = mtod(m, uint8_t *);
12086 		DNPRINTF(QWX_D_HTC, "%s message buffer:", __func__);
12087 		for (i = 0; i < m->m_pkthdr.len; i++) {
12088 			DNPRINTF(QWX_D_HTC, "%s %.2x",
12089 			    i % 16 == 0 ? "\n" : "", p[i]);
12090 		}
12091 		if (i % 16)
12092 			DNPRINTF(QWX_D_HTC, "\n");
12093 	}
12094 #endif
12095 	ret = qwx_ce_send(htc->sc, m, ep->ul_pipe_id, ep->eid);
12096 	if (ret)
12097 		goto err_unmap;
12098 
12099 	return 0;
12100 
12101 err_unmap:
12102 	bus_dmamap_unload(sc->sc_dmat, tx_data->map);
12103 err_credits:
12104 	if (credit_flow_enabled) {
12105 #ifdef notyet
12106 		spin_lock_bh(&htc->tx_lock);
12107 #endif
12108 		ep->tx_credits += credits;
12109 		DNPRINTF(QWX_D_HTC, "%s: ep %d credits reverted %d total %d\n",
12110 		    __func__, eid, credits, ep->tx_credits);
12111 #ifdef notyet
12112 		spin_unlock_bh(&htc->tx_lock);
12113 #endif
12114 
12115 		if (ep->ep_ops.ep_tx_credits)
12116 			ep->ep_ops.ep_tx_credits(htc->sc);
12117 	}
12118 	return ret;
12119 }
12120 
12121 int
12122 qwx_htc_connect_service(struct qwx_htc *htc,
12123     struct qwx_htc_svc_conn_req *conn_req,
12124     struct qwx_htc_svc_conn_resp *conn_resp)
12125 {
12126 	struct qwx_softc *sc = htc->sc;
12127 	struct ath11k_htc_conn_svc *req_msg;
12128 	struct ath11k_htc_conn_svc_resp resp_msg_dummy;
12129 	struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
12130 	enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
12131 	struct qwx_htc_ep *ep;
12132 	struct mbuf *m;
12133 	unsigned int max_msg_size = 0;
12134 	int length, status = 0;
12135 	int disable_credit_flow_ctrl = 0;
12136 	uint16_t flags = 0;
12137 	uint16_t message_id, service_id;
12138 	uint8_t tx_alloc = 0;
12139 
12140 	/* special case for HTC pseudo control service */
12141 	if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
12142 		disable_credit_flow_ctrl = 1;
12143 		assigned_eid = ATH11K_HTC_EP_0;
12144 		max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
12145 		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
12146 		goto setup;
12147 	}
12148 
12149 	tx_alloc = qwx_htc_get_credit_allocation(htc, conn_req->service_id);
12150 	if (!tx_alloc)
12151 		DNPRINTF(QWX_D_HTC,
12152 		    "%s: htc service %s does not allocate target credits\n",
12153 		    sc->sc_dev.dv_xname,
12154 		    qwx_htc_service_name(conn_req->service_id));
12155 
12156 	m = qwx_htc_build_tx_ctrl_mbuf();
12157 	if (!m) {
12158 		printf("%s: Failed to allocate HTC packet\n",
12159 		    sc->sc_dev.dv_xname);
12160 		return ENOMEM;
12161 	}
12162 
12163 	length = sizeof(*req_msg);
12164 	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) + length;
12165 
12166 	req_msg = (struct ath11k_htc_conn_svc *)(mtod(m, uint8_t *) +
12167 	    sizeof(struct ath11k_htc_hdr));
12168 	memset(req_msg, 0, length);
12169 	req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
12170 	    ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
12171 
12172 	flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
12173 
12174 	/* Only enable credit flow control for WMI ctrl service */
12175 	if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
12176 	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
12177 	      conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
12178 		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
12179 		disable_credit_flow_ctrl = 1;
12180 	}
12181 
12182 	if (!sc->hw_params.credit_flow) {
12183 		flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
12184 		disable_credit_flow_ctrl = 1;
12185 	}
12186 
12187 	req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
12188 	req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
12189 	    conn_req->service_id);
12190 
12191 	sc->ctl_resp = 0;
12192 
12193 	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
12194 	if (status) {
12195 		if (status != ENOBUFS)
12196 			m_freem(m);
12197 		return status;
12198 	}
12199 
12200 	while (!sc->ctl_resp) {
12201 		int ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
12202 		    SEC_TO_NSEC(1));
12203 		if (ret) {
12204 			printf("%s: Service connect timeout\n",
12205 			    sc->sc_dev.dv_xname);
12206 			return ret;
12207 		}
12208 	}
12209 
12210 	/* we controlled the buffer creation, it's aligned */
12211 	resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
12212 	message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
12213 	service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
12214 			       resp_msg->msg_svc_id);
12215 	if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
12216 	    (htc->control_resp_len < sizeof(*resp_msg))) {
12217 		printf("%s: Invalid resp message ID 0x%x", __func__,
12218 		    message_id);
12219 		return EPROTO;
12220 	}
12221 
12222 	DNPRINTF(QWX_D_HTC, "%s: service %s connect response status 0x%lx "
12223 	    "assigned ep 0x%lx\n", __func__, qwx_htc_service_name(service_id),
12224 	    FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
12225 	    FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
12226 
12227 	conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
12228 	    resp_msg->flags_len);
12229 
12230 	/* check response status */
12231 	if (conn_resp->connect_resp_code !=
12232 	    ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
12233 		printf("%s: HTC Service %s connect request failed: 0x%x)\n",
12234 		    __func__, qwx_htc_service_name(service_id),
12235 		    conn_resp->connect_resp_code);
12236 		return EPROTO;
12237 	}
12238 
12239 	assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
12240 	    HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len);
12241 
12242 	max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
12243 	    resp_msg->flags_len);
12244 setup:
12245 	if (assigned_eid >= ATH11K_HTC_EP_COUNT)
12246 		return EPROTO;
12247 
12248 	if (max_msg_size == 0)
12249 		return EPROTO;
12250 
12251 	ep = &htc->endpoint[assigned_eid];
12252 	ep->eid = assigned_eid;
12253 
12254 	if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
12255 		return EPROTO;
12256 
12257 	/* return assigned endpoint to caller */
12258 	conn_resp->eid = assigned_eid;
12259 	conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
12260 	    resp_msg->flags_len);
12261 
12262 	/* setup the endpoint */
12263 	ep->service_id = conn_req->service_id;
12264 	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
12265 	ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
12266 	    resp_msg->flags_len);
12267 	ep->tx_credits = tx_alloc;
12268 
12269 	/* copy all the callbacks */
12270 	ep->ep_ops = conn_req->ep_ops;
12271 
12272 	status = sc->ops.map_service_to_pipe(htc->sc, ep->service_id,
12273 	    &ep->ul_pipe_id, &ep->dl_pipe_id);
12274 	if (status)
12275 		return status;
12276 
12277 	DNPRINTF(QWX_D_HTC,
12278 	    "%s: htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
12279 	    __func__, qwx_htc_service_name(ep->service_id), ep->ul_pipe_id,
12280 	    ep->dl_pipe_id, ep->eid);
12281 
12282 	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
12283 		ep->tx_credit_flow_enabled = 0;
12284 		DNPRINTF(QWX_D_HTC,
12285 		    "%s: htc service '%s' eid %d tx flow control disabled\n",
12286 		    __func__, qwx_htc_service_name(ep->service_id),
12287 		    assigned_eid);
12288 	}
12289 
12290 	return status;
12291 }
12292 
12293 int
12294 qwx_htc_start(struct qwx_htc *htc)
12295 {
12296 	struct mbuf *m;
12297 	int status = 0;
12298 	struct qwx_softc *sc = htc->sc;
12299 	struct ath11k_htc_setup_complete_extended *msg;
12300 
12301 	m = qwx_htc_build_tx_ctrl_mbuf();
12302 	if (!m)
12303 		return ENOMEM;
12304 
12305 	m->m_len = m->m_pkthdr.len = sizeof(struct ath11k_htc_hdr) +
12306 	    sizeof(*msg);
12307 
12308 	msg = (struct ath11k_htc_setup_complete_extended *)(mtod(m, uint8_t *) +
12309 	    sizeof(struct ath11k_htc_hdr));
12310 	msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
12311 	    ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
12312 
12313 	if (sc->hw_params.credit_flow)
12314 		DNPRINTF(QWX_D_HTC, "%s: using tx credit flow control\n",
12315 		    __func__);
12316 	else
12317 		msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
12318 
12319 	status = qwx_htc_send(htc, ATH11K_HTC_EP_0, m);
12320 	if (status) {
12321 		m_freem(m);
12322 		return status;
12323 	}
12324 
12325 	return 0;
12326 }
12327 
12328 int
12329 qwx_htc_init(struct qwx_softc *sc)
12330 {
12331 	struct qwx_htc *htc = &sc->htc;
12332 	struct qwx_htc_svc_conn_req conn_req;
12333 	struct qwx_htc_svc_conn_resp conn_resp;
12334 	int ret;
12335 #ifdef notyet
12336 	spin_lock_init(&htc->tx_lock);
12337 #endif
12338 	qwx_htc_reset_endpoint_states(htc);
12339 
12340 	htc->sc = sc;
12341 
12342 	switch (sc->wmi.preferred_hw_mode) {
12343 	case WMI_HOST_HW_MODE_SINGLE:
12344 		htc->wmi_ep_count = 1;
12345 		break;
12346 	case WMI_HOST_HW_MODE_DBS:
12347 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
12348 		htc->wmi_ep_count = 2;
12349 		break;
12350 	case WMI_HOST_HW_MODE_DBS_SBS:
12351 		htc->wmi_ep_count = 3;
12352 		break;
12353 	default:
12354 		htc->wmi_ep_count = sc->hw_params.max_radios;
12355 		break;
12356 	}
12357 
12358 	/* setup our pseudo HTC control endpoint connection */
12359 	memset(&conn_req, 0, sizeof(conn_req));
12360 	memset(&conn_resp, 0, sizeof(conn_resp));
12361 	conn_req.ep_ops.ep_tx_complete = qwx_htc_control_tx_complete;
12362 	conn_req.ep_ops.ep_rx_complete = qwx_htc_control_rx_complete;
12363 	conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
12364 	conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
12365 
12366 	/* connect fake service */
12367 	ret = qwx_htc_connect_service(htc, &conn_req, &conn_resp);
12368 	if (ret) {
12369 		printf("%s: could not connect to htc service (%d)\n",
12370 		    sc->sc_dev.dv_xname, ret);
12371 		return ret;
12372 	}
12373 
12374 	return 0;
12375 }
12376 
12377 int
12378 qwx_htc_setup_target_buffer_assignments(struct qwx_htc *htc)
12379 {
12380 	struct qwx_htc_svc_tx_credits *serv_entry;
12381 	uint32_t svc_id[] = {
12382 		ATH11K_HTC_SVC_ID_WMI_CONTROL,
12383 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
12384 		ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
12385 	};
12386 	int i, credits;
12387 
12388 	credits =  htc->total_transmit_credits;
12389 	serv_entry = htc->service_alloc_table;
12390 
12391 	if ((htc->wmi_ep_count == 0) ||
12392 	    (htc->wmi_ep_count > nitems(svc_id)))
12393 		return EINVAL;
12394 
12395 	/* Divide credits among number of endpoints for WMI */
12396 	credits = credits / htc->wmi_ep_count;
12397 	for (i = 0; i < htc->wmi_ep_count; i++) {
12398 		serv_entry[i].service_id = svc_id[i];
12399 		serv_entry[i].credit_allocation = credits;
12400 	}
12401 
12402 	return 0;
12403 }
12404 
/*
 * Wait for the target's initial HTC "ready" message and record the
 * advertised transmit credit count and credit size.
 *
 * Returns 0 on success, EINVAL for a malformed ready message or bogus
 * credit values, or the tsleep error on timeout.
 */
int
qwx_htc_wait_target(struct qwx_softc *sc)
{
	struct qwx_htc *htc = &sc->htc;
	int polling = 0, ret;
	uint16_t i;
	struct ath11k_htc_ready *ready;
	uint16_t message_id;
	uint16_t credit_count;
	uint16_t credit_size;

	sc->ctl_resp = 0;
	while (!sc->ctl_resp) {
		ret = tsleep_nsec(&sc->ctl_resp, 0, "qwxhtcinit",
		    SEC_TO_NSEC(1));
		if (ret) {
			if (ret != EWOULDBLOCK)
				return ret;

			/* Already polled once and still nothing: give up. */
			if (polling) {
				printf("%s: failed to receive control response "
				    "completion\n", sc->sc_dev.dv_xname);
				return ret;
			}

			/*
			 * Interrupts may not be functional yet during early
			 * bring-up; service the copy engines by hand once
			 * and retry the wait.
			 */
			printf("%s: failed to receive control response "
			    "completion, polling...\n", sc->sc_dev.dv_xname);
			polling = 1;

			for (i = 0; i < sc->hw_params.ce_count; i++)
				qwx_ce_per_engine_service(sc, i);
		}
	}

	if (htc->control_resp_len < sizeof(*ready)) {
		printf("%s: Invalid HTC ready msg len:%d\n", __func__,
		    htc->control_resp_len);
		return EINVAL;
	}

	/* Parse the ready message stashed by the rx completion path. */
	ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
	message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
	credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
	    ready->id_credit_count);
	credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);

	if (message_id != ATH11K_HTC_MSG_READY_ID) {
		printf("%s: Invalid HTC ready msg: 0x%x\n", __func__,
		    message_id);
		return EINVAL;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	DNPRINTF(QWX_D_HTC, "%s: target ready total_transmit_credits %d "
	    "target_credit_size %d\n", __func__,
	    htc->total_transmit_credits, htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		printf("%s: Invalid credit size received\n", __func__);
		return EINVAL;
	}

	/* For QCA6390, wmi endpoint uses 1 credit to avoid
	 * back-to-back write.
	 */
	if (sc->hw_params.supports_shadow_regs)
		htc->total_transmit_credits = 1;

	qwx_htc_setup_target_buffer_assignments(htc);

	return 0;
}
12480 
/* HTC tx completion callback for the HTT data service. */
void
qwx_dp_htt_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m)
{
	/* Just free the mbuf, no further action required. */
	m_freem(m);
}
12487 
12488 static inline void
12489 qwx_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr)
12490 {
12491 #if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
12492 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
12493 		addr_l32 = swab32(addr_l32);
12494 		addr_h16 = swab16(addr_h16);
12495 	}
12496 #endif
12497 	uint32_t val32;
12498 	uint16_t val16;
12499 
12500 	val32 = le32toh(addr_l32);
12501 	memcpy(addr, &val32, 4);
12502 	val16 = le16toh(addr_h16);
12503 	memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4);
12504 }
12505 
/*
 * Handle an HTT peer map event: record the firmware-assigned vdev/peer
 * ids and hash values in the qwx_node matching the peer's MAC address,
 * then wake anyone sleeping on sc->peer_mapped.
 */
void
qwx_peer_map_event(struct qwx_softc *sc, uint8_t vdev_id, uint16_t peer_id,
    uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct qwx_node *nq;
	struct ath11k_peer *peer;
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
	ni = ieee80211_find_node(ic, mac_addr);
	if (ni == NULL) /* NOTE(review): returns without waking sc->peer_mapped waiters; confirm callers handle this via timeout */
		return;
	nq = (struct qwx_node *)ni;
	peer = &nq->peer;

	peer->vdev_id = vdev_id;
	peer->peer_id = peer_id;
	peer->ast_hash = ast_hash;
	peer->hw_peer_id = hw_peer_id;
#if 0
	ether_addr_copy(peer->addr, mac_addr);
	list_add(&peer->list, &ab->peers);
#endif
	sc->peer_mapped = 1;
	wakeup(&sc->peer_mapped);

	DNPRINTF(QWX_D_HTT, "%s: peer map vdev %d peer %s id %d\n",
	    __func__, vdev_id, ether_sprintf(mac_addr), peer_id);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
}
12540 
12541 
/*
 * Dispatch a target-to-host (T2H) HTT message received on the HTT data
 * endpoint. Currently handles version negotiation and peer map events;
 * other message types are logged and dropped. Consumes the mbuf.
 */
void
qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_dp *dp = &sc->dp;
	struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *);
	/* The message type lives in the first 32-bit word of the response. */
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE,
	    *(uint32_t *)resp);
	uint16_t peer_id;
	uint8_t vdev_id;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
	uint16_t peer_mac_h16;
	uint16_t ast_hash;
	uint16_t hw_peer_id;

	DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		/* Record the target's HTT version and wake the waiter. */
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
		    resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
		    resp->version_msg.version);
		dp->htt_tgt_version_received = 1;
		wakeup(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		/* v1 peer map carries no ast_hash/hw_peer_id; pass zeros. */
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
		    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
		    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
		    resp->peer_map_ev.info1);
		qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
		    peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
		    resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash,
		    hw_peer_id);
		break;
#if 0
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
#endif
	default:
		printf("%s: htt event %d not handled\n", __func__, type);
		break;
	}

	m_freem(m);
}
12621 
12622 int
12623 qwx_dp_htt_connect(struct qwx_dp *dp)
12624 {
12625 	struct qwx_htc_svc_conn_req conn_req;
12626 	struct qwx_htc_svc_conn_resp conn_resp;
12627 	int status;
12628 
12629 	memset(&conn_req, 0, sizeof(conn_req));
12630 	memset(&conn_resp, 0, sizeof(conn_resp));
12631 
12632 	conn_req.ep_ops.ep_tx_complete = qwx_dp_htt_htc_tx_complete;
12633 	conn_req.ep_ops.ep_rx_complete = qwx_dp_htt_htc_t2h_msg_handler;
12634 
12635 	/* connect to control service */
12636 	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
12637 
12638 	status = qwx_htc_connect_service(&dp->sc->htc, &conn_req, &conn_resp);
12639 
12640 	if (status)
12641 		return status;
12642 
12643 	dp->eid = conn_resp.eid;
12644 
12645 	return 0;
12646 }
12647 
12648 void
12649 qwx_dp_pdev_reo_cleanup(struct qwx_softc *sc)
12650 {
12651 	struct qwx_dp *dp = &sc->dp;
12652 	int i;
12653 
12654 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
12655 		qwx_dp_srng_cleanup(sc, &dp->reo_dst_ring[i]);
12656 }
12657 
12658 int
12659 qwx_dp_pdev_reo_setup(struct qwx_softc *sc)
12660 {
12661 	struct qwx_dp *dp = &sc->dp;
12662 	int ret;
12663 	int i;
12664 
12665 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
12666 		ret = qwx_dp_srng_setup(sc, &dp->reo_dst_ring[i],
12667 		    HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE);
12668 		if (ret) {
12669 			printf("%s: failed to setup reo_dst_ring\n", __func__);
12670 			qwx_dp_pdev_reo_cleanup(sc);
12671 			return ret;
12672 		}
12673 	}
12674 
12675 	return 0;
12676 }
12677 
12678 void
12679 qwx_dp_rx_pdev_srng_free(struct qwx_softc *sc, int mac_id)
12680 {
12681 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
12682 	int i;
12683 
12684 	qwx_dp_srng_cleanup(sc, &dp->rx_refill_buf_ring.refill_buf_ring);
12685 
12686 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
12687 		if (sc->hw_params.rx_mac_buf_ring)
12688 			qwx_dp_srng_cleanup(sc, &dp->rx_mac_buf_ring[i]);
12689 
12690 		qwx_dp_srng_cleanup(sc, &dp->rxdma_err_dst_ring[i]);
12691 		qwx_dp_srng_cleanup(sc,
12692 		    &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
12693 	}
12694 
12695 	qwx_dp_srng_cleanup(sc, &dp->rxdma_mon_buf_ring.refill_buf_ring);
12696 }
12697 
/*
 * Allocate the per-pdev rx SRNGs: the rx refill ring, per-rxdma mac
 * buffer rings (when the hardware has them), and the rxdma error
 * destination rings. Monitor-mode rings are stubbed out for now.
 * Returns 0 on success or the error from qwx_dp_srng_setup().
 */
int
qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
#if 0
	struct dp_srng *srng = NULL;
#endif
	int i;
	int ret;

	ret = qwx_dp_srng_setup(sc, &dp->rx_refill_buf_ring.refill_buf_ring,
	    HAL_RXDMA_BUF, 0, dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		printf("%s: failed to setup rx_refill_buf_ring\n",
		    sc->sc_dev.dv_xname);
		return ret;
	}

	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ret = qwx_dp_srng_setup(sc, &dp->rx_mac_buf_ring[i],
			    HAL_RXDMA_BUF, 1, dp->mac_id + i, 1024);
			if (ret) {
				printf("%s: failed to setup "
				    "rx_mac_buf_ring %d\n",
				    sc->sc_dev.dv_xname, i);
				return ret;
			}
		}
	}

	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_srng_setup(sc, &dp->rxdma_err_dst_ring[i],
		    HAL_RXDMA_DST, 0, dp->mac_id + i,
		    DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup rxdma_err_dst_ring %d\n",
			   sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = qwx_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0,
		    dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			printf("%s: failed to setup "
			    "rx_mon_status_refill_ring %d\n",
			    sc->sc_dev.dv_xname, i);
			return ret;
		}
	}
#endif
	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!sc->hw_params.rxdma1_enable) {
#if 0
		//init mon status buffer reap timer
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
#endif
		return 0;
	}
#if 0
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}
#endif
	return 0;
}
12796 
12797 void
12798 qwx_dp_rxdma_buf_ring_free(struct qwx_softc *sc, struct dp_rxdma_ring *rx_ring)
12799 {
12800 	int i;
12801 
12802 	for (i = 0; i < rx_ring->bufs_max; i++) {
12803 		struct qwx_rx_data *rx_data = &rx_ring->rx_data[i];
12804 
12805 		if (rx_data->map == NULL)
12806 			continue;
12807 
12808 		if (rx_data->m) {
12809 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
12810 			m_free(rx_data->m);
12811 			rx_data->m = NULL;
12812 		}
12813 
12814 		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
12815 		rx_data->map = NULL;
12816 	}
12817 
12818 	free(rx_ring->rx_data, M_DEVBUF,
12819 	    sizeof(rx_ring->rx_data[0]) * rx_ring->bufs_max);
12820 	rx_ring->rx_data = NULL;
12821 	rx_ring->bufs_max = 0;
12822 	rx_ring->cur = 0;
12823 }
12824 
12825 void
12826 qwx_dp_rxdma_pdev_buf_free(struct qwx_softc *sc, int mac_id)
12827 {
12828 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
12829 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
12830 	int i;
12831 
12832 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
12833 
12834 	rx_ring = &dp->rxdma_mon_buf_ring;
12835 	qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
12836 
12837 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
12838 		rx_ring = &dp->rx_mon_status_refill_ring[i];
12839 		qwx_dp_rxdma_buf_ring_free(sc, rx_ring);
12840 	}
12841 }
12842 
12843 void
12844 qwx_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie,
12845     uint8_t manager)
12846 {
12847 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
12848 	uint32_t paddr_lo, paddr_hi;
12849 
12850 	paddr_lo = paddr & 0xffffffff;
12851 	paddr_hi = paddr >> 32;
12852 	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
12853 	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
12854 	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
12855 	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
12856 }
12857 
12858 void
12859 qwx_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie,
12860     uint8_t *rbm)
12861 {
12862 	struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
12863 
12864 	*paddr = (((uint64_t)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
12865 	    binfo->info1)) << 32) |
12866 	    FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
12867 	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
12868 	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
12869 }
12870 
/* Returns number of Rx buffers replenished */
/*
 * Refill the rxdma ring with fresh mbuf clusters, up to req_entries or
 * until the ring's free source entries run out. Each buffer is DMA
 * mapped and its address/cookie written into a ring descriptor.
 * Returns 0 on success, ENOBUFS when allocation/mapping fails partway
 * (buffers placed so far remain on the ring).
 */
int
qwx_dp_rxbufs_replenish(struct qwx_softc *sc, int mac_id,
    struct dp_rxdma_ring *rx_ring, int req_entries,
    enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	uint32_t *desc;
	struct mbuf *m;
	int num_free;
	int num_remain;
	int ret;
	uint32_t cookie;
	uint64_t paddr;
	struct qwx_rx_data *rx_data;

	req_entries = MIN(req_entries, rx_ring->bufs_max);

	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* With no explicit request, top up once the ring drops below 1/4. */
	num_free = qwx_hal_srng_src_num_free(sc, srng, 1);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = MIN(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		const size_t size = DP_RX_BUFFER_SIZE;

		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto fail_free_mbuf;

		if (size <= MCLBYTES)
			MCLGET(m, M_DONTWAIT);
		else
			MCLGETL(m, M_DONTWAIT, size);
		if ((m->m_flags & M_EXT) == 0)
			goto fail_free_mbuf;

		m->m_len = m->m_pkthdr.len = size;
		rx_data = &rx_ring->rx_data[rx_ring->cur];
		/* Slot still holds a buffer: ring state is inconsistent. */
		if (rx_data->m != NULL)
			goto fail_free_mbuf;

		/* DMA maps are created lazily and reused across refills. */
		if (rx_data->map == NULL) {
			ret = bus_dmamap_create(sc->sc_dmat, size, 1,
			    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
			if (ret)
				goto fail_free_mbuf;
		}

		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (ret) {
			printf("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, ret);
			goto fail_free_mbuf;
		}

		desc = qwx_hal_srng_src_get_next_entry(sc, srng);
		if (!desc)
			goto fail_dma_unmap;

		rx_data->m = m;

		/* Cookie encodes pdev id and buffer slot for rx completion. */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, rx_ring->cur);

		rx_ring->cur = (rx_ring->cur + 1) % rx_ring->bufs_max;
		num_remain--;

		paddr = rx_data->map->dm_segs[0].ds_addr;
		qwx_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return 0;

fail_dma_unmap:
	bus_dmamap_unload(sc->sc_dmat, rx_data->map);
fail_free_mbuf:
	/* NOTE(review): reachable with m == NULL after a failed m_gethdr();
	 * relies on m_free(NULL) being a no-op — confirm against m_free(9). */
	m_free(m);

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ENOBUFS;
}
12969 
12970 int
12971 qwx_dp_rxdma_ring_buf_setup(struct qwx_softc *sc,
12972     struct dp_rxdma_ring *rx_ring, uint32_t ringtype)
12973 {
12974 	struct qwx_pdev_dp *dp = &sc->pdev_dp;
12975 	int num_entries;
12976 
12977 	num_entries = rx_ring->refill_buf_ring.size /
12978 	    qwx_hal_srng_get_entrysize(sc, ringtype);
12979 
12980 	KASSERT(rx_ring->rx_data == NULL);
12981 	rx_ring->rx_data = mallocarray(num_entries, sizeof(rx_ring->rx_data[0]),
12982 	    M_DEVBUF, M_NOWAIT | M_ZERO);
12983 	if (rx_ring->rx_data == NULL)
12984 		return ENOMEM;
12985 
12986 	rx_ring->bufs_max = num_entries;
12987 	rx_ring->cur = 0;
12988 
12989 	return qwx_dp_rxbufs_replenish(sc, dp->mac_id, rx_ring, num_entries,
12990 	    sc->hw_params.hal_params->rx_buf_rbm);
12991 }
12992 
/*
 * Populate this pdev's RXDMA rings with receive buffers: the main
 * refill ring always, the monitor buffer ring only on parts with
 * rxdma1 enabled.  Returns 0 on success or an errno.
 */
int
qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int ret;
#if 0
	int i;
#endif

	rx_ring = &dp->rx_refill_buf_ring;
	ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF);
	if (ret)
		return ret;

	if (sc->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
		    HAL_RXDMA_MONITOR_BUF);
		if (ret)
			return ret;
	}
	/* Monitor status refill rings are not ported yet. */
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring,
		    HAL_RXDMA_MONITOR_STATUS);
		if (ret)
			return ret;
	}
#endif
	return 0;
}
13026 
/*
 * Release all per-pdev RX resources: the SRNG rings first, then the
 * RXDMA buffers and their DMA maps.
 */
void
qwx_dp_rx_pdev_free(struct qwx_softc *sc, int mac_id)
{
	qwx_dp_rx_pdev_srng_free(sc, mac_id);
	qwx_dp_rxdma_pdev_buf_free(sc, mac_id);
}
13033 
13034 bus_addr_t
13035 qwx_hal_srng_get_hp_addr(struct qwx_softc *sc, struct hal_srng *srng)
13036 {
13037 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
13038 		return 0;
13039 
13040 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
13041 		return sc->hal.wrp.paddr +
13042 		    ((unsigned long)srng->u.src_ring.hp_addr -
13043 		    (unsigned long)sc->hal.wrp.vaddr);
13044 	} else {
13045 		return sc->hal.rdp.paddr +
13046 		    ((unsigned long)srng->u.dst_ring.hp_addr -
13047 		    (unsigned long)sc->hal.rdp.vaddr);
13048 	}
13049 }
13050 
13051 bus_addr_t
13052 qwx_hal_srng_get_tp_addr(struct qwx_softc *sc, struct hal_srng *srng)
13053 {
13054 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
13055 		return 0;
13056 
13057 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
13058 		return sc->hal.rdp.paddr +
13059 		    ((unsigned long)srng->u.src_ring.tp_addr -
13060 		    (unsigned long)sc->hal.rdp.vaddr);
13061 	} else {
13062 		return sc->hal.wrp.paddr +
13063 		    ((unsigned long)srng->u.dst_ring.tp_addr -
13064 		    (unsigned long)sc->hal.wrp.vaddr);
13065 	}
13066 }
13067 
/*
 * Translate a HAL ring id/type pair into the HTT ring id/type that
 * firmware expects in ring setup commands.  Returns 0 on success or
 * EINVAL for combinations that have no HTT equivalent.
 */
int
qwx_dp_tx_get_ring_id_type(struct qwx_softc *sc, int mac_id, uint32_t ring_id,
    enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type,
    enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* Each LMAC owns a contiguous group of ring ids. */
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!sc->hw_params.rx_mac_buf_ring) {
			/* Only the two SW2RXDMA buf rings of this LMAC are
			 * valid here. */
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
			    lmac_ring_id_offset) ||
			    ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
			    lmac_ring_id_offset)))
				return EINVAL;
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		printf("%s: Unsupported ring type in DP :%d\n",
		    sc->sc_dev.dv_xname, ring_type);
		return EINVAL;
	}

	return 0;
}
13128 
/*
 * Send an HTT SRING_SETUP command describing a ring (base address,
 * entry size, head/tail pointer locations, MSI target and interrupt
 * thresholds) to the firmware.  Returns 0 on success or an errno.
 */
int
qwx_dp_tx_htt_srng_setup(struct qwx_softc *sc, uint32_t ring_id, int mac_id,
    enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	uint32_t ring_entry_sz;
	uint64_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwx_htc_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	hp_addr = qwx_hal_srng_get_hp_addr(sc, srng);
	tp_addr = qwx_hal_srng_get_tp_addr(sc, srng);

	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload sits after the HTC header in the mbuf. */
	cmd = (struct htt_srng_setup_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_SRING_SETUP);
	/* Rings shared with hardware use the translated SW2HW mac id. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
		    mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
	    htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (uint64_t)params.ring_base_paddr >>
	    HAL_ADDR_MSB_REG_SHIFT;

	ring_entry_sz = qwx_hal_srng_get_entrysize(sc, ring_type);

	/* Entry size and ring size are programmed in 32-bit words. */
	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
	    ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
	    params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
	    !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = hp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = tp_addr >> HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
	cmd->ring_msi_addr_hi = 0;
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
	    params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
	    HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
	    params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
		    HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
		    params.low_threshold);
	}

	DNPRINTF(QWX_D_HTT, "%s: htt srng setup msi_addr_lo 0x%x "
	    "msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d "
	    "intr_info 0x%x flags 0x%x\n", __func__, cmd->ring_msi_addr_lo,
	    cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type,
	    cmd->intr_info, cmd->info2);

	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
13236 
13237 int
13238 qwx_dp_tx_htt_h2t_ppdu_stats_req(struct qwx_softc *sc, uint32_t mask,
13239     uint8_t pdev_id)
13240 {
13241 	struct qwx_dp *dp = &sc->dp;
13242 	struct mbuf *m;
13243 	struct htt_ppdu_stats_cfg_cmd *cmd;
13244 	int len = sizeof(*cmd);
13245 	uint8_t pdev_mask;
13246 	int ret;
13247 	int i;
13248 
13249 	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
13250 		m = qwx_htc_alloc_mbuf(len);
13251 		if (!m)
13252 			return ENOMEM;
13253 
13254 		cmd = (struct htt_ppdu_stats_cfg_cmd *)(mtod(m, uint8_t *) +
13255 		    sizeof(struct ath11k_htc_hdr));
13256 		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
13257 				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
13258 
13259 		pdev_mask = 1 << (pdev_id + i);
13260 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
13261 		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
13262 		    mask);
13263 
13264 		ret = qwx_htc_send(&sc->htc, dp->eid, m);
13265 		if (ret) {
13266 			m_freem(m);
13267 			return ret;
13268 		}
13269 	}
13270 
13271 	return 0;
13272 }
13273 
/*
 * Send an HTT RX_RING_SELECTION_CFG command programming the packet
 * type and TLV filters for an rx ring.  Returns 0 on success or an
 * errno.
 */
int
qwx_dp_tx_htt_rx_filter_setup(struct qwx_softc *sc, uint32_t ring_id,
    int mac_id, enum hal_ring_type ring_type, size_t rx_buf_size,
    struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &sc->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct mbuf *m;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	memset(&params, 0, sizeof(params));
	qwx_hal_srng_get_params(sc, srng, &params);

	ret = qwx_dp_tx_get_ring_id_type(sc, mac_id, ring_id,
	    ring_type, &htt_ring_type, &htt_ring_id);
	if (ret)
		goto err_free;

	/* The command payload sits after the HTC header in the mbuf. */
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
	    HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	/* Rings shared with hardware use the translated SW2HW mac id. */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING) {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    DP_SW2HW_MACID(mac_id));
	} else {
		cmd->info0 |=
		    FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
		    mac_id);
	}
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
	    htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
	    !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
	    !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
	    rx_buf_size);
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = qwx_htc_send(&sc->htc, sc->dp.eid, m);
	if (ret)
		goto err_free;

	return 0;

err_free:
	m_freem(m);

	return ret;
}
13340 
/*
 * Allocate and configure all per-pdev RX rings: allocate the srngs,
 * fill the RXDMA rings with buffers, then announce each ring to the
 * firmware via HTT setup commands.  Returns 0 on success or an errno.
 */
int
qwx_dp_rx_pdev_alloc(struct qwx_softc *sc, int mac_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	uint32_t ring_id;
	int i;
	int ret;

	ret = qwx_dp_rx_pdev_srng_alloc(sc);
	if (ret) {
		printf("%s: failed to setup rx srngs: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_dp_rxdma_pdev_buf_setup(sc);
	if (ret) {
		printf("%s: failed to setup rxdma ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		printf("%s: failed to configure rx_refill_buf_ring: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Per-rxdma mac buffer rings, only on parts that have them. */
	if (sc->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = qwx_dp_tx_htt_srng_setup(sc, ring_id,
			    mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				printf("%s: failed to configure "
				    "rx_mac_buf_ring%d: %d\n",
				    sc->sc_dev.dv_xname, i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = qwx_dp_tx_htt_srng_setup(sc, ring_id, mac_id + i,
		    HAL_RXDMA_DST);
		if (ret) {
			printf("%s: failed to configure "
			    "rxdma_err_dest_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}

	if (!sc->hw_params.rxdma1_enable)
		goto config_refill_ring;
	/* Monitor ring setup is not ported yet. */
#if 0
	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
#endif
config_refill_ring:
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ret = qwx_dp_tx_htt_srng_setup(sc,
		    dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id,
		    mac_id + i, HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			printf("%s: failed to configure "
			    "mon_status_refill_ring%d %d\n",
			    sc->sc_dev.dv_xname, i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
13441 
13442 void
13443 qwx_dp_pdev_free(struct qwx_softc *sc)
13444 {
13445 	int i;
13446 
13447 	for (i = 0; i < sc->num_radios; i++)
13448 		qwx_dp_rx_pdev_free(sc, i);
13449 }
13450 
13451 int
13452 qwx_dp_pdev_alloc(struct qwx_softc *sc)
13453 {
13454 	int ret;
13455 	int i;
13456 
13457 	for (i = 0; i < sc->num_radios; i++) {
13458 		ret = qwx_dp_rx_pdev_alloc(sc, i);
13459 		if (ret) {
13460 			printf("%s: failed to allocate pdev rx "
13461 			    "for pdev_id %d\n", sc->sc_dev.dv_xname, i);
13462 			goto err;
13463 		}
13464 	}
13465 
13466 	return 0;
13467 
13468 err:
13469 	qwx_dp_pdev_free(sc);
13470 
13471 	return ret;
13472 }
13473 
/*
 * Send an HTT version request to firmware and sleep until the target
 * version event arrives.  Returns 0 on success, ETIMEDOUT if no reply
 * arrives within 3 seconds (any tsleep error is mapped to ETIMEDOUT),
 * or ENOTSUP on a major version mismatch.
 */
int
qwx_dp_tx_htt_h2t_ver_req_msg(struct qwx_softc *sc)
{
	struct qwx_dp *dp = &sc->dp;
	struct mbuf *m;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	/* Cleared here; set by the HTT event handler upon reply. */
	dp->htt_tgt_version_received = 0;

	m = qwx_htc_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct htt_ver_req_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr));
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
	    HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = qwx_htc_send(&sc->htc, dp->eid, m);
	if (ret) {
		m_freem(m);
		return ret;
	}

	while (!dp->htt_tgt_version_received) {
		ret = tsleep_nsec(&dp->htt_tgt_version_received, 0,
		    "qwxtgtver", SEC_TO_NSEC(3));
		if (ret)
			return ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		printf("%s: unsupported htt major version %d "
		    "supported version is %d\n", __func__,
		    dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return ENOTSUP;
	}

	return 0;
}
13516 
13517 void
13518 qwx_dp_update_vdev_search(struct qwx_softc *sc, struct qwx_vif *arvif)
13519 {
13520 	 /* When v2_map_support is true:for STA mode, enable address
13521 	  * search index, tcl uses ast_hash value in the descriptor.
13522 	  * When v2_map_support is false: for STA mode, don't enable
13523 	  * address search index.
13524 	  */
13525 	switch (arvif->vdev_type) {
13526 	case WMI_VDEV_TYPE_STA:
13527 		if (sc->hw_params.htt_peer_map_v2) {
13528 			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
13529 			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
13530 		} else {
13531 			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
13532 			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
13533 		}
13534 		break;
13535 	case WMI_VDEV_TYPE_AP:
13536 	case WMI_VDEV_TYPE_IBSS:
13537 		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
13538 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
13539 		break;
13540 	case WMI_VDEV_TYPE_MONITOR:
13541 	default:
13542 		return;
13543 	}
13544 }
13545 
13546 void
13547 qwx_dp_vdev_tx_attach(struct qwx_softc *sc, struct qwx_pdev *pdev,
13548     struct qwx_vif *arvif)
13549 {
13550 	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
13551 	    FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID, arvif->vdev_id) |
13552 	    FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID, pdev->pdev_id);
13553 
13554 	/* set HTT extension valid bit to 0 by default */
13555 	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
13556 
13557 	qwx_dp_update_vdev_search(sc, arvif);
13558 }
13559 
/*
 * TX completion handler stub; completions are not processed yet.
 */
int
qwx_dp_tx_completion_handler(struct qwx_softc *sc, int ring_id)
{
	return 0;
}
13565 
/*
 * RX error-ring handler stub; error frames are not processed yet.
 */
int
qwx_dp_process_rx_err(struct qwx_softc *sc)
{
	return 0;
}
13571 
/*
 * WBM error-ring handler stub; WBM releases are not processed yet.
 */
int
qwx_dp_rx_process_wbm_err(struct qwx_softc *sc)
{
	return 0;
}
13577 
/*
 * Process received MSDUs and pass them up the network stack.
 * Not implemented yet: received frames are currently dropped.
 */
void
qwx_dp_rx_process_received_packets(struct qwx_softc *sc,
    struct mbuf_list *msdu_list, int mac_id)
{
	/* Fix: the message previously lacked a trailing newline, which
	 * would run subsequent kernel console output onto this line. */
	printf("%s: not implemented\n", __func__);
	ml_purge(msdu_list);
}
13585 
/*
 * Reap completed entries from a REO destination ring, gather the
 * received MSDUs per radio, hand them to the rx path, and replenish
 * the buffers that were consumed.  Returns the number of complete
 * MSDUs reaped.
 */
int
qwx_dp_process_rx(struct qwx_softc *sc, int ring_id)
{
	struct qwx_dp *dp = &sc->dp;
	struct qwx_pdev_dp *pdev_dp = &sc->pdev_dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct mbuf_list msdu_list[MAX_RADIOS];
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	int done = 0;
	int idx, mac_id;
	struct hal_reo_dest_ring *desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	uint32_t cookie;
	int i;

	for (i = 0; i < MAX_RADIOS; i++)
		ml_init(&msdu_list[i]);

	srng = &sc->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
try_again:
	qwx_hal_srng_access_begin(sc, srng);

	while ((desc = (struct hal_reo_dest_ring *)
	    qwx_hal_srng_dst_get_next_entry(sc, srng))) {
		/* The cookie identifies the pdev and the buffer slot. */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
		    desc->buf_addr_info.info1);
		idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		/* Skip descriptors with out-of-range or empty slots. */
		rx_ring = &pdev_dp->rx_refill_buf_ring;
		if (idx >= rx_ring->bufs_max)
			continue;

		rx_data = &rx_ring->rx_data[idx];
		if (rx_data->m == NULL)
			continue;

		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
		m = rx_data->m;
		rx_data->m = NULL;

		num_buffs_reaped[mac_id]++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
		    desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			/* Error indication; drop the frame. */
			m_freem(m);
#if 0
			sc->soc_stats.hal_reo_error[
			    dp->reo_dst_ring[ring_id].ring_id]++;
#endif
			continue;
		}

		/* Stash per-MSDU metadata for the rx path. */
		rx_data->is_first_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rx_data->is_last_msdu = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rx_data->is_continuation = !!(desc->rx_msdu_info.info0 &
		    RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rx_data->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
		    desc->rx_mpdu_info.meta_data);
		rx_data->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
		    desc->rx_mpdu_info.info0);
		rx_data->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
		    desc->info0);

		rx_data->mac_id = mac_id;
		ml_enqueue(&msdu_list[mac_id], m);

		/* An MSDU only counts once its final fragment arrived. */
		if (rx_data->is_continuation) {
			done = 0;
		} else {
			total_msdu_reaped++;
			done = 1;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && qwx_hal_srng_dst_num_free(sc, srng, 1)) {
		qwx_hal_srng_access_end(sc, srng);
		goto try_again;
	}

	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	if (!total_msdu_reaped)
		goto exit;

	for (i = 0; i < sc->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		qwx_dp_rx_process_received_packets(sc, &msdu_list[i], i);

		rx_ring = &sc->pdev_dp.rx_refill_buf_ring;

		qwx_dp_rxbufs_replenish(sc, i, rx_ring, num_buffs_reaped[i],
		    sc->hw_params.hal_params->rx_buf_rbm);
	}
exit:
	return total_msdu_reaped;
}
13704 
/*
 * Allocate and DMA-map one monitor status buffer, storing it in the
 * ring slot at rx_ring->cur and reporting that slot via 'buf_idx'.
 * Returns the mbuf, or NULL on allocation/mapping failure.
 *
 * NOTE(review): this stores the mbuf in rx_data->m, yet the caller in
 * qwx_dp_rx_reap_mon_status_ring() asserts the slot is still empty
 * afterwards -- verify once monitor mode is exercised.
 */
struct mbuf *
qwx_dp_rx_alloc_mon_status_buf(struct qwx_softc *sc,
    struct dp_rxdma_ring *rx_ring, int *buf_idx)
{
	struct mbuf *m;
	struct qwx_rx_data *rx_data;
	const size_t size = DP_RX_BUFFER_SIZE;
	int ret;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;

	/* Attach a cluster large enough for a full status buffer. */
	if (size <= MCLBYTES)
		MCLGET(m, M_DONTWAIT);
	else
		MCLGETL(m, M_DONTWAIT, size);
	if ((m->m_flags & M_EXT) == 0)
		goto fail_free_mbuf;

	m->m_len = m->m_pkthdr.len = size;
	rx_data = &rx_ring->rx_data[rx_ring->cur];
	if (rx_data->m != NULL)
		goto fail_free_mbuf;

	/* DMA maps are created lazily and kept for reuse. */
	if (rx_data->map == NULL) {
		ret = bus_dmamap_create(sc->sc_dmat, size, 1,
		    size, 0, BUS_DMA_NOWAIT, &rx_data->map);
		if (ret)
			goto fail_free_mbuf;
	}

	ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (ret) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail_free_mbuf;
	}

	*buf_idx = rx_ring->cur;
	rx_data->m = m;
	return m;

fail_free_mbuf:
	m_freem(m);
	return NULL;
}
13753 
13754 int
13755 qwx_dp_rx_reap_mon_status_ring(struct qwx_softc *sc, int mac_id,
13756     struct mbuf_list *ml)
13757 {
13758 	const struct ath11k_hw_hal_params *hal_params;
13759 	struct qwx_pdev_dp *dp;
13760 	struct dp_rxdma_ring *rx_ring;
13761 	struct qwx_mon_data *pmon;
13762 	struct hal_srng *srng;
13763 	void *rx_mon_status_desc;
13764 	struct mbuf *m;
13765 	struct qwx_rx_data *rx_data;
13766 	struct hal_tlv_hdr *tlv;
13767 	uint32_t cookie;
13768 	int buf_idx, srng_id;
13769 	uint64_t paddr;
13770 	uint8_t rbm;
13771 	int num_buffs_reaped = 0;
13772 
13773 	dp = &sc->pdev_dp;
13774 	pmon = &dp->mon_data;
13775 
13776 	srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params,
13777 	    mac_id);
13778 	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
13779 
13780 	srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
13781 #ifdef notyet
13782 	spin_lock_bh(&srng->lock);
13783 #endif
13784 	qwx_hal_srng_access_begin(sc, srng);
13785 	while (1) {
13786 		rx_mon_status_desc = qwx_hal_srng_src_peek(sc, srng);
13787 		if (!rx_mon_status_desc) {
13788 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
13789 			break;
13790 		}
13791 
13792 		qwx_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
13793 		    &cookie, &rbm);
13794 		if (paddr) {
13795 			buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
13796 
13797 			rx_data = &rx_ring->rx_data[buf_idx];
13798 			if (rx_data->m == NULL) {
13799 				printf("%s: rx monitor status with invalid "
13800 				    "buf_idx %d\n", __func__, buf_idx);
13801 				pmon->buf_state = DP_MON_STATUS_REPLINISH;
13802 				goto move_next;
13803 			}
13804 
13805 			bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0,
13806 			    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
13807 
13808 			tlv = mtod(m, struct hal_tlv_hdr *);
13809 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
13810 					HAL_RX_STATUS_BUFFER_DONE) {
13811 				printf("%s: mon status DONE not set %lx, "
13812 				    "buf_idx %d\n", __func__,
13813 				    FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl),
13814 				    buf_idx);
13815 				/* If done status is missing, hold onto status
13816 				 * ring until status is done for this status
13817 				 * ring buffer.
13818 				 * Keep HP in mon_status_ring unchanged,
13819 				 * and break from here.
13820 				 * Check status for same buffer for next time
13821 				 */
13822 				pmon->buf_state = DP_MON_STATUS_NO_DMA;
13823 				break;
13824 			}
13825 
13826 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
13827 			m = rx_data->m;
13828 			rx_data->m = NULL;
13829 #if 0
13830 			if (ab->hw_params.full_monitor_mode) {
13831 				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
13832 				if (paddr == pmon->mon_status_paddr)
13833 					pmon->buf_state = DP_MON_STATUS_MATCH;
13834 			}
13835 #endif
13836 			ml_enqueue(ml, m);
13837 		} else {
13838 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
13839 		}
13840 move_next:
13841 		m = qwx_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx);
13842 		if (!m) {
13843 			hal_params = sc->hw_params.hal_params;
13844 			qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
13845 			    hal_params->rx_buf_rbm);
13846 			num_buffs_reaped++;
13847 			break;
13848 		}
13849 		rx_data = &rx_ring->rx_data[buf_idx];
13850 		KASSERT(rx_data->m == NULL);
13851 
13852 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
13853 		    FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx);
13854 
13855 		paddr = rx_data->map->dm_segs[0].ds_addr;
13856 		qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr,
13857 		    cookie, sc->hw_params.hal_params->rx_buf_rbm);
13858 		qwx_hal_srng_src_get_next_entry(sc, srng);
13859 		num_buffs_reaped++;
13860 	}
13861 	qwx_hal_srng_access_end(sc, srng);
13862 #ifdef notyet
13863 	spin_unlock_bh(&srng->lock);
13864 #endif
13865 	return num_buffs_reaped;
13866 }
13867 
/*
 * Parse a monitor status buffer into 'ppdu_info'.
 * Not implemented yet; always reports the PPDU as incomplete.
 */
enum hal_rx_mon_status
qwx_hal_rx_parse_mon_status(struct qwx_softc *sc,
    struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m)
{
	/* TODO */
	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
13875 
/*
 * Reap monitor status buffers for a mac and parse each into PPDU
 * info.  Peer-statistics handling is not ported yet, so buffers are
 * currently parsed and dropped.  Returns the number of ring entries
 * processed.
 */
int
qwx_dp_rx_process_mon_status(struct qwx_softc *sc, int mac_id)
{
	enum hal_rx_mon_status hal_status;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
#if 0
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
#endif
	int num_buffs_reaped = 0;
#if 0
	uint32_t rx_buf_sz;
	uint16_t log_type;
#endif
	struct qwx_mon_data *pmon = (struct qwx_mon_data *)&sc->pdev_dp.mon_data;
#if  0
	struct qwx_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
#endif
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;

	num_buffs_reaped = qwx_dp_rx_reap_mon_status_ring(sc, mac_id, &ml);
	/* NOTE(review): unconditional debug printf; consider DNPRINTF. */
	printf("%s: processing %d packets\n", __func__, num_buffs_reaped);
	if (!num_buffs_reaped)
		goto exit;

	memset(ppdu_info, 0, sizeof(*ppdu_info));
	ppdu_info->peer_id = HAL_INVALID_PEERID;

	while ((m = ml_dequeue(&ml))) {
#if 0
		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
			rx_buf_sz = DP_RX_BUFFER_SIZE;
		} else {
			log_type = ATH11K_PKTLOG_TYPE_INVALID;
			rx_buf_sz = 0;
		}

		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
#endif

		/* Reset parse state for every status buffer. */
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
		hal_status = qwx_hal_rx_parse_mon_status(sc, ppdu_info, m);
#if 0
		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
#endif
		/* Drop buffers without a complete, attributable PPDU. */
		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			m_freem(m);
			continue;
		}
#if 0
		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info->peer_id);
			goto next_skb;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);

next_skb:
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
		memset(ppdu_info, 0, sizeof(*ppdu_info));
		ppdu_info->peer_id = HAL_INVALID_PEERID;
#endif
	}
exit:
	return num_buffs_reaped;
}
13970 
/*
 * Process the monitor rings of the given mac. Full monitor mode is not
 * supported yet (see the #if 0 block), so only the monitor status ring
 * is processed. Returns 1 if any work was done, 0 otherwise.
 */
int
qwx_dp_rx_process_mon_rings(struct qwx_softc *sc, int mac_id)
{
	int ret = 0;
#if 0
	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
#endif
		ret = qwx_dp_rx_process_mon_status(sc, mac_id);

	return ret;
}
13985 
/*
 * Handle entries on the RXDMA error ring.
 * Not implemented yet; always reports that no work was done.
 */
int
qwx_dp_process_rxdma_err(struct qwx_softc *sc)
{
	return 0;
}
13991 
/*
 * Handle entries on the REO status ring.
 * Not implemented yet; always reports that no work was done.
 */
int
qwx_dp_process_reo_status(struct qwx_softc *sc)
{
	return 0;
}
13997 
/*
 * Service all data path rings mapped to interrupt group 'grp_id',
 * consulting the per-group ring masks from the hardware parameters.
 * Returns 1 if any handler reported work done, 0 otherwise.
 */
int
qwx_dp_service_srng(struct qwx_softc *sc, int grp_id)
{
	struct qwx_pdev_dp *dp = &sc->pdev_dp;
	int i, j, ret = 0;

	/* TX completions: one WBM ring per TCL ring. */
	for (i = 0; i < sc->hw_params.max_tx_ring; i++) {
		const struct ath11k_hw_tcl2wbm_rbm_map *map;

		map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i];
		if ((sc->hw_params.ring_mask->tx[grp_id]) &
		    (1 << (map->wbm_ring_num)) &&
		    qwx_dp_tx_completion_handler(sc, i))
			ret = 1;
	}

	if (sc->hw_params.ring_mask->rx_err[grp_id] &&
	    qwx_dp_process_rx_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] &&
	    qwx_dp_rx_process_wbm_err(sc))
		ret = 1;

	if (sc->hw_params.ring_mask->rx[grp_id]) {
		/* fls() selects the highest ring number set in the mask. */
		i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1;
		if (qwx_dp_process_rx(sc, i))
			ret = 1;
	}

	/* Monitor status rings: one per rxdma instance on each radio. */
	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] &
			   (1 << id)) == 0)
				continue;

			if (qwx_dp_rx_process_mon_rings(sc, id))
				ret = 1;
		}
	}

	if (sc->hw_params.ring_mask->reo_status[grp_id] &&
	    qwx_dp_process_reo_status(sc))
		ret = 1;

	/* RXDMA-to-host rings; also top up the RX refill buffer ring. */
	for (i = 0; i < sc->num_radios; i++) {
		for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * sc->hw_params.num_rxmda_per_pdev + j;

			if ((sc->hw_params.ring_mask->rxdma2host[grp_id] &
			   (1 << (id))) == 0)
				continue;

			if (qwx_dp_process_rxdma_err(sc))
				ret = 1;

			qwx_dp_rxbufs_replenish(sc, id, &dp->rx_refill_buf_ring,
			    0, sc->hw_params.hal_params->rx_buf_rbm);
		}
	}

	return ret;
}
14063 
14064 int
14065 qwx_wmi_wait_for_service_ready(struct qwx_softc *sc)
14066 {
14067 	int ret;
14068 
14069 	while (!sc->wmi.service_ready) {
14070 		ret = tsleep_nsec(&sc->wmi.service_ready, 0, "qwxwmirdy",
14071 		    SEC_TO_NSEC(5));
14072 		if (ret)
14073 			return -1;
14074 	}
14075 
14076 	return 0;
14077 }
14078 
14079 void
14080 qwx_fill_band_to_mac_param(struct qwx_softc *sc,
14081     struct wmi_host_pdev_band_to_mac *band_to_mac)
14082 {
14083 	uint8_t i;
14084 	struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
14085 	struct qwx_pdev *pdev;
14086 
14087 	for (i = 0; i < sc->num_radios; i++) {
14088 		pdev = &sc->pdevs[i];
14089 		hal_reg_cap = &sc->hal_reg_cap[i];
14090 		band_to_mac[i].pdev_id = pdev->pdev_id;
14091 
14092 		switch (pdev->cap.supported_bands) {
14093 		case WMI_HOST_WLAN_2G_5G_CAP:
14094 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
14095 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
14096 			break;
14097 		case WMI_HOST_WLAN_2G_CAP:
14098 			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
14099 			band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
14100 			break;
14101 		case WMI_HOST_WLAN_5G_CAP:
14102 			band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
14103 			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
14104 			break;
14105 		default:
14106 			break;
14107 		}
14108 	}
14109 }
14110 
14111 struct mbuf *
14112 qwx_wmi_alloc_mbuf(size_t len)
14113 {
14114 	struct mbuf *m;
14115 	uint32_t round_len = roundup(len, 4);
14116 
14117 	m = qwx_htc_alloc_mbuf(sizeof(struct wmi_cmd_hdr) + round_len);
14118 	if (!m)
14119 		return NULL;
14120 
14121 	return m;
14122 }
14123 
14124 int
14125 qwx_wmi_cmd_send_nowait(struct qwx_pdev_wmi *wmi, struct mbuf *m,
14126     uint32_t cmd_id)
14127 {
14128 	struct qwx_softc *sc = wmi->wmi->sc;
14129 	struct wmi_cmd_hdr *cmd_hdr;
14130 	uint32_t cmd = 0;
14131 
14132 	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
14133 
14134 	cmd_hdr = (struct wmi_cmd_hdr *)(mtod(m, uint8_t *) +
14135 	    sizeof(struct ath11k_htc_hdr));
14136 	cmd_hdr->cmd_id = htole32(cmd);
14137 
14138 	DNPRINTF(QWX_D_WMI, "%s: sending WMI command 0x%u\n", __func__, cmd);
14139 	return qwx_htc_send(&sc->htc, wmi->eid, m);
14140 }
14141 
/*
 * Send a WMI command, sleeping until the transmit path has resources.
 * With credit-based flow control the HTC endpoint's tx credits are
 * awaited; otherwise a free copy-engine descriptor is awaited. Each
 * wait gives up after 3 seconds. Returns 0 on success or an errno-style
 * value; on failure the caller still owns the mbuf and must free it
 * (all callers in this file do m_freem() on error).
 */
int
qwx_wmi_cmd_send(struct qwx_pdev_wmi *wmi, struct mbuf *m, uint32_t cmd_id)
{
	struct qwx_wmi_base *wmi_sc = wmi->wmi;
	int ret = EOPNOTSUPP;
	struct qwx_softc *sc = wmi_sc->sc;
#ifdef notyet
	might_sleep();
#endif
	if (sc->hw_params.credit_flow) {
		struct qwx_htc *htc = &sc->htc;
		struct qwx_htc_ep *ep = &htc->endpoint[wmi->eid];

		/* Wait for the firmware to post tx credits. */
		while (!ep->tx_credits) {
			ret = tsleep_nsec(&ep->tx_credits, 0, "qwxtxcrd",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx credits timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	} else {
		/* Wait for a free copy-engine descriptor. */
		while (!wmi->tx_ce_desc) {
			ret = tsleep_nsec(&wmi->tx_ce_desc, 0, "qwxtxce",
			    SEC_TO_NSEC(3));
			if (ret) {
				printf("%s: tx ce desc timeout\n",
				    sc->sc_dev.dv_xname);
				if (test_bit(ATH11K_FLAG_CRASH_FLUSH,
				    sc->sc_flags))
					return ESHUTDOWN;
				else
					return EAGAIN;
			}
		}
	}

	ret = qwx_wmi_cmd_send_nowait(wmi, m, cmd_id);

	if (ret == EAGAIN)
		printf("%s: wmi command %d timeout\n",
		    sc->sc_dev.dv_xname, cmd_id);

	if (ret == ENOBUFS)
		printf("%s: ce desc not available for wmi command %d\n",
		    sc->sc_dev.dv_xname, cmd_id);

	return ret;
}
14196 
14197 int
14198 qwx_wmi_pdev_set_param(struct qwx_softc *sc, uint32_t param_id,
14199     uint32_t param_value, uint8_t pdev_id)
14200 {
14201 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14202 	struct wmi_pdev_set_param_cmd *cmd;
14203 	struct mbuf *m;
14204 	int ret;
14205 
14206 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14207 	if (!m)
14208 		return ENOMEM;
14209 
14210 	cmd = (struct wmi_pdev_set_param_cmd *)(mtod(m, uint8_t *) +
14211 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14212 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
14213 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14214 	cmd->pdev_id = pdev_id;
14215 	cmd->param_id = param_id;
14216 	cmd->param_value = param_value;
14217 
14218 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PDEV_SET_PARAM_CMDID);
14219 	if (ret) {
14220 		printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
14221 		    sc->sc_dev.dv_xname);
14222 		m_freem(m);
14223 		return ret;
14224 	}
14225 
14226 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set param %d pdev id %d value %d\n",
14227 	    __func__, param_id, pdev_id, param_value);
14228 
14229 	return 0;
14230 }
14231 
14232 int
14233 qwx_wmi_pdev_lro_cfg(struct qwx_softc *sc, uint8_t pdev_id)
14234 {
14235 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14236 	struct ath11k_wmi_pdev_lro_config_cmd *cmd;
14237 	struct mbuf *m;
14238 	int ret;
14239 
14240 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14241 	if (!m)
14242 		return ENOMEM;
14243 
14244 	cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)(mtod(m, uint8_t *) +
14245 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14246 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
14247 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14248 
14249 	arc4random_buf(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
14250 	arc4random_buf(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
14251 
14252 	cmd->pdev_id = pdev_id;
14253 
14254 	ret = qwx_wmi_cmd_send(wmi, m, WMI_LRO_CONFIG_CMDID);
14255 	if (ret) {
14256 		printf("%s: failed to send lro cfg req wmi cmd\n",
14257 		    sc->sc_dev.dv_xname);
14258 		m_freem(m);
14259 		return ret;
14260 	}
14261 
14262 	DNPRINTF(QWX_D_WMI, "%s: cmd lro config pdev_id 0x%x\n",
14263 	    __func__, pdev_id);
14264 
14265 	return 0;
14266 }
14267 
14268 int
14269 qwx_wmi_pdev_set_ps_mode(struct qwx_softc *sc, int vdev_id, uint8_t pdev_id,
14270     enum wmi_sta_ps_mode psmode)
14271 {
14272 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14273 	struct wmi_pdev_set_ps_mode_cmd *cmd;
14274 	struct mbuf *m;
14275 	int ret;
14276 
14277 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14278 	if (!m)
14279 		return ENOMEM;
14280 
14281 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)(mtod(m, uint8_t *) +
14282 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14283 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
14284 	    WMI_TAG_STA_POWERSAVE_MODE_CMD) |
14285 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14286 	cmd->vdev_id = vdev_id;
14287 	cmd->sta_ps_mode = psmode;
14288 
14289 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_MODE_CMDID);
14290 	if (ret) {
14291 		printf("%s: failed to send WMI_PDEV_SET_PARAM cmd\n",
14292 		    sc->sc_dev.dv_xname);
14293 		m_freem(m);
14294 		return ret;
14295 	}
14296 
14297 	DNPRINTF(QWX_D_WMI, "%s: cmd sta powersave mode psmode %d vdev id %d\n",
14298 	    __func__, psmode, vdev_id);
14299 
14300 	return 0;
14301 }
14302 
/*
 * Tell firmware which OUI to use when randomizing the source address of
 * probe requests during scans. The OUI is taken from the first three
 * bytes of 'mac_addr'.
 * Returns 0 on success or an errno-style value on failure.
 */
int
qwx_wmi_scan_prob_req_oui(struct qwx_softc *sc, const uint8_t *mac_addr,
    uint8_t pdev_id)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct mbuf *m;
	struct wmi_scan_prob_req_oui_cmd *cmd;
	uint32_t prob_req_oui;
	int len, ret;

	/* Pack the three OUI octets into one 24-bit value. */
	prob_req_oui = (((uint32_t)mac_addr[0]) << 16) |
		       (((uint32_t)mac_addr[1]) << 8) | mac_addr[2];

	len = sizeof(*cmd);
	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_scan_prob_req_oui_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->prob_req_oui = prob_req_oui;

	DNPRINTF(QWX_D_WMI, "%s: scan prob req oui %d\n", __func__,
	    prob_req_oui);

	ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_PROB_REQ_OUI_CMDID);
	if (ret) {
		printf("%s: failed to send WMI_SCAN_PROB_REQ_OUI cmd\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return ret;
	}

	return 0;
}
14341 
14342 int
14343 qwx_wmi_send_dfs_phyerr_offload_enable_cmd(struct qwx_softc *sc, uint32_t pdev_id)
14344 {
14345 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14346 	struct wmi_dfs_phyerr_offload_cmd *cmd;
14347 	struct mbuf *m;
14348 	int ret;
14349 
14350 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14351 	if (!m)
14352 		return ENOMEM;
14353 
14354 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)(mtod(m, uint8_t *) +
14355 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14356 
14357 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
14358 	    WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
14359 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14360 
14361 	cmd->pdev_id = pdev_id;
14362 
14363 	ret = qwx_wmi_cmd_send(wmi, m,
14364 	    WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
14365 	if (ret) {
14366 		printf("%s: failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE "
14367 		    "cmd\n", sc->sc_dev.dv_xname);
14368 		m_free(m);
14369 		return ret;
14370 	}
14371 
14372 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev dfs phyerr offload enable "
14373 	    "pdev id %d\n", __func__, pdev_id);
14374 
14375 	return 0;
14376 }
14377 
/*
 * Send the firmware the list of channels available for scanning. The
 * list may exceed the maximum WMI message size, in which case it is
 * split over multiple WMI_SCAN_CHAN_LIST commands; follow-up commands
 * carry the append flag so firmware extends the list instead of
 * replacing it. Returns 0 on success or an errno-style value.
 */
int
qwx_wmi_send_scan_chan_list_cmd(struct qwx_softc *sc, uint8_t pdev_id,
    struct scan_chan_list_params *chan_list)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_scan_chan_list_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	uint16_t num_send_chans, num_sends = 0, max_chan_limit = 0;
	uint32_t *reg1, *reg2;

	tchan_info = chan_list->ch_param;
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		/* How many channel entries fit into one WMI message? */
		max_chan_limit = (wmi->wmi->max_msg_len[pdev_id] - len) /
		    sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		m = qwx_wmi_alloc_mbuf(len);
		if (!m)
			return ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
		    WMI_TAG_SCAN_CHAN_LIST_CMD) |
		    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		/* Later chunks append to the list already sent. */
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		DNPRINTF(QWX_D_WMI, "%s: no.of chan = %d len = %d "
		    "pdev_id = %d num_sends = %d\n", __func__, num_send_chans,
		    len, cmd->pdev_id, num_sends);

		ptr = (void *)(mtod(m, uint8_t *) +
		    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
		    sizeof(*cmd));

		/* TLV array of wmi_channel structs follows the command. */
		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
			    WMI_TAG_CHANNEL) |
			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;
			if (tchan_info->dfs_set)
				chan_info->info |= WMI_CHAN_INFO_DFS;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
			    tchan_info->phy_mode);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
			    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
			    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
			    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
			    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
			    tchan_info->antennamax);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
			    tchan_info->maxregpower);

			DNPRINTF(QWX_D_WMI, "%s: chan scan list "
			    "chan[%d] = %u, chan_info->info %8x\n",
			    __func__, i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = qwx_wmi_cmd_send(wmi, m, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			printf("%s: failed to send WMI_SCAN_CHAN_LIST cmd\n",
			    sc->sc_dev.dv_xname);
			m_freem(m);
			return ret;
		}

		DNPRINTF(QWX_D_WMI, "%s: cmd scan chan list channels %d\n",
		    __func__, num_send_chans);

		num_sends++;
	}

	return 0;
}
14506 
/*
 * Start the firmware's periodic 802.11d (regulatory country detection)
 * scan on a vdev. Returns 0 on success or an errno-style value.
 */
int
qwx_wmi_send_11d_scan_start_cmd(struct qwx_softc *sc,
    struct wmi_11d_scan_start_params *param, uint8_t pdev_id)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_11d_scan_start_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_11d_scan_start_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;
	cmd->scan_period_msec = param->scan_period_msec;
	cmd->start_interval_msec = param->start_interval_msec;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_11D_SCAN_START_CMDID);
	if (ret) {
		printf("%s: failed to send WMI_11D_SCAN_START_CMDID: %d\n",
		    sc->sc_dev.dv_xname, ret);
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd 11d scan start vdev id %d period %d "
	    "ms internal %d ms\n", __func__, cmd->vdev_id,
	    cmd->scan_period_msec, cmd->start_interval_msec);

	return 0;
}
14543 
/*
 * Translate the generic scan request parameters into the WMI start-scan
 * command's event subscription mask and scan control flags.
 */
static inline void
qwx_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
    struct scan_req_params *param)
{
	/* Scan events subscription */
	if (param->scan_ev_started)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
	if (param->scan_ev_completed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
	if (param->scan_ev_bss_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
	if (param->scan_ev_foreign_chan)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
	if (param->scan_ev_dequeued)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
	if (param->scan_ev_preempted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
	if (param->scan_ev_start_failed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
	if (param->scan_ev_restarted)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
	if (param->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
	if (param->scan_ev_suspended)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
	if (param->scan_ev_resumed)
		cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;

	/* Set scan control flags. */
	cmd->scan_ctrl_flags = 0;
	if (param->scan_f_passive)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
	if (param->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
	if (param->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
	if (param->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
	if (param->scan_f_half_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
	if (param->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
	if (param->scan_f_cck_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
	if (param->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
	if (param->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
	if (param->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
	if (param->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
	if (param->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
	if (param->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
	if (param->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
	if (param->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
	if (param->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
	if (param->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
	if (param->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
	if (param->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;

	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
	    param->adaptive_dwell_time_mode);

	cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
}
14620 
14621 int
14622 qwx_wmi_send_scan_start_cmd(struct qwx_softc *sc,
14623     struct scan_req_params *params)
14624 {
14625 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[params->pdev_id];
14626 	struct wmi_start_scan_cmd *cmd;
14627 	struct wmi_ssid *ssid = NULL;
14628 	struct wmi_mac_addr *bssid;
14629 	struct mbuf *m;
14630 	struct wmi_tlv *tlv;
14631 	void *ptr;
14632 	int i, ret, len;
14633 	uint32_t *tmp_ptr;
14634 	uint16_t extraie_len_with_pad = 0;
14635 	struct hint_short_ssid *s_ssid = NULL;
14636 	struct hint_bssid *hint_bssid = NULL;
14637 
14638 	len = sizeof(*cmd);
14639 
14640 	len += TLV_HDR_SIZE;
14641 	if (params->num_chan)
14642 		len += params->num_chan * sizeof(uint32_t);
14643 
14644 	len += TLV_HDR_SIZE;
14645 	if (params->num_ssids)
14646 		len += params->num_ssids * sizeof(*ssid);
14647 
14648 	len += TLV_HDR_SIZE;
14649 	if (params->num_bssid)
14650 		len += sizeof(*bssid) * params->num_bssid;
14651 
14652 	len += TLV_HDR_SIZE;
14653 	if (params->extraie.len && params->extraie.len <= 0xFFFF) {
14654 		extraie_len_with_pad = roundup(params->extraie.len,
14655 		    sizeof(uint32_t));
14656 	}
14657 	len += extraie_len_with_pad;
14658 
14659 	if (params->num_hint_bssid) {
14660 		len += TLV_HDR_SIZE +
14661 		    params->num_hint_bssid * sizeof(struct hint_bssid);
14662 	}
14663 
14664 	if (params->num_hint_s_ssid) {
14665 		len += TLV_HDR_SIZE +
14666 		    params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
14667 	}
14668 
14669 	m = qwx_wmi_alloc_mbuf(len);
14670 	if (!m)
14671 		return ENOMEM;
14672 
14673 	ptr = (void *)(mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
14674 	    sizeof(struct wmi_cmd_hdr));
14675 
14676 	cmd = ptr;
14677 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
14678 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14679 
14680 	cmd->scan_id = params->scan_id;
14681 	cmd->scan_req_id = params->scan_req_id;
14682 	cmd->vdev_id = params->vdev_id;
14683 	cmd->scan_priority = params->scan_priority;
14684 	cmd->notify_scan_events = params->notify_scan_events;
14685 
14686 	qwx_wmi_copy_scan_event_cntrl_flags(cmd, params);
14687 
14688 	cmd->dwell_time_active = params->dwell_time_active;
14689 	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
14690 	cmd->dwell_time_passive = params->dwell_time_passive;
14691 	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
14692 	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
14693 	cmd->min_rest_time = params->min_rest_time;
14694 	cmd->max_rest_time = params->max_rest_time;
14695 	cmd->repeat_probe_time = params->repeat_probe_time;
14696 	cmd->probe_spacing_time = params->probe_spacing_time;
14697 	cmd->idle_time = params->idle_time;
14698 	cmd->max_scan_time = params->max_scan_time;
14699 	cmd->probe_delay = params->probe_delay;
14700 	cmd->burst_duration = params->burst_duration;
14701 	cmd->num_chan = params->num_chan;
14702 	cmd->num_bssid = params->num_bssid;
14703 	cmd->num_ssids = params->num_ssids;
14704 	cmd->ie_len = params->extraie.len;
14705 	cmd->n_probes = params->n_probes;
14706 	IEEE80211_ADDR_COPY(cmd->mac_addr.addr, params->mac_addr.addr);
14707 	IEEE80211_ADDR_COPY(cmd->mac_mask.addr, params->mac_mask.addr);
14708 
14709 	ptr += sizeof(*cmd);
14710 
14711 	len = params->num_chan * sizeof(uint32_t);
14712 
14713 	tlv = ptr;
14714 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
14715 	    FIELD_PREP(WMI_TLV_LEN, len);
14716 	ptr += TLV_HDR_SIZE;
14717 	tmp_ptr = (uint32_t *)ptr;
14718 
14719 	for (i = 0; i < params->num_chan; ++i)
14720 		tmp_ptr[i] = params->chan_list[i];
14721 
14722 	ptr += len;
14723 
14724 	len = params->num_ssids * sizeof(*ssid);
14725 	tlv = ptr;
14726 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
14727 	    FIELD_PREP(WMI_TLV_LEN, len);
14728 
14729 	ptr += TLV_HDR_SIZE;
14730 
14731 	if (params->num_ssids) {
14732 		ssid = ptr;
14733 		for (i = 0; i < params->num_ssids; ++i) {
14734 			ssid->ssid_len = params->ssid[i].length;
14735 			memcpy(ssid->ssid, params->ssid[i].ssid,
14736 			       params->ssid[i].length);
14737 			ssid++;
14738 		}
14739 	}
14740 
14741 	ptr += (params->num_ssids * sizeof(*ssid));
14742 	len = params->num_bssid * sizeof(*bssid);
14743 	tlv = ptr;
14744 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
14745 	    FIELD_PREP(WMI_TLV_LEN, len);
14746 
14747 	ptr += TLV_HDR_SIZE;
14748 	bssid = ptr;
14749 
14750 	if (params->num_bssid) {
14751 		for (i = 0; i < params->num_bssid; ++i) {
14752 			IEEE80211_ADDR_COPY(bssid->addr,
14753 			    params->bssid_list[i].addr);
14754 			bssid++;
14755 		}
14756 	}
14757 
14758 	ptr += params->num_bssid * sizeof(*bssid);
14759 
14760 	len = extraie_len_with_pad;
14761 	tlv = ptr;
14762 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
14763 	    FIELD_PREP(WMI_TLV_LEN, len);
14764 	ptr += TLV_HDR_SIZE;
14765 
14766 	if (extraie_len_with_pad)
14767 		memcpy(ptr, params->extraie.ptr, params->extraie.len);
14768 
14769 	ptr += extraie_len_with_pad;
14770 
14771 	if (params->num_hint_s_ssid) {
14772 		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
14773 		tlv = ptr;
14774 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
14775 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
14776 		    FIELD_PREP(WMI_TLV_LEN, len);
14777 		ptr += TLV_HDR_SIZE;
14778 		s_ssid = ptr;
14779 		for (i = 0; i < params->num_hint_s_ssid; ++i) {
14780 			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
14781 			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
14782 			s_ssid++;
14783 		}
14784 		ptr += len;
14785 	}
14786 
14787 	if (params->num_hint_bssid) {
14788 		len = params->num_hint_bssid * sizeof(struct hint_bssid);
14789 		tlv = ptr;
14790 		tlv->header = FIELD_PREP(WMI_TLV_TAG,
14791 		    WMI_TAG_ARRAY_FIXED_STRUCT) |
14792 		    FIELD_PREP(WMI_TLV_LEN, len);
14793 		ptr += TLV_HDR_SIZE;
14794 		hint_bssid = ptr;
14795 		for (i = 0; i < params->num_hint_bssid; ++i) {
14796 			hint_bssid->freq_flags =
14797 				params->hint_bssid[i].freq_flags;
14798 			IEEE80211_ADDR_COPY(
14799 			    &params->hint_bssid[i].bssid.addr[0],
14800 			    &hint_bssid->bssid.addr[0]);
14801 			hint_bssid++;
14802 		}
14803 	}
14804 
14805 	ret = qwx_wmi_cmd_send(wmi, m, WMI_START_SCAN_CMDID);
14806 	if (ret) {
14807 		printf("%s: failed to send WMI_START_SCAN_CMDID\n",
14808 		    sc->sc_dev.dv_xname);
14809 		m_freem(m);
14810 		return ret;
14811 	}
14812 
14813 	DNPRINTF(QWX_D_WMI, "%s: cmd start scan", __func__);
14814 
14815 	return 0;
14816 }
14817 
/*
 * Cancel one or more ongoing scans, depending on param->req_type:
 * all scans on the pdev, all scans on a vdev, or a single scan by ID.
 * Returns 0 on success, EINVAL for an unknown request type, or the
 * errno-style result of the WMI send.
 */
int
qwx_wmi_send_scan_stop_cmd(struct qwx_softc *sc,
    struct scan_cancel_param *param)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
	struct wmi_stop_scan_cmd *cmd;
	struct mbuf *m;
	int ret;

	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;
	cmd->requestor = param->requester;
	cmd->scan_id = param->scan_id;
	cmd->pdev_id = param->pdev_id;
	/* stop the scan with the corresponding scan_id */
	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type =  WMI_SCAN_STOP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type =  WMI_SCAN_STOP_ONE;
	} else {
		printf("%s: invalid scan cancel param %d\n",
		    sc->sc_dev.dv_xname, param->req_type);
		m_freem(m);
		return EINVAL;
	}

	ret = qwx_wmi_cmd_send(wmi, m, WMI_STOP_SCAN_CMDID);
	if (ret) {
		printf("%s: failed to send WMI_STOP_SCAN_CMDID\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd stop scan\n", __func__);
	return ret;
}
14869 
14870 int
14871 qwx_wmi_send_peer_create_cmd(struct qwx_softc *sc, uint8_t pdev_id,
14872     struct peer_create_params *param)
14873 {
14874 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14875 	struct wmi_peer_create_cmd *cmd;
14876 	struct mbuf *m;
14877 	int ret;
14878 
14879 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14880 	if (!m)
14881 		return ENOMEM;
14882 
14883 	cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) +
14884 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14885 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
14886 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14887 
14888 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr);
14889 	cmd->peer_type = param->peer_type;
14890 	cmd->vdev_id = param->vdev_id;
14891 
14892 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID);
14893 	if (ret) {
14894 		printf("%s: failed to submit WMI_PEER_CREATE cmd\n",
14895 		    sc->sc_dev.dv_xname);
14896 		m_freem(m);
14897 		return ret;
14898 	}
14899 
14900 	DNPRINTF(QWX_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n",
14901 	    __func__, param->vdev_id, ether_sprintf(param->peer_addr));
14902 
14903 	return ret;
14904 }
14905 
14906 int
14907 qwx_wmi_send_peer_delete_cmd(struct qwx_softc *sc, const uint8_t *peer_addr,
14908     uint8_t vdev_id, uint8_t pdev_id)
14909 {
14910 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
14911 	struct wmi_peer_delete_cmd *cmd;
14912 	struct mbuf *m;
14913 	int ret;
14914 
14915 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
14916 	if (!m)
14917 		return ENOMEM;
14918 
14919 	cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) +
14920 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
14921 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
14922 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
14923 
14924 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
14925 	cmd->vdev_id = vdev_id;
14926 
14927 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID);
14928 	if (ret) {
14929 		printf("%s: failed to send WMI_PEER_DELETE cmd\n",
14930 		    sc->sc_dev.dv_xname);
14931 		m_freem(m);
14932 		return ret;
14933 	}
14934 
14935 	DNPRINTF(QWX_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %pM\n",
14936 	    __func__, vdev_id, peer_addr);
14937 
14938 	return 0;
14939 }
14940 
/*
 * Copy the host-side target resource configuration into the wire-format
 * structure embedded in the WMI init command.  This is a field-by-field
 * copy; both structures must stay in sync with the firmware interface.
 */
void
qwx_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
    struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
	    tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
	    tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
	    tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
	    tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
	    tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
	    tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
	    tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->flag1;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
#ifdef notyet /* 6 GHz support */
	wmi_cfg->host_service_flags &=
	    ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
	    WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
#endif
}
15013 
15014 int
15015 qwx_init_cmd_send(struct qwx_pdev_wmi *wmi, struct wmi_init_cmd_param *param)
15016 {
15017 	struct mbuf *m;
15018 	struct wmi_init_cmd *cmd;
15019 	struct wmi_resource_config *cfg;
15020 	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
15021 	struct wmi_pdev_band_to_mac *band_to_mac;
15022 	struct wlan_host_mem_chunk *host_mem_chunks;
15023 	struct wmi_tlv *tlv;
15024 	size_t ret, len;
15025 	void *ptr;
15026 	uint32_t hw_mode_len = 0;
15027 	uint16_t idx;
15028 
15029 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
15030 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
15031 		    (param->num_band_to_mac * sizeof(*band_to_mac));
15032 
15033 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
15034 	    (param->num_mem_chunks ?
15035 	    (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
15036 
15037 	m = qwx_wmi_alloc_mbuf(len);
15038 	if (!m)
15039 		return ENOMEM;
15040 
15041 	cmd = (struct wmi_init_cmd *)(mtod(m, uint8_t *) +
15042 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
15043 
15044 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
15045 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
15046 
15047 	ptr = mtod(m, uint8_t *) + sizeof(struct ath11k_htc_hdr) +
15048 	   sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
15049 	cfg = ptr;
15050 
15051 	qwx_wmi_copy_resource_config(cfg, param->res_cfg);
15052 
15053 	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
15054 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
15055 
15056 	ptr += sizeof(*cfg);
15057 	host_mem_chunks = ptr + TLV_HDR_SIZE;
15058 	len = sizeof(struct wlan_host_mem_chunk);
15059 
15060 	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
15061 		host_mem_chunks[idx].tlv_header =
15062 		    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
15063 		    FIELD_PREP(WMI_TLV_LEN, len);
15064 
15065 		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
15066 		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
15067 		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
15068 
15069 		DNPRINTF(QWX_D_WMI,
15070 		    "%s: host mem chunk req_id %d paddr 0x%llx len %d\n",
15071 		    __func__, param->mem_chunks[idx].req_id,
15072 		    (uint64_t)param->mem_chunks[idx].paddr,
15073 		    param->mem_chunks[idx].len);
15074 	}
15075 	cmd->num_host_mem_chunks = param->num_mem_chunks;
15076 	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
15077 
15078 	/* num_mem_chunks is zero */
15079 	tlv = ptr;
15080 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
15081 	    FIELD_PREP(WMI_TLV_LEN, len);
15082 	ptr += TLV_HDR_SIZE + len;
15083 
15084 	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
15085 		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
15086 		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
15087 		    WMI_TAG_PDEV_SET_HW_MODE_CMD) |
15088 		    FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE);
15089 
15090 		hw_mode->hw_mode_index = param->hw_mode_id;
15091 		hw_mode->num_band_to_mac = param->num_band_to_mac;
15092 
15093 		ptr += sizeof(*hw_mode);
15094 
15095 		len = param->num_band_to_mac * sizeof(*band_to_mac);
15096 		tlv = ptr;
15097 		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
15098 		    FIELD_PREP(WMI_TLV_LEN, len);
15099 
15100 		ptr += TLV_HDR_SIZE;
15101 		len = sizeof(*band_to_mac);
15102 
15103 		for (idx = 0; idx < param->num_band_to_mac; idx++) {
15104 			band_to_mac = (void *)ptr;
15105 
15106 			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
15107 			    WMI_TAG_PDEV_BAND_TO_MAC) |
15108 			    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
15109 			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
15110 			band_to_mac->start_freq =
15111 			    param->band_to_mac[idx].start_freq;
15112 			band_to_mac->end_freq =
15113 			    param->band_to_mac[idx].end_freq;
15114 			ptr += sizeof(*band_to_mac);
15115 		}
15116 	}
15117 
15118 	ret = qwx_wmi_cmd_send(wmi, m, WMI_INIT_CMDID);
15119 	if (ret) {
15120 		printf("%s: failed to send WMI_INIT_CMDID\n", __func__);
15121 		m_freem(m);
15122 		return ret;
15123 	}
15124 
15125 	DNPRINTF(QWX_D_WMI, "%s: cmd wmi init\n", __func__);
15126 
15127 	return 0;
15128 }
15129 
15130 int
15131 qwx_wmi_cmd_init(struct qwx_softc *sc)
15132 {
15133 	struct qwx_wmi_base *wmi_sc = &sc->wmi;
15134 	struct wmi_init_cmd_param init_param;
15135 	struct target_resource_config  config;
15136 
15137 	memset(&init_param, 0, sizeof(init_param));
15138 	memset(&config, 0, sizeof(config));
15139 
15140 	sc->hw_params.hw_ops->wmi_init_config(sc, &config);
15141 
15142 	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT))
15143 		config.is_reg_cc_ext_event_supported = 1;
15144 
15145 	memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
15146 
15147 	init_param.res_cfg = &wmi_sc->wlan_resource_config;
15148 	init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
15149 	init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
15150 	init_param.mem_chunks = wmi_sc->mem_chunks;
15151 
15152 	if (sc->hw_params.single_pdev_only)
15153 		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
15154 
15155 	init_param.num_band_to_mac = sc->num_radios;
15156 	qwx_fill_band_to_mac_param(sc, init_param.band_to_mac);
15157 
15158 	return qwx_init_cmd_send(&wmi_sc->wmi[0], &init_param);
15159 }
15160 
15161 int
15162 qwx_wmi_wait_for_unified_ready(struct qwx_softc *sc)
15163 {
15164 	int ret;
15165 
15166 	while (!sc->wmi.unified_ready) {
15167 		ret = tsleep_nsec(&sc->wmi.unified_ready, 0, "qwxunfrdy",
15168 		    SEC_TO_NSEC(5));
15169 		if (ret)
15170 			return -1;
15171 	}
15172 
15173 	return 0;
15174 }
15175 
15176 int
15177 qwx_wmi_set_hw_mode(struct qwx_softc *sc,
15178     enum wmi_host_hw_mode_config_type mode)
15179 {
15180 	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
15181 	struct mbuf *m;
15182 	struct qwx_wmi_base *wmi = &sc->wmi;
15183 	int len;
15184 	int ret;
15185 
15186 	len = sizeof(*cmd);
15187 
15188 	m = qwx_wmi_alloc_mbuf(len);
15189 	if (!m)
15190 		return ENOMEM;
15191 
15192 	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)(mtod(m, uint8_t *) +
15193 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
15194 
15195 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
15196 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
15197 
15198 	cmd->pdev_id = WMI_PDEV_ID_SOC;
15199 	cmd->hw_mode_index = mode;
15200 
15201 	ret = qwx_wmi_cmd_send(&wmi->wmi[0], m, WMI_PDEV_SET_HW_MODE_CMDID);
15202 	if (ret) {
15203 		printf("%s: failed to send WMI_PDEV_SET_HW_MODE_CMDID\n",
15204 		    __func__);
15205 		m_freem(m);
15206 		return ret;
15207 	}
15208 
15209 	DNPRINTF(QWX_D_WMI, "%s: cmd pdev set hw mode %d\n", __func__,
15210 	    cmd->hw_mode_index);
15211 
15212 	return 0;
15213 }
15214 
15215 int
15216 qwx_wmi_set_sta_ps_param(struct qwx_softc *sc, uint32_t vdev_id,
15217      uint8_t pdev_id, uint32_t param, uint32_t param_value)
15218 {
15219 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
15220 	struct wmi_sta_powersave_param_cmd *cmd;
15221 	struct mbuf *m;
15222 	int ret;
15223 
15224 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
15225 	if (!m)
15226 		return ENOMEM;
15227 
15228 	cmd = (struct wmi_sta_powersave_param_cmd *)(mtod(m, uint8_t *) +
15229 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
15230 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
15231 	    WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
15232 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
15233 
15234 	cmd->vdev_id = vdev_id;
15235 	cmd->param = param;
15236 	cmd->value = param_value;
15237 
15238 	ret = qwx_wmi_cmd_send(wmi, m, WMI_STA_POWERSAVE_PARAM_CMDID);
15239 	if (ret) {
15240 		printf("%s: failed to send WMI_STA_POWERSAVE_PARAM_CMDID",
15241 		    sc->sc_dev.dv_xname);
15242 		m_freem(m);
15243 		return ret;
15244 	}
15245 
15246 	DNPRINTF(QWX_D_WMI, "%s: cmd set powersave param vdev_id %d param %d "
15247 	    "value %d\n", __func__, vdev_id, param, param_value);
15248 
15249 	return 0;
15250 }
15251 
/*
 * Create a firmware vdev (virtual interface) with the given MAC address.
 * The command TLV is followed by an array TLV carrying the tx/rx chain
 * configuration for the 2 GHz and 5 GHz bands.
 * Returns 0 on success or errno.
 */
int
qwx_wmi_vdev_create(struct qwx_softc *sc, uint8_t *macaddr,
    struct vdev_create_params *param)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[param->pdev_id];
	struct wmi_vdev_create_cmd *cmd;
	struct mbuf *m;
	struct wmi_vdev_txrx_streams *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	cmd->mbssid_flags = param->mbssid_flags;
	cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;

	IEEE80211_ADDR_COPY(cmd->vdev_macaddr.addr, macaddr);

	/* Array TLV with one txrx_streams entry per band follows. */
	ptr = (void *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
	    sizeof(*cmd));
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, len);

	/* First entry: 2 GHz band chain configuration. */
	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams = param->chains[0].tx;
	txrx_streams->supported_rx_streams = param->chains[0].rx;

	/* Second entry: 5 GHz band chain configuration. */
	txrx_streams++;
	txrx_streams->tlv_header =
	    FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
	    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams = param->chains[1].tx;
	txrx_streams->supported_rx_streams = param->chains[1].rx;

	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		printf("%s: failed to submit WMI_VDEV_CREATE_CMDID\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev create id %d type %d subtype %d "
	    "macaddr %s pdevid %d\n", __func__, param->if_id, param->type,
	    param->subtype, ether_sprintf(macaddr), param->pdev_id);

	return ret;
}
15331 
15332 int
15333 qwx_wmi_vdev_set_param_cmd(struct qwx_softc *sc, uint32_t vdev_id,
15334     uint8_t pdev_id, uint32_t param_id, uint32_t param_value)
15335 {
15336 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
15337 	struct wmi_vdev_set_param_cmd *cmd;
15338 	struct mbuf *m;
15339 	int ret;
15340 
15341 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
15342 	if (!m)
15343 		return ENOMEM;
15344 
15345 	cmd = (struct wmi_vdev_set_param_cmd *)(mtod(m, uint8_t *) +
15346 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
15347 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
15348 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
15349 
15350 	cmd->vdev_id = vdev_id;
15351 	cmd->param_id = param_id;
15352 	cmd->param_value = param_value;
15353 
15354 	ret = qwx_wmi_cmd_send(wmi, m, WMI_VDEV_SET_PARAM_CMDID);
15355 	if (ret) {
15356 		printf("%s: failed to send WMI_VDEV_SET_PARAM_CMDID\n",
15357 		    sc->sc_dev.dv_xname);
15358 		m_freem(m);
15359 		return ret;
15360 	}
15361 
15362 	DNPRINTF(QWX_D_WMI, "%s: cmd vdev set param vdev 0x%x param %d "
15363 	    "value %d\n", __func__, vdev_id, param_id, param_value);
15364 
15365 	return 0;
15366 }
15367 
15368 void
15369 qwx_wmi_put_wmi_channel(struct wmi_channel *chan,
15370     struct wmi_vdev_start_req_arg *arg)
15371 {
15372 	uint32_t center_freq1 = arg->channel.band_center_freq1;
15373 
15374 	memset(chan, 0, sizeof(*chan));
15375 
15376 	chan->mhz = arg->channel.freq;
15377 	chan->band_center_freq1 = arg->channel.band_center_freq1;
15378 
15379 	if (arg->channel.mode == MODE_11AX_HE160) {
15380 		if (arg->channel.freq > arg->channel.band_center_freq1)
15381 			chan->band_center_freq1 = center_freq1 + 40;
15382 		else
15383 			chan->band_center_freq1 = center_freq1 - 40;
15384 
15385 		chan->band_center_freq2 = arg->channel.band_center_freq1;
15386 	} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
15387 	    (arg->channel.mode == MODE_11AX_HE80_80)) {
15388 		chan->band_center_freq2 = arg->channel.band_center_freq2;
15389 	} else
15390 		chan->band_center_freq2 = 0;
15391 
15392 	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
15393 	if (arg->channel.passive)
15394 		chan->info |= WMI_CHAN_INFO_PASSIVE;
15395 	if (arg->channel.allow_ibss)
15396 		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
15397 	if (arg->channel.allow_ht)
15398 		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
15399 	if (arg->channel.allow_vht)
15400 		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
15401 	if (arg->channel.allow_he)
15402 		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
15403 	if (arg->channel.ht40plus)
15404 		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
15405 	if (arg->channel.chan_radar)
15406 		chan->info |= WMI_CHAN_INFO_DFS;
15407 	if (arg->channel.freq2_radar)
15408 		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
15409 
15410 	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
15411 	    arg->channel.max_power) |
15412 	    FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
15413 	    arg->channel.max_reg_power);
15414 
15415 	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
15416 	    arg->channel.max_antenna_gain) |
15417 	    FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
15418 	    arg->channel.max_power);
15419 }
15420 
/*
 * Start (or restart, if 'restart' is set) a vdev on the channel described
 * by 'arg'.  The command TLV is followed by a channel TLV and an empty
 * array TLV reserved for P2P NoA descriptors.
 * Returns 0 on success or errno (EINVAL if the SSID does not fit).
 */
int
qwx_wmi_vdev_start(struct qwx_softc *sc, struct wmi_vdev_start_req_arg *arg,
    int pdev_id, int restart)
{
	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
	struct wmi_vdev_start_request_cmd *cmd;
	struct mbuf *m;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (arg->ssid_len > sizeof(cmd->ssid.ssid))
		return EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	m = qwx_wmi_alloc_mbuf(len);
	if (!m)
		return ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) +
	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
	    WMI_TAG_VDEV_START_REQUEST_CMD) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;
	cmd->mbssid_flags = arg->mbssid_flags;
	cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;

	/* SSID and hidden/PMF flags only apply to a fresh start. */
	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = arg->ssid_len;
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
		if (arg->pmf_enabled)
			cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
	}

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
	if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags))
		cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;

	/* Channel TLV follows directly after the command. */
	ptr = mtod(m, void *) + sizeof(struct ath11k_htc_hdr) +
	    sizeof(struct wmi_cmd_hdr) + sizeof(*cmd);
	chan = ptr;

	qwx_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
	    FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	/* Empty array TLV; we never supply NoA descriptors. */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
	    FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ret = qwx_wmi_cmd_send(wmi, m, restart ?
	    WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		printf("%s: failed to submit vdev_%s cmd\n",
		    sc->sc_dev.dv_xname, restart ? "restart" : "start");
		m_freem(m);
		return ret;
	}

	DNPRINTF(QWX_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n",
	   __func__, restart ? "restart" : "start", arg->vdev_id,
	   arg->channel.freq, arg->channel.mode);

	return ret;
}
15510 
15511 int
15512 qwx_core_start(struct qwx_softc *sc)
15513 {
15514 	int ret;
15515 
15516 	ret = qwx_wmi_attach(sc);
15517 	if (ret) {
15518 		printf("%s: failed to attach wmi: %d\n",
15519 		    sc->sc_dev.dv_xname, ret);
15520 		return ret;
15521 	}
15522 
15523 	ret = qwx_htc_init(sc);
15524 	if (ret) {
15525 		printf("%s: failed to init htc: %d\n",
15526 		    sc->sc_dev.dv_xname, ret);
15527 		goto err_wmi_detach;
15528 	}
15529 
15530 	ret = sc->ops.start(sc);
15531 	if (ret) {
15532 		printf("%s: failed to start host interface: %d\n",
15533 		    sc->sc_dev.dv_xname, ret);
15534 		goto err_wmi_detach;
15535 	}
15536 
15537 	ret = qwx_htc_wait_target(sc);
15538 	if (ret) {
15539 		printf("%s: failed to connect to HTC: %d\n",
15540 		    sc->sc_dev.dv_xname, ret);
15541 		goto err_hif_stop;
15542 	}
15543 
15544 	ret = qwx_dp_htt_connect(&sc->dp);
15545 	if (ret) {
15546 		printf("%s: failed to connect to HTT: %d\n",
15547 		    sc->sc_dev.dv_xname, ret);
15548 		goto err_hif_stop;
15549 	}
15550 
15551 	ret = qwx_wmi_connect(sc);
15552 	if (ret) {
15553 		printf("%s: failed to connect wmi: %d\n",
15554 		    sc->sc_dev.dv_xname, ret);
15555 		goto err_hif_stop;
15556 	}
15557 
15558 	sc->wmi.service_ready = 0;
15559 
15560 	ret = qwx_htc_start(&sc->htc);
15561 	if (ret) {
15562 		printf("%s: failed to start HTC: %d\n",
15563 		    sc->sc_dev.dv_xname, ret);
15564 		goto err_hif_stop;
15565 	}
15566 
15567 	ret = qwx_wmi_wait_for_service_ready(sc);
15568 	if (ret) {
15569 		printf("%s: failed to receive wmi service ready event: %d\n",
15570 		    sc->sc_dev.dv_xname, ret);
15571 		goto err_hif_stop;
15572 	}
15573 #if 0
15574 	ret = ath11k_mac_allocate(ab);
15575 	if (ret) {
15576 		ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
15577 			   ret);
15578 		goto err_hif_stop;
15579 	}
15580 	ath11k_dp_pdev_pre_alloc(sc);
15581 #endif
15582 	ret = qwx_dp_pdev_reo_setup(sc);
15583 	if (ret) {
15584 		printf("%s: failed to initialize reo destination rings: %d\n",
15585 		    __func__, ret);
15586 		goto err_mac_destroy;
15587 	}
15588 
15589 	ret = qwx_wmi_cmd_init(sc);
15590 	if (ret) {
15591 		printf("%s: failed to send wmi init cmd: %d\n", __func__, ret);
15592 		goto err_reo_cleanup;
15593 	}
15594 
15595 	ret = qwx_wmi_wait_for_unified_ready(sc);
15596 	if (ret) {
15597 		printf("%s: failed to receive wmi unified ready event: %d\n",
15598 		    __func__, ret);
15599 		goto err_reo_cleanup;
15600 	}
15601 
15602 	/* put hardware to DBS mode */
15603 	if (sc->hw_params.single_pdev_only &&
15604 	    sc->hw_params.num_rxmda_per_pdev > 1) {
15605 		ret = qwx_wmi_set_hw_mode(sc, WMI_HOST_HW_MODE_DBS);
15606 		if (ret) {
15607 			printf("%s: failed to send dbs mode: %d\n",
15608 			    __func__, ret);
15609 			goto err_hif_stop;
15610 		}
15611 	}
15612 
15613 	ret = qwx_dp_tx_htt_h2t_ver_req_msg(sc);
15614 	if (ret) {
15615 		if (ret != ENOTSUP) {
15616 			printf("%s: failed to send htt version "
15617 			    "request message: %d\n", __func__, ret);
15618 		}
15619 		goto err_reo_cleanup;
15620 	}
15621 
15622 	return 0;
15623 err_reo_cleanup:
15624 	qwx_dp_pdev_reo_cleanup(sc);
15625 err_mac_destroy:
15626 #if 0
15627 	ath11k_mac_destroy(ab);
15628 #endif
15629 err_hif_stop:
15630 	sc->ops.stop(sc);
15631 err_wmi_detach:
15632 	qwx_wmi_detach(sc);
15633 	return ret;
15634 }
15635 
15636 void
15637 qwx_core_stop(struct qwx_softc *sc)
15638 {
15639 	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
15640 		qwx_qmi_firmware_stop(sc);
15641 
15642 	sc->ops.stop(sc);
15643 	qwx_wmi_detach(sc);
15644 	qwx_dp_pdev_reo_cleanup(sc);
15645 }
15646 
/* Tear down per-pdev state; counterpart of qwx_core_pdev_create(). */
void
qwx_core_pdev_destroy(struct qwx_softc *sc)
{
	qwx_dp_pdev_free(sc);
}
15652 
15653 int
15654 qwx_core_pdev_create(struct qwx_softc *sc)
15655 {
15656 	int ret;
15657 
15658 	ret = qwx_dp_pdev_alloc(sc);
15659 	if (ret) {
15660 		printf("%s: failed to attach DP pdev: %d\n",
15661 		    sc->sc_dev.dv_xname, ret);
15662 		return ret;
15663 	}
15664 
15665 	ret = qwx_mac_register(sc);
15666 	if (ret) {
15667 		printf("%s: failed register the radio with mac80211: %d\n",
15668 		    sc->sc_dev.dv_xname, ret);
15669 		goto err_dp_pdev_free;
15670 	}
15671 #if 0
15672 
15673 	ret = ath11k_thermal_register(ab);
15674 	if (ret) {
15675 		ath11k_err(ab, "could not register thermal device: %d\n",
15676 			   ret);
15677 		goto err_mac_unregister;
15678 	}
15679 
15680 	ret = ath11k_spectral_init(ab);
15681 	if (ret) {
15682 		ath11k_err(ab, "failed to init spectral %d\n", ret);
15683 		goto err_thermal_unregister;
15684 	}
15685 
15686 	return 0;
15687 
15688 err_thermal_unregister:
15689 	ath11k_thermal_unregister(ab);
15690 err_mac_unregister:
15691 	ath11k_mac_unregister(ab);
15692 #endif
15693 err_dp_pdev_free:
15694 	qwx_dp_pdev_free(sc);
15695 #if 0
15696 err_pdev_debug:
15697 	ath11k_debugfs_pdev_destroy(ab);
15698 #endif
15699 	return ret;
15700 }
15701 
/*
 * Full teardown of the device: disable interrupts, stop the core,
 * destroy pdev state, power the device down and release the data path
 * and QMI service.  Runs at splnet to keep the network stack out.
 */
void
qwx_core_deinit(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ab->core_lock);
#endif
	sc->ops.irq_disable(sc);

	qwx_core_stop(sc);
	qwx_core_pdev_destroy(sc);
#ifdef notyet
	mutex_unlock(&ab->core_lock);
#endif
	sc->ops.power_down(sc);
#if 0
	ath11k_mac_destroy(ab);
	ath11k_debugfs_soc_destroy(ab);
#endif
	qwx_dp_free(sc);
#if 0
	ath11k_reg_free(ab);
#endif
	qwx_qmi_deinit_service(sc);

	/* Shadow register config must be redone on the next power-up. */
	hal->num_shadow_reg_configured = 0;

	splx(s);
}
15733 
15734 int
15735 qwx_core_qmi_firmware_ready(struct qwx_softc *sc)
15736 {
15737 	int ret;
15738 
15739 	ret = qwx_core_start_firmware(sc, sc->fw_mode);
15740 	if (ret) {
15741 		printf("%s: failed to start firmware: %d\n",
15742 		    sc->sc_dev.dv_xname, ret);
15743 		return ret;
15744 	}
15745 
15746 	ret = qwx_ce_init_pipes(sc);
15747 	if (ret) {
15748 		printf("%s: failed to initialize CE: %d\n",
15749 		    sc->sc_dev.dv_xname, ret);
15750 		goto err_firmware_stop;
15751 	}
15752 
15753 	ret = qwx_dp_alloc(sc);
15754 	if (ret) {
15755 		printf("%s: failed to init DP: %d\n",
15756 		    sc->sc_dev.dv_xname, ret);
15757 		goto err_firmware_stop;
15758 	}
15759 
15760 	switch (sc->crypto_mode) {
15761 	case ATH11K_CRYPT_MODE_SW:
15762 		set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
15763 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
15764 		break;
15765 	case ATH11K_CRYPT_MODE_HW:
15766 		clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags);
15767 		clear_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
15768 		break;
15769 	default:
15770 		printf("%s: invalid crypto_mode: %d\n",
15771 		    sc->sc_dev.dv_xname, sc->crypto_mode);
15772 		return EINVAL;
15773 	}
15774 
15775 	if (sc->frame_mode == ATH11K_HW_TXRX_RAW)
15776 		set_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags);
15777 #if 0
15778 	mutex_lock(&ab->core_lock);
15779 #endif
15780 	ret = qwx_core_start(sc);
15781 	if (ret) {
15782 		printf("%s: failed to start core: %d\n",
15783 		    sc->sc_dev.dv_xname, ret);
15784 		goto err_dp_free;
15785 	}
15786 
15787 	if (!sc->attached) {
15788 		printf("%s: %s fw 0x%x address %s\n", sc->sc_dev.dv_xname,
15789 		    sc->hw_params.name, sc->qmi_target.fw_version,
15790 		    ether_sprintf(sc->mac_addr));
15791 	}
15792 
15793 	ret = qwx_core_pdev_create(sc);
15794 	if (ret) {
15795 		printf("%s: failed to create pdev core: %d\n",
15796 		    sc->sc_dev.dv_xname, ret);
15797 		goto err_core_stop;
15798 	}
15799 
15800 #if 0 /* TODO: Is this in the right spot for OpenBSD? */
15801 	sc->ops.irq_enable(sc);
15802 #endif
15803 
15804 #if 0
15805 	mutex_unlock(&ab->core_lock);
15806 #endif
15807 
15808 	return 0;
15809 err_core_stop:
15810 	qwx_core_stop(sc);
15811 #if 0
15812 	ath11k_mac_destroy(ab);
15813 #endif
15814 err_dp_free:
15815 	qwx_dp_free(sc);
15816 #if 0
15817 	mutex_unlock(&ab->core_lock);
15818 #endif
15819 err_firmware_stop:
15820 	qwx_qmi_firmware_stop(sc);
15821 
15822 	return ret;
15823 }
15824 
15825 void
15826 qwx_qmi_fw_init_done(struct qwx_softc *sc)
15827 {
15828 	int ret = 0;
15829 
15830 	clear_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
15831 
15832 	if (sc->qmi_cal_done == 0 && sc->hw_params.cold_boot_calib) {
15833 		qwx_qmi_process_coldboot_calibration(sc);
15834 	} else {
15835 		clear_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags);
15836 		clear_bit(ATH11K_FLAG_RECOVERY, sc->sc_flags);
15837 		ret = qwx_core_qmi_firmware_ready(sc);
15838 		if (ret) {
15839 			set_bit(ATH11K_FLAG_QMI_FAIL, sc->sc_flags);
15840 			return;
15841 		}
15842 	}
15843 }
15844 
int
qwx_qmi_event_server_arrive(struct qwx_softc *sc)
{
	int ret;

	/*
	 * The QMI server has appeared. Run the firmware bring-up
	 * handshake in its required order: indication registration,
	 * host capabilities, memory segments, board data file, M3
	 * firmware info, then wait for the firmware-init-done event.
	 * Returns 0 on success, an error code or -1 on timeout.
	 */
	sc->fw_init_done = 0;

	ret = qwx_qmi_fw_ind_register_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi firmware indication: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_host_cap_send(sc);
	if (ret < 0) {
		printf("%s: failed to send qmi host cap: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/*
	 * The firmware may ask us to retry the memory segment
	 * transfer once (e.g. with different segment sizes), which
	 * qwx_qmi_mem_seg_send() reports as EBUSY.
	 */
	ret = qwx_qmi_mem_seg_send(sc);
	if (ret == EBUSY)
		ret = qwx_qmi_mem_seg_send(sc);
	if (ret) {
		printf("%s: failed to send qmi memory segments: %d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_event_load_bdf(sc);
	if (ret < 0) {
		printf("%s: qmi failed to download BDF:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	ret = qwx_qmi_wlanfw_m3_info_send(sc);
	if (ret) {
		printf("%s: qmi m3 info send failed:%d\n",
		    sc->sc_dev.dv_xname, ret);
		return ret;
	}

	/* Sleep until the fw-init-done interrupt wakes us, 10s timeout. */
	while (!sc->fw_init_done) {
		ret = tsleep_nsec(&sc->fw_init_done, 0, "qwxfwinit",
		    SEC_TO_NSEC(10));
		if (ret) {
			printf("%s: fw init timeout\n", sc->sc_dev.dv_xname);
			return -1;
		}
	}

	qwx_qmi_fw_init_done(sc);
	return 0;
}
15901 
15902 int
15903 qwx_core_init(struct qwx_softc *sc)
15904 {
15905 	int error;
15906 
15907 	error = qwx_qmi_init_service(sc);
15908 	if (error) {
15909 		printf("failed to initialize qmi :%d\n", error);
15910 		return error;
15911 	}
15912 
15913 	error = sc->ops.power_up(sc);
15914 	if (error) {
15915 		printf("failed to power up :%d\n", error);
15916 		qwx_qmi_deinit_service(sc);
15917 	}
15918 
15919 	return error;
15920 }
15921 
15922 int
15923 qwx_init_hw_params(struct qwx_softc *sc)
15924 {
15925 	const struct ath11k_hw_params *hw_params = NULL;
15926 	int i;
15927 
15928 	for (i = 0; i < nitems(ath11k_hw_params); i++) {
15929 		hw_params = &ath11k_hw_params[i];
15930 
15931 		if (hw_params->hw_rev == sc->sc_hw_rev)
15932 			break;
15933 	}
15934 
15935 	if (i == nitems(ath11k_hw_params)) {
15936 		printf("%s: Unsupported hardware version: 0x%x\n",
15937 		    sc->sc_dev.dv_xname, sc->sc_hw_rev);
15938 		return EINVAL;
15939 	}
15940 
15941 	sc->hw_params = *hw_params;
15942 
15943 	DPRINTF("%s: %s\n", sc->sc_dev.dv_xname, sc->hw_params.name);
15944 
15945 	return 0;
15946 }
15947 
/*
 * Template describing every SRNG (shared ring) type the HAL knows about:
 * the first hardware ring ID of the group, how many rings of that type
 * exist, the descriptor entry size in 32-bit words, whether the ring is
 * managed by the LMAC (firmware) rather than the host, the ring
 * direction relative to the host, and the maximum ring size. The
 * template is copied into hal->srng_config at attach time and the
 * register offsets are filled in by qwx_hal_srng_create_config().
 */
static const struct hal_srng_config hw_srng_config_templ[QWX_NUM_SRNG_CFG] = {
	/* TODO: max_rings can populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},

	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring =  false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		/* NOTE(review): ring_dir is SRC here even though this is
		 * the CE destination ring; presumably intentional (the
		 * host produces free buffers into it) -- confirm against
		 * the upstream ath11k driver. */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 5,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
16129 
int
qwx_hal_srng_create_config(struct qwx_softc *sc)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *s;

	/*
	 * Populate hal->srng_config from the static template, then fill
	 * in the chip-specific register offsets for each ring type:
	 * reg_start[0] is the R0 (configuration) register group base,
	 * reg_start[1] the R2 (head/tail pointer) group base, and
	 * reg_size[] is the per-ring stride within each group, derived
	 * from the distance between ring 0 and ring 1 of the same type.
	 * Always returns 0.
	 */
	memcpy(hal->srng_config, hw_srng_config_templ,
	    sizeof(hal->srng_config));

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(sc);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(sc) - HAL_REO1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_REO2_RING_HP(sc) - HAL_REO1_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(sc);

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(sc);

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(sc);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(sc) - HAL_TCL1_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	/*
	 * NOTE(review): CE_SRC deliberately(?) uses the HAL_CE_DST_*
	 * offset defines relative to the CE0 *SRC* register block --
	 * confirm against the upstream ath11k hal.c before changing.
	 */
	s = &hal->srng_config[HAL_CE_SRC];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(sc);

	s = &hal->srng_config[HAL_CE_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_BASE_LSB +
		ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_CE_DST_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) +
		HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc) + HAL_CE_DST_STATUS_RING_HP +
		ATH11K_CE_OFFSET(sc);
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(sc) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(sc);

	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;

	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;

	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(sc) -
		HAL_WBM0_RELEASE_RING_BASE_LSB(sc);
	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;

	return 0;
}
16222 
16223 int
16224 qwx_hal_srng_get_ring_id(struct qwx_softc *sc,
16225     enum hal_ring_type type, int ring_num, int mac_id)
16226 {
16227 	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
16228 	int ring_id;
16229 
16230 	if (ring_num >= srng_config->max_rings) {
16231 		printf("%s: invalid ring number :%d\n", __func__, ring_num);
16232 		return -1;
16233 	}
16234 
16235 	ring_id = srng_config->start_ring_id + ring_num;
16236 	if (srng_config->lmac_ring)
16237 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
16238 
16239 	if (ring_id >= HAL_SRNG_RING_ID_MAX) {
16240 		printf("%s: invalid ring ID :%d\n", __func__, ring_id);
16241 		return -1;
16242 	}
16243 
16244 	return ring_id;
16245 }
16246 
16247 void
16248 qwx_hal_srng_update_hp_tp_addr(struct qwx_softc *sc, int shadow_cfg_idx,
16249     enum hal_ring_type ring_type, int ring_num)
16250 {
16251 	struct hal_srng *srng;
16252 	struct ath11k_hal *hal = &sc->hal;
16253 	int ring_id;
16254 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
16255 
16256 	ring_id = qwx_hal_srng_get_ring_id(sc, ring_type, ring_num, 0);
16257 	if (ring_id < 0)
16258 		return;
16259 
16260 	srng = &hal->srng_list[ring_id];
16261 
16262 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
16263 		srng->u.dst_ring.tp_addr = (uint32_t *)(
16264 		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
16265 		    (unsigned long)sc->mem);
16266 	else
16267 		srng->u.src_ring.hp_addr = (uint32_t *)(
16268 		    HAL_SHADOW_REG(sc, shadow_cfg_idx) +
16269 		    (unsigned long)sc->mem);
16270 }
16271 
16272 void
16273 qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *sc, struct hal_srng *srng)
16274 {
16275 #ifdef notyet
16276 	lockdep_assert_held(&srng->lock);
16277 #endif
16278 	/* Update the shadow HP if the ring isn't empty. */
16279 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
16280 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
16281 		qwx_hal_srng_access_end(sc, srng);
16282 }
16283 
16284 int
16285 qwx_hal_srng_update_shadow_config(struct qwx_softc *sc,
16286     enum hal_ring_type ring_type, int ring_num)
16287 {
16288 	struct ath11k_hal *hal = &sc->hal;
16289 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
16290 	int shadow_cfg_idx = hal->num_shadow_reg_configured;
16291 	uint32_t target_reg;
16292 
16293 	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
16294 		return EINVAL;
16295 
16296 	hal->num_shadow_reg_configured++;
16297 
16298 	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
16299 	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
16300 		ring_num;
16301 
16302 	/* For destination ring, shadow the TP */
16303 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
16304 		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
16305 
16306 	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
16307 
16308 	/* update hp/tp addr to hal structure*/
16309 	qwx_hal_srng_update_hp_tp_addr(sc, shadow_cfg_idx, ring_type, ring_num);
16310 
16311 	DPRINTF("%s: target_reg %x, shadow reg 0x%x shadow_idx 0x%x, "
16312 	    "ring_type %d, ring num %d\n", __func__, target_reg,
16313 	     HAL_SHADOW_REG(sc, shadow_cfg_idx), shadow_cfg_idx,
16314 	     ring_type, ring_num);
16315 
16316 	return 0;
16317 }
16318 
16319 void
16320 qwx_hal_srng_shadow_config(struct qwx_softc *sc)
16321 {
16322 	struct ath11k_hal *hal = &sc->hal;
16323 	int ring_type, ring_num;
16324 	struct hal_srng_config *cfg;
16325 
16326 	/* update all the non-CE srngs. */
16327 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
16328 		cfg = &hal->srng_config[ring_type];
16329 
16330 		if (ring_type == HAL_CE_SRC ||
16331 		    ring_type == HAL_CE_DST ||
16332 			ring_type == HAL_CE_DST_STATUS)
16333 			continue;
16334 
16335 		if (cfg->lmac_ring)
16336 			continue;
16337 
16338 		for (ring_num = 0; ring_num < cfg->max_rings; ring_num++) {
16339 			qwx_hal_srng_update_shadow_config(sc, ring_type,
16340 			    ring_num);
16341 		}
16342 	}
16343 }
16344 
16345 void
16346 qwx_hal_srng_get_shadow_config(struct qwx_softc *sc, uint32_t **cfg,
16347     uint32_t *len)
16348 {
16349 	struct ath11k_hal *hal = &sc->hal;
16350 
16351 	*len = hal->num_shadow_reg_configured;
16352 	*cfg = hal->shadow_reg_addr;
16353 }
16354 
16355 int
16356 qwx_hal_alloc_cont_rdp(struct qwx_softc *sc)
16357 {
16358 	struct ath11k_hal *hal = &sc->hal;
16359 	size_t size = sizeof(uint32_t) * HAL_SRNG_RING_ID_MAX;
16360 
16361 	if (hal->rdpmem == NULL) {
16362 		hal->rdpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
16363 		if (hal->rdpmem == NULL) {
16364 			printf("%s: could not allocate RDP DMA memory\n",
16365 			    sc->sc_dev.dv_xname);
16366 			return ENOMEM;
16367 
16368 		}
16369 	}
16370 
16371 	hal->rdp.vaddr = QWX_DMA_KVA(hal->rdpmem);
16372 	hal->rdp.paddr = QWX_DMA_DVA(hal->rdpmem);
16373 	return 0;
16374 }
16375 
16376 void
16377 qwx_hal_free_cont_rdp(struct qwx_softc *sc)
16378 {
16379 	struct ath11k_hal *hal = &sc->hal;
16380 
16381 	if (hal->rdpmem == NULL)
16382 		return;
16383 
16384 	hal->rdp.vaddr = NULL;
16385 	hal->rdp.paddr = 0L;
16386 	qwx_dmamem_free(sc->sc_dmat, hal->rdpmem);
16387 	hal->rdpmem = NULL;
16388 }
16389 
16390 int
16391 qwx_hal_alloc_cont_wrp(struct qwx_softc *sc)
16392 {
16393 	struct ath11k_hal *hal = &sc->hal;
16394 	size_t size = sizeof(uint32_t) * HAL_SRNG_NUM_LMAC_RINGS;
16395 
16396 	if (hal->wrpmem == NULL) {
16397 		hal->wrpmem = qwx_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE);
16398 		if (hal->wrpmem == NULL) {
16399 			printf("%s: could not allocate WDP DMA memory\n",
16400 			    sc->sc_dev.dv_xname);
16401 			return ENOMEM;
16402 
16403 		}
16404 	}
16405 
16406 	hal->wrp.vaddr = QWX_DMA_KVA(hal->wrpmem);
16407 	hal->wrp.paddr = QWX_DMA_DVA(hal->wrpmem);
16408 	return 0;
16409 }
16410 
16411 void
16412 qwx_hal_free_cont_wrp(struct qwx_softc *sc)
16413 {
16414 	struct ath11k_hal *hal = &sc->hal;
16415 
16416 	if (hal->wrpmem == NULL)
16417 		return;
16418 
16419 	hal->wrp.vaddr = NULL;
16420 	hal->wrp.paddr = 0L;
16421 	qwx_dmamem_free(sc->sc_dmat, hal->wrpmem);
16422 	hal->wrpmem = NULL;
16423 }
16424 
16425 int
16426 qwx_hal_srng_init(struct qwx_softc *sc)
16427 {
16428 	struct ath11k_hal *hal = &sc->hal;
16429 	int ret;
16430 
16431 	memset(hal, 0, sizeof(*hal));
16432 
16433 	ret = qwx_hal_srng_create_config(sc);
16434 	if (ret)
16435 		goto err_hal;
16436 
16437 	ret = qwx_hal_alloc_cont_rdp(sc);
16438 	if (ret)
16439 		goto err_hal;
16440 
16441 	ret = qwx_hal_alloc_cont_wrp(sc);
16442 	if (ret)
16443 		goto err_free_cont_rdp;
16444 
16445 #ifdef notyet
16446 	qwx_hal_register_srng_key(sc);
16447 #endif
16448 
16449 	return 0;
16450 err_free_cont_rdp:
16451 	qwx_hal_free_cont_rdp(sc);
16452 
16453 err_hal:
16454 	return ret;
16455 }
16456 
void
qwx_hal_srng_dst_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t hp_addr;
	uint32_t reg_base;

	/*
	 * Program the hardware registers of a destination ring:
	 * MSI target (if enabled), ring base address and size, ring ID,
	 * interrupt mitigation thresholds, the DMA address of the
	 * shared HP word, and finally the MISC register which enables
	 * the ring. The write order follows the REO1 register layout;
	 * do not reorder.
	 */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		/* MSI address split into LSB/MSB plus enable bit. */
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		    HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(sc), val);

		sc->ops.write32(sc,
		    reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base: low 32 bits at reg_base, MSB plus size next. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(sc), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	    FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_ID_OFFSET(sc), val);

	/* interrupt setup */
	/* Timer threshold is programmed in units of 8 usecs (>> 3). */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
	    (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(sc), val);

	/* DMA address of this ring's HP word within the shared RDP area. */
	hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr -
	    (unsigned long)hal->rdp.vaddr);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(sc),
	    hp_addr & HAL_ADDR_LSB_REG_MASK);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(sc),
	    hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_REO1_RING_TP_OFFSET(sc), 0);
	*srng->u.dst_ring.hp_addr = 0;

	/* MISC register: swap flags plus the ring enable bit, last. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_REO1_RING_MISC_OFFSET(sc), val);
}
16531 
void
qwx_hal_srng_src_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
{
	struct ath11k_hal *hal = &sc->hal;
	uint32_t val;
	uint64_t tp_addr;
	uint32_t reg_base;

	/*
	 * Program the hardware registers of a source ring, mirroring
	 * qwx_hal_srng_dst_hw_init() but using the TCL1 register layout
	 * and shadowing the tail pointer instead of the head pointer.
	 * The write order matters; do not reorder.
	 */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		/* MSI address split into LSB/MSB plus enable bit. */
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(sc),
		    srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
		    ((uint64_t)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(sc),
		    val);

		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(sc),
		    srng->msi_data);
	}

	/* Ring base: low 32 bits at reg_base, MSB plus size next. */
	sc->ops.write32(sc, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
	    ((uint64_t)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
	    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
	    (srng->entry_size * srng->num_entries));
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_ID_OFFSET(sc), val);

	/*
	 * NOTE(review): the WBM idle link ring re-writes base/MSB with
	 * the same values as above; presumably a required hardware
	 * quirk inherited from ath11k -- confirm before removing.
	 */
	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		sc->ops.write32(sc, reg_base, (uint32_t)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
		    ((uint64_t)srng->ring_base_paddr >>
		    HAL_ADDR_MSB_REG_SHIFT)) |
		    FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
		    (srng->entry_size * srng->num_entries));
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(sc), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
	    srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
	    (srng->intr_batch_cntr_thres_entries * srng->entry_size));

	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(sc), val);

	/* Optional low-threshold interrupt for source rings. */
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
		    srng->u.src_ring.low_threshold);
	}
	sc->ops.write32(sc,
	    reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(sc), val);

	/* DMA address of this ring's TP word within the shared RDP area. */
	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
		    ((unsigned long)srng->u.src_ring.tp_addr -
		    (unsigned long)hal->rdp.vaddr);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(sc),
		    tp_addr & HAL_ADDR_LSB_REG_MASK);
		sc->ops.write32(sc,
		    reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(sc),
		    tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	sc->ops.write32(sc, reg_base, 0);
	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	/* MISC register: swap flags plus the ring enable bit, last. */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	sc->ops.write32(sc, reg_base + HAL_TCL1_RING_MISC_OFFSET(sc), val);
}
16636 
16637 void
16638 qwx_hal_srng_hw_init(struct qwx_softc *sc, struct hal_srng *srng)
16639 {
16640 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
16641 		qwx_hal_srng_src_hw_init(sc, srng);
16642 	else
16643 		qwx_hal_srng_dst_hw_init(sc, srng);
16644 }
16645 
16646 void
16647 qwx_hal_ce_dst_setup(struct qwx_softc *sc, struct hal_srng *srng, int ring_num)
16648 {
16649 	struct hal_srng_config *srng_config = &sc->hal.srng_config[HAL_CE_DST];
16650 	uint32_t addr;
16651 	uint32_t val;
16652 
16653 	addr = HAL_CE_DST_RING_CTRL +
16654 	    srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
16655 	    ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
16656 
16657 	val = sc->ops.read32(sc, addr);
16658 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
16659 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
16660 	    srng->u.dst_ring.max_buffer_length);
16661 	sc->ops.write32(sc, addr, val);
16662 }
16663 
16664 void
16665 qwx_hal_ce_src_set_desc(void *buf, uint64_t paddr, uint32_t len, uint32_t id,
16666     uint8_t byte_swap_data)
16667 {
16668 	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
16669 
16670 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
16671 	desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
16672 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
16673 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
16674 	    byte_swap_data) |
16675 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
16676 	    FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
16677 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
16678 }
16679 
16680 void
16681 qwx_hal_ce_dst_set_desc(void *buf, uint64_t paddr)
16682 {
16683 	struct hal_ce_srng_dest_desc *desc =
16684 	    (struct hal_ce_srng_dest_desc *)buf;
16685 
16686 	desc->buffer_addr_low = htole32(paddr & HAL_ADDR_LSB_REG_MASK);
16687 	desc->buffer_addr_info = htole32(FIELD_PREP(
16688 	    HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
16689 	    (paddr >> HAL_ADDR_MSB_REG_SHIFT)));
16690 }
16691 
16692 uint32_t
16693 qwx_hal_ce_dst_status_get_length(void *buf)
16694 {
16695 	struct hal_ce_srng_dst_status_desc *desc =
16696 		(struct hal_ce_srng_dst_status_desc *)buf;
16697 	uint32_t len;
16698 
16699 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
16700 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
16701 
16702 	return len;
16703 }
16704 
16705 
/*
 * Set up a HAL shared ring (SRNG).
 *
 * Resolves a global HAL ring id for (type, ring_num, mac_id), fills
 * the corresponding hal_srng state from the per-type configuration
 * and the caller's parameters, computes per-register-group base
 * addresses, zeroes the ring memory, and (for non-LMAC rings)
 * initializes the ring in hardware.
 *
 * Returns the HAL ring id on success, or a negative value if no
 * ring id could be derived.
 */
int
qwx_hal_srng_setup(struct qwx_softc *sc, enum hal_ring_type type,
    int ring_num, int mac_id, struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &sc->hal;
	struct hal_srng_config *srng_config = &sc->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	uint32_t lmac_idx;
	int i;
	uint32_t reg_base;

	/* Map (type, ring_num, mac_id) to a global HAL ring id. */
	ring_id = qwx_hal_srng_get_ring_id(sc, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	/* Copy ring geometry and interrupt parameters into srng state. */
	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
	    params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
#if 0
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
#endif

	/* Per-register-group base addresses for this ring instance. */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
		    (ring_num * srng_config->reg_size[i]);
	}

	/* Clear the ring memory; entry_size appears to be counted in
	 * 32-bit words (hence the << 2 to get bytes) -- TODO confirm
	 * against the srng_config definitions. */
	memset(srng->ring_base_vaddr, 0,
	    (srng->entry_size * srng->num_entries) << 2);

#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */
	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
#endif
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		/* Source ring: host produces entries, device consumes. */
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
		    srng->entry_size;
		if (srng_config->lmac_ring) {
			/* Head pointer updates go through shared memory
			 * which firmware reads. */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base);
			else
				DPRINTF("%s: type %d ring_num %d reg_base "
				    "0x%x shadow 0x%lx\n",
				    sc->sc_dev.dv_xname, type, ring_num, reg_base,
				   (unsigned long)srng->u.src_ring.hp_addr -
				   (unsigned long)sc->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
			    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!sc->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				    (uint32_t *)((unsigned long)sc->mem +
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)));
			else
				DPRINTF("%s: type %d ring_num %d target_reg "
				    "0x%x shadow 0x%lx\n", sc->sc_dev.dv_xname,
				    type, ring_num,
				    reg_base + (HAL_REO1_RING_TP(sc) -
				    HAL_REO1_RING_HP(sc)),
				    (unsigned long)srng->u.dst_ring.tp_addr -
				    (unsigned long)sc->mem);
		}
	}

	/* LMAC rings are initialized by firmware, not by the host. */
	if (srng_config->lmac_ring)
		return ring_id;

	qwx_hal_srng_hw_init(sc, srng);

	if (type == HAL_CE_DST) {
		/* CE destination rings also need their maximum buffer
		 * length programmed. */
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		qwx_hal_ce_dst_setup(sc, srng, ring_num);
	}

	return ring_id;
}
16833 
16834 size_t
16835 qwx_hal_ce_get_desc_size(enum hal_ce_desc type)
16836 {
16837 	switch (type) {
16838 	case HAL_CE_DESC_SRC:
16839 		return sizeof(struct hal_ce_srng_src_desc);
16840 	case HAL_CE_DESC_DST:
16841 		return sizeof(struct hal_ce_srng_dest_desc);
16842 	case HAL_CE_DESC_DST_STATUS:
16843 		return sizeof(struct hal_ce_srng_dst_status_desc);
16844 	}
16845 
16846 	return 0;
16847 }
16848 
/* HTC tx completion callback; not implemented yet, only logs the call. */
void
qwx_htc_tx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	printf("%s: not implemented\n", __func__);
}
16854 
/*
 * Reap the next completed entry from a CE pipe's source ring.
 *
 * Returns the per-transfer tx state of the completed entry and
 * advances the ring's software index, or NULL if nothing has
 * completed. The caller is responsible for unloading the DMA map
 * and releasing the mbuf held in the returned tx state.
 */
struct qwx_tx_data *
qwx_ce_completed_send_next(struct qwx_ce_pipe *pipe)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	void *ctx;
	struct qwx_tx_data *tx_data = NULL;
	uint32_t *desc;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	/* NOTE(review): there is no qwx_hal_srng_access_end() on this
	 * path; reaping presumably does not move the hardware pointer --
	 * confirm against the HAL srng implementation. */
	desc = qwx_hal_srng_src_reap_next(sc, srng);
	if (!desc)
		goto err_unlock;

	ctx = pipe->src_ring->per_transfer_context[sw_index];
	tx_data = (struct qwx_tx_data *)ctx;

	/* Advance past the reaped entry. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return tx_data;
}
16895 
16896 int
16897 qwx_ce_tx_process_cb(struct qwx_ce_pipe *pipe)
16898 {
16899 	struct qwx_softc *sc = pipe->sc;
16900 	struct qwx_tx_data *tx_data;
16901 	struct mbuf *m;
16902 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
16903 	int ret = 0;
16904 
16905 	while ((tx_data = qwx_ce_completed_send_next(pipe)) != NULL) {
16906 		bus_dmamap_unload(sc->sc_dmat, tx_data->map);
16907 		m = tx_data->m;
16908 		tx_data->m = NULL;
16909 
16910 		if ((!pipe->send_cb) || sc->hw_params.credit_flow) {
16911 			m_freem(m);
16912 			continue;
16913 		}
16914 
16915 		ml_enqueue(&ml, m);
16916 		ret = 1;
16917 	}
16918 
16919 	while ((m = ml_dequeue(&ml))) {
16920 		DNPRINTF(QWX_D_CE, "%s: tx ce pipe %d len %d\n", __func__,
16921 		    pipe->pipe_num, m->m_len);
16922 		pipe->send_cb(sc, m);
16923 	}
16924 
16925 	return ret;
16926 }
16927 
16928 void
16929 qwx_ce_poll_send_completed(struct qwx_softc *sc, uint8_t pipe_id)
16930 {
16931 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
16932 	const struct ce_attr *attr =  &sc->hw_params.host_ce_config[pipe_id];
16933 
16934 	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
16935 		qwx_ce_tx_process_cb(pipe);
16936 }
16937 
16938 void
16939 qwx_htc_process_credit_report(struct qwx_htc *htc,
16940     const struct ath11k_htc_credit_report *report, int len,
16941     enum ath11k_htc_ep_id eid)
16942 {
16943 	struct qwx_softc *sc = htc->sc;
16944 	struct qwx_htc_ep *ep;
16945 	int i, n_reports;
16946 
16947 	if (len % sizeof(*report))
16948 		printf("%s: Uneven credit report len %d", __func__, len);
16949 
16950 	n_reports = len / sizeof(*report);
16951 #ifdef notyet
16952 	spin_lock_bh(&htc->tx_lock);
16953 #endif
16954 	for (i = 0; i < n_reports; i++, report++) {
16955 		if (report->eid >= ATH11K_HTC_EP_COUNT)
16956 			break;
16957 
16958 		ep = &htc->endpoint[report->eid];
16959 		ep->tx_credits += report->credits;
16960 
16961 		DNPRINTF(QWX_D_HTC, "%s: ep %d credits got %d total %d\n",
16962 		    __func__, report->eid, report->credits, ep->tx_credits);
16963 
16964 		if (ep->ep_ops.ep_tx_credits) {
16965 #ifdef notyet
16966 			spin_unlock_bh(&htc->tx_lock);
16967 #endif
16968 			ep->ep_ops.ep_tx_credits(sc);
16969 #ifdef notyet
16970 			spin_lock_bh(&htc->tx_lock);
16971 #endif
16972 		}
16973 	}
16974 #ifdef notyet
16975 	spin_unlock_bh(&htc->tx_lock);
16976 #endif
16977 }
16978 
16979 int
16980 qwx_htc_process_trailer(struct qwx_htc *htc, uint8_t *buffer, int length,
16981     enum ath11k_htc_ep_id src_eid)
16982 {
16983 	struct qwx_softc *sc = htc->sc;
16984 	int status = 0;
16985 	struct ath11k_htc_record *record;
16986 	size_t len;
16987 
16988 	while (length > 0) {
16989 		record = (struct ath11k_htc_record *)buffer;
16990 
16991 		if (length < sizeof(record->hdr)) {
16992 			status = EINVAL;
16993 			break;
16994 		}
16995 
16996 		if (record->hdr.len > length) {
16997 			/* no room left in buffer for record */
16998 			printf("%s: Invalid record length: %d\n",
16999 			    __func__, record->hdr.len);
17000 			status = EINVAL;
17001 			break;
17002 		}
17003 
17004 		if (sc->hw_params.credit_flow) {
17005 			switch (record->hdr.id) {
17006 			case ATH11K_HTC_RECORD_CREDITS:
17007 				len = sizeof(struct ath11k_htc_credit_report);
17008 				if (record->hdr.len < len) {
17009 					printf("%s: Credit report too long\n",
17010 					    __func__);
17011 					status = EINVAL;
17012 					break;
17013 				}
17014 				qwx_htc_process_credit_report(htc,
17015 				    record->credit_report,
17016 				    record->hdr.len, src_eid);
17017 				break;
17018 			default:
17019 				printf("%s: unhandled record: id:%d length:%d\n",
17020 				    __func__, record->hdr.id, record->hdr.len);
17021 				break;
17022 			}
17023 		}
17024 
17025 		if (status)
17026 			break;
17027 
17028 		/* multiple records may be present in a trailer */
17029 		buffer += sizeof(record->hdr) + record->hdr.len;
17030 		length -= sizeof(record->hdr) + record->hdr.len;
17031 	}
17032 
17033 	return status;
17034 }
17035 
/*
 * Handle a suspend-complete event from the device (ack == 1 for
 * completion, 0 for a nack); not implemented yet.
 */
void
qwx_htc_suspend_complete(struct qwx_softc *sc, int ack)
{
	printf("%s: not implemented\n", __func__);
}
17041 
/* Handle a wakeup-from-suspend event from the device. */
void
qwx_htc_wakeup_from_suspend(struct qwx_softc *sc)
{
	/* TODO This is really all the Linux driver does here... silence it? */
	printf("%s: wakeup from suspend received\n", __func__);
}
17048 
/*
 * Handle one HTC frame received from the device.
 *
 * Validates the HTC header, processes any trailer records appended
 * to the payload, and dispatches the remainder: endpoint 0 messages
 * are handled inline (control responses, suspend/wakeup events);
 * all other endpoints are handed to their registered rx completion
 * callback. The mbuf is consumed in all cases.
 */
void
qwx_htc_rx_completion_handler(struct qwx_softc *sc, struct mbuf *m)
{
	struct qwx_htc *htc = &sc->htc;
	struct ath11k_htc_hdr *hdr;
	struct qwx_htc_ep *ep;
	uint16_t payload_len;
	uint32_t message_id, trailer_len = 0;
	uint8_t eid;
	int trailer_present;

	/* Make the HTC header contiguous so it can be dereferenced. */
	m = m_pullup(m, sizeof(struct ath11k_htc_hdr));
	if (m == NULL) {
		printf("%s: m_pullup failed\n", __func__);
		m = NULL; /* already freed */
		goto out;
	}

	hdr = mtod(m, struct ath11k_htc_hdr *);

	eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);

	if (eid >= ATH11K_HTC_EP_COUNT) {
		printf("%s: HTC Rx: invalid eid %d\n", __func__, eid);
		printf("%s: HTC info: 0x%x\n", __func__, hdr->htc_info);
		printf("%s: CTRL info: 0x%x\n", __func__, hdr->ctrl_info);
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);

	if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
		printf("%s: HTC rx frame too long, len: %zu\n", __func__,
		    payload_len + sizeof(*hdr));
		goto out;
	}

	if (m->m_pkthdr.len < payload_len) {
		printf("%s: HTC Rx: insufficient length, got %d, "
		    "expected %d\n", __func__, m->m_pkthdr.len, payload_len);
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
	    ATH11K_HTC_FLAG_TRAILER_PRESENT;

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p trailer_present %d\n",
	    __func__, eid, m, trailer_present);

	if (trailer_present) {
		int status = 0;
		uint8_t *trailer;
		int trim;
		size_t min_len;

		trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
		min_len = sizeof(struct ath11k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			printf("%s: Invalid trailer length: %d\n", __func__,
			    trailer_len);
			goto out;
		}

		/* The trailer occupies the last trailer_len bytes of
		 * the payload. */
		trailer = (uint8_t *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = qwx_htc_process_trailer(htc, trailer,
		    trailer_len, eid);
		if (status)
			goto out;

		/* Strip the trailer off the tail of the mbuf. */
		trim = trailer_len;
		m_adj(m, -trim);
	}

	if (trailer_len >= payload_len)
		/* zero length packet with trailer data, just drop these */
		goto out;

	/* Strip the HTC header; only the payload remains. */
	m_adj(m, sizeof(*hdr));

	if (eid == ATH11K_HTC_EP_0) {
		struct ath11k_htc_msg *msg;

		msg = mtod(m, struct ath11k_htc_msg *);
		message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);

		DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p message_id %d\n",
		    __func__, eid, m, message_id);

		switch (message_id) {
		case ATH11K_HTC_MSG_READY_ID:
		case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
			/* handle HTC control message */
			if (sc->ctl_resp) {
				/* this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				printf("%s: HTC rx ctrl still processing\n",
				    __func__);
				goto out;
			}

			htc->control_resp_len =
			    MIN(m->m_pkthdr.len, ATH11K_HTC_MAX_CTRL_MSG_LEN);

			m_copydata(m, 0, htc->control_resp_len,
			    htc->control_resp_buffer);

			/* Wake up any thread sleeping on sc->ctl_resp. */
			sc->ctl_resp = 1;
			wakeup(&sc->ctl_resp);
			break;
		case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			qwx_htc_suspend_complete(sc, 1);
			break;
		case ATH11K_HTC_MSG_NACK_SUSPEND:
			qwx_htc_suspend_complete(sc, 0);
			break;
		case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
			qwx_htc_wakeup_from_suspend(sc);
			break;
		default:
			printf("%s: ignoring unsolicited htc ep0 event %ld\n",
			    __func__,
			    FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
			break;
		}
		goto out;
	}

	DNPRINTF(QWX_D_HTC, "%s: rx ep %d mbuf %p\n", __func__, eid, m);

	ep->ep_ops.ep_rx_complete(sc, m);

	/* poll tx completion for interrupt disabled CE's */
	qwx_ce_poll_send_completed(sc, ep->ul_pipe_id);

	/* mbuf is now owned by the rx completion handler */
	m = NULL;
out:
	m_freem(m);
}
17197 
17198 void
17199 qwx_ce_free_ring(struct qwx_softc *sc, struct qwx_ce_ring *ring)
17200 {
17201 	bus_size_t dsize;
17202 	size_t size;
17203 
17204 	if (ring == NULL)
17205 		return;
17206 
17207 	if (ring->base_addr) {
17208 		dsize = ring->nentries * ring->desc_sz;
17209 		bus_dmamem_unmap(sc->sc_dmat, ring->base_addr, dsize);
17210 	}
17211 	if (ring->nsegs)
17212 		bus_dmamem_free(sc->sc_dmat, &ring->dsegs, ring->nsegs);
17213 	if (ring->dmap)
17214 		bus_dmamap_destroy(sc->sc_dmat, ring->dmap);
17215 
17216 	size = sizeof(*ring) + (ring->nentries *
17217 	    sizeof(ring->per_transfer_context[0]));
17218 	free(ring, M_DEVBUF, size);
17219 }
17220 
/*
 * Only copy engine 4 requires the shadow register workaround;
 * return non-zero for that CE id, zero for all others.
 */
static inline int
qwx_ce_need_shadow_fix(int ce_id)
{
	return ce_id == 4;
}
17227 
17228 void
17229 qwx_ce_stop_shadow_timers(struct qwx_softc *sc)
17230 {
17231 	int i;
17232 
17233 	if (!sc->hw_params.supports_shadow_regs)
17234 		return;
17235 
17236 	for (i = 0; i < sc->hw_params.ce_count; i++)
17237 		if (qwx_ce_need_shadow_fix(i))
17238 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
17239 }
17240 
17241 void
17242 qwx_ce_free_pipes(struct qwx_softc *sc)
17243 {
17244 	struct qwx_ce_pipe *pipe;
17245 	int i;
17246 
17247 	for (i = 0; i < sc->hw_params.ce_count; i++) {
17248 		pipe = &sc->ce.ce_pipe[i];
17249 		if (qwx_ce_need_shadow_fix(i))
17250 			qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
17251 		if (pipe->src_ring) {
17252 			qwx_ce_free_ring(sc, pipe->src_ring);
17253 			pipe->src_ring = NULL;
17254 		}
17255 
17256 		if (pipe->dest_ring) {
17257 			qwx_ce_free_ring(sc, pipe->dest_ring);
17258 			pipe->dest_ring = NULL;
17259 		}
17260 
17261 		if (pipe->status_ring) {
17262 			qwx_ce_free_ring(sc, pipe->status_ring);
17263 			pipe->status_ring = NULL;
17264 		}
17265 	}
17266 }
17267 
17268 int
17269 qwx_ce_alloc_src_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
17270     const struct ce_attr *attr)
17271 {
17272 	struct qwx_softc *sc = pipe->sc;
17273 	struct qwx_tx_data *txdata;
17274 	size_t size;
17275 	int ret, i;
17276 
17277 	/* Allocate an array of qwx_tx_data structures. */
17278 	txdata = mallocarray(pipe->src_ring->nentries, sizeof(*txdata),
17279 	    M_DEVBUF, M_NOWAIT | M_ZERO);
17280 	if (txdata == NULL)
17281 		return ENOMEM;
17282 
17283 	size = sizeof(*txdata) * pipe->src_ring->nentries;
17284 
17285 	/* Create per-transfer DMA maps. */
17286 	for (i = 0; i < pipe->src_ring->nentries; i++) {
17287 		struct qwx_tx_data *ctx = &txdata[i];
17288 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
17289 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
17290 		if (ret) {
17291 			int j;
17292 			for (j = 0; j < i; j++) {
17293 				struct qwx_tx_data *ctx = &txdata[j];
17294 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
17295 			}
17296 			free(txdata, M_DEVBUF, size);
17297 			return ret;
17298 		}
17299 		pipe->src_ring->per_transfer_context[i] = ctx;
17300 	}
17301 
17302 	return 0;
17303 }
17304 
17305 int
17306 qwx_ce_alloc_dest_ring_transfer_contexts(struct qwx_ce_pipe *pipe,
17307     const struct ce_attr *attr)
17308 {
17309 	struct qwx_softc *sc = pipe->sc;
17310 	struct qwx_rx_data *rxdata;
17311 	size_t size;
17312 	int ret, i;
17313 
17314 	/* Allocate an array of qwx_rx_data structures. */
17315 	rxdata = mallocarray(pipe->dest_ring->nentries, sizeof(*rxdata),
17316 	    M_DEVBUF, M_NOWAIT | M_ZERO);
17317 	if (rxdata == NULL)
17318 		return ENOMEM;
17319 
17320 	size = sizeof(*rxdata) * pipe->dest_ring->nentries;
17321 
17322 	/* Create per-transfer DMA maps. */
17323 	for (i = 0; i < pipe->dest_ring->nentries; i++) {
17324 		struct qwx_rx_data *ctx = &rxdata[i];
17325 		ret = bus_dmamap_create(sc->sc_dmat, attr->src_sz_max, 1,
17326 		    attr->src_sz_max, 0, BUS_DMA_NOWAIT, &ctx->map);
17327 		if (ret) {
17328 			int j;
17329 			for (j = 0; j < i; j++) {
17330 				struct qwx_rx_data *ctx = &rxdata[j];
17331 				bus_dmamap_destroy(sc->sc_dmat, ctx->map);
17332 			}
17333 			free(rxdata, M_DEVBUF, size);
17334 			return ret;
17335 		}
17336 		pipe->dest_ring->per_transfer_context[i] = ctx;
17337 	}
17338 
17339 	return 0;
17340 }
17341 
/*
 * Allocate a CE ring: the ring structure with its trailing
 * per-transfer context array, plus DMA-able descriptor memory of
 * nentries * desc_sz bytes.
 *
 * nentries is assumed to be a power of two (nentries_mask is set to
 * nentries - 1); callers round up via qwx_roundup_pow_of_two().
 * Returns NULL on failure; partially constructed state is released
 * through qwx_ce_free_ring(), which checks each resource before
 * freeing it.
 */
struct qwx_ce_ring *
qwx_ce_alloc_ring(struct qwx_softc *sc, int nentries, size_t desc_sz)
{
	struct qwx_ce_ring *ce_ring;
	size_t size = sizeof(*ce_ring) +
	    (nentries * sizeof(ce_ring->per_transfer_context[0]));
	bus_size_t dsize;

	ce_ring = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ce_ring == NULL)
		return NULL;

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;
	ce_ring->desc_sz = desc_sz;

	dsize = nentries * desc_sz;
	if (bus_dmamap_create(sc->sc_dmat, dsize, 1, dsize, 0, BUS_DMA_NOWAIT,
	    &ce_ring->dmap)) {
		/* No DMA state exists yet; free the bare structure. */
		free(ce_ring, M_DEVBUF, size);
		return NULL;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, dsize, CE_DESC_RING_ALIGN, 0,
	    &ce_ring->dsegs, 1, &ce_ring->nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamem_map(sc->sc_dmat, &ce_ring->dsegs, 1, dsize,
	    &ce_ring->base_addr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	if (bus_dmamap_load(sc->sc_dmat, ce_ring->dmap, ce_ring->base_addr,
	    dsize, NULL, BUS_DMA_NOWAIT)) {
		qwx_ce_free_ring(sc, ce_ring);
		return NULL;
	}

	return ce_ring;
}
17386 
/*
 * Allocate the rings of one CE pipe according to its host CE
 * attributes: a source ring (with per-transfer tx state) if the
 * pipe transmits, and destination plus destination-status rings
 * (with per-transfer rx state) if it receives.
 *
 * Returns 0 or ENOMEM. Partially allocated rings are left attached
 * to the pipe for the caller (qwx_ce_alloc_pipes) to release via
 * qwx_ce_free_pipes().
 */
int
qwx_ce_alloc_pipe(struct qwx_softc *sc, int ce_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
	struct qwx_ce_ring *ring;
	int nentries;
	size_t desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		/* Ring sizes must be powers of two. */
		nentries = qwx_roundup_pow_of_two(attr->src_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->src_ring = ring;
		if (qwx_ce_alloc_src_ring_transfer_contexts(pipe, attr))
			return ENOMEM;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = qwx_roundup_pow_of_two(attr->dest_nentries);
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->dest_ring = ring;
		if (qwx_ce_alloc_dest_ring_transfer_contexts(pipe, attr))
			return ENOMEM;

		/* The status ring has one entry per destination entry. */
		desc_sz = qwx_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = qwx_ce_alloc_ring(sc, nentries, desc_sz);
		if (ring == NULL)
			return ENOMEM;
		pipe->status_ring = ring;
	}

	return 0;
}
17430 
17431 void
17432 qwx_ce_rx_pipe_cleanup(struct qwx_ce_pipe *pipe)
17433 {
17434 	struct qwx_softc *sc = pipe->sc;
17435 	struct qwx_ce_ring *ring = pipe->dest_ring;
17436 	void *ctx;
17437 	struct qwx_rx_data *rx_data;
17438 	int i;
17439 
17440 	if (!(ring && pipe->buf_sz))
17441 		return;
17442 
17443 	for (i = 0; i < ring->nentries; i++) {
17444 		ctx = ring->per_transfer_context[i];
17445 		if (!ctx)
17446 			continue;
17447 
17448 		rx_data = (struct qwx_rx_data *)ctx;
17449 		if (rx_data->m) {
17450 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
17451 			m_freem(rx_data->m);
17452 			rx_data->m = NULL;
17453 		}
17454 	}
17455 }
17456 
17457 void
17458 qwx_ce_shadow_config(struct qwx_softc *sc)
17459 {
17460 	int i;
17461 
17462 	for (i = 0; i < sc->hw_params.ce_count; i++) {
17463 		if (sc->hw_params.host_ce_config[i].src_nentries)
17464 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_SRC, i);
17465 
17466 		if (sc->hw_params.host_ce_config[i].dest_nentries) {
17467 			qwx_hal_srng_update_shadow_config(sc, HAL_CE_DST, i);
17468 
17469 			qwx_hal_srng_update_shadow_config(sc,
17470 			    HAL_CE_DST_STATUS, i);
17471 		}
17472 	}
17473 }
17474 
17475 void
17476 qwx_ce_get_shadow_config(struct qwx_softc *sc, uint32_t **shadow_cfg,
17477     uint32_t *shadow_cfg_len)
17478 {
17479 	if (!sc->hw_params.supports_shadow_regs)
17480 		return;
17481 
17482 	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
17483 
17484 	/* shadow is already configured */
17485 	if (*shadow_cfg_len)
17486 		return;
17487 
17488 	/* shadow isn't configured yet, configure now.
17489 	 * non-CE srngs are configured firstly, then
17490 	 * all CE srngs.
17491 	 */
17492 	qwx_hal_srng_shadow_config(sc);
17493 	qwx_ce_shadow_config(sc);
17494 
17495 	/* get the shadow configuration */
17496 	qwx_hal_srng_get_shadow_config(sc, shadow_cfg, shadow_cfg_len);
17497 }
17498 
17499 void
17500 qwx_ce_cleanup_pipes(struct qwx_softc *sc)
17501 {
17502 	struct qwx_ce_pipe *pipe;
17503 	int pipe_num;
17504 
17505 	qwx_ce_stop_shadow_timers(sc);
17506 
17507 	for (pipe_num = 0; pipe_num < sc->hw_params.ce_count; pipe_num++) {
17508 		pipe = &sc->ce.ce_pipe[pipe_num];
17509 		qwx_ce_rx_pipe_cleanup(pipe);
17510 
17511 		/* Cleanup any src CE's which have interrupts disabled */
17512 		qwx_ce_poll_send_completed(sc, pipe_num);
17513 	}
17514 }
17515 
17516 int
17517 qwx_ce_alloc_pipes(struct qwx_softc *sc)
17518 {
17519 	struct qwx_ce_pipe *pipe;
17520 	int i;
17521 	int ret;
17522 	const struct ce_attr *attr;
17523 
17524 	for (i = 0; i < sc->hw_params.ce_count; i++) {
17525 		attr = &sc->hw_params.host_ce_config[i];
17526 		pipe = &sc->ce.ce_pipe[i];
17527 		pipe->pipe_num = i;
17528 		pipe->sc = sc;
17529 		pipe->buf_sz = attr->src_sz_max;
17530 
17531 		ret = qwx_ce_alloc_pipe(sc, i);
17532 		if (ret) {
17533 			/* Free any partial successful allocation */
17534 			qwx_ce_free_pipes(sc);
17535 			return ret;
17536 		}
17537 	}
17538 
17539 	return 0;
17540 }
17541 
/* Map a CE id to its MSI data index; the mapping is 1:1 here. */
void
qwx_get_ce_msi_idx(struct qwx_softc *sc, uint32_t ce_id,
    uint32_t *msi_data_idx)
{
	*msi_data_idx = ce_id;
}
17548 
17549 void
17550 qwx_ce_srng_msi_ring_params_setup(struct qwx_softc *sc, uint32_t ce_id,
17551     struct hal_srng_params *ring_params)
17552 {
17553 	uint32_t msi_data_start = 0;
17554 	uint32_t msi_data_count = 1, msi_data_idx;
17555 	uint32_t msi_irq_start = 0;
17556 	uint32_t addr_lo;
17557 	uint32_t addr_hi;
17558 	int ret;
17559 
17560 	ret = sc->ops.get_user_msi_vector(sc, "CE",
17561 	    &msi_data_count, &msi_data_start, &msi_irq_start);
17562 	if (ret)
17563 		return;
17564 
17565 	qwx_get_msi_address(sc, &addr_lo, &addr_hi);
17566 	qwx_get_ce_msi_idx(sc, ce_id, &msi_data_idx);
17567 
17568 	ring_params->msi_addr = addr_lo;
17569 	ring_params->msi_addr |= (((uint64_t)addr_hi) << 32);
17570 	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
17571 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
17572 }
17573 
17574 int
17575 qwx_ce_init_ring(struct qwx_softc *sc, struct qwx_ce_ring *ce_ring,
17576     int ce_id, enum hal_ring_type type)
17577 {
17578 	struct hal_srng_params params = { 0 };
17579 	int ret;
17580 
17581 	params.ring_base_paddr = ce_ring->dmap->dm_segs[0].ds_addr;
17582 	params.ring_base_vaddr = (uint32_t *)ce_ring->base_addr;
17583 	params.num_entries = ce_ring->nentries;
17584 
17585 	if (!(CE_ATTR_DIS_INTR & sc->hw_params.host_ce_config[ce_id].flags))
17586 		qwx_ce_srng_msi_ring_params_setup(sc, ce_id, &params);
17587 
17588 	switch (type) {
17589 	case HAL_CE_SRC:
17590 		if (!(CE_ATTR_DIS_INTR &
17591 		    sc->hw_params.host_ce_config[ce_id].flags))
17592 			params.intr_batch_cntr_thres_entries = 1;
17593 		break;
17594 	case HAL_CE_DST:
17595 		params.max_buffer_len =
17596 		    sc->hw_params.host_ce_config[ce_id].src_sz_max;
17597 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
17598 		    CE_ATTR_DIS_INTR)) {
17599 			params.intr_timer_thres_us = 1024;
17600 			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
17601 			params.low_threshold = ce_ring->nentries - 3;
17602 		}
17603 		break;
17604 	case HAL_CE_DST_STATUS:
17605 		if (!(sc->hw_params.host_ce_config[ce_id].flags &
17606 		    CE_ATTR_DIS_INTR)) {
17607 			params.intr_batch_cntr_thres_entries = 1;
17608 			params.intr_timer_thres_us = 0x1000;
17609 		}
17610 		break;
17611 	default:
17612 		printf("%s: Invalid CE ring type %d\n",
17613 		    sc->sc_dev.dv_xname, type);
17614 		return EINVAL;
17615 	}
17616 
17617 	/* TODO: Init other params needed by HAL to init the ring */
17618 
17619 	ret = qwx_hal_srng_setup(sc, type, ce_id, 0, &params);
17620 	if (ret < 0) {
17621 		printf("%s: failed to setup srng: ring_id %d ce_id %d\n",
17622 		    sc->sc_dev.dv_xname, ret, ce_id);
17623 		return ret;
17624 	}
17625 
17626 	ce_ring->hal_ring_id = ret;
17627 
17628 	if (sc->hw_params.supports_shadow_regs &&
17629 	    qwx_ce_need_shadow_fix(ce_id))
17630 		qwx_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
17631 		    ATH11K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id);
17632 
17633 	return 0;
17634 }
17635 
17636 int
17637 qwx_ce_init_pipes(struct qwx_softc *sc)
17638 {
17639 	struct qwx_ce_pipe *pipe;
17640 	int i;
17641 	int ret;
17642 
17643 	for (i = 0; i < sc->hw_params.ce_count; i++) {
17644 		pipe = &sc->ce.ce_pipe[i];
17645 
17646 		if (pipe->src_ring) {
17647 			ret = qwx_ce_init_ring(sc, pipe->src_ring, i,
17648 			    HAL_CE_SRC);
17649 			if (ret) {
17650 				printf("%s: failed to init src ring: %d\n",
17651 				    sc->sc_dev.dv_xname, ret);
17652 				/* Should we clear any partial init */
17653 				return ret;
17654 			}
17655 
17656 			pipe->src_ring->write_index = 0;
17657 			pipe->src_ring->sw_index = 0;
17658 		}
17659 
17660 		if (pipe->dest_ring) {
17661 			ret = qwx_ce_init_ring(sc, pipe->dest_ring, i,
17662 			    HAL_CE_DST);
17663 			if (ret) {
17664 				printf("%s: failed to init dest ring: %d\n",
17665 				    sc->sc_dev.dv_xname, ret);
17666 				/* Should we clear any partial init */
17667 				return ret;
17668 			}
17669 
17670 			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
17671 			    pipe->dest_ring->nentries - 2 : 0;
17672 
17673 			pipe->dest_ring->write_index = 0;
17674 			pipe->dest_ring->sw_index = 0;
17675 		}
17676 
17677 		if (pipe->status_ring) {
17678 			ret = qwx_ce_init_ring(sc, pipe->status_ring, i,
17679 			    HAL_CE_DST_STATUS);
17680 			if (ret) {
17681 				printf("%s: failed to init status ring: %d\n",
17682 				    sc->sc_dev.dv_xname, ret);
17683 				/* Should we clear any partial init */
17684 				return ret;
17685 			}
17686 
17687 			pipe->status_ring->write_index = 0;
17688 			pipe->status_ring->sw_index = 0;
17689 		}
17690 	}
17691 
17692 	return 0;
17693 }
17694 
17695 int
17696 qwx_hal_srng_src_num_free(struct qwx_softc *sc, struct hal_srng *srng,
17697     int sync_hw_ptr)
17698 {
17699 	uint32_t tp, hp;
17700 #ifdef notyet
17701 	lockdep_assert_held(&srng->lock);
17702 #endif
17703 	hp = srng->u.src_ring.hp;
17704 
17705 	if (sync_hw_ptr) {
17706 		tp = *srng->u.src_ring.tp_addr;
17707 		srng->u.src_ring.cached_tp = tp;
17708 	} else {
17709 		tp = srng->u.src_ring.cached_tp;
17710 	}
17711 
17712 	if (tp > hp)
17713 		return ((tp - hp) / srng->entry_size) - 1;
17714 	else
17715 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
17716 }
17717 
/*
 * Post one rx buffer (already loaded into the given DMA map) to a
 * CE pipe's destination ring and advance the ring's write index.
 * Returns 0 on success or ENOSPC when the ring is full.
 */
int
qwx_ce_rx_buf_enqueue_pipe(struct qwx_ce_pipe *pipe, bus_dmamap_t map)
{
	struct qwx_softc *sc = pipe->sc;
	struct qwx_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	uint32_t *desc;
	uint64_t paddr;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ab->ce.ce_lock);
#endif
	write_index = ring->write_index;

	srng = &sc->hal.srng_list[ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);
	/* NOTE(review): this syncs only entry_size 32-bit words of the
	 * buffer map, i.e. a descriptor-sized span, not the full buffer
	 * -- confirm this is intentional. */
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_POSTREAD);

	/* Uses the cached tail pointer (sync_hw_ptr == 0). */
	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		ret = ENOSPC;
		goto exit;
	}

	desc = qwx_hal_srng_src_get_next_entry(sc, srng);
	if (!desc) {
		ret = ENOSPC;
		goto exit;
	}

	/* Point the descriptor at the rx buffer's DMA address. */
	paddr = map->dm_segs[0].ds_addr;
	qwx_hal_ce_dst_set_desc(desc, paddr);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	qwx_hal_srng_access_end(sc, srng);
	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    srng->entry_size * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
#endif
	return ret;
}
17771 
17772 int
17773 qwx_ce_rx_post_pipe(struct qwx_ce_pipe *pipe)
17774 {
17775 	struct qwx_softc *sc = pipe->sc;
17776 	int ret = 0;
17777 	unsigned int idx;
17778 	void *ctx;
17779 	struct qwx_rx_data *rx_data;
17780 	struct mbuf *m;
17781 
17782 	if (!pipe->dest_ring)
17783 		return 0;
17784 
17785 #ifdef notyet
17786 	spin_lock_bh(&ab->ce.ce_lock);
17787 #endif
17788 	while (pipe->rx_buf_needed) {
17789 		m = m_gethdr(M_DONTWAIT, MT_DATA);
17790 		if (m == NULL) {
17791 			ret = ENOBUFS;
17792 			goto done;
17793 		}
17794 
17795 		if (pipe->buf_sz <= MCLBYTES)
17796 			MCLGET(m, M_DONTWAIT);
17797 		else
17798 			MCLGETL(m, M_DONTWAIT, pipe->buf_sz);
17799 		if ((m->m_flags & M_EXT) == 0) {
17800 			ret = ENOBUFS;
17801 			goto done;
17802 		}
17803 
17804 		idx = pipe->dest_ring->write_index;
17805 		ctx = pipe->dest_ring->per_transfer_context[idx];
17806 		rx_data = (struct qwx_rx_data *)ctx;
17807 
17808 		m->m_len = m->m_pkthdr.len = pipe->buf_sz;
17809 		ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map,
17810 		    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
17811 		if (ret) {
17812 			printf("%s: can't map mbuf (error %d)\n",
17813 			    sc->sc_dev.dv_xname, ret);
17814 			m_freem(m);
17815 			goto done;
17816 		}
17817 
17818 		ret = qwx_ce_rx_buf_enqueue_pipe(pipe, rx_data->map);
17819 		if (ret) {
17820 			printf("%s: failed to enqueue rx buf: %d\n",
17821 			    sc->sc_dev.dv_xname, ret);
17822 			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
17823 			m_freem(m);
17824 			break;
17825 		} else
17826 			rx_data->m = m;
17827 	}
17828 
17829 done:
17830 #ifdef notyet
17831 	spin_unlock_bh(&ab->ce.ce_lock);
17832 #endif
17833 	return ret;
17834 }
17835 
17836 void
17837 qwx_ce_rx_post_buf(struct qwx_softc *sc)
17838 {
17839 	struct qwx_ce_pipe *pipe;
17840 	int i;
17841 	int ret;
17842 
17843 	for (i = 0; i < sc->hw_params.ce_count; i++) {
17844 		pipe = &sc->ce.ce_pipe[i];
17845 		ret = qwx_ce_rx_post_pipe(pipe);
17846 		if (ret) {
17847 			if (ret == ENOBUFS)
17848 				continue;
17849 
17850 			printf("%s: failed to post rx buf to pipe: %d err: %d\n",
17851 			    sc->sc_dev.dv_xname, i, ret);
17852 #ifdef notyet
17853 			mod_timer(&ab->rx_replenish_retry,
17854 				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
17855 #endif
17856 
17857 			return;
17858 		}
17859 	}
17860 }
17861 
/*
 * Reap the next completed receive transfer from a copy engine pipe.
 * Reads the next descriptor from the pipe's status ring, stores the
 * received length in *nbytes and, if requested, the per-transfer
 * context of the matching destination ring entry in
 * *per_transfer_contextp.  Advances the destination ring software
 * index and bumps rx_buf_needed so the buffer gets replenished.
 * Returns 0 on success or EIO if no valid completion is available.
 */
int
qwx_ce_completed_recv_next(struct qwx_ce_pipe *pipe,
    void **per_transfer_contextp, int *nbytes)
{
	struct qwx_softc *sc = pipe->sc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	uint32_t *desc;
	int ret = 0;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->status_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	desc = qwx_hal_srng_dst_get_next_entry(sc, srng);
	if (!desc) {
		ret = EIO;
		goto err;
	}

	/* A zero-length status descriptor is treated as invalid. */
	*nbytes = qwx_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = EIO;
		goto err;
	}

	if (per_transfer_contextp) {
		*per_transfer_contextp =
		    pipe->dest_ring->per_transfer_context[sw_index];
	}

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	/* The consumed buffer must be replaced by the caller's refill. */
	pipe->rx_buf_needed++;
err:
	qwx_hal_srng_access_end(sc, srng);
#ifdef notyet
	spin_unlock_bh(&srng->lock);
	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
17913 
17914 int
17915 qwx_ce_recv_process_cb(struct qwx_ce_pipe *pipe)
17916 {
17917 	struct qwx_softc *sc = pipe->sc;
17918 	struct mbuf *m;
17919 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
17920 	void *transfer_context;
17921 	unsigned int nbytes, max_nbytes;
17922 	int ret = 0, err;
17923 
17924 	while (qwx_ce_completed_recv_next(pipe, &transfer_context,
17925 	    &nbytes) == 0) {
17926 		struct qwx_rx_data *rx_data = transfer_context;
17927 
17928 		bus_dmamap_unload(sc->sc_dmat, rx_data->map);
17929 		m = rx_data->m;
17930 		rx_data->m = NULL;
17931 
17932 		max_nbytes = m->m_pkthdr.len;
17933 		if (max_nbytes < nbytes) {
17934 			printf("%s: received more than expected (nbytes %d, "
17935 			    "max %d)", __func__, nbytes, max_nbytes);
17936 			m_freem(m);
17937 			continue;
17938 		}
17939 		m->m_len = m->m_pkthdr.len = nbytes;
17940 		ml_enqueue(&ml, m);
17941 		ret = 1;
17942 	}
17943 
17944 	while ((m = ml_dequeue(&ml))) {
17945 		DNPRINTF(QWX_D_CE, "%s: rx ce pipe %d len %d\n", __func__,
17946 		    pipe->pipe_num, m->m_len);
17947 		pipe->recv_cb(sc, m);
17948 	}
17949 
17950 	err = qwx_ce_rx_post_pipe(pipe);
17951 	if (err && err != ENOBUFS) {
17952 		printf("%s: failed to post rx buf to pipe: %d err: %d\n",
17953 		    __func__, pipe->pipe_num, err);
17954 #ifdef notyet
17955 		mod_timer(&ab->rx_replenish_retry,
17956 			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
17957 #endif
17958 	}
17959 
17960 	return ret;
17961 }
17962 
17963 int
17964 qwx_ce_per_engine_service(struct qwx_softc *sc, uint16_t ce_id)
17965 {
17966 	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
17967 	const struct ce_attr *attr = &sc->hw_params.host_ce_config[ce_id];
17968 	int ret = 0;
17969 
17970 	if (attr->src_nentries) {
17971 		if (qwx_ce_tx_process_cb(pipe))
17972 			ret = 1;
17973 	}
17974 
17975 	if (pipe->recv_cb) {
17976 		if (qwx_ce_recv_process_cb(pipe))
17977 			ret = 1;
17978 	}
17979 
17980 	return ret;
17981 }
17982 
/*
 * Submit an mbuf for transmission on a copy engine pipe.
 * The tx_data DMA map for the current write index must already be
 * loaded by the caller; this function only fills in the source ring
 * descriptor.  Returns 0 on success, ESHUTDOWN while a crash flush is
 * in progress, or ENOBUFS when the source ring is full.
 */
int
qwx_ce_send(struct qwx_softc *sc, struct mbuf *m, uint8_t pipe_id,
    uint16_t transfer_id)
{
	struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	uint32_t *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	uint8_t byte_swap_data = 0;
	int num_used;
	uint64_t paddr;
	void *ctx;
	struct qwx_tx_data *tx_data;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
#ifdef notyet
		spin_lock_bh(&ab->ce.ce_lock);
#endif
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
			    write_index;
#ifdef notyet
		spin_unlock_bh(&ab->ce.ce_lock);
#endif
		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			qwx_ce_poll_send_completed(sc, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;
#ifdef notyet
	spin_lock_bh(&ab->ce.ce_lock);
#endif
	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &sc->hal.srng_list[pipe->src_ring->hal_ring_id];
#ifdef notyet
	spin_lock_bh(&srng->lock);
#endif
	qwx_hal_srng_access_begin(sc, srng);

	if (qwx_hal_srng_src_num_free(sc, srng, 0) < 1) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	desc = qwx_hal_srng_src_get_next_reaped(sc, srng);
	if (!desc) {
		qwx_hal_srng_access_end(sc, srng);
		ret = ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	/* The DMA map was loaded by the caller; fetch its address. */
	ctx = pipe->src_ring->per_transfer_context[write_index];
	tx_data = (struct qwx_tx_data *)ctx;

	paddr = tx_data->map->dm_segs[0].ds_addr;
	qwx_hal_ce_src_set_desc(desc, paddr, m->m_pkthdr.len,
	    transfer_id, byte_swap_data);

	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
	    write_index);

	qwx_hal_srng_access_end(sc, srng);

	if (qwx_ce_need_shadow_fix(pipe_id))
		qwx_dp_shadow_start_timer(sc, srng, &sc->ce.hp_timer[pipe_id]);

err_unlock:
#ifdef notyet
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);
#endif
	return ret;
}
18076 
/*
 * Count the number of bits set in an antenna chain mask.
 */
int
qwx_get_num_chains(uint32_t mask)
{
	int n;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	for (n = 0; mask != 0; mask &= mask - 1)
		n++;

	return n;
}
18090 
/*
 * Program the tx and rx chain masks for a pdev via WMI and update the
 * cached chain counts in the softc.  Returns 0 on success or the errno
 * from the failing WMI call.
 */
int
qwx_set_antenna(struct qwx_pdev *pdev, uint32_t tx_ant, uint32_t rx_ant)
{
	struct qwx_softc *sc = pdev->sc;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Cache the requested masks before asking firmware to apply them. */
	sc->cfg_tx_chainmask = tx_ant;
	sc->cfg_rx_chainmask = rx_ant;
#if 0
	if (ar->state != ATH11K_STATE_ON &&
	    ar->state != ATH11K_STATE_RESTARTED)
		return 0;
#endif
	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_TX_CHAIN_MASK,
	    tx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set tx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, tx_ant);
		return ret;
	}

	sc->num_tx_chains = qwx_get_num_chains(tx_ant);

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_RX_CHAIN_MASK,
	    rx_ant, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set rx-chainmask: %d, req 0x%x\n",
		    sc->sc_dev.dv_xname, ret, rx_ant);
		return ret;
	}

	sc->num_rx_chains = qwx_get_num_chains(rx_ant);
#if 0
	/* Reload HT/VHT/HE capability */
	ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
	ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
#endif
	return 0;
}
18132 
/*
 * Build a WMI channel list from the net80211 channel table and send it
 * to firmware for the given pdev.  Channels with ic_flags == 0 are not
 * configured and get skipped.  Returns EINVAL if no channel is
 * configured, ENOMEM on allocation failure, or the status of the WMI
 * scan channel list command.
 */
int
qwx_reg_update_chan_list(struct qwx_softc *sc, uint8_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct scan_chan_list_params *params;
	struct ieee80211_channel *channel, *lastc;
	struct channel_param *ch;
	int num_channels = 0;
	size_t params_size;
	int ret;
#if 0
	if (ar->state == ATH11K_STATE_RESTARTING)
		return 0;
#endif
	/* First pass: count configured channels to size the allocation. */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
		num_channels++;
	}

	if (!num_channels)
		return EINVAL;

	params_size = sizeof(*params) +
	    num_channels * sizeof(*params->ch_param);

	/*
	 * TODO: This is a temporary list for qwx_wmi_send_scan_chan_list_cmd
	 * to loop over. Could that function loop over ic_channels directly?
	 */
	params = malloc(params_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!params)
		return ENOMEM;

	params->pdev_id = pdev_id;
	params->nallchans = num_channels;

	/* Second pass: fill in one channel_param per configured channel. */
	ch = params->ch_param;
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	for (channel = &ic->ic_channels[1]; channel <= lastc; channel++) {
		if (channel->ic_flags == 0)
			continue;
#ifdef notyet
		/* TODO: Set to true/false based on some condition? */
		ch->allow_ht = true;
		ch->allow_vht = true;
		ch->allow_he = true;
#endif
		/* Passive 5 GHz channels are assumed to require DFS. */
		ch->dfs_set = !!(IEEE80211_IS_CHAN_5GHZ(channel) &&
		    (channel->ic_flags & IEEE80211_CHAN_PASSIVE));
		ch->is_chan_passive = !!(channel->ic_flags &
		    IEEE80211_CHAN_PASSIVE);
		ch->is_chan_passive |= ch->dfs_set;
		ch->mhz = ieee80211_ieee2mhz(ieee80211_chan2ieee(ic, channel),
		    channel->ic_flags);
		ch->cfreq1 = ch->mhz;
		ch->minpower = 0;
		ch->maxpower = 40; /* XXX from Linux debug trace */
		ch->maxregpower = ch->maxpower;
		ch->antennamax = 0;

		/* TODO: Use appropriate phymodes */
		if (IEEE80211_IS_CHAN_A(channel))
			ch->phy_mode = MODE_11A;
		else if (IEEE80211_IS_CHAN_G(channel))
			ch->phy_mode = MODE_11G;
		else
			ch->phy_mode = MODE_11B;
#ifdef notyet
		if (channel->band == NL80211_BAND_6GHZ &&
		    cfg80211_channel_is_psc(channel))
			ch->psc_channel = true;
#endif
		DNPRINTF(QWX_D_WMI, "%s: mac channel freq %d maxpower %d "
		    "regpower %d antenna %d mode %d\n", __func__,
		    ch->mhz, ch->maxpower, ch->maxregpower,
		    ch->antennamax, ch->phy_mode);

		ch++;
		/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
		 * set_agile, reg_class_idx
		 */
	}

	ret = qwx_wmi_send_scan_chan_list_cmd(sc, pdev_id, params);
	free(params, M_DEVBUF, params_size);

	return ret;
}
18223 
/*
 * Default HTT rx TLV filter for the monitor status ring: accept MPDU
 * start and PPDU end TLVs plus the management/control/data packet
 * filter flags needed to obtain rx status such as rssi and duration.
 */
static const struct htt_rx_ring_tlv_filter qwx_mac_mon_status_filter_default = {
	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
	    HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
	    HTT_RX_FP_CTRL_FILTER_FLASG3
};
18234 
18235 int
18236 qwx_mac_register(struct qwx_softc *sc)
18237 {
18238 	/* Initialize channel counters frequency value in hertz */
18239 	sc->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
18240 
18241 	sc->free_vdev_map = (1U << (sc->num_radios * TARGET_NUM_VDEVS(sc))) - 1;
18242 
18243 	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
18244 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr, sc->mac_addr);
18245 
18246 	return 0;
18247 }
18248 
/*
 * Install (enable != 0) or clear the default rx TLV filter on the
 * monitor status rings.  The actual per-ring HTT filter setup is
 * still disabled (#if 0), so this currently only returns 0.
 */
int
qwx_mac_config_mon_status_default(struct qwx_softc *sc, int enable)
{
	/* An all-zero filter disables monitor status reporting. */
	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
	int ret = 0;
#if 0
	int i;
	struct dp_rxdma_ring *ring;
#endif

	if (enable)
		tlv_filter = qwx_mac_mon_status_filter_default;
#if 0
	for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) {
		ring = &sc->pdev_dp.rx_mon_status_refill_ring[i];
		ret = qwx_dp_tx_htt_rx_filter_setup(sc,
		    ring->refill_buf_ring.ring_id, sc->pdev_dp.mac_id + i,
		    HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter);
		if (ret)
			return ret;
	}
#endif
#if 0
	if (enable && !ar->ab->hw_params.rxdma1_enable)
		mod_timer(&ar->ab->mon_reap_timer, jiffies +
			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
#endif
	return ret;
}
18278 
/*
 * Recompute the transmit power limit as the minimum of all vifs'
 * txpower settings (clamped to the hardware range) and program it into
 * firmware for each supported band.  Firmware expects the limit in
 * units of 0.5 dBm.  Does nothing if no vif has a positive txpower.
 */
int
qwx_mac_txpower_recalc(struct qwx_softc *sc, struct qwx_pdev *pdev)
{
	struct qwx_vif *arvif;
	int ret, txpower = -1;
	uint32_t param;
	uint32_t min_tx_power = sc->target_caps.hw_min_tx_power;
	uint32_t max_tx_power = sc->target_caps.hw_max_tx_power;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Find the smallest positive txpower across all vifs. */
	TAILQ_FOREACH(arvif, &sc->vif_list, entry) {
		if (arvif->txpower <= 0)
			continue;

		if (txpower == -1)
			txpower = arvif->txpower;
		else
			txpower = MIN(txpower, arvif->txpower);
	}

	if (txpower == -1)
		return 0;

	/* txpwr is set as 2 units per dBm in FW*/
	txpower = MIN(MAX(min_tx_power, txpower), max_tx_power) * 2;
	DNPRINTF(QWX_D_MAC, "txpower to set in hw %d\n", txpower / 2);

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
		ret = qwx_wmi_pdev_set_param(sc, param, txpower,
		    pdev->pdev_id);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	DNPRINTF(QWX_D_MAC, "%s: failed to recalc txpower limit %d "
	    "using pdev param %d: %d\n", sc->sc_dev.dv_xname, txpower / 2,
	    param, ret);

	return ret;
}
18332 
/*
 * Bring a pdev into operation: apply the initial set of WMI pdev
 * parameters, enable radar detection offload and ppdu stats, set the
 * antenna configuration, push the channel list to firmware and set up
 * the default monitor status ring filter.  On success the pdev is
 * marked active in sc->pdevs_active.
 */
int
qwx_mac_op_start(struct qwx_pdev *pdev)
{
	struct qwx_softc *sc = pdev->sc;
	int ret;

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_PMF_QOS, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable PMF QOS for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable dynamic bw for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT)) {
		ret = qwx_wmi_scan_prob_req_oui(sc, sc->mac_addr,
		    pdev->pdev_id);
		if (ret) {
			printf("%s: failed to set prob req oui for "
			    "pdev %d: %i\n", sc->sc_dev.dv_xname,
			    pdev->pdev_id, ret);
			goto err;
		}
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to set ac override for ARP for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_send_dfs_phyerr_offload_enable_cmd(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to offload radar detection for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_dp_tx_htt_h2t_ppdu_stats_req(sc, HTT_PPDU_STATS_TAG_DEFAULT,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to req ppdu stats for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1,
	    pdev->pdev_id);
	if (ret) {
		printf("%s: failed to enable MESH MCAST ENABLE for "
		    "pdev %d: %d\n", sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	/* NOTE(review): return value ignored; confirm failure is harmless. */
	qwx_set_antenna(pdev, pdev->cap.tx_chain_mask, pdev->cap.rx_chain_mask);

	/* TODO: Do we need to enable ANI? */

	ret = qwx_reg_update_chan_list(sc, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to update channel list for pdev %d: %d\n",
		    sc->sc_dev.dv_xname, pdev->pdev_id, ret);
		goto err;
	}

	sc->num_started_vdevs = 0;
	sc->num_created_vdevs = 0;
	sc->num_peers = 0;
	sc->allocated_vdev_map = 0;

	/* Configure monitor status ring with default rx_filter to get rx status
	 * such as rssi, rx_duration.
	 */
	ret = qwx_mac_config_mon_status_default(sc, 1);
	if (ret) {
		printf("%s: failed to configure monitor status ring "
		    "with default rx_filter: (%d)\n",
		    sc->sc_dev.dv_xname, ret);
		goto err;
	}

	/* Configure the hash seed for hash based reo dest ring selection */
	qwx_wmi_pdev_lro_cfg(sc, pdev->pdev_id);

	/* allow device to enter IMPS */
	if (sc->hw_params.idle_ps) {
		ret = qwx_wmi_pdev_set_param(sc, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
		    1, pdev->pdev_id);
		if (ret) {
			printf("%s: failed to enable idle ps: %d\n",
			    sc->sc_dev.dv_xname, ret);
			goto err;
		}
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	sc->pdevs_active |= (1 << pdev->pdev_id);
	return 0;
err:
#ifdef notyet
	ar->state = ATH11K_STATE_OFF;
	mutex_unlock(&ar->conf_mutex);
#endif
	return ret;
}
18449 
18450 int
18451 qwx_mac_setup_vdev_params_mbssid(struct qwx_vif *arvif,
18452     uint32_t *flags, uint32_t *tx_vdev_id)
18453 {
18454 	*tx_vdev_id = 0;
18455 	*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
18456 	return 0;
18457 }
18458 
/*
 * Fill in the WMI vdev create parameters for the given vif.
 * If firmware does not offer the MBSSID-params-in-vdev-start service,
 * the MBSSID flags are set up here at create time instead.
 */
int
qwx_mac_setup_vdev_create_params(struct qwx_vif *arvif, struct qwx_pdev *pdev,
    struct vdev_create_params *params)
{
	struct qwx_softc *sc = arvif->sc;
	int ret;

	params->if_id = arvif->vdev_id;
	params->type = arvif->vdev_type;
	params->subtype = arvif->vdev_subtype;
	params->pdev_id = pdev->pdev_id;
	params->mbssid_flags = 0;
	params->mbssid_tx_vdev_id = 0;

	if (!isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
		    &params->mbssid_flags, &params->mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}

	/* Per-band chain counts: index 0 is 2 GHz, index 1 is 5 GHz. */
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		params->chains[0].tx = sc->num_tx_chains;
		params->chains[0].rx = sc->num_rx_chains;
	}
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		params->chains[1].tx = sc->num_tx_chains;
		params->chains[1].rx = sc->num_rx_chains;
	}
#if 0
	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
	    ar->supports_6ghz) {
		params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
		params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
	}
#endif
	return 0;
}
18498 
18499 int
18500 qwx_mac_op_update_vif_offload(struct qwx_softc *sc, struct qwx_pdev *pdev,
18501     struct qwx_vif *arvif)
18502 {
18503 	uint32_t param_id, param_value;
18504 	int ret;
18505 
18506 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
18507 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
18508 		param_value = ATH11K_HW_TXRX_RAW;
18509 	else
18510 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
18511 
18512 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
18513 	    param_id, param_value);
18514 	if (ret) {
18515 		printf("%s: failed to set vdev %d tx encap mode: %d\n",
18516 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18517 		return ret;
18518 	}
18519 
18520 	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
18521 	if (test_bit(ATH11K_FLAG_RAW_MODE, sc->sc_flags))
18522 		param_value = ATH11K_HW_TXRX_RAW;
18523 	else
18524 		param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
18525 
18526 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
18527 	    param_id, param_value);
18528 	if (ret) {
18529 		printf("%s: failed to set vdev %d rx decap mode: %d\n",
18530 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18531 		return ret;
18532 	}
18533 
18534 	return 0;
18535 }
18536 
/*
 * Tear down a vdev in firmware.
 * TODO: Not yet implemented; currently only logs a message.
 */
void
qwx_mac_vdev_delete(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	printf("%s: not implemented\n", __func__);
}
18542 
/*
 * Wait for firmware to complete a vdev setup operation.
 * sc->vdev_setup_done is raised elsewhere once the corresponding
 * firmware event arrives; we sleep in one-second intervals until then.
 * Returns 0 on success, ESHUTDOWN during a crash flush, or the tsleep
 * error (e.g. EWOULDBLOCK) on timeout.
 */
int
qwx_mac_vdev_setup_sync(struct qwx_softc *sc)
{
	int ret;

#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	/* Loop to tolerate spurious wakeups; timeout aborts the wait. */
	while (!sc->vdev_setup_done) {
		ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwxvdev",
		    SEC_TO_NSEC(1));
		if (ret) {
			printf("%s: vdev start timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	return 0;
}
18566 
/*
 * Configure transmit beamforming for a vif.
 * Always succeeds for now since TX beamforming is not yet supported.
 */
int
qwx_mac_set_txbf_conf(struct qwx_vif *arvif)
{
	/* TX beamforming is not yet supported. */
	return 0;
}
18573 
/*
 * Issue a WMI vdev start (restart != 0: restart) request for the
 * current BSS channel and wait for firmware to complete it.
 * Only the 11a/b/g phy modes are supported so far.
 * Returns 0 on success, ENOTSUP for an unsupported phy mode, or an
 * errno from the WMI command or the completion wait.
 */
int
qwx_mac_vdev_start_restart(struct qwx_softc *sc, struct qwx_vif *arvif,
    int pdev_id, int restart)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *chan = ic->ic_bss->ni_chan;
	struct wmi_vdev_start_req_arg arg = {};
	int ret = 0;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
#if 0
	reinit_completion(&ar->vdev_setup_done);
#endif
	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = ic->ic_dtim_period;
	arg.bcn_intval = ic->ic_lintval;

	arg.channel.freq = chan->ic_freq;
	arg.channel.band_center_freq1 = chan->ic_freq;
	arg.channel.band_center_freq2 = chan->ic_freq;

	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		arg.channel.mode = MODE_11A;
		break;
	case IEEE80211_MODE_11B:
		arg.channel.mode = MODE_11B;
		break;
	case IEEE80211_MODE_11G:
		arg.channel.mode = MODE_11G;
		break;
	default:
		printf("%s: unsupported phy mode %d\n",
		    sc->sc_dev.dv_xname, ic->ic_curmode);
		return ENOTSUP;
	}

	arg.channel.min_power = 0;
	arg.channel.max_power = 20; /* XXX */
	arg.channel.max_reg_power = 20; /* XXX */
	arg.channel.max_antenna_gain = 0; /* XXX */

	arg.pref_tx_streams = 1;
	arg.pref_rx_streams = 1;

	arg.mbssid_flags = 0;
	arg.mbssid_tx_vdev_id = 0;
	if (isset(sc->wmi.svc_map,
	    WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) {
		ret = qwx_mac_setup_vdev_params_mbssid(arvif,
		    &arg.mbssid_flags, &arg.mbssid_tx_vdev_id);
		if (ret)
			return ret;
	}
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);

		arg.channel.freq2_radar = ctx->radar_enabled;

		arg.channel.passive = arg.channel.chan_radar;

		spin_lock_bh(&ab->base_lock);
		arg.regdomain = ar->ab->dfs_region;
		spin_unlock_bh(&ab->base_lock);
	}
#endif
	/* XXX */
	arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52);

	DNPRINTF(QWX_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n",
	    __func__, arg.vdev_id, arg.channel.freq,
	    qwx_wmi_phymode_str(arg.channel.mode));

	/* Cleared before the command; set again by the completion event. */
	sc->vdev_setup_done = 0;
	ret = qwx_wmi_vdev_start(sc, &arg, pdev_id, restart);
	if (ret) {
		printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname,
		    restart ? "restart" : "start", arg.vdev_id);
		return ret;
	}

	ret = qwx_mac_vdev_setup_sync(sc);
	if (ret) {
		printf("%s: failed to synchronize setup for vdev %i %s: %d\n",
		    sc->sc_dev.dv_xname, arg.vdev_id,
		    restart ? "restart" : "start", ret);
		return ret;
	}

	if (!restart)
		sc->num_started_vdevs++;

	DNPRINTF(QWX_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id);

	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
	 * i.e dfs_cac_ms value which will be valid only for radar channels
	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
	 * done before channel usage. This flags is used to drop rx packets.
	 * during CAC.
	 */
	/* TODO Set the flag for other interface types as required */
#if 0
	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
	    chandef->chan->dfs_cac_ms &&
	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
		set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
		ath11k_dbg(ab, ATH11K_DBG_MAC,
			   "CAC Started in chan_freq %d for vdev %d\n",
			   arg.channel.freq, arg.vdev_id);
	}
#endif
	ret = qwx_mac_set_txbf_conf(arvif);
	if (ret)
		printf("%s: failed to set txbf conf for vdev %d: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);

	return 0;
}
18700 
/*
 * Restart a previously started vdev on the current BSS channel.
 */
int
qwx_mac_vdev_restart(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	const int restart = 1;

	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, restart);
}
18706 
/*
 * Start a vdev on the current BSS channel for the first time.
 */
int
qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
{
	const int restart = 0;

	return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, restart);
}
18712 
18713 int
18714 qwx_mac_op_add_interface(struct qwx_pdev *pdev)
18715 {
18716 	struct qwx_softc *sc = pdev->sc;
18717 	struct ieee80211com *ic = &sc->sc_ic;
18718 	struct qwx_vif *arvif = NULL;
18719 	struct vdev_create_params vdev_param = { 0 };
18720 #if 0
18721 	struct peer_create_params peer_param;
18722 #endif
18723 	uint32_t param_id, param_value;
18724 	uint16_t nss;
18725 #if 0
18726 	int i;
18727 	int fbret;
18728 #endif
18729 	int ret, bit;
18730 #ifdef notyet
18731 	mutex_lock(&ar->conf_mutex);
18732 #endif
18733 #if 0
18734 	if (vif->type == NL80211_IFTYPE_AP &&
18735 	    ar->num_peers > (ar->max_num_peers - 1)) {
18736 		ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
18737 		ret = -ENOBUFS;
18738 		goto err;
18739 	}
18740 #endif
18741 	if (sc->num_created_vdevs > (TARGET_NUM_VDEVS(sc) - 1)) {
18742 		printf("%s: failed to create vdev %u, reached vdev limit %d\n",
18743 		    sc->sc_dev.dv_xname, sc->num_created_vdevs,
18744 		    TARGET_NUM_VDEVS(sc));
18745 		ret = EBUSY;
18746 		goto err;
18747 	}
18748 
18749 	arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
18750 	if (arvif == NULL) {
18751 		ret = ENOMEM;
18752 		goto err;
18753 	}
18754 
18755 	arvif->sc = sc;
18756 #if 0
18757 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
18758 			  ath11k_mac_vif_sta_connection_loss_work);
18759 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
18760 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
18761 		arvif->bitrate_mask.control[i].gi = 0;
18762 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
18763 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
18764 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
18765 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
18766 		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
18767 		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
18768 	}
18769 #endif
18770 
18771 	if (sc->free_vdev_map == 0) {
18772 		printf("%s: cannot add interface; all vdevs are busy\n",
18773 		    sc->sc_dev.dv_xname);
18774 		ret = EBUSY;
18775 		goto err;
18776 	}
18777 	bit = ffs(sc->free_vdev_map) - 1;
18778 
18779 	arvif->vdev_id = bit;
18780 	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
18781 
18782 	switch (ic->ic_opmode) {
18783 	case IEEE80211_M_STA:
18784 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
18785 		break;
18786 #if 0
18787 	case NL80211_IFTYPE_MESH_POINT:
18788 		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
18789 		fallthrough;
18790 	case NL80211_IFTYPE_AP:
18791 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
18792 		break;
18793 	case NL80211_IFTYPE_MONITOR:
18794 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
18795 		ar->monitor_vdev_id = bit;
18796 		break;
18797 #endif
18798 	default:
18799 		printf("%s: invalid operating mode %d\n",
18800 		    sc->sc_dev.dv_xname, ic->ic_opmode);
18801 		ret = EINVAL;
18802 		goto err;
18803 	}
18804 
18805 	DNPRINTF(QWX_D_MAC,
18806 	    "%s: add interface id %d type %d subtype %d map 0x%x\n",
18807 	    __func__, arvif->vdev_id, arvif->vdev_type,
18808 	    arvif->vdev_subtype, sc->free_vdev_map);
18809 
18810 	ret = qwx_mac_setup_vdev_create_params(arvif, pdev, &vdev_param);
18811 	if (ret) {
18812 		printf("%s: failed to create vdev parameters %d: %d\n",
18813 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18814 		goto err;
18815 	}
18816 
18817 	ret = qwx_wmi_vdev_create(sc, sc->mac_addr, &vdev_param);
18818 	if (ret) {
18819 		printf("%s: failed to create WMI vdev %d %s: %d\n",
18820 		    sc->sc_dev.dv_xname, arvif->vdev_id,
18821 		    ether_sprintf(sc->mac_addr), ret);
18822 		goto err;
18823 	}
18824 
18825 	sc->num_created_vdevs++;
18826 	DNPRINTF(QWX_D_MAC, "%s: vdev %s created, vdev_id %d\n", __func__,
18827 	    ether_sprintf(sc->mac_addr), arvif->vdev_id);
18828 	sc->allocated_vdev_map |= 1U << arvif->vdev_id;
18829 	sc->free_vdev_map &= ~(1U << arvif->vdev_id);
18830 #ifdef notyet
18831 	spin_lock_bh(&ar->data_lock);
18832 #endif
18833 	TAILQ_INSERT_TAIL(&sc->vif_list, arvif, entry);
18834 #ifdef notyet
18835 	spin_unlock_bh(&ar->data_lock);
18836 #endif
18837 	ret = qwx_mac_op_update_vif_offload(sc, pdev, arvif);
18838 	if (ret)
18839 		goto err_vdev_del;
18840 
18841 	nss = qwx_get_num_chains(sc->cfg_tx_chainmask) ? : 1;
18842 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
18843 	    WMI_VDEV_PARAM_NSS, nss);
18844 	if (ret) {
18845 		printf("%s: failed to set vdev %d chainmask 0x%x, nss %d: %d\n",
18846 		    sc->sc_dev.dv_xname, arvif->vdev_id, sc->cfg_tx_chainmask,
18847 		    nss, ret);
18848 		goto err_vdev_del;
18849 	}
18850 
18851 	switch (arvif->vdev_type) {
18852 #if 0
18853 	case WMI_VDEV_TYPE_AP:
18854 		peer_param.vdev_id = arvif->vdev_id;
18855 		peer_param.peer_addr = vif->addr;
18856 		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
18857 		ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
18858 		if (ret) {
18859 			ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
18860 				    arvif->vdev_id, ret);
18861 			goto err_vdev_del;
18862 		}
18863 
18864 		ret = ath11k_mac_set_kickout(arvif);
18865 		if (ret) {
18866 			ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
18867 				    arvif->vdev_id, ret);
18868 			goto err_peer_del;
18869 		}
18870 
18871 		ath11k_mac_11d_scan_stop_all(ar->ab);
18872 		break;
18873 #endif
18874 	case WMI_VDEV_TYPE_STA:
18875 		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
18876 		param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
18877 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
18878 		    pdev->pdev_id, param_id, param_value);
18879 		if (ret) {
18880 			printf("%s: failed to set vdev %d RX wake policy: %d\n",
18881 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18882 			goto err_peer_del;
18883 		}
18884 
18885 		param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
18886 		param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
18887 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
18888 		    pdev->pdev_id, param_id, param_value);
18889 		if (ret) {
18890 			printf("%s: failed to set vdev %d "
18891 			    "TX wake threshold: %d\n",
18892 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18893 			goto err_peer_del;
18894 		}
18895 
18896 		param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
18897 		param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
18898 		ret = qwx_wmi_set_sta_ps_param(sc, arvif->vdev_id,
18899 		    pdev->pdev_id, param_id, param_value);
18900 		if (ret) {
18901 			printf("%s: failed to set vdev %d pspoll count: %d\n",
18902 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18903 			goto err_peer_del;
18904 		}
18905 
18906 		ret = qwx_wmi_pdev_set_ps_mode(sc, arvif->vdev_id,
18907 		    pdev->pdev_id, WMI_STA_PS_MODE_DISABLED);
18908 		if (ret) {
18909 			printf("%s: failed to disable vdev %d ps mode: %d\n",
18910 			    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18911 			goto err_peer_del;
18912 		}
18913 
18914 		if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
18915 			sc->completed_11d_scan = 0;
18916 			sc->state_11d = ATH11K_11D_PREPARING;
18917 		}
18918 		break;
18919 #if 0
18920 	case WMI_VDEV_TYPE_MONITOR:
18921 		set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
18922 		break;
18923 #endif
18924 	default:
18925 		printf("%s: invalid vdev type %d\n",
18926 		    sc->sc_dev.dv_xname, arvif->vdev_type);
18927 		ret = EINVAL;
18928 		goto err;
18929 	}
18930 
18931 	arvif->txpower = 40;
18932 	ret = qwx_mac_txpower_recalc(sc, pdev);
18933 	if (ret)
18934 		goto err_peer_del;
18935 
18936 	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
18937 	param_value = ic->ic_rtsthreshold;
18938 	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
18939 	    param_id, param_value);
18940 	if (ret) {
18941 		printf("%s: failed to set rts threshold for vdev %d: %d\n",
18942 		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
18943 		goto err_peer_del;
18944 	}
18945 
18946 	qwx_dp_vdev_tx_attach(sc, pdev, arvif);
18947 #if 0
18948 	if (vif->type != NL80211_IFTYPE_MONITOR &&
18949 	    test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
18950 		ret = ath11k_mac_monitor_vdev_create(ar);
18951 		if (ret)
18952 			ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
18953 				    ret);
18954 	}
18955 
18956 	mutex_unlock(&ar->conf_mutex);
18957 #endif
18958 	return 0;
18959 
18960 err_peer_del:
18961 #if 0
18962 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
18963 		fbret = qwx_peer_delete(sc, arvif->vdev_id, vif->addr);
18964 		if (fbret) {
18965 			printf("%s: fallback fail to delete peer addr %pM "
18966 			    "vdev_id %d ret %d\n", sc->sc_dev.dv_xname,
18967 			    vif->addr, arvif->vdev_id, fbret);
18968 			goto err;
18969 		}
18970 	}
18971 #endif
18972 err_vdev_del:
18973 	qwx_mac_vdev_delete(sc, arvif);
18974 #ifdef notyet
18975 	spin_lock_bh(&ar->data_lock);
18976 #endif
18977 	TAILQ_REMOVE(&sc->vif_list, arvif, entry);
18978 #ifdef notyet
18979 	spin_unlock_bh(&ar->data_lock);
18980 #endif
18981 
18982 err:
18983 #ifdef notyet
18984 	mutex_unlock(&ar->conf_mutex);
18985 #endif
18986 	free(arvif, M_DEVBUF, sizeof(*arvif));
18987 	return ret;
18988 }
18989 
18990 int
18991 qwx_mac_start(struct qwx_softc *sc)
18992 {
18993 	struct qwx_pdev *pdev;
18994 	int i, error;
18995 
18996 	for (i = 0; i < sc->num_radios; i++) {
18997 		pdev = &sc->pdevs[i];
18998 		error = qwx_mac_op_start(pdev);
18999 		if (error)
19000 			return error;
19001 
19002 		error = qwx_mac_op_add_interface(pdev);
19003 		if (error)
19004 			return error;
19005 	}
19006 
19007 	return 0;
19008 }
19009 
void
qwx_init_task(void *arg)
{
	struct qwx_softc *sc = arg;

	/*
	 * Task callback; 'arg' is the softc. Presumably intended to
	 * (re)initialize the device, e.g. after a fatal firmware error
	 * -- TODO confirm once implemented. For now this is a stub
	 * which only logs that it was invoked.
	 */
	printf("%s: %s not implemented\n", sc->sc_dev.dv_xname, __func__);
}
19017 
/*
 * Start a firmware-offloaded 802.11d scan (country/regulatory
 * detection) on the given vif, if preconditions allow: no other vdev
 * may already own the 11d scan, the firmware must advertise the 11d
 * offload service, and we must be in station mode. On failure to
 * start, the 11d state machine falls back to idle.
 */
void
qwx_mac_11d_scan_start(struct qwx_softc *sc, struct qwx_vif *arvif)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct wmi_11d_scan_start_params param;
	int ret;
#ifdef notyet
	mutex_lock(&ar->ab->vdev_id_11d_lock);
#endif
	DNPRINTF(QWX_D_MAC, "%s: vdev id for 11d scan %d\n", __func__,
	    sc->vdev_id_11d_scan);
#if 0
	if (ar->regdom_set_by_user)
		goto fin;
#endif
	/* Only one vdev may run the 11d scan at a time. */
	if (sc->vdev_id_11d_scan != QWX_11D_INVALID_VDEV_ID)
		goto fin;

	/* Firmware must support 11d scan offload. */
	if (!isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD))
		goto fin;

	/* 11d scans are only started in station mode. */
	if (ic->ic_opmode != IEEE80211_M_STA)
		goto fin;

	param.vdev_id = arvif->vdev_id;
	param.start_interval_msec = 0;
	param.scan_period_msec = QWX_SCAN_11D_INTERVAL;

	DNPRINTF(QWX_D_MAC, "%s: start 11d scan\n", __func__);

	ret = qwx_wmi_send_11d_scan_start_cmd(sc, &param,
	   0 /* TODO: derive pdev ID from arvif somehow? */);
	if (ret) {
		printf("%s: failed to start 11d scan; vdev: %d ret: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id, ret);
	} else {
		/* Record which vdev owns the scan and advance the state. */
		sc->vdev_id_11d_scan = arvif->vdev_id;
		if (sc->state_11d == ATH11K_11D_PREPARING)
			sc->state_11d = ATH11K_11D_RUNNING;
	}
fin:
	/* If the scan was not started, fall back to the idle state. */
	if (sc->state_11d == ATH11K_11D_PREPARING) {
		sc->state_11d = ATH11K_11D_IDLE;
		sc->completed_11d_scan = 0;
	}
#ifdef notyet
	mutex_unlock(&ar->ab->vdev_id_11d_lock);
#endif
}
19067 
/*
 * Transition the scan state machine back to IDLE, cancel the scan
 * watchdog, and notify net80211 that the scan ended (unless it was a
 * remain-on-channel request). Safe to call in any scan state; does
 * nothing when already idle.
 */
void
qwx_mac_scan_finish(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	enum ath11k_scan_state ostate;

#ifdef notyet
	lockdep_assert_held(&ar->data_lock);
#endif
	ostate = sc->scan.state;
	switch (ostate) {
	case ATH11K_SCAN_IDLE:
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
#if 0
		if (ar->scan.is_roc && ar->scan.roc_notify)
			ieee80211_remain_on_channel_expired(ar->hw);
		fallthrough;
#endif
		/* FALLTHROUGH -- RUNNING/ABORTING share STARTING's cleanup. */
	case ATH11K_SCAN_STARTING:
		sc->scan.state = ATH11K_SCAN_IDLE;
		sc->scan_channel = 0;
		sc->scan.roc_freq = 0;

		/* Cancel the pending scan timeout handler. */
		timeout_del(&sc->scan.timeout);
		if (!sc->scan.is_roc)
			ieee80211_end_scan(ifp);
#if 0
		complete_all(&ar->scan.completed);
#endif
		break;
	}
}
19103 
19104 int
19105 qwx_mac_get_rate_hw_value(struct ieee80211com *ic,
19106     struct ieee80211_node *ni, int bitrate)
19107 {
19108 	uint32_t preamble;
19109 	uint16_t hw_value;
19110 	int shortpre = 0;
19111 
19112 	if (IEEE80211_IS_CHAN_CCK(ni->ni_chan))
19113 		preamble = WMI_RATE_PREAMBLE_CCK;
19114 	else
19115 		preamble = WMI_RATE_PREAMBLE_OFDM;
19116 
19117 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
19118 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
19119 		shortpre = 1;
19120 
19121 	switch (bitrate) {
19122 	case 2:
19123 		hw_value = ATH11K_HW_RATE_CCK_LP_1M;
19124 		break;
19125 	case 4:
19126 		if (shortpre)
19127 			hw_value = ATH11K_HW_RATE_CCK_SP_2M;
19128 		else
19129 			hw_value = ATH11K_HW_RATE_CCK_LP_2M;
19130 		break;
19131 	case 11:
19132 		if (shortpre)
19133 			hw_value = ATH11K_HW_RATE_CCK_SP_5_5M;
19134 		else
19135 			hw_value = ATH11K_HW_RATE_CCK_LP_5_5M;
19136 		break;
19137 	case 22:
19138 		if (shortpre)
19139 			hw_value = ATH11K_HW_RATE_CCK_SP_11M;
19140 		else
19141 			hw_value = ATH11K_HW_RATE_CCK_LP_11M;
19142 		break;
19143 	case 12:
19144 		hw_value = ATH11K_HW_RATE_OFDM_6M;
19145 		break;
19146 	case 18:
19147 		hw_value = ATH11K_HW_RATE_OFDM_9M;
19148 		break;
19149 	case 24:
19150 		hw_value = ATH11K_HW_RATE_OFDM_12M;
19151 		break;
19152 	case 36:
19153 		hw_value = ATH11K_HW_RATE_OFDM_18M;
19154 		break;
19155 	case 48:
19156 		hw_value = ATH11K_HW_RATE_OFDM_24M;
19157 		break;
19158 	case 72:
19159 		hw_value = ATH11K_HW_RATE_OFDM_36M;
19160 		break;
19161 	case 96:
19162 		hw_value = ATH11K_HW_RATE_OFDM_48M;
19163 		break;
19164 	case 108:
19165 		hw_value = ATH11K_HW_RATE_OFDM_54M;
19166 		break;
19167 	default:
19168 		return -1;
19169 	}
19170 
19171 	return ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
19172 }
19173 
/*
 * Delete a firmware peer entry. Sends a WMI peer delete command and
 * sleeps until the completion event handler sets sc->peer_delete_done
 * and wakes us, giving up after 3 seconds per wait. Decrements the
 * driver's peer count on success. Returns 0 or an errno.
 */
int
qwx_peer_delete(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id,
    uint8_t *addr)
{
	int ret;

	sc->peer_delete_done = 0;
	ret = qwx_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id);
	if (ret) {
		printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n",
		    sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret);
		return ret;
	}

	/* Wait for the firmware's peer delete completion event. */
	while (!sc->peer_delete_done) {
		ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwxpeerd",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer delete command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

	sc->num_peers--;
	return 0;
}
19201 
/*
 * Create a firmware peer entry for the given node. Sends a WMI peer
 * create command and sleeps until the peer map event handler sets
 * sc->peer_mapped, with a 3 second timeout per wait. Returns 0 on
 * success, ENOBUFS when the firmware peer limit is reached, EINVAL if
 * an equivalent peer already exists, or another errno.
 */
int
qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
    struct ieee80211_node *ni, struct peer_create_params *param)
{
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* Respect the firmware's per-pdev peer limit. */
	if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) {
		DPRINTF("%s: failed to create peer due to insufficient "
		    "peer entry resource in firmware\n", __func__);
		return ENOBUFS;
	}
#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
	peer = &nq->peer;
	/*
	 * NOTE(review): 'peer' points into the node structure and can
	 * never be NULL here; the check appears to be a leftover from
	 * the Linux driver where the peer was looked up in a table.
	 */
	if (peer) {
		/* Refuse to create a duplicate peer on the same vdev. */
		if (peer->peer_id != HAL_INVALID_PEERID &&
		    peer->vdev_id == param->vdev_id) {
#ifdef notyet
			spin_unlock_bh(&ar->ab->base_lock);
			mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
			return EINVAL;
		}
#if 0
		/* Assume sta is transitioning to another band.
		 * Remove here the peer from rhash.
		 */
		ath11k_peer_rhash_delete(ar->ab, peer);
#endif
	}
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	sc->peer_mapped = 0;

	ret = qwx_wmi_send_peer_create_cmd(sc, pdev_id, param);
	if (ret) {
		printf("%s: failed to send peer create vdev_id %d ret %d\n",
		    sc->sc_dev.dv_xname, param->vdev_id, ret);
		return ret;
	}

	/* Wait for the firmware's peer map event. */
	while (!sc->peer_mapped) {
		ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: peer create command timeout\n",
			    sc->sc_dev.dv_xname);
			return ret;
		}
	}

#ifdef notyet
	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}
#endif
	peer->pdev_id = pdev_id;
#if 0
	peer->sta = sta;

	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}
#endif
	sc->num_peers++;
#ifdef notyet
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);
#endif
	return 0;
#if 0
cleanup:
	int fbret = qwx_peer_delete(sc, param->vdev_id, param->peer_addr);
	if (fbret) {
		printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n",
		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
		    param->vdev_id, fbret);
	}

	return ret;
#endif
}
19323 
/*
 * Enqueue a command on the REO (rx reorder engine) command ring.
 * When a completion callback 'cb' is given, a copy of the rx_tid state
 * is tracked on dp->reo_cmd_list so the callback can be invoked once
 * the command status event arrives. Returns 0 on success, ESHUTDOWN
 * while flushing after a firmware crash, or another error.
 */
int
qwx_dp_tx_send_reo_cmd(struct qwx_softc *sc, struct dp_rx_tid *rx_tid,
    enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd,
    void (*cb)(struct qwx_dp *, void *, enum hal_reo_cmd_status))
{
	struct qwx_dp *dp = &sc->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	/* The device is being torn down after a firmware crash. */
	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags))
		return ESHUTDOWN;

	cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = qwx_hal_reo_cmd_send(sc, cmd_ring, type, cmd);
	/* cmd_num should start from 1, during failure return the error code */
	/*
	 * NOTE(review): a negative cmd_num is returned as-is while the
	 * other error paths return positive errno values. Callers only
	 * test for non-zero, but the mixed convention is worth
	 * confirming against qwx_hal_reo_cmd_send().
	 */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return EINVAL;

	/* Without a callback there is nothing to track. */
	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dp_cmd)
		return ENOMEM;

	/* Snapshot the TID state; the caller's rx_tid may change later. */
	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;
#ifdef notyet
	spin_lock_bh(&dp->reo_cmd_lock);
#endif
	TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry);
#ifdef notyet
	spin_unlock_bh(&dp->reo_cmd_lock);
#endif
	return 0;
}
19370 
19371 uint32_t
19372 qwx_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid)
19373 {
19374 	uint32_t num_ext_desc;
19375 
19376 	if (ba_window_size <= 1) {
19377 		if (tid != HAL_DESC_REO_NON_QOS_TID)
19378 			num_ext_desc = 1;
19379 		else
19380 			num_ext_desc = 0;
19381 	} else if (ba_window_size <= 105) {
19382 		num_ext_desc = 1;
19383 	} else if (ba_window_size <= 210) {
19384 		num_ext_desc = 2;
19385 	} else {
19386 		num_ext_desc = 3;
19387 	}
19388 
19389 	return sizeof(struct hal_rx_reo_queue) +
19390 		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
19391 }
19392 
19393 void
19394 qwx_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic)
19395 {
19396 	hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
19397 		     FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
19398 
19399 	/* Magic pattern in reserved bits for debugging */
19400 	hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
19401 }
19402 
/*
 * Initialize a REO hardware rx queue descriptor at 'vaddr' for the
 * given TID, block-ack window size, starting sequence number, and PN
 * (packet number) check type. Non-QoS TIDs get only the base
 * descriptor; QoS TIDs additionally get three extension descriptors
 * (sized for the maximum BA window, see TODO below).
 */
void
qwx_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size,
    uint32_t start_seq, enum hal_pn_type type)
{
	struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
	struct hal_rx_reo_queue_ext *ext_desc;

	memset(qdesc, 0, sizeof(*qdesc));

	/* Mark the descriptor as REO-owned with a debug magic pattern. */
	qwx_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);

	qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);

	qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
	    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwx_tid_to_ac(tid));

	/* Clamp the window to at least 1; QoS TIDs use a minimum of 2. */
	if (ba_window_size < 1)
		ba_window_size = 1;

	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
		ba_window_size++;

	if (ba_window_size == 1)
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);

	/* The hardware field encodes the window size minus one. */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
				   ba_window_size - 1);
	switch (type) {
	case HAL_PN_TYPE_NONE:
	case HAL_PN_TYPE_WAPI_EVEN:
	case HAL_PN_TYPE_WAPI_UNEVEN:
		break;
	case HAL_PN_TYPE_WPA:
		/* Enable 48-bit packet number replay checking. */
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
		    FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
		    HAL_RX_REO_QUEUE_PN_SIZE_48);
		break;
	}

	/* TODO: Set Ignore ampdu flags based on BA window size and/or
	 * AMPDU capabilities
	 */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);

	/*
	 * NOTE(review): this OR of a zero SVLD field is a no-op, and the
	 * assignment below overwrites info1 entirely when start_seq fits
	 * in 12 bits. The same sequence exists in the Linux ath11k
	 * driver; confirm before changing.
	 */
	qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);

	if (start_seq <= 0xfff)
		qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
		    start_seq);

	if (tid == HAL_DESC_REO_NON_QOS_TID)
		return;

	ext_desc = qdesc->ext_desc;

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
	    HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
19480 
19481 void
19482 qwx_dp_reo_cmd_free(struct qwx_dp *dp, void *ctx,
19483     enum hal_reo_cmd_status status)
19484 {
19485 	struct qwx_softc *sc = dp->sc;
19486 	struct dp_rx_tid *rx_tid = ctx;
19487 
19488 	if (status != HAL_REO_CMD_SUCCESS)
19489 		printf("%s: failed to flush rx tid hw desc, tid %d status %d\n",
19490 		    sc->sc_dev.dv_xname, rx_tid->tid, status);
19491 
19492 	if (rx_tid->mem) {
19493 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
19494 		rx_tid->mem = NULL;
19495 		rx_tid->vaddr = NULL;
19496 		rx_tid->paddr = 0ULL;
19497 		rx_tid->size = 0;
19498 	}
19499 }
19500 
/*
 * Flush an rx TID's hardware queue descriptor out of the REO cache,
 * one descriptor-sized chunk at a time, finishing with a status-
 * generating flush whose completion callback frees the DMA memory.
 * If the final command cannot be sent, the memory is freed here.
 */
void
qwx_dp_reo_cache_flush(struct qwx_softc *sc, struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = qwx_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	/* Flush the trailing chunks first, working backwards. */
	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff;
		cmd.addr_hi = rx_tid->paddr >> 32;
		ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid,
		    HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL);
		if (ret) {
			printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, "
			    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid,
			    ret);
		}
	}

	/* Final flush of the base descriptor, with completion status. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
	    &cmd, qwx_dp_reo_cmd_free);
	if (ret) {
		printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, "
		    "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret);
		if (rx_tid->mem) {
			qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
			rx_tid->mem = NULL;
			rx_tid->vaddr = NULL;
			rx_tid->paddr = 0ULL;
			rx_tid->size = 0;
		}
	}
}
19542 
19543 void
19544 qwx_dp_rx_tid_del_func(struct qwx_dp *dp, void *ctx,
19545     enum hal_reo_cmd_status status)
19546 {
19547 	struct qwx_softc *sc = dp->sc;
19548 	struct dp_rx_tid *rx_tid = ctx;
19549 	struct dp_reo_cache_flush_elem *elem, *tmp;
19550 	time_t now;
19551 
19552 	if (status == HAL_REO_CMD_DRAIN) {
19553 		goto free_desc;
19554 	} else if (status != HAL_REO_CMD_SUCCESS) {
19555 		/* Shouldn't happen! Cleanup in case of other failure? */
19556 		printf("%s: failed to delete rx tid %d hw descriptor %d\n",
19557 		    sc->sc_dev.dv_xname, rx_tid->tid, status);
19558 		return;
19559 	}
19560 
19561 	elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT);
19562 	if (!elem)
19563 		goto free_desc;
19564 
19565 	now = gettime();
19566 	elem->ts = now;
19567 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
19568 #ifdef notyet
19569 	spin_lock_bh(&dp->reo_cmd_lock);
19570 #endif
19571 	TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry);
19572 	dp->reo_cmd_cache_flush_count++;
19573 
19574 	/* Flush and invalidate aged REO desc from HW cache */
19575 	TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) {
19576 		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
19577 		    now < elem->ts + DP_REO_DESC_FREE_TIMEOUT_MS) {
19578 			TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry);
19579 			dp->reo_cmd_cache_flush_count--;
19580 #ifdef notyet
19581 			spin_unlock_bh(&dp->reo_cmd_lock);
19582 #endif
19583 			qwx_dp_reo_cache_flush(sc, &elem->data);
19584 			free(elem, M_DEVBUF, sizeof(*elem));
19585 #ifdef notyet
19586 			spin_lock_bh(&dp->reo_cmd_lock);
19587 #endif
19588 		}
19589 	}
19590 #ifdef notyet
19591 	spin_unlock_bh(&dp->reo_cmd_lock);
19592 #endif
19593 	return;
19594 free_desc:
19595 	if (rx_tid->mem) {
19596 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
19597 		rx_tid->mem = NULL;
19598 		rx_tid->vaddr = NULL;
19599 		rx_tid->paddr = 0ULL;
19600 		rx_tid->size = 0;
19601 	}
19602 }
19603 
/*
 * Tear down a peer's rx TID reorder queue: mark the TID inactive and
 * issue a REO UPDATE_RX_QUEUE command clearing the valid bit. The DMA
 * memory is normally freed later by qwx_dp_rx_tid_del_func() via the
 * cache-flush list, but is freed here directly if the command could
 * not be queued.
 */
void
qwx_peer_rx_tid_delete(struct qwx_softc *sc, struct ath11k_peer *peer,
    uint8_t tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
	cmd.addr_hi = rx_tid->paddr >> 32;
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
	    &cmd, qwx_dp_rx_tid_del_func);
	if (ret && ret != ESHUTDOWN) {
		/* ESHUTDOWN means a firmware crash flush is in progress. */
		printf("%s: failed to send "
		    "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
		    sc->sc_dev.dv_xname, tid, ret);
	}

	if (rx_tid->mem) {
		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
		rx_tid->mem = NULL;
		rx_tid->vaddr = NULL;
		rx_tid->paddr = 0ULL;
		rx_tid->size = 0;
	}
}
19637 
19638 int
19639 qwx_peer_rx_tid_reo_update(struct qwx_softc *sc, struct ath11k_peer *peer,
19640     struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn,
19641     int update_ssn)
19642 {
19643 	struct ath11k_hal_reo_cmd cmd = {0};
19644 	int ret;
19645 
19646 	cmd.addr_lo = rx_tid->paddr & 0xffffffff;
19647 	cmd.addr_hi = rx_tid->paddr >> 32;
19648 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
19649 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
19650 	cmd.ba_window_size = ba_win_sz;
19651 
19652 	if (update_ssn) {
19653 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
19654 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
19655 	}
19656 
19657 	ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE,
19658 	    &cmd, NULL);
19659 	if (ret) {
19660 		printf("%s: failed to update rx tid queue, tid %d (%d)\n",
19661 		    sc->sc_dev.dv_xname, rx_tid->tid, ret);
19662 		return ret;
19663 	}
19664 
19665 	rx_tid->ba_win_sz = ba_win_sz;
19666 
19667 	return 0;
19668 }
19669 
19670 void
19671 qwx_dp_rx_tid_mem_free(struct qwx_softc *sc, struct ieee80211_node *ni,
19672     int vdev_id, uint8_t tid)
19673 {
19674 	struct qwx_node *nq = (struct qwx_node *)ni;
19675 	struct ath11k_peer *peer = &nq->peer;
19676 	struct dp_rx_tid *rx_tid;
19677 #ifdef notyet
19678 	spin_lock_bh(&ab->base_lock);
19679 #endif
19680 	rx_tid = &peer->rx_tid[tid];
19681 
19682 	if (rx_tid->mem) {
19683 		qwx_dmamem_free(sc->sc_dmat, rx_tid->mem);
19684 		rx_tid->mem = NULL;
19685 		rx_tid->vaddr = NULL;
19686 		rx_tid->paddr = 0ULL;
19687 		rx_tid->size = 0;
19688 	}
19689 
19690 	rx_tid->active = 0;
19691 #ifdef notyet
19692 	spin_unlock_bh(&ab->base_lock);
19693 #endif
19694 }
19695 
19696 int
19697 qwx_peer_rx_tid_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
19698     int vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn,
19699     enum hal_pn_type pn_type)
19700 {
19701 	struct qwx_node *nq = (struct qwx_node *)ni;
19702 	struct ath11k_peer *peer = &nq->peer;
19703 	struct dp_rx_tid *rx_tid;
19704 	uint32_t hw_desc_sz;
19705 	void *vaddr;
19706 	uint64_t paddr;
19707 	int ret;
19708 #ifdef notyet
19709 	spin_lock_bh(&ab->base_lock);
19710 #endif
19711 	rx_tid = &peer->rx_tid[tid];
19712 	/* Update the tid queue if it is already setup */
19713 	if (rx_tid->active) {
19714 		paddr = rx_tid->paddr;
19715 		ret = qwx_peer_rx_tid_reo_update(sc, peer, rx_tid,
19716 		    ba_win_sz, ssn, 1);
19717 #ifdef notyet
19718 		spin_unlock_bh(&ab->base_lock);
19719 #endif
19720 		if (ret) {
19721 			printf("%s: failed to update reo for peer %s "
19722 			    "rx tid %d\n: %d", sc->sc_dev.dv_xname,
19723 			    ether_sprintf(ni->ni_macaddr), tid, ret);
19724 			return ret;
19725 		}
19726 
19727 		ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id,
19728 		    pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
19729 		if (ret)
19730 			printf("%s: failed to send wmi rx reorder queue "
19731 			    "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname,
19732 			    ether_sprintf(ni->ni_macaddr), tid, ret);
19733 		return ret;
19734 	}
19735 
19736 	rx_tid->tid = tid;
19737 
19738 	rx_tid->ba_win_sz = ba_win_sz;
19739 
19740 	/* TODO: Optimize the memory allocation for qos tid based on
19741 	 * the actual BA window size in REO tid update path.
19742 	 */
19743 	if (tid == HAL_DESC_REO_NON_QOS_TID)
19744 		hw_desc_sz = qwx_hal_reo_qdesc_size(ba_win_sz, tid);
19745 	else
19746 		hw_desc_sz = qwx_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
19747 
19748 	rx_tid->mem = qwx_dmamem_alloc(sc->sc_dmat, hw_desc_sz,
19749 	    HAL_LINK_DESC_ALIGN);
19750 	if (rx_tid->mem == NULL) {
19751 #ifdef notyet
19752 		spin_unlock_bh(&ab->base_lock);
19753 #endif
19754 		return ENOMEM;
19755 	}
19756 
19757 	vaddr = QWX_DMA_KVA(rx_tid->mem);
19758 
19759 	qwx_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
19760 
19761 	paddr = QWX_DMA_DVA(rx_tid->mem);
19762 
19763 	rx_tid->vaddr = vaddr;
19764 	rx_tid->paddr = paddr;
19765 	rx_tid->size = hw_desc_sz;
19766 	rx_tid->active = 1;
19767 #ifdef notyet
19768 	spin_unlock_bh(&ab->base_lock);
19769 #endif
19770 	ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id,
19771 	    ni->ni_macaddr, paddr, tid, 1, ba_win_sz);
19772 	if (ret) {
19773 		printf("%s: failed to setup rx reorder queue for peer %s "
19774 		    "tid %d: %d\n", sc->sc_dev.dv_xname,
19775 		    ether_sprintf(ni->ni_macaddr), tid, ret);
19776 		qwx_dp_rx_tid_mem_free(sc, ni, vdev_id, tid);
19777 	}
19778 
19779 	return ret;
19780 }
19781 
19782 int
19783 qwx_peer_rx_frag_setup(struct qwx_softc *sc, struct ieee80211_node *ni,
19784     int vdev_id)
19785 {
19786 	struct qwx_node *nq = (struct qwx_node *)ni;
19787 	struct ath11k_peer *peer = &nq->peer;
19788 	struct dp_rx_tid *rx_tid;
19789 	int i;
19790 #ifdef notyet
19791 	spin_lock_bh(&ab->base_lock);
19792 #endif
19793 	for (i = 0; i <= nitems(peer->rx_tid); i++) {
19794 		rx_tid = &peer->rx_tid[i];
19795 #if 0
19796 		rx_tid->ab = ab;
19797 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
19798 #endif
19799 	}
19800 #if 0
19801 	peer->dp_setup_done = true;
19802 #endif
19803 #ifdef notyet
19804 	spin_unlock_bh(&ab->base_lock);
19805 #endif
19806 	return 0;
19807 }
19808 
/*
 * Set up datapath state for a peer: program the firmware's default
 * RX routing for this peer, create an RX reorder queue for each TID,
 * and initialize fragment reassembly state. On failure, any TID
 * queues already created are torn down again.
 */
int
qwx_dp_peer_setup(struct qwx_softc *sc, int vdev_id, int pdev_id,
    struct ieee80211_node *ni)
{
	struct qwx_node *nq = (struct qwx_node *)ni;
	struct ath11k_peer *peer = &nq->peer;
	uint32_t reo_dest;
	int ret = 0, tid;

	/* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = sc->pdev_dp.mac_id + 1;
	ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id,
	    WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		printf("%s: failed to set default routing %d peer %s "
		    "vdev_id %d\n", sc->sc_dev.dv_xname, ret,
		    ether_sprintf(ni->ni_macaddr), vdev_id);
		return ret;
	}

	/* Create one RX reorder queue per TID for this peer. */
	for (tid = 0; tid < IEEE80211_NUM_TID; tid++) {
		ret = qwx_peer_rx_tid_setup(sc, ni, vdev_id, pdev_id,
		    tid, 1, 0, HAL_PN_TYPE_NONE);
		if (ret) {
			printf("%s: failed to setup rxd tid queue for tid %d: %d\n",
			    sc->sc_dev.dv_xname, tid, ret);
			goto peer_clean;
		}
	}

	ret = qwx_peer_rx_frag_setup(sc, ni, vdev_id);
	if (ret) {
		printf("%s: failed to setup rx defrag context\n",
		    sc->sc_dev.dv_xname);
		/*
		 * All TIDs were set up by the loop above (tid is now
		 * IEEE80211_NUM_TID); back off to the last valid index
		 * so the cleanup loop below deletes all of them.
		 */
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
#ifdef notyet
	spin_lock_bh(&ab->base_lock);
#endif
#if 0
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}
#endif
	/*
	 * Tear down TID queues in reverse order, up to and including
	 * the TID whose setup failed.
	 */
	for (; tid >= 0; tid--)
		qwx_peer_rx_tid_delete(sc, peer, tid);
#ifdef notyet
	spin_unlock_bh(&ab->base_lock);
#endif
	return ret;
}
19870 
19871 int
19872 qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif,
19873     uint8_t pdev_id, struct ieee80211_node *ni)
19874 {
19875 	struct peer_create_params peer_param;
19876 	int ret;
19877 #ifdef notyet
19878 	lockdep_assert_held(&ar->conf_mutex);
19879 #endif
19880 	peer_param.vdev_id = arvif->vdev_id;
19881 	peer_param.peer_addr = ni->ni_macaddr;
19882 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
19883 
19884 	ret = qwx_peer_create(sc, arvif, pdev_id, ni, &peer_param);
19885 	if (ret) {
19886 		printf("%s: Failed to add peer: %s for VDEV: %d\n",
19887 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
19888 		    arvif->vdev_id);
19889 		return ret;
19890 	}
19891 
19892 	DNPRINTF(QWX_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__,
19893 	    ether_sprintf(ni->ni_macaddr), arvif->vdev_id);
19894 
19895 	ret = qwx_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni);
19896 	if (ret) {
19897 		printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n",
19898 		    sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr),
19899 		    arvif->vdev_id, ret);
19900 		goto free_peer;
19901 	}
19902 
19903 	return 0;
19904 
19905 free_peer:
19906 	qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr);
19907 	return ret;
19908 }
19909 
19910 void
19911 qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
19912 {
19913 	/* setup commonly used values */
19914 	arg->scan_req_id = 1;
19915 	if (sc->state_11d == ATH11K_11D_PREPARING)
19916 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
19917 	else
19918 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
19919 	arg->dwell_time_active = 50;
19920 	arg->dwell_time_active_2g = 0;
19921 	arg->dwell_time_passive = 150;
19922 	arg->dwell_time_active_6g = 40;
19923 	arg->dwell_time_passive_6g = 30;
19924 	arg->min_rest_time = 50;
19925 	arg->max_rest_time = 500;
19926 	arg->repeat_probe_time = 0;
19927 	arg->probe_spacing_time = 0;
19928 	arg->idle_time = 0;
19929 	arg->max_scan_time = 20000;
19930 	arg->probe_delay = 5;
19931 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
19932 	    WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL |
19933 	    WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED;
19934 	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
19935 
19936 	if (isset(sc->wmi.svc_map,
19937 	    WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE))
19938 		arg->scan_ctrl_flags_ext |=
19939 		    WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
19940 
19941 	arg->num_bssid = 1;
19942 
19943 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
19944 	 * ZEROs in probe request
19945 	 */
19946 	IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr);
19947 }
19948 
19949 int
19950 qwx_wmi_set_peer_param(struct qwx_softc *sc, uint8_t *peer_addr,
19951     uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val)
19952 {
19953 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19954 	struct wmi_peer_set_param_cmd *cmd;
19955 	struct mbuf *m;
19956 	int ret;
19957 
19958 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19959 	if (!m)
19960 		return ENOMEM;
19961 
19962 	cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) +
19963 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
19964 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
19965 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
19966 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr);
19967 	cmd->vdev_id = vdev_id;
19968 	cmd->param_id = param_id;
19969 	cmd->param_value = param_val;
19970 
19971 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID);
19972 	if (ret) {
19973 		printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n",
19974 		    sc->sc_dev.dv_xname);
19975 		m_freem(m);
19976 		return ret;
19977 	}
19978 
19979 	DNPRINTF(QWX_D_WMI, "%s: cmd peer set param vdev %d peer %s "
19980 	    "set param %d value %d\n", __func__, vdev_id,
19981 	    ether_sprintf(peer_addr), param_id, param_val);
19982 
19983 	return 0;
19984 }
19985 
19986 int
19987 qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *sc, int vdev_id,
19988     int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid,
19989     uint8_t ba_window_size_valid, uint32_t ba_window_size)
19990 {
19991 	struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
19992 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
19993 	struct mbuf *m;
19994 	int ret;
19995 
19996 	m = qwx_wmi_alloc_mbuf(sizeof(*cmd));
19997 	if (!m)
19998 		return ENOMEM;
19999 
20000 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) +
20001 	    sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
20002 	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
20003 	    WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
20004 	    FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
20005 
20006 	IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr);
20007 	cmd->vdev_id = vdev_id;
20008 	cmd->tid = tid;
20009 	cmd->queue_ptr_lo = paddr & 0xffffffff;
20010 	cmd->queue_ptr_hi = paddr >> 32;
20011 	cmd->queue_no = tid;
20012 	cmd->ba_window_size_valid = ba_window_size_valid;
20013 	cmd->ba_window_size = ba_window_size;
20014 
20015 	ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
20016 	if (ret) {
20017 		printf("%s: failed to send WMI_PEER_REORDER_QUEUE_SETUP\n",
20018 		    sc->sc_dev.dv_xname);
20019 		m_freem(m);
20020 	}
20021 
20022 	DNPRINTF(QWX_D_WMI, "%s: cmd peer reorder queue setup addr %s "
20023 	    "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid);
20024 
20025 	return ret;
20026 }
20027 
enum ath11k_spectral_mode
qwx_spectral_get_mode(struct qwx_softc *sc)
{
	/*
	 * Spectral scan support is not implemented yet; until the
	 * #if 0 code below is enabled this always reports disabled.
	 * Note the if/else straddles the preprocessor block, so the
	 * final return serves as the else-branch once enabled.
	 */
#if 0
	if (sc->spectral.enabled)
		return ar->spectral.mode;
	else
#endif
		return ATH11K_SPECTRAL_DISABLED;
}
20038 
/* Stub: spectral scan buffers are not implemented yet. */
void
qwx_spectral_reset_buffer(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
}
20044 
/*
 * Ask firmware to cancel the current scan and wait (up to 3 seconds)
 * for the scan state machine to return to idle. If the completion
 * event never arrives, clean up local scan state anyway.
 */
int
qwx_scan_stop(struct qwx_softc *sc)
{
	struct scan_cancel_param arg = {
		.req_type = WLAN_SCAN_CANCEL_SINGLE,
		.scan_id = ATH11K_SCAN_ID,
	};
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	/* TODO: Fill other STOP Params */
	arg.pdev_id = 0; /* TODO: derive pdev ID somehow? */
	arg.vdev_id = sc->scan.vdev_id;

	ret = qwx_wmi_send_scan_stop_cmd(sc, &arg);
	if (ret) {
		printf("%s: failed to stop wmi scan: %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto out;
	}

	/*
	 * Sleep on &sc->scan.state until whoever processes the scan
	 * completion event flips the state to idle and wakes us.
	 */
	while (sc->scan.state != ATH11K_SCAN_IDLE) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscstop",
		    SEC_TO_NSEC(3));
		if (ret) {
			printf("%s: scan stop timeout\n", sc->sc_dev.dv_xname);
			break;
		}
	}
out:
	/* Scan state should be updated upon scan completion but in case
	 * firmware fails to deliver the event (for whatever reason) it is
	 * desired to clean up scan state anyway. Firmware may have just
	 * dropped the scan completion event delivery due to transport pipe
	 * being overflown with data and/or it can recover on its own before
	 * next scan request is submitted.
	 */
#ifdef notyet
	spin_lock_bh(&ar->data_lock);
#endif
	if (sc->scan.state != ATH11K_SCAN_IDLE)
		qwx_mac_scan_finish(sc);
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
20093 
/*
 * Watchdog for scans that never complete; set up in qwx_attach().
 * (Arming it via timeout_add_msec() is currently disabled in
 * qwx_scan().) Aborts the hung scan at splnet.
 */
void
qwx_scan_timeout(void *arg)
{
	struct qwx_softc *sc = arg;
	int s = splnet();

#ifdef notyet
	mutex_lock(&ar->conf_mutex);
#endif
	printf("%s\n", __func__);	/* scan timeouts should be rare */
	qwx_scan_abort(sc);
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	splx(s);
}
20110 
/*
 * Submit a prepared scan request to firmware and wait for the scan
 * state machine to leave the STARTING state. Event processing
 * elsewhere updates sc->scan.state and issues the wakeup.
 */
int
qwx_start_scan(struct qwx_softc *sc, struct scan_req_params *arg)
{
	int ret;
	unsigned long timeout = 1;	/* seconds */
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	if (qwx_spectral_get_mode(sc) == ATH11K_SPECTRAL_BACKGROUND)
		qwx_spectral_reset_buffer(sc);

	ret = qwx_wmi_send_scan_start_cmd(sc, arg);
	if (ret)
		return ret;

	/* 11D offload delays the scan-started event; allow more time. */
	if (isset(sc->wmi.svc_map, WMI_TLV_SERVICE_11D_OFFLOAD)) {
		timeout = 5;
#if 0
		if (ar->supports_6ghz)
			timeout += 5 * HZ;
#endif
	}

	while (sc->scan.state == ATH11K_SCAN_STARTING) {
		ret = tsleep_nsec(&sc->scan.state, 0, "qwxscan",
		    SEC_TO_NSEC(timeout));
		if (ret) {
			/* Firmware never confirmed; cancel the scan. */
			printf("%s: scan start timeout\n", sc->sc_dev.dv_xname);
			qwx_scan_stop(sc);
			break;
		}
	}

#ifdef notyet
	spin_lock_bh(&ar->data_lock);
	spin_unlock_bh(&ar->data_lock);
#endif
	return ret;
}
20150 
20151 #define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD		200 /* in msecs */
20152 
/*
 * Begin a scan on behalf of net80211. Builds a WMI scan request
 * covering all configured channels (active if a desired ESSID is
 * set, passive otherwise) and submits it via qwx_start_scan().
 * Returns 0 on success, EBUSY if a scan is already in progress,
 * or another errno value.
 */
int
qwx_scan(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list);
	struct scan_req_params *arg = NULL;
	struct ieee80211_channel *chan, *lastc;
	int ret = 0, num_channels, i;
	uint32_t scan_timeout;

	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/*
	 * TODO Will we need separate scan iterations on devices with
	 * multiple radios?
	 */
	if (sc->num_radios > 1)
		printf("%s: TODO: only scanning with first vdev\n", __func__);

	/* Firmwares advertising the support of triggering 11D algorithm
	 * on the scan results of a regular scan expects driver to send
	 * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
	 * With this feature, separate 11D scan can be avoided since
	 * regdomain can be determined with the scan results of the
	 * regular scan.
	 */
	if (sc->state_11d == ATH11K_11D_PREPARING &&
	    isset(sc->wmi.svc_map, WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN))
		qwx_mac_11d_scan_start(sc, arvif);
#ifdef notyet
	mutex_lock(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	/* Only start a scan when idle; reject overlapping requests. */
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
		sc->scan.started = 0;
		sc->scan.completed = 0;
		sc->scan.state = ATH11K_SCAN_STARTING;
		sc->scan.is_roc = 0;
		sc->scan.vdev_id = arvif->vdev_id;
		ret = 0;
		break;
	case ATH11K_SCAN_STARTING:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ret = EBUSY;
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
	if (ret)
		goto exit;

	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!arg) {
		ret = ENOMEM;
		goto exit;
	}

	qwx_wmi_start_scan_init(sc, arg);
	arg->vdev_id = arvif->vdev_id;
	arg->scan_id = ATH11K_SCAN_ID;

	/* Active scan for the desired ESSID, otherwise passive scan. */
	if (ic->ic_des_esslen != 0) {
		arg->num_ssids = 1;
		arg->ssid[0].length  = ic->ic_des_esslen;
		memcpy(&arg->ssid[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	} else
		arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;

	/*
	 * Count configured channels; slot 0 is skipped (not a valid
	 * IEEE channel number) and unconfigured slots have no flags.
	 */
	lastc = &ic->ic_channels[IEEE80211_CHAN_MAX];
	num_channels = 0;
	for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
		if (chan->ic_flags == 0)
			continue;
		num_channels++;
	}
	if (num_channels) {
		arg->num_chan = num_channels;
		arg->chan_list = mallocarray(arg->num_chan,
		    sizeof(*arg->chan_list), M_DEVBUF, M_NOWAIT | M_ZERO);

		if (!arg->chan_list) {
			ret = ENOMEM;
			goto exit;
		}

		/* Second pass: fill in the frequency list. */
		i = 0;
		for (chan = &ic->ic_channels[1]; chan <= lastc; chan++) {
			if (chan->ic_flags == 0)
				continue;
			if (isset(sc->wmi.svc_map,
			    WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL)) {
				arg->chan_list[i++] = chan->ic_freq &
				    WMI_SCAN_CONFIG_PER_CHANNEL_MASK;
#if 0
				/* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
				 * flags, then scan all PSC channels in 6 GHz band and
				 * those non-PSC channels where RNR IE is found during
				 * the legacy 2.4/5 GHz scan.
				 * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
				 * then all channels in 6 GHz will be scanned.
				 */
				if (req->channels[i]->band == NL80211_BAND_6GHZ &&
				    req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
				    !cfg80211_channel_is_psc(req->channels[i]))
					arg->chan_list[i] |=
						WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
#endif
			} else {
				arg->chan_list[i++] = chan->ic_freq;
			}
		}
	}
#if 0
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		arg->scan_f_add_spoofed_mac_in_probe = 1;
		ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
		ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
	}
#endif
	scan_timeout = 5000;	/* ms; watchdog below is still disabled */

	/* Add a margin to account for event/command processing */
	scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;

	ret = qwx_start_scan(sc, arg);
	if (ret) {
		printf("%s: failed to start hw scan: %d\n",
		    sc->sc_dev.dv_xname, ret);
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		sc->scan.state = ATH11K_SCAN_IDLE;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
	}
#if 0
	timeout_add_msec(&sc->scan.timeout, scan_timeout);
#endif
exit:
	/* free(9) with a NULL address is a no-op, so this is safe even
	 * if chan_list was never allocated. */
	if (arg) {
		free(arg->chan_list, M_DEVBUF,
		    arg->num_chan * sizeof(*arg->chan_list));
#if 0
		kfree(arg->extraie.ptr);
#endif
		free(arg, M_DEVBUF, sizeof(*arg));
	}
#ifdef notyet
	mutex_unlock(&ar->conf_mutex);
#endif
	/* Kick off the deferred 11D scan if it has not started yet. */
	if (sc->state_11d == ATH11K_11D_PREPARING)
		qwx_mac_11d_scan_start(sc, arvif);

	return ret;
}
20317 
/*
 * Abort an in-progress scan. Only a scan in the RUNNING state can
 * be aborted; other states are either already finished or in a
 * transition that must complete first.
 */
void
qwx_scan_abort(struct qwx_softc *sc)
{
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
#endif
	switch (sc->scan.state) {
	case ATH11K_SCAN_IDLE:
		/* This can happen if timeout worker kicked in and called
		 * abortion while scan completion was being processed.
		 */
		break;
	case ATH11K_SCAN_STARTING:
	case ATH11K_SCAN_ABORTING:
		printf("%s: refusing scan abortion due to invalid "
		    "scan state: %d\n", sc->sc_dev.dv_xname, sc->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
		/* Mark the scan as aborting, then stop it in firmware. */
		sc->scan.state = ATH11K_SCAN_ABORTING;
#ifdef notyet
		spin_unlock_bh(&ar->data_lock);
#endif
		ret = qwx_scan_stop(sc);
		if (ret)
			printf("%s: failed to abort scan: %d\n",
			    sc->sc_dev.dv_xname, ret);
#ifdef notyet
		spin_lock_bh(&ar->data_lock);
#endif
		break;
	}
#ifdef notyet
	spin_unlock_bh(&ar->data_lock);
#endif
}
20356 
20357 /*
20358  * Find a pdev which corresponds to a given channel.
20359  * This doesn't exactly match the semantics of the Linux driver
20360  * but because OpenBSD does not (yet) implement multi-bss mode
20361  * we can assume that only one PHY will be active in either the
20362  * 2 GHz or the 5 GHz band.
20363  */
20364 struct qwx_pdev *
20365 qwx_get_pdev_for_chan(struct qwx_softc *sc, struct ieee80211_channel *chan)
20366 {
20367 	struct qwx_pdev *pdev;
20368 	int i;
20369 
20370 	for (i = 0; i < sc->num_radios; i++) {
20371 		if ((sc->pdevs_active & (1 << i)) == 0)
20372 			continue;
20373 
20374 		pdev = &sc->pdevs[i];
20375 		if (IEEE80211_IS_CHAN_2GHZ(chan) &&
20376 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP))
20377 			return pdev;
20378 		if (IEEE80211_IS_CHAN_5GHZ(chan) &&
20379 		    (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP))
20380 			return pdev;
20381 	}
20382 
20383 	return NULL;
20384 }
20385 
/*
 * Derive the management and beacon TX rate from the lowest basic
 * rate of the current BSS and program both into firmware.
 */
void
qwx_recalculate_mgmt_rate(struct qwx_softc *sc, struct ieee80211_node *ni,
    uint32_t vdev_id, uint32_t pdev_id)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int hw_rate_code;
	uint32_t vdev_param;
	int bitrate;
	int ret;
#ifdef notyet
	lockdep_assert_held(&ar->conf_mutex);
#endif
	bitrate = ieee80211_min_basic_rate(ic);
	hw_rate_code = qwx_mac_get_rate_hw_value(ic, ni, bitrate);
	if (hw_rate_code < 0) {
		DPRINTF("%s: bitrate not supported %d\n",
		    sc->sc_dev.dv_xname, bitrate);
		return;
	}

	vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id,
	    vdev_param, hw_rate_code);
	if (ret)
		printf("%s: failed to set mgmt tx rate\n",
		    sc->sc_dev.dv_xname);
#if 0
	/* For WCN6855, firmware will clear this param when vdev starts, hence
	 * cache it here so that we can reconfigure it once vdev starts.
	 */
	ab->hw_rate_code = hw_rate_code;
#endif
	/* Beacons use the same rate as other management frames. */
	vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
	ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param,
	    hw_rate_code);
	if (ret)
		printf("%s: failed to set beacon tx rate\n",
		    sc->sc_dev.dv_xname);
}
20425 
/*
 * Transition to the AUTH state: configure the beacon interval and
 * management rate, create a firmware peer entry for the AP, and
 * start the vdev on the AP's channel.
 */
int
qwx_auth(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	uint32_t param_id;
	struct qwx_vif *arvif;
	struct qwx_pdev *pdev;
	int ret;

	arvif = TAILQ_FIRST(&sc->vif_list);
	if (arvif == NULL) {
		printf("%s: no vdev found\n", sc->sc_dev.dv_xname);
		return EINVAL;
	}

	pdev = qwx_get_pdev_for_chan(sc, ni->ni_chan);
	if (pdev == NULL) {
		printf("%s: no pdev found for channel %d\n",
		    sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan));
		return EINVAL;
	}

	param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
	ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id,
	    param_id, ni->ni_intval);
	if (ret) {
		printf("%s: failed to set beacon interval for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);

	ret = qwx_mac_station_add(sc, arvif, pdev->pdev_id, ni);
	if (ret)
		return ret;

	/* Start vdev. */
	ret = qwx_mac_vdev_start(sc, arvif, pdev->pdev_id);
	if (ret) {
		printf("%s: failed to start MAC for VDEV: %d\n",
		    sc->sc_dev.dv_xname, arvif->vdev_id);
		return ret;
	}

	/*
	 * WCN6855 firmware clears basic-rate parameters when vdev starts.
	 * Set it once more.
	 */
	qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id);

	return ret;
}
20480 
/* Stub: deauthentication handling is not implemented yet. */
int
qwx_deauth(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
20487 
/* Stub: association handling is not implemented yet. */
int
qwx_assoc(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
20494 
/* Stub: disassociation handling is not implemented yet. */
int
qwx_disassoc(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
20501 
/* Stub: transition to the RUN state is not implemented yet. */
int
qwx_run(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
20508 
/* Stub: leaving the RUN state is not implemented yet. */
int
qwx_run_stop(struct qwx_softc *sc)
{
	printf("%s: not implemented\n", __func__);
	return ENOTSUP;
}
20515 
/*
 * Generic attach code shared by all bus frontends. Sets up tasks
 * and timeouts, initializes per-pdev back pointers and the vif
 * list, and brings the device up once before powering it back down.
 */
int
qwx_attach(struct qwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int error, i;

	task_set(&sc->init_task, qwx_init_task, sc);
	task_set(&sc->newstate_task, qwx_newstate_task, sc);
	timeout_set_proc(&sc->scan.timeout, qwx_scan_timeout, sc);

	for (i = 0; i < nitems(sc->pdevs); i++)
		sc->pdevs[i].sc = sc;

	TAILQ_INIT(&sc->vif_list);

	/* Bring the device up once during attach; presumably to load
	 * firmware and probe hardware capabilities. */
	error = qwx_init(ifp);
	if (error)
		return error;

	/* Turn device off until interface comes up. */
	qwx_core_deinit(sc);

	return 0;
}
20541 
20542 void
20543 qwx_detach(struct qwx_softc *sc)
20544 {
20545 	if (sc->fwmem) {
20546 		qwx_dmamem_free(sc->sc_dmat, sc->fwmem);
20547 		sc->fwmem = NULL;
20548 	}
20549 
20550 	if (sc->m3_mem) {
20551 		qwx_dmamem_free(sc->sc_dmat, sc->m3_mem);
20552 		sc->m3_mem = NULL;
20553 	}
20554 }
20555 
/*
 * Allocate a single physically contiguous DMA memory segment of the
 * given size and alignment, map it into kernel virtual memory, and
 * load it into a DMA map. Returns NULL on failure; on success the
 * caller owns the returned handle and frees it with qwx_dmamem_free().
 */
struct qwx_dmamem *
qwx_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
{
	struct qwx_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adm == NULL)
		return NULL;
	adm->size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &adm->map) != 0)
		goto admfree;

	/* Restrict to the low 4GB of physical address space. */
	if (bus_dmamem_alloc_range(dmat, size, align, 0, &adm->seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 0, 0xffffffff) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &adm->seg, nsegs, size,
	    &adm->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load_raw(dmat, adm->map, &adm->seg, nsegs, size,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	/* Belt and braces; BUS_DMA_ZERO above already cleared the pages. */
	bzero(adm->kva, size);

	return adm;

	/* Error unwind: undo each step in reverse order. */
unmap:
	bus_dmamem_unmap(dmat, adm->kva, size);
free:
	bus_dmamem_free(dmat, &adm->seg, 1);
destroy:
	bus_dmamap_destroy(dmat, adm->map);
admfree:
	free(adm, M_DEVBUF, sizeof(*adm));

	return NULL;
}
20598 
/*
 * Release DMA memory obtained from qwx_dmamem_alloc(), unwinding
 * in reverse order of allocation.
 */
void
qwx_dmamem_free(bus_dma_tag_t dmat, struct qwx_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->kva, adm->size);
	bus_dmamem_free(dmat, &adm->seg, 1);
	bus_dmamap_destroy(dmat, adm->map);
	free(adm, M_DEVBUF, sizeof(*adm));
}
20607