/*	$OpenBSD: if_iwx.c,v 1.181 2024/02/16 11:44:52 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14
#define IWX_NUM_5GHZ_CHANNELS	37

const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
void	iwx_set_ltr(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
	    const struct iwx_fw_sects *);
void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_prph_addr_mask(struct iwx_softc *);
uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
void	iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
int	iwx_force_power_gating(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_clear_persistence_bit(struct iwx_softc *);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
void	iwx_post_alive(struct iwx_softc *);
int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
	    uint32_t);
void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
void	iwx_setup_ht_rates(struct iwx_softc *);
void	iwx_setup_vht_rates(struct iwx_softc *);
int	iwx_mimo_enabled(struct iwx_softc *);
void	iwx_mac_ctxt_task(void *);
void	iwx_phy_ctxt_task(void *);
void	iwx_updatechan(struct ieee80211com *);
void	iwx_updateprot(struct ieee80211com *);
void	iwx_updateslot(struct ieee80211com *);
void	iwx_updateedca(struct ieee80211com *);
void	iwx_updatedtim(struct ieee80211com *);
void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_rx_ba_session_expired(void *);
void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
	    struct mbuf_list *);
void	iwx_reorder_timer_expired(void *);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ba_task(void *);

void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
int	iwx_is_valid_mac_addr(const uint8_t *);
void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
int	iwx_nvm_get(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
int	iwx_load_pnvm(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
uint32_t iwx_fw_rateidx_ofdm(uint8_t);
uint32_t iwx_fw_rateidx_cck(uint8_t);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, uint16_t *, uint32_t *);
void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
	    uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan_reduced(struct iwx_softc *);
uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, int);
void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, uint16_t, int);
void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
	    struct iwx_scan_channel_params_v6 *, uint32_t, int);
int	iwx_umac_scan_v14(struct iwx_softc *, int);
void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
void	iwx_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwx_bgscan_done_task(void *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_mgmt_queue(struct iwx_softc *);
int	iwx_disable_mgmt_queue(struct iwx_softc *);
int	iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
	    uint8_t, uint8_t);
int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwx_setkey_task(void *);
void	iwx_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_soc_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_dump_driver_status(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
void	iwx_resume(struct iwx_softc *);
int	iwx_wakeup(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

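/*
 * Firmware images declare the command and notification struct versions
 * they implement via an IWX_UCODE_TLV_CMD_VERSIONS TLV; the parsed
 * entries are stored in sc->cmd_versions.  The two helpers below look
 * up the version for a given command group and opcode, returning
 * IWX_FW_CMD_VER_UNKNOWN if the firmware did not declare one.  Callers
 * use the result to choose a command struct layout; an illustrative
 * sketch (see iwx_rs_init() for a real use):
 *
 *	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
 *	    IWX_TLC_MNG_CONFIG_CMD);
 *	if (cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
 *		... fall back to the oldest supported layout ...
 */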
uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

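/*
 * Return non-zero if the given HT PLCP value encodes a
 * two-spatial-stream (MIMO2) MCS, i.e. one of MCS 8-15.
 */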
int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	switch (ht_plcp) {
	case IWX_RATE_HT_MIMO2_MCS_8_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_9_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_10_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_11_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_12_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_13_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_14_PLCP:
	case IWX_RATE_HT_MIMO2_MCS_15_PLCP:
		return 1;
	default:
		break;
	}

	return 0;
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

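/*
 * Allocate DMA-safe memory for one firmware section and copy the
 * section image into it; the device fetches sections from these
 * buffers during its self-initiated firmware load.
 */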
int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

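/*
 * Count consecutive firmware sections beginning at index 'start', up
 * to the next CPU1/CPU2 or paging separator entry.  The TLV firmware
 * image stores LMAC, UMAC, and paging sections in one flat array with
 * separator entries between the groups.
 */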
int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

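/*
 * Load all firmware sections into DMA memory and record their physical
 * addresses in the context info structure handed to the device.  The
 * section array parsed from the firmware file is laid out as
 *
 *	[ LMAC sections | separator | UMAC sections | separator | paging ]
 *
 * which is why the index arithmetic below skips one entry after the
 * LMAC group and two entries after the UMAC group.
 */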
int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is not stored in dram->fw like the umac and lmac
	 * sections are; it is stored separately because its release timing
	 * differs: fw memory can be released once the firmware is alive,
	 * whereas paging memory can only be freed when the device goes down.
	 * Hence the logic for accessing the fw image differs here as well:
	 * fw_cnt no longer changes, so the loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}

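/*
 * Format a firmware version string, e.g. "77.deadbeef.29" for major
 * version 77, minor version 0xdeadbeef, and API level 29 (made-up
 * example values).
 */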
void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

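/*
 * Allocate a power-of-two sized DMA buffer for the firmware monitor
 * (the debug log destination), trying 2^max_power bytes first and
 * halving the size on each failure until 2^min_power bytes is reached.
 */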
int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10))));

	return 0;
}

int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

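/*
 * Program the firmware debug destination described by the
 * IWX_UCODE_TLV_FW_DBG_DEST TLV: apply the register operations listed
 * in the TLV and, in external mode, point the device at the firmware
 * monitor buffer allocated above.
 */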
int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc),
	    mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr,
		    val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

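/*
 * Set up PCIe LTR (Latency Tolerance Reporting) snoop/no-snoop values
 * used during firmware boot; see the comment inside regarding the
 * ~250 usec workaround value.
 */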
void
iwx_set_ltr(struct iwx_softc *sc)
{
	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To workaround hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if (!sc->sc_integrated) {
		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (sc->sc_integrated &&
		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
		    IWX_HPM_MAC_LRT_ENABLE_ALL);
		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
	}
}

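/*
 * Build the context info structure which the device reads via DMA in
 * order to load the firmware by itself, then kick off the firmware
 * self-load.  This scheme replaces the host-driven section-by-section
 * firmware upload performed on older devices supported by iwm(4).
 */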
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure the debug destination, if one is present. */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

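/*
 * Gen3 (AX210 and later) variant of the context info setup.  Besides
 * the context info proper, this involves a PRPH scratch area and an
 * image loader (IML) which are copied into DMA memory and handed to
 * the device alongside the firmware sections.
 */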
int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 0);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
		    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF,
	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

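/*
 * Store one firmware section parsed from a TLV.  On disk a section is
 * a 32-bit device load offset followed by the section data.  The data
 * is not copied; the stored pointers remain valid only for as long as
 * the raw firmware image (fw_rawdata) is kept in memory.
 */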
int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type,
	    fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* The rest is the section data. */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

1144 /* Newer firmware might support more channels. Raise this value if needed. */
1145 #define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
1146 
1147 struct iwx_tlv_calib_data {
1148 	uint32_t ucode_type;
1149 	struct iwx_tlv_calib_ctrl calib;
1150 } __packed;
1151 
1152 int
1153 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1154 {
1155 	const struct iwx_tlv_calib_data *def_calib = data;
1156 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1157 
1158 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1159 		return EINVAL;
1160 
1161 	sc->sc_default_calib[ucode_type].flow_trigger =
1162 	    def_calib->calib.flow_trigger;
1163 	sc->sc_default_calib[ucode_type].event_trigger =
1164 	    def_calib->calib.event_trigger;
1165 
1166 	return 0;
1167 }
1168 
1169 void
1170 iwx_fw_info_free(struct iwx_fw_info *fw)
1171 {
1172 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1173 	fw->fw_rawdata = NULL;
1174 	fw->fw_rawsize = 0;
1175 	/* don't touch fw->fw_status */
1176 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1177 	free(fw->iml, M_DEVBUF, fw->iml_len);
1178 	fw->iml = NULL;
1179 	fw->iml_len = 0;
1180 }
1181 
1182 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1183 
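/*
 * Fetch the firmware image with loadfirmware(9) and parse its TLV
 * structure: a struct iwx_tlv_ucode_header followed by a sequence of
 * (type, length, value) records, each padded to 4-byte alignment.
 * Any TLV type not explicitly handled below causes the image to be
 * rejected with EINVAL.
 */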
int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	if (ic->ic_if.if_flags & IFF_DEBUG)
		printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
				le32toh(dbg_ptrs->error_info_addr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
				le32toh(dbg_ptrs->error_event_table_ptr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF, fw->iml_len);
				fw->iml_len = 0;
			}
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAIT | M_CANFAIL | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions =
			    tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

1623 uint32_t
1624 iwx_prph_addr_mask(struct iwx_softc *sc)
1625 {
1626 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1627 		return 0x00ffffff;
1628 	else
1629 		return 0x000fffff;
1630 }
1631 
1632 uint32_t
1633 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1634 {
1635 	uint32_t mask = iwx_prph_addr_mask(sc);
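	/*
	 * The (3 << 24) sets the byte-count bits (bits 25:24) of the
	 * target address register, selecting a full 4-byte access.
	 * The same encoding is used in iwx_write_prph_unlocked() below.
	 */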
1636 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1637 	IWX_BARRIER_READ_WRITE(sc);
1638 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1639 }
1640 
1641 uint32_t
1642 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1643 {
1644 	iwx_nic_assert_locked(sc);
1645 	return iwx_read_prph_unlocked(sc, addr);
1646 }
1647 
1648 void
1649 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1650 {
1651 	uint32_t mask = iwx_prph_addr_mask(sc);
1652 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1653 	IWX_BARRIER_WRITE(sc);
1654 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1655 }
1656 
1657 void
1658 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1659 {
1660 	iwx_nic_assert_locked(sc);
1661 	iwx_write_prph_unlocked(sc, addr, val);
1662 }
1663 
1664 void
1665 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1666 {
1667 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1668 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1669 }
1670 
1671 uint32_t
1672 iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1673 {
1674 	return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1675 }
1676 
1677 uint32_t
1678 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1679 {
1680 	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1681 }
1682 
1683 void
1684 iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1685 {
1686 	iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1687 }
1688 
1689 void
1690 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1691 {
1692 	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1693 }
1694 
1695 int
1696 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1697 {
1698 	int offs, err = 0;
1699 	uint32_t *vals = buf;
1700 
1701 	if (iwx_nic_lock(sc)) {
1702 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1703 		for (offs = 0; offs < dwords; offs++)
1704 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1705 		iwx_nic_unlock(sc);
1706 	} else {
1707 		err = EBUSY;
1708 	}
1709 	return err;
1710 }
1711 
1712 int
1713 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1714 {
1715 	int offs;
1716 	const uint32_t *vals = buf;
1717 
1718 	if (iwx_nic_lock(sc)) {
1719 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1720 		/* WADDR auto-increments */
1721 		for (offs = 0; offs < dwords; offs++) {
1722 			uint32_t val = vals ? vals[offs] : 0;
1723 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1724 		}
1725 		iwx_nic_unlock(sc);
1726 	} else {
1727 		return EBUSY;
1728 	}
1729 	return 0;
1730 }
1731 
1732 int
1733 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1734 {
1735 	return iwx_write_mem(sc, addr, &val, 1);
1736 }
1737 
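/*
 * Busy-wait until the bits selected by 'mask' in 'reg' match 'bits',
 * polling every 10 microseconds for at most 'timo' microseconds.
 * Returns 1 on success and 0 on timeout.
 */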
1738 int
1739 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1740     int timo)
1741 {
1742 	for (;;) {
1743 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1744 			return 1;
1745 		}
1746 		if (timo < 10) {
1747 			return 0;
1748 		}
1749 		timo -= 10;
1750 		DELAY(10);
1751 	}
1752 }
1753 
1754 int
1755 iwx_nic_lock(struct iwx_softc *sc)
1756 {
1757 	if (sc->sc_nic_locks > 0) {
1758 		iwx_nic_assert_locked(sc);
1759 		sc->sc_nic_locks++;
1760 		return 1; /* already locked */
1761 	}
1762 
1763 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1764 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1765 
1766 	DELAY(2);
1767 
1768 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1769 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1770 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1771 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1772 		sc->sc_nic_locks++;
1773 		return 1;
1774 	}
1775 
1776 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1777 	return 0;
1778 }
1779 
1780 void
1781 iwx_nic_assert_locked(struct iwx_softc *sc)
1782 {
1783 	if (sc->sc_nic_locks <= 0)
1784 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1785 }
1786 
1787 void
1788 iwx_nic_unlock(struct iwx_softc *sc)
1789 {
1790 	if (sc->sc_nic_locks > 0) {
1791 		if (--sc->sc_nic_locks == 0)
1792 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1793 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1794 	} else
1795 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1796 }
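/*
 * Illustrative sketch (not part of the driver): PRPH registers must
 * only be accessed while the NIC lock is held. The lock is counted,
 * so nested lock/unlock pairs are fine. A typical access pattern:
 *
 *	uint32_t val;
 *
 *	if (!iwx_nic_lock(sc))
 *		return EBUSY;
 *	val = iwx_read_prph(sc, reg);
 *	iwx_write_prph(sc, reg, val | some_bits);
 *	iwx_nic_unlock(sc);
 *
 * iwx_set_bits_mask_prph() below implements this very pattern.
 */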
1797 
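/*
 * Read-modify-write a PRPH register: preserve only the bits set in
 * 'mask', OR in 'bits', and write the result back.
 */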
1798 int
1799 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1800     uint32_t mask)
1801 {
1802 	uint32_t val;
1803 
1804 	if (iwx_nic_lock(sc)) {
1805 		val = iwx_read_prph(sc, reg) & mask;
1806 		val |= bits;
1807 		iwx_write_prph(sc, reg, val);
1808 		iwx_nic_unlock(sc);
1809 		return 0;
1810 	}
1811 	return EBUSY;
1812 }
1813 
1814 int
1815 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1816 {
1817 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1818 }
1819 
1820 int
1821 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1822 {
1823 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1824 }
1825 
1826 int
1827 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1828     bus_size_t size, bus_size_t alignment)
1829 {
1830 	int nsegs, err;
1831 	caddr_t va;
1832 
1833 	dma->tag = tag;
1834 	dma->size = size;
1835 
1836 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1837 	    &dma->map);
1838 	if (err)
1839 		goto fail;
1840 
1841 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1842 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1843 	if (err)
1844 		goto fail;
1845 
1846 	if (nsegs > 1) {
1847 		err = ENOMEM;
1848 		goto fail;
1849 	}
1850 
1851 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1852 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1853 	if (err)
1854 		goto fail;
1855 	dma->vaddr = va;
1856 
1857 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1858 	    BUS_DMA_NOWAIT);
1859 	if (err)
1860 		goto fail;
1861 
1862 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1863 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1864 
1865 	return 0;
1866 
1867 fail:	iwx_dma_contig_free(dma);
1868 	return err;
1869 }
1870 
1871 void
1872 iwx_dma_contig_free(struct iwx_dma_info *dma)
1873 {
1874 	if (dma->map != NULL) {
1875 		if (dma->vaddr != NULL) {
1876 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1877 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1878 			bus_dmamap_unload(dma->tag, dma->map);
1879 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1880 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1881 			dma->vaddr = NULL;
1882 		}
1883 		bus_dmamap_destroy(dma->tag, dma->map);
1884 		dma->map = NULL;
1885 	}
1886 }
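/*
 * Illustrative sketch (hypothetical code, not used by the driver):
 * a minimal allocate-use-free cycle for a coherent DMA buffer.
 * The buffer below is 4KB, 256-byte aligned, and zeroed on allocation.
 *
 *	struct iwx_dma_info dma;
 *
 *	if (iwx_dma_contig_alloc(sc->sc_dmat, &dma, 4096, 256) != 0)
 *		return ENOMEM;
 *	memset(dma.vaddr, 0xff, dma.size);
 *	bus_dmamap_sync(dma.tag, dma.map, 0, dma.size,
 *	    BUS_DMASYNC_PREWRITE);
 *	iwx_dma_contig_free(&dma);
 */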
1887 
1888 int
1889 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1890 {
1891 	bus_size_t size;
1892 	int i, err;
1893 
1894 	ring->cur = 0;
1895 
1896 	/* Allocate RX descriptors (256-byte aligned). */
1897 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1898 		size = sizeof(struct iwx_rx_transfer_desc);
1899 	else
1900 		size = sizeof(uint64_t);
1901 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1902 	    size * IWX_RX_MQ_RING_COUNT, 256);
1903 	if (err) {
1904 		printf("%s: could not allocate RX ring DMA memory\n",
1905 		    DEVNAME(sc));
1906 		goto fail;
1907 	}
1908 	ring->desc = ring->free_desc_dma.vaddr;
1909 
1910 	/* Allocate RX status area (16-byte aligned). */
1911 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1912 		size = sizeof(uint16_t);
1913 	else
1914 		size = sizeof(*ring->stat);
1915 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1916 	if (err) {
1917 		printf("%s: could not allocate RX status DMA memory\n",
1918 		    DEVNAME(sc));
1919 		goto fail;
1920 	}
1921 	ring->stat = ring->stat_dma.vaddr;
1922 
1923 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1924 		size = sizeof(struct iwx_rx_completion_desc);
1925 	else
1926 		size = sizeof(uint32_t);
1927 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1928 	    size * IWX_RX_MQ_RING_COUNT, 256);
1929 	if (err) {
1930 		printf("%s: could not allocate RX ring DMA memory\n",
1931 		    DEVNAME(sc));
1932 		goto fail;
1933 	}
1934 
1935 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1936 		struct iwx_rx_data *data = &ring->data[i];
1937 
1938 		memset(data, 0, sizeof(*data));
1939 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1940 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1941 		    &data->map);
1942 		if (err) {
1943 			printf("%s: could not create RX buf DMA map\n",
1944 			    DEVNAME(sc));
1945 			goto fail;
1946 		}
1947 
1948 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1949 		if (err)
1950 			goto fail;
1951 	}
1952 	return 0;
1953 
1954 fail:	iwx_free_rx_ring(sc, ring);
1955 	return err;
1956 }
1957 
1958 void
1959 iwx_disable_rx_dma(struct iwx_softc *sc)
1960 {
1961 	int ntries;
1962 
1963 	if (iwx_nic_lock(sc)) {
1964 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1965 			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1966 		else
1967 			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1968 		for (ntries = 0; ntries < 1000; ntries++) {
1969 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1970 				if (iwx_read_umac_prph(sc,
1971 				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1972 					break;
1973 			} else {
1974 				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1975 				    IWX_RXF_DMA_IDLE)
1976 					break;
1977 			}
1978 			DELAY(10);
1979 		}
1980 		iwx_nic_unlock(sc);
1981 	}
1982 }
1983 
1984 void
1985 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1986 {
1987 	ring->cur = 0;
1988 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1989 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1990 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1991 		uint16_t *status = ring->stat_dma.vaddr;
1992 		*status = 0;
1993 	} else
1994 		memset(ring->stat, 0, sizeof(*ring->stat));
1995 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1996 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1998 }
1999 
2000 void
2001 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2002 {
2003 	int i;
2004 
2005 	iwx_dma_contig_free(&ring->free_desc_dma);
2006 	iwx_dma_contig_free(&ring->stat_dma);
2007 	iwx_dma_contig_free(&ring->used_desc_dma);
2008 
2009 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2010 		struct iwx_rx_data *data = &ring->data[i];
2011 
2012 		if (data->m != NULL) {
2013 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2014 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2015 			bus_dmamap_unload(sc->sc_dmat, data->map);
2016 			m_freem(data->m);
2017 			data->m = NULL;
2018 		}
2019 		if (data->map != NULL)
2020 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2021 	}
2022 }
2023 
2024 int
2025 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2026 {
2027 	bus_addr_t paddr;
2028 	bus_size_t size;
2029 	int i, err;
2030 	size_t bc_tbl_size;
2031 	bus_size_t bc_align;
2032 
2033 	ring->qid = qid;
2034 	ring->queued = 0;
2035 	ring->cur = 0;
2036 	ring->cur_hw = 0;
2037 	ring->tail = 0;
2038 	ring->tail_hw = 0;
2039 
2040 	/* Allocate TX descriptors (256-byte aligned). */
2041 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2042 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2043 	if (err) {
2044 		printf("%s: could not allocate TX ring DMA memory\n",
2045 		    DEVNAME(sc));
2046 		goto fail;
2047 	}
2048 	ring->desc = ring->desc_dma.vaddr;
2049 
2050 	/*
2051 	 * The hardware supports up to 512 Tx rings which is more
2052 	 * than we currently need.
2053 	 *
2054 	 * In DQA mode we use 1 command queue + 1 default queue for
2055 	 * management, control, and non-QoS data frames.
2056 	 * The command queue is sc->txq[0], our default queue is sc->txq[1].
2057 	 *
2058 	 * Tx aggregation requires additional queues, one queue per TID for
2059 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2060 	 * Firmware may assign its own internal IDs for these queues
2061 	 * depending on which TID gets aggregation enabled first.
2062 	 * The driver maintains a table mapping driver-side queue IDs
2063 	 * to firmware-side queue IDs.
2064 	 */
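	/* For example, Tx aggregation on TID 3 would use driver queue
	 * sc->txq[5]. */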
2065 
2066 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2067 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2068 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2069 		bc_align = 128;
2070 	} else {
2071 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2072 		bc_align = 64;
2073 	}
2074 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2075 	    bc_align);
2076 	if (err) {
2077 		printf("%s: could not allocate byte count table DMA memory\n",
2078 		    DEVNAME(sc));
2079 		goto fail;
2080 	}
2081 
2082 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2083 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2084 	    IWX_FIRST_TB_SIZE_ALIGN);
2085 	if (err) {
2086 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2087 		goto fail;
2088 	}
2089 	ring->cmd = ring->cmd_dma.vaddr;
2090 
2091 	paddr = ring->cmd_dma.paddr;
2092 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2093 		struct iwx_tx_data *data = &ring->data[i];
2094 		size_t mapsize;
2095 
2096 		data->cmd_paddr = paddr;
2097 		paddr += sizeof(struct iwx_device_cmd);
2098 
2099 		/* FW commands may require more mapped space than packets. */
2100 		if (qid == IWX_DQA_CMD_QUEUE)
2101 			mapsize = (sizeof(struct iwx_cmd_header) +
2102 			    IWX_MAX_CMD_PAYLOAD_SIZE);
2103 		else
2104 			mapsize = MCLBYTES;
2105 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
2106 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2107 		    &data->map);
2108 		if (err) {
2109 			printf("%s: could not create TX buf DMA map\n",
2110 			    DEVNAME(sc));
2111 			goto fail;
2112 		}
2113 	}
2114 	KASSERT(paddr == ring->cmd_dma.paddr + size);
2115 	return 0;
2116 
2117 fail:	iwx_free_tx_ring(sc, ring);
2118 	return err;
2119 }
2120 
2121 void
2122 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2123 {
2124 	int i;
2125 
2126 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2127 		struct iwx_tx_data *data = &ring->data[i];
2128 
2129 		if (data->m != NULL) {
2130 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2131 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2132 			bus_dmamap_unload(sc->sc_dmat, data->map);
2133 			m_freem(data->m);
2134 			data->m = NULL;
2135 		}
2136 	}
2137 
2138 	/* Clear byte count table. */
2139 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2140 
2141 	/* Clear TX descriptors. */
2142 	memset(ring->desc, 0, ring->desc_dma.size);
2143 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2144 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2145 	sc->qfullmsk &= ~(1 << ring->qid);
2146 	sc->qenablemsk &= ~(1 << ring->qid);
2147 	for (i = 0; i < nitems(sc->aggqid); i++) {
2148 		if (sc->aggqid[i] == ring->qid) {
2149 			sc->aggqid[i] = 0;
2150 			break;
2151 		}
2152 	}
2153 	ring->queued = 0;
2154 	ring->cur = 0;
2155 	ring->cur_hw = 0;
2156 	ring->tail = 0;
2157 	ring->tail_hw = 0;
2158 	ring->tid = 0;
2159 }
2160 
2161 void
2162 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2163 {
2164 	int i;
2165 
2166 	iwx_dma_contig_free(&ring->desc_dma);
2167 	iwx_dma_contig_free(&ring->cmd_dma);
2168 	iwx_dma_contig_free(&ring->bc_tbl);
2169 
2170 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2171 		struct iwx_tx_data *data = &ring->data[i];
2172 
2173 		if (data->m != NULL) {
2174 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2175 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2176 			bus_dmamap_unload(sc->sc_dmat, data->map);
2177 			m_freem(data->m);
2178 			data->m = NULL;
2179 		}
2180 		if (data->map != NULL)
2181 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2182 	}
2183 }
2184 
2185 void
2186 iwx_enable_rfkill_int(struct iwx_softc *sc)
2187 {
2188 	if (!sc->sc_msix) {
2189 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2190 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2191 	} else {
2192 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2193 		    sc->sc_fh_init_mask);
2194 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2195 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2196 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2197 	}
2198 
2199 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2200 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2201 }
2202 
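/*
 * Check the hardware RF kill switch. Returns 1 and sets IWX_FLAG_RFKILL
 * if the radio is disabled, 0 otherwise.
 */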
2203 int
2204 iwx_check_rfkill(struct iwx_softc *sc)
2205 {
2206 	uint32_t v;
2207 	int rv;
2208 
2209 	/*
2210 	 * "documentation" is not really helpful here:
2211 	 *  27:	HW_RF_KILL_SW
2212 	 *	Indicates state of (platform's) hardware RF-Kill switch
2213 	 *
2214 	 * But apparently when it's off, it's on ...
2215 	 */
2216 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2217 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2218 	if (rv) {
2219 		sc->sc_flags |= IWX_FLAG_RFKILL;
2220 	} else {
2221 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2222 	}
2223 
2224 	return rv;
2225 }
2226 
2227 void
2228 iwx_enable_interrupts(struct iwx_softc *sc)
2229 {
2230 	if (!sc->sc_msix) {
2231 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2232 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2233 	} else {
2234 		/*
2235 		 * sc_fh_mask/sc_hw_mask keep track of all unmasked causes.
2236 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
2237 		 */
2238 		sc->sc_hw_mask = sc->sc_hw_init_mask;
2239 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2240 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2241 		    ~sc->sc_fh_mask);
2242 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2243 		    ~sc->sc_hw_mask);
2244 	}
2245 }
2246 
2247 void
2248 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2249 {
2250 	if (!sc->sc_msix) {
2251 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2252 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2253 	} else {
2254 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2255 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2256 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2257 		/*
2258 		 * Leave all the FH causes enabled to get the ALIVE
2259 		 * notification.
2260 		 */
2261 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2262 		    ~sc->sc_fh_init_mask);
2263 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2264 	}
2265 }
2266 
2267 void
2268 iwx_restore_interrupts(struct iwx_softc *sc)
2269 {
2270 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2271 }
2272 
2273 void
2274 iwx_disable_interrupts(struct iwx_softc *sc)
2275 {
2276 	if (!sc->sc_msix) {
2277 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2278 
2279 		/* acknowledge all interrupts */
2280 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2281 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2282 	} else {
2283 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2284 		    sc->sc_fh_init_mask);
2285 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2286 		    sc->sc_hw_init_mask);
2287 	}
2288 }
2289 
2290 void
2291 iwx_ict_reset(struct iwx_softc *sc)
2292 {
2293 	iwx_disable_interrupts(sc);
2294 
2295 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2296 	sc->ict_cur = 0;
2297 
2298 	/* Set physical address of ICT (4KB aligned). */
2299 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2300 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2301 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2302 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2303 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2304 
2305 	/* Switch to ICT interrupt mode in driver. */
2306 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2307 
2308 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2309 	iwx_enable_interrupts(sc);
2310 }
2311 
2312 #define IWX_HW_READY_TIMEOUT 50
2313 int
2314 iwx_set_hw_ready(struct iwx_softc *sc)
2315 {
2316 	int ready;
2317 
2318 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2319 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2320 
2321 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2322 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2323 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2324 	    IWX_HW_READY_TIMEOUT);
2325 	if (ready)
2326 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2327 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2328 
2329 	return ready;
2330 }
2331 #undef IWX_HW_READY_TIMEOUT
2332 
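/*
 * Ask the device to come out of low-power state and poll until it
 * reports ready; returns 0 on success or ETIMEDOUT if the device
 * never becomes ready.
 */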
2333 int
2334 iwx_prepare_card_hw(struct iwx_softc *sc)
2335 {
2336 	int t = 0;
2337 	int ntries;
2338 
2339 	if (iwx_set_hw_ready(sc))
2340 		return 0;
2341 
2342 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2343 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2344 	DELAY(1000);
2345 
2346 	for (ntries = 0; ntries < 10; ntries++) {
2347 		/* If HW is not ready, prepare the conditions to check again */
2348 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2349 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2350 
2351 		do {
2352 			if (iwx_set_hw_ready(sc))
2353 				return 0;
2354 			DELAY(200);
2355 			t += 200;
2356 		} while (t < 150000);
2357 		DELAY(25000);
2358 	}
2359 
2360 	return ETIMEDOUT;
2361 }
2362 
2363 int
2364 iwx_force_power_gating(struct iwx_softc *sc)
2365 {
2366 	int err;
2367 
2368 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2369 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2370 	if (err)
2371 		return err;
2372 	DELAY(20);
2373 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2374 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2375 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2376 	if (err)
2377 		return err;
2378 	DELAY(20);
2379 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2380 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2381 	return err;
2382 }
2383 
2384 void
2385 iwx_apm_config(struct iwx_softc *sc)
2386 {
2387 	pcireg_t lctl, cap;
2388 
2389 	/*
2390 	 * L0S states have been found to be unstable with our devices
2391 	 * and in newer hardware they are not officially supported at
2392 	 * all, so we must always set the L0S_DISABLED bit.
2393 	 */
2394 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2395 
2396 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2397 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2398 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2399 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2400 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2401 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2402 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2403 	    DEVNAME(sc),
2404 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2405 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2406 }
2407 
2408 /*
2409  * Start up the NIC's basic functionality after it has been reset,
2410  * e.g. after platform boot or shutdown.
2411  * NOTE: This does not load uCode nor start the embedded processor.
2412  */
2413 int
2414 iwx_apm_init(struct iwx_softc *sc)
2415 {
2416 	int err = 0;
2417 
2418 	/*
2419 	 * Disable L0s without affecting L1;
2420 	 *  don't wait for ICH L0s (ICH bug W/A)
2421 	 */
2422 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2423 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2424 
2425 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2426 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2427 
2428 	/*
2429 	 * Enable HAP INTA (interrupt from management bus) to
2430 	 * wake device's PCI Express link L1a -> L0s
2431 	 */
2432 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2433 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2434 
2435 	iwx_apm_config(sc);
2436 
2437 	/*
2438 	 * Set "initialization complete" bit to move adapter from
2439 	 * D0U* --> D0A* (powered-up active) state.
2440 	 */
2441 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2442 
2443 	/*
2444 	 * Wait for clock stabilization; once stabilized, access to
2445 	 * device-internal resources is supported, e.g. iwx_write_prph()
2446 	 * and accesses to uCode SRAM.
2447 	 */
2448 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2449 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2450 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2451 		printf("%s: timeout waiting for clock stabilization\n",
2452 		    DEVNAME(sc));
2453 		err = ETIMEDOUT;
2454 		goto out;
2455 	}
2456  out:
2457 	if (err)
2458 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2459 	return err;
2460 }
2461 
2462 void
2463 iwx_apm_stop(struct iwx_softc *sc)
2464 {
2465 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2466 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2467 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2468 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2469 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2470 	DELAY(1000);
2471 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2472 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2473 	DELAY(5000);
2474 
2475 	/* stop device's busmaster DMA activity */
2476 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2477 
2478 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2479 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2480 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2481 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2482 
2483 	/*
2484 	 * Clear "initialization complete" bit to move adapter from
2485 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2486 	 */
2487 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2488 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2489 }
2490 
2491 void
2492 iwx_init_msix_hw(struct iwx_softc *sc)
2493 {
2494 	iwx_conf_msix_hw(sc, 0);
2495 
2496 	if (!sc->sc_msix)
2497 		return;
2498 
2499 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2500 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2501 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2502 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2503 }
2504 
2505 void
2506 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2507 {
2508 	int vector = 0;
2509 
2510 	if (!sc->sc_msix) {
2511 		/* Newer chips default to MSIX. */
2512 		if (!stopped && iwx_nic_lock(sc)) {
2513 			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2514 			    IWX_UREG_CHICK_MSI_ENABLE);
2515 			iwx_nic_unlock(sc);
2516 		}
2517 		return;
2518 	}
2519 
2520 	if (!stopped && iwx_nic_lock(sc)) {
2521 		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2522 		    IWX_UREG_CHICK_MSIX_ENABLE);
2523 		iwx_nic_unlock(sc);
2524 	}
2525 
2526 	/* Disable all interrupts */
2527 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2528 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2529 
2530 	/* Map fallback-queue (command/mgmt) to a single vector */
2531 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2532 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2533 	/* Map RSS queue (data) to the same vector */
2534 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2535 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2536 
2537 	/* Enable interrupts for the RX queue causes. */
2538 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2539 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2540 
2541 	/* Map non-RX causes to the same vector */
2542 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2543 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2544 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2545 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2546 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2547 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2548 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2549 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2550 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2551 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2552 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2553 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2554 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2555 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2556 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2557 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2558 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2559 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2560 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2561 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2562 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2563 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2564 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2565 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2566 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2567 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2568 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2569 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2570 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2571 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2572 
2573 	/* Enable interrupts for the non-RX causes. */
2574 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2575 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2576 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2577 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2578 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2579 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2580 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2581 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2582 	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2583 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2584 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2585 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2586 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2587 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2588 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2589 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2590 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2591 }
2592 
2593 int
2594 iwx_clear_persistence_bit(struct iwx_softc *sc)
2595 {
2596 	uint32_t hpm, wprot;
2597 
2598 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2599 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2600 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2601 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2602 			printf("%s: cannot clear persistence bit\n",
2603 			    DEVNAME(sc));
2604 			return EPERM;
2605 		}
2606 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2607 		    hpm & ~IWX_PERSISTENCE_BIT);
2608 	}
2609 
2610 	return 0;
2611 }
2612 
2613 int
2614 iwx_start_hw(struct iwx_softc *sc)
2615 {
2616 	int err;
2617 
2618 	err = iwx_prepare_card_hw(sc);
2619 	if (err)
2620 		return err;
2621 
2622 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2623 		err = iwx_clear_persistence_bit(sc);
2624 		if (err)
2625 			return err;
2626 	}
2627 
2628 	/* Reset the entire device */
2629 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2630 	DELAY(5000);
2631 
2632 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2633 	    sc->sc_integrated) {
2634 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2635 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2636 		DELAY(20);
2637 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2638 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2639 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2640 			printf("%s: timeout waiting for clock stabilization\n",
2641 			    DEVNAME(sc));
2642 			return ETIMEDOUT;
2643 		}
2644 
2645 		err = iwx_force_power_gating(sc);
2646 		if (err)
2647 			return err;
2648 
2649 		/* Reset the entire device */
2650 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2651 		DELAY(5000);
2652 	}
2653 
2654 	err = iwx_apm_init(sc);
2655 	if (err)
2656 		return err;
2657 
2658 	iwx_init_msix_hw(sc);
2659 
2660 	iwx_enable_rfkill_int(sc);
2661 	iwx_check_rfkill(sc);
2662 
2663 	return 0;
2664 }
2665 
2666 void
2667 iwx_stop_device(struct iwx_softc *sc)
2668 {
2669 	struct ieee80211com *ic = &sc->sc_ic;
2670 	struct ieee80211_node *ni = ic->ic_bss;
2671 	int i;
2672 
2673 	iwx_disable_interrupts(sc);
2674 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2675 
2676 	iwx_disable_rx_dma(sc);
2677 	iwx_reset_rx_ring(sc, &sc->rxq);
2678 	for (i = 0; i < nitems(sc->txq); i++)
2679 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2680 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2681 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2682 		if (ba->ba_state != IEEE80211_BA_AGREED)
2683 			continue;
2684 		ieee80211_delba_request(ic, ni, 0, 1, i);
2685 	}
2686 
2687 	/* Make sure (redundant) we've released our request to stay awake */
2688 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2689 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2690 	if (sc->sc_nic_locks > 0)
2691 		printf("%s: %d active NIC locks forcefully cleared\n",
2692 		    DEVNAME(sc), sc->sc_nic_locks);
2693 	sc->sc_nic_locks = 0;
2694 
2695 	/* Stop the device, and put it in low power state */
2696 	iwx_apm_stop(sc);
2697 
2698 	/* Reset the on-board processor. */
2699 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2700 	DELAY(5000);
2701 
2702 	/*
2703 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2704 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2705 	 * that enables radio won't fire on the correct irq, and the
2706 	 * driver won't be able to handle the interrupt.
2707 	 * Configure the IVAR table again after reset.
2708 	 */
2709 	iwx_conf_msix_hw(sc, 1);
2710 
2711 	/*
2712 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2713 	 * Clear the interrupt again.
2714 	 */
2715 	iwx_disable_interrupts(sc);
2716 
2717 	/* Even though we stop the HW we still want the RF kill interrupt. */
2718 	iwx_enable_rfkill_int(sc);
2719 	iwx_check_rfkill(sc);
2720 
2721 	iwx_prepare_card_hw(sc);
2722 
2723 	iwx_ctxt_info_free_paging(sc);
2724 	iwx_dma_contig_free(&sc->pnvm_dma);
2725 }
2726 
2727 void
2728 iwx_nic_config(struct iwx_softc *sc)
2729 {
2730 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2731 	uint32_t mask, val, reg_val = 0;
2732 
2733 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2734 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2735 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2736 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2737 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2738 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2739 
2740 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2741 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2742 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2743 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2744 
2745 	/* radio configuration */
2746 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2747 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2748 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2749 
2750 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2751 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2752 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2753 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2754 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2755 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2756 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2757 
2758 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2759 	val &= ~mask;
2760 	val |= reg_val;
2761 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2762 }
2763 
2764 int
2765 iwx_nic_rx_init(struct iwx_softc *sc)
2766 {
2767 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2768 
2769 	/*
2770 	 * We don't configure the RFH; the firmware will do that.
2771 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2772 	 */
2773 	return 0;
2774 }
2775 
2776 int
2777 iwx_nic_init(struct iwx_softc *sc)
2778 {
2779 	int err;
2780 
2781 	iwx_apm_init(sc);
2782 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2783 		iwx_nic_config(sc);
2784 
2785 	err = iwx_nic_rx_init(sc);
2786 	if (err)
2787 		return err;
2788 
2789 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2790 
2791 	return 0;
2792 }
2793 
2794 /* Map a TID to an ieee80211_edca_ac category. */
2795 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2796 	EDCA_AC_BE,
2797 	EDCA_AC_BK,
2798 	EDCA_AC_BK,
2799 	EDCA_AC_BE,
2800 	EDCA_AC_VI,
2801 	EDCA_AC_VI,
2802 	EDCA_AC_VO,
2803 	EDCA_AC_VO,
2804 };
2805 
2806 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2807 const uint8_t iwx_ac_to_tx_fifo[] = {
2808 	IWX_GEN2_EDCA_TX_FIFO_BE,
2809 	IWX_GEN2_EDCA_TX_FIFO_BK,
2810 	IWX_GEN2_EDCA_TX_FIFO_VI,
2811 	IWX_GEN2_EDCA_TX_FIFO_VO,
2812 };
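/*
 * Example: the firmware Tx FIFO for a given TID is found by chaining
 * the two tables above, e.g. iwx_ac_to_tx_fifo[iwx_tid_to_ac[6]]
 * yields IWX_GEN2_EDCA_TX_FIFO_VO for a voice TID.
 */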
2813 
2814 int
2815 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2816     int num_slots)
2817 {
2818 	struct iwx_rx_packet *pkt;
2819 	struct iwx_tx_queue_cfg_rsp *resp;
2820 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2821 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2822 	struct iwx_host_cmd hcmd = {
2823 		.flags = IWX_CMD_WANT_RESP,
2824 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2825 	};
2826 	struct iwx_tx_ring *ring = &sc->txq[qid];
2827 	int err, fwqid, cmd_ver;
2828 	uint32_t wr_idx;
2829 	size_t resp_len;
2830 
2831 	iwx_reset_tx_ring(sc, ring);
2832 
2833 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2834 	    IWX_SCD_QUEUE_CONFIG_CMD);
2835 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2836 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2837 		cmd_v0.sta_id = sta_id;
2838 		cmd_v0.tid = tid;
2839 		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2840 		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2841 		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2842 		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
2843 		hcmd.id = IWX_SCD_QUEUE_CFG;
2844 		hcmd.data[0] = &cmd_v0;
2845 		hcmd.len[0] = sizeof(cmd_v0);
2846 	} else if (cmd_ver == 3) {
2847 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2848 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
2849 		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
2850 		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
2851 		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2852 		cmd_v3.u.add.flags = htole32(0);
2853 		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
2854 		cmd_v3.u.add.tid = tid;
2855 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2856 		    IWX_SCD_QUEUE_CONFIG_CMD);
2857 		hcmd.data[0] = &cmd_v3;
2858 		hcmd.len[0] = sizeof(cmd_v3);
2859 	} else {
2860 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2861 		    DEVNAME(sc), cmd_ver);
2862 		return ENOTSUP;
2863 	}
2864 
2865 	err = iwx_send_cmd(sc, &hcmd);
2866 	if (err)
2867 		return err;
2868 
2869 	pkt = hcmd.resp_pkt;
2870 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2871 		err = EIO;
2872 		goto out;
2873 	}
2874 
2875 	resp_len = iwx_rx_packet_payload_len(pkt);
2876 	if (resp_len != sizeof(*resp)) {
2877 		err = EIO;
2878 		goto out;
2879 	}
2880 
2881 	resp = (void *)pkt->data;
2882 	fwqid = le16toh(resp->queue_number);
2883 	wr_idx = le16toh(resp->write_pointer);
2884 
2885 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2886 	if (fwqid != qid) {
2887 		err = EIO;
2888 		goto out;
2889 	}
2890 
2891 	if (wr_idx != ring->cur_hw) {
2892 		err = EIO;
2893 		goto out;
2894 	}
2895 
2896 	sc->qenablemsk |= (1 << qid);
2897 	ring->tid = tid;
2898 out:
2899 	iwx_free_resp(sc, &hcmd);
2900 	return err;
2901 }
2902 
2903 int
2904 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2905 {
2906 	struct iwx_rx_packet *pkt;
2907 	struct iwx_tx_queue_cfg_rsp *resp;
2908 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2909 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2910 	struct iwx_host_cmd hcmd = {
2911 		.flags = IWX_CMD_WANT_RESP,
2912 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2913 	};
2914 	struct iwx_tx_ring *ring = &sc->txq[qid];
2915 	int err, cmd_ver;
2916 
2917 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2918 	    IWX_SCD_QUEUE_CONFIG_CMD);
2919 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2920 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2921 		cmd_v0.sta_id = sta_id;
2922 		cmd_v0.tid = tid;
2923 		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
2924 		cmd_v0.cb_size = htole32(0);
2925 		cmd_v0.byte_cnt_addr = htole64(0);
2926 		cmd_v0.tfdq_addr = htole64(0);
2927 		hcmd.id = IWX_SCD_QUEUE_CFG;
2928 		hcmd.data[0] = &cmd_v0;
2929 		hcmd.len[0] = sizeof(cmd_v0);
2930 	} else if (cmd_ver == 3) {
2931 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2932 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
2933 		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
2934 		cmd_v3.u.remove.tid = tid;
2935 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2936 		    IWX_SCD_QUEUE_CONFIG_CMD);
2937 		hcmd.data[0] = &cmd_v3;
2938 		hcmd.len[0] = sizeof(cmd_v3);
2939 	} else {
2940 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2941 		    DEVNAME(sc), cmd_ver);
2942 		return ENOTSUP;
2943 	}
2944 
2945 	err = iwx_send_cmd(sc, &hcmd);
2946 	if (err)
2947 		return err;
2948 
2949 	pkt = hcmd.resp_pkt;
2950 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2951 		err = EIO;
2952 		goto out;
2953 	}
2954 
2955 	sc->qenablemsk &= ~(1 << qid);
2956 	iwx_reset_tx_ring(sc, ring);
2957 out:
2958 	iwx_free_resp(sc, &hcmd);
2959 	return err;
2960 }
2961 
2962 void
2963 iwx_post_alive(struct iwx_softc *sc)
2964 {
2965 	int txcmd_ver;
2966 
2967 	iwx_ict_reset(sc);
2968 
2969 	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2970 	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
2971 		sc->sc_rate_n_flags_version = 2;
2972 	else
2973 		sc->sc_rate_n_flags_version = 1;
2976 }
2977 
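/*
 * Session protection asks the firmware to keep the device on the
 * BSS channel for duration_tu time units (1 TU = 1024 usec), e.g.
 * while association with the AP is in progress.
 */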
2978 int
2979 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2980     uint32_t duration_tu)
2981 {
2982 	struct iwx_session_prot_cmd cmd = {
2983 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2984 		    in->in_color)),
2985 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2986 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2987 		.duration_tu = htole32(duration_tu),
2988 	};
2989 	uint32_t cmd_id;
2990 	int err;
2991 
2992 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2993 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2994 	if (!err)
2995 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2996 	return err;
2997 }
2998 
2999 void
3000 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3001 {
3002 	struct iwx_session_prot_cmd cmd = {
3003 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3004 		    in->in_color)),
3005 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3006 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3007 		.duration_tu = 0,
3008 	};
3009 	uint32_t cmd_id;
3010 
3011 	/* Do nothing if the time event has already ended. */
3012 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3013 		return;
3014 
3015 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3016 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3017 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3018 }
3019 
3020 /*
3021  * NVM read access and content parsing.  We do not support
3022  * external NVM or writing NVM.
3023  */
3024 
3025 uint8_t
3026 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3027 {
3028 	uint8_t tx_ant;
3029 
3030 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3031 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3032 
3033 	if (sc->sc_nvm.valid_tx_ant)
3034 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3035 
3036 	return tx_ant;
3037 }
3038 
3039 uint8_t
3040 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3041 {
3042 	uint8_t rx_ant;
3043 
3044 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3045 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3046 
3047 	if (sc->sc_nvm.valid_rx_ant)
3048 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3049 
3050 	return rx_ant;
3051 }
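/*
 * Both functions above return a bitmask of usable antennas; for
 * example, a result of IWX_ANT_AB means antennas A and B may be used.
 */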
3052 
3053 void
3054 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
3055     uint32_t *channel_profile_v4, int nchan_profile)
3056 {
3057 	struct ieee80211com *ic = &sc->sc_ic;
3058 	struct iwx_nvm_data *data = &sc->sc_nvm;
3059 	int ch_idx;
3060 	struct ieee80211_channel *channel;
3061 	uint32_t ch_flags;
3062 	int is_5ghz;
3063 	int flags, hw_value;
3064 	int nchan;
3065 	const uint8_t *nvm_channels;
3066 
3067 	if (sc->sc_uhb_supported) {
3068 		nchan = nitems(iwx_nvm_channels_uhb);
3069 		nvm_channels = iwx_nvm_channels_uhb;
3070 	} else {
3071 		nchan = nitems(iwx_nvm_channels_8000);
3072 		nvm_channels = iwx_nvm_channels_8000;
3073 	}
3074 
3075 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3076 		if (channel_profile_v4)
3077 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3078 		else
3079 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3080 
3081 		/* net80211 cannot handle 6 GHz channel numbers yet */
3082 		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3083 			break;
3084 
3085 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3086 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3087 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3088 
3089 		hw_value = nvm_channels[ch_idx];
3090 		channel = &ic->ic_channels[hw_value];
3091 
3092 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3093 			channel->ic_freq = 0;
3094 			channel->ic_flags = 0;
3095 			continue;
3096 		}
3097 
3098 		if (!is_5ghz) {
3099 			flags = IEEE80211_CHAN_2GHZ;
3100 			channel->ic_flags
3101 			    = IEEE80211_CHAN_CCK
3102 			    | IEEE80211_CHAN_OFDM
3103 			    | IEEE80211_CHAN_DYN
3104 			    | IEEE80211_CHAN_2GHZ;
3105 		} else {
3106 			flags = IEEE80211_CHAN_5GHZ;
3107 			channel->ic_flags =
3108 			    IEEE80211_CHAN_A;
3109 		}
3110 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3111 
3112 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3113 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3114 
3115 		if (data->sku_cap_11n_enable) {
3116 			channel->ic_flags |= IEEE80211_CHAN_HT;
3117 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3118 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3119 		}
3120 
3121 		if (is_5ghz && data->sku_cap_11ac_enable) {
3122 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3123 			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3124 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3125 		}
3126 	}
3127 }
3128 
3129 int
3130 iwx_mimo_enabled(struct iwx_softc *sc)
3131 {
3132 	struct ieee80211com *ic = &sc->sc_ic;
3133 
3134 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3135 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3136 }
3137 
3138 void
3139 iwx_setup_ht_rates(struct iwx_softc *sc)
3140 {
3141 	struct ieee80211com *ic = &sc->sc_ic;
3142 	uint8_t rx_ant;
3143 
3144 	/* TX is supported with the same MCS as RX. */
3145 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3146 
3147 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3148 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3149 
3150 	if (!iwx_mimo_enabled(sc))
3151 		return;
3152 
3153 	rx_ant = iwx_fw_valid_rx_ant(sc);
3154 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3155 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3156 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3157 }
3158 
3159 void
3160 iwx_setup_vht_rates(struct iwx_softc *sc)
3161 {
3162 	struct ieee80211com *ic = &sc->sc_ic;
3163 	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3164 	int n;
3165 
3166 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3167 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3168 
3169 	if (iwx_mimo_enabled(sc) &&
3170 	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3171 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3172 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3173 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3174 	} else {
3175 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3176 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3177 	}
3178 
3179 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3180 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3181 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3182 	}
3183 
3184 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3185 }
3186 
3187 void
3188 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3189     uint16_t ssn, uint16_t buf_size)
3190 {
3191 	reorder_buf->head_sn = ssn;
3192 	reorder_buf->num_stored = 0;
3193 	reorder_buf->buf_size = buf_size;
3194 	reorder_buf->last_amsdu = 0;
3195 	reorder_buf->last_sub_index = 0;
3196 	reorder_buf->removed = 0;
3197 	reorder_buf->valid = 0;
3198 	reorder_buf->consec_oldsn_drops = 0;
3199 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3200 	reorder_buf->consec_oldsn_prev_drop = 0;
3201 }
3202 
3203 void
3204 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3205 {
3206 	int i;
3207 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3208 	struct iwx_reorder_buf_entry *entry;
3209 
3210 	for (i = 0; i < reorder_buf->buf_size; i++) {
3211 		entry = &rxba->entries[i];
3212 		ml_purge(&entry->frames);
3213 		timerclear(&entry->reorder_time);
3214 	}
3215 
3216 	reorder_buf->removed = 1;
3217 	timeout_del(&reorder_buf->reorder_timer);
3218 	timerclear(&rxba->last_rx);
3219 	timeout_del(&rxba->session_timer);
3220 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3221 }
3222 
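/* Release frames stuck in the Rx reorder buffer after 100ms (100000 usec). */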
3223 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3224 
3225 void
3226 iwx_rx_ba_session_expired(void *arg)
3227 {
3228 	struct iwx_rxba_data *rxba = arg;
3229 	struct iwx_softc *sc = rxba->sc;
3230 	struct ieee80211com *ic = &sc->sc_ic;
3231 	struct ieee80211_node *ni = ic->ic_bss;
3232 	struct timeval now, timeout, expiry;
3233 	int s;
3234 
3235 	s = splnet();
3236 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3237 	    ic->ic_state == IEEE80211_S_RUN &&
3238 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3239 		getmicrouptime(&now);
3240 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3241 		timeradd(&rxba->last_rx, &timeout, &expiry);
3242 		if (timercmp(&now, &expiry, <)) {
3243 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3244 		} else {
3245 			ic->ic_stats.is_ht_rx_ba_timeout++;
3246 			ieee80211_delba_request(ic, ni,
3247 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3248 		}
3249 	}
3250 	splx(s);
3251 }
3252 
3253 void
3254 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3255     struct mbuf_list *ml)
3256 {
3257 	struct ieee80211com *ic = &sc->sc_ic;
3258 	struct ieee80211_node *ni = ic->ic_bss;
3259 	struct iwx_bar_frame_release *release = (void *)pkt->data;
3260 	struct iwx_reorder_buffer *buf;
3261 	struct iwx_rxba_data *rxba;
3262 	unsigned int baid, nssn, sta_id, tid;
3263 
3264 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3265 		return;
3266 
3267 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3268 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3269 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3270 	    baid >= nitems(sc->sc_rxba_data))
3271 		return;
3272 
3273 	rxba = &sc->sc_rxba_data[baid];
3274 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3275 		return;
3276 
3277 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3278 	sta_id = (le32toh(release->sta_tid) &
3279 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3280 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3281 		return;
3282 
3283 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3284 	buf = &rxba->reorder_buf;
3285 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3286 }
3287 
3288 void
3289 iwx_reorder_timer_expired(void *arg)
3290 {
3291 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3292 	struct iwx_reorder_buffer *buf = arg;
3293 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3294 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3295 	struct iwx_softc *sc = rxba->sc;
3296 	struct ieee80211com *ic = &sc->sc_ic;
3297 	struct ieee80211_node *ni = ic->ic_bss;
3298 	int i, s;
3299 	uint16_t sn = 0, index = 0;
3300 	int expired = 0;
3301 	int cont = 0;
3302 	struct timeval now, timeout, expiry;
3303 
3304 	if (!buf->num_stored || buf->removed)
3305 		return;
3306 
3307 	s = splnet();
3308 	getmicrouptime(&now);
3309 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3310 
3311 	for (i = 0; i < buf->buf_size; i++) {
3312 		index = (buf->head_sn + i) % buf->buf_size;
3313 
3314 		if (ml_empty(&entries[index].frames)) {
3315 			/*
3316 			 * If there is a hole and the next frame didn't expire
3317 			 * we want to break and not advance SN.
3318 			 */
3319 			cont = 0;
3320 			continue;
3321 		}
3322 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3323 		if (!cont && timercmp(&now, &expiry, <))
3324 			break;
3325 
3326 		expired = 1;
3327 		/* continue until next hole after this expired frame */
3328 		cont = 1;
3329 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3330 	}
3331 
3332 	if (expired) {
3333 		/* SN is set to the last expired frame + 1 */
3334 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3335 		if_input(&sc->sc_ic.ic_if, &ml);
3336 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3337 	} else {
3338 		/*
3339 		 * If no frame expired and there are stored frames, index now
3340 		 * points to the first unexpired frame; adjust the reorder
3341 		 * timeout accordingly.
3342 		 */
3343 		timeout_add_usec(&buf->reorder_timer,
3344 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3345 	}
3346 
3347 	splx(s);
3348 }
3349 
3350 #define IWX_MAX_RX_BA_SESSIONS 16
3351 
3352 struct iwx_rxba_data *
3353 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3354 {
3355 	int i;
3356 
3357 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3358 		if (sc->sc_rxba_data[i].baid ==
3359 		    IWX_RX_REORDER_DATA_INVALID_BAID)
3360 			continue;
3361 		if (sc->sc_rxba_data[i].tid == tid)
3362 			return &sc->sc_rxba_data[i];
3363 	}
3364 
3365 	return NULL;
3366 }
3367 
3368 int
3369 iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3370     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3371     uint8_t *baid)
3372 {
3373 	struct iwx_rx_baid_cfg_cmd cmd;
3374 	uint32_t new_baid = 0;
3375 	int err;
3376 
3377 	splassert(IPL_NET);
3378 
3379 	memset(&cmd, 0, sizeof(cmd));
3380 
3381 	if (start) {
3382 		cmd.action = IWX_RX_BAID_ACTION_ADD;
3383 		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
3384 		cmd.alloc.tid = tid;
3385 		cmd.alloc.ssn = htole16(ssn);
3386 		cmd.alloc.win_size = htole16(winsize);
3387 	} else {
3388 		struct iwx_rxba_data *rxba;
3389 
3390 		rxba = iwx_find_rxba_data(sc, tid);
3391 		if (rxba == NULL)
3392 			return ENOENT;
3393 		*baid = rxba->baid;
3394 
3395 		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
3396 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
3397 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
3398 			cmd.remove_v1.baid = rxba->baid;
3399 		} else {
3400 			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
3401 			cmd.remove.tid = tid;
3402 		}
3403 	}
3404 
3405 	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
3406 	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
3407 	if (err)
3408 		return err;
3409 
3410 	if (start) {
3411 		if (new_baid >= nitems(sc->sc_rxba_data))
3412 			return ERANGE;
3413 		*baid = new_baid;
3414 	}
3415 
3416 	return 0;
3417 }
3418 
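/*
 * Start or stop an Rx block ack session via the legacy IWX_ADD_STA
 * command. The firmware reports the assigned BAID in the status word
 * it returns.
 */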
3419 int
3420 iwx_sta_rx_agg_sta_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3421     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3422     uint8_t *baid)
3423 {
3424 	struct iwx_add_sta_cmd cmd;
3425 	struct iwx_node *in = (void *)ni;
3426 	int err;
3427 	uint32_t status;
3428 
3429 	splassert(IPL_NET);
3430 
3431 	memset(&cmd, 0, sizeof(cmd));
3432 
3433 	cmd.sta_id = IWX_STATION_ID;
3434 	cmd.mac_id_n_color
3435 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3436 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3437 
3438 	if (start) {
3439 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3440 		cmd.add_immediate_ba_ssn = htole16(ssn);
3441 		cmd.rx_ba_window = htole16(winsize);
3442 	} else {
3443 		struct iwx_rxba_data *rxba;
3444 
3445 		rxba = iwx_find_rxba_data(sc, tid);
3446 		if (rxba == NULL)
3447 			return ENOENT;
3448 		*baid = rxba->baid;
3449 
3450 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3451 	}
3452 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3453 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3454 
3455 	status = IWX_ADD_STA_SUCCESS;
3456 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3457 	    &status);
3458 	if (err)
3459 		return err;
3460 
3461 	if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
3462 		return EIO;
3463 
3464 	if (!(status & IWX_ADD_STA_BAID_VALID_MASK))
3465 		return EINVAL;
3466 
3467 	if (start) {
3468 		*baid = (status & IWX_ADD_STA_BAID_MASK) >>
3469 		    IWX_ADD_STA_BAID_SHIFT;
3470 		if (*baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3471 		    *baid >= nitems(sc->sc_rxba_data))
3472 			return ERANGE;
3473 	}
3474 
3475 	return 0;
3476 }
3477 
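/*
 * Start or stop an Rx block ack session in response to an ADDBA
 * request, using whichever configuration command this firmware
 * supports. On success, set up the corresponding reorder buffer and
 * take over inactivity timeout handling for this session from
 * net80211.
 */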
3478 void
3479 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3480     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3481 {
3482 	struct ieee80211com *ic = &sc->sc_ic;
3483 	int err, s;
3484 	struct iwx_rxba_data *rxba = NULL;
3485 	uint8_t baid = 0;
3486 
3487 	s = splnet();
3488 
3489 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3490 		ieee80211_addba_req_refuse(ic, ni, tid);
3491 		splx(s);
3492 		return;
3493 	}
3494 
3495 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
3496 		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
3497 		    timeout_val, start, &baid);
3498 	} else {
3499 		err = iwx_sta_rx_agg_sta_cmd(sc, ni, tid, ssn, winsize,
3500 		    timeout_val, start, &baid);
3501 	}
3502 	if (err) {
3503 		ieee80211_addba_req_refuse(ic, ni, tid);
3504 		splx(s);
3505 		return;
3506 	}
3507 
3508 	rxba = &sc->sc_rxba_data[baid];
3509 
3510 	/* Deaggregation is done in hardware. */
3511 	if (start) {
3512 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3513 			ieee80211_addba_req_refuse(ic, ni, tid);
3514 			splx(s);
3515 			return;
3516 		}
3517 		rxba->sta_id = IWX_STATION_ID;
3518 		rxba->tid = tid;
3519 		rxba->baid = baid;
3520 		rxba->timeout = timeout_val;
3521 		getmicrouptime(&rxba->last_rx);
3522 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3523 		    winsize);
3524 		if (timeout_val != 0) {
3525 			struct ieee80211_rx_ba *ba;
3526 			timeout_add_usec(&rxba->session_timer,
3527 			    timeout_val);
3528 			/* XXX disable net80211's BA timeout handler */
3529 			ba = &ni->ni_rx_ba[tid];
3530 			ba->ba_timeout_val = 0;
3531 		}
3532 	} else
3533 		iwx_clear_reorder_buffer(sc, rxba);
3534 
3535 	if (start) {
3536 		sc->sc_rx_ba_sessions++;
3537 		ieee80211_addba_req_accept(ic, ni, tid);
3538 	} else if (sc->sc_rx_ba_sessions > 0)
3539 		sc->sc_rx_ba_sessions--;
3540 
3541 	splx(s);
3542 }
3543 
3544 void
3545 iwx_mac_ctxt_task(void *arg)
3546 {
3547 	struct iwx_softc *sc = arg;
3548 	struct ieee80211com *ic = &sc->sc_ic;
3549 	struct iwx_node *in = (void *)ic->ic_bss;
3550 	int err, s = splnet();
3551 
3552 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3553 	    ic->ic_state != IEEE80211_S_RUN) {
3554 		refcnt_rele_wake(&sc->task_refs);
3555 		splx(s);
3556 		return;
3557 	}
3558 
3559 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3560 	if (err)
3561 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3562 
3563 	iwx_unprotect_session(sc, in);
3564 
3565 	refcnt_rele_wake(&sc->task_refs);
3566 	splx(s);
3567 }
3568 
3569 void
3570 iwx_phy_ctxt_task(void *arg)
3571 {
3572 	struct iwx_softc *sc = arg;
3573 	struct ieee80211com *ic = &sc->sc_ic;
3574 	struct iwx_node *in = (void *)ic->ic_bss;
3575 	struct ieee80211_node *ni = &in->in_ni;
3576 	uint8_t chains, sco, vht_chan_width;
3577 	int err, s = splnet();
3578 
3579 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3580 	    ic->ic_state != IEEE80211_S_RUN ||
3581 	    in->in_phyctxt == NULL) {
3582 		refcnt_rele_wake(&sc->task_refs);
3583 		splx(s);
3584 		return;
3585 	}
3586 
3587 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3588 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3589 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3590 	    ieee80211_node_supports_ht_chan40(ni))
3591 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3592 	else
3593 		sco = IEEE80211_HTOP0_SCO_SCN;
3594 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3595 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3596 	    ieee80211_node_supports_vht_chan80(ni))
3597 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3598 	else
3599 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3600 	if (in->in_phyctxt->sco != sco ||
3601 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3602 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3603 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3604 		    vht_chan_width);
3605 		if (err)
3606 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3607 	}
3608 
3609 	refcnt_rele_wake(&sc->task_refs);
3610 	splx(s);
3611 }
3612 
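/*
 * The net80211 callbacks below defer their work to the MAC/PHY
 * context tasks above: updating firmware state requires sleeping on
 * command responses, and a pending state transition will update the
 * firmware anyway.
 */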
3613 void
3614 iwx_updatechan(struct ieee80211com *ic)
3615 {
3616 	struct iwx_softc *sc = ic->ic_softc;
3617 
3618 	if (ic->ic_state == IEEE80211_S_RUN &&
3619 	    !task_pending(&sc->newstate_task))
3620 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3621 }
3622 
3623 void
3624 iwx_updateprot(struct ieee80211com *ic)
3625 {
3626 	struct iwx_softc *sc = ic->ic_softc;
3627 
3628 	if (ic->ic_state == IEEE80211_S_RUN &&
3629 	    !task_pending(&sc->newstate_task))
3630 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3631 }
3632 
3633 void
3634 iwx_updateslot(struct ieee80211com *ic)
3635 {
3636 	struct iwx_softc *sc = ic->ic_softc;
3637 
3638 	if (ic->ic_state == IEEE80211_S_RUN &&
3639 	    !task_pending(&sc->newstate_task))
3640 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3641 }
3642 
3643 void
3644 iwx_updateedca(struct ieee80211com *ic)
3645 {
3646 	struct iwx_softc *sc = ic->ic_softc;
3647 
3648 	if (ic->ic_state == IEEE80211_S_RUN &&
3649 	    !task_pending(&sc->newstate_task))
3650 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3651 }
3652 
3653 void
3654 iwx_updatedtim(struct ieee80211com *ic)
3655 {
3656 	struct iwx_softc *sc = ic->ic_softc;
3657 
3658 	if (ic->ic_state == IEEE80211_S_RUN &&
3659 	    !task_pending(&sc->newstate_task))
3660 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3661 }
3662 
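/*
 * Set up a Tx aggregation queue for the given TID.
 * The firmware manages the Tx block ack session itself; the driver
 * merely enables a suitable Tx queue and answers the ADDBA response.
 */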
3663 void
3664 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3665     uint8_t tid)
3666 {
3667 	struct ieee80211com *ic = &sc->sc_ic;
3668 	struct ieee80211_tx_ba *ba;
3669 	int err, qid;
3670 	struct iwx_tx_ring *ring;
3671 
3672 	/* Ensure we can map this TID to an aggregation queue. */
3673 	if (tid >= IWX_MAX_TID_COUNT)
3674 		return;
3675 
3676 	ba = &ni->ni_tx_ba[tid];
3677 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3678 		return;
3679 
3680 	qid = sc->aggqid[tid];
3681 	if (qid == 0) {
3682 		/* Firmware should pick the next unused Tx queue. */
3683 		qid = fls(sc->qenablemsk);
3684 	}
3685 
3686 	/*
3687 	 * Simply enable the queue.
3688 	 * Firmware handles Tx BA session setup and teardown.
3689 	 */
3690 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3691 		if (!iwx_nic_lock(sc)) {
3692 			ieee80211_addba_resp_refuse(ic, ni, tid,
3693 			    IEEE80211_STATUS_UNSPECIFIED);
3694 			return;
3695 		}
3696 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3697 		    IWX_TX_RING_COUNT);
3698 		iwx_nic_unlock(sc);
3699 		if (err) {
3700 			printf("%s: could not enable Tx queue %d "
3701 			    "(error %d)\n", DEVNAME(sc), qid, err);
3702 			ieee80211_addba_resp_refuse(ic, ni, tid,
3703 			    IEEE80211_STATUS_UNSPECIFIED);
3704 			return;
3705 		}
3706 
3707 		ba->ba_winstart = 0;
3708 	} else
3709 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3710 
3711 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3712 
3713 	ring = &sc->txq[qid];
3714 	ba->ba_timeout_val = 0;
3715 	ieee80211_addba_resp_accept(ic, ni, tid);
3716 	sc->aggqid[tid] = qid;
3717 }
3718 
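/*
 * Process pending block ack session changes. The iwx_ampdu_rx_start,
 * iwx_ampdu_rx_stop and iwx_ampdu_tx_start callbacks below cannot
 * sleep, so they only mark TIDs in a bitmask and schedule this task.
 */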
3719 void
3720 iwx_ba_task(void *arg)
3721 {
3722 	struct iwx_softc *sc = arg;
3723 	struct ieee80211com *ic = &sc->sc_ic;
3724 	struct ieee80211_node *ni = ic->ic_bss;
3725 	int s = splnet();
3726 	int tid;
3727 
3728 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3729 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3730 			break;
3731 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3732 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3733 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3734 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3735 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3736 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3737 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3738 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3739 		}
3740 	}
3741 
3742 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3743 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3744 			break;
3745 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3746 			iwx_sta_tx_agg_start(sc, ni, tid);
3747 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3748 		}
3749 	}
3750 
3751 	refcnt_rele_wake(&sc->task_refs);
3752 	splx(s);
3753 }
3754 
3755 /*
3756  * This function is called by the upper layer when an ADDBA request is
3757  * received from another STA and before the ADDBA response is sent.
3758  */
3759 int
3760 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3761     uint8_t tid)
3762 {
3763 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3764 
3765 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3766 	    tid >= IWX_MAX_TID_COUNT)
3767 		return ENOSPC;
3768 
3769 	if (sc->ba_rx.start_tidmask & (1 << tid))
3770 		return EBUSY;
3771 
3772 	sc->ba_rx.start_tidmask |= (1 << tid);
3773 	iwx_add_task(sc, systq, &sc->ba_task);
3774 
3775 	return EBUSY;
3776 }
3777 
3778 /*
3779  * This function is called by the upper layer on teardown of an HT-immediate
3780  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3781  */
3782 void
3783 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3784     uint8_t tid)
3785 {
3786 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3787 
3788 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3789 		return;
3790 
3791 	sc->ba_rx.stop_tidmask |= (1 << tid);
3792 	iwx_add_task(sc, systq, &sc->ba_task);
3793 }
3794 
3795 int
3796 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3797     uint8_t tid)
3798 {
3799 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3800 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3801 
3802 	/*
3803 	 * Require a firmware version which uses an internal AUX queue.
3804 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3805 	 */
3806 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3807 		return ENOTSUP;
3808 
3809 	/* Ensure we can map this TID to an aggregation queue. */
3810 	if (tid >= IWX_MAX_TID_COUNT)
3811 		return EINVAL;
3812 
3813 	/* We only support a fixed Tx aggregation window size, for now. */
3814 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3815 		return ENOTSUP;
3816 
3817 	/* Is firmware already using an agg queue with this TID? */
3818 	if (sc->aggqid[tid] != 0)
3819 		return ENOSPC;
3820 
3821 	/* Are we already processing an ADDBA request? */
3822 	if (sc->ba_tx.start_tidmask & (1 << tid))
3823 		return EBUSY;
3824 
3825 	sc->ba_tx.start_tidmask |= (1 << tid);
3826 	iwx_add_task(sc, systq, &sc->ba_task);
3827 
3828 	return EBUSY;
3829 }
3830 
3831 void
3832 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3833 {
3834 	uint32_t mac_addr0, mac_addr1;
3835 
3836 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3837 
3838 	if (!iwx_nic_lock(sc))
3839 		return;
3840 
3841 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3842 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3843 
3844 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3845 
3846 	/* If OEM fused a valid address, use it instead of the one in OTP. */
3847 	if (iwx_is_valid_mac_addr(data->hw_addr)) {
3848 		iwx_nic_unlock(sc);
3849 		return;
3850 	}
3851 
3852 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3853 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3854 
3855 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3856 
3857 	iwx_nic_unlock(sc);
3858 }
3859 
3860 int
3861 iwx_is_valid_mac_addr(const uint8_t *addr)
3862 {
3863 	static const uint8_t reserved_mac[] = {
3864 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3865 	};
3866 
3867 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3868 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3869 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3870 	    !ETHER_IS_MULTICAST(addr));
3871 }
3872 
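/*
 * Convert the two 32-bit MAC address registers into the usual byte
 * order. For example, reading 0x00112233 from IWX_CSR_MAC_ADDR0 and
 * 0x00004455 from IWX_CSR_MAC_ADDR1 yields 00:11:22:33:44:55.
 */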
3873 void
3874 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3875 {
3876 	const uint8_t *hw_addr;
3877 
3878 	hw_addr = (const uint8_t *)&mac_addr0;
3879 	dest[0] = hw_addr[3];
3880 	dest[1] = hw_addr[2];
3881 	dest[2] = hw_addr[1];
3882 	dest[3] = hw_addr[0];
3883 
3884 	hw_addr = (const uint8_t *)&mac_addr1;
3885 	dest[4] = hw_addr[1];
3886 	dest[5] = hw_addr[0];
3887 }
3888 
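/*
 * Obtain NVM data (device capabilities and the channel profile) from
 * the firmware via IWX_NVM_GET_INFO, and read the MAC address from
 * CSR registers.
 */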
3889 int
3890 iwx_nvm_get(struct iwx_softc *sc)
3891 {
3892 	struct iwx_nvm_get_info cmd = {};
3893 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3894 	struct iwx_host_cmd hcmd = {
3895 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3896 		.data = { &cmd, },
3897 		.len = { sizeof(cmd) },
3898 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3899 		    IWX_NVM_GET_INFO)
3900 	};
3901 	int err;
3902 	uint32_t mac_flags;
3903 	/*
3904 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3905 	 * in v3, except for the channel profile part of the regulatory
3906 	 * data.  So we can just access the new struct, with the
3907 	 * exception of the channel profile.
3908 	 */
3909 	struct iwx_nvm_get_info_rsp *rsp;
3910 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3911 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3912 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3913 
3914 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3915 	err = iwx_send_cmd(sc, &hcmd);
3916 	if (err)
3917 		return err;
3918 
3919 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3920 		err = EIO;
3921 		goto out;
3922 	}
3923 
3924 	memset(nvm, 0, sizeof(*nvm));
3925 
3926 	iwx_set_mac_addr_from_csr(sc, nvm);
3927 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3928 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3929 		err = EINVAL;
3930 		goto out;
3931 	}
3932 
3933 	rsp = (void *)hcmd.resp_pkt->data;
3934 
3935 	/* Initialize general data */
3936 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3937 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3938 
3939 	/* Initialize MAC sku data */
3940 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3941 	nvm->sku_cap_11ac_enable =
3942 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3943 	nvm->sku_cap_11n_enable =
3944 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3945 	nvm->sku_cap_11ax_enable =
3946 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3947 	nvm->sku_cap_band_24GHz_enable =
3948 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3949 	nvm->sku_cap_band_52GHz_enable =
3950 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3951 	nvm->sku_cap_mimo_disable =
3952 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3953 
3954 	/* Initialize PHY sku data */
3955 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3956 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3957 
3958 	if (le32toh(rsp->regulatory.lar_enabled) &&
3959 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3960 		nvm->lar_enabled = 1;
3961 	}
3962 
3963 	if (v4) {
3964 		iwx_init_channel_map(sc, NULL,
3965 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3966 	} else {
3967 		rsp_v3 = (void *)rsp;
3968 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3969 		    NULL, IWX_NUM_CHANNELS_V1);
3970 	}
3971 out:
3972 	iwx_free_resp(sc, &hcmd);
3973 	return err;
3974 }
3975 
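/*
 * Point the hardware at the firmware image via the context info
 * mechanism and sleep until the "alive" notification arrives, with a
 * timeout of one second.
 */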
3976 int
3977 iwx_load_firmware(struct iwx_softc *sc)
3978 {
3979 	struct iwx_fw_sects *fws;
3980 	int err;
3981 
3982 	splassert(IPL_NET);
3983 
3984 	sc->sc_uc.uc_intr = 0;
3985 	sc->sc_uc.uc_ok = 0;
3986 
3987 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3988 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3989 		err = iwx_ctxt_info_gen3_init(sc, fws);
3990 	else
3991 		err = iwx_ctxt_info_init(sc, fws);
3992 	if (err) {
3993 		printf("%s: could not init context info\n", DEVNAME(sc));
3994 		return err;
3995 	}
3996 
3997 	/* wait for the firmware to load */
3998 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3999 	if (err || !sc->sc_uc.uc_ok) {
4000 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
4001 		iwx_ctxt_info_free_paging(sc);
4002 	}
4003 
4004 	iwx_dma_contig_free(&sc->iml_dma);
4005 	iwx_ctxt_info_free_fw_img(sc);
4006 
4007 	if (!sc->sc_uc.uc_ok)
4008 		return EINVAL;
4009 
4010 	return err;
4011 }
4012 
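/*
 * Prepare the NIC for loading firmware: clear pending interrupts and
 * rfkill handshake bits, initialize the NIC, and enable the firmware
 * load interrupt before handing over to iwx_load_firmware().
 */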
4013 int
4014 iwx_start_fw(struct iwx_softc *sc)
4015 {
4016 	int err;
4017 
4018 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4019 
4020 	iwx_disable_interrupts(sc);
4021 
4022 	/* make sure rfkill handshake bits are cleared */
4023 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
4024 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
4025 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4026 
4027 	/* clear (again), then enable firmware load interrupt */
4028 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4029 
4030 	err = iwx_nic_init(sc);
4031 	if (err) {
4032 		printf("%s: unable to init nic\n", DEVNAME(sc));
4033 		return err;
4034 	}
4035 
4036 	iwx_enable_fwload_interrupt(sc);
4037 
4038 	return iwx_load_firmware(sc);
4039 }
4040 
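/*
 * Parse one PNVM image section: walk its TLVs, match
 * IWX_UCODE_TLV_HW_TYPE against this device's MAC type and RF ID,
 * and concatenate the payloads of all IWX_UCODE_TLV_SEC_RT TLVs into
 * a DMA buffer which the firmware loading context will point at.
 */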
4041 int
4042 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
4043     size_t len)
4044 {
4045 	const struct iwx_ucode_tlv *tlv;
4046 	uint32_t sha1 = 0;
4047 	uint16_t mac_type = 0, rf_id = 0;
4048 	uint8_t *pnvm_data = NULL, *tmp;
4049 	int hw_match = 0;
4050 	uint32_t size = 0;
4051 	int err;
4052 
4053 	while (len >= sizeof(*tlv)) {
4054 		uint32_t tlv_len, tlv_type;
4055 
4056 		len -= sizeof(*tlv);
4057 		tlv = (const void *)data;
4058 
4059 		tlv_len = le32toh(tlv->length);
4060 		tlv_type = le32toh(tlv->type);
4061 
4062 		if (len < tlv_len) {
4063 			printf("%s: invalid TLV len: %zd/%u\n",
4064 			    DEVNAME(sc), len, tlv_len);
4065 			err = EINVAL;
4066 			goto out;
4067 		}
4068 
4069 		data += sizeof(*tlv);
4070 
4071 		switch (tlv_type) {
4072 		case IWX_UCODE_TLV_PNVM_VERSION:
4073 			if (tlv_len < sizeof(uint32_t))
4074 				break;
4075 
4076 			sha1 = le32_to_cpup((const uint32_t *)data);
4077 			break;
4078 		case IWX_UCODE_TLV_HW_TYPE:
4079 			if (tlv_len < 2 * sizeof(uint16_t))
4080 				break;
4081 
4082 			if (hw_match)
4083 				break;
4084 
4085 			mac_type = le16_to_cpup((const uint16_t *)data);
4086 			rf_id = le16_to_cpup((const uint16_t *)(data +
4087 			    sizeof(uint16_t)));
4088 
4089 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
4090 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
4091 				hw_match = 1;
4092 			break;
4093 		case IWX_UCODE_TLV_SEC_RT: {
4094 			const struct iwx_pnvm_section *section;
4095 			uint32_t data_len;
4096 
4097 			section = (const void *)data;
4098 			data_len = tlv_len - sizeof(*section);
4099 
4100 			/* TODO: remove, this is a deprecated separator */
4101 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
4102 				break;
4103 
4104 			tmp = malloc(size + data_len, M_DEVBUF,
4105 			    M_WAITOK | M_CANFAIL | M_ZERO);
4106 			if (tmp == NULL) {
4107 				err = ENOMEM;
4108 				goto out;
4109 			}
4110 			memcpy(tmp, pnvm_data, size);
4111 			memcpy(tmp + size, section->data, data_len);
4112 			free(pnvm_data, M_DEVBUF, size);
4113 			pnvm_data = tmp;
4114 			size += data_len;
4115 			break;
4116 		}
4117 		case IWX_UCODE_TLV_PNVM_SKU:
4118 			/* New PNVM section started, stop parsing. */
4119 			goto done;
4120 		default:
4121 			break;
4122 		}
4123 
4124 		if (roundup(tlv_len, 4) > len)
4125 			break;
4126 		len -= roundup(tlv_len, 4);
4127 		data += roundup(tlv_len, 4);
4128 	}
4129 done:
4130 	if (!hw_match || size == 0) {
4131 		err = ENOENT;
4132 		goto out;
4133 	}
4134 
4135 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
4136 	if (err) {
4137 		printf("%s: could not allocate DMA memory for PNVM\n",
4138 		    DEVNAME(sc));
4139 		err = ENOMEM;
4140 		goto out;
4141 	}
4142 	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
4143 	iwx_ctxt_info_gen3_set_pnvm(sc);
4144 	sc->sc_pnvm_ver = sha1;
4145 out:
4146 	free(pnvm_data, M_DEVBUF, size);
4147 	return err;
4148 }
4149 
4150 int
4151 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4152 {
4153 	const struct iwx_ucode_tlv *tlv;
4154 
4155 	while (len >= sizeof(*tlv)) {
4156 		uint32_t tlv_len, tlv_type;
4157 
4158 		len -= sizeof(*tlv);
4159 		tlv = (const void *)data;
4160 
4161 		tlv_len = le32toh(tlv->length);
4162 		tlv_type = le32toh(tlv->type);
4163 
4164 		if (len < tlv_len || roundup(tlv_len, 4) > len)
4165 			return EINVAL;
4166 
4167 		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4168 			const struct iwx_sku_id *sku_id =
4169 				(const void *)(data + sizeof(*tlv));
4170 
4171 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4172 			len -= roundup(tlv_len, 4);
4173 
4174 			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4175 			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4176 			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4177 			    iwx_pnvm_handle_section(sc, data, len) == 0)
4178 				return 0;
4179 		} else {
4180 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4181 			len -= roundup(tlv_len, 4);
4182 		}
4183 	}
4184 
4185 	return ENOENT;
4186 }
4187 
4188 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4189 void
4190 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4191 {
4192 	struct iwx_prph_scratch *prph_scratch;
4193 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4194 
4195 	prph_scratch = sc->prph_scratch_dma.vaddr;
4196 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4197 
4198 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4199 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
4200 
4201 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0, sc->pnvm_dma.size,
4202 	    BUS_DMASYNC_PREWRITE);
4203 }
4204 
4205 /*
4206  * Load platform-NVM (non-volatile-memory) data from the filesystem.
4207  * This data apparently contains regulatory information and affects device
4208  * channel configuration.
4209  * The SKU of AX210 devices tells us which PNVM file section is needed.
4210  * Pre-AX210 devices store NVM data onboard.
4211  */
4212 int
4213 iwx_load_pnvm(struct iwx_softc *sc)
4214 {
4215 	const int wait_flags = IWX_PNVM_COMPLETE;
4216 	int s, err = 0;
4217 	u_char *pnvm_data = NULL;
4218 	size_t pnvm_size = 0;
4219 
4220 	if (sc->sc_sku_id[0] == 0 &&
4221 	    sc->sc_sku_id[1] == 0 &&
4222 	    sc->sc_sku_id[2] == 0)
4223 		return 0;
4224 
4225 	if (sc->sc_pnvm_name) {
4226 		if (sc->pnvm_dma.vaddr == NULL) {
4227 			err = loadfirmware(sc->sc_pnvm_name,
4228 			    &pnvm_data, &pnvm_size);
4229 			if (err) {
4230 				printf("%s: could not read %s (error %d)\n",
4231 				    DEVNAME(sc), sc->sc_pnvm_name, err);
4232 				return err;
4233 			}
4234 
4235 			err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4236 			if (err && err != ENOENT) {
4237 				free(pnvm_data, M_DEVBUF, pnvm_size);
4238 				return err;
4239 			}
4240 		} else
4241 			iwx_ctxt_info_gen3_set_pnvm(sc);
4242 	}
4243 
4244 	s = splnet();
4245 
4246 	if (!iwx_nic_lock(sc)) {
4247 		splx(s);
4248 		free(pnvm_data, M_DEVBUF, pnvm_size);
4249 		return EBUSY;
4250 	}
4251 
4252 	/*
4253 	 * If we don't have a platform NVM file, simply ask the firmware
4254 	 * to proceed without it.
4255 	 */
4256 
4257 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4258 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4259 
4260 	/* Wait for the pnvm complete notification from firmware. */
4261 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4262 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4263 		    SEC_TO_NSEC(2));
4264 		if (err)
4265 			break;
4266 	}
4267 
4268 	splx(s);
4269 	iwx_nic_unlock(sc);
4270 	free(pnvm_data, M_DEVBUF, pnvm_size);
4271 	return err;
4272 }
4273 
4274 int
4275 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4276 {
4277 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4278 		.valid = htole32(valid_tx_ant),
4279 	};
4280 
4281 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4282 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4283 }
4284 
4285 int
4286 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4287 {
4288 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
4289 
4290 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4291 	phy_cfg_cmd.calib_control.event_trigger =
4292 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4293 	phy_cfg_cmd.calib_control.flow_trigger =
4294 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4295 
4296 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4297 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4298 }
4299 
4300 int
4301 iwx_send_dqa_cmd(struct iwx_softc *sc)
4302 {
4303 	struct iwx_dqa_enable_cmd dqa_cmd = {
4304 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4305 	};
4306 	uint32_t cmd_id;
4307 
4308 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4309 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4310 }
4311 
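/*
 * Load the regular firmware image and, on AX210 and newer devices,
 * the external PNVM image, then perform post-alive setup.
 */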
4312 int
4313 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4314 {
4315 	int err;
4316 
4317 	err = iwx_read_firmware(sc);
4318 	if (err)
4319 		return err;
4320 
4321 	err = iwx_start_fw(sc);
4322 	if (err)
4323 		return err;
4324 
4325 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4326 		err = iwx_load_pnvm(sc);
4327 		if (err)
4328 			return err;
4329 	}
4330 
4331 	iwx_post_alive(sc);
4332 
4333 	return 0;
4334 }
4335 
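/*
 * Run the firmware image far enough to gain NVM access: load the
 * ucode, tell it that NVM access is complete, wait for the init
 * complete notification, and optionally read NVM data to obtain
 * device capabilities and the MAC address.
 */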
4336 int
4337 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4338 {
4339 	const int wait_flags = IWX_INIT_COMPLETE;
4340 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
4341 	struct iwx_init_extended_cfg_cmd init_cfg = {
4342 		.init_flags = htole32(IWX_INIT_NVM),
4343 	};
4344 	int err, s;
4345 
4346 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4347 		printf("%s: radio is disabled by hardware switch\n",
4348 		    DEVNAME(sc));
4349 		return EPERM;
4350 	}
4351 
4352 	s = splnet();
4353 	sc->sc_init_complete = 0;
4354 	err = iwx_load_ucode_wait_alive(sc);
4355 	if (err) {
4356 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4357 		splx(s);
4358 		return err;
4359 	}
4360 
4361 	/*
4362 	 * Send init config command to mark that we are sending NVM
4363 	 * access commands.
4364 	 */
4365 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4366 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4367 	if (err) {
4368 		splx(s);
4369 		return err;
4370 	}
4371 
4372 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4373 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4374 	if (err) {
4375 		splx(s);
4376 		return err;
4377 	}
4378 
4379 	/* Wait for the init complete notification from the firmware. */
4380 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4381 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4382 		    SEC_TO_NSEC(2));
4383 		if (err) {
4384 			splx(s);
4385 			return err;
4386 		}
4387 	}
4388 	splx(s);
4389 	if (readnvm) {
4390 		err = iwx_nvm_get(sc);
4391 		if (err) {
4392 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4393 			return err;
4394 		}
4395 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4396 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4397 			    sc->sc_nvm.hw_addr);
4398 
4399 	}
4400 	return 0;
4401 }
4402 
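/*
 * Ask the firmware to enable PCIe Latency Tolerance Reporting (LTR),
 * assuming sc_ltr_enabled reflects LTR support detected at attach
 * time.
 */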
4403 int
4404 iwx_config_ltr(struct iwx_softc *sc)
4405 {
4406 	struct iwx_ltr_config_cmd cmd = {
4407 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4408 	};
4409 
4410 	if (!sc->sc_ltr_enabled)
4411 		return 0;
4412 
4413 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4414 }
4415 
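/*
 * Write the "free" Rx descriptor for a ring slot. AX210 and newer
 * use struct iwx_rx_transfer_desc; older devices use a packed 64-bit
 * word combining the buffer's DMA address with a 12-bit buffer ID.
 */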
4416 void
4417 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4418 {
4419 	struct iwx_rx_data *data = &ring->data[idx];
4420 
4421 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4422 		struct iwx_rx_transfer_desc *desc = ring->desc;
4423 		desc[idx].rbid = htole16(idx & 0xffff);
4424 		desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4425 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4426 		    idx * sizeof(*desc), sizeof(*desc),
4427 		    BUS_DMASYNC_PREWRITE);
4428 	} else {
4429 		((uint64_t *)ring->desc)[idx] =
4430 		    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4431 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4432 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4433 		    BUS_DMASYNC_PREWRITE);
4434 	}
4435 }
4436 
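/*
 * Attach a fresh mbuf cluster to an Rx ring slot, load it for DMA,
 * and update the slot's Rx descriptor accordingly.
 */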
4437 int
4438 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4439 {
4440 	struct iwx_rx_ring *ring = &sc->rxq;
4441 	struct iwx_rx_data *data = &ring->data[idx];
4442 	struct mbuf *m;
4443 	int err;
4444 	int fatal = 0;
4445 
4446 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4447 	if (m == NULL)
4448 		return ENOBUFS;
4449 
4450 	if (size <= MCLBYTES) {
4451 		MCLGET(m, M_DONTWAIT);
4452 	} else {
4453 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4454 	}
4455 	if ((m->m_flags & M_EXT) == 0) {
4456 		m_freem(m);
4457 		return ENOBUFS;
4458 	}
4459 
4460 	if (data->m != NULL) {
4461 		bus_dmamap_unload(sc->sc_dmat, data->map);
4462 		fatal = 1;
4463 	}
4464 
4465 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4466 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4467 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4468 	if (err) {
4469 		/* XXX */
4470 		if (fatal)
4471 			panic("%s: could not load RX mbuf", DEVNAME(sc));
4472 		m_freem(m);
4473 		return err;
4474 	}
4475 	data->m = m;
4476 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4477 
4478 	/* Update RX descriptor. */
4479 	iwx_update_rx_desc(sc, ring, idx);
4480 
4481 	return 0;
4482 }
4483 
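/*
 * Derive the received signal strength in dBm from an Rx descriptor.
 * The firmware reports per-chain energy as a positive value to be
 * negated; zero means no measurement and maps to -256 dBm.
 */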
4484 int
4485 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4486     struct iwx_rx_mpdu_desc *desc)
4487 {
4488 	int energy_a, energy_b;
4489 
4490 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4491 		energy_a = desc->v3.energy_a;
4492 		energy_b = desc->v3.energy_b;
4493 	} else {
4494 		energy_a = desc->v1.energy_a;
4495 		energy_b = desc->v1.energy_b;
4496 	}
4497 	energy_a = energy_a ? -energy_a : -256;
4498 	energy_b = energy_b ? -energy_b : -256;
4499 	return MAX(energy_a, energy_b);
4500 }
4501 
4502 void
4503 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4504     struct iwx_rx_data *data)
4505 {
4506 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4507 
4508 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4509 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4510 
4511 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4512 }
4513 
4514 /*
4515  * Retrieve the average noise (in dBm) among receivers.
4516  */
4517 int
4518 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4519 {
4520 	int i, total, nbant, noise;
4521 
4522 	total = nbant = noise = 0;
4523 	for (i = 0; i < 3; i++) {
4524 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4525 		if (noise) {
4526 			total += noise;
4527 			nbant++;
4528 		}
4529 	}
4530 
4531 	/* There should be at least one antenna but check anyway. */
4532 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4533 }
4534 
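/*
 * Finish CCMP decapsulation for a frame which hardware has already
 * decrypted: verify the ExtIV bit and check the packet number (PN)
 * for replays. The CCMP header layout is PN0 PN1 rsvd keyid/ExtIV
 * PN2 PN3 PN4 PN5, which is why the 48-bit PN is assembled from
 * header bytes 0, 1, and 4-7 below.
 */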
4535 int
4536 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4537     struct ieee80211_rxinfo *rxi)
4538 {
4539 	struct ieee80211com *ic = &sc->sc_ic;
4540 	struct ieee80211_key *k;
4541 	struct ieee80211_frame *wh;
4542 	uint64_t pn, *prsc;
4543 	uint8_t *ivp;
4544 	uint8_t tid;
4545 	int hdrlen, hasqos;
4546 
4547 	wh = mtod(m, struct ieee80211_frame *);
4548 	hdrlen = ieee80211_get_hdrlen(wh);
4549 	ivp = (uint8_t *)wh + hdrlen;
4550 
4551 	/* find key for decryption */
4552 	k = ieee80211_get_rxkey(ic, m, ni);
4553 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4554 		return 1;
4555 
4556 	/* Check that the ExtIV bit is set. */
4557 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4558 		return 1;
4559 
4560 	hasqos = ieee80211_has_qos(wh);
4561 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4562 	prsc = &k->k_rsc[tid];
4563 
4564 	/* Extract the 48-bit PN from the CCMP header. */
4565 	pn = (uint64_t)ivp[0]       |
4566 	     (uint64_t)ivp[1] <<  8 |
4567 	     (uint64_t)ivp[4] << 16 |
4568 	     (uint64_t)ivp[5] << 24 |
4569 	     (uint64_t)ivp[6] << 32 |
4570 	     (uint64_t)ivp[7] << 40;
4571 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4572 		if (pn < *prsc) {
4573 			ic->ic_stats.is_ccmp_replays++;
4574 			return 1;
4575 		}
4576 	} else if (pn <= *prsc) {
4577 		ic->ic_stats.is_ccmp_replays++;
4578 		return 1;
4579 	}
4580 	/* Last seen packet number is updated in ieee80211_inputm(). */
4581 
4582 	/*
4583 	 * Some firmware versions strip the MIC, and some don't. It is not
4584 	 * clear which of the capability flags could tell us what to expect.
4585 	 * For now, keep things simple and just leave the MIC in place if
4586 	 * it is present.
4587 	 *
4588 	 * The IV will be stripped by ieee80211_inputm().
4589 	 */
4590 	return 0;
4591 }
4592 
4593 int
4594 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4595     struct ieee80211_rxinfo *rxi)
4596 {
4597 	struct ieee80211com *ic = &sc->sc_ic;
4598 	struct ifnet *ifp = IC2IFP(ic);
4599 	struct ieee80211_frame *wh;
4600 	struct ieee80211_node *ni;
4601 	int ret = 0;
4602 	uint8_t type, subtype;
4603 
4604 	wh = mtod(m, struct ieee80211_frame *);
4605 
4606 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4607 	if (type == IEEE80211_FC0_TYPE_CTL)
4608 		return 0;
4609 
4610 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4611 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4612 		return 0;
4613 
4614 	ni = ieee80211_find_rxnode(ic, wh);
4615 	/* Handle hardware decryption. */
4616 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4617 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4618 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4619 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4620 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4621 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4622 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4623 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4624 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4625 			ic->ic_stats.is_ccmp_dec_errs++;
4626 			ret = 1;
4627 			goto out;
4628 		}
4629 		/* Check whether decryption was successful or not. */
4630 		if ((rx_pkt_status &
4631 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4632 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4633 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4634 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4635 			ic->ic_stats.is_ccmp_dec_errs++;
4636 			ret = 1;
4637 			goto out;
4638 		}
4639 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4640 	}
4641 out:
4642 	if (ret)
4643 		ifp->if_ierrors++;
4644 	ieee80211_release_node(ic, ni);
4645 	return ret;
4646 }
4647 
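/*
 * Hand a received frame to net80211, performing CCMP decapsulation
 * for hardware-decrypted frames and filling in radiotap capture data
 * along the way.
 */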
4648 void
4649 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4650     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4651     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4652     struct mbuf_list *ml)
4653 {
4654 	struct ieee80211com *ic = &sc->sc_ic;
4655 	struct ifnet *ifp = IC2IFP(ic);
4656 	struct ieee80211_frame *wh;
4657 	struct ieee80211_node *ni;
4658 
4659 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4660 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4661 
4662 	wh = mtod(m, struct ieee80211_frame *);
4663 	ni = ieee80211_find_rxnode(ic, wh);
4664 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4665 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4666 		ifp->if_ierrors++;
4667 		m_freem(m);
4668 		ieee80211_release_node(ic, ni);
4669 		return;
4670 	}
4671 
4672 #if NBPFILTER > 0
4673 	if (sc->sc_drvbpf != NULL) {
4674 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4675 		uint16_t chan_flags;
4676 		int have_legacy_rate = 1;
4677 		uint8_t mcs, rate;
4678 
4679 		tap->wr_flags = 0;
4680 		if (is_shortpre)
4681 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4682 		tap->wr_chan_freq =
4683 		    htole16(ic->ic_channels[chanidx].ic_freq);
4684 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4685 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4686 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4687 			chan_flags &= ~IEEE80211_CHAN_HT;
4688 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4689 		}
4690 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4691 			chan_flags &= ~IEEE80211_CHAN_VHT;
4692 		tap->wr_chan_flags = htole16(chan_flags);
4693 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4694 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4695 		tap->wr_tsft = device_timestamp;
4696 		if (sc->sc_rate_n_flags_version >= 2) {
4697 			uint32_t mod_type = (rate_n_flags &
4698 			    IWX_RATE_MCS_MOD_TYPE_MSK);
4699 			const struct ieee80211_rateset *rs = NULL;
4700 			uint32_t ridx;
4701 			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
4702 			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
4703 			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
4704 			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
4705 			if (mod_type == IWX_RATE_MCS_CCK_MSK)
4706 				rs = &ieee80211_std_rateset_11b;
4707 			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
4708 				rs = &ieee80211_std_rateset_11a;
4709 			if (rs && ridx < rs->rs_nrates) {
4710 				rate = (rs->rs_rates[ridx] &
4711 				    IEEE80211_RATE_VAL);
4712 			} else
4713 				rate = 0;
4714 		} else {
4715 			have_legacy_rate = ((rate_n_flags &
4716 			    (IWX_RATE_MCS_HT_MSK_V1 |
4717 			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
4718 			mcs = (rate_n_flags &
4719 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
4720 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
4721 			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
4722 		}
4723 		if (!have_legacy_rate) {
4724 			tap->wr_rate = (0x80 | mcs);
4725 		} else {
4726 			switch (rate) {
4727 			/* CCK rates. */
4728 			case  10: tap->wr_rate =   2; break;
4729 			case  20: tap->wr_rate =   4; break;
4730 			case  55: tap->wr_rate =  11; break;
4731 			case 110: tap->wr_rate =  22; break;
4732 			/* OFDM rates. */
4733 			case 0xd: tap->wr_rate =  12; break;
4734 			case 0xf: tap->wr_rate =  18; break;
4735 			case 0x5: tap->wr_rate =  24; break;
4736 			case 0x7: tap->wr_rate =  36; break;
4737 			case 0x9: tap->wr_rate =  48; break;
4738 			case 0xb: tap->wr_rate =  72; break;
4739 			case 0x1: tap->wr_rate =  96; break;
4740 			case 0x3: tap->wr_rate = 108; break;
4741 			/* Unknown rate: should not happen. */
4742 			default:  tap->wr_rate =   0;
4743 			}
4744 		}
4745 
4746 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4747 		    m, BPF_DIRECTION_IN);
4748 	}
4749 #endif
4750 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4751 	ieee80211_release_node(ic, ni);
4752 }
4753 
4754 /*
4755  * Drop duplicate 802.11 retransmissions
4756  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4757  * and handle pseudo-duplicate frames which result from deaggregation
4758  * of A-MSDU frames in hardware.
4759  */
4760 int
4761 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4762     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4763 {
4764 	struct ieee80211com *ic = &sc->sc_ic;
4765 	struct iwx_node *in = (void *)ic->ic_bss;
4766 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4767 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4768 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4769 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4770 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4771 	int hasqos = ieee80211_has_qos(wh);
4772 	uint16_t seq;
4773 
4774 	if (type == IEEE80211_FC0_TYPE_CTL ||
4775 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4776 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4777 		return 0;
4778 
4779 	if (hasqos) {
4780 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4781 		if (tid > IWX_MAX_TID_COUNT)
4782 			tid = IWX_MAX_TID_COUNT;
4783 	}
4784 
4785 	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0. */
4786 	subframe_idx = desc->amsdu_info &
4787 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4788 
4789 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4790 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4791 	    dup_data->last_seq[tid] == seq &&
4792 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4793 		return 1;
4794 
4795 	/*
4796 	 * Allow the same frame sequence number for all A-MSDU subframes
4797 	 * following the first subframe.
4798 	 * Otherwise these subframes would be discarded as replays.
4799 	 */
4800 	if (dup_data->last_seq[tid] == seq &&
4801 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4802 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4803 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4804 	}
4805 
4806 	dup_data->last_seq[tid] = seq;
4807 	dup_data->last_sub_frame[tid] = subframe_idx;
4808 
4809 	return 0;
4810 }
4811 
4812 /*
4813  * Returns true if sn2 - buffer_size < sn1 < sn2.
4814  * To be used only in order to compare reorder buffer head with NSSN.
4815  * We fully trust NSSN unless it is behind us due to reorder timeout.
4816  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
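 *
 * For example, with buffer_size 64 and sn2 = 100, sn1 = 50 counts as
 * "less", while sn1 = 20 (more than buffer_size behind) and sn1 = 100
 * do not. SEQ_LT() takes care of sequence number wrap-around.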
4817  */
4818 int
4819 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4820 {
4821 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4822 }
4823 
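/*
 * Release buffered frames to net80211 in sequence-number order, up to
 * but not including the new SSN (nssn), and move the window start
 * forward accordingly. Re-arm or cancel the reorder timeout depending
 * on whether frames remain buffered.
 */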
4824 void
4825 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4826     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4827     uint16_t nssn, struct mbuf_list *ml)
4828 {
4829 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4830 	uint16_t ssn = reorder_buf->head_sn;
4831 
4832 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4833 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4834 		goto set_timer;
4835 
4836 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4837 		int index = ssn % reorder_buf->buf_size;
4838 		struct mbuf *m;
4839 		int chanidx, is_shortpre;
4840 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4841 		struct ieee80211_rxinfo *rxi;
4842 
4843 		/* This data is the same for all A-MSDU subframes. */
4844 		chanidx = entries[index].chanidx;
4845 		rx_pkt_status = entries[index].rx_pkt_status;
4846 		is_shortpre = entries[index].is_shortpre;
4847 		rate_n_flags = entries[index].rate_n_flags;
4848 		device_timestamp = entries[index].device_timestamp;
4849 		rxi = &entries[index].rxi;
4850 
4851 		/*
4852 		 * Empty the list. It will hold more than one frame if this
4853 		 * entry is an A-MSDU. An empty list is valid as well, since
4854 		 * the nssn indicates that frames were received.
4855 		 */
4856 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4857 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4858 			    rate_n_flags, device_timestamp, rxi, ml);
4859 			reorder_buf->num_stored--;
4860 
4861 			/*
4862 			 * Allow the same frame sequence number and CCMP PN for
4863 			 * all A-MSDU subframes following the first subframe.
4864 			 * Otherwise they would be discarded as replays.
4865 			 */
4866 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4867 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4868 		}
4869 
4870 		ssn = (ssn + 1) & 0xfff;
4871 	}
4872 	reorder_buf->head_sn = nssn;
4873 
4874 set_timer:
4875 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4876 		timeout_add_usec(&reorder_buf->reorder_timer,
4877 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4878 	} else
4879 		timeout_del(&reorder_buf->reorder_timer);
4880 }
4881 
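/*
 * Guard against the firmware repeatedly reporting frames as being
 * behind the BA window ("old SN"). The GP2 timestamp identifies the
 * A-MPDU a frame arrived in; once IWX_AMPDU_CONSEC_DROPS_DELBA
 * consecutive A-MPDUs have caused old-SN drops, give up and tear the
 * session down. This mirrors a workaround in the iwlwifi driver.
 */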
4882 int
4883 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4884     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4885 {
4886 	struct ieee80211com *ic = &sc->sc_ic;
4887 
4888 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4889 		/* we have a new (A-)MPDU ... */
4890 
4891 		/*
4892 		 * reset counter to 0 if we didn't have any oldsn in
4893 		 * the last A-MPDU (as detected by GP2 being identical)
4894 		 */
4895 		if (!buffer->consec_oldsn_prev_drop)
4896 			buffer->consec_oldsn_drops = 0;
4897 
4898 		/* either way, update our tracking state */
4899 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4900 	} else if (buffer->consec_oldsn_prev_drop) {
4901 		/*
4902 		 * tracking state didn't change, and we had an old SN
4903 		 * indication before - do nothing in this case, we
4904 		 * already noted this one down and are waiting for the
4905 		 * next A-MPDU (by GP2)
4906 		 */
4907 		return 0;
4908 	}
4909 
4910 	/* return unless this MPDU has old SN */
4911 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4912 		return 0;
4913 
4914 	/* update state */
4915 	buffer->consec_oldsn_prev_drop = 1;
4916 	buffer->consec_oldsn_drops++;
4917 
4918 	/* if limit is reached, send del BA and reset state */
4919 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4920 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4921 		    0, tid);
4922 		buffer->consec_oldsn_prev_drop = 0;
4923 		buffer->consec_oldsn_drops = 0;
4924 		return 1;
4925 	}
4926 
4927 	return 0;
4928 }
4929 
4930 /*
4931  * Handle re-ordering of frames which were de-aggregated in hardware.
4932  * Returns 1 if the MPDU was consumed (buffered or dropped).
4933  * Returns 0 if the MPDU should be passed to upper layer.
4934  */
4935 int
4936 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4937     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4938     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4939     struct mbuf_list *ml)
4940 {
4941 	struct ieee80211com *ic = &sc->sc_ic;
4942 	struct ieee80211_frame *wh;
4943 	struct ieee80211_node *ni;
4944 	struct iwx_rxba_data *rxba;
4945 	struct iwx_reorder_buffer *buffer;
4946 	uint32_t reorder_data = le32toh(desc->reorder_data);
4947 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4948 	int last_subframe =
4949 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4950 	uint8_t tid;
4951 	uint8_t subframe_idx = (desc->amsdu_info &
4952 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4953 	struct iwx_reorder_buf_entry *entries;
4954 	int index;
4955 	uint16_t nssn, sn;
4956 	uint8_t baid, type, subtype;
4957 	int hasqos;
4958 
4959 	wh = mtod(m, struct ieee80211_frame *);
4960 	hasqos = ieee80211_has_qos(wh);
4961 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4962 
4963 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4964 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4965 
4966 	/*
4967 	 * We are only interested in Block Ack requests and unicast QoS data.
4968 	 */
4969 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4970 		return 0;
4971 	if (hasqos) {
4972 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4973 			return 0;
4974 	} else {
4975 		if (type != IEEE80211_FC0_TYPE_CTL ||
4976 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4977 			return 0;
4978 	}
4979 
4980 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4981 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4982 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4983 	    baid >= nitems(sc->sc_rxba_data))
4984 		return 0;
4985 
4986 	rxba = &sc->sc_rxba_data[baid];
4987 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4988 	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4989 		return 0;
4990 
4991 	if (rxba->timeout != 0)
4992 		getmicrouptime(&rxba->last_rx);
4993 
4994 	/* Bypass A-MPDU re-ordering in net80211. */
4995 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4996 
4997 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4998 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4999 		IWX_RX_MPDU_REORDER_SN_SHIFT;
5000 
5001 	buffer = &rxba->reorder_buf;
5002 	entries = &rxba->entries[0];
5003 
5004 	if (!buffer->valid) {
5005 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
5006 			return 0;
5007 		buffer->valid = 1;
5008 	}
5009 
5010 	ni = ieee80211_find_rxnode(ic, wh);
5011 	if (type == IEEE80211_FC0_TYPE_CTL &&
5012 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5013 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5014 		goto drop;
5015 	}
5016 
5017 	/*
5018 	 * If there was a significant jump in the nssn, adjust.
5019 	 * If the SN is smaller than the NSSN it might need to first go
5020 	 * into the reorder buffer; in that case we just release up to it
5021 	 * and the rest of the function will take care of storing it and
5022 	 * releasing up to the nssn.
5023 	 */
5024 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5025 	    buffer->buf_size) ||
5026 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5027 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5028 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5029 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5030 	}
5031 
5032 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5033 	    device_timestamp)) {
5034 		 /* BA session will be torn down. */
5035 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5036 		goto drop;
5037 
5038 	}
5039 
5040 	/* drop any outdated packets */
5041 	if (SEQ_LT(sn, buffer->head_sn)) {
5042 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5043 		goto drop;
5044 	}
5045 
5046 	/* release immediately if allowed by nssn and no stored frames */
5047 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5048 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5049 		   (!is_amsdu || last_subframe))
5050 			buffer->head_sn = nssn;
5051 		ieee80211_release_node(ic, ni);
5052 		return 0;
5053 	}
5054 
5055 	/*
5056 	 * release immediately if there are no stored frames, and the sn is
5057 	 * equal to the head.
5058 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
5059 	 * When we released everything, and we got the next frame in the
5060 	 * sequence, according to the NSSN we can't release immediately,
5061 	 * while technically there is no hole and we can move forward.
5062 	 */
5063 	if (!buffer->num_stored && sn == buffer->head_sn) {
5064 		if (!is_amsdu || last_subframe)
5065 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5066 		ieee80211_release_node(ic, ni);
5067 		return 0;
5068 	}
5069 
5070 	index = sn % buffer->buf_size;
5071 
5072 	/*
5073 	 * Check if we have already stored this frame.
5074 	 * As an A-MSDU is either received in full or not at all, the
5075 	 * logic is simple: if there are frames in this buffer position
5076 	 * and the last A-MSDU frame had a different SN, it is a
5077 	 * retransmission. If the SN is the same, it is the same A-MSDU
5078 	 * only if the subframe index is incrementing; else retransmission.
5079 	 */
5080 	if (!ml_empty(&entries[index].frames)) {
5081 		if (!is_amsdu) {
5082 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5083 			goto drop;
5084 		} else if (sn != buffer->last_amsdu ||
5085 		    buffer->last_sub_index >= subframe_idx) {
5086 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5087 			goto drop;
5088 		}
5089 	} else {
5090 		/* This data is the same for all A-MSDU subframes. */
5091 		entries[index].chanidx = chanidx;
5092 		entries[index].is_shortpre = is_shortpre;
5093 		entries[index].rate_n_flags = rate_n_flags;
5094 		entries[index].device_timestamp = device_timestamp;
5095 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5096 	}
5097 
5098 	/* put in reorder buffer */
5099 	ml_enqueue(&entries[index].frames, m);
5100 	buffer->num_stored++;
5101 	getmicrouptime(&entries[index].reorder_time);
5102 
5103 	if (is_amsdu) {
5104 		buffer->last_amsdu = sn;
5105 		buffer->last_sub_index = subframe_idx;
5106 	}
5107 
5108 	/*
5109 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5110 	 * The reason is that NSSN advances on the first sub-frame, and may
5111 	 * cause the reorder buffer to advance before all the sub-frames arrive.
5112 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5113 	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
5114 	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5115 	 * already ahead and it will be dropped.
5116 	 * If the last sub-frame is not on this queue - we will get frame
5117 	 * release notification with up to date NSSN.
5118 	 */
5119 	if (!is_amsdu || last_subframe)
5120 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5121 
5122 	ieee80211_release_node(ic, ni);
5123 	return 1;
5124 
5125 drop:
5126 	m_freem(m);
5127 	ieee80211_release_node(ic, ni);
5128 	return 1;
5129 }
5130 
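/*
 * Process a single received MPDU in the multi-queue descriptor
 * format: validate status and length, strip header padding, undo
 * hardware A-MSDU quirks, verify hardware decryption, drop
 * duplicates, and either buffer the frame for re-ordering or pass it
 * up right away.
 */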
5131 void
5132 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
5133     size_t maxlen, struct mbuf_list *ml)
5134 {
5135 	struct ieee80211com *ic = &sc->sc_ic;
5136 	struct ieee80211_rxinfo rxi;
5137 	struct iwx_rx_mpdu_desc *desc;
5138 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5139 	int rssi;
5140 	uint8_t chanidx;
5141 	uint16_t phy_info;
5142 	size_t desc_size;
5143 
5144 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
5145 		desc_size = sizeof(*desc);
5146 	else
5147 		desc_size = IWX_RX_DESC_SIZE_V1;
5148 
5149 	if (maxlen < desc_size) {
5150 		m_freem(m);
5151 		return; /* drop */
5152 	}
5153 
5154 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
5155 
5156 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
5157 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5158 		m_freem(m);
5159 		return; /* drop */
5160 	}
5161 
5162 	len = le16toh(desc->mpdu_len);
5163 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5164 		/* Allow control frames in monitor mode. */
5165 		if (len < sizeof(struct ieee80211_frame_cts)) {
5166 			ic->ic_stats.is_rx_tooshort++;
5167 			IC2IFP(ic)->if_ierrors++;
5168 			m_freem(m);
5169 			return;
5170 		}
5171 	} else if (len < sizeof(struct ieee80211_frame)) {
5172 		ic->ic_stats.is_rx_tooshort++;
5173 		IC2IFP(ic)->if_ierrors++;
5174 		m_freem(m);
5175 		return;
5176 	}
5177 	if (len > maxlen - desc_size) {
5178 		IC2IFP(ic)->if_ierrors++;
5179 		m_freem(m);
5180 		return;
5181 	}
5182 
5183 	m->m_data = pktdata + desc_size;
5184 	m->m_pkthdr.len = m->m_len = len;
5185 
5186 	/* Account for padding following the frame header. */
5187 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5188 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5189 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5190 		if (type == IEEE80211_FC0_TYPE_CTL) {
5191 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5192 			case IEEE80211_FC0_SUBTYPE_CTS:
5193 				hdrlen = sizeof(struct ieee80211_frame_cts);
5194 				break;
5195 			case IEEE80211_FC0_SUBTYPE_ACK:
5196 				hdrlen = sizeof(struct ieee80211_frame_ack);
5197 				break;
5198 			default:
5199 				hdrlen = sizeof(struct ieee80211_frame_min);
5200 				break;
5201 			}
5202 		} else
5203 			hdrlen = ieee80211_get_hdrlen(wh);
5204 
5205 		if ((le16toh(desc->status) &
5206 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5207 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5208 			/* Padding is inserted after the IV. */
5209 			hdrlen += IEEE80211_CCMP_HDRLEN;
5210 		}
5211 
5212 		memmove(m->m_data + 2, m->m_data, hdrlen);
5213 		m_adj(m, 2);
5214 	}
5215 
5216 	memset(&rxi, 0, sizeof(rxi));
5217 
5218 	/*
5219 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5220 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5221 	 * bit set in the frame header. We need to clear this bit ourselves.
5222 	 * (XXX This workaround is not required on AX200/AX201 devices that
5223 	 * have been tested by me, but it's unclear when this problem was
5224 	 * fixed in the hardware. It definitely affects the 9k generation.
5225 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
5226 	 * to exist that we may eventually add support for.)
5227 	 *
5228 	 * And we must allow the same CCMP PN for subframes following the
5229 	 * first subframe. Otherwise they would be discarded as replays.
5230 	 */
5231 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5232 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5233 		uint8_t subframe_idx = (desc->amsdu_info &
5234 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5235 		if (subframe_idx > 0)
5236 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5237 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5238 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5239 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5240 			    struct ieee80211_qosframe_addr4 *);
5241 			qwh4->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
5242 		} else if (ieee80211_has_qos(wh) &&
5243 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5244 			struct ieee80211_qosframe *qwh = mtod(m,
5245 			    struct ieee80211_qosframe *);
5246 			qwh->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
5247 		}
5248 	}
5249 
5250 	/*
5251 	 * Verify decryption before duplicate detection. The latter uses
5252 	 * the TID supplied in QoS frame headers and this TID is implicitly
5253 	 * verified as part of the CCMP nonce.
5254 	 */
5255 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5256 		m_freem(m);
5257 		return;
5258 	}
5259 
5260 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5261 		m_freem(m);
5262 		return;
5263 	}
5264 
5265 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5266 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
5267 		chanidx = desc->v3.channel;
5268 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5269 	} else {
5270 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
5271 		chanidx = desc->v1.channel;
5272 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5273 	}
5274 
5275 	phy_info = le16toh(desc->phy_info);
5276 
5277 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
5278 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
5279 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5280 
5281 	rxi.rxi_rssi = rssi;
5282 	rxi.rxi_tstamp = device_timestamp;
5283 	rxi.rxi_chan = chanidx;
5284 
5285 	if (iwx_rx_reorder(sc, m, chanidx, desc,
5286 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5287 	    rate_n_flags, device_timestamp, &rxi, ml))
5288 		return;
5289 
5290 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5291 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5292 	    rate_n_flags, device_timestamp, &rxi, ml);
5293 }
5294 
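/*
 * Reset a Tx descriptor after completion so that stale transfer
 * buffer entries cannot be re-used by the hardware. Only TB0, which
 * holds bidirectional DMA data, is left intact.
 */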
5295 void
5296 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5297 {
5298 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
5299 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5300 	int i;
5301 
5302 	/* First TB is never cleared - it is bidirectional DMA data. */
5303 	for (i = 1; i < num_tbs; i++) {
5304 		struct iwx_tfh_tb *tb = &desc->tbs[i];
5305 		memset(tb, 0, sizeof(*tb));
5306 	}
5307 	desc->num_tbs = htole16(1);
5308 
5309 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5310 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5311 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
5312 }
5313 
5314 void
5315 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5316 {
5317 	struct ieee80211com *ic = &sc->sc_ic;
5318 
5319 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5320 	    BUS_DMASYNC_POSTWRITE);
5321 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5322 	m_freem(txd->m);
5323 	txd->m = NULL;
5324 
5325 	KASSERT(txd->in);
5326 	ieee80211_release_node(ic, &txd->in->in_ni);
5327 	txd->in = NULL;
5328 }
5329 
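/*
 * Reclaim completed frames up to, but not including, index 'idx'.
 * The driver-side tail wraps at IWX_TX_RING_COUNT while the
 * hardware-side tail wraps at sc->max_tfd_queue_size; both are
 * advanced in lockstep.
 */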
5330 void
5331 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5332 {
5333 	struct iwx_tx_data *txd;
5334 
5335 	while (ring->tail_hw != idx) {
5336 		txd = &ring->data[ring->tail];
5337 		if (txd->m != NULL) {
5338 			iwx_clear_tx_desc(sc, ring, ring->tail);
5339 			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5340 			iwx_txd_done(sc, txd);
5341 			ring->queued--;
5342 		}
5343 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5344 		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5345 	}
5346 }
5347 
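/*
 * Handle a Tx completion notification. Only single-frame completions
 * are processed here; aggregation Tx queues are reclaimed via
 * compressed block-ack notifications instead (see
 * iwx_rx_compressed_ba()).
 */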
5348 void
5349 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5350     struct iwx_rx_data *data)
5351 {
5352 	struct ieee80211com *ic = &sc->sc_ic;
5353 	struct ifnet *ifp = IC2IFP(ic);
5354 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5355 	int qid = cmd_hdr->qid, status, txfail;
5356 	struct iwx_tx_ring *ring = &sc->txq[qid];
5357 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5358 	uint32_t ssn;
5359 	uint32_t len = iwx_rx_packet_len(pkt);
5360 
5361 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5362 	    BUS_DMASYNC_POSTREAD);
5363 
5364 	/* Sanity checks. */
5365 	if (sizeof(*tx_resp) > len)
5366 		return;
5367 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5368 		return;
5369 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5370 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5371 		return;
5372 
5373 	sc->sc_tx_timer[qid] = 0;
5374 
5375 	if (tx_resp->frame_count > 1) /* A-MPDU */
5376 		return;
5377 
5378 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5379 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
5380 	    status != IWX_TX_STATUS_DIRECT_DONE);
5381 
5382 	if (txfail)
5383 		ifp->if_oerrors++;
5384 
5385 	/*
5386 	 * On hardware supported by iwx(4) the SSN counter corresponds
5387 	 * to a Tx ring index rather than a sequence number.
5388 	 * Frames up to this index (non-inclusive) can now be freed.
5389 	 */
5390 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5391 	ssn = le32toh(ssn);
5392 	if (ssn < sc->max_tfd_queue_size) {
5393 		iwx_txq_advance(sc, ring, ssn);
5394 		iwx_clear_oactive(sc, ring);
5395 	}
5396 }
5397 
5398 void
5399 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5400 {
5401 	struct ieee80211com *ic = &sc->sc_ic;
5402 	struct ifnet *ifp = IC2IFP(ic);
5403 
5404 	if (ring->queued < IWX_TX_RING_LOMARK) {
5405 		sc->qfullmsk &= ~(1 << ring->qid);
5406 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5407 			ifq_clr_oactive(&ifp->if_snd);
5408 			/*
5409 			 * We are in interrupt context, but net80211
5410 			 * performs similar work from interrupt context,
5411 			 * so this should be harmless.
5412 			 */
5413 			(*ifp->if_start)(ifp);
5414 		}
5415 	}
5416 }
5417 
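/*
 * Handle a compressed block-ack notification. The firmware reports,
 * per TID, how far each aggregation Tx queue has progressed; frames
 * up to the reported TFD index have completed and can be reclaimed.
 */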
5418 void
5419 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5420 {
5421 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5422 	struct ieee80211com *ic = &sc->sc_ic;
5423 	struct ieee80211_node *ni;
5424 	struct ieee80211_tx_ba *ba;
5425 	struct iwx_node *in;
5426 	struct iwx_tx_ring *ring;
5427 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5428 	int qid;
5429 
5430 	if (ic->ic_state != IEEE80211_S_RUN)
5431 		return;
5432 
5433 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5434 		return;
5435 
5436 	if (ba_res->sta_id != IWX_STATION_ID)
5437 		return;
5438 
5439 	ni = ic->ic_bss;
5440 	in = (void *)ni;
5441 
5442 	tfd_cnt = le16toh(ba_res->tfd_cnt);
5443 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5444 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5445 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5446 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
5447 		return;
5448 
5449 	for (i = 0; i < tfd_cnt; i++) {
5450 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5451 		uint8_t tid;
5452 
5453 		tid = ba_tfd->tid;
5454 		if (tid >= nitems(sc->aggqid))
5455 			continue;
5456 
5457 		qid = sc->aggqid[tid];
5458 		if (qid != le16toh(ba_tfd->q_num))
5459 			continue;
5460 
5461 		ring = &sc->txq[qid];
5462 
5463 		ba = &ni->ni_tx_ba[tid];
5464 		if (ba->ba_state != IEEE80211_BA_AGREED)
5465 			continue;
5466 
5467 		idx = le16toh(ba_tfd->tfd_index);
5468 		sc->sc_tx_timer[qid] = 0;
5469 		iwx_txq_advance(sc, ring, idx);
5470 		iwx_clear_oactive(sc, ring);
5471 	}
5472 }
5473 
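/*
 * Handle a missed-beacon notification. If too many consecutive
 * beacons are missed while the management timer is idle, probe the
 * AP before falling back to a scan.
 */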
5474 void
5475 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5476     struct iwx_rx_data *data)
5477 {
5478 	struct ieee80211com *ic = &sc->sc_ic;
5479 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5480 	uint32_t missed;
5481 
5482 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5483 	    (ic->ic_state != IEEE80211_S_RUN))
5484 		return;
5485 
5486 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5487 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5488 
5489 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5490 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5491 		if (ic->ic_if.if_flags & IFF_DEBUG)
5492 			printf("%s: receiving no beacons from %s; checking if "
5493 			    "this AP is still responding to probe requests\n",
5494 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5495 		/*
5496 		 * Rather than go directly to scan state, try to send a
5497 		 * directed probe request first. If that fails then the
5498 		 * state machine will drop us into scanning after timing
5499 		 * out waiting for a probe response.
5500 		 */
5501 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5502 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5503 	}
5505 }
5506 
5507 int
5508 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5509 {
5510 	struct iwx_binding_cmd cmd;
5511 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5512 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5513 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5514 	uint32_t status;
5515 
5516 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5517 		panic("binding already added");
5518 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5519 		panic("binding already removed");
5520 
5521 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
5522 		return EINVAL;
5523 
5524 	memset(&cmd, 0, sizeof(cmd));
5525 
5526 	cmd.id_and_color
5527 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5528 	cmd.action = htole32(action);
5529 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5530 
5531 	cmd.macs[0] = htole32(mac_id);
5532 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5533 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5534 
5535 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5536 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5537 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5538 	else
5539 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5540 
5541 	status = 0;
5542 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5543 	    &cmd, &status);
5544 	if (err == 0 && status != 0)
5545 		err = EIO;
5546 
5547 	return err;
5548 }
5549 
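/*
 * Map the offset between the primary channel and the 80MHz band
 * center to a control channel position. For example, with center
 * index 42 the primary channels 36, 40, 44 and 48 yield offsets
 * -6, -2, 2 and 6, i.e. positions 2-below, 1-below, 1-above and
 * 2-above, respectively.
 */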
5550 uint8_t
5551 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5552 {
5553 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5554 	int primary_idx = ic->ic_bss->ni_primary_chan;
5555 	/*
5556 	 * The firmware is expected to check the control channel position
5557 	 * only when operating in HT/VHT with a channel width greater than
5558 	 * 20MHz. Use this value as the default.
5559 	 */
5560 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5561 
5562 	switch (primary_idx - center_idx) {
5563 	case -6:
5564 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5565 		break;
5566 	case -2:
5567 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5568 		break;
5569 	case 2:
5570 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5571 		break;
5572 	case 6:
5573 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5574 		break;
5575 	default:
5576 		break;
5577 	}
5578 
5579 	return pos;
5580 }
5581 
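/*
 * Variant of iwx_phy_ctxt_cmd_v3_v4() for firmware with ultra-high-band
 * support, which uses a larger channel_info struct where the channel
 * number is a 32-bit field rather than a single byte.
 */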
5582 int
5583 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5584     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5585     uint8_t vht_chan_width, int cmdver)
5586 {
5587 	struct ieee80211com *ic = &sc->sc_ic;
5588 	struct iwx_phy_context_cmd_uhb cmd;
5589 	uint8_t active_cnt, idle_cnt;
5590 	struct ieee80211_channel *chan = ctxt->channel;
5591 
5592 	memset(&cmd, 0, sizeof(cmd));
5593 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5594 	    ctxt->color));
5595 	cmd.action = htole32(action);
5596 
5597 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5598 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5599 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5600 	else
5601 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5602 
5603 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5604 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5605 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5606 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5607 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5608 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5609 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5610 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5611 			/* secondary chan above -> control chan below */
5612 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5613 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5614 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5615 			/* secondary chan below -> control chan above */
5616 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5617 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5618 		} else {
5619 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5620 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5621 		}
5622 	} else {
5623 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5624 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5625 	}
5626 
5627 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5628 	    IWX_RLC_CONFIG_CMD) != 2) {
5629 		idle_cnt = chains_static;
5630 		active_cnt = chains_dynamic;
5631 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5632 		    IWX_PHY_RX_CHAIN_VALID_POS);
5633 		cmd.rxchain_info |= htole32(idle_cnt <<
5634 		    IWX_PHY_RX_CHAIN_CNT_POS);
5635 		cmd.rxchain_info |= htole32(active_cnt <<
5636 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5637 	}
5638 
5639 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5640 }
5641 
5642 int
5643 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5644     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5645     uint8_t vht_chan_width, int cmdver)
5646 {
5647 	struct ieee80211com *ic = &sc->sc_ic;
5648 	struct iwx_phy_context_cmd cmd;
5649 	uint8_t active_cnt, idle_cnt;
5650 	struct ieee80211_channel *chan = ctxt->channel;
5651 
5652 	memset(&cmd, 0, sizeof(cmd));
5653 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5654 	    ctxt->color));
5655 	cmd.action = htole32(action);
5656 
5657 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5658 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5659 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5660 	else
5661 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5662 
5663 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5664 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5665 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5666 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5667 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5668 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5669 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5670 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5671 			/* secondary chan above -> control chan below */
5672 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5673 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5674 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5675 			/* secondary chan below -> control chan above */
5676 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5677 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5678 		} else {
5679 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5680 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5681 		}
5682 	} else {
5683 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5684 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5685 	}
5686 
5687 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5688 	    IWX_RLC_CONFIG_CMD) != 2) {
5689 		idle_cnt = chains_static;
5690 		active_cnt = chains_dynamic;
5691 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5692 		    IWX_PHY_RX_CHAIN_VALID_POS);
5693 		cmd.rxchain_info |= htole32(idle_cnt <<
5694 		    IWX_PHY_RX_CHAIN_CNT_POS);
5695 		cmd.rxchain_info |= htole32(active_cnt <<
5696 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5697 	}
5698 
5699 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5700 }
5701 
5702 int
5703 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5704     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5705     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5706 {
5707 	int cmdver;
5708 
5709 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5710 	if (cmdver != 3 && cmdver != 4) {
5711 		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5712 		    DEVNAME(sc));
5713 		return ENOTSUP;
5714 	}
5715 
5716 	/*
5717 	 * Intel increased the size of the fw_channel_info struct and neglected
5718 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5719 	 * member in the middle.
5720 	 * To keep things simple we use a separate function to handle the larger
5721 	 * variant of the phy context command.
5722 	 */
5723 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5724 		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5725 		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5726 	}
5727 
5728 	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5729 	    action, sco, vht_chan_width, cmdver);
5730 }
5731 
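/*
 * Send a command to the firmware. Synchronous commands sleep until
 * the firmware acknowledges them (iwx_cmd_done() wakes the sleeper);
 * commands flagged IWX_CMD_WANT_RESP additionally get a response
 * buffer which the caller must release with iwx_free_resp().
 * Payloads larger than the pre-allocated command buffer are sent
 * from a temporary mbuf cluster.
 */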
5732 int
5733 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5734 {
5735 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5736 	struct iwx_tfh_tfd *desc;
5737 	struct iwx_tx_data *txdata;
5738 	struct iwx_device_cmd *cmd;
5739 	struct mbuf *m;
5740 	bus_addr_t paddr;
5741 	uint64_t addr;
5742 	int err = 0, i, paylen, off, s;
5743 	int idx, code, async, group_id;
5744 	size_t hdrlen, datasz;
5745 	uint8_t *data;
5746 	int generation = sc->sc_generation;
5747 
5748 	code = hcmd->id;
5749 	async = hcmd->flags & IWX_CMD_ASYNC;
5750 	idx = ring->cur;
5751 
5752 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5753 		paylen += hcmd->len[i];
5754 	}
5755 
5756 	/* If this command waits for a response, allocate response buffer. */
5757 	hcmd->resp_pkt = NULL;
5758 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5759 		uint8_t *resp_buf;
5760 		KASSERT(!async);
5761 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5762 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5763 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5764 			return ENOSPC;
5765 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5766 		    M_NOWAIT | M_ZERO);
5767 		if (resp_buf == NULL)
5768 			return ENOMEM;
5769 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5770 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5771 	} else {
5772 		sc->sc_cmd_resp_pkt[idx] = NULL;
5773 	}
5774 
5775 	s = splnet();
5776 
5777 	desc = &ring->desc[idx];
5778 	txdata = &ring->data[idx];
5779 
5780 	/*
5781 	 * XXX Intel inside (tm)
5782 	 * Firmware API versions >= 50 reject old-style commands in
5783 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5784 	 * that such commands were in the LONG_GROUP instead in order
5785 	 * for firmware to accept them.
5786 	 */
5787 	if (iwx_cmd_groupid(code) == 0) {
5788 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5789 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5790 	} else
5791 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5792 
5793 	group_id = iwx_cmd_groupid(code);
5794 
5795 	hdrlen = sizeof(cmd->hdr_wide);
5796 	datasz = sizeof(cmd->data_wide);
5797 
5798 	if (paylen > datasz) {
5799 		/* Command is too large to fit in pre-allocated space. */
5800 		size_t totlen = hdrlen + paylen;
5801 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5802 			printf("%s: firmware command too long (%zd bytes)\n",
5803 			    DEVNAME(sc), totlen);
5804 			err = EINVAL;
5805 			goto out;
5806 		}
5807 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5808 		if (m == NULL) {
5809 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5810 			    DEVNAME(sc), totlen);
5811 			err = ENOMEM;
5812 			goto out;
5813 		}
5814 		cmd = mtod(m, struct iwx_device_cmd *);
5815 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5816 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5817 		if (err) {
5818 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5819 			    DEVNAME(sc), totlen);
5820 			m_freem(m);
5821 			goto out;
5822 		}
5823 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5824 		paddr = txdata->map->dm_segs[0].ds_addr;
5825 	} else {
5826 		cmd = &ring->cmd[idx];
5827 		paddr = txdata->cmd_paddr;
5828 	}
5829 
5830 	memset(cmd, 0, sizeof(*cmd));
5831 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5832 	cmd->hdr_wide.group_id = group_id;
5833 	cmd->hdr_wide.qid = ring->qid;
5834 	cmd->hdr_wide.idx = idx;
5835 	cmd->hdr_wide.length = htole16(paylen);
5836 	cmd->hdr_wide.version = iwx_cmd_version(code);
5837 	data = cmd->data_wide;
5838 
5839 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5840 		if (hcmd->len[i] == 0)
5841 			continue;
5842 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5843 		off += hcmd->len[i];
5844 	}
5845 	KASSERT(off == paylen);
5846 
5847 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5848 	addr = htole64(paddr);
5849 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5850 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5851 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5852 		    IWX_FIRST_TB_SIZE);
5853 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5854 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5855 		desc->num_tbs = htole16(2);
5856 	} else
5857 		desc->num_tbs = htole16(1);
5858 
5859 	if (paylen > datasz) {
5860 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5861 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5862 	} else {
5863 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5864 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5865 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5866 	}
5867 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5868 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5869 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5870 	/* Kick command ring. */
5871 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5872 	ring->queued++;
5873 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5874 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5875 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5876 
5877 	if (!async) {
5878 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5879 		if (err == 0) {
5880 			/* if hardware is no longer up, return error */
5881 			if (generation != sc->sc_generation) {
5882 				err = ENXIO;
5883 				goto out;
5884 			}
5885 
5886 			/* Response buffer will be freed in iwx_free_resp(). */
5887 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5888 			sc->sc_cmd_resp_pkt[idx] = NULL;
5889 		} else if (generation == sc->sc_generation) {
5890 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5891 			    sc->sc_cmd_resp_len[idx]);
5892 			sc->sc_cmd_resp_pkt[idx] = NULL;
5893 		}
5894 	}
5895  out:
5896 	splx(s);
5897 
5898 	return err;
5899 }
5900 
5901 int
5902 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5903     uint16_t len, const void *data)
5904 {
5905 	struct iwx_host_cmd cmd = {
5906 		.id = id,
5907 		.len = { len, },
5908 		.data = { data, },
5909 		.flags = flags,
5910 	};
5911 
5912 	return iwx_send_cmd(sc, &cmd);
5913 }
5914 
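/*
 * Send a synchronous command which reports a 32-bit status word in
 * its response, and return that status to the caller.
 */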
5915 int
5916 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5917     uint32_t *status)
5918 {
5919 	struct iwx_rx_packet *pkt;
5920 	struct iwx_cmd_response *resp;
5921 	int err, resp_len;
5922 
5923 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5924 	cmd->flags |= IWX_CMD_WANT_RESP;
5925 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5926 
5927 	err = iwx_send_cmd(sc, cmd);
5928 	if (err)
5929 		return err;
5930 
5931 	pkt = cmd->resp_pkt;
5932 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5933 		return EIO;
5934 
5935 	resp_len = iwx_rx_packet_payload_len(pkt);
5936 	if (resp_len != sizeof(*resp)) {
5937 		iwx_free_resp(sc, cmd);
5938 		return EIO;
5939 	}
5940 
5941 	resp = (void *)pkt->data;
5942 	*status = le32toh(resp->status);
5943 	iwx_free_resp(sc, cmd);
5944 	return err;
5945 }
5946 
5947 int
5948 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5949     const void *data, uint32_t *status)
5950 {
5951 	struct iwx_host_cmd cmd = {
5952 		.id = id,
5953 		.len = { len, },
5954 		.data = { data, },
5955 	};
5956 
5957 	return iwx_send_cmd_status(sc, &cmd, status);
5958 }
5959 
5960 void
5961 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5962 {
5963 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5964 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5965 	hcmd->resp_pkt = NULL;
5966 }
5967 
5968 void
5969 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5970 {
5971 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5972 	struct iwx_tx_data *data;
5973 
5974 	if (qid != IWX_DQA_CMD_QUEUE) {
5975 		return;	/* Not a command ack. */
5976 	}
5977 
5978 	data = &ring->data[idx];
5979 
5980 	if (data->m != NULL) {
5981 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5982 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5983 		bus_dmamap_unload(sc->sc_dmat, data->map);
5984 		m_freem(data->m);
5985 		data->m = NULL;
5986 	}
5987 	wakeup(&ring->desc[idx]);
5988 
5989 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5990 	if (ring->queued == 0) {
5991 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5992 			DEVNAME(sc), code));
5993 	} else if (ring->queued > 0)
5994 		ring->queued--;
5995 }
5996 
5997 uint32_t
5998 iwx_fw_rateidx_ofdm(uint8_t rval)
5999 {
6000 	/* Firmware expects indices which match our 11a rate set. */
6001 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
6002 	int i;
6003 
6004 	for (i = 0; i < rs->rs_nrates; i++) {
6005 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6006 			return i;
6007 	}
6008 
6009 	return 0;
6010 }
6011 
6012 uint32_t
6013 iwx_fw_rateidx_cck(uint8_t rval)
6014 {
6015 	/* Firmware expects indices which match our 11b rate set. */
6016 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
6017 	int i;
6018 
6019 	for (i = 0; i < rs->rs_nrates; i++) {
6020 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6021 			return i;
6022 	}
6023 
6024 	return 0;
6025 }
6026 
6027 /*
6028  * Determine the Tx command flags and Tx rate+flags to use.
6029  * Return the selected Tx rate.
6030  */
6031 const struct iwx_rate *
6032 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
6033     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags)
6034 {
6035 	struct ieee80211com *ic = &sc->sc_ic;
6036 	struct ieee80211_node *ni = &in->in_ni;
6037 	struct ieee80211_rateset *rs = &ni->ni_rates;
6038 	const struct iwx_rate *rinfo;
6039 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6040 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
6041 	int ridx, rate_flags;
6042 	uint8_t rval;
6043 
6044 	*flags = 0;
6045 
6046 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6047 	    type != IEEE80211_FC0_TYPE_DATA) {
6048 		/* for non-data, use the lowest supported rate */
6049 		ridx = min_ridx;
6050 		*flags |= IWX_TX_FLAGS_CMD_RATE;
6051 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
6052 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
6053 	} else {
6054 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6055 		ridx = iwx_rval2ridx(rval);
6056 		if (ridx < min_ridx)
6057 			ridx = min_ridx;
6058 	}
6059 
6060 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
6061 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
6062 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
6063 
6064 	rinfo = &iwx_rates[ridx];
6065 
6066 	/*
6067 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
6068 	 * For data frames we rely on Tx rate scaling in firmware by default.
6069 	 */
6070 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
6071 		*rate_n_flags = 0;
6072 		return rinfo;
6073 	}
6074 
6075 	/*
6076 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
6077 	 * Association will only succeed if we do this correctly.
6078 	 */
6079 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
6080 	if (IWX_RIDX_IS_CCK(ridx)) {
6081 		if (sc->sc_rate_n_flags_version >= 2)
6082 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
6083 		else
6084 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
6085 	} else if (sc->sc_rate_n_flags_version >= 2)
6086 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
6087 
6088 	rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6089 	if (sc->sc_rate_n_flags_version >= 2) {
6090 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
6091 			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
6092 			    IWX_RATE_LEGACY_RATE_MSK);
6093 		} else {
6094 			rate_flags |= (iwx_fw_rateidx_cck(rval) &
6095 			    IWX_RATE_LEGACY_RATE_MSK);
6096 		}
6097 	} else
6098 		rate_flags |= rinfo->plcp;
6099 
6100 	*rate_n_flags = rate_flags;
6101 
6102 	return rinfo;
6103 }
6104 
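/*
 * Update the byte count table which tells the firmware scheduler how
 * much data each TFD transfers: in bytes on AX210 and later devices,
 * in dwords on earlier ones (e.g. a 1200-byte frame on pre-AX210
 * hardware is encoded as 300 dwords). The upper bits of each entry
 * encode how many 64-byte chunks of the TFD the hardware must fetch.
 */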
6105 void
6106 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
6107     int idx, uint16_t byte_cnt, uint16_t num_tbs)
6108 {
6109 	uint8_t filled_tfd_size, num_fetch_chunks;
6110 	uint16_t len = byte_cnt;
6111 	uint16_t bc_ent;
6112 
6113 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
6114 			  num_tbs * sizeof(struct iwx_tfh_tb);
6115 	/*
6116 	 * filled_tfd_size contains the number of filled bytes in the TFD.
6117 	 * Dividing it by 64 gives the number of chunks to fetch into
6118 	 * SRAM: 0 for one chunk, 1 for two, and so on.
6119 	 * If, for example, the TFD contains only 3 TBs, then 32 bytes
6120 	 * of the TFD are used and only one 64-byte chunk needs to
6121 	 * be fetched.
6122 	 */
6123 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
6124 
6125 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6126 		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
6127 		/* Starting from AX210, the HW expects bytes */
6128 		bc_ent = htole16(len | (num_fetch_chunks << 14));
6129 		scd_bc_tbl[idx].tfd_offset = bc_ent;
6130 	} else {
6131 		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
6132 		/* Before AX210, the HW expects DW */
6133 		len = howmany(len, 4);
6134 		bc_ent = htole16(len | (num_fetch_chunks << 12));
6135 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
6136 	}
6137 
6138 	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, 0,
6139 	    txq->bc_tbl.map->dm_mapsize, BUS_DMASYNC_PREWRITE);
6140 }
6141 
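/*
 * Transmit a frame. In outline: pick a Tx queue (the per-TID
 * aggregation queue for QoS data if a block-ack agreement exists),
 * fill in the Tx command including rate selection and offload flags,
 * DMA-map the payload, fill the TFD with one transfer buffer per DMA
 * segment (TB0/TB1 hold the Tx command and 802.11 header), update
 * the byte count table, and advance the ring's write pointer.
 */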
6142 int
6143 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
6144 {
6145 	struct ieee80211com *ic = &sc->sc_ic;
6146 	struct iwx_node *in = (void *)ni;
6147 	struct iwx_tx_ring *ring;
6148 	struct iwx_tx_data *data;
6149 	struct iwx_tfh_tfd *desc;
6150 	struct iwx_device_cmd *cmd;
6151 	struct ieee80211_frame *wh;
6152 	struct ieee80211_key *k = NULL;
6153 	const struct iwx_rate *rinfo;
6154 	uint64_t paddr;
6155 	u_int hdrlen;
6156 	bus_dma_segment_t *seg;
6157 	uint32_t rate_n_flags;
6158 	uint16_t num_tbs, flags, offload_assist = 0;
6159 	uint8_t type, subtype;
6160 	int i, totlen, err, pad, qid;
6161 	size_t txcmd_size;
6162 
6163 	wh = mtod(m, struct ieee80211_frame *);
6164 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6165 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6166 	if (type == IEEE80211_FC0_TYPE_CTL)
6167 		hdrlen = sizeof(struct ieee80211_frame_min);
6168 	else
6169 		hdrlen = ieee80211_get_hdrlen(wh);
6170 
6171 	qid = sc->first_data_qid;
6172 
6173 	/* Put QoS frames on the data queue which maps to their TID. */
6174 	if (ieee80211_has_qos(wh)) {
6175 		struct ieee80211_tx_ba *ba;
6176 		uint16_t qos = ieee80211_get_qos(wh);
6177 		uint8_t tid = qos & IEEE80211_QOS_TID;
6178 
6179 		ba = &ni->ni_tx_ba[tid];
6180 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6181 		    type == IEEE80211_FC0_TYPE_DATA &&
6182 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6183 		    sc->aggqid[tid] != 0 &&
6184 		    ba->ba_state == IEEE80211_BA_AGREED) {
6185 			qid = sc->aggqid[tid];
6186 		}
6187 	}
6188 
6189 	ring = &sc->txq[qid];
6190 	desc = &ring->desc[ring->cur];
6191 	memset(desc, 0, sizeof(*desc));
6192 	data = &ring->data[ring->cur];
6193 
6194 	cmd = &ring->cmd[ring->cur];
6195 	cmd->hdr.code = IWX_TX_CMD;
6196 	cmd->hdr.flags = 0;
6197 	cmd->hdr.qid = ring->qid;
6198 	cmd->hdr.idx = ring->cur;
6199 
6200 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags);
6201 
6202 #if NBPFILTER > 0
6203 	if (sc->sc_drvbpf != NULL) {
6204 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
6205 		uint16_t chan_flags;
6206 
6207 		tap->wt_flags = 0;
6208 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6209 		chan_flags = ni->ni_chan->ic_flags;
6210 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6211 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6212 			chan_flags &= ~IEEE80211_CHAN_HT;
6213 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6214 		}
6215 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6216 			chan_flags &= ~IEEE80211_CHAN_VHT;
6217 		tap->wt_chan_flags = htole16(chan_flags);
6218 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6219 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6220 		    type == IEEE80211_FC0_TYPE_DATA &&
6221 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
6222 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6223 		} else
6224 			tap->wt_rate = rinfo->rate;
6225 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6226 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6227 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6228 
6229 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6230 		    m, BPF_DIRECTION_OUT);
6231 	}
6232 #endif
6233 
6234 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6235 		k = ieee80211_get_txkey(ic, wh, ni);
6236 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6237 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6238 				return ENOBUFS;
6239 			/* 802.11 header may have moved. */
6240 			wh = mtod(m, struct ieee80211_frame *);
6241 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6242 		} else {
6243 			k->k_tsc++;
6244 			/* Hardware increments PN internally and adds IV. */
6245 		}
6246 	} else
6247 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6248 
6249 	totlen = m->m_pkthdr.len;
6250 
6251 	if (hdrlen & 3) {
6252 		/* First segment length must be a multiple of 4. */
6253 		pad = 4 - (hdrlen & 3);
6254 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
6255 	} else
6256 		pad = 0;
6257 
6258 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6259 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
6260 		memset(tx, 0, sizeof(*tx));
6261 		tx->len = htole16(totlen);
6262 		tx->offload_assist = htole32(offload_assist);
6263 		tx->flags = htole16(flags);
6264 		tx->rate_n_flags = htole32(rate_n_flags);
6265 		memcpy(tx->hdr, wh, hdrlen);
6266 		txcmd_size = sizeof(*tx);
6267 	} else {
6268 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
6269 		memset(tx, 0, sizeof(*tx));
6270 		tx->len = htole16(totlen);
6271 		tx->offload_assist = htole16(offload_assist);
6272 		tx->flags = htole32(flags);
6273 		tx->rate_n_flags = htole32(rate_n_flags);
6274 		memcpy(tx->hdr, wh, hdrlen);
6275 		txcmd_size = sizeof(*tx);
6276 	}
6277 
6278 	/* Trim 802.11 header. */
6279 	m_adj(m, hdrlen);
6280 
6281 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6282 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6283 	if (err && err != EFBIG) {
6284 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6285 		m_freem(m);
6286 		return err;
6287 	}
6288 	if (err) {
6289 		/* Too many DMA segments, linearize mbuf. */
6290 		if (m_defrag(m, M_DONTWAIT)) {
6291 			m_freem(m);
6292 			return ENOBUFS;
6293 		}
6294 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6295 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6296 		if (err) {
6297 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6298 			    err);
6299 			m_freem(m);
6300 			return err;
6301 		}
6302 	}
6303 	data->m = m;
6304 	data->in = in;
6305 
6306 	/* Fill TX descriptor. */
6307 	num_tbs = 2 + data->map->dm_nsegs;
6308 	desc->num_tbs = htole16(num_tbs);
6309 
6310 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
6311 	paddr = htole64(data->cmd_paddr);
6312 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
6313 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[0].tb_len)) >> 32)
6314 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
6315 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
6316 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
6317 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
6318 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
6319 
6320 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[1].tb_len)) >> 32)
6321 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
6322 
6323 	/* Other DMA segments are for data payload. */
6324 	seg = data->map->dm_segs;
6325 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6326 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
6327 		paddr = htole64(seg->ds_addr);
6328 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
6329 		if (seg->ds_addr >> 32 != (seg->ds_addr + le16toh(desc->tbs[i + 2].tb_len)) >> 32)
6330 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
6331 	}
6332 
6333 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6334 	    BUS_DMASYNC_PREWRITE);
6335 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6336 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6337 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6338 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6339 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6340 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6341 
6342 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
6343 
6344 	/* Kick TX ring. */
6345 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
6346 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
6347 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
6348 
6349 	/* Mark TX ring as full if we reach a certain threshold. */
6350 	if (++ring->queued > IWX_TX_RING_HIMARK) {
6351 		sc->qfullmsk |= 1 << ring->qid;
6352 	}
6353 
6354 	if (ic->ic_if.if_flags & IFF_UP)
6355 		sc->sc_tx_timer[ring->qid] = 15;
6356 
6357 	return 0;
6358 }
6359 
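/*
 * Ask the firmware to flush the Tx path for the given TIDs. The
 * response reports, per flushed queue, the read pointer after the
 * flush, which lets the driver reclaim any frames still on the ring.
 */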
6360 int
6361 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
6362 {
6363 	struct iwx_rx_packet *pkt;
6364 	struct iwx_tx_path_flush_cmd_rsp *resp;
6365 	struct iwx_tx_path_flush_cmd flush_cmd = {
6366 		.sta_id = htole32(sta_id),
6367 		.tid_mask = htole16(tids),
6368 	};
6369 	struct iwx_host_cmd hcmd = {
6370 		.id = IWX_TXPATH_FLUSH,
6371 		.len = { sizeof(flush_cmd), },
6372 		.data = { &flush_cmd, },
6373 		.flags = IWX_CMD_WANT_RESP,
6374 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
6375 	};
6376 	int err, resp_len, i, num_flushed_queues;
6377 
6378 	err = iwx_send_cmd(sc, &hcmd);
6379 	if (err)
6380 		return err;
6381 
6382 	pkt = hcmd.resp_pkt;
6383 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6384 		err = EIO;
6385 		goto out;
6386 	}
6387 
6388 	resp_len = iwx_rx_packet_payload_len(pkt);
6389 	if (resp_len != sizeof(*resp)) {
6390 		err = EIO;
6391 		goto out;
6392 	}
6393 
6394 	resp = (void *)pkt->data;
6395 
6396 	if (le16toh(resp->sta_id) != sta_id) {
6397 		err = EIO;
6398 		goto out;
6399 	}
6400 
6401 	num_flushed_queues = le16toh(resp->num_flushed_queues);
6402 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
6403 		err = EIO;
6404 		goto out;
6405 	}
6406 
6407 	for (i = 0; i < num_flushed_queues; i++) {
6408 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
6409 		uint16_t tid = le16toh(queue_info->tid);
6410 		uint16_t read_after = le16toh(queue_info->read_after_flush);
6411 		uint16_t qid = le16toh(queue_info->queue_num);
6412 		struct iwx_tx_ring *txq;
6413 
6414 		if (qid >= nitems(sc->txq))
6415 			continue;
6416 
6417 		txq = &sc->txq[qid];
6418 		if (tid != txq->tid)
6419 			continue;
6420 
6421 		iwx_txq_advance(sc, txq, read_after);
6422 	}
6423 out:
6424 	iwx_free_resp(sc, &hcmd);
6425 	return err;
6426 }
6427 
6428 #define IWX_FLUSH_WAIT_MS	2000
6429 
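/*
 * Toggle the firmware's drain-flow flag for the station. While
 * draining, the firmware stops accepting new frames for the station
 * so that pending frames can be flushed safely; see iwx_flush_sta().
 */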
6430 int
6431 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
6432 {
6433 	struct iwx_add_sta_cmd cmd;
6434 	int err;
6435 	uint32_t status;
6436 
6437 	memset(&cmd, 0, sizeof(cmd));
6438 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6439 	    in->in_color));
6440 	cmd.sta_id = IWX_STATION_ID;
6441 	cmd.add_modify = IWX_STA_MODE_MODIFY;
6442 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
6443 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
6444 
6445 	status = IWX_ADD_STA_SUCCESS;
6446 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
6447 	    sizeof(cmd), &cmd, &status);
6448 	if (err) {
6449 		printf("%s: could not update sta (error %d)\n",
6450 		    DEVNAME(sc), err);
6451 		return err;
6452 	}
6453 
6454 	switch (status & IWX_ADD_STA_STATUS_MASK) {
6455 	case IWX_ADD_STA_SUCCESS:
6456 		break;
6457 	default:
6458 		err = EIO;
6459 		printf("%s: could not %s draining for station\n",
6460 		    DEVNAME(sc), drain ? "enable" : "disable");
6461 		break;
6462 	}
6463 
6464 	return err;
6465 }
6466 
6467 int
6468 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
6469 {
6470 	int err;
6471 
6472 	splassert(IPL_NET);
6473 
6474 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
6475 
6476 	err = iwx_drain_sta(sc, in, 1);
6477 	if (err)
6478 		goto done;
6479 
6480 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
6481 	if (err) {
6482 		printf("%s: could not flush Tx path (error %d)\n",
6483 		    DEVNAME(sc), err);
6484 		goto done;
6485 	}
6486 
6487 	err = iwx_drain_sta(sc, in, 0);
6488 done:
6489 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6490 	return err;
6491 }
6492 
6493 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
6494 
6495 int
6496 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6497     struct iwx_beacon_filter_cmd *cmd)
6498 {
6499 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6500 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6501 }
6502 
6503 int
6504 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6505 {
6506 	struct iwx_beacon_filter_cmd cmd = {
6507 		IWX_BF_CMD_CONFIG_DEFAULTS,
6508 		.bf_enable_beacon_filter = htole32(1),
6509 		.ba_enable_beacon_abort = htole32(enable),
6510 	};
6511 
6512 	if (!sc->sc_bf.bf_enabled)
6513 		return 0;
6514 
6515 	sc->sc_bf.ba_enabled = enable;
6516 	return iwx_beacon_filter_send_cmd(sc, &cmd);
6517 }
6518 
6519 void
6520 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6521     struct iwx_mac_power_cmd *cmd)
6522 {
6523 	struct ieee80211com *ic = &sc->sc_ic;
6524 	struct ieee80211_node *ni = &in->in_ni;
6525 	int dtim_period, dtim_msec, keep_alive;
6526 
6527 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6528 	    in->in_color));
6529 	if (ni->ni_dtimperiod)
6530 		dtim_period = ni->ni_dtimperiod;
6531 	else
6532 		dtim_period = 1;
6533 
6534 	/*
6535 	 * Regardless of power management state the driver must set the
6536 	 * keep-alive period. The firmware uses it to send keep-alive NDPs
6537 	 * immediately after association. Ensure the keep-alive period is at
6538 	 * least 3 * DTIM; with a typical ~100ms DTIM the 25s floor applies.
6539 	 */
6540 	dtim_msec = dtim_period * ni->ni_intval;
6541 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6542 	keep_alive = roundup(keep_alive, 1000) / 1000;
6543 	cmd->keep_alive_seconds = htole16(keep_alive);
6544 
6545 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6546 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6547 }
6548 
6549 int
6550 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6551 {
6552 	int err;
6553 	int ba_enable;
6554 	struct iwx_mac_power_cmd cmd;
6555 
6556 	memset(&cmd, 0, sizeof(cmd));
6557 
6558 	iwx_power_build_cmd(sc, in, &cmd);
6559 
6560 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6561 	    sizeof(cmd), &cmd);
6562 	if (err != 0)
6563 		return err;
6564 
6565 	ba_enable = !!(cmd.flags &
6566 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6567 	return iwx_update_beacon_abort(sc, in, ba_enable);
6568 }
6569 
6570 int
6571 iwx_power_update_device(struct iwx_softc *sc)
6572 {
6573 	struct iwx_device_power_cmd cmd = { };
6574 	struct ieee80211com *ic = &sc->sc_ic;
6575 
6576 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6577 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6578 
6579 	return iwx_send_cmd_pdu(sc,
6580 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6581 }
6582 
6583 int
6584 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
6585 {
6586 	struct iwx_beacon_filter_cmd cmd = {
6587 		IWX_BF_CMD_CONFIG_DEFAULTS,
6588 		.bf_enable_beacon_filter = htole32(1),
6589 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
6590 	};
6591 	int err;
6592 
6593 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6594 	if (err == 0)
6595 		sc->sc_bf.bf_enabled = 1;
6596 
6597 	return err;
6598 }
6599 
6600 int
6601 iwx_disable_beacon_filter(struct iwx_softc *sc)
6602 {
6603 	struct iwx_beacon_filter_cmd cmd;
6604 	int err;
6605 
6606 	memset(&cmd, 0, sizeof(cmd));
6607 
6608 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6609 	if (err == 0)
6610 		sc->sc_bf.bf_enabled = 0;
6611 
6612 	return err;
6613 }
6614 
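/*
 * Add the station (or update it if 'update' is set), configuring
 * MIMO, 40/80MHz channel width, the maximum A-MPDU size, and the
 * minimum MPDU start spacing according to the negotiated HT/VHT
 * parameters.
 */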
6615 int
6616 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6617 {
6618 	struct iwx_add_sta_cmd add_sta_cmd;
6619 	int err;
6620 	uint32_t status, aggsize;
6621 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6622 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6623 	struct ieee80211com *ic = &sc->sc_ic;
6624 
6625 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
6626 		panic("STA already added");
6627 
6628 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6629 
6630 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6631 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6632 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
6633 	} else {
6634 		add_sta_cmd.sta_id = IWX_STATION_ID;
6635 		add_sta_cmd.station_type = IWX_STA_LINK;
6636 	}
6637 	add_sta_cmd.mac_id_n_color
6638 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6639 	if (!update) {
6640 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6641 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6642 			    etheranyaddr);
6643 		else
6644 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6645 			    in->in_macaddr);
6646 	}
6647 	add_sta_cmd.add_modify = update ? 1 : 0;
6648 	add_sta_cmd.station_flags_msk
6649 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
6650 
6651 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6652 		add_sta_cmd.station_flags_msk
6653 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
6654 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
6655 
6656 		if (iwx_mimo_enabled(sc)) {
6657 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6658 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
6659 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
6660 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
6661 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
6662 					add_sta_cmd.station_flags |=
6663 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6664 				}
6665 			} else {
6666 				if (in->in_ni.ni_rxmcs[1] != 0) {
6667 					add_sta_cmd.station_flags |=
6668 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6669 				}
6670 				if (in->in_ni.ni_rxmcs[2] != 0) {
6671 					add_sta_cmd.station_flags |=
6672 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
6673 				}
6674 			}
6675 		}
6676 
6677 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
6678 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
6679 			add_sta_cmd.station_flags |= htole32(
6680 			    IWX_STA_FLG_FAT_EN_40MHZ);
6681 		}
6682 
6683 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6684 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
6685 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
6686 				add_sta_cmd.station_flags |= htole32(
6687 				    IWX_STA_FLG_FAT_EN_80MHZ);
6688 			}
6689 			aggsize = (in->in_ni.ni_vhtcaps &
6690 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6691 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6692 		} else {
6693 			aggsize = (in->in_ni.ni_ampdu_param &
6694 			    IEEE80211_AMPDU_PARAM_LE);
6695 		}
6696 		if (aggsize > max_aggsize)
6697 			aggsize = max_aggsize;
6698 		add_sta_cmd.station_flags |= htole32((aggsize <<
6699 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
6700 		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
6701 
6702 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
6703 		case IEEE80211_AMPDU_PARAM_SS_2:
6704 			add_sta_cmd.station_flags
6705 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
6706 			break;
6707 		case IEEE80211_AMPDU_PARAM_SS_4:
6708 			add_sta_cmd.station_flags
6709 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
6710 			break;
6711 		case IEEE80211_AMPDU_PARAM_SS_8:
6712 			add_sta_cmd.station_flags
6713 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
6714 			break;
6715 		case IEEE80211_AMPDU_PARAM_SS_16:
6716 			add_sta_cmd.station_flags
6717 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
6718 			break;
6719 		default:
6720 			break;
6721 		}
6722 	}
6723 
6724 	status = IWX_ADD_STA_SUCCESS;
6725 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
6726 	    &add_sta_cmd, &status);
6727 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6728 		err = EIO;
6729 
6730 	return err;
6731 }
6732 
6733 int
6734 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6735 {
6736 	struct ieee80211com *ic = &sc->sc_ic;
6737 	struct iwx_rm_sta_cmd rm_sta_cmd;
6738 	int err;
6739 
6740 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6741 		panic("sta already removed");
6742 
6743 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6744 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6745 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6746 	else
6747 		rm_sta_cmd.sta_id = IWX_STATION_ID;
6748 
6749 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6750 	    &rm_sta_cmd);
6751 
6752 	return err;
6753 }
6754 
6755 int
6756 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
6757 {
6758 	struct ieee80211com *ic = &sc->sc_ic;
6759 	struct ieee80211_node *ni = &in->in_ni;
6760 	int err, i, cmd_ver;
6761 
6762 	err = iwx_flush_sta(sc, in);
6763 	if (err) {
6764 		printf("%s: could not flush Tx path (error %d)\n",
6765 		    DEVNAME(sc), err);
6766 		return err;
6767 	}
6768 
6769 	/*
6770 	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
6771 	 * before a station gets removed.
6772 	 */
6773 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
6774 	    IWX_SCD_QUEUE_CONFIG_CMD);
6775 	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
6776 		err = iwx_disable_mgmt_queue(sc);
6777 		if (err)
6778 			return err;
6779 		for (i = IWX_FIRST_AGG_TX_QUEUE;
6780 		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
6781 			struct iwx_tx_ring *ring = &sc->txq[i];
6782 			if ((sc->qenablemsk & (1 << i)) == 0)
6783 				continue;
6784 			err = iwx_disable_txq(sc, IWX_STATION_ID,
6785 			    ring->qid, ring->tid);
6786 			if (err) {
6787 				printf("%s: could not disable Tx queue %d "
6788 				    "(error %d)\n", DEVNAME(sc), ring->qid,
6789 				    err);
6790 				return err;
6791 			}
6792 		}
6793 	}
6794 
6795 	err = iwx_rm_sta_cmd(sc, in);
6796 	if (err) {
6797 		printf("%s: could not remove STA (error %d)\n",
6798 		    DEVNAME(sc), err);
6799 		return err;
6800 	}
6801 
6802 	in->in_flags = 0;
6803 
6804 	sc->sc_rx_ba_sessions = 0;
6805 	sc->ba_rx.start_tidmask = 0;
6806 	sc->ba_rx.stop_tidmask = 0;
6807 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
6808 	sc->ba_tx.start_tidmask = 0;
6809 	sc->ba_tx.stop_tidmask = 0;
6810 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
6811 		sc->qenablemsk &= ~(1 << i);
6812 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
6813 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
6814 		if (ba->ba_state != IEEE80211_BA_AGREED)
6815 			continue;
6816 		ieee80211_delba_request(ic, ni, 0, 1, i);
6817 	}
6818 
6819 	return 0;
6820 }
6821 
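/*
 * Fill the UMAC scan channel configuration array. Firmware with the
 * SCAN_EXT_CHAN_VER API uses the v2 entry layout, which carries an
 * explicit band field; older firmware uses the v1 layout without one.
 */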
6822 uint8_t
6823 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6824     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6825     int n_ssids, uint32_t channel_cfg_flags)
6826 {
6827 	struct ieee80211com *ic = &sc->sc_ic;
6828 	struct ieee80211_channel *c;
6829 	uint8_t nchan;
6830 
6831 	for (nchan = 0, c = &ic->ic_channels[1];
6832 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6833 	    nchan < chan_nitems &&
6834 	    nchan < sc->sc_capa_n_scan_channels;
6835 	    c++) {
6836 		uint8_t channel_num;
6837 
6838 		if (c->ic_flags == 0)
6839 			continue;
6840 
6841 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6842 		if (isset(sc->sc_ucode_api,
6843 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6844 			chan->v2.channel_num = channel_num;
6845 			if (IEEE80211_IS_CHAN_2GHZ(c))
6846 				chan->v2.band = IWX_PHY_BAND_24;
6847 			else
6848 				chan->v2.band = IWX_PHY_BAND_5;
6849 			chan->v2.iter_count = 1;
6850 			chan->v2.iter_interval = 0;
6851 		} else {
6852 			chan->v1.channel_num = channel_num;
6853 			chan->v1.iter_count = 1;
6854 			chan->v1.iter_interval = htole16(0);
6855 		}
6856 
6857 		chan->flags = htole32(channel_cfg_flags);
6858 		chan++;
6859 		nchan++;
6860 	}
6861 
6862 	return nchan;
6863 }
6864 
6865 int
6866 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6867 {
6868 	struct ieee80211com *ic = &sc->sc_ic;
6869 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6870 	struct ieee80211_rateset *rs;
6871 	size_t remain = sizeof(preq->buf);
6872 	uint8_t *frm, *pos;
6873 
6874 	memset(preq, 0, sizeof(*preq));
6875 
6876 	if (remain < sizeof(*wh) + 2)
6877 		return ENOBUFS;
6878 
6879 	/*
6880 	 * Build a probe request frame.  Most of the following code is a
6881 	 * copy & paste of what is done in net80211.
6882 	 */
6883 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6884 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6885 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6886 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6887 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6888 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6889 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6890 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6891 
6892 	frm = (uint8_t *)(wh + 1);
6893 	*frm++ = IEEE80211_ELEMID_SSID;
6894 	*frm++ = 0;
6895 	/* hardware inserts SSID */
6896 
6897 	/* Tell the firmware where the MAC header is. */
6898 	preq->mac_header.offset = 0;
6899 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6900 	remain -= frm - (uint8_t *)wh;
6901 
6902 	/* Fill in 2GHz IEs and tell firmware where they are. */
6903 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6904 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6905 		if (remain < 4 + rs->rs_nrates)
6906 			return ENOBUFS;
6907 	} else if (remain < 2 + rs->rs_nrates)
6908 		return ENOBUFS;
6909 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6910 	pos = frm;
6911 	frm = ieee80211_add_rates(frm, rs);
6912 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6913 		frm = ieee80211_add_xrates(frm, rs);
6914 	remain -= frm - pos;
6915 
6916 	if (isset(sc->sc_enabled_capa,
6917 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6918 		if (remain < 3)
6919 			return ENOBUFS;
6920 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6921 		*frm++ = 1;
6922 		*frm++ = 0;
6923 		remain -= 3;
6924 	}
6925 	preq->band_data[0].len = htole16(frm - pos);
6926 
6927 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6928 		/* Fill in 5GHz IEs. */
6929 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6930 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6931 			if (remain < 4 + rs->rs_nrates)
6932 				return ENOBUFS;
6933 		} else if (remain < 2 + rs->rs_nrates)
6934 			return ENOBUFS;
6935 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6936 		pos = frm;
6937 		frm = ieee80211_add_rates(frm, rs);
6938 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6939 			frm = ieee80211_add_xrates(frm, rs);
6940 		preq->band_data[1].len = htole16(frm - pos);
6941 		remain -= frm - pos;
6942 		if (ic->ic_flags & IEEE80211_F_VHTON) {
6943 			if (remain < 14)
6944 				return ENOBUFS;
6945 			frm = ieee80211_add_vhtcaps(frm, ic);
6946 			remain -= frm - pos;
6947 			preq->band_data[1].len = htole16(frm - pos);
6948 		}
6949 	}
6950 
6951 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6952 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6953 	pos = frm;
6954 	if (ic->ic_flags & IEEE80211_F_HTON) {
6955 		if (remain < 28)
6956 			return ENOBUFS;
6957 		frm = ieee80211_add_htcaps(frm, ic);
6958 		/* XXX add WME info? */
6959 		remain -= frm - pos;
6960 	}
6961 
6962 	preq->common_data.len = htole16(frm - pos);
6963 
6964 	return 0;
6965 }
6966 
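/*
 * Send the scan engine configuration.  Firmware which advertises
 * IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG only needs the Tx/Rx chain
 * masks (and, before SCAN_CFG version 5, a dummy broadcast STA ID);
 * the remaining fields of struct iwx_scan_config stay zeroed.
 */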
6967 int
6968 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6969 {
6970 	struct iwx_scan_config scan_cfg;
6971 	struct iwx_host_cmd hcmd = {
6972 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6973 		.len[0] = sizeof(scan_cfg),
6974 		.data[0] = &scan_cfg,
6975 		.flags = 0,
6976 	};
6977 	int cmdver;
6978 
6979 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6980 		printf("%s: firmware does not support reduced scan config\n",
6981 		    DEVNAME(sc));
6982 		return ENOTSUP;
6983 	}
6984 
6985 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6986 
6987 	/*
6988 	 * SCAN_CFG version >= 5 implies that the broadcast
6989 	 * STA ID field is deprecated.
6990 	 */
6991 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6992 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6993 		scan_cfg.bcast_sta_id = 0xff;
6994 
6995 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6996 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6997 
6998 	return iwx_send_cmd(sc, &hcmd);
6999 }
7000 
7001 uint16_t
7002 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
7003 {
7004 	struct ieee80211com *ic = &sc->sc_ic;
7005 	uint16_t flags = 0;
7006 
7007 	if (ic->ic_des_esslen == 0)
7008 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
7009 
7010 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
7011 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
7012 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
7013 
7014 	return flags;
7015 }
7016 
7017 #define IWX_SCAN_DWELL_ACTIVE		10
7018 #define IWX_SCAN_DWELL_PASSIVE		110
7019 
7020 /* adaptive dwell max budget time [TU] for full scan */
7021 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7022 /* adaptive dwell max budget time [TU] for directed scan */
7023 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7024 /* adaptive dwell default number of APs for the high band */
7025 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7026 /* adaptive dwell default number of APs for the low band */
7027 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7028 /* adaptive dwell default number of APs on social channels (1, 6, 11) */
7029 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7030 /* adaptive dwell number-of-APs override for P2P friendly GO channels */
7031 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
7032 /* adaptive dwell number-of-APs override for social channels */
7033 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
7034 
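/*
 * Set the dwell times and adaptive-dwell parameters for a UMAC scan.
 * With adaptive dwell the firmware may leave a channel early once the
 * expected number of APs (the IWX_SCAN_ADWELL_DEFAULT_* values above)
 * has been seen.  Background scans get a smaller adaptive-dwell
 * budget (100 TU instead of 300 TU) and bounded off-channel time
 * (max_out_time and suspend_time of 120 TU) so that an established
 * connection is not starved while scanning.
 */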
7035 void
7036 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
7037     struct iwx_scan_general_params_v10 *general_params, int bgscan)
7038 {
7039 	uint32_t suspend_time, max_out_time;
7040 	uint8_t active_dwell, passive_dwell;
7041 
7042 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
7043 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
7044 
7045 	general_params->adwell_default_social_chn =
7046 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7047 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
7048 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
7049 
7050 	if (bgscan)
7051 		general_params->adwell_max_budget =
7052 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7053 	else
7054 		general_params->adwell_max_budget =
7055 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7056 
7057 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7058 	if (bgscan) {
7059 		max_out_time = 120;
7060 		suspend_time = 120;
7061 	} else {
7062 		max_out_time = 0;
7063 		suspend_time = 0;
7064 	}
7065 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
7066 		htole32(max_out_time);
7067 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
7068 		htole32(suspend_time);
7069 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
7070 		htole32(max_out_time);
7071 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
7072 		htole32(suspend_time);
7073 
7074 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
7075 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
7076 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
7077 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
7078 }
7079 
7080 void
7081 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
7082     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
7083 {
7084 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
7085 
7086 	gp->flags = htole16(gen_flags);
7087 
7088 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
7089 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
7090 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
7091 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
7092 
7093 	gp->scan_start_mac_id = 0;
7094 }
7095 
7096 void
7097 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
7098     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
7099     int n_ssid)
7100 {
7101 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
7102 
7103 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
7104 	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
7105 
7106 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
7107 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
7108 }
7109 
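/*
 * Issue a version 14 UMAC scan request, combining the general (dwell),
 * periodic, probe, and channel parameter blocks filled in above.  At
 * most one direct SSID is requested; its bit in bitmap_ssid is passed
 * down as a per-channel flag marking where directed probes are sent.
 * Background scans are sent asynchronously since they may be triggered
 * from contexts where sleeping is not allowed.
 */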
7110 int
7111 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
7112 {
7113 	struct ieee80211com *ic = &sc->sc_ic;
7114 	struct iwx_host_cmd hcmd = {
7115 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
7116 		.len = { 0, },
7117 		.data = { NULL, },
7118 		.flags = 0,
7119 	};
7120 	struct iwx_scan_req_umac_v14 *cmd;
7121 	struct iwx_scan_req_params_v14 *scan_p;
7122 	int err, async = bgscan, n_ssid = 0;
7123 	uint16_t gen_flags;
7124 	uint32_t bitmap_ssid = 0;
7125 
7126 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
7127 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7128 	if (cmd == NULL)
7129 		return ENOMEM;
7130 
7131 	scan_p = &cmd->scan_params;
7132 
7133 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7134 	cmd->uid = htole32(0);
7135 
7136 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
7137 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
7138 	    gen_flags, bgscan);
7139 
7140 	scan_p->periodic_params.schedule[0].interval = htole16(0);
7141 	scan_p->periodic_params.schedule[0].iter_count = 1;
7142 
7143 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
7144 	if (err) {
7145 		free(cmd, M_DEVBUF, sizeof(*cmd));
7146 		return err;
7147 	}
7148 
7149 	if (ic->ic_des_esslen != 0) {
7150 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
7151 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
7152 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
7153 		    ic->ic_des_essid, ic->ic_des_esslen);
7154 		bitmap_ssid |= (1 << 0);
7155 		n_ssid = 1;
7156 	}
7157 
7158 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
7159 	    n_ssid);
7160 
7161 	hcmd.len[0] = sizeof(*cmd);
7162 	hcmd.data[0] = (void *)cmd;
7163 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
7164 
7165 	err = iwx_send_cmd(sc, &hcmd);
7166 	free(cmd, M_DEVBUF, sizeof(*cmd));
7167 	return err;
7168 }
7169 
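/*
 * Handle an MCC chub notification: the firmware has derived a
 * regulatory domain, reported as a two-letter country code packed
 * into a 16-bit value.  For now this is only logged when IFF_DEBUG
 * is set; see the TODO below about sending an MCC update command.
 */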
7170 void
7171 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
7172 {
7173 	struct ieee80211com *ic = &sc->sc_ic;
7174 	struct ifnet *ifp = IC2IFP(ic);
7175 	char alpha2[3];
7176 
7177 	snprintf(alpha2, sizeof(alpha2), "%c%c",
7178 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
7179 
7180 	if (ifp->if_flags & IFF_DEBUG) {
7181 		printf("%s: firmware has detected regulatory domain '%s' "
7182 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
7183 	}
7184 
7185 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
7186 }
7187 
7188 uint8_t
7189 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7190 {
7191 	int i;
7192 	uint8_t rval;
7193 
7194 	for (i = 0; i < rs->rs_nrates; i++) {
7195 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7196 		if (rval == iwx_rates[ridx].rate)
7197 			return rs->rs_rates[i];
7198 	}
7199 
7200 	return 0;
7201 }
7202 
7203 int
7204 iwx_rval2ridx(int rval)
7205 {
7206 	int ridx;
7207 
7208 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
7209 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
7210 			continue;
7211 		if (rval == iwx_rates[ridx].rate)
7212 			break;
7213 	}
7214 
7215 	return ridx;
7216 }
7217 
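/*
 * Compute the CCK and OFDM ACK/CTS rate bitmaps for the MAC context
 * command, based on the basic rate set of our AP.  For example, if
 * the only basic rates are 5.5 and 11 Mbit/s, the CCK bitmap comes
 * out with the 1, 2, 5.5 and 11 Mbit/s bits set, since all mandatory
 * rates below the lowest basic rate must be added as well (see the
 * 802.11-2007 citation in the function body).
 */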
7218 void
7219 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
7220     int *ofdm_rates)
7221 {
7222 	struct ieee80211_node *ni = &in->in_ni;
7223 	struct ieee80211_rateset *rs = &ni->ni_rates;
7224 	int lowest_present_ofdm = -1;
7225 	int lowest_present_cck = -1;
7226 	uint8_t cck = 0;
7227 	uint8_t ofdm = 0;
7228 	int i;
7229 
7230 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7231 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7232 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
7233 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7234 				continue;
7235 			cck |= (1 << i);
7236 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7237 				lowest_present_cck = i;
7238 		}
7239 	}
7240 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
7241 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7242 			continue;
7243 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
7244 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7245 			lowest_present_ofdm = i;
7246 	}
7247 
7248 	/*
7249 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7250 	 * variables. This isn't sufficient though, as there might not
7251 	 * be all the right rates in the bitmap. E.g. if the only basic
7252 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7253 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7254 	 *
7255 	 *    [...] a STA responding to a received frame shall transmit
7256 	 *    its Control Response frame [...] at the highest rate in the
7257 	 *    BSSBasicRateSet parameter that is less than or equal to the
7258 	 *    rate of the immediately previous frame in the frame exchange
7259 	 *    sequence ([...]) and that is of the same modulation class
7260 	 *    ([...]) as the received frame. If no rate contained in the
7261 	 *    BSSBasicRateSet parameter meets these conditions, then the
7262 	 *    control frame sent in response to a received frame shall be
7263 	 *    transmitted at the highest mandatory rate of the PHY that is
7264 	 *    less than or equal to the rate of the received frame, and
7265 	 *    that is of the same modulation class as the received frame.
7266 	 *
7267 	 * As a consequence, we need to add all mandatory rates that are
7268 	 * lower than all of the basic rates to these bitmaps.
7269 	 */
7270 
7271 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
7272 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
7273 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
7274 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
7275 	/* 6M is already there or needed, so always add it. */
7276 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
7277 
7278 	/*
7279 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7280 	 * Note, however:
7281 	 *  - if no CCK rates are basic, it must be ERP since there must
7282 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7283 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7284 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7285 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7286 	 *  - if 2M is basic, 1M is mandatory
7287 	 *  - if 1M is basic, that's the only valid ACK rate.
7288 	 * As a consequence, it's not as complicated as it sounds, just add
7289 	 * any lower rates to the ACK rate bitmap.
7290 	 */
7291 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
7292 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
7293 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
7294 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
7295 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
7296 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
7297 	/* 1M is already there or needed, so always add it. */
7298 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
7299 
7300 	*cck_rates = cck;
7301 	*ofdm_rates = ofdm;
7302 }
7303 
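/*
 * Fill in the fields of the MAC context command which are common to
 * the ADD and MODIFY actions: MAC type, addresses, ACK rate bitmaps,
 * preamble/slot flags, EDCA parameters, and HT protection flags.
 * For IWX_FW_CTXT_ACTION_REMOVE only id_and_color and action matter,
 * so the function returns early.  The IWX_EXP2 macro converts an
 * exponent-form contention window to its actual value; e.g. an
 * ECWmin of 4 yields a CWmin of 15.
 */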
7304 void
7305 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
7306     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
7307 {
7308 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7309 	struct ieee80211com *ic = &sc->sc_ic;
7310 	struct ieee80211_node *ni = ic->ic_bss;
7311 	int cck_ack_rates, ofdm_ack_rates;
7312 	int i;
7313 
7314 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
7315 	    in->in_color));
7316 	cmd->action = htole32(action);
7317 
7318 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
7319 		return;
7320 
7321 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7322 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7323 	else if (ic->ic_opmode == IEEE80211_M_STA)
7324 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7325 	else
7326 		panic("unsupported operating mode %d", ic->ic_opmode);
7327 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
7328 
7329 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7330 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7331 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7332 		return;
7333 	}
7334 
7335 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7336 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7337 	cmd->cck_rates = htole32(cck_ack_rates);
7338 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7339 
7340 	cmd->cck_short_preamble
7341 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7342 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
7343 	cmd->short_slot
7344 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7345 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
7346 
7347 	for (i = 0; i < EDCA_NUM_AC; i++) {
7348 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7349 		int txf = iwx_ac_to_tx_fifo[i];
7350 
7351 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
7352 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
7353 		cmd->ac[txf].aifsn = ac->ac_aifsn;
7354 		cmd->ac[txf].fifos_mask = (1 << txf);
7355 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7356 	}
7357 	if (ni->ni_flags & IEEE80211_NODE_QOS)
7358 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
7359 
7360 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7361 		enum ieee80211_htprot htprot =
7362 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7363 		switch (htprot) {
7364 		case IEEE80211_HTPROT_NONE:
7365 			break;
7366 		case IEEE80211_HTPROT_NONMEMBER:
7367 		case IEEE80211_HTPROT_NONHT_MIXED:
7368 			cmd->protection_flags |=
7369 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7370 			    IWX_MAC_PROT_FLG_FAT_PROT);
7371 			break;
7372 		case IEEE80211_HTPROT_20MHZ:
7373 			if (in->in_phyctxt &&
7374 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7375 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
7376 				cmd->protection_flags |=
7377 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7378 				    IWX_MAC_PROT_FLG_FAT_PROT);
7379 			}
7380 			break;
7381 		default:
7382 			break;
7383 		}
7384 
7385 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
7386 	}
7387 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7388 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
7389 
7390 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
7391 #undef IWX_EXP2
7392 }
7393 
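/*
 * Fill in the station-specific part of the MAC context command using
 * timing from the last received beacon: the upcoming DTIM in device
 * time (based on ni_rstamp) and in TSF time, the beacon and DTIM
 * intervals, and our association ID.
 */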
7394 void
7395 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
7396     struct iwx_mac_data_sta *sta, int assoc)
7397 {
7398 	struct ieee80211_node *ni = &in->in_ni;
7399 	uint32_t dtim_off;
7400 	uint64_t tsf;
7401 
7402 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7403 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7404 	tsf = letoh64(tsf);
7405 
7406 	sta->is_assoc = htole32(assoc);
7407 	if (assoc) {
7408 		sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7409 		sta->dtim_tsf = htole64(tsf + dtim_off);
7410 		sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7411 	}
7412 	sta->bi = htole32(ni->ni_intval);
7413 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7414 	sta->data_policy = htole32(0);
7415 	sta->listen_interval = htole32(10);
7416 	sta->assoc_id = htole32(ni->ni_associd);
7417 }
7418 
7419 int
7420 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
7421     int assoc)
7422 {
7423 	struct ieee80211com *ic = &sc->sc_ic;
7424 	struct ieee80211_node *ni = &in->in_ni;
7425 	struct iwx_mac_ctx_cmd cmd;
7426 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
7427 
7428 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
7429 		panic("MAC already added");
7430 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
7431 		panic("MAC already removed");
7432 
7433 	memset(&cmd, 0, sizeof(cmd));
7434 
7435 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
7436 
7437 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7438 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
7439 		    sizeof(cmd), &cmd);
7440 	}
7441 
7442 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7443 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
7444 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7445 		    IWX_MAC_FILTER_ACCEPT_GRP |
7446 		    IWX_MAC_FILTER_IN_BEACON |
7447 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
7448 		    IWX_MAC_FILTER_IN_CRC32);
7449 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
7450 		/*
7451 		 * Allow beacons to pass through as long as we are not
7452 		 * associated or do not yet have DTIM period information.
7453 		 */
7454 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
7455 	}
7456 	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7457 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7458 }
7459 
7460 int
7461 iwx_clear_statistics(struct iwx_softc *sc)
7462 {
7463 	struct iwx_statistics_cmd scmd = {
7464 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7465 	};
7466 	struct iwx_host_cmd cmd = {
7467 		.id = IWX_STATISTICS_CMD,
7468 		.len[0] = sizeof(scmd),
7469 		.data[0] = &scmd,
7470 		.flags = IWX_CMD_WANT_RESP,
7471 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
7472 	};
7473 	int err;
7474 
7475 	err = iwx_send_cmd(sc, &cmd);
7476 	if (err)
7477 		return err;
7478 
7479 	iwx_free_resp(sc, &cmd);
7480 	return 0;
7481 }
7482 
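/*
 * Driver tasks hold a reference on the softc so that iwx_stop() can
 * wait for pending tasks to drain.  iwx_add_task() takes the
 * reference before queueing and drops it again if the task was
 * already queued; iwx_del_task() drops it when a queued task is
 * cancelled.
 */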
7483 void
7484 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7485 {
7486 	int s = splnet();
7487 
7488 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7489 		splx(s);
7490 		return;
7491 	}
7492 
7493 	refcnt_take(&sc->task_refs);
7494 	if (!task_add(taskq, task))
7495 		refcnt_rele_wake(&sc->task_refs);
7496 	splx(s);
7497 }
7498 
7499 void
7500 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7501 {
7502 	if (task_del(taskq, task))
7503 		refcnt_rele(&sc->task_refs);
7504 }
7505 
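/*
 * Start a full scan on behalf of net80211.  Any background scan in
 * progress is aborted first since only one firmware scan can run at
 * a time.  On success the interface moves to SCAN state directly,
 * bypassing sc_newstate(); iwx_endscan() later hands control back
 * to net80211 via ieee80211_end_scan().
 */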
7506 int
7507 iwx_scan(struct iwx_softc *sc)
7508 {
7509 	struct ieee80211com *ic = &sc->sc_ic;
7510 	struct ifnet *ifp = IC2IFP(ic);
7511 	int err;
7512 
7513 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
7514 		err = iwx_scan_abort(sc);
7515 		if (err) {
7516 			printf("%s: could not abort background scan\n",
7517 			    DEVNAME(sc));
7518 			return err;
7519 		}
7520 	}
7521 
7522 	err = iwx_umac_scan_v14(sc, 0);
7523 	if (err) {
7524 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7525 		return err;
7526 	}
7527 
7528 	/*
7529 	 * The current mode might have been fixed during association.
7530 	 * Ensure all channels get scanned.
7531 	 */
7532 	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7533 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7534 
7535 	sc->sc_flags |= IWX_FLAG_SCANNING;
7536 	if (ifp->if_flags & IFF_DEBUG)
7537 		printf("%s: %s -> %s\n", ifp->if_xname,
7538 		    ieee80211_state_name[ic->ic_state],
7539 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7540 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
7541 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7542 		ieee80211_node_cleanup(ic, ic->ic_bss);
7543 	}
7544 	ic->ic_state = IEEE80211_S_SCAN;
7545 	wakeup(&ic->ic_state); /* wake iwx_init() */
7546 
7547 	return 0;
7548 }
7549 
7550 int
7551 iwx_bgscan(struct ieee80211com *ic)
7552 {
7553 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
7554 	int err;
7555 
7556 	if (sc->sc_flags & IWX_FLAG_SCANNING)
7557 		return 0;
7558 
7559 	err = iwx_umac_scan_v14(sc, 1);
7560 	if (err) {
7561 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7562 		return err;
7563 	}
7564 
7565 	sc->sc_flags |= IWX_FLAG_BGSCAN;
7566 	return 0;
7567 }
7568 
7569 void
7570 iwx_bgscan_done(struct ieee80211com *ic,
7571     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
7572 {
7573 	struct iwx_softc *sc = ic->ic_softc;
7574 
7575 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7576 	sc->bgscan_unref_arg = arg;
7577 	sc->bgscan_unref_arg_size = arg_size;
7578 	iwx_add_task(sc, systq, &sc->bgscan_done_task);
7579 }
7580 
7581 void
7582 iwx_bgscan_done_task(void *arg)
7583 {
7584 	struct iwx_softc *sc = arg;
7585 	struct ieee80211com *ic = &sc->sc_ic;
7586 	struct iwx_node *in = (void *)ic->ic_bss;
7587 	struct ieee80211_node *ni = &in->in_ni;
7588 	int tid, err = 0, s = splnet();
7589 
7590 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
7591 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
7592 	    ic->ic_state != IEEE80211_S_RUN) {
7593 		err = ENXIO;
7594 		goto done;
7595 	}
7596 
7597 	err = iwx_flush_sta(sc, in);
7598 	if (err)
7599 		goto done;
7600 
7601 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
7602 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
7603 
7604 		if (sc->aggqid[tid] == 0)
7605 			continue;
7606 
7607 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
7608 		if (err)
7609 			goto done;
7610 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
7611 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
7612 		    IEEE80211_ACTION_DELBA,
7613 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
7614 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
7615 #endif
7616 		ieee80211_node_tx_ba_clear(ni, tid);
7617 		sc->aggqid[tid] = 0;
7618 	}
7619 
7620 	/*
7621 	 * Tx queues have been flushed and Tx agg has been stopped.
7622 	 * Allow roaming to proceed.
7623 	 */
7624 	ni->ni_unref_arg = sc->bgscan_unref_arg;
7625 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
7626 	sc->bgscan_unref_arg = NULL;
7627 	sc->bgscan_unref_arg_size = 0;
7628 	ieee80211_node_tx_stopped(ic, &in->in_ni);
7629 done:
7630 	if (err) {
7631 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7632 		sc->bgscan_unref_arg = NULL;
7633 		sc->bgscan_unref_arg_size = 0;
7634 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7635 			task_add(systq, &sc->init_task);
7636 	}
7637 	refcnt_rele_wake(&sc->task_refs);
7638 	splx(s);
7639 }
7640 
7641 int
7642 iwx_umac_scan_abort(struct iwx_softc *sc)
7643 {
7644 	struct iwx_umac_scan_abort cmd = { 0 };
7645 
7646 	return iwx_send_cmd_pdu(sc,
7647 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
7648 	    0, sizeof(cmd), &cmd);
7649 }
7650 
7651 int
7652 iwx_scan_abort(struct iwx_softc *sc)
7653 {
7654 	int err;
7655 
7656 	err = iwx_umac_scan_abort(sc);
7657 	if (err == 0)
7658 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7659 	return err;
7660 }
7661 
7662 int
7663 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7664 {
7665 	int err;
7666 
7667 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7668 
7669 	/*
7670 	 * Non-QoS frames use the "MGMT" TID and queue.
7671 	 * Other TIDs and data queues are reserved for QoS data frames.
7672 	 */
7673 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7674 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
7675 	if (err) {
7676 		printf("%s: could not enable Tx queue %d (error %d)\n",
7677 		    DEVNAME(sc), sc->first_data_qid, err);
7678 		return err;
7679 	}
7680 
7681 	return 0;
7682 }
7683 
7684 int
7685 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7686 {
7687 	int err, cmd_ver;
7688 
7689 	/* Explicit removal is only required with the old SCD_QUEUE_CFG command. */
7690 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7691 	    IWX_SCD_QUEUE_CONFIG_CMD);
7692 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7693 		return 0;
7694 
7695 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7696 
7697 	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7698 	    IWX_MGMT_TID);
7699 	if (err) {
7700 		printf("%s: could not disable Tx queue %d (error %d)\n",
7701 		    DEVNAME(sc), sc->first_data_qid, err);
7702 		return err;
7703 	}
7704 
7705 	return 0;
7706 }
7707 
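/*
 * Map a net80211 rate value (in units of 500 kbit/s) to the rate
 * index the firmware expects in the TLC_MNG_CONFIG non_ht_rates
 * bitmap, which follows our standard 11g rate set.  For example,
 * assuming the usual ordering of ieee80211_std_rateset_11g,
 * 54 Mbit/s (rval 108) maps to index 11.
 */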
7708 int
7709 iwx_rs_rval2idx(uint8_t rval)
7710 {
7711 	/* Firmware expects indices which match our 11g rate set. */
7712 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7713 	int i;
7714 
7715 	for (i = 0; i < rs->rs_nrates; i++) {
7716 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7717 			return i;
7718 	}
7719 
7720 	return -1;
7721 }
7722 
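/*
 * Derive the HT MCS bitmap for the given standard 11n rate set (SISO
 * or MIMO2) from the intersection of the peer's Rx MCS set and our
 * own supported MCS set; bit 0 corresponds to the lowest MCS of the
 * rate set.
 */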
7723 uint16_t
7724 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7725 {
7726 	struct ieee80211com *ic = &sc->sc_ic;
7727 	const struct ieee80211_ht_rateset *rs;
7728 	uint16_t htrates = 0;
7729 	int mcs;
7730 
7731 	rs = &ieee80211_std_ratesets_11n[rsidx];
7732 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
7733 		if (!isset(ni->ni_rxmcs, mcs) ||
7734 		    !isset(ic->ic_sup_mcs, mcs))
7735 			continue;
7736 		htrates |= (1 << (mcs - rs->min_mcs));
7737 	}
7738 
7739 	return htrates;
7740 }
7741 
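/*
 * Derive the usable VHT MCS bitmap for the given number of spatial
 * streams from the peer's Rx MCS map.  For example, a peer which
 * advertises IEEE80211_VHT_MCS_0_9 for one stream and supports 40 MHz
 * channels yields 0x3ff (MCS 0-9); a 20 MHz-only peer is capped at
 * MCS 8, yielding 0x1ff.
 */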
7742 uint16_t
7743 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7744 {
7745 	uint16_t rx_mcs;
7746 	int max_mcs = -1;
7747 
7748 	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7749 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7750 	switch (rx_mcs) {
7751 	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
7752 		break;
7753 	case IEEE80211_VHT_MCS_0_7:
7754 		max_mcs = 7;
7755 		break;
7756 	case IEEE80211_VHT_MCS_0_8:
7757 		max_mcs = 8;
7758 		break;
7759 	case IEEE80211_VHT_MCS_0_9:
7760 		/* Disable VHT MCS 9 for 20MHz-only stations. */
7761 		if (!ieee80211_node_supports_ht_chan40(ni))
7762 			max_mcs = 8;
7763 		else
7764 			max_mcs = 9;
7765 		break;
7766 	default:
7767 		/* Should not happen; the values above cover the possible range. */
7768 		panic("invalid VHT Rx MCS value %u", rx_mcs);
7769 	}
7770 
7771 	return ((1 << (max_mcs + 1)) - 1);
7772 }
7773 
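/*
 * Configure the firmware's rate scaling (TLC) engine for our AP.
 * Versions 3 and 4 of TLC_MNG_CONFIG are filled with the same data:
 * the legacy rate bitmap (indexed per iwx_rs_rval2idx()), HT or VHT
 * MCS bitmaps per spatial stream, the maximum channel width, the
 * antenna chain mask, the maximum MPDU length (3895 bytes with VHT,
 * 3839 otherwise), and short-GI support per channel width.
 */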
7774 int
7775 iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
7776 {
7777 	struct ieee80211_node *ni = &in->in_ni;
7778 	struct ieee80211_rateset *rs = &ni->ni_rates;
7779 	struct iwx_tlc_config_cmd_v3 cfg_cmd;
7780 	uint32_t cmd_id;
7781 	int i;
7782 	size_t cmd_size = sizeof(cfg_cmd);
7783 
7784 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7785 
7786 	for (i = 0; i < rs->rs_nrates; i++) {
7787 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7788 		int idx = iwx_rs_rval2idx(rval);
7789 		if (idx == -1)
7790 			return EINVAL;
7791 		cfg_cmd.non_ht_rates |= (1 << idx);
7792 	}
7793 
7794 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7795 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7796 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7797 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7798 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7799 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7800 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7801 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7802 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7803 		    htole16(iwx_rs_ht_rates(sc, ni,
7804 		    IEEE80211_HT_RATESET_SISO));
7805 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7806 		    htole16(iwx_rs_ht_rates(sc, ni,
7807 		    IEEE80211_HT_RATESET_MIMO2));
7808 	} else
7809 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7810 
7811 	cfg_cmd.sta_id = IWX_STATION_ID;
7812 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7813 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7814 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7815 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7816 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7817 	else
7818 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7819 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7820 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7821 		cfg_cmd.max_mpdu_len = htole16(3895);
7822 	else
7823 		cfg_cmd.max_mpdu_len = htole16(3839);
7824 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7825 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7826 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7827 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7828 		}
7829 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7830 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7831 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7832 		}
7833 	}
7834 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7835 	    ieee80211_node_supports_vht_sgi80(ni))
7836 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7837 
7838 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7839 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7840 }
7841 
7842 int
7843 iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
7844 {
7845 	struct ieee80211_node *ni = &in->in_ni;
7846 	struct ieee80211_rateset *rs = &ni->ni_rates;
7847 	struct iwx_tlc_config_cmd_v4 cfg_cmd;
7848 	uint32_t cmd_id;
7849 	int i;
7850 	size_t cmd_size = sizeof(cfg_cmd);
7851 
7852 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7853 
7854 	for (i = 0; i < rs->rs_nrates; i++) {
7855 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7856 		int idx = iwx_rs_rval2idx(rval);
7857 		if (idx == -1)
7858 			return EINVAL;
7859 		cfg_cmd.non_ht_rates |= (1 << idx);
7860 	}
7861 
7862 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7863 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7864 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7865 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7866 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7867 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7868 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7869 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7870 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7871 		    htole16(iwx_rs_ht_rates(sc, ni,
7872 		    IEEE80211_HT_RATESET_SISO));
7873 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7874 		    htole16(iwx_rs_ht_rates(sc, ni,
7875 		    IEEE80211_HT_RATESET_MIMO2));
7876 	} else
7877 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7878 
7879 	cfg_cmd.sta_id = IWX_STATION_ID;
7880 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7881 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7882 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7883 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7884 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7885 	else
7886 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7887 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7888 	if (ni->ni_flags & IEEE80211_NODE_VHT)
7889 		cfg_cmd.max_mpdu_len = htole16(3895);
7890 	else
7891 		cfg_cmd.max_mpdu_len = htole16(3839);
7892 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7893 		if (ieee80211_node_supports_ht_sgi20(ni)) {
7894 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7895 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7896 		}
7897 		if (ieee80211_node_supports_ht_sgi40(ni)) {
7898 			cfg_cmd.sgi_ch_width_supp |= (1 <<
7899 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7900 		}
7901 	}
7902 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7903 	    ieee80211_node_supports_vht_sgi80(ni))
7904 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7905 
7906 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7907 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7908 }
7909 
7910 int
7911 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7912 {
7913 	int cmd_ver;
7914 
7915 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7916 	    IWX_TLC_MNG_CONFIG_CMD);
7917 	if (cmd_ver == 4)
7918 		return iwx_rs_init_v4(sc, in);
7919 	return iwx_rs_init_v3(sc, in);
7920 }
7921 
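/*
 * Handle a TLC update notification reporting the Tx rate chosen by
 * the firmware's rate scaling engine.  The rate_n_flags word comes
 * in an old (v1) or new (v2 and later) layout depending on the
 * notification version; decode it and store the result in net80211's
 * ni_txmcs/ni_vht_ss or ni_txrate so the current rate is visible,
 * e.g. in ifconfig(8) media status.
 */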
7922 void
7923 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7924 {
7925 	struct ieee80211com *ic = &sc->sc_ic;
7926 	struct ieee80211_node *ni = ic->ic_bss;
7927 	struct ieee80211_rateset *rs = &ni->ni_rates;
7928 	uint32_t rate_n_flags;
7929 	uint8_t plcp, rval;
7930 	int i, cmd_ver, rate_n_flags_ver2 = 0;
7931 
7932 	if (notif->sta_id != IWX_STATION_ID ||
7933 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7934 		return;
7935 
7936 	rate_n_flags = le32toh(notif->rate);
7937 
7938 	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
7939 	    IWX_TLC_MNG_UPDATE_NOTIF);
7940 	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
7941 		rate_n_flags_ver2 = 1;
7942 	if (rate_n_flags_ver2) {
7943 		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7944 		if (mod_type == IWX_RATE_MCS_VHT_MSK) {
7945 			ni->ni_txmcs = (rate_n_flags &
7946 			    IWX_RATE_HT_MCS_CODE_MSK);
7947 			ni->ni_vht_ss = ((rate_n_flags &
7948 			    IWX_RATE_MCS_NSS_MSK) >>
7949 			    IWX_RATE_MCS_NSS_POS) + 1;
7950 			return;
7951 		} else if (mod_type == IWX_RATE_MCS_HT_MSK) {
7952 			ni->ni_txmcs = IWX_RATE_HT_MCS_INDEX(rate_n_flags);
7953 			return;
7954 		}
7955 	} else {
7956 		if (rate_n_flags & IWX_RATE_MCS_VHT_MSK_V1) {
7957 			ni->ni_txmcs = (rate_n_flags &
7958 			    IWX_RATE_VHT_MCS_RATE_CODE_MSK);
7959 			ni->ni_vht_ss = ((rate_n_flags &
7960 			    IWX_RATE_VHT_MCS_NSS_MSK) >>
7961 			    IWX_RATE_VHT_MCS_NSS_POS) + 1;
7962 			return;
7963 		} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
7964 			ni->ni_txmcs = (rate_n_flags &
7965 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
7966 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
7967 			return;
7968 		}
7969 	}
7970 
7971 	if (rate_n_flags_ver2) {
7972 		const struct ieee80211_rateset *rs;
7973 		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
7974 		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
7975 			rs = &ieee80211_std_rateset_11a;
7976 		else
7977 			rs = &ieee80211_std_rateset_11b;
7978 		if (ridx < rs->rs_nrates)
7979 			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
7980 		else
7981 			rval = 0;
7982 	} else {
7983 		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
7984 
7985 		rval = 0;
7986 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
7987 			if (iwx_rates[i].plcp == plcp) {
7988 				rval = iwx_rates[i].rate;
7989 				break;
7990 			}
7991 		}
7992 	}
7993 
7994 	if (rval) {
7995 		uint8_t rv;
7996 		for (i = 0; i < rs->rs_nrates; i++) {
7997 			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7998 			if (rv == rval) {
7999 				ni->ni_txrate = i;
8000 				break;
8001 			}
8002 		}
8003 	}
8004 }
8005 
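/*
 * Send an RLC configuration command which tells the firmware how many
 * Rx chains to use on this PHY context: the valid antenna mask, the
 * number of chains to keep when idle, and the number to use for MIMO
 * reception.
 */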
8006 int
8007 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8008     uint8_t chains_static, uint8_t chains_dynamic)
8009 {
8010 	struct iwx_rlc_config_cmd cmd;
8011 	uint32_t cmd_id;
8012 	uint8_t active_cnt, idle_cnt;
8013 
8014 	memset(&cmd, 0, sizeof(cmd));
8015 
8016 	idle_cnt = chains_static;
8017 	active_cnt = chains_dynamic;
8018 
8019 	cmd.phy_id = htole32(phyctxt->id);
8020 	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
8021 	    IWX_PHY_RX_CHAIN_VALID_POS);
8022 	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
8023 	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
8024 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
8025 
8026 	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
8027 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8028 }
8029 
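/*
 * Re-program a PHY context for a new channel or chain configuration.
 * Firmware with the CDB capability cannot move a PHY context between
 * the 2 GHz and 5 GHz bands with a MODIFY action, so such a band
 * change is done as REMOVE followed by ADD instead.  Firmware which
 * takes version 2 of the RLC config command expects the Rx chain
 * setup to be sent separately afterwards.
 */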
8030 int
8031 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8032     struct ieee80211_channel *chan, uint8_t chains_static,
8033     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8034     uint8_t vht_chan_width)
8035 {
8036 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8037 	int err;
8038 
8039 	if (isset(sc->sc_enabled_capa,
8040 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8041 	    (phyctxt->channel->ic_flags & band_flags) !=
8042 	    (chan->ic_flags & band_flags)) {
8043 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8044 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8045 		    vht_chan_width);
8046 		if (err) {
8047 			printf("%s: could not remove PHY context "
8048 			    "(error %d)\n", DEVNAME(sc), err);
8049 			return err;
8050 		}
8051 		phyctxt->channel = chan;
8052 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8053 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
8054 		    vht_chan_width);
8055 		if (err) {
8056 			printf("%s: could not add PHY context "
8057 			    "(error %d)\n", DEVNAME(sc), err);
8058 			return err;
8059 		}
8060 	} else {
8061 		phyctxt->channel = chan;
8062 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8063 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8064 		    vht_chan_width);
8065 		if (err) {
8066 			printf("%s: could not update PHY context (error %d)\n",
8067 			    DEVNAME(sc), err);
8068 			return err;
8069 		}
8070 	}
8071 
8072 	phyctxt->sco = sco;
8073 	phyctxt->vht_chan_width = vht_chan_width;
8074 
8075 	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8076 	    IWX_RLC_CONFIG_CMD) == 2)
8077 		return iwx_phy_send_rlc(sc, phyctxt,
8078 		    chains_static, chains_dynamic);
8079 
8080 	return 0;
8081 }
8082 
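/*
 * Prepare the firmware for authentication: point a PHY context at the
 * AP's channel, then add the MAC context, binding, and station, and
 * enable the management queue.  A session protection time event (nine
 * beacon intervals, or 900 if the interval is unknown) keeps the
 * firmware on-channel while net80211 performs the AUTH/ASSOC
 * handshake.  On error, earlier steps are undone in reverse order,
 * but only if no device reset has bumped sc_generation in between.
 */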
8083 int
8084 iwx_auth(struct iwx_softc *sc)
8085 {
8086 	struct ieee80211com *ic = &sc->sc_ic;
8087 	struct iwx_node *in = (void *)ic->ic_bss;
8088 	uint32_t duration;
8089 	int generation = sc->sc_generation, err;
8090 
8091 	splassert(IPL_NET);
8092 
8093 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8094 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8095 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8096 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8097 		if (err)
8098 			return err;
8099 	} else {
8100 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8101 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8102 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8103 		if (err)
8104 			return err;
8105 	}
8106 	in->in_phyctxt = &sc->sc_phyctxt[0];
8107 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8108 
8109 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
8110 	if (err) {
8111 		printf("%s: could not add MAC context (error %d)\n",
8112 		    DEVNAME(sc), err);
8113 		return err;
8114 	}
8115 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
8116 
8117 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
8118 	if (err) {
8119 		printf("%s: could not add binding (error %d)\n",
8120 		    DEVNAME(sc), err);
8121 		goto rm_mac_ctxt;
8122 	}
8123 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
8124 
8125 	err = iwx_add_sta_cmd(sc, in, 0);
8126 	if (err) {
8127 		printf("%s: could not add sta (error %d)\n",
8128 		    DEVNAME(sc), err);
8129 		goto rm_binding;
8130 	}
8131 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
8132 
8133 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8134 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
8135 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
8136 		    IWX_TX_RING_COUNT);
8137 		if (err)
8138 			goto rm_sta;
8139 		return 0;
8140 	}
8141 
8142 	err = iwx_enable_mgmt_queue(sc);
8143 	if (err)
8144 		goto rm_sta;
8145 
8146 	err = iwx_clear_statistics(sc);
8147 	if (err)
8148 		goto rm_mgmt_queue;
8149 
8150 	/*
8151 	 * Prevent the FW from wandering off channel during association
8152 	 * by "protecting" the session with a time event.
8153 	 */
8154 	if (in->in_ni.ni_intval)
8155 		duration = in->in_ni.ni_intval * 9;
8156 	else
8157 		duration = 900;
8158 	return iwx_schedule_session_protection(sc, in, duration);
8159 rm_mgmt_queue:
8160 	if (generation == sc->sc_generation)
8161 		iwx_disable_mgmt_queue(sc);
8162 rm_sta:
8163 	if (generation == sc->sc_generation) {
8164 		iwx_rm_sta_cmd(sc, in);
8165 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8166 	}
8167 rm_binding:
8168 	if (generation == sc->sc_generation) {
8169 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8170 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8171 	}
8172 rm_mac_ctxt:
8173 	if (generation == sc->sc_generation) {
8174 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8175 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8176 	}
8177 	return err;
8178 }
8179 
8180 int
8181 iwx_deauth(struct iwx_softc *sc)
8182 {
8183 	struct ieee80211com *ic = &sc->sc_ic;
8184 	struct iwx_node *in = (void *)ic->ic_bss;
8185 	int err;
8186 
8187 	splassert(IPL_NET);
8188 
8189 	iwx_unprotect_session(sc, in);
8190 
8191 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
8192 		err = iwx_rm_sta(sc, in);
8193 		if (err)
8194 			return err;
8195 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8196 	}
8197 
8198 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
8199 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8200 		if (err) {
8201 			printf("%s: could not remove binding (error %d)\n",
8202 			    DEVNAME(sc), err);
8203 			return err;
8204 		}
8205 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8206 	}
8207 
8208 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
8209 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8210 		if (err) {
8211 			printf("%s: could not remove MAC context (error %d)\n",
8212 			    DEVNAME(sc), err);
8213 			return err;
8214 		}
8215 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8216 	}
8217 
8218 	/* Move unused PHY context to a default channel. */
8219 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8220 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8221 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8222 	if (err)
8223 		return err;
8224 
8225 	return 0;
8226 }
8227 
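/*
 * Complete the transition to RUN state: update the PHY context with
 * the negotiated channel width and MIMO settings, update the station
 * and MAC contexts with association data, and start firmware rate
 * scaling from the lowest rate.  In monitor mode this also performs
 * the iwx_auth() steps, since no AUTH state is ever entered.
 */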
8228 int
8229 iwx_run(struct iwx_softc *sc)
8230 {
8231 	struct ieee80211com *ic = &sc->sc_ic;
8232 	struct iwx_node *in = (void *)ic->ic_bss;
8233 	struct ieee80211_node *ni = &in->in_ni;
8234 	int err;
8235 
8236 	splassert(IPL_NET);
8237 
8238 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8239 		/* Add a MAC context and a sniffing STA. */
8240 		err = iwx_auth(sc);
8241 		if (err)
8242 			return err;
8243 	}
8244 
8245 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8246 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8247 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8248 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8249 		    in->in_phyctxt->channel, chains, chains,
8250 		    0, IEEE80211_HTOP0_SCO_SCN,
8251 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8252 		if (err) {
8253 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8254 			return err;
8255 		}
8256 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8257 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8258 		uint8_t sco, vht_chan_width;
8259 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8260 		    ieee80211_node_supports_ht_chan40(ni))
8261 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8262 		else
8263 			sco = IEEE80211_HTOP0_SCO_SCN;
8264 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8265 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8266 		    ieee80211_node_supports_vht_chan80(ni))
8267 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8268 		else
8269 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8270 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8271 		    in->in_phyctxt->channel, chains, chains,
8272 		    0, sco, vht_chan_width);
8273 		if (err) {
8274 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8275 			return err;
8276 		}
8277 	}
8278 
8279 	/* Update STA again to apply HT and VHT settings. */
8280 	err = iwx_add_sta_cmd(sc, in, 1);
8281 	if (err) {
8282 		printf("%s: could not update STA (error %d)\n",
8283 		    DEVNAME(sc), err);
8284 		return err;
8285 	}
8286 
8287 	/* We have now been assigned an associd by the AP. */
8288 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
8289 	if (err) {
8290 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8291 		return err;
8292 	}
8293 
8294 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
8295 	if (err) {
8296 		printf("%s: could not set sf full on (error %d)\n",
8297 		    DEVNAME(sc), err);
8298 		return err;
8299 	}
8300 
8301 	err = iwx_allow_mcast(sc);
8302 	if (err) {
8303 		printf("%s: could not allow mcast (error %d)\n",
8304 		    DEVNAME(sc), err);
8305 		return err;
8306 	}
8307 
8308 	err = iwx_power_update_device(sc);
8309 	if (err) {
8310 		printf("%s: could not send power command (error %d)\n",
8311 		    DEVNAME(sc), err);
8312 		return err;
8313 	}
8314 #ifdef notyet
8315 	/*
8316 	 * Disabled for now. Default beacon filter settings
8317 	 * prevent net80211 from getting ERP and HT protection
8318 	 * updates from beacons.
8319 	 */
8320 	err = iwx_enable_beacon_filter(sc, in);
8321 	if (err) {
8322 		printf("%s: could not enable beacon filter\n",
8323 		    DEVNAME(sc));
8324 		return err;
8325 	}
8326 #endif
8327 	err = iwx_power_mac_update_mode(sc, in);
8328 	if (err) {
8329 		printf("%s: could not update MAC power (error %d)\n",
8330 		    DEVNAME(sc), err);
8331 		return err;
8332 	}
8333 
8334 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8335 		return 0;
8336 
8337 	/* Start at lowest available bit-rate. Firmware will raise. */
8338 	in->in_ni.ni_txrate = 0;
8339 	in->in_ni.ni_txmcs = 0;
8340 
8341 	err = iwx_rs_init(sc, in);
8342 	if (err) {
8343 		printf("%s: could not init rate scaling (error %d)\n",
8344 		    DEVNAME(sc), err);
8345 		return err;
8346 	}
8347 
8348 	return 0;
8349 }
8350 
8351 int
8352 iwx_run_stop(struct iwx_softc *sc)
8353 {
8354 	struct ieee80211com *ic = &sc->sc_ic;
8355 	struct iwx_node *in = (void *)ic->ic_bss;
8356 	struct ieee80211_node *ni = &in->in_ni;
8357 	int err, i;
8358 
8359 	splassert(IPL_NET);
8360 
8361 	err = iwx_flush_sta(sc, in);
8362 	if (err) {
8363 		printf("%s: could not flush Tx path (error %d)\n",
8364 		    DEVNAME(sc), err);
8365 		return err;
8366 	}
8367 
8368 	/*
8369 	 * Stop Rx BA sessions now. We cannot rely on the BA task
8370 	 * for this when moving out of RUN state since it runs in a
8371 	 * separate thread.
8372 	 * Note that in->in_ni (struct ieee80211_node) already represents
8373 	 * our new access point in case we are roaming between APs.
8374 	 * This means we cannot rely on struct ieee802111_node to tell
8375 	 * us which BA sessions exist.
8376 	 */
8377 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8378 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8379 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
8380 			continue;
8381 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8382 	}
8383 
8384 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
8385 	if (err)
8386 		return err;
8387 
8388 	err = iwx_disable_beacon_filter(sc);
8389 	if (err) {
8390 		printf("%s: could not disable beacon filter (error %d)\n",
8391 		    DEVNAME(sc), err);
8392 		return err;
8393 	}
8394 
8395 	/* Mark station as disassociated. */
8396 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
8397 	if (err) {
8398 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8399 		return err;
8400 	}
8401 
8402 	return 0;
8403 }
8404 
8405 struct ieee80211_node *
8406 iwx_node_alloc(struct ieee80211com *ic)
8407 {
8408 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8409 }
8410 
8411 int
8412 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8413     struct ieee80211_key *k)
8414 {
8415 	struct iwx_softc *sc = ic->ic_softc;
8416 	struct iwx_node *in = (void *)ni;
8417 	struct iwx_setkey_task_arg *a;
8418 	int err;
8419 
8420 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8421 		/* Fall back to software crypto for other ciphers. */
8422 		err = ieee80211_set_key(ic, ni, k);
8423 		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
8424 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8425 		return err;
8426 	}
8427 
8428 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
8429 		return ENOSPC;
8430 
8431 	a = &sc->setkey_arg[sc->setkey_cur];
8432 	a->sta_id = IWX_STATION_ID;
8433 	a->ni = ni;
8434 	a->k = k;
8435 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
8436 	sc->setkey_nkeys++;
8437 	iwx_add_task(sc, systq, &sc->setkey_task);
8438 	return EBUSY;
8439 }
8440 
8441 int
8442 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
8443     struct ieee80211_key *k)
8444 {
8445 	struct ieee80211com *ic = &sc->sc_ic;
8446 	struct iwx_node *in = (void *)ni;
8447 	struct iwx_add_sta_key_cmd cmd;
8448 	uint32_t status;
8449 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
8450 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
8451 	int err;
8452 
8453 	/*
8454 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
8455 	 * Currently we only implement station mode, where 'ni' is always
8456 	 * ic->ic_bss, so there is no need to validate arguments beyond this:
8457 	 */
8458 	KASSERT(ni == ic->ic_bss);
8459 
8460 	memset(&cmd, 0, sizeof(cmd));
8461 
8462 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
8463 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
8464 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8465 	    IWX_STA_KEY_FLG_KEYID_MSK));
8466 	if (k->k_flags & IEEE80211_KEY_GROUP) {
8467 		cmd.common.key_offset = 1;
8468 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
8469 	} else
8470 		cmd.common.key_offset = 0;
8471 
8472 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8473 	cmd.common.sta_id = sta_id;
8474 
8475 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8476 
8477 	status = IWX_ADD_STA_SUCCESS;
8478 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
8479 	    &status);
8480 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
8481 		return ECANCELED;
8482 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
8483 		err = EIO;
8484 	if (err) {
8485 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8486 		    IEEE80211_REASON_AUTH_LEAVE);
8487 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8488 		return err;
8489 	}
8490 
8491 	if (k->k_flags & IEEE80211_KEY_GROUP)
8492 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8493 	else
8494 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
8495 
8496 	if ((in->in_flags & want_keymask) == want_keymask) {
8497 		DPRINTF(("marking port %s valid\n",
8498 		    ether_sprintf(ni->ni_macaddr)));
8499 		ni->ni_port_valid = 1;
8500 		ieee80211_set_link_state(ic, LINK_STATE_UP);
8501 	}
8502 
8503 	return 0;
8504 }
8505 
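/*
 * Install CCMP keys which iwx_set_key() has queued in the setkey_arg
 * ring buffer.  Keys are installed in firmware via ADD_STA_KEY from
 * process context; once both the pairwise and group keys are in
 * place, iwx_add_sta_key() marks the 802.1X port valid and brings
 * the link up.
 */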
8506 void
8507 iwx_setkey_task(void *arg)
8508 {
8509 	struct iwx_softc *sc = arg;
8510 	struct iwx_setkey_task_arg *a;
8511 	int err = 0, s = splnet();
8512 
8513 	while (sc->setkey_nkeys > 0) {
8514 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
8515 			break;
8516 		a = &sc->setkey_arg[sc->setkey_tail];
8517 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
8518 		a->sta_id = 0;
8519 		a->ni = NULL;
8520 		a->k = NULL;
8521 		sc->setkey_tail = (sc->setkey_tail + 1) %
8522 		    nitems(sc->setkey_arg);
8523 		sc->setkey_nkeys--;
8524 	}
8525 
8526 	refcnt_rele_wake(&sc->task_refs);
8527 	splx(s);
8528 }
8529 
8530 void
8531 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8532     struct ieee80211_key *k)
8533 {
8534 	struct iwx_softc *sc = ic->ic_softc;
8535 	struct iwx_add_sta_key_cmd cmd;
8536 
8537 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8538 		/* Fall back to software crypto for other ciphers. */
8539 		ieee80211_delete_key(ic, ni, k);
8540 		return;
8541 	}
8542 
8543 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
8544 		return;
8545 
8546 	memset(&cmd, 0, sizeof(cmd));
8547 
8548 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
8549 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
8550 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8551 	    IWX_STA_KEY_FLG_KEYID_MSK));
8552 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8553 	if (k->k_flags & IEEE80211_KEY_GROUP)
8554 		cmd.common.key_offset = 1;
8555 	else
8556 		cmd.common.key_offset = 0;
8557 	cmd.common.sta_id = IWX_STATION_ID;
8558 
8559 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
8560 }
8561 
8562 int
8563 iwx_media_change(struct ifnet *ifp)
8564 {
8565 	int err;
8566 
8567 	err = ieee80211_media_change(ifp);
8568 	if (err != ENETRESET)
8569 		return err;
8570 
8571 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8572 	    (IFF_UP | IFF_RUNNING)) {
8573 		iwx_stop(ifp);
8574 		err = iwx_init(ifp);
8575 	}
8576 	return err;
8577 }
8578 
8579 void
8580 iwx_newstate_task(void *psc)
8581 {
8582 	struct iwx_softc *sc = (struct iwx_softc *)psc;
8583 	struct ieee80211com *ic = &sc->sc_ic;
8584 	enum ieee80211_state nstate = sc->ns_nstate;
8585 	enum ieee80211_state ostate = ic->ic_state;
8586 	int arg = sc->ns_arg;
8587 	int err = 0, s = splnet();
8588 
8589 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8590 		/* iwx_stop() is waiting for us. */
8591 		refcnt_rele_wake(&sc->task_refs);
8592 		splx(s);
8593 		return;
8594 	}
8595 
8596 	if (ostate == IEEE80211_S_SCAN) {
8597 		if (nstate == ostate) {
8598 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
8599 				refcnt_rele_wake(&sc->task_refs);
8600 				splx(s);
8601 				return;
8602 			}
8603 			/* Firmware is no longer scanning. Do another scan. */
8604 			goto next_scan;
8605 		}
8606 	}
8607 
8608 	if (nstate <= ostate) {
8609 		switch (ostate) {
8610 		case IEEE80211_S_RUN:
8611 			err = iwx_run_stop(sc);
8612 			if (err)
8613 				goto out;
8614 			/* FALLTHROUGH */
8615 		case IEEE80211_S_ASSOC:
8616 		case IEEE80211_S_AUTH:
8617 			if (nstate <= IEEE80211_S_AUTH) {
8618 				err = iwx_deauth(sc);
8619 				if (err)
8620 					goto out;
8621 			}
8622 			/* FALLTHROUGH */
8623 		case IEEE80211_S_SCAN:
8624 		case IEEE80211_S_INIT:
8625 			break;
8626 		}
8627 
8628 		/* Die now if iwx_stop() was called while we were sleeping. */
8629 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8630 			refcnt_rele_wake(&sc->task_refs);
8631 			splx(s);
8632 			return;
8633 		}
8634 	}
8635 
8636 	switch (nstate) {
8637 	case IEEE80211_S_INIT:
8638 		break;
8639 
8640 	case IEEE80211_S_SCAN:
8641 next_scan:
8642 		err = iwx_scan(sc);
8643 		if (err)
8644 			break;
8645 		refcnt_rele_wake(&sc->task_refs);
8646 		splx(s);
8647 		return;
8648 
8649 	case IEEE80211_S_AUTH:
8650 		err = iwx_auth(sc);
8651 		break;
8652 
8653 	case IEEE80211_S_ASSOC:
8654 		break;
8655 
8656 	case IEEE80211_S_RUN:
8657 		err = iwx_run(sc);
8658 		break;
8659 	}
8660 
8661 out:
8662 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8663 		if (err)
8664 			task_add(systq, &sc->init_task);
8665 		else
8666 			sc->sc_newstate(ic, nstate, arg);
8667 	}
8668 	refcnt_rele_wake(&sc->task_refs);
8669 	splx(s);
8670 }
8671 
8672 int
8673 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8674 {
8675 	struct ifnet *ifp = IC2IFP(ic);
8676 	struct iwx_softc *sc = ifp->if_softc;
8677 
8678 	/*
8679 	 * Prevent attempts to transition towards the same state, unless
8680 	 * we are scanning, in which case a SCAN -> SCAN transition
8681 	 * triggers another scan iteration, and AUTH -> AUTH is needed
8682 	 * to support band-steering.
8683 	 */
8684 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
8685 	    nstate != IEEE80211_S_AUTH)
8686 		return 0;
8687 
8688 	if (ic->ic_state == IEEE80211_S_RUN) {
8689 		iwx_del_task(sc, systq, &sc->ba_task);
8690 		iwx_del_task(sc, systq, &sc->setkey_task);
8691 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8692 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8693 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8694 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8695 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
8696 	}
8697 
8698 	sc->ns_nstate = nstate;
8699 	sc->ns_arg = arg;
8700 
8701 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8702 
8703 	return 0;
8704 }
8705 
8706 void
8707 iwx_endscan(struct iwx_softc *sc)
8708 {
8709 	struct ieee80211com *ic = &sc->sc_ic;
8710 
8711 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8712 		return;
8713 
8714 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8715 	ieee80211_end_scan(&ic->ic_if);
8716 }
8717 
8718 /*
8719  * Aging and idle timeouts for the different possible scenarios
8720  * in default configuration
8721  */
8722 static const uint32_t
8723 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8724 	{
8725 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8726 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8727 	},
8728 	{
8729 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8730 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8731 	},
8732 	{
8733 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8734 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8735 	},
8736 	{
8737 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
8738 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8739 	},
8740 	{
8741 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8742 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8743 	},
8744 };
8745 
8746 /*
8747  * Aging and idle timeouts for the different possible scenarios
8748  * in single BSS MAC configuration.
8749  */
8750 static const uint32_t
8751 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8752 	{
8753 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8754 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8755 	},
8756 	{
8757 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8758 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8759 	},
8760 	{
8761 		htole32(IWX_SF_MCAST_AGING_TIMER),
8762 		htole32(IWX_SF_MCAST_IDLE_TIMER)
8763 	},
8764 	{
8765 		htole32(IWX_SF_BA_AGING_TIMER),
8766 		htole32(IWX_SF_BA_IDLE_TIMER)
8767 	},
8768 	{
8769 		htole32(IWX_SF_TX_RE_AGING_TIMER),
8770 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
8771 	},
8772 };
8773 
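/*
 * Fill a smart-FIFO configuration command. The watermark and the
 * aging/idle timeouts depend on association state: given an AP node,
 * the single-BSS timeouts above are used; otherwise the defaults for
 * unassociated operation apply.
 */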
8774 void
8775 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8776     struct ieee80211_node *ni)
8777 {
8778 	int i, j, watermark;
8779 
8780 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8781 
8782 	/*
8783 	 * If we are in association flow - check antenna configuration
8784 	 * capabilities of the AP station, and choose the watermark accordingly.
8785 	 */
8786 	if (ni) {
8787 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8788 			if (ni->ni_rxmcs[1] != 0)
8789 				watermark = IWX_SF_W_MARK_MIMO2;
8790 			else
8791 				watermark = IWX_SF_W_MARK_SISO;
8792 		} else {
8793 			watermark = IWX_SF_W_MARK_LEGACY;
8794 		}
8795 	/* default watermark value for unassociated mode. */
8796 	} else {
8797 		watermark = IWX_SF_W_MARK_MIMO2;
8798 	}
8799 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8800 
8801 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8802 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8803 			sf_cmd->long_delay_timeouts[i][j] =
8804 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8805 		}
8806 	}
8807 
8808 	if (ni) {
8809 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8810 		       sizeof(iwx_sf_full_timeout));
8811 	} else {
8812 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8813 		       sizeof(iwx_sf_full_timeout_def));
8814 	}
}
8817 
8818 int
8819 iwx_sf_config(struct iwx_softc *sc, int new_state)
8820 {
8821 	struct ieee80211com *ic = &sc->sc_ic;
8822 	struct iwx_sf_cfg_cmd sf_cmd = {
8823 		.state = htole32(new_state),
8824 	};
8825 	int err = 0;
8826 
8827 	switch (new_state) {
8828 	case IWX_SF_UNINIT:
8829 	case IWX_SF_INIT_OFF:
8830 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8831 		break;
8832 	case IWX_SF_FULL_ON:
8833 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8834 		break;
8835 	default:
8836 		return EINVAL;
8837 	}
8838 
8839 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8840 				   sizeof(sf_cmd), &sf_cmd);
8841 	return err;
8842 }
8843 
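/* Configure Bluetooth coexistence: wifi mode, no coex modules enabled. */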
8844 int
8845 iwx_send_bt_init_conf(struct iwx_softc *sc)
8846 {
8847 	struct iwx_bt_coex_cmd bt_cmd;
8848 
8849 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
8850 	bt_cmd.enabled_modules = 0;
8851 
8852 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8853 	    &bt_cmd);
8854 }
8855 
8856 int
8857 iwx_send_soc_conf(struct iwx_softc *sc)
8858 {
8859 	struct iwx_soc_configuration_cmd cmd;
8860 	int err;
8861 	uint32_t cmd_id, flags = 0;
8862 
8863 	memset(&cmd, 0, sizeof(cmd));
8864 
8865 	/*
8866 	 * In VER_1 of this command, the discrete value is considered
	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
8868 	 * values in VER_1, this is backwards-compatible with VER_2,
8869 	 * as long as we don't set any other flag bits.
8870 	 */
8871 	if (!sc->sc_integrated) { /* VER_1 */
8872 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8873 	} else { /* VER_2 */
8874 		uint8_t scan_cmd_ver;
8875 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8876 			flags |= (sc->sc_ltr_delay &
8877 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8878 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8879 		    IWX_SCAN_REQ_UMAC);
8880 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8881 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8882 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8883 	}
8884 	cmd.flags = htole32(flags);
8885 
8886 	cmd.latency = htole32(sc->sc_xtal_latency);
8887 
8888 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8889 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8890 	if (err)
8891 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8892 	return err;
8893 }
8894 
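/*
 * Set the regulatory domain by sending an MCC (mobile country code)
 * update. The firmware responds with the channels allowed in the new
 * domain; "ZZ" selects the firmware's world-wide default domain.
 */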
8895 int
8896 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8897 {
8898 	struct iwx_mcc_update_cmd mcc_cmd;
8899 	struct iwx_host_cmd hcmd = {
8900 		.id = IWX_MCC_UPDATE_CMD,
8901 		.flags = IWX_CMD_WANT_RESP,
8902 		.data = { &mcc_cmd },
8903 	};
8904 	struct iwx_rx_packet *pkt;
8905 	struct iwx_mcc_update_resp *resp;
8906 	size_t resp_len;
8907 	int err;
8908 
8909 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8910 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8911 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8912 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8913 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8914 	else
8915 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8916 
8917 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8918 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8919 
8920 	err = iwx_send_cmd(sc, &hcmd);
8921 	if (err)
8922 		return err;
8923 
8924 	pkt = hcmd.resp_pkt;
8925 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8926 		err = EIO;
8927 		goto out;
8928 	}
8929 
8930 	resp_len = iwx_rx_packet_payload_len(pkt);
8931 	if (resp_len < sizeof(*resp)) {
8932 		err = EIO;
8933 		goto out;
8934 	}
8935 
8936 	resp = (void *)pkt->data;
8937 	if (resp_len != sizeof(*resp) +
8938 	    resp->n_channels * sizeof(resp->channels[0])) {
8939 		err = EIO;
8940 		goto out;
8941 	}
8942 
	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x "
	    "source_id=0x%x n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info,
	    resp->source_id, resp->n_channels));
8945 
8946 	/* Update channel map for net80211 and our scan configuration. */
8947 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
8948 
8949 out:
8950 	iwx_free_resp(sc, &hcmd);
8951 
8952 	return err;
8953 }
8954 
8955 int
8956 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8957 {
8958 	struct iwx_temp_report_ths_cmd cmd;
8959 	int err;
8960 
8961 	/*
8962 	 * In order to give responsibility for critical-temperature-kill
8963 	 * and TX backoff to FW we need to send an empty temperature
8964 	 * reporting command at init time.
8965 	 */
8966 	memset(&cmd, 0, sizeof(cmd));
8967 
8968 	err = iwx_send_cmd_pdu(sc,
8969 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8970 	    0, sizeof(cmd), &cmd);
8971 	if (err)
8972 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8973 		    DEVNAME(sc), err);
8974 
8975 	return err;
8976 }
8977 
8978 int
8979 iwx_init_hw(struct iwx_softc *sc)
8980 {
8981 	struct ieee80211com *ic = &sc->sc_ic;
8982 	int err, i;
8983 
8984 	err = iwx_run_init_mvm_ucode(sc, 0);
8985 	if (err)
8986 		return err;
8987 
8988 	if (!iwx_nic_lock(sc))
8989 		return EBUSY;
8990 
8991 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8992 	if (err) {
8993 		printf("%s: could not init tx ant config (error %d)\n",
8994 		    DEVNAME(sc), err);
8995 		goto err;
8996 	}
8997 
8998 	if (sc->sc_tx_with_siso_diversity) {
8999 		err = iwx_send_phy_cfg_cmd(sc);
9000 		if (err) {
9001 			printf("%s: could not send phy config (error %d)\n",
9002 			    DEVNAME(sc), err);
9003 			goto err;
9004 		}
9005 	}
9006 
	err = iwx_send_bt_init_conf(sc);
	if (err) {
		printf("%s: could not init bt coex (error %d)\n",
		    DEVNAME(sc), err);
		goto err;
	}

	err = iwx_send_soc_conf(sc);
	if (err)
		goto err;

	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		err = iwx_send_dqa_cmd(sc);
		if (err)
			goto err;
	}
9023 
9024 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first valid channel we have
		 * (ic_channels[0] is not a valid channel).
		 */
9030 		sc->sc_phyctxt[i].id = i;
9031 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9032 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9033 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
9034 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9035 		if (err) {
9036 			printf("%s: could not add phy context %d (error %d)\n",
9037 			    DEVNAME(sc), i, err);
9038 			goto err;
9039 		}
9040 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
9041 		    IWX_RLC_CONFIG_CMD) == 2) {
9042 			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
9043 			if (err) {
9044 				printf("%s: could not configure RLC for PHY "
9045 				    "%d (error %d)\n", DEVNAME(sc), i, err);
9046 				goto err;
9047 			}
9048 		}
9049 	}
9050 
9051 	err = iwx_config_ltr(sc);
9052 	if (err) {
9053 		printf("%s: PCIe LTR configuration failed (error %d)\n",
9054 		    DEVNAME(sc), err);
9055 	}
9056 
9057 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
9058 		err = iwx_send_temp_report_ths_cmd(sc);
9059 		if (err)
9060 			goto err;
9061 	}
9062 
9063 	err = iwx_power_update_device(sc);
9064 	if (err) {
9065 		printf("%s: could not send power command (error %d)\n",
9066 		    DEVNAME(sc), err);
9067 		goto err;
9068 	}
9069 
9070 	if (sc->sc_nvm.lar_enabled) {
9071 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
9072 		if (err) {
9073 			printf("%s: could not init LAR (error %d)\n",
9074 			    DEVNAME(sc), err);
9075 			goto err;
9076 		}
9077 	}
9078 
9079 	err = iwx_config_umac_scan_reduced(sc);
9080 	if (err) {
9081 		printf("%s: could not configure scan (error %d)\n",
9082 		    DEVNAME(sc), err);
9083 		goto err;
9084 	}
9085 
9086 	err = iwx_disable_beacon_filter(sc);
9087 	if (err) {
9088 		printf("%s: could not disable beacon filter (error %d)\n",
9089 		    DEVNAME(sc), err);
9090 		goto err;
9091 	}
9092 
9093 err:
9094 	iwx_nic_unlock(sc);
9095 	return err;
9096 }
9097 
9098 /* Allow multicast from our BSSID. */
9099 int
9100 iwx_allow_mcast(struct iwx_softc *sc)
9101 {
9102 	struct ieee80211com *ic = &sc->sc_ic;
9103 	struct iwx_node *in = (void *)ic->ic_bss;
9104 	struct iwx_mcast_filter_cmd *cmd;
9105 	size_t size;
9106 	int err;
9107 
9108 	size = roundup(sizeof(*cmd), 4);
9109 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9110 	if (cmd == NULL)
9111 		return ENOMEM;
9112 	cmd->filter_own = 1;
9113 	cmd->port_id = 0;
9114 	cmd->count = 0;
9115 	cmd->pass_all = 1;
9116 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
9117 
9118 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
9119 	    0, size, cmd);
9120 	free(cmd, M_DEVBUF, size);
9121 	return err;
9122 }
9123 
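/*
 * Bring the interface up: load and boot the firmware, run the init
 * flows, and kick off an initial scan (or, in monitor mode, move
 * straight to RUN state).
 */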
9124 int
9125 iwx_init(struct ifnet *ifp)
9126 {
9127 	struct iwx_softc *sc = ifp->if_softc;
9128 	struct ieee80211com *ic = &sc->sc_ic;
9129 	int err, generation;
9130 
9131 	rw_assert_wrlock(&sc->ioctl_rwl);
9132 
9133 	generation = ++sc->sc_generation;
9134 
9135 	err = iwx_preinit(sc);
9136 	if (err)
9137 		return err;
9138 
9139 	err = iwx_start_hw(sc);
9140 	if (err) {
9141 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9142 		return err;
9143 	}
9144 
9145 	err = iwx_init_hw(sc);
9146 	if (err) {
9147 		if (generation == sc->sc_generation)
9148 			iwx_stop_device(sc);
9149 		return err;
9150 	}
9151 
9152 	if (sc->sc_nvm.sku_cap_11n_enable)
9153 		iwx_setup_ht_rates(sc);
9154 	if (sc->sc_nvm.sku_cap_11ac_enable)
9155 		iwx_setup_vht_rates(sc);
9156 
9157 	KASSERT(sc->task_refs.r_refs == 0);
9158 	refcnt_init(&sc->task_refs);
9159 	ifq_clr_oactive(&ifp->if_snd);
9160 	ifp->if_flags |= IFF_RUNNING;
9161 
9162 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9163 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9164 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9165 		return 0;
9166 	}
9167 
9168 	ieee80211_begin_scan(ifp);
9169 
9170 	/*
9171 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
9172 	 * Wait until the transition to SCAN state has completed.
9173 	 */
9174 	do {
9175 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
9176 		    SEC_TO_NSEC(1));
9177 		if (generation != sc->sc_generation)
9178 			return ENXIO;
9179 		if (err) {
9180 			iwx_stop(ifp);
9181 			return err;
9182 		}
9183 	} while (ic->ic_state != IEEE80211_S_SCAN);
9184 
9185 	return 0;
9186 }
9187 
9188 void
9189 iwx_start(struct ifnet *ifp)
9190 {
9191 	struct iwx_softc *sc = ifp->if_softc;
9192 	struct ieee80211com *ic = &sc->sc_ic;
9193 	struct ieee80211_node *ni;
9194 	struct ether_header *eh;
9195 	struct mbuf *m;
9196 
9197 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9198 		return;
9199 
9200 	for (;;) {
9201 		/* why isn't this done per-queue? */
9202 		if (sc->qfullmsk != 0) {
9203 			ifq_set_oactive(&ifp->if_snd);
9204 			break;
9205 		}
9206 
9207 		/* Don't queue additional frames while flushing Tx queues. */
9208 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
9209 			break;
9210 
9211 		/* need to send management frames even if we're not RUNning */
9212 		m = mq_dequeue(&ic->ic_mgtq);
9213 		if (m) {
9214 			ni = m->m_pkthdr.ph_cookie;
9215 			goto sendit;
9216 		}
9217 
9218 		if (ic->ic_state != IEEE80211_S_RUN ||
9219 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9220 			break;
9221 
9222 		m = ifq_dequeue(&ifp->if_snd);
9223 		if (!m)
9224 			break;
9225 		if (m->m_len < sizeof (*eh) &&
9226 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9227 			ifp->if_oerrors++;
9228 			continue;
9229 		}
9230 #if NBPFILTER > 0
9231 		if (ifp->if_bpf != NULL)
9232 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9233 #endif
9234 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9235 			ifp->if_oerrors++;
9236 			continue;
9237 		}
9238 
9239  sendit:
9240 #if NBPFILTER > 0
9241 		if (ic->ic_rawbpf != NULL)
9242 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
9243 #endif
9244 		if (iwx_tx(sc, m, ni) != 0) {
9245 			ieee80211_release_node(ic, ni);
9246 			ifp->if_oerrors++;
9247 			continue;
9248 		}
9249 
9250 		if (ifp->if_flags & IFF_UP)
9251 			ifp->if_timer = 1;
9252 	}
9253 
9254 	return;
9255 }
9256 
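/*
 * Stop the device. New tasks are disallowed first, then scheduled
 * tasks are cancelled and stale tasks which are already running are
 * allowed to finish before driver state is reset.
 */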
9257 void
9258 iwx_stop(struct ifnet *ifp)
9259 {
9260 	struct iwx_softc *sc = ifp->if_softc;
9261 	struct ieee80211com *ic = &sc->sc_ic;
9262 	struct iwx_node *in = (void *)ic->ic_bss;
9263 	int i, s = splnet();
9264 
9265 	rw_assert_wrlock(&sc->ioctl_rwl);
9266 
9267 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
9268 
9269 	/* Cancel scheduled tasks and let any stale tasks finish up. */
9270 	task_del(systq, &sc->init_task);
9271 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
9272 	iwx_del_task(sc, systq, &sc->ba_task);
9273 	iwx_del_task(sc, systq, &sc->setkey_task);
9274 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
9275 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
9276 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
9277 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
9278 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
9279 	KASSERT(sc->task_refs.r_refs >= 1);
9280 	refcnt_finalize(&sc->task_refs, "iwxstop");
9281 
9282 	iwx_stop_device(sc);
9283 
9284 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
9285 	sc->bgscan_unref_arg = NULL;
9286 	sc->bgscan_unref_arg_size = 0;
9287 
9288 	/* Reset soft state. */
9289 
9290 	sc->sc_generation++;
9291 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
9292 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
9293 		sc->sc_cmd_resp_pkt[i] = NULL;
9294 		sc->sc_cmd_resp_len[i] = 0;
9295 	}
9296 	ifp->if_flags &= ~IFF_RUNNING;
9297 	ifq_clr_oactive(&ifp->if_snd);
9298 
9299 	in->in_phyctxt = NULL;
9300 	in->in_flags = 0;
9301 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
9302 
9303 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
9304 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
9305 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
9306 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
9307 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9308 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9309 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
9310 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
9311 
9312 	sc->sc_rx_ba_sessions = 0;
9313 	sc->ba_rx.start_tidmask = 0;
9314 	sc->ba_rx.stop_tidmask = 0;
9315 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
9316 	sc->ba_tx.start_tidmask = 0;
9317 	sc->ba_tx.stop_tidmask = 0;
9318 
9319 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
9320 	sc->ns_nstate = IEEE80211_S_INIT;
9321 
9322 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9323 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9324 		iwx_clear_reorder_buffer(sc, rxba);
9325 	}
9326 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
9327 	ifp->if_timer = 0;
9328 
9329 	splx(s);
9330 }
9331 
9332 void
9333 iwx_watchdog(struct ifnet *ifp)
9334 {
9335 	struct iwx_softc *sc = ifp->if_softc;
9336 	int i;
9337 
9338 	ifp->if_timer = 0;
9339 
9340 	/*
9341 	 * We maintain a separate timer for each Tx queue because
9342 	 * Tx aggregation queues can get "stuck" while other queues
9343 	 * keep working. The Linux driver uses a similar workaround.
9344 	 */
9345 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
9346 		if (sc->sc_tx_timer[i] > 0) {
9347 			if (--sc->sc_tx_timer[i] == 0) {
9348 				printf("%s: device timeout\n", DEVNAME(sc));
9349 				if (ifp->if_flags & IFF_DEBUG) {
9350 					iwx_nic_error(sc);
9351 					iwx_dump_driver_status(sc);
9352 				}
9353 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9354 					task_add(systq, &sc->init_task);
9355 				ifp->if_oerrors++;
9356 				return;
9357 			}
9358 			ifp->if_timer = 1;
9359 		}
9360 	}
9361 
9362 	ieee80211_watchdog(ifp);
9363 }
9364 
9365 int
9366 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9367 {
9368 	struct iwx_softc *sc = ifp->if_softc;
9369 	int s, err = 0, generation = sc->sc_generation;
9370 
9371 	/*
9372 	 * Prevent processes from entering this function while another
9373 	 * process is tsleep'ing in it.
9374 	 */
9375 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
9376 	if (err == 0 && generation != sc->sc_generation) {
9377 		rw_exit(&sc->ioctl_rwl);
9378 		return ENXIO;
9379 	}
9380 	if (err)
9381 		return err;
9382 	s = splnet();
9383 
9384 	switch (cmd) {
9385 	case SIOCSIFADDR:
9386 		ifp->if_flags |= IFF_UP;
9387 		/* FALLTHROUGH */
9388 	case SIOCSIFFLAGS:
9389 		if (ifp->if_flags & IFF_UP) {
9390 			if (!(ifp->if_flags & IFF_RUNNING)) {
9391 				/* Force reload of firmware image from disk. */
9392 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
9393 				err = iwx_init(ifp);
9394 			}
9395 		} else {
9396 			if (ifp->if_flags & IFF_RUNNING)
9397 				iwx_stop(ifp);
9398 		}
9399 		break;
9400 
9401 	default:
9402 		err = ieee80211_ioctl(ifp, cmd, data);
9403 	}
9404 
9405 	if (err == ENETRESET) {
9406 		err = 0;
9407 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9408 		    (IFF_UP | IFF_RUNNING)) {
9409 			iwx_stop(ifp);
9410 			err = iwx_init(ifp);
9411 		}
9412 	}
9413 
9414 	splx(s);
9415 	rw_exit(&sc->ioctl_rwl);
9416 
9417 	return err;
9418 }
9419 
9420 /*
9421  * Note: This structure is read from the device with IO accesses,
9422  * and the reading already does the endian conversion. As it is
9423  * read with uint32_t-sized accesses, any members with a different size
9424  * need to be ordered correctly though!
9425  */
9426 struct iwx_error_event_table {
9427 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9428 	uint32_t error_id;		/* type of error */
9429 	uint32_t trm_hw_status0;	/* TRM HW status */
9430 	uint32_t trm_hw_status1;	/* TRM HW status */
9431 	uint32_t blink2;		/* branch link */
9432 	uint32_t ilink1;		/* interrupt link */
9433 	uint32_t ilink2;		/* interrupt link */
9434 	uint32_t data1;		/* error-specific data */
9435 	uint32_t data2;		/* error-specific data */
9436 	uint32_t data3;		/* error-specific data */
9437 	uint32_t bcon_time;		/* beacon timer */
9438 	uint32_t tsf_low;		/* network timestamp function timer */
9439 	uint32_t tsf_hi;		/* network timestamp function timer */
9440 	uint32_t gp1;		/* GP1 timer register */
9441 	uint32_t gp2;		/* GP2 timer register */
9442 	uint32_t fw_rev_type;	/* firmware revision type */
9443 	uint32_t major;		/* uCode version major */
9444 	uint32_t minor;		/* uCode version minor */
9445 	uint32_t hw_ver;		/* HW Silicon version */
9446 	uint32_t brd_ver;		/* HW board version */
9447 	uint32_t log_pc;		/* log program counter */
9448 	uint32_t frame_ptr;		/* frame pointer */
9449 	uint32_t stack_ptr;		/* stack pointer */
9450 	uint32_t hcmd;		/* last host command header */
9451 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9452 				 * rxtx_flag */
9453 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9454 				 * host_flag */
9455 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9456 				 * enc_flag */
9457 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9458 				 * time_flag */
9459 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9460 				 * wico interrupt */
9461 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9462 	uint32_t wait_event;		/* wait event() caller address */
9463 	uint32_t l2p_control;	/* L2pControlField */
9464 	uint32_t l2p_duration;	/* L2pDurationField */
9465 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9466 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9467 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9468 				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
9471 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9472 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9473 
9474 /*
9475  * UMAC error struct - relevant starting from family 8000 chip.
9476  * Note: This structure is read from the device with IO accesses,
9477  * and the reading already does the endian conversion. As it is
9478  * read with u32-sized accesses, any members with a different size
9479  * need to be ordered correctly though!
9480  */
9481 struct iwx_umac_error_event_table {
9482 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9483 	uint32_t error_id;	/* type of error */
9484 	uint32_t blink1;	/* branch link */
9485 	uint32_t blink2;	/* branch link */
9486 	uint32_t ilink1;	/* interrupt link */
9487 	uint32_t ilink2;	/* interrupt link */
9488 	uint32_t data1;		/* error-specific data */
9489 	uint32_t data2;		/* error-specific data */
9490 	uint32_t data3;		/* error-specific data */
9491 	uint32_t umac_major;
9492 	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27 */
9494 	uint32_t stack_pointer;	/* core register 28 */
9495 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9496 	uint32_t nic_isr_pref;	/* ISR status register */
9497 } __packed;
9498 
9499 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9500 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9501 
9502 void
9503 iwx_nic_umac_error(struct iwx_softc *sc)
9504 {
9505 	struct iwx_umac_error_event_table table;
9506 	uint32_t base;
9507 
9508 	base = sc->sc_uc.uc_umac_error_event_table;
9509 
9510 	if (base < 0x400000) {
9511 		printf("%s: Invalid error log pointer 0x%08x\n",
9512 		    DEVNAME(sc), base);
9513 		return;
9514 	}
9515 
9516 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9517 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9518 		return;
9519 	}
9520 
9521 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9522 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9523 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9524 			sc->sc_flags, table.valid);
9525 	}
9526 
9527 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9528 		iwx_desc_lookup(table.error_id));
9529 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9530 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9531 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9532 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9533 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9534 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9535 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9536 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9537 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9538 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9539 	    table.frame_pointer);
9540 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9541 	    table.stack_pointer);
9542 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9543 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9544 	    table.nic_isr_pref);
9545 }
9546 
9547 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
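/*
 * Mapping of firmware error IDs to names. The last entry is the
 * catch-all which iwx_desc_lookup() returns when no ID matches.
 */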
9548 static struct {
9549 	const char *name;
9550 	uint8_t num;
9551 } advanced_lookup[] = {
9552 	{ "NMI_INTERRUPT_WDG", 0x34 },
9553 	{ "SYSASSERT", 0x35 },
9554 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9555 	{ "BAD_COMMAND", 0x38 },
9556 	{ "BAD_COMMAND", 0x39 },
9557 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9558 	{ "FATAL_ERROR", 0x3D },
9559 	{ "NMI_TRM_HW_ERR", 0x46 },
9560 	{ "NMI_INTERRUPT_TRM", 0x4C },
9561 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9562 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9563 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9564 	{ "NMI_INTERRUPT_HOST", 0x66 },
9565 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9566 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9567 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9568 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9569 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9570 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9571 	{ "ADVANCED_SYSASSERT", 0 },
9572 };
9573 
9574 const char *
9575 iwx_desc_lookup(uint32_t num)
9576 {
9577 	int i;
9578 
9579 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9580 		if (advanced_lookup[i].num ==
9581 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
9582 			return advanced_lookup[i].name;
9583 
9584 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9585 	return advanced_lookup[i].name;
9586 }
9587 
9588 /*
9589  * Support for dumping the error log seemed like a good idea ...
9590  * but it's mostly hex junk and the only sensible thing is the
9591  * hw/ucode revision (which we know anyway).  Since it's here,
9592  * I'll just leave it in, just in case e.g. the Intel guys want to
9593  * help us decipher some "ADVANCED_SYSASSERT" later.
9594  */
9595 void
9596 iwx_nic_error(struct iwx_softc *sc)
9597 {
9598 	struct iwx_error_event_table table;
9599 	uint32_t base;
9600 
9601 	printf("%s: dumping device error log\n", DEVNAME(sc));
9602 	base = sc->sc_uc.uc_lmac_error_event_table[0];
9603 	if (base < 0x400000) {
9604 		printf("%s: Invalid error log pointer 0x%08x\n",
9605 		    DEVNAME(sc), base);
9606 		return;
9607 	}
9608 
9609 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9610 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9611 		return;
9612 	}
9613 
9614 	if (!table.valid) {
9615 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9616 		return;
9617 	}
9618 
9619 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9620 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9621 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9622 		    sc->sc_flags, table.valid);
9623 	}
9624 
9625 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9626 	    iwx_desc_lookup(table.error_id));
9627 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9628 	    table.trm_hw_status0);
9629 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9630 	    table.trm_hw_status1);
9631 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9632 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9633 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9634 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9635 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9636 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9637 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9638 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9639 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9640 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9641 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9642 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9643 	    table.fw_rev_type);
9644 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9645 	    table.major);
9646 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9647 	    table.minor);
9648 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9649 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9650 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9651 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9652 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9653 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9654 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9655 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9656 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9657 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9658 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9659 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9660 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9661 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9662 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9663 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9664 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9665 
9666 	if (sc->sc_uc.uc_umac_error_event_table)
9667 		iwx_nic_umac_error(sc);
9668 }
9669 
9670 void
9671 iwx_dump_driver_status(struct iwx_softc *sc)
9672 {
9673 	int i;
9674 
9675 	printf("driver status:\n");
9676 	for (i = 0; i < nitems(sc->txq); i++) {
9677 		struct iwx_tx_ring *ring = &sc->txq[i];
9678 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
9679 		    "cur_hw=%-3d queued=%-3d\n",
9680 		    i, ring->qid, ring->cur, ring->cur_hw,
9681 		    ring->queued);
9682 	}
9683 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
9684 	printf("  802.11 state %s\n",
9685 	    ieee80211_state_name[sc->sc_ic.ic_state]);
9686 }
9687 
9688 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
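/*
 * Helper macros which sync the DMA map covering a response or
 * notification payload that follows the Rx packet header, and set
 * a pointer to that payload.
 */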
9689 do {									\
9690 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9691 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9692 	_var_ = (void *)((_pkt_)+1);					\
9693 } while (/*CONSTCOND*/0)
9694 
9695 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9696 do {									\
9697 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
9699 	_ptr_ = (void *)((_pkt_)+1);					\
9700 } while (/*CONSTCOND*/0)
9701 
9702 int
9703 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
9704 {
9705 	int qid, idx, code;
9706 
9707 	qid = pkt->hdr.qid & ~0x80;
9708 	idx = pkt->hdr.idx;
9709 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9710 
9711 	return (!(qid == 0 && idx == 0 && code == 0) &&
9712 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
9713 }
9714 
9715 void
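/*
 * Process all packets contained in one Rx buffer. Devices older than
 * AX210 may batch several notifications into a single buffer; we keep
 * parsing packets until we find an invalid one or hit the end of the
 * buffer.
 */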
9716 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
9717 {
9718 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9719 	struct iwx_rx_packet *pkt, *nextpkt;
9720 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9721 	struct mbuf *m0, *m;
9722 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9723 	int qid, idx, code, handled = 1;
9724 
9725 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
9726 	    BUS_DMASYNC_POSTREAD);
9727 
9728 	m0 = data->m;
9729 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
9730 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
9731 		qid = pkt->hdr.qid;
9732 		idx = pkt->hdr.idx;
9733 
9734 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9735 
9736 		if (!iwx_rx_pkt_valid(pkt))
9737 			break;
9738 
9739 		/*
9740 		 * XXX Intel inside (tm)
9741 		 * Any commands in the LONG_GROUP could actually be in the
9742 		 * LEGACY group. Firmware API versions >= 50 reject commands
9743 		 * in group 0, forcing us to use this hack.
9744 		 */
9745 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
9746 			struct iwx_tx_ring *ring = &sc->txq[qid];
9747 			struct iwx_tx_data *txdata = &ring->data[idx];
9748 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
9749 				code = iwx_cmd_opcode(code);
9750 		}
9751 
9752 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
9753 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
9754 			break;
9755 
9756 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9757 			/* Take mbuf m0 off the RX ring. */
9758 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
9759 				ifp->if_ierrors++;
9760 				break;
9761 			}
9762 			KASSERT(data->m != m0);
9763 		}
9764 
9765 		switch (code) {
9766 		case IWX_REPLY_RX_PHY_CMD:
9767 			iwx_rx_rx_phy_cmd(sc, pkt, data);
9768 			break;
9769 
9770 		case IWX_REPLY_RX_MPDU_CMD: {
9771 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
9772 			nextoff = offset +
9773 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9774 			nextpkt = (struct iwx_rx_packet *)
9775 			    (m0->m_data + nextoff);
9776 			/* AX210 devices ship only one packet per Rx buffer. */
9777 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
9778 			    nextoff + minsz >= IWX_RBUF_SIZE ||
9779 			    !iwx_rx_pkt_valid(nextpkt)) {
9780 				/* No need to copy last frame in buffer. */
9781 				if (offset > 0)
9782 					m_adj(m0, offset);
9783 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
9784 				m0 = NULL; /* stack owns m0 now; abort loop */
9785 			} else {
9786 				/*
9787 				 * Create an mbuf which points to the current
9788 				 * packet. Always copy from offset zero to
9789 				 * preserve m_pkthdr.
9790 				 */
9791 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9792 				if (m == NULL) {
9793 					ifp->if_ierrors++;
9794 					m_freem(m0);
9795 					m0 = NULL;
9796 					break;
9797 				}
9798 				m_adj(m, offset);
9799 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
9800 			}
			break;
9802 		}
9803 
9804 		case IWX_BAR_FRAME_RELEASE:
9805 			iwx_rx_bar_frame_release(sc, pkt, ml);
9806 			break;
9807 
9808 		case IWX_TX_CMD:
9809 			iwx_rx_tx_cmd(sc, pkt, data);
9810 			break;
9811 
9812 		case IWX_BA_NOTIF:
9813 			iwx_rx_compressed_ba(sc, pkt);
9814 			break;
9815 
9816 		case IWX_MISSED_BEACONS_NOTIFICATION:
9817 			iwx_rx_bmiss(sc, pkt, data);
9818 			break;
9819 
9820 		case IWX_MFUART_LOAD_NOTIFICATION:
9821 			break;
9822 
9823 		case IWX_ALIVE: {
9824 			struct iwx_alive_resp_v4 *resp4;
9825 			struct iwx_alive_resp_v5 *resp5;
9826 			struct iwx_alive_resp_v6 *resp6;
9827 
9828 			DPRINTF(("%s: firmware alive\n", __func__));
9829 			sc->sc_uc.uc_ok = 0;
9830 
9831 			/*
			 * For v5 and above we can check the version; for
			 * older versions we need to check the size.
9834 			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9836 			    IWX_ALIVE) == 6) {
9837 				SYNC_RESP_STRUCT(resp6, pkt);
9838 				if (iwx_rx_packet_payload_len(pkt) !=
9839 				    sizeof(*resp6)) {
9840 					sc->sc_uc.uc_intr = 1;
9841 					wakeup(&sc->sc_uc);
9842 					break;
9843 				}
9844 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9845 				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9846 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9847 				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9848 				sc->sc_uc.uc_log_event_table = le32toh(
9849 				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9850 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9851 				    resp6->umac_data.dbg_ptrs.error_info_addr);
9852 				sc->sc_sku_id[0] =
9853 				    le32toh(resp6->sku_id.data[0]);
9854 				sc->sc_sku_id[1] =
9855 				    le32toh(resp6->sku_id.data[1]);
9856 				sc->sc_sku_id[2] =
9857 				    le32toh(resp6->sku_id.data[2]);
9858 				if (resp6->status == IWX_ALIVE_STATUS_OK)
9859 					sc->sc_uc.uc_ok = 1;
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9861 			    IWX_ALIVE) == 5) {
9862 				SYNC_RESP_STRUCT(resp5, pkt);
9863 				if (iwx_rx_packet_payload_len(pkt) !=
9864 				    sizeof(*resp5)) {
9865 					sc->sc_uc.uc_intr = 1;
9866 					wakeup(&sc->sc_uc);
9867 					break;
9868 				}
9869 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9870 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9871 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9872 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9873 				sc->sc_uc.uc_log_event_table = le32toh(
9874 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9875 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9876 				    resp5->umac_data.dbg_ptrs.error_info_addr);
9877 				sc->sc_sku_id[0] =
9878 				    le32toh(resp5->sku_id.data[0]);
9879 				sc->sc_sku_id[1] =
9880 				    le32toh(resp5->sku_id.data[1]);
9881 				sc->sc_sku_id[2] =
9882 				    le32toh(resp5->sku_id.data[2]);
9883 				if (resp5->status == IWX_ALIVE_STATUS_OK)
9884 					sc->sc_uc.uc_ok = 1;
9885 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9886 				SYNC_RESP_STRUCT(resp4, pkt);
9887 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9888 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9889 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9890 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9891 				sc->sc_uc.uc_log_event_table = le32toh(
9892 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9893 				sc->sc_uc.uc_umac_error_event_table = le32toh(
9894 				    resp4->umac_data.dbg_ptrs.error_info_addr);
9895 				if (resp4->status == IWX_ALIVE_STATUS_OK)
9896 					sc->sc_uc.uc_ok = 1;
9897 			}
9898 
9899 			sc->sc_uc.uc_intr = 1;
9900 			wakeup(&sc->sc_uc);
9901 			break;
9902 		}
9903 
9904 		case IWX_STATISTICS_NOTIFICATION: {
9905 			struct iwx_notif_statistics *stats;
9906 			SYNC_RESP_STRUCT(stats, pkt);
9907 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9908 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
9909 			break;
9910 		}
9911 
9912 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
9913 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9914 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9915 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9916 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9917 			break;
9918 
9919 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9920 		    IWX_CT_KILL_NOTIFICATION): {
9921 			struct iwx_ct_kill_notif *notif;
9922 			SYNC_RESP_STRUCT(notif, pkt);
9923 			printf("%s: device at critical temperature (%u degC), "
9924 			    "stopping device\n",
9925 			    DEVNAME(sc), le16toh(notif->temperature));
9926 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9927 			task_add(systq, &sc->init_task);
9928 			break;
9929 		}
9930 
9931 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9932 		    IWX_SCD_QUEUE_CONFIG_CMD):
9933 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9934 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
9935 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9936 		    IWX_SESSION_PROTECTION_CMD):
9937 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9938 		    IWX_NVM_GET_INFO):
9939 		case IWX_ADD_STA_KEY:
9940 		case IWX_PHY_CONFIGURATION_CMD:
9941 		case IWX_TX_ANT_CONFIGURATION_CMD:
9942 		case IWX_ADD_STA:
9943 		case IWX_MAC_CONTEXT_CMD:
9944 		case IWX_REPLY_SF_CFG_CMD:
9945 		case IWX_POWER_TABLE_CMD:
9946 		case IWX_LTR_CONFIG:
9947 		case IWX_PHY_CONTEXT_CMD:
9948 		case IWX_BINDING_CONTEXT_CMD:
9949 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9950 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9951 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9952 		case IWX_REPLY_BEACON_FILTERING_CMD:
9953 		case IWX_MAC_PM_POWER_TABLE:
9954 		case IWX_TIME_QUOTA_CMD:
9955 		case IWX_REMOVE_STA:
9956 		case IWX_TXPATH_FLUSH:
9957 		case IWX_BT_CONFIG:
9958 		case IWX_MCC_UPDATE_CMD:
9959 		case IWX_TIME_EVENT_CMD:
9960 		case IWX_STATISTICS_CMD:
9961 		case IWX_SCD_QUEUE_CFG: {
9962 			size_t pkt_len;
9963 
9964 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9965 				break;
9966 
9967 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
9968 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9969 
9970 			pkt_len = sizeof(pkt->len_n_flags) +
9971 			    iwx_rx_packet_len(pkt);
9972 
9973 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9974 			    pkt_len < sizeof(*pkt) ||
9975 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9976 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
9977 				    sc->sc_cmd_resp_len[idx]);
9978 				sc->sc_cmd_resp_pkt[idx] = NULL;
9979 				break;
9980 			}
9981 
9982 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
9983 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9984 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9985 			break;
9986 		}
9987 
9988 		case IWX_INIT_COMPLETE_NOTIF:
9989 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
9990 			wakeup(&sc->sc_init_complete);
9991 			break;
9992 
9993 		case IWX_SCAN_COMPLETE_UMAC: {
9994 			struct iwx_umac_scan_complete *notif;
9995 			SYNC_RESP_STRUCT(notif, pkt);
9996 			iwx_endscan(sc);
9997 			break;
9998 		}
9999 
10000 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
10001 			struct iwx_umac_scan_iter_complete_notif *notif;
10002 			SYNC_RESP_STRUCT(notif, pkt);
10003 			iwx_endscan(sc);
10004 			break;
10005 		}
10006 
10007 		case IWX_MCC_CHUB_UPDATE_CMD: {
10008 			struct iwx_mcc_chub_notif *notif;
10009 			SYNC_RESP_STRUCT(notif, pkt);
10010 			iwx_mcc_update(sc, notif);
10011 			break;
10012 		}
10013 
10014 		case IWX_REPLY_ERROR: {
10015 			struct iwx_error_resp *resp;
10016 			SYNC_RESP_STRUCT(resp, pkt);
10017 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10018 				DEVNAME(sc), le32toh(resp->error_type),
10019 				resp->cmd_id);
10020 			break;
10021 		}
10022 
10023 		case IWX_TIME_EVENT_NOTIFICATION: {
10024 			struct iwx_time_event_notif *notif;
10025 			uint32_t action;
10026 			SYNC_RESP_STRUCT(notif, pkt);
10027 
10028 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10029 				break;
10030 			action = le32toh(notif->action);
10031 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
10032 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10033 			break;
10034 		}
10035 
10036 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10037 		    IWX_SESSION_PROTECTION_NOTIF): {
10038 			struct iwx_session_prot_notif *notif;
10039 			uint32_t status, start, conf_id;
10040 
10041 			SYNC_RESP_STRUCT(notif, pkt);
10042 
10043 			status = le32toh(notif->status);
10044 			start = le32toh(notif->start);
10045 			conf_id = le32toh(notif->conf_id);
10046 			/* Check for end of successful PROTECT_CONF_ASSOC. */
10047 			if (status == 1 && start == 0 &&
10048 			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
10049 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10050 			break;
10051 		}
10052 
10053 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
10054 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;
10056 
10057 		/*
10058 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10059 		 * messages. Just ignore them for now.
10060 		 */
10061 		case IWX_DEBUG_LOG_MSG:
10062 			break;
10063 
10064 		case IWX_MCAST_FILTER_CMD:
10065 			break;
10066 
10067 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
10068 			break;
10069 
10070 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
10071 			break;
10072 
10073 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
10074 			break;
10075 
10076 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10077 		    IWX_NVM_ACCESS_COMPLETE):
10078 			break;
10079 
10080 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
10081 			break; /* happens in monitor mode; ignore for now */
10082 
10083 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
10084 			break;
10085 
10086 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10087 		    IWX_TLC_MNG_UPDATE_NOTIF): {
10088 			struct iwx_tlc_update_notif *notif;
10089 			SYNC_RESP_STRUCT(notif, pkt);
10090 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
10091 				iwx_rs_update(sc, notif);
10092 			break;
10093 		}
10094 
10095 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
10096 			break;
10097 
10098 		/*
10099 		 * Ignore for now. The Linux driver only acts on this request
		 * with 160 MHz channels in 11ax mode.
10101 		 */
10102 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10103 		    IWX_THERMAL_DUAL_CHAIN_REQUEST):
10104 			DPRINTF(("%s: thermal dual-chain request received\n",
10105 			    DEVNAME(sc)));
10106 			break;
10107 
10108 		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
10109 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
10110 			break;
10111 
10112 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10113 		    IWX_PNVM_INIT_COMPLETE):
10114 			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
10115 			wakeup(&sc->sc_init_complete);
10116 			break;
10117 
10118 		default:
10119 			handled = 0;
10120 			printf("%s: unhandled firmware response 0x%x/0x%x "
10121 			    "rx ring %d[%d]\n",
10122 			    DEVNAME(sc), code, pkt->len_n_flags,
10123 			    (qid & ~0x80), idx);
10124 			break;
10125 		}
10126 
10127 		/*
10128 		 * uCode sets bit 0x80 when it originates the notification,
10129 		 * i.e. when the notification is not a direct response to a
10130 		 * command sent by the driver.
10131 		 * For example, uCode issues IWX_REPLY_RX when it sends a
10132 		 * received frame to the driver.
10133 		 */
10134 		if (handled && !(qid & (1 << 7))) {
10135 			iwx_cmd_done(sc, qid, idx, code);
10136 		}
10137 
10138 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
10139 
10140 		/* AX210 devices ship only one packet per Rx buffer. */
10141 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10142 			break;
10143 	}
10144 
10145 	if (m0 && m0 != data->m)
10146 		m_freem(m0);
10147 }
10148 
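/*
 * Process Rx ring entries up to the "closed" index which the firmware
 * maintains in the Rx status area, then pass received frames to the
 * network stack.
 */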
10149 void
10150 iwx_notif_intr(struct iwx_softc *sc)
10151 {
10152 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10153 	uint16_t hw;
10154 
10155 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10156 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10157 
10158 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10159 		uint16_t *status = sc->rxq.stat_dma.vaddr;
10160 		hw = le16toh(*status) & 0xfff;
10161 	} else
10162 		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10163 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
10164 	while (sc->rxq.cur != hw) {
10165 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10166 		iwx_rx_pkt(sc, data, &ml);
10167 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
10168 	}
10169 	if_input(&sc->sc_ic.ic_if, &ml);
10170 
10171 	/*
10172 	 * Tell the firmware what we have processed.
10173 	 * Seems like the hardware gets upset unless we align the write by 8??
10174 	 */
10175 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
10176 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
10177 }
10178 
10179 int
10180 iwx_intr(void *arg)
10181 {
10182 	struct iwx_softc *sc = arg;
10183 	struct ieee80211com *ic = &sc->sc_ic;
10184 	struct ifnet *ifp = IC2IFP(ic);
10185 	int handled = 0;
10186 	int r1, r2, rv = 0;
10187 
10188 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10189 
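	/*
	 * With ICT, the device accumulates interrupt causes in a DMA
	 * table (the interrupt cause table) which we read here instead
	 * of the CSR_INT register.
	 */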
10190 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
10191 		uint32_t *ict = sc->ict_dma.vaddr;
10192 		int tmp;
10193 
10194 		tmp = htole32(ict[sc->ict_cur]);
10195 		if (!tmp)
10196 			goto out_ena;
10197 
		/*
		 * OK, there was something.  Keep reading ICT entries until
		 * we have collected all pending interrupt causes.
		 */
10201 		r1 = r2 = 0;
10202 		while (tmp) {
10203 			r1 |= tmp;
10204 			ict[sc->ict_cur] = 0;
10205 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
10206 			tmp = htole32(ict[sc->ict_cur]);
10207 		}
10208 
10209 		/* this is where the fun begins.  don't ask */
10210 		if (r1 == 0xffffffff)
10211 			r1 = 0;
10212 
10213 		/* i am not expected to understand this */
10214 		if (r1 & 0xc0000)
10215 			r1 |= 0x8000;
10216 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10217 	} else {
10218 		r1 = IWX_READ(sc, IWX_CSR_INT);
10219 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10220 			goto out;
10221 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
10222 	}
10223 	if (r1 == 0 && r2 == 0) {
10224 		goto out_ena;
10225 	}
10226 
10227 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
10228 
10229 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
10230 		int i;
10231 
10232 		/* Firmware has now configured the RFH. */
10233 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10234 			iwx_update_rx_desc(sc, &sc->rxq, i);
10235 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10236 	}
10237 
10238 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
10239 
10240 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
10241 		handled |= IWX_CSR_INT_BIT_RF_KILL;
10242 		iwx_check_rfkill(sc);
10243 		task_add(systq, &sc->init_task);
10244 		rv = 1;
10245 		goto out_ena;
10246 	}
10247 
10248 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
10249 		if (ifp->if_flags & IFF_DEBUG) {
10250 			iwx_nic_error(sc);
10251 			iwx_dump_driver_status(sc);
10252 		}
10253 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10254 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10255 			task_add(systq, &sc->init_task);
10256 		rv = 1;
10257 		goto out;
10258 
	}
10261 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
10262 		handled |= IWX_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10264 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10265 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10266 			task_add(systq, &sc->init_task);
10267 		}
10268 		rv = 1;
10269 		goto out;
10270 	}
10271 
10272 	/* firmware chunk loaded */
10273 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
10274 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
10275 		handled |= IWX_CSR_INT_BIT_FH_TX;
10276 
10277 		sc->sc_fw_chunk_done = 1;
10278 		wakeup(&sc->sc_fw);
10279 	}
10280 
10281 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
10282 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
10283 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
10284 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
10285 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
10286 		}
10287 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
10288 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
10289 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
10290 		}
10291 
10292 		/* Disable periodic interrupt; we use it as just a one-shot. */
10293 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
10294 
10295 		/*
10296 		 * Enable periodic interrupt in 8 msec only if we received
10297 		 * real RX interrupt (instead of just periodic int), to catch
10298 		 * any dangling Rx interrupt.  If it was just the periodic
10299 		 * interrupt, there was no dangling Rx activity, and no need
10300 		 * to extend the periodic interrupt; one-shot is enough.
10301 		 */
10302 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
10303 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
10304 			    IWX_CSR_INT_PERIODIC_ENA);
10305 
10306 		iwx_notif_intr(sc);
10307 	}
10308 
10309 	rv = 1;
10310 
10311  out_ena:
10312 	iwx_restore_interrupts(sc);
10313  out:
10314 	return rv;
10315 }
10316 
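/*
 * MSI-X interrupt handler. Only a single vector is used, so all
 * firmware (FH) and hardware (HW) interrupt causes arrive here.
 */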
10317 int
10318 iwx_intr_msix(void *arg)
10319 {
10320 	struct iwx_softc *sc = arg;
10321 	struct ieee80211com *ic = &sc->sc_ic;
10322 	struct ifnet *ifp = IC2IFP(ic);
10323 	uint32_t inta_fh, inta_hw;
10324 	int vector = 0;
10325 
10326 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
10327 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
10328 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
10329 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
10330 	inta_fh &= sc->sc_fh_mask;
10331 	inta_hw &= sc->sc_hw_mask;
10332 
10333 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
10334 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
10335 		iwx_notif_intr(sc);
10336 	}
10337 
10338 	/* firmware chunk loaded */
10339 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
10340 		sc->sc_fw_chunk_done = 1;
10341 		wakeup(&sc->sc_fw);
10342 	}
10343 
10344 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
10345 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
10346 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
10347 		if (ifp->if_flags & IFF_DEBUG) {
10348 			iwx_nic_error(sc);
10349 			iwx_dump_driver_status(sc);
10350 		}
10351 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10352 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10353 			task_add(systq, &sc->init_task);
10354 		return 1;
10355 	}
10356 
10357 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
10358 		iwx_check_rfkill(sc);
10359 		task_add(systq, &sc->init_task);
10360 	}
10361 
10362 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10364 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10365 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10366 			task_add(systq, &sc->init_task);
10367 		}
10368 		return 1;
10369 	}
10370 
10371 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
10372 		int i;
10373 
10374 		/* Firmware has now configured the RFH. */
10375 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10376 			iwx_update_rx_desc(sc, &sc->rxq, i);
10377 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10378 	}
10379 
10380 	/*
10381 	 * Before sending the interrupt the HW disables it to prevent
10382 	 * a nested interrupt. This is done by writing 1 to the corresponding
10383 	 * bit in the mask register. After handling the interrupt, it should be
10384 	 * re-enabled by clearing this bit. This register is defined as
	 * re-enabled by clearing this bit. This register is defined as a
	 * write-1-to-clear (W1C) register, meaning that the bit is cleared
	 * by writing 1 to it.
10388 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
10389 	return 1;
10390 }
10391 
10392 typedef void *iwx_match_t;
10393 
10394 static const struct pci_matchid iwx_devices[] = {
10395 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
10396 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
10397 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_9 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_10 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_11 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_12 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_13 },
10408 	/* _14 is an MA device, not yet supported */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_15 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_16 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_17 },
10412 };
10413 
int
10416 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
10417 {
10418 	struct pci_attach_args *pa = aux;
10419 	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
10420 }
10421 
10422 /*
10423  * The device info table below contains device-specific config overrides.
10424  * The most important parameter derived from this table is the name of the
10425  * firmware image to load.
10426  *
10427  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
10428  * The "old" table matches devices based on PCI vendor/product IDs only.
 * The "new" table extends this with various device parameters derived
 * from the MAC type and RF type.
 *
 * In iwlwifi "old" and "new" tables share the same array, where "old"
 * entries contain dummy values for data defined only for "new" entries.
 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style, and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
10437  * entries from "old" to "new" have at times been reverted due to regressions.
10438  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
10439  * devices in the same driver.
10440  *
10441  * Our table below contains mostly "new" entries declared in iwlwifi
10442  * with the _IWL_DEV_INFO() macro (with a leading underscore).
10443  * Other devices are matched based on PCI vendor/product ID as usual,
10444  * unless matching specific PCI subsystem vendor/product IDs is required.
10445  *
10446  * Some "old"-style entries are required to identify the firmware image to use.
10447  * Others might be used to print a specific marketing name into Linux dmesg,
10448  * but we can't be sure whether the corresponding devices would be matched
10449  * correctly in the absence of their entries. So we include them just in case.
10450  */
10451 
10452 struct iwx_dev_info {
10453 	uint16_t device;
10454 	uint16_t subdevice;
10455 	uint16_t mac_type;
10456 	uint16_t rf_type;
10457 	uint8_t mac_step;
10458 	uint8_t rf_id;
10459 	uint8_t no_160;
10460 	uint8_t cores;
10461 	uint8_t cdb;
10462 	uint8_t jacket;
10463 	const struct iwx_device_cfg *cfg;
10464 };
10465 
10466 #define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
10467 		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
10468 	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
10469 	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
10470 	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
10471 	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
10472 
10473 #define IWX_DEV_INFO(_device, _subdevice, _cfg) \
10474 	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
10475 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
10476 		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
10477 
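/*
 * For example (illustrative only): an "old"-style entry such as
 *	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0)
 * expands to an entry which matches PCI device 0x2725 with PCI subsystem
 * ID 0x0090, leaving every other field as an IWX_CFG_ANY wildcard.
 */
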
10478 /*
10479  * When adding entries to this table keep in mind that entries must
10480  * be listed in the same order as in the Linux driver. Code walks this
10481  * table backwards and uses the first matching entry it finds.
10482  * Device firmware must be available in fw_update(8).
10483  */
10484 static const struct iwx_dev_info iwx_dev_info_table[] = {
10485 	/* So with HR */
10486 	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
10487 	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
10488 	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
10489 	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
10490 	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
10491 	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
10492 	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
10493 	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
10494 	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
10495 	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
10496 	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
10497 	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
10498 	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
10499 	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
10500 	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10501 	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10502 	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
10503 	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
10504 	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10505 	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10506 	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
10507 	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
10508 	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
10509 	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
10510 	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
10511 	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
10512 	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
10513 	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
10514 	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
10515 	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10516 	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10517 	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
10518 	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
10519 	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
10520 	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10521 	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10522 
10523 	/* So with GF2 */
10524 	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10525 	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10526 	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10527 	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10528 	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10529 	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10530 	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10531 	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10532 	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10533 	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10534 	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10535 	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10536 
10537 	/* Qu with Jf, C step */
10538 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10539 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10540 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10541 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10542 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
10543 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10544 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10545 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10546 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461 */
10548 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10549 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10550 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10551 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10552 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
10553 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10554 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10555 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10556 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10557 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
10558 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10559 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10560 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10561 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10562 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
10563 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10564 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10565 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10566 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10567 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
10568 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10569 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10570 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10571 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10572 		      IWX_CFG_ANY,
10573 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
10574 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10575 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10576 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10577 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10578 		      IWX_CFG_ANY,
10579 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
10580 
10581 	/* QuZ with Jf */
10582 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10583 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10584 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10585 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10586 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
10587 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10588 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10589 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10590 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10591 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
10592 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10593 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10594 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10595 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10596 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
10597 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10598 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10599 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10600 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10601 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
10602 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10603 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10604 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10605 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10606 		      IWX_CFG_ANY,
10607 		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
10608 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10609 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10610 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10611 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10612 		      IWX_CFG_ANY,
10613 		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
10614 
10615 	/* Qu with Hr, B step */
10616 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10617 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10618 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10619 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10620 		      iwx_qu_b0_hr1_b0), /* AX101 */
10621 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10622 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10623 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10624 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10625 		      iwx_qu_b0_hr_b0), /* AX203 */
10626 
10627 	/* Qu with Hr, C step */
10628 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10629 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10630 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10631 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10632 		      iwx_qu_c0_hr1_b0), /* AX101 */
10633 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10634 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10635 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10636 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10637 		      iwx_qu_c0_hr_b0), /* AX203 */
10638 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10639 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10640 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10641 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10642 		      iwx_qu_c0_hr_b0), /* AX201 */
10643 
10644 	/* QuZ with Hr */
10645 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10646 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10647 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10648 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10649 		      iwx_quz_a0_hr1_b0), /* AX101 */
10650 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10651 		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
10652 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10653 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10654 		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
10655 
10656 	/* SoF with JF2 */
10657 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10658 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10659 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10660 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10661 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10662 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10663 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10664 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10665 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10666 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10667 
10668 	/* SoF with JF */
10669 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10670 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10671 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10672 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10673 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10674 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10675 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10676 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10677 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10678 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10679 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10680 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10681 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10682 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461 */
10684 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10685 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10686 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10687 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10688 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10689 
10690 	/* So with Hr */
10691 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10692 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10693 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10694 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10695 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10696 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10697 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10698 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10699 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10701 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10702 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10703 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10704 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10706 
10707 	/* So-F with Hr */
10708 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10709 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10710 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10711 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10712 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10713 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10714 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10715 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10716 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10717 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10718 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10719 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10720 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10721 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10722 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10723 
10724 	/* So-F with GF */
10725 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10726 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10727 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10728 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10729 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10730 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10731 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10732 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10733 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10734 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10735 
10736 	/* So with GF */
10737 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10738 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10739 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10740 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10741 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10742 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10743 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10744 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10745 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10746 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10747 
10748 	/* So with JF2 */
10749 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10750 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10751 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10752 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10753 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10754 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10755 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10756 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10757 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10758 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10759 
10760 	/* So with JF */
10761 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10762 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10763 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10764 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10765 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10766 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10767 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10768 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10769 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10770 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10771 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10772 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10773 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10774 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461 */
10776 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10777 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10778 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10779 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10780 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10781 };
10782 
10783 int
10784 iwx_preinit(struct iwx_softc *sc)
10785 {
10786 	struct ieee80211com *ic = &sc->sc_ic;
10787 	struct ifnet *ifp = IC2IFP(ic);
10788 	int err;
10789 
10790 	err = iwx_prepare_card_hw(sc);
10791 	if (err) {
10792 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10793 		return err;
10794 	}
10795 
10796 	if (sc->attached) {
10797 		/* Update MAC in case the upper layers changed it. */
10798 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
10799 		    ((struct arpcom *)ifp)->ac_enaddr);
10800 		return 0;
10801 	}
10802 
10803 	err = iwx_start_hw(sc);
10804 	if (err) {
10805 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10806 		return err;
10807 	}
10808 
10809 	err = iwx_run_init_mvm_ucode(sc, 1);
10810 	iwx_stop_device(sc);
10811 	if (err)
10812 		return err;
10813 
10814 	/* Print version info and MAC address on first successful fw load. */
10815 	sc->attached = 1;
10816 	if (sc->sc_pnvm_ver) {
10817 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10818 		    "address %s\n",
10819 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10820 		    sc->sc_fwver, sc->sc_pnvm_ver,
10821 		    ether_sprintf(sc->sc_nvm.hw_addr));
10822 	} else {
10823 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
10824 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10825 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10826 	}
10827 
10828 	if (sc->sc_nvm.sku_cap_11n_enable)
10829 		iwx_setup_ht_rates(sc);
10830 	if (sc->sc_nvm.sku_cap_11ac_enable)
10831 		iwx_setup_vht_rates(sc);
10832 
10833 	/* not all hardware can do 5GHz band */
10834 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10835 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10836 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10837 
10838 	/* Configure channel information obtained from firmware. */
10839 	ieee80211_channel_init(ifp);
10840 
10841 	/* Configure MAC address. */
10842 	err = if_setlladdr(ifp, ic->ic_myaddr);
10843 	if (err)
10844 		printf("%s: could not set MAC address (error %d)\n",
10845 		    DEVNAME(sc), err);
10846 
10847 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
10848 
10849 	return 0;
10850 }
10851 
10852 void
10853 iwx_attach_hook(struct device *self)
10854 {
10855 	struct iwx_softc *sc = (void *)self;
10856 
10857 	KASSERT(!cold);
10858 
10859 	iwx_preinit(sc);
10860 }
10861 
10862 const struct iwx_device_cfg *
10863 iwx_find_device_cfg(struct iwx_softc *sc)
10864 {
10865 	pcireg_t sreg;
10866 	pci_product_id_t sdev_id;
10867 	uint16_t mac_type, rf_type;
10868 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10869 	int i;
10870 
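	/* Gather the device parameters that table entries are matched against. */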
10871 	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
10872 	sdev_id = PCI_PRODUCT(sreg);
10873 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10874 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10875 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10876 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10877 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10878 
10879 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10880 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10881 	cores = IWX_SUBDEVICE_CORES(sdev_id);
10882 
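	/*
	 * Walk the table backwards and use the first matching entry found,
	 * as required by the entry ordering (see the comment above
	 * iwx_dev_info_table).
	 */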
10883 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10885 
10886 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10887 		    dev_info->device != sc->sc_pid)
10888 			continue;
10889 
10890 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10891 		    dev_info->subdevice != sdev_id)
10892 			continue;
10893 
10894 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10895 		    dev_info->mac_type != mac_type)
10896 			continue;
10897 
10898 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10899 		    dev_info->mac_step != mac_step)
10900 			continue;
10901 
10902 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10903 		    dev_info->rf_type != rf_type)
10904 			continue;
10905 
10906 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10907 		    dev_info->cdb != cdb)
10908 			continue;
10909 
10910 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10911 		    dev_info->jacket != jacket)
10912 			continue;
10913 
10914 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10915 		    dev_info->rf_id != rf_id)
10916 			continue;
10917 
10918 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10919 		    dev_info->no_160 != no_160)
10920 			continue;
10921 
10922 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10923 		    dev_info->cores != cores)
10924 			continue;
10925 
10926 		return dev_info->cfg;
10927 	}
10928 
10929 	return NULL;
10930 }
10931 
void
10934 iwx_attach(struct device *parent, struct device *self, void *aux)
10935 {
10936 	struct iwx_softc *sc = (void *)self;
10937 	struct pci_attach_args *pa = aux;
10938 	pci_intr_handle_t ih;
10939 	pcireg_t reg, memtype;
10940 	struct ieee80211com *ic = &sc->sc_ic;
10941 	struct ifnet *ifp = &ic->ic_if;
10942 	const char *intrstr;
10943 	const struct iwx_device_cfg *cfg;
10944 	int err;
10945 	int txq_i, i, j;
10946 	size_t ctxt_info_size;
10947 
10948 	sc->sc_pid = PCI_PRODUCT(pa->pa_id);
10949 	sc->sc_pct = pa->pa_pc;
10950 	sc->sc_pcitag = pa->pa_tag;
10951 	sc->sc_dmat = pa->pa_dmat;
10952 
10953 	rw_init(&sc->ioctl_rwl, "iwxioctl");
10954 
10955 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
10956 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
10957 	if (err == 0) {
10958 		printf("%s: PCIe capability structure not found!\n",
10959 		    DEVNAME(sc));
10960 		return;
10961 	}
10962 
10963 	/*
10964 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
10965 	 * PCI Tx retries from interfering with C3 CPU state.
10966 	 */
10967 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10968 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10969 
10970 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
10971 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
10972 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
10973 	if (err) {
10974 		printf("%s: can't map mem space\n", DEVNAME(sc));
10975 		return;
10976 	}
10977 
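	/* Prefer MSI-X; fall back to MSI, then to legacy INTx. */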
10978 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
10979 		sc->sc_msix = 1;
10980 	} else if (pci_intr_map_msi(pa, &ih)) {
10981 		if (pci_intr_map(pa, &ih)) {
10982 			printf("%s: can't map interrupt\n", DEVNAME(sc));
10983 			return;
10984 		}
10985 		/* Hardware bug workaround. */
10986 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
10987 		    PCI_COMMAND_STATUS_REG);
10988 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
10989 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
10990 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
10991 		    PCI_COMMAND_STATUS_REG, reg);
10992 	}
10993 
10994 	intrstr = pci_intr_string(sc->sc_pct, ih);
10995 	if (sc->sc_msix)
10996 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
10997 		    iwx_intr_msix, sc, DEVNAME(sc));
10998 	else
10999 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11000 		    iwx_intr, sc, DEVNAME(sc));
11001 
11002 	if (sc->sc_ih == NULL) {
11003 		printf("\n");
11004 		printf("%s: can't establish interrupt", DEVNAME(sc));
11005 		if (intrstr != NULL)
11006 			printf(" at %s", intrstr);
11007 		printf("\n");
11008 		return;
11009 	}
11010 	printf(", %s\n", intrstr);
11011 
11012 	/* Clear pending interrupts. */
11013 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
11014 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
11015 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
11016 
11017 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
11018 	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
11019 
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (there
	 * is no more "dash" value). To keep hw_rev backwards-compatible, we
	 * store it in the old format.
	 */
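	/*
	 * Concretely (a sketch; assuming IWX_CSR_HW_REV_STEP isolates the
	 * step bits): bits 4-15 are preserved while the step in bits 0-1
	 * is moved into the old "dash" position at bits 2-3.
	 */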
11026 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11027 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11028 
11029 	switch (PCI_PRODUCT(pa->pa_id)) {
11030 	case PCI_PRODUCT_INTEL_WL_22500_1:
11031 		sc->sc_fwname = IWX_CC_A_FW;
11032 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11033 		sc->sc_integrated = 0;
11034 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11035 		sc->sc_low_latency_xtal = 0;
11036 		sc->sc_xtal_latency = 0;
11037 		sc->sc_tx_with_siso_diversity = 0;
11038 		sc->sc_uhb_supported = 0;
11039 		break;
11040 	case PCI_PRODUCT_INTEL_WL_22500_2:
11041 	case PCI_PRODUCT_INTEL_WL_22500_5:
11042 		/* These devices should be QuZ only. */
11043 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
11044 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
11045 			return;
11046 		}
11047 		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11048 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11049 		sc->sc_integrated = 1;
11050 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11051 		sc->sc_low_latency_xtal = 0;
11052 		sc->sc_xtal_latency = 500;
11053 		sc->sc_tx_with_siso_diversity = 0;
11054 		sc->sc_uhb_supported = 0;
11055 		break;
11056 	case PCI_PRODUCT_INTEL_WL_22500_3:
11057 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11058 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11059 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11060 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11061 		else
11062 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11063 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11064 		sc->sc_integrated = 1;
11065 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11066 		sc->sc_low_latency_xtal = 0;
11067 		sc->sc_xtal_latency = 500;
11068 		sc->sc_tx_with_siso_diversity = 0;
11069 		sc->sc_uhb_supported = 0;
11070 		break;
11071 	case PCI_PRODUCT_INTEL_WL_22500_4:
11072 	case PCI_PRODUCT_INTEL_WL_22500_7:
11073 	case PCI_PRODUCT_INTEL_WL_22500_8:
11074 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11075 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11076 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11077 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11078 		else
11079 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11080 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11081 		sc->sc_integrated = 1;
11082 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
11083 		sc->sc_low_latency_xtal = 0;
11084 		sc->sc_xtal_latency = 1820;
11085 		sc->sc_tx_with_siso_diversity = 0;
11086 		sc->sc_uhb_supported = 0;
11087 		break;
11088 	case PCI_PRODUCT_INTEL_WL_22500_6:
11089 		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11090 			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11091 		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11092 			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11093 		else
11094 			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11095 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11096 		sc->sc_integrated = 1;
11097 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
11098 		sc->sc_low_latency_xtal = 1;
11099 		sc->sc_xtal_latency = 12000;
11100 		sc->sc_tx_with_siso_diversity = 0;
11101 		sc->sc_uhb_supported = 0;
11102 		break;
11103 	case PCI_PRODUCT_INTEL_WL_22500_9:
11104 	case PCI_PRODUCT_INTEL_WL_22500_10:
11105 	case PCI_PRODUCT_INTEL_WL_22500_11:
11106 	case PCI_PRODUCT_INTEL_WL_22500_13:
11107 	/* _14 is an MA device, not yet supported */
11108 	case PCI_PRODUCT_INTEL_WL_22500_15:
11109 	case PCI_PRODUCT_INTEL_WL_22500_16:
11110 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
11111 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
11112 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
11113 		sc->sc_integrated = 0;
11114 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11115 		sc->sc_low_latency_xtal = 0;
11116 		sc->sc_xtal_latency = 0;
11117 		sc->sc_tx_with_siso_diversity = 0;
11118 		sc->sc_uhb_supported = 1;
11119 		break;
11120 	case PCI_PRODUCT_INTEL_WL_22500_12:
11121 	case PCI_PRODUCT_INTEL_WL_22500_17:
11122 		sc->sc_fwname = IWX_SO_A_GF_A_FW;
11123 		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
11124 		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
11125 		sc->sc_integrated = 1;
11126 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
11127 		sc->sc_low_latency_xtal = 1;
11128 		sc->sc_xtal_latency = 12000;
11129 		sc->sc_tx_with_siso_diversity = 0;
11130 		sc->sc_uhb_supported = 0;
11131 		sc->sc_imr_enabled = 1;
11132 		break;
11133 	default:
11134 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11135 		return;
11136 	}
11137 
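	/*
	 * If the device info table provides a more specific config for this
	 * device, let it override the defaults chosen above based on the
	 * PCI product ID alone.
	 */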
11138 	cfg = iwx_find_device_cfg(sc);
11139 	if (cfg) {
11140 		sc->sc_fwname = cfg->fw_name;
11141 		sc->sc_pnvm_name = cfg->pnvm_name;
11142 		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
11143 		sc->sc_uhb_supported = cfg->uhb_supported;
11144 		if (cfg->xtal_latency) {
11145 			sc->sc_xtal_latency = cfg->xtal_latency;
11146 			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
11147 		}
11148 	}
11149 
11150 	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
11151 
11152 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11153 		sc->sc_umac_prph_offset = 0x300000;
11154 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
11155 	} else
11156 		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
11157 
11158 	/* Allocate DMA memory for loading firmware. */
11159 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
11160 		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
11161 	else
11162 		ctxt_info_size = sizeof(struct iwx_context_info);
11163 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
11164 	    ctxt_info_size, 0);
11165 	if (err) {
11166 		printf("%s: could not allocate memory for loading firmware\n",
11167 		    DEVNAME(sc));
11168 		return;
11169 	}
11170 
11171 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11172 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
11173 		    sizeof(struct iwx_prph_scratch), 0);
11174 		if (err) {
11175 			printf("%s: could not allocate prph scratch memory\n",
11176 			    DEVNAME(sc));
11177 			goto fail1;
11178 		}
11179 
		/*
		 * Allocate prph information. The driver doesn't use this
		 * data itself, but the second half of this page provides
		 * the device with dummy TR/CR tail pointers. These shouldn't
		 * be necessary since we don't use the data, but the hardware
		 * still reads/writes there and we can't let it do that with
		 * a NULL pointer.
		 */
11188 		KASSERT(sizeof(struct iwx_prph_info) < PAGE_SIZE / 2);
11189 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
11190 		    PAGE_SIZE, 0);
11191 		if (err) {
11192 			printf("%s: could not allocate prph info memory\n",
11193 			    DEVNAME(sc));
11194 			goto fail1;
11195 		}
11196 	}
11197 
	/* Allocate interrupt cause table (ICT). */
11199 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11200 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
11201 	if (err) {
11202 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11203 		goto fail1;
11204 	}
11205 
11206 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11207 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11208 		if (err) {
11209 			printf("%s: could not allocate TX ring %d\n",
11210 			    DEVNAME(sc), txq_i);
11211 			goto fail4;
11212 		}
11213 	}
11214 
11215 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
11216 	if (err) {
11217 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11218 		goto fail4;
11219 	}
11220 
11221 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
11222 	if (sc->sc_nswq == NULL)
11223 		goto fail4;
11224 
11225 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11226 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11227 	ic->ic_state = IEEE80211_S_INIT;
11228 
11229 	/* Set device capabilities. */
11230 	ic->ic_caps =
11231 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11232 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
11233 	    IEEE80211_C_WEP |		/* WEP */
11234 	    IEEE80211_C_RSN |		/* WPA/RSN */
11235 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11236 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11237 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11238 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11239 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11240 
11241 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11242 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11243 	ic->ic_htcaps |=
11244 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11245 	ic->ic_htxcaps = 0;
11246 	ic->ic_txbfcaps = 0;
11247 	ic->ic_aselcaps = 0;
11248 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11249 
11250 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11251 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11252 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11253 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11254 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11255 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11256 
11257 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11258 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11259 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11260 
11261 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11262 		sc->sc_phyctxt[i].id = i;
11263 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11264 		sc->sc_phyctxt[i].vht_chan_width =
11265 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11266 	}
11267 
11268 	/* IBSS channel undefined for now. */
11269 	ic->ic_ibss_chan = &ic->ic_channels[1];
11270 
11271 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
11272 
11273 	ifp->if_softc = sc;
11274 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11275 	ifp->if_ioctl = iwx_ioctl;
11276 	ifp->if_start = iwx_start;
11277 	ifp->if_watchdog = iwx_watchdog;
11278 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11279 
11280 	if_attach(ifp);
11281 	ieee80211_ifattach(ifp);
11282 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
11283 
11284 #if NBPFILTER > 0
11285 	iwx_radiotap_attach(sc);
11286 #endif
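	/* Initialize Rx block ack (BA) reorder buffers and their timers. */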
11287 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11288 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
11289 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
11290 		rxba->sc = sc;
11291 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
11292 		    rxba);
11293 		timeout_set(&rxba->reorder_buf.reorder_timer,
11294 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
11295 		for (j = 0; j < nitems(rxba->entries); j++)
11296 			ml_init(&rxba->entries[j].frames);
11297 	}
11298 	task_set(&sc->init_task, iwx_init_task, sc);
11299 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
11300 	task_set(&sc->ba_task, iwx_ba_task, sc);
11301 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
11302 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
11303 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
11304 	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
11305 
11306 	ic->ic_node_alloc = iwx_node_alloc;
11307 	ic->ic_bgscan_start = iwx_bgscan;
11308 	ic->ic_bgscan_done = iwx_bgscan_done;
11309 	ic->ic_set_key = iwx_set_key;
11310 	ic->ic_delete_key = iwx_delete_key;
11311 
11312 	/* Override 802.11 state transition machine. */
11313 	sc->sc_newstate = ic->ic_newstate;
11314 	ic->ic_newstate = iwx_newstate;
11315 	ic->ic_updatechan = iwx_updatechan;
11316 	ic->ic_updateprot = iwx_updateprot;
11317 	ic->ic_updateslot = iwx_updateslot;
11318 	ic->ic_updateedca = iwx_updateedca;
11319 	ic->ic_updatedtim = iwx_updatedtim;
11320 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
11321 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
11322 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
11323 	ic->ic_ampdu_tx_stop = NULL;
11324 	/*
11325 	 * We cannot read the MAC address without loading the
11326 	 * firmware from disk. Postpone until mountroot is done.
11327 	 */
11328 	config_mountroot(self, iwx_attach_hook);
11329 
11330 	return;
11331 
11332 fail4:	while (--txq_i >= 0)
11333 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
11334 	iwx_free_rx_ring(sc, &sc->rxq);
11335 	if (sc->ict_dma.vaddr != NULL)
11336 		iwx_dma_contig_free(&sc->ict_dma);
11337 
11338 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
11339 	iwx_dma_contig_free(&sc->prph_scratch_dma);
11340 	iwx_dma_contig_free(&sc->prph_info_dma);
11341 	return;
11342 }
11343 
11344 #if NBPFILTER > 0
11345 void
11346 iwx_radiotap_attach(struct iwx_softc *sc)
11347 {
11348 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
11349 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
11350 
11351 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
11352 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
11353 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
11354 
11355 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
11356 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
11357 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
11358 }
11359 #endif
11360 
11361 void
11362 iwx_init_task(void *arg1)
11363 {
11364 	struct iwx_softc *sc = arg1;
11365 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11366 	int s = splnet();
11367 	int generation = sc->sc_generation;
11368 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
11369 
11370 	rw_enter_write(&sc->ioctl_rwl);
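	/*
	 * If the generation changed while this task was pending, the
	 * interface was reset in the meantime; this work is stale, so
	 * abandon it.
	 */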
11371 	if (generation != sc->sc_generation) {
11372 		rw_exit(&sc->ioctl_rwl);
11373 		splx(s);
11374 		return;
11375 	}
11376 
11377 	if (ifp->if_flags & IFF_RUNNING)
11378 		iwx_stop(ifp);
11379 	else
11380 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
11381 
11382 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
11383 		iwx_init(ifp);
11384 
11385 	rw_exit(&sc->ioctl_rwl);
11386 	splx(s);
11387 }
11388 
11389 void
11390 iwx_resume(struct iwx_softc *sc)
11391 {
11392 	pcireg_t reg;
11393 
11394 	/*
11395 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11396 	 * PCI Tx retries from interfering with C3 CPU state.
11397 	 */
11398 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11399 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11400 
11401 	if (!sc->sc_msix) {
11402 		/* Hardware bug workaround. */
11403 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11404 		    PCI_COMMAND_STATUS_REG);
11405 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11406 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11407 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11408 		    PCI_COMMAND_STATUS_REG, reg);
11409 	}
11410 
11411 	iwx_disable_interrupts(sc);
11412 }
11413 
11414 int
11415 iwx_wakeup(struct iwx_softc *sc)
11416 {
11417 	struct ieee80211com *ic = &sc->sc_ic;
11418 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11419 	int err;
11420 
11421 	rw_enter_write(&sc->ioctl_rwl);
11422 
11423 	err = iwx_start_hw(sc);
11424 	if (err) {
11425 		rw_exit(&sc->ioctl_rwl);
11426 		return err;
11427 	}
11428 
11429 	err = iwx_init_hw(sc);
11430 	if (err) {
11431 		iwx_stop_device(sc);
11432 		rw_exit(&sc->ioctl_rwl);
11433 		return err;
11434 	}
11435 
11436 	refcnt_init(&sc->task_refs);
11437 	ifq_clr_oactive(&ifp->if_snd);
11438 	ifp->if_flags |= IFF_RUNNING;
11439 
11440 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
11441 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
11442 	else
11443 		ieee80211_begin_scan(ifp);
11444 
11445 	rw_exit(&sc->ioctl_rwl);
11446 	return 0;
11447 }
11448 
11449 int
11450 iwx_activate(struct device *self, int act)
11451 {
11452 	struct iwx_softc *sc = (struct iwx_softc *)self;
11453 	struct ifnet *ifp = &sc->sc_ic.ic_if;
11454 	int err = 0;
11455 
11456 	switch (act) {
11457 	case DVACT_QUIESCE:
11458 		if (ifp->if_flags & IFF_RUNNING) {
11459 			rw_enter_write(&sc->ioctl_rwl);
11460 			iwx_stop(ifp);
11461 			rw_exit(&sc->ioctl_rwl);
11462 		}
11463 		break;
11464 	case DVACT_RESUME:
11465 		iwx_resume(sc);
11466 		break;
11467 	case DVACT_WAKEUP:
11468 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
11469 			err = iwx_wakeup(sc);
11470 			if (err)
11471 				printf("%s: could not initialize hardware\n",
11472 				    DEVNAME(sc));
11473 		}
11474 		break;
11475 	}
11476 
11477 	return 0;
11478 }
11479 
11480 struct cfdriver iwx_cd = {
11481 	NULL, "iwx", DV_IFNET
11482 };
11483 
11484 const struct cfattach iwx_ca = {
11485 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
11486 	NULL, iwx_activate
11487 };
11488