/*	$OpenBSD: if_iwx.c,v 1.128 2021/12/03 14:32:08 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWX_DEBUG
#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
int iwx_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwxreg.h>
#include <dev/pci/if_iwxvar.h>

const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS	14

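/*
 * Bit rates in iwx_rates[] below are given in units of 500 kbit/s, matching
 * net80211 rate sets: { 2, ... } is the 1 Mbit/s CCK rate and { 12, ... }
 * is 6 Mbit/s OFDM. The PLCP columns hold the corresponding firmware rate
 * codes; MIMO HT entries have no legacy equivalent and use
 * IWX_RATE_INVM_PLCP in the legacy column.
 */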
const struct iwx_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_is_mimo_ht_plcp(uint8_t);
int	iwx_is_mimo_mcs(int);
int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
int	iwx_apply_debug_destination(struct iwx_softc *);
int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
void	iwx_ctxt_info_free_paging(struct iwx_softc *);
int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
	    struct iwx_context_info_dram *);
void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
	    uint8_t *, size_t);
int	iwx_set_default_calib(struct iwx_softc *, const void *);
void	iwx_fw_info_free(struct iwx_fw_info *);
int	iwx_read_firmware(struct iwx_softc *);
uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
int	iwx_nic_lock(struct iwx_softc *);
void	iwx_nic_assert_locked(struct iwx_softc *);
void	iwx_nic_unlock(struct iwx_softc *);
int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
	    bus_size_t);
void	iwx_dma_contig_free(struct iwx_dma_info *);
int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_disable_rx_dma(struct iwx_softc *);
void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_enable_rfkill_int(struct iwx_softc *);
int	iwx_check_rfkill(struct iwx_softc *);
void	iwx_enable_interrupts(struct iwx_softc *);
void	iwx_enable_fwload_interrupt(struct iwx_softc *);
void	iwx_restore_interrupts(struct iwx_softc *);
void	iwx_disable_interrupts(struct iwx_softc *);
void	iwx_ict_reset(struct iwx_softc *);
int	iwx_set_hw_ready(struct iwx_softc *);
int	iwx_prepare_card_hw(struct iwx_softc *);
int	iwx_force_power_gating(struct iwx_softc *);
void	iwx_apm_config(struct iwx_softc *);
int	iwx_apm_init(struct iwx_softc *);
void	iwx_apm_stop(struct iwx_softc *);
int	iwx_allow_mcast(struct iwx_softc *);
void	iwx_init_msix_hw(struct iwx_softc *);
void	iwx_conf_msix_hw(struct iwx_softc *, int);
int	iwx_clear_persistence_bit(struct iwx_softc *);
int	iwx_start_hw(struct iwx_softc *);
void	iwx_stop_device(struct iwx_softc *);
void	iwx_nic_config(struct iwx_softc *);
int	iwx_nic_rx_init(struct iwx_softc *);
int	iwx_nic_init(struct iwx_softc *);
int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
void	iwx_post_alive(struct iwx_softc *);
int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
	    uint32_t);
void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
void	iwx_setup_ht_rates(struct iwx_softc *);
int	iwx_mimo_enabled(struct iwx_softc *);
void	iwx_mac_ctxt_task(void *);
void	iwx_phy_ctxt_task(void *);
void	iwx_updatechan(struct ieee80211com *);
void	iwx_updateprot(struct ieee80211com *);
void	iwx_updateslot(struct ieee80211com *);
void	iwx_updateedca(struct ieee80211com *);
void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwx_rx_ba_session_expired(void *);
void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *, struct mbuf_list *);
void	iwx_reorder_timer_expired(void *);
void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
	    uint8_t);
void	iwx_ba_task(void *);

int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
int	iwx_is_valid_mac_addr(const uint8_t *);
int	iwx_nvm_get(struct iwx_softc *);
int	iwx_load_firmware(struct iwx_softc *);
int	iwx_start_fw(struct iwx_softc *);
int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
int	iwx_load_ucode_wait_alive(struct iwx_softc *);
int	iwx_send_dqa_cmd(struct iwx_softc *);
int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
int	iwx_config_ltr(struct iwx_softc *);
void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
int	iwx_rx_addbuf(struct iwx_softc *, int, int);
int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
	    struct iwx_rx_data *);
int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
int	iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t);
int	iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint8_t);
int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t);
int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
	    uint32_t *);
int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
void	iwx_cmd_done(struct iwx_softc *, int, int, int);
const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
int	iwx_wait_tx_queues_empty(struct iwx_softc *);
int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
	    struct iwx_beacon_filter_cmd *);
int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_power_cmd *);
int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
int	iwx_power_update_device(struct iwx_softc *);
int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
int	iwx_disable_beacon_filter(struct iwx_softc *);
int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
int	iwx_config_umac_scan_reduced(struct iwx_softc *);
uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, int);
void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
	    struct iwx_scan_general_params_v10 *, uint16_t, int);
void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
	    struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
int	iwx_umac_scan_v14(struct iwx_softc *, int);
void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
int	iwx_rval2ridx(int);
void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_ctx_cmd *, uint32_t);
void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
	    struct iwx_mac_data_sta *, int);
int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
int	iwx_clear_statistics(struct iwx_softc *);
void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
int	iwx_scan(struct iwx_softc *);
int	iwx_bgscan(struct ieee80211com *);
void	iwx_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwx_bgscan_done_task(void *);
int	iwx_umac_scan_abort(struct iwx_softc *);
int	iwx_scan_abort(struct iwx_softc *);
int	iwx_enable_mgmt_queue(struct iwx_softc *);
int	iwx_rs_rval2idx(uint8_t);
uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
int	iwx_enable_data_tx_queues(struct iwx_softc *);
int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
int	iwx_auth(struct iwx_softc *);
int	iwx_deauth(struct iwx_softc *);
int	iwx_run(struct iwx_softc *);
int	iwx_run_stop(struct iwx_softc *);
struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwx_setkey_task(void *);
void	iwx_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
int	iwx_media_change(struct ifnet *);
void	iwx_newstate_task(void *);
int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwx_endscan(struct iwx_softc *);
void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwx_sf_config(struct iwx_softc *, int);
int	iwx_send_bt_init_conf(struct iwx_softc *);
int	iwx_send_soc_conf(struct iwx_softc *);
int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
int	iwx_init_hw(struct iwx_softc *);
int	iwx_init(struct ifnet *);
void	iwx_start(struct ifnet *);
void	iwx_stop(struct ifnet *);
void	iwx_watchdog(struct ifnet *);
int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwx_desc_lookup(uint32_t);
void	iwx_nic_error(struct iwx_softc *);
void	iwx_dump_driver_status(struct iwx_softc *);
void	iwx_nic_umac_error(struct iwx_softc *);
int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
	    struct mbuf_list *);
void	iwx_notif_intr(struct iwx_softc *);
int	iwx_intr(void *);
int	iwx_intr_msix(void *);
int	iwx_match(struct device *, void *, void *);
int	iwx_preinit(struct iwx_softc *);
void	iwx_attach_hook(struct device *);
void	iwx_attach(struct device *, struct device *, void *);
void	iwx_init_task(void *);
int	iwx_activate(struct device *, int);
void	iwx_resume(struct iwx_softc *);
int	iwx_wakeup(struct iwx_softc *);

#if NBPFILTER > 0
void	iwx_radiotap_attach(struct iwx_softc *);
#endif

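/*
 * Firmware images advertise the command and notification versions they
 * implement via the IWX_UCODE_TLV_CMD_VERSIONS TLV; those entries are
 * copied into sc->cmd_versions while the firmware file is parsed. The
 * lookup helpers below return IWX_FW_CMD_VER_UNKNOWN when the firmware
 * did not list the given group/command pair.
 */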
uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

uint8_t
iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwx_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->notif_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}

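/*
 * An HT rate is MIMO if its PLCP value is valid and has a non-zero
 * number-of-spatial-streams field; SISO MCS 0-7 leave that field clear.
 */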
int
iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
}

int
iwx_is_mimo_mcs(int mcs)
{
	int ridx = iwx_mcs2ridx[mcs];
	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
}

int
iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwx_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

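/*
 * Allocate DMA-safe memory for a firmware section and copy the section's
 * contents into it so the device can fetch the image during self-load.
 */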
int
iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
{
	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}

	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);

	return 0;
}

void
iwx_ctxt_info_free_paging(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->paging)
		return;

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		iwx_dma_contig_free(&dram->paging[i]);

	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

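/*
 * Firmware sections are laid out as: lmac sections, a CPU1/CPU2 separator,
 * umac sections, a paging separator, then paging sections. Count the
 * consecutive sections starting at 'start' until the next separator.
 */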
int
iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
{
	int i = 0;

	while (start < fws->fw_count &&
	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory is kept separate from the umac and lmac sections
	 * in dram->fw because its lifetime differs: firmware sections can
	 * be released once the firmware is alive, whereas paging memory
	 * may only be freed when the device goes down.
	 * Since fw_cnt no longer advances in this loop, the loop counter
	 * is added to it when indexing the firmware image.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}

void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

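/*
 * Allocate a DMA buffer for the firmware monitor, trying successively
 * smaller power-of-two sizes from 2^max_power down to 2^min_power bytes
 * until an allocation succeeds.
 */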
int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size));
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10))));

	return 0;
}

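/*
 * The buffer size exponent supplied by the firmware's debug TLV is
 * relative to 2^11 (2KB); a value of zero requests the maximum supported
 * size of 2^26 (64MB).
 */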
int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		DPRINTF(("%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power));
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}

int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err) {
				/* Don't leak the NIC access lock on error. */
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err) {
				iwx_nic_unlock(sc);
				return err;
			}
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		/* base_reg and end_reg were already byte-swapped above. */
		iwx_write_prph(sc, base_reg,
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
}

int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

void
iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i;

	if (!dram->fw)
		return;

	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
		iwx_dma_contig_free(&dram->fw[i]);

	free(dram->fw, M_DEVBUF,
	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
	dram->lmac_cnt = 0;
	dram->umac_cnt = 0;
	dram->fw = NULL;
}

int
iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwx_fw_sects *fws;
	struct iwx_fw_onesect *fwone;

	if (type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type,
	    fws->fw_count));
	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWX_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */

struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;

int
iwx_set_default_calib(struct iwx_softc *sc, const void *data)
{
	const struct iwx_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWX_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwx_fw_info_free(struct iwx_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

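/*
 * Error event table addresses reported by firmware may carry extra bits
 * in the two most significant positions; the mask below is used to strip
 * them before the addresses are stored.
 */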
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000

int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	int err;
	size_t len;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
	fw->fw_status = IWX_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwx_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			fw->dbg_dest_ver = (uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg));
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			DPRINTF(("Found debug configuration: %d\n", conf->id));
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			struct iwx_umac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
				le32toh(dbg_ptrs->error_info_addr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			struct iwx_lmac_debug_addrs *dbg_ptrs =
				(void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
				le32toh(dbg_ptrs->error_event_table_ptr) &
				~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
				IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

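		/*
		 * A TLV length which is not a multiple of the entry size
		 * is rounded down to a whole number of iwx_fw_cmd_version
		 * entries; any trailing partial entry is ignored.
		 */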
		case IWX_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len /
			    sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
{
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_READ_WRITE(sc);
	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}

void
iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	IWX_WRITE(sc,
	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWX_BARRIER_WRITE(sc);
	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
}

void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}

void
iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
{
	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
		iwx_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwx_nic_lock(sc)) {
		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
		}
		iwx_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	return iwx_write_mem(sc, addr, &val, 1);
}

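/*
 * Poll until (reg & mask) == (bits & mask), checking every 10 microseconds
 * for at most 'timo' microseconds. Returns 1 on success, 0 on timeout.
 */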
int
iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

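/*
 * Request MAC access and wait for the device clocks to become ready.
 * Calls may nest; sc_nic_locks counts outstanding holders, and the MAC
 * access request is only dropped when the last holder unlocks.
 */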
1518 int
1519 iwx_nic_lock(struct iwx_softc *sc)
1520 {
1521 	if (sc->sc_nic_locks > 0) {
1522 		iwx_nic_assert_locked(sc);
1523 		sc->sc_nic_locks++;
1524 		return 1; /* already locked */
1525 	}
1526 
1527 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1528 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1529 
1530 	DELAY(2);
1531 
1532 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1533 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1534 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1535 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1536 		sc->sc_nic_locks++;
1537 		return 1;
1538 	}
1539 
1540 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1541 	return 0;
1542 }
1543 
1544 void
1545 iwx_nic_assert_locked(struct iwx_softc *sc)
1546 {
1547 	if (sc->sc_nic_locks <= 0)
1548 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1549 }
1550 
1551 void
1552 iwx_nic_unlock(struct iwx_softc *sc)
1553 {
1554 	if (sc->sc_nic_locks > 0) {
1555 		if (--sc->sc_nic_locks == 0)
1556 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1557 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1558 	} else
1559 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1560 }
1561 
1562 int
1563 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1564     uint32_t mask)
1565 {
1566 	uint32_t val;
1567 
1568 	if (iwx_nic_lock(sc)) {
1569 		val = iwx_read_prph(sc, reg) & mask;
1570 		val |= bits;
1571 		iwx_write_prph(sc, reg, val);
1572 		iwx_nic_unlock(sc);
1573 		return 0;
1574 	}
1575 	return EBUSY;
1576 }
1577 
1578 int
1579 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1580 {
1581 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1582 }
1583 
1584 int
1585 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1586 {
1587 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1588 }
1589 
1590 int
1591 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1592     bus_size_t size, bus_size_t alignment)
1593 {
1594 	int nsegs, err;
1595 	caddr_t va;
1596 
1597 	dma->tag = tag;
1598 	dma->size = size;
1599 
1600 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1601 	    &dma->map);
1602 	if (err)
1603 		goto fail;
1604 
1605 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1606 	    BUS_DMA_NOWAIT);
1607 	if (err)
1608 		goto fail;
1609 
1610 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1611 	    BUS_DMA_NOWAIT);
1612 	if (err)
1613 		goto fail;
1614 	dma->vaddr = va;
1615 
1616 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1617 	    BUS_DMA_NOWAIT);
1618 	if (err)
1619 		goto fail;
1620 
1621 	memset(dma->vaddr, 0, size);
1622 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1623 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1624 
1625 	return 0;
1626 
1627 fail:	iwx_dma_contig_free(dma);
1628 	return err;
1629 }
1630 
1631 void
1632 iwx_dma_contig_free(struct iwx_dma_info *dma)
1633 {
1634 	if (dma->map != NULL) {
1635 		if (dma->vaddr != NULL) {
1636 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1637 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1638 			bus_dmamap_unload(dma->tag, dma->map);
1639 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1640 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1641 			dma->vaddr = NULL;
1642 		}
1643 		bus_dmamap_destroy(dma->tag, dma->map);
1644 		dma->map = NULL;
1645 	}
1646 }
1647 
1648 int
1649 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1650 {
1651 	bus_size_t size;
1652 	int i, err;
1653 
1654 	ring->cur = 0;
1655 
1656 	/* Allocate RX descriptors (256-byte aligned). */
1657 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1658 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1659 	if (err) {
1660 		printf("%s: could not allocate RX ring DMA memory\n",
1661 		    DEVNAME(sc));
1662 		goto fail;
1663 	}
1664 	ring->desc = ring->free_desc_dma.vaddr;
1665 
1666 	/* Allocate RX status area (16-byte aligned). */
1667 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1668 	    sizeof(*ring->stat), 16);
1669 	if (err) {
1670 		printf("%s: could not allocate RX status DMA memory\n",
1671 		    DEVNAME(sc));
1672 		goto fail;
1673 	}
1674 	ring->stat = ring->stat_dma.vaddr;
1675 
1676 	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1677 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1678 	    size, 256);
1679 	if (err) {
1680 		printf("%s: could not allocate RX ring DMA memory\n",
1681 		    DEVNAME(sc));
1682 		goto fail;
1683 	}
1684 
1685 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1686 		struct iwx_rx_data *data = &ring->data[i];
1687 
1688 		memset(data, 0, sizeof(*data));
1689 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1690 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1691 		    &data->map);
1692 		if (err) {
1693 			printf("%s: could not create RX buf DMA map\n",
1694 			    DEVNAME(sc));
1695 			goto fail;
1696 		}
1697 
1698 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1699 		if (err)
1700 			goto fail;
1701 	}
1702 	return 0;
1703 
1704 fail:	iwx_free_rx_ring(sc, ring);
1705 	return err;
1706 }
1707 
1708 void
1709 iwx_disable_rx_dma(struct iwx_softc *sc)
1710 {
1711 	int ntries;
1712 
1713 	if (iwx_nic_lock(sc)) {
1714 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1715 		for (ntries = 0; ntries < 1000; ntries++) {
1716 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1717 			    IWX_RXF_DMA_IDLE)
1718 				break;
1719 			DELAY(10);
1720 		}
1721 		iwx_nic_unlock(sc);
1722 	}
1723 }
1724 
1725 void
1726 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1727 {
1728 	ring->cur = 0;
1729 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1730 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1731 	memset(ring->stat, 0, sizeof(*ring->stat));
1732 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1733 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1735 }
1736 
1737 void
1738 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1739 {
1740 	int i;
1741 
1742 	iwx_dma_contig_free(&ring->free_desc_dma);
1743 	iwx_dma_contig_free(&ring->stat_dma);
1744 	iwx_dma_contig_free(&ring->used_desc_dma);
1745 
1746 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1747 		struct iwx_rx_data *data = &ring->data[i];
1748 
1749 		if (data->m != NULL) {
1750 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1751 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1752 			bus_dmamap_unload(sc->sc_dmat, data->map);
1753 			m_freem(data->m);
1754 			data->m = NULL;
1755 		}
1756 		if (data->map != NULL)
1757 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1758 	}
1759 }
1760 
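/*
 * Allocate a TX ring: TFD descriptors, the byte count table used by
 * the firmware scheduler, per-slot device command buffers, and one
 * DMA map per slot.  As noted below, the command queue needs more
 * mapped space per slot than data queues do.
 */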
1761 int
1762 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1763 {
1764 	bus_addr_t paddr;
1765 	bus_size_t size;
1766 	int i, err;
1767 
1768 	ring->qid = qid;
1769 	ring->queued = 0;
1770 	ring->cur = 0;
1771 	ring->tail = 0;
1772 
1773 	/* Allocate TX descriptors (256-byte aligned). */
1774 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1775 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1776 	if (err) {
1777 		printf("%s: could not allocate TX ring DMA memory\n",
1778 		    DEVNAME(sc));
1779 		goto fail;
1780 	}
1781 	ring->desc = ring->desc_dma.vaddr;
1782 
1783 	/*
1784 	 * The hardware supports up to 512 Tx rings which is more
1785 	 * than we currently need.
1786 	 *
1787 	 * In DQA mode we use 1 command queue + 1 default queue for
1788 	 * management, control, and non-QoS data frames.
1789 	 * The command queue is sc->txq[0], our default queue is sc->txq[1].
1790 	 *
1791 	 * Tx aggregation requires additional queues, one queue per TID for
1792 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
1793 	 * Firmware may assign its own internal IDs for these queues
1794 	 * depending on which TID gets aggregation enabled first.
1795 	 * The driver maintains a table mapping driver-side queue IDs
1796 	 * to firmware-side queue IDs.
1797 	 */
1798 
1799 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1800 	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1801 	if (err) {
1802 		printf("%s: could not allocate byte count table DMA memory\n",
1803 		    DEVNAME(sc));
1804 		goto fail;
1805 	}
1806 
1807 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1808 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1809 	    IWX_FIRST_TB_SIZE_ALIGN);
1810 	if (err) {
1811 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1812 		goto fail;
1813 	}
1814 	ring->cmd = ring->cmd_dma.vaddr;
1815 
1816 	paddr = ring->cmd_dma.paddr;
1817 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1818 		struct iwx_tx_data *data = &ring->data[i];
1819 		size_t mapsize;
1820 
1821 		data->cmd_paddr = paddr;
1822 		paddr += sizeof(struct iwx_device_cmd);
1823 
1824 		/* FW commands may require more mapped space than packets. */
1825 		if (qid == IWX_DQA_CMD_QUEUE)
1826 			mapsize = (sizeof(struct iwx_cmd_header) +
1827 			    IWX_MAX_CMD_PAYLOAD_SIZE);
1828 		else
1829 			mapsize = MCLBYTES;
1830 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1831 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1832 		    &data->map);
1833 		if (err) {
1834 			printf("%s: could not create TX buf DMA map\n",
1835 			    DEVNAME(sc));
1836 			goto fail;
1837 		}
1838 	}
1839 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1840 	return 0;
1841 
1842 fail:	iwx_free_tx_ring(sc, ring);
1843 	return err;
1844 }
1845 
1846 void
1847 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1848 {
1849 	int i;
1850 
1851 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1852 		struct iwx_tx_data *data = &ring->data[i];
1853 
1854 		if (data->m != NULL) {
1855 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1856 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1857 			bus_dmamap_unload(sc->sc_dmat, data->map);
1858 			m_freem(data->m);
1859 			data->m = NULL;
1860 		}
1861 	}
1862 
1863 	/* Clear byte count table. */
1864 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1865 
1866 	/* Clear TX descriptors. */
1867 	memset(ring->desc, 0, ring->desc_dma.size);
1868 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1869 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1870 	sc->qfullmsk &= ~(1 << ring->qid);
1871 	sc->qenablemsk &= ~(1 << ring->qid);
1872 	for (i = 0; i < nitems(sc->aggqid); i++) {
1873 		if (sc->aggqid[i] == ring->qid) {
1874 			sc->aggqid[i] = 0;
1875 			break;
1876 		}
1877 	}
1878 	ring->queued = 0;
1879 	ring->cur = 0;
1880 	ring->tail = 0;
1881 	ring->tid = 0;
1882 }
1883 
1884 void
1885 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1886 {
1887 	int i;
1888 
1889 	iwx_dma_contig_free(&ring->desc_dma);
1890 	iwx_dma_contig_free(&ring->cmd_dma);
1891 	iwx_dma_contig_free(&ring->bc_tbl);
1892 
1893 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1894 		struct iwx_tx_data *data = &ring->data[i];
1895 
1896 		if (data->m != NULL) {
1897 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1898 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1899 			bus_dmamap_unload(sc->sc_dmat, data->map);
1900 			m_freem(data->m);
1901 			data->m = NULL;
1902 		}
1903 		if (data->map != NULL)
1904 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1905 	}
1906 }
1907 
1908 void
1909 iwx_enable_rfkill_int(struct iwx_softc *sc)
1910 {
1911 	if (!sc->sc_msix) {
1912 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1913 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1914 	} else {
1915 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1916 		    sc->sc_fh_init_mask);
1917 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1918 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1919 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1920 	}
1921 
1922 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1923 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1924 }
1925 
1926 int
1927 iwx_check_rfkill(struct iwx_softc *sc)
1928 {
1929 	uint32_t v;
1930 	int rv;
1931 
1932 	/*
1933 	 * "documentation" is not really helpful here:
1934 	 *  27:	HW_RF_KILL_SW
1935 	 *	Indicates state of (platform's) hardware RF-Kill switch
1936 	 *
1937 	 * But apparently when it's off, it's on ...
1938 	 */
1939 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1940 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1941 	if (rv) {
1942 		sc->sc_flags |= IWX_FLAG_RFKILL;
1943 	} else {
1944 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1945 	}
1946 
1947 	return rv;
1948 }
1949 
1950 void
1951 iwx_enable_interrupts(struct iwx_softc *sc)
1952 {
1953 	if (!sc->sc_msix) {
1954 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1955 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1956 	} else {
1957 		/*
1958 		 * fh/hw_mask keeps all the unmasked causes.
1959 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
1960 		 */
1961 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1962 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1963 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1964 		    ~sc->sc_fh_mask);
1965 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1966 		    ~sc->sc_hw_mask);
1967 	}
1968 }
1969 
1970 void
1971 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1972 {
1973 	if (!sc->sc_msix) {
1974 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1975 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1976 	} else {
1977 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1978 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1979 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1980 		/*
1981 		 * Leave all the FH causes enabled to get the ALIVE
1982 		 * notification.
1983 		 */
1984 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1985 		    ~sc->sc_fh_init_mask);
1986 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1987 	}
1988 }
1989 
1990 void
1991 iwx_restore_interrupts(struct iwx_softc *sc)
1992 {
1993 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1994 }
1995 
1996 void
1997 iwx_disable_interrupts(struct iwx_softc *sc)
1998 {
1999 	if (!sc->sc_msix) {
2000 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2001 
2002 		/* acknowledge all interrupts */
2003 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2004 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2005 	} else {
2006 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2007 		    sc->sc_fh_init_mask);
2008 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2009 		    sc->sc_hw_init_mask);
2010 	}
2011 }
2012 
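/*
 * Reset the interrupt cause table (ICT) and re-enable interrupts.
 * In ICT mode the device writes interrupt causes into this DMA table,
 * which the interrupt handler reads instead of the CSR_INT register.
 */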
2013 void
2014 iwx_ict_reset(struct iwx_softc *sc)
2015 {
2016 	iwx_disable_interrupts(sc);
2017 
2018 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2019 	sc->ict_cur = 0;
2020 
2021 	/* Set physical address of ICT (4KB aligned). */
2022 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2023 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2024 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2025 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2026 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2027 
2028 	/* Switch to ICT interrupt mode in driver. */
2029 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2030 
2031 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2032 	iwx_enable_interrupts(sc);
2033 }
2034 
2035 #define IWX_HW_READY_TIMEOUT 50
2036 int
2037 iwx_set_hw_ready(struct iwx_softc *sc)
2038 {
2039 	int ready;
2040 
2041 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2042 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2043 
2044 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2045 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2046 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2047 	    IWX_HW_READY_TIMEOUT);
2048 	if (ready)
2049 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2050 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2051 
2052 	return ready;
2053 }
2054 #undef IWX_HW_READY_TIMEOUT
2055 
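/*
 * Wake the device by asserting the PREPARE handshake and polling for
 * the NIC_READY bit.  Returns 0 once the hardware reports ready, or
 * ETIMEDOUT if it never does.
 */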
2056 int
2057 iwx_prepare_card_hw(struct iwx_softc *sc)
2058 {
2059 	int t = 0;
2060 	int ntries;
2061 
2062 	if (iwx_set_hw_ready(sc))
2063 		return 0;
2064 
2065 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2066 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2067 	DELAY(1000);
2068 
2069 	for (ntries = 0; ntries < 10; ntries++) {
2070 		/* If HW is not ready, prepare the conditions to check again */
2071 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2072 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2073 
2074 		do {
2075 			if (iwx_set_hw_ready(sc))
2076 				return 0;
2077 			DELAY(200);
2078 			t += 200;
2079 		} while (t < 150000);
2080 		DELAY(25000);
2081 	}
2082 
2083 	return ETIMEDOUT;
2084 }
2085 
2086 int
2087 iwx_force_power_gating(struct iwx_softc *sc)
2088 {
2089 	int err;
2090 
2091 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2092 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2093 	if (err)
2094 		return err;
2095 	DELAY(20);
2096 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2097 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2098 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2099 	if (err)
2100 		return err;
2101 	DELAY(20);
2102 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2103 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2104 	return err;
2105 }
2106 
2107 void
2108 iwx_apm_config(struct iwx_softc *sc)
2109 {
2110 	pcireg_t lctl, cap;
2111 
2112 	/*
2113 	 * L0S states have been found to be unstable with our devices
2114 	 * and in newer hardware they are not officially supported at
2115 	 * all, so we must always set the L0S_DISABLED bit.
2116 	 */
2117 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2118 
2119 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2120 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2121 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2122 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2123 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2124 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2125 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2126 	    DEVNAME(sc),
2127 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2128 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2129 }
2130 
2131 /*
2132  * Start up the NIC's basic functionality after it has been reset,
2133  * e.g. after platform boot or shutdown.
2134  * NOTE: This does not load uCode, nor does it start the embedded processor.
2135  */
2136 int
2137 iwx_apm_init(struct iwx_softc *sc)
2138 {
2139 	int err = 0;
2140 
2141 	/*
2142 	 * Disable L0s without affecting L1;
2143 	 *  don't wait for ICH L0s (ICH bug W/A)
2144 	 */
2145 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2146 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2147 
2148 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2149 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2150 
2151 	/*
2152 	 * Enable HAP INTA (interrupt from management bus) to
2153 	 * wake device's PCI Express link L1a -> L0s
2154 	 */
2155 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2156 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2157 
2158 	iwx_apm_config(sc);
2159 
2160 	/*
2161 	 * Set "initialization complete" bit to move adapter from
2162 	 * D0U* --> D0A* (powered-up active) state.
2163 	 */
2164 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2165 
2166 	/*
2167 	 * Wait for clock stabilization; once stabilized, access to
2168 	 * device-internal resources is supported, e.g. iwx_write_prph()
2169 	 * and accesses to uCode SRAM.
2170 	 */
2171 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2172 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2173 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2174 		printf("%s: timeout waiting for clock stabilization\n",
2175 		    DEVNAME(sc));
2176 		err = ETIMEDOUT;
2177 		goto out;
2178 	}
2179  out:
2180 	if (err)
2181 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2182 	return err;
2183 }
2184 
2185 void
2186 iwx_apm_stop(struct iwx_softc *sc)
2187 {
2188 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2189 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2190 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2191 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2192 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2193 	DELAY(1000);
2194 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2195 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2196 	DELAY(5000);
2197 
2198 	/* stop device's busmaster DMA activity */
2199 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2200 
2201 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2202 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2203 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2204 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2205 
2206 	/*
2207 	 * Clear "initialization complete" bit to move adapter from
2208 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2209 	 */
2210 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2211 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2212 }
2213 
2214 void
2215 iwx_init_msix_hw(struct iwx_softc *sc)
2216 {
2217 	iwx_conf_msix_hw(sc, 0);
2218 
2219 	if (!sc->sc_msix)
2220 		return;
2221 
2222 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2223 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2224 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2225 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2226 }
2227 
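/*
 * Program the MSI-X IVAR table.  Both RX queues and all non-RX causes
 * are mapped to vector 0 since the driver services all interrupts with
 * a single handler.
 */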
2228 void
2229 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2230 {
2231 	int vector = 0;
2232 
2233 	if (!sc->sc_msix) {
2234 		/* Newer chips default to MSIX. */
2235 		if (!stopped && iwx_nic_lock(sc)) {
2236 			iwx_write_prph(sc, IWX_UREG_CHICK,
2237 			    IWX_UREG_CHICK_MSI_ENABLE);
2238 			iwx_nic_unlock(sc);
2239 		}
2240 		return;
2241 	}
2242 
2243 	if (!stopped && iwx_nic_lock(sc)) {
2244 		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2245 		iwx_nic_unlock(sc);
2246 	}
2247 
2248 	/* Disable all interrupts */
2249 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2250 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2251 
2252 	/* Map fallback-queue (command/mgmt) to a single vector */
2253 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2254 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2255 	/* Map RSS queue (data) to the same vector */
2256 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2257 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2258 
2259 	/* Enable interrupts for the RX queue causes */
2260 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2261 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2262 
2263 	/* Map non-RX causes to the same vector */
2264 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2265 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2266 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2267 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2268 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2269 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2270 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2271 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2272 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2273 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2274 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2275 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2276 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2277 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2278 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2279 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2280 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2281 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2282 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2283 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2284 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2285 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2286 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2287 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2288 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2289 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2290 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2291 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2292 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2293 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2294 
2295 	/* Enable interrupts for the non-RX causes */
2296 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2297 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2298 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2299 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2300 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2301 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2302 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2303 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2304 	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2305 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2306 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2307 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2308 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2309 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2310 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2311 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2312 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2313 }
2314 
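/*
 * Attempt to clear the persistence bit in the HPM debug register
 * before resetting the device.  This fails with EPERM if the register
 * is write-protected by the WFPM access bit.
 */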
2315 int
2316 iwx_clear_persistence_bit(struct iwx_softc *sc)
2317 {
2318 	uint32_t hpm, wprot;
2319 
2320 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2321 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2322 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2323 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2324 			printf("%s: cannot clear persistence bit\n",
2325 			    DEVNAME(sc));
2326 			return EPERM;
2327 		}
2328 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2329 		    hpm & ~IWX_PERSISTENCE_BIT);
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 int
2336 iwx_start_hw(struct iwx_softc *sc)
2337 {
2338 	int err;
2339 
2340 	err = iwx_prepare_card_hw(sc);
2341 	if (err)
2342 		return err;
2343 
2344 	err = iwx_clear_persistence_bit(sc);
2345 	if (err)
2346 		return err;
2347 
2348 	/* Reset the entire device */
2349 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2350 	DELAY(5000);
2351 
2352 	if (sc->sc_integrated) {
2353 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2354 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2355 		DELAY(20);
2356 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2357 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2358 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2359 			printf("%s: timeout waiting for clock stabilization\n",
2360 			    DEVNAME(sc));
2361 			return ETIMEDOUT;
2362 		}
2363 
2364 		err = iwx_force_power_gating(sc);
2365 		if (err)
2366 			return err;
2367 
2368 		/* Reset the entire device */
2369 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2370 		DELAY(5000);
2371 	}
2372 
2373 	err = iwx_apm_init(sc);
2374 	if (err)
2375 		return err;
2376 
2377 	iwx_init_msix_hw(sc);
2378 
2379 	iwx_enable_rfkill_int(sc);
2380 	iwx_check_rfkill(sc);
2381 
2382 	return 0;
2383 }
2384 
2385 void
2386 iwx_stop_device(struct iwx_softc *sc)
2387 {
2388 	struct ieee80211com *ic = &sc->sc_ic;
2389 	struct ieee80211_node *ni = ic->ic_bss;
2390 	int i;
2391 
2392 	iwx_disable_interrupts(sc);
2393 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2394 
2395 	iwx_disable_rx_dma(sc);
2396 	iwx_reset_rx_ring(sc, &sc->rxq);
2397 	for (i = 0; i < nitems(sc->txq); i++)
2398 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2399 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2400 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2401 		if (ba->ba_state != IEEE80211_BA_AGREED)
2402 			continue;
2403 		ieee80211_delba_request(ic, ni, 0, 1, i);
2404 	}
2405 
2406 	/* Make sure we've released our request to stay awake (may be redundant). */
2407 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2408 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2409 	if (sc->sc_nic_locks > 0)
2410 		printf("%s: %d active NIC locks forcefully cleared\n",
2411 		    DEVNAME(sc), sc->sc_nic_locks);
2412 	sc->sc_nic_locks = 0;
2413 
2414 	/* Stop the device, and put it in low power state */
2415 	iwx_apm_stop(sc);
2416 
2417 	/* Reset the on-board processor. */
2418 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2419 	DELAY(5000);
2420 
2421 	/*
2422 	 * Upon stop, the IVAR table gets erased, so MSI-X won't
2423 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2424 	 * that enables the radio won't fire on the correct irq, and the
2425 	 * driver won't be able to handle the interrupt.
2426 	 * Configure the IVAR table again after reset.
2427 	 */
2428 	iwx_conf_msix_hw(sc, 1);
2429 
2430 	/*
2431 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2432 	 * Clear the interrupt again.
2433 	 */
2434 	iwx_disable_interrupts(sc);
2435 
2436 	/* Even though we stop the HW we still want the RF kill interrupt. */
2437 	iwx_enable_rfkill_int(sc);
2438 	iwx_check_rfkill(sc);
2439 
2440 	iwx_prepare_card_hw(sc);
2441 
2442 	iwx_ctxt_info_free_paging(sc);
2443 }
2444 
2445 void
2446 iwx_nic_config(struct iwx_softc *sc)
2447 {
2448 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2449 	uint32_t mask, val, reg_val = 0;
2450 
2451 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2452 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2453 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2454 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2455 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2456 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2457 
2458 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2459 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2460 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2461 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2462 
2463 	/* radio configuration */
2464 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2465 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2466 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2467 
2468 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2469 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2470 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2471 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2472 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2473 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2474 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2475 
2476 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2477 	val &= ~mask;
2478 	val |= reg_val;
2479 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2480 }
2481 
2482 int
2483 iwx_nic_rx_init(struct iwx_softc *sc)
2484 {
2485 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2486 
2487 	/*
2488 	 * We don't configure the RFH; the firmware will do that.
2489 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2490 	 */
2491 	return 0;
2492 }
2493 
2494 int
2495 iwx_nic_init(struct iwx_softc *sc)
2496 {
2497 	int err;
2498 
2499 	iwx_apm_init(sc);
2500 	iwx_nic_config(sc);
2501 
2502 	err = iwx_nic_rx_init(sc);
2503 	if (err)
2504 		return err;
2505 
2506 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2507 
2508 	return 0;
2509 }
2510 
2511 /* Map a TID to an ieee80211_edca_ac category. */
2512 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2513 	EDCA_AC_BE,
2514 	EDCA_AC_BK,
2515 	EDCA_AC_BK,
2516 	EDCA_AC_BE,
2517 	EDCA_AC_VI,
2518 	EDCA_AC_VI,
2519 	EDCA_AC_VO,
2520 	EDCA_AC_VO,
2521 };
2522 
2523 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2524 const uint8_t iwx_ac_to_tx_fifo[] = {
2525 	IWX_GEN2_EDCA_TX_FIFO_BE,
2526 	IWX_GEN2_EDCA_TX_FIFO_BK,
2527 	IWX_GEN2_EDCA_TX_FIFO_VI,
2528 	IWX_GEN2_EDCA_TX_FIFO_VO,
2529 };
2530 
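/*
 * Enable a Tx queue via the SCD_QUEUE_CFG command.  The firmware
 * response must confirm the queue ID we requested since this driver,
 * unlike iwlwifi, does not support dynamic queue ID assignment.
 */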
2531 int
2532 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2533     int num_slots)
2534 {
2535 	struct iwx_tx_queue_cfg_cmd cmd;
2536 	struct iwx_rx_packet *pkt;
2537 	struct iwx_tx_queue_cfg_rsp *resp;
2538 	struct iwx_host_cmd hcmd = {
2539 		.id = IWX_SCD_QUEUE_CFG,
2540 		.flags = IWX_CMD_WANT_RESP,
2541 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2542 	};
2543 	struct iwx_tx_ring *ring = &sc->txq[qid];
2544 	int err, fwqid;
2545 	uint32_t wr_idx;
2546 	size_t resp_len;
2547 
2548 	iwx_reset_tx_ring(sc, ring);
2549 
2550 	memset(&cmd, 0, sizeof(cmd));
2551 	cmd.sta_id = sta_id;
2552 	cmd.tid = tid;
2553 	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2554 	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2555 	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2556 	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2557 
2558 	hcmd.data[0] = &cmd;
2559 	hcmd.len[0] = sizeof(cmd);
2560 
2561 	err = iwx_send_cmd(sc, &hcmd);
2562 	if (err)
2563 		return err;
2564 
2565 	pkt = hcmd.resp_pkt;
2566 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2567 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2568 		err = EIO;
2569 		goto out;
2570 	}
2571 
2572 	resp_len = iwx_rx_packet_payload_len(pkt);
2573 	if (resp_len != sizeof(*resp)) {
2574 		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2575 		err = EIO;
2576 		goto out;
2577 	}
2578 
2579 	resp = (void *)pkt->data;
2580 	fwqid = le16toh(resp->queue_number);
2581 	wr_idx = le16toh(resp->write_pointer);
2582 
2583 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2584 	if (fwqid != qid) {
2585 		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2586 		err = EIO;
2587 		goto out;
2588 	}
2589 
2590 	if (wr_idx != ring->cur) {
2591 		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2592 		err = EIO;
2593 		goto out;
2594 	}
2595 
2596 	sc->qenablemsk |= (1 << qid);
2597 	ring->tid = tid;
2598 out:
2599 	iwx_free_resp(sc, &hcmd);
2600 	return err;
2601 }
2602 
2603 int
2604 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2605 {
2606 	struct iwx_tx_queue_cfg_cmd cmd;
2607 	struct iwx_rx_packet *pkt;
2608 	struct iwx_tx_queue_cfg_rsp *resp;
2609 	struct iwx_host_cmd hcmd = {
2610 		.id = IWX_SCD_QUEUE_CFG,
2611 		.flags = IWX_CMD_WANT_RESP,
2612 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2613 	};
2614 	struct iwx_tx_ring *ring = &sc->txq[qid];
2615 	int err;
2616 
2617 	memset(&cmd, 0, sizeof(cmd));
2618 	cmd.sta_id = sta_id;
2619 	cmd.tid = tid;
2620 	cmd.flags = htole16(0); /* clear "queue enabled" flag */
2621 	cmd.cb_size = htole32(0);
2622 	cmd.byte_cnt_addr = htole64(0);
2623 	cmd.tfdq_addr = htole64(0);
2624 
2625 	hcmd.data[0] = &cmd;
2626 	hcmd.len[0] = sizeof(cmd);
2627 
2628 	err = iwx_send_cmd(sc, &hcmd);
2629 	if (err)
2630 		return err;
2631 
2632 	pkt = hcmd.resp_pkt;
2633 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2634 		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2635 		err = EIO;
2636 		goto out;
2637 	}
2638 
2639 	sc->qenablemsk &= ~(1 << qid);
2640 	iwx_reset_tx_ring(sc, ring);
2641 out:
2642 	iwx_free_resp(sc, &hcmd);
2643 	return err;
2644 }
2645 
2646 void
2647 iwx_post_alive(struct iwx_softc *sc)
2648 {
2649 	iwx_ict_reset(sc);
2650 }
2651 
2652 /*
2653  * For the high-priority TE, use a time event type that has a priority
2654  * similar to the FW's action scan priority.
2655  */
2656 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2657 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2658 
2659 int
2660 iwx_send_time_event_cmd(struct iwx_softc *sc,
2661     const struct iwx_time_event_cmd *cmd)
2662 {
2663 	struct iwx_rx_packet *pkt;
2664 	struct iwx_time_event_resp *resp;
2665 	struct iwx_host_cmd hcmd = {
2666 		.id = IWX_TIME_EVENT_CMD,
2667 		.flags = IWX_CMD_WANT_RESP,
2668 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2669 	};
2670 	uint32_t resp_len;
2671 	int err;
2672 
2673 	hcmd.data[0] = cmd;
2674 	hcmd.len[0] = sizeof(*cmd);
2675 	err = iwx_send_cmd(sc, &hcmd);
2676 	if (err)
2677 		return err;
2678 
2679 	pkt = hcmd.resp_pkt;
2680 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2681 		err = EIO;
2682 		goto out;
2683 	}
2684 
2685 	resp_len = iwx_rx_packet_payload_len(pkt);
2686 	if (resp_len != sizeof(*resp)) {
2687 		err = EIO;
2688 		goto out;
2689 	}
2690 
2691 	resp = (void *)pkt->data;
2692 	if (le32toh(resp->status) == 0)
2693 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2694 	else
2695 		err = EIO;
2696 out:
2697 	iwx_free_resp(sc, &hcmd);
2698 	return err;
2699 }
2700 
2701 int
2702 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2703     uint32_t duration)
2704 {
2705 	struct iwx_session_prot_cmd cmd = {
2706 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2707 		    in->in_color)),
2708 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2709 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2710 		.duration_tu = htole32(duration * IEEE80211_DUR_TU),
2711 	};
2712 	uint32_t cmd_id;
2713 
2714 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2715 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2716 }
2717 
2718 /*
2719  * NVM read access and content parsing.  We do not support
2720  * external NVM or writing NVM.
2721  */
2722 
2723 uint8_t
2724 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2725 {
2726 	uint8_t tx_ant;
2727 
2728 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2729 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2730 
2731 	if (sc->sc_nvm.valid_tx_ant)
2732 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2733 
2734 	return tx_ant;
2735 }
2736 
2737 uint8_t
2738 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2739 {
2740 	uint8_t rx_ant;
2741 
2742 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2743 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2744 
2745 	if (sc->sc_nvm.valid_rx_ant)
2746 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2747 
2748 	return rx_ant;
2749 }
2750 
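/*
 * Build net80211's channel list from the NVM channel profile.  The v3
 * and v4 response formats differ only in the width of the per-channel
 * flags: 16 bits in v3 versus 32 bits in v4.
 */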
2751 void
2752 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2753     uint32_t *channel_profile_v4, int nchan_profile)
2754 {
2755 	struct ieee80211com *ic = &sc->sc_ic;
2756 	struct iwx_nvm_data *data = &sc->sc_nvm;
2757 	int ch_idx;
2758 	struct ieee80211_channel *channel;
2759 	uint32_t ch_flags;
2760 	int is_5ghz;
2761 	int flags, hw_value;
2762 	int nchan;
2763 	const uint8_t *nvm_channels;
2764 
2765 	if (sc->sc_uhb_supported) {
2766 		nchan = nitems(iwx_nvm_channels_uhb);
2767 		nvm_channels = iwx_nvm_channels_uhb;
2768 	} else {
2769 		nchan = nitems(iwx_nvm_channels_8000);
2770 		nvm_channels = iwx_nvm_channels_8000;
2771 	}
2772 
2773 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2774 		if (channel_profile_v4)
2775 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2776 		else
2777 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2778 
2779 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2780 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2781 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2782 
2783 		hw_value = nvm_channels[ch_idx];
2784 		channel = &ic->ic_channels[hw_value];
2785 
2786 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2787 			channel->ic_freq = 0;
2788 			channel->ic_flags = 0;
2789 			continue;
2790 		}
2791 
2792 		if (!is_5ghz) {
2793 			flags = IEEE80211_CHAN_2GHZ;
2794 			channel->ic_flags
2795 			    = IEEE80211_CHAN_CCK
2796 			    | IEEE80211_CHAN_OFDM
2797 			    | IEEE80211_CHAN_DYN
2798 			    | IEEE80211_CHAN_2GHZ;
2799 		} else {
2800 			flags = IEEE80211_CHAN_5GHZ;
2801 			channel->ic_flags =
2802 			    IEEE80211_CHAN_A;
2803 		}
2804 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2805 
2806 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2807 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2808 
2809 		if (data->sku_cap_11n_enable) {
2810 			channel->ic_flags |= IEEE80211_CHAN_HT;
2811 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
2812 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
2813 		}
2814 	}
2815 }
2816 
2817 int
2818 iwx_mimo_enabled(struct iwx_softc *sc)
2819 {
2820 	struct ieee80211com *ic = &sc->sc_ic;
2821 
2822 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2823 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2824 }
2825 
2826 void
2827 iwx_setup_ht_rates(struct iwx_softc *sc)
2828 {
2829 	struct ieee80211com *ic = &sc->sc_ic;
2830 	uint8_t rx_ant;
2831 
2832 	/* TX is supported with the same MCS as RX. */
2833 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2834 
2835 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2836 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2837 
2838 	if (!iwx_mimo_enabled(sc))
2839 		return;
2840 
2841 	rx_ant = iwx_fw_valid_rx_ant(sc);
2842 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2843 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2844 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2845 }
2846 
2847 void
2848 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2849     uint16_t ssn, uint16_t buf_size)
2850 {
2851 	reorder_buf->head_sn = ssn;
2852 	reorder_buf->num_stored = 0;
2853 	reorder_buf->buf_size = buf_size;
2854 	reorder_buf->last_amsdu = 0;
2855 	reorder_buf->last_sub_index = 0;
2856 	reorder_buf->removed = 0;
2857 	reorder_buf->valid = 0;
2858 	reorder_buf->consec_oldsn_drops = 0;
2859 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2860 	reorder_buf->consec_oldsn_prev_drop = 0;
2861 }
2862 
2863 void
2864 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2865 {
2866 	int i;
2867 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2868 	struct iwx_reorder_buf_entry *entry;
2869 
2870 	for (i = 0; i < reorder_buf->buf_size; i++) {
2871 		entry = &rxba->entries[i];
2872 		ml_purge(&entry->frames);
2873 		timerclear(&entry->reorder_time);
2874 	}
2875 
2876 	reorder_buf->removed = 1;
2877 	timeout_del(&reorder_buf->reorder_timer);
2878 	timerclear(&rxba->last_rx);
2879 	timeout_del(&rxba->session_timer);
2880 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2881 }
2882 
2883 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2884 
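/*
 * Session timer callback for an RX block ack agreement.  If frames
 * were received within the timeout the timer is re-armed; otherwise
 * net80211 is asked to tear the agreement down.
 */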
2885 void
2886 iwx_rx_ba_session_expired(void *arg)
2887 {
2888 	struct iwx_rxba_data *rxba = arg;
2889 	struct iwx_softc *sc = rxba->sc;
2890 	struct ieee80211com *ic = &sc->sc_ic;
2891 	struct ieee80211_node *ni = ic->ic_bss;
2892 	struct timeval now, timeout, expiry;
2893 	int s;
2894 
2895 	s = splnet();
2896 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2897 	    ic->ic_state == IEEE80211_S_RUN &&
2898 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2899 		getmicrouptime(&now);
2900 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2901 		timeradd(&rxba->last_rx, &timeout, &expiry);
2902 		if (timercmp(&now, &expiry, <)) {
2903 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
2904 		} else {
2905 			ic->ic_stats.is_ht_rx_ba_timeout++;
2906 			ieee80211_delba_request(ic, ni,
2907 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2908 		}
2909 	}
2910 	splx(s);
2911 }
2912 
2913 void
2914 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
2915     struct iwx_rx_data *data, struct mbuf_list *ml)
2916 {
2917 	struct ieee80211com *ic = &sc->sc_ic;
2918 	struct ieee80211_node *ni = ic->ic_bss;
2919 	struct iwx_bar_frame_release *release = (void *)pkt->data;
2920 	struct iwx_reorder_buffer *buf;
2921 	struct iwx_rxba_data *rxba;
2922 	unsigned int baid, nssn, sta_id, tid;
2923 
2924 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
2925 		return;
2926 
2927 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
2928 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
2929 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2930 	    baid >= nitems(sc->sc_rxba_data))
2931 		return;
2932 
2933 	rxba = &sc->sc_rxba_data[baid];
2934 	if (rxba == NULL || rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
2935 		return;
2936 
2937 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
2938 	sta_id = (le32toh(release->sta_tid) &
2939 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
2940 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
2941 		return;
2942 
2943 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
2944 	buf = &rxba->reorder_buf;
2945 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
2946 }
2947 
2948 void
2949 iwx_reorder_timer_expired(void *arg)
2950 {
2951 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2952 	struct iwx_reorder_buffer *buf = arg;
2953 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2954 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2955 	struct iwx_softc *sc = rxba->sc;
2956 	struct ieee80211com *ic = &sc->sc_ic;
2957 	struct ieee80211_node *ni = ic->ic_bss;
2958 	int i, s;
2959 	uint16_t sn = 0, index = 0;
2960 	int expired = 0;
2961 	int cont = 0;
2962 	struct timeval now, timeout, expiry;
2963 
2964 	if (!buf->num_stored || buf->removed)
2965 		return;
2966 
2967 	s = splnet();
2968 	getmicrouptime(&now);
2969 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2970 
2971 	for (i = 0; i < buf->buf_size ; i++) {
2972 		index = (buf->head_sn + i) % buf->buf_size;
2973 
2974 		if (ml_empty(&entries[index].frames)) {
2975 			/*
2976 			 * If there is a hole and the next frame didn't expire
2977 			 * we want to break and not advance SN.
2978 			 */
2979 			cont = 0;
2980 			continue;
2981 		}
2982 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
2983 		if (!cont && timercmp(&now, &expiry, <))
2984 			break;
2985 
2986 		expired = 1;
2987 		/* continue until next hole after this expired frame */
2988 		cont = 1;
2989 		sn = (buf->head_sn + (i + 1)) & 0xfff;
2990 	}
2991 
2992 	if (expired) {
2993 		/* SN is set to the last expired frame + 1 */
2994 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
2995 		if_input(&sc->sc_ic.ic_if, &ml);
2996 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
2997 	} else {
2998 		/*
2999 		 * If no frame expired and there are stored frames, index is now
3000 		 * pointing to the first unexpired frame - modify reorder timeout
3001 		 * accordingly.
3002 		 */
3003 		timeout_add_usec(&buf->reorder_timer,
3004 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3005 	}
3006 
3007 	splx(s);
3008 }
3009 
3010 #define IWX_MAX_RX_BA_SESSIONS 16
3011 
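/*
 * Start or stop an RX block ack session for the given TID by sending
 * an ADD_STA command to firmware.  On start, firmware assigns a BAID
 * which indexes the driver's reorder buffer state in sc_rxba_data[].
 */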
3012 void
3013 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3014     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3015 {
3016 	struct ieee80211com *ic = &sc->sc_ic;
3017 	struct iwx_add_sta_cmd cmd;
3018 	struct iwx_node *in = (void *)ni;
3019 	int err, s;
3020 	uint32_t status;
3021 	struct iwx_rxba_data *rxba = NULL;
3022 	uint8_t baid = 0;
3023 
3024 	s = splnet();
3025 
3026 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3027 		ieee80211_addba_req_refuse(ic, ni, tid);
3028 		splx(s);
3029 		return;
3030 	}
3031 
3032 	memset(&cmd, 0, sizeof(cmd));
3033 
3034 	cmd.sta_id = IWX_STATION_ID;
3035 	cmd.mac_id_n_color
3036 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3037 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3038 
3039 	if (start) {
3040 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3041 		cmd.add_immediate_ba_ssn = htole16(ssn);
3042 		cmd.rx_ba_window = htole16(winsize);
3043 	} else {
3044 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3045 	}
3046 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3047 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3048 
3049 	status = IWX_ADD_STA_SUCCESS;
3050 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3051 	    &status);
3052 
3053 	if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3054 		if (start)
3055 			ieee80211_addba_req_refuse(ic, ni, tid);
3056 		splx(s);
3057 		return;
3058 	}
3059 
3060 	/* Deaggregation is done in hardware. */
3061 	if (start) {
3062 		if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3063 			ieee80211_addba_req_refuse(ic, ni, tid);
3064 			splx(s);
3065 			return;
3066 		}
3067 		baid = (status & IWX_ADD_STA_BAID_MASK) >>
3068 		    IWX_ADD_STA_BAID_SHIFT;
3069 		if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3070 		    baid >= nitems(sc->sc_rxba_data)) {
3071 			ieee80211_addba_req_refuse(ic, ni, tid);
3072 			splx(s);
3073 			return;
3074 		}
3075 		rxba = &sc->sc_rxba_data[baid];
3076 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3077 			ieee80211_addba_req_refuse(ic, ni, tid);
3078 			splx(s);
3079 			return;
3080 		}
3081 		rxba->sta_id = IWX_STATION_ID;
3082 		rxba->tid = tid;
3083 		rxba->baid = baid;
3084 		rxba->timeout = timeout_val;
3085 		getmicrouptime(&rxba->last_rx);
3086 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3087 		    winsize);
3088 		if (timeout_val != 0) {
3089 			struct ieee80211_rx_ba *ba;
3090 			timeout_add_usec(&rxba->session_timer,
3091 			    timeout_val);
3092 			/* XXX disable net80211's BA timeout handler */
3093 			ba = &ni->ni_rx_ba[tid];
3094 			ba->ba_timeout_val = 0;
3095 		}
3096 	} else {
3097 		int i;
3098 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3099 			rxba = &sc->sc_rxba_data[i];
3100 			if (rxba->baid ==
3101 			    IWX_RX_REORDER_DATA_INVALID_BAID)
3102 				continue;
3103 			if (rxba->tid != tid)
3104 				continue;
3105 			iwx_clear_reorder_buffer(sc, rxba);
3106 			break;
3107 		}
3108 	}
3109 
3110 	if (start) {
3111 		sc->sc_rx_ba_sessions++;
3112 		ieee80211_addba_req_accept(ic, ni, tid);
3113 	} else if (sc->sc_rx_ba_sessions > 0)
3114 		sc->sc_rx_ba_sessions--;
3115 
3116 	splx(s);
3117 }
3118 
3119 void
3120 iwx_mac_ctxt_task(void *arg)
3121 {
3122 	struct iwx_softc *sc = arg;
3123 	struct ieee80211com *ic = &sc->sc_ic;
3124 	struct iwx_node *in = (void *)ic->ic_bss;
3125 	int err, s = splnet();
3126 
3127 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3128 	    ic->ic_state != IEEE80211_S_RUN) {
3129 		refcnt_rele_wake(&sc->task_refs);
3130 		splx(s);
3131 		return;
3132 	}
3133 
3134 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3135 	if (err)
3136 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3137 
3138 	refcnt_rele_wake(&sc->task_refs);
3139 	splx(s);
3140 }
3141 
3142 void
3143 iwx_phy_ctxt_task(void *arg)
3144 {
3145 	struct iwx_softc *sc = arg;
3146 	struct ieee80211com *ic = &sc->sc_ic;
3147 	struct iwx_node *in = (void *)ic->ic_bss;
3148 	struct ieee80211_node *ni = &in->in_ni;
3149 	uint8_t chains, sco;
3150 	int err, s = splnet();
3151 
3152 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3153 	    ic->ic_state != IEEE80211_S_RUN ||
3154 	    in->in_phyctxt == NULL) {
3155 		refcnt_rele_wake(&sc->task_refs);
3156 		splx(s);
3157 		return;
3158 	}
3159 
3160 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3161 	if (ieee80211_node_supports_ht_chan40(ni))
3162 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3163 	else
3164 		sco = IEEE80211_HTOP0_SCO_SCN;
3165 	if (in->in_phyctxt->sco != sco) {
3166 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3167 		    in->in_phyctxt->channel, chains, chains, 0, sco);
3168 		if (err)
3169 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3170 	}
3171 
3172 	refcnt_rele_wake(&sc->task_refs);
3173 	splx(s);
3174 }
3175 
3176 void
3177 iwx_updatechan(struct ieee80211com *ic)
3178 {
3179 	struct iwx_softc *sc = ic->ic_softc;
3180 
3181 	if (ic->ic_state == IEEE80211_S_RUN &&
3182 	    !task_pending(&sc->newstate_task))
3183 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3184 }
3185 
3186 void
3187 iwx_updateprot(struct ieee80211com *ic)
3188 {
3189 	struct iwx_softc *sc = ic->ic_softc;
3190 
3191 	if (ic->ic_state == IEEE80211_S_RUN &&
3192 	    !task_pending(&sc->newstate_task))
3193 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3194 }
3195 
3196 void
3197 iwx_updateslot(struct ieee80211com *ic)
3198 {
3199 	struct iwx_softc *sc = ic->ic_softc;
3200 
3201 	if (ic->ic_state == IEEE80211_S_RUN &&
3202 	    !task_pending(&sc->newstate_task))
3203 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3204 }
3205 
3206 void
3207 iwx_updateedca(struct ieee80211com *ic)
3208 {
3209 	struct iwx_softc *sc = ic->ic_softc;
3210 
3211 	if (ic->ic_state == IEEE80211_S_RUN &&
3212 	    !task_pending(&sc->newstate_task))
3213 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3214 }
3215 
3216 void
3217 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3218     uint8_t tid)
3219 {
3220 	struct ieee80211com *ic = &sc->sc_ic;
3221 	struct ieee80211_tx_ba *ba;
3222 	int err, qid;
3223 	struct iwx_tx_ring *ring;
3224 
3225 	/* Ensure we can map this TID to an aggregation queue. */
3226 	if (tid >= IWX_MAX_TID_COUNT)
3227 		return;
3228 
3229 	ba = &ni->ni_tx_ba[tid];
3230 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3231 		return;
3232 
3233 	qid = sc->aggqid[tid];
3234 	if (qid == 0) {
3235 		/* Firmware should pick the next unused Tx queue. */
3236 		qid = fls(sc->qenablemsk);
3237 	}
3238 
3239 	/*
3240 	 * Simply enable the queue.
3241 	 * Firmware handles Tx BA session setup and teardown.
3242 	 */
3243 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3244 		if (!iwx_nic_lock(sc)) {
3245 			ieee80211_addba_resp_refuse(ic, ni, tid,
3246 			    IEEE80211_STATUS_UNSPECIFIED);
3247 			return;
3248 		}
3249 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3250 		    IWX_TX_RING_COUNT);
3251 		iwx_nic_unlock(sc);
3252 		if (err) {
3253 			printf("%s: could not enable Tx queue %d "
3254 			    "(error %d)\n", DEVNAME(sc), qid, err);
3255 			ieee80211_addba_resp_refuse(ic, ni, tid,
3256 			    IEEE80211_STATUS_UNSPECIFIED);
3257 			return;
3258 		}
3259 
3260 		ba->ba_winstart = 0;
3261 	} else
3262 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3263 
3264 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3265 
3266 	ring = &sc->txq[qid];
3267 	ba->ba_timeout_val = 0;
3268 	ieee80211_addba_resp_accept(ic, ni, tid);
3269 	sc->aggqid[tid] = qid;
3270 }
3271 
3272 void
3273 iwx_ba_task(void *arg)
3274 {
3275 	struct iwx_softc *sc = arg;
3276 	struct ieee80211com *ic = &sc->sc_ic;
3277 	struct ieee80211_node *ni = ic->ic_bss;
3278 	int s = splnet();
3279 	int tid;
3280 
3281 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3282 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3283 			break;
3284 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3285 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3286 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3287 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3288 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3289 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3290 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3291 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3292 		}
3293 	}
3294 
3295 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3296 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3297 			break;
3298 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3299 			iwx_sta_tx_agg_start(sc, ni, tid);
3300 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3301 		}
3302 	}
3303 
3304 	refcnt_rele_wake(&sc->task_refs);
3305 	splx(s);
3306 }
3307 
3308 /*
3309  * This function is called by the upper layer when an ADDBA request is received
3310  * from another STA and before the ADDBA response is sent.
3311  */
3312 int
3313 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3314     uint8_t tid)
3315 {
3316 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3317 
3318 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3319 	    tid >= IWX_MAX_TID_COUNT)
3320 		return ENOSPC;
3321 
3322 	if (sc->ba_rx.start_tidmask & (1 << tid))
3323 		return EBUSY;
3324 
3325 	sc->ba_rx.start_tidmask |= (1 << tid);
3326 	iwx_add_task(sc, systq, &sc->ba_task);
3327 
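	/*
	 * Returning EBUSY defers the ADDBA response; iwx_ba_task() will
	 * accept or refuse the request via ieee80211_addba_req_accept()
	 * or ieee80211_addba_req_refuse().
	 */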
3328 	return EBUSY;
3329 }
3330 
3331 /*
3332  * This function is called by the upper layer on teardown of an HT-immediate
3333  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3334  */
3335 void
3336 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3337     uint8_t tid)
3338 {
3339 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3340 
3341 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3342 		return;
3343 
3344 	sc->ba_rx.stop_tidmask |= (1 << tid);
3345 	iwx_add_task(sc, systq, &sc->ba_task);
3346 }
3347 
3348 int
3349 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3350     uint8_t tid)
3351 {
3352 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3353 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3354 
3355 	/*
3356 	 * Require a firmware version which uses an internal AUX queue.
3357 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3358 	 */
3359 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3360 		return ENOTSUP;
3361 
3362 	/* Ensure we can map this TID to an aggregation queue. */
3363 	if (tid >= IWX_MAX_TID_COUNT)
3364 		return EINVAL;
3365 
3366 	/* We only support a fixed Tx aggregation window size, for now. */
3367 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3368 		return ENOTSUP;
3369 
3370 	/* Is firmware already using an agg queue with this TID? */
3371 	if (sc->aggqid[tid] != 0)
3372 		return ENOSPC;
3373 
3374 	/* Are we already processing an ADDBA request? */
3375 	if (sc->ba_tx.start_tidmask & (1 << tid))
3376 		return EBUSY;
3377 
3378 	sc->ba_tx.start_tidmask |= (1 << tid);
3379 	iwx_add_task(sc, systq, &sc->ba_task);
3380 
3381 	return EBUSY;
3382 }
3383 
3384 /* Read the MAC address from WFMP registers. */
3385 int
3386 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3387 {
3388 	const uint8_t *hw_addr;
3389 	uint32_t mac_addr0, mac_addr1;
3390 
3391 	if (!iwx_nic_lock(sc))
3392 		return EBUSY;
3393 
3394 	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3395 	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3396 
3397 	hw_addr = (const uint8_t *)&mac_addr0;
3398 	data->hw_addr[0] = hw_addr[3];
3399 	data->hw_addr[1] = hw_addr[2];
3400 	data->hw_addr[2] = hw_addr[1];
3401 	data->hw_addr[3] = hw_addr[0];
3402 
3403 	hw_addr = (const uint8_t *)&mac_addr1;
3404 	data->hw_addr[4] = hw_addr[1];
3405 	data->hw_addr[5] = hw_addr[0];
3406 
3407 	iwx_nic_unlock(sc);
3408 	return 0;
3409 }
3410 
3411 int
3412 iwx_is_valid_mac_addr(const uint8_t *addr)
3413 {
3414 	static const uint8_t reserved_mac[] = {
3415 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3416 	};
3417 
3418 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3419 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3420 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3421 	    !ETHER_IS_MULTICAST(addr));
3422 }
3423 
3424 int
3425 iwx_nvm_get(struct iwx_softc *sc)
3426 {
3427 	struct iwx_nvm_get_info cmd = {};
3428 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3429 	struct iwx_host_cmd hcmd = {
3430 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3431 		.data = { &cmd, },
3432 		.len = { sizeof(cmd) },
3433 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3434 		    IWX_NVM_GET_INFO)
3435 	};
3436 	int err;
3437 	uint32_t mac_flags;
3438 	/*
3439 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3440 	 * in v3, except for the channel profile part of the
3441 	 * regulatory data.  So we can just access the new struct,
3442 	 * with the exception of the channel profile.
3443 	 */
3444 	struct iwx_nvm_get_info_rsp *rsp;
3445 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3446 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3447 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3448 
3449 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3450 	err = iwx_send_cmd(sc, &hcmd);
3451 	if (err)
3452 		return err;
3453 
3454 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3455 		err = EIO;
3456 		goto out;
3457 	}
3458 
3459 	memset(nvm, 0, sizeof(*nvm));
3460 
3461 	iwx_set_mac_addr_from_csr(sc, nvm);
3462 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3463 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3464 		err = EINVAL;
3465 		goto out;
3466 	}
3467 
3468 	rsp = (void *)hcmd.resp_pkt->data;
3469 
3470 	/* Initialize general data */
3471 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3472 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3473 
3474 	/* Initialize MAC sku data */
3475 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3476 	nvm->sku_cap_11ac_enable =
3477 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3478 	nvm->sku_cap_11n_enable =
3479 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3480 	nvm->sku_cap_11ax_enable =
3481 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3482 	nvm->sku_cap_band_24GHz_enable =
3483 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3484 	nvm->sku_cap_band_52GHz_enable =
3485 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3486 	nvm->sku_cap_mimo_disable =
3487 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3488 
3489 	/* Initialize PHY sku data */
3490 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3491 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3492 
3493 	if (le32toh(rsp->regulatory.lar_enabled) &&
3494 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3495 		nvm->lar_enabled = 1;
3496 	}
3497 
3498 	if (v4) {
3499 		iwx_init_channel_map(sc, NULL,
3500 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3501 	} else {
3502 		rsp_v3 = (void *)rsp;
3503 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3504 		    NULL, IWX_NUM_CHANNELS_V1);
3505 	}
3506 out:
3507 	iwx_free_resp(sc, &hcmd);
3508 	return err;
3509 }
3510 
3511 int
3512 iwx_load_firmware(struct iwx_softc *sc)
3513 {
3514 	struct iwx_fw_sects *fws;
3515 	int err;
3516 
3517 	splassert(IPL_NET);
3518 
3519 	sc->sc_uc.uc_intr = 0;
3520 	sc->sc_uc.uc_ok = 0;
3521 
3522 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3523 	err = iwx_ctxt_info_init(sc, fws);
3524 	if (err) {
3525 		printf("%s: could not init context info\n", DEVNAME(sc));
3526 		return err;
3527 	}
3528 
3529 	/* wait for the firmware to load */
3530 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3531 	if (err || !sc->sc_uc.uc_ok) {
3532 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3533 		iwx_ctxt_info_free_paging(sc);
3534 	}
3535 
3536 	iwx_ctxt_info_free_fw_img(sc);
3537 
3538 	if (!sc->sc_uc.uc_ok)
3539 		return EINVAL;
3540 
3541 	return err;
3542 }
3543 
3544 int
3545 iwx_start_fw(struct iwx_softc *sc)
3546 {
3547 	int err;
3548 
3549 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3550 
3551 	iwx_disable_interrupts(sc);
3552 
3553 	/* make sure rfkill handshake bits are cleared */
3554 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3555 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3556 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3557 
3558 	/* clear (again), then enable firmware load interrupt */
3559 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
3560 
3561 	err = iwx_nic_init(sc);
3562 	if (err) {
3563 		printf("%s: unable to init nic\n", DEVNAME(sc));
3564 		return err;
3565 	}
3566 
3567 	iwx_enable_fwload_interrupt(sc);
3568 
3569 	return iwx_load_firmware(sc);
3570 }
3571 
3572 int
3573 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3574 {
3575 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3576 		.valid = htole32(valid_tx_ant),
3577 	};
3578 
3579 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3580 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3581 }
3582 
3583 int
3584 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3585 {
3586 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3587 
3588 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3589 	phy_cfg_cmd.calib_control.event_trigger =
3590 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3591 	phy_cfg_cmd.calib_control.flow_trigger =
3592 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3593 
3594 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3595 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3596 }
3597 
3598 int
3599 iwx_send_dqa_cmd(struct iwx_softc *sc)
3600 {
3601 	struct iwx_dqa_enable_cmd dqa_cmd = {
3602 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3603 	};
3604 	uint32_t cmd_id;
3605 
3606 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3607 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3608 }
3609 
3610 int
3611 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3612 {
3613 	int err;
3614 
3615 	err = iwx_read_firmware(sc);
3616 	if (err)
3617 		return err;
3618 
3619 	err = iwx_start_fw(sc);
3620 	if (err)
3621 		return err;
3622 
3623 	iwx_post_alive(sc);
3624 
3625 	return 0;
3626 }
3627 
3628 int
3629 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3630 {
3631 	const int wait_flags = IWX_INIT_COMPLETE;
3632 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
3633 	struct iwx_init_extended_cfg_cmd init_cfg = {
3634 		.init_flags = htole32(IWX_INIT_NVM),
3635 	};
3636 	int err, s;
3637 
3638 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3639 		printf("%s: radio is disabled by hardware switch\n",
3640 		    DEVNAME(sc));
3641 		return EPERM;
3642 	}
3643 
3644 	s = splnet();
3645 	sc->sc_init_complete = 0;
3646 	err = iwx_load_ucode_wait_alive(sc);
3647 	if (err) {
3648 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3649 		splx(s);
3650 		return err;
3651 	}
3652 
3653 	/*
3654 	 * Send init config command to mark that we are sending NVM
3655 	 * access commands
3656 	 */
3657 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3658 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3659 	if (err) {
3660 		splx(s);
3661 		return err;
3662 	}
3663 
3664 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3665 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3666 	if (err) {
3667 		splx(s);
3668 		return err;
3669 	}
3670 
3671 	/* Wait for the init complete notification from the firmware. */
3672 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3673 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3674 		    SEC_TO_NSEC(2));
3675 		if (err) {
3676 			splx(s);
3677 			return err;
3678 		}
3679 	}
3680 	splx(s);
3681 	if (readnvm) {
3682 		err = iwx_nvm_get(sc);
3683 		if (err) {
3684 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3685 			return err;
3686 		}
3687 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3688 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3689 			    sc->sc_nvm.hw_addr);
3690 
3691 	}
3692 	return 0;
3693 }
3694 
3695 int
3696 iwx_config_ltr(struct iwx_softc *sc)
3697 {
3698 	struct iwx_ltr_config_cmd cmd = {
3699 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3700 	};
3701 
3702 	if (!sc->sc_ltr_enabled)
3703 		return 0;
3704 
3705 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3706 }
3707 
3708 void
3709 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3710 {
3711 	struct iwx_rx_data *data = &ring->data[idx];
3712 
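	/*
	 * Each free RX descriptor is a single 64-bit value: the DMA
	 * address of the buffer with the 12-bit buffer index folded
	 * into the low bits (the buffer address is expected to be
	 * 4 KB aligned, leaving those bits otherwise zero).
	 */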
3713 	((uint64_t *)ring->desc)[idx] =
3714 	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3715 	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3716 	    idx * sizeof(uint64_t), sizeof(uint64_t),
3717 	    BUS_DMASYNC_PREWRITE);
3718 }
3719 
3720 int
3721 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3722 {
3723 	struct iwx_rx_ring *ring = &sc->rxq;
3724 	struct iwx_rx_data *data = &ring->data[idx];
3725 	struct mbuf *m;
3726 	int err;
3727 	int fatal = 0;
3728 
3729 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3730 	if (m == NULL)
3731 		return ENOBUFS;
3732 
3733 	if (size <= MCLBYTES) {
3734 		MCLGET(m, M_DONTWAIT);
3735 	} else {
3736 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3737 	}
3738 	if ((m->m_flags & M_EXT) == 0) {
3739 		m_freem(m);
3740 		return ENOBUFS;
3741 	}
3742 
3743 	if (data->m != NULL) {
3744 		bus_dmamap_unload(sc->sc_dmat, data->map);
3745 		fatal = 1;
3746 	}
3747 
3748 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3749 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3750 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3751 	if (err) {
3752 		/* XXX */
3753 		if (fatal)
3754 			panic("%s: could not load RX mbuf", DEVNAME(sc));
3755 		m_freem(m);
3756 		return err;
3757 	}
3758 	data->m = m;
3759 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3760 
3761 	/* Update RX descriptor. */
3762 	iwx_update_rx_desc(sc, ring, idx);
3763 
3764 	return 0;
3765 }
3766 
3767 int
3768 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3769     struct iwx_rx_mpdu_desc *desc)
3770 {
3771 	int energy_a, energy_b;
3772 
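	/*
	 * Firmware reports per-chain energy as a positive value in
	 * negated dBm; a reading of 0 means the chain saw no signal,
	 * so treat it as the -256 floor below.
	 */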
3773 	energy_a = desc->v1.energy_a;
3774 	energy_b = desc->v1.energy_b;
3775 	energy_a = energy_a ? -energy_a : -256;
3776 	energy_b = energy_b ? -energy_b : -256;
3777 	return MAX(energy_a, energy_b);
3778 }
3779 
3780 void
3781 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3782     struct iwx_rx_data *data)
3783 {
3784 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3785 
3786 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3787 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3788 
3789 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3790 }
3791 
3792 /*
3793  * Retrieve the average noise (in dBm) among receivers.
3794  */
3795 int
3796 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3797 {
3798 	int i, total, nbant, noise;
3799 
3800 	total = nbant = noise = 0;
3801 	for (i = 0; i < 3; i++) {
3802 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3803 		if (noise) {
3804 			total += noise;
3805 			nbant++;
3806 		}
3807 	}
3808 
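	/*
	 * Worked example: silence readings of 40, 42 and 0 give
	 * total = 82 over nbant = 2 antennas, so the reported noise
	 * is (82 / 2) - 107 = -66 dBm.
	 */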
3809 	/* There should be at least one antenna but check anyway. */
3810 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3811 }
3812 
3813 int
3814 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3815     struct ieee80211_rxinfo *rxi)
3816 {
3817 	struct ieee80211com *ic = &sc->sc_ic;
3818 	struct ieee80211_key *k;
3819 	struct ieee80211_frame *wh;
3820 	uint64_t pn, *prsc;
3821 	uint8_t *ivp;
3822 	uint8_t tid;
3823 	int hdrlen, hasqos;
3824 
3825 	wh = mtod(m, struct ieee80211_frame *);
3826 	hdrlen = ieee80211_get_hdrlen(wh);
3827 	ivp = (uint8_t *)wh + hdrlen;
3828 
3829 	/* find key for decryption */
3830 	k = ieee80211_get_rxkey(ic, m, ni);
3831 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3832 		return 1;
3833 
3834 	/* Check that the ExtIV bit is set. */
3835 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3836 		return 1;
3837 
3838 	hasqos = ieee80211_has_qos(wh);
3839 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3840 	prsc = &k->k_rsc[tid];
3841 
3842 	/* Extract the 48-bit PN from the CCMP header. */
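	/*
	 * The 8-byte CCMP header is laid out as PN0 PN1 <reserved>
	 * <key-id/ExtIV> PN2 PN3 PN4 PN5, which is why bytes 2 and 3
	 * are skipped below.
	 */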
3843 	pn = (uint64_t)ivp[0]       |
3844 	     (uint64_t)ivp[1] <<  8 |
3845 	     (uint64_t)ivp[4] << 16 |
3846 	     (uint64_t)ivp[5] << 24 |
3847 	     (uint64_t)ivp[6] << 32 |
3848 	     (uint64_t)ivp[7] << 40;
3849 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3850 		if (pn < *prsc) {
3851 			ic->ic_stats.is_ccmp_replays++;
3852 			return 1;
3853 		}
3854 	} else if (pn <= *prsc) {
3855 		ic->ic_stats.is_ccmp_replays++;
3856 		return 1;
3857 	}
3858 	/* Last seen packet number is updated in ieee80211_inputm(). */
3859 
3860 	/*
3861 	 * Some firmware versions strip the MIC, and some don't. It is not
3862 	 * clear which of the capability flags could tell us what to expect.
3863 	 * For now, keep things simple and just leave the MIC in place if
3864 	 * it is present.
3865 	 *
3866 	 * The IV will be stripped by ieee80211_inputm().
3867 	 */
3868 	return 0;
3869 }
3870 
3871 int
3872 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3873     struct ieee80211_rxinfo *rxi)
3874 {
3875 	struct ieee80211com *ic = &sc->sc_ic;
3876 	struct ifnet *ifp = IC2IFP(ic);
3877 	struct ieee80211_frame *wh;
3878 	struct ieee80211_node *ni;
3879 	int ret = 0;
3880 	uint8_t type, subtype;
3881 
3882 	wh = mtod(m, struct ieee80211_frame *);
3883 
3884 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3885 	if (type == IEEE80211_FC0_TYPE_CTL)
3886 		return 0;
3887 
3888 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3889 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3890 		return 0;
3891 
3892 	ni = ieee80211_find_rxnode(ic, wh);
3893 	/* Handle hardware decryption. */
3894 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3895 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3896 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3897 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3898 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3899 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3900 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3901 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3902 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3903 			ic->ic_stats.is_ccmp_dec_errs++;
3904 			ret = 1;
3905 			goto out;
3906 		}
3907 		/* Check whether decryption was successful or not. */
3908 		if ((rx_pkt_status &
3909 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3910 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3911 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3912 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3913 			ic->ic_stats.is_ccmp_dec_errs++;
3914 			ret = 1;
3915 			goto out;
3916 		}
3917 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3918 	}
3919 out:
3920 	if (ret)
3921 		ifp->if_ierrors++;
3922 	ieee80211_release_node(ic, ni);
3923 	return ret;
3924 }
3925 
3926 void
3927 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3928     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3929     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3930     struct mbuf_list *ml)
3931 {
3932 	struct ieee80211com *ic = &sc->sc_ic;
3933 	struct ifnet *ifp = IC2IFP(ic);
3934 	struct ieee80211_frame *wh;
3935 	struct ieee80211_node *ni;
3936 	struct ieee80211_channel *bss_chan;
3937 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3938 
3939 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3940 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3941 
3942 	wh = mtod(m, struct ieee80211_frame *);
3943 	ni = ieee80211_find_rxnode(ic, wh);
3944 	if (ni == ic->ic_bss) {
3945 		/*
3946 		 * We may switch ic_bss's channel during scans.
3947 		 * Record the current channel so we can restore it later.
3948 		 */
3949 		bss_chan = ni->ni_chan;
3950 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3951 	}
3952 	ni->ni_chan = &ic->ic_channels[chanidx];
3953 
3954 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
3955 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
3956 		ifp->if_ierrors++;
3957 		m_freem(m);
3958 		ieee80211_release_node(ic, ni);
3959 		return;
3960 	}
3961 
3962 #if NBPFILTER > 0
3963 	if (sc->sc_drvbpf != NULL) {
3964 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3965 		uint16_t chan_flags;
3966 
3967 		tap->wr_flags = 0;
3968 		if (is_shortpre)
3969 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3970 		tap->wr_chan_freq =
3971 		    htole16(ic->ic_channels[chanidx].ic_freq);
3972 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3973 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3974 			chan_flags &= ~IEEE80211_CHAN_HT;
3975 		tap->wr_chan_flags = htole16(chan_flags);
3976 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3977 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3978 		tap->wr_tsft = device_timestamp;
3979 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3980 			uint8_t mcs = (rate_n_flags &
3981 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3982 			    IWX_RATE_HT_MCS_NSS_MSK));
3983 			tap->wr_rate = (0x80 | mcs);
3984 		} else {
3985 			uint8_t rate = (rate_n_flags &
3986 			    IWX_RATE_LEGACY_RATE_MSK);
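			/*
			 * Map the hardware rate code to a radiotap rate
			 * in 500 kb/s units (e.g. 12 is 6 Mb/s OFDM).
			 */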
3987 			switch (rate) {
3988 			/* CCK rates. */
3989 			case  10: tap->wr_rate =   2; break;
3990 			case  20: tap->wr_rate =   4; break;
3991 			case  55: tap->wr_rate =  11; break;
3992 			case 110: tap->wr_rate =  22; break;
3993 			/* OFDM rates. */
3994 			case 0xd: tap->wr_rate =  12; break;
3995 			case 0xf: tap->wr_rate =  18; break;
3996 			case 0x5: tap->wr_rate =  24; break;
3997 			case 0x7: tap->wr_rate =  36; break;
3998 			case 0x9: tap->wr_rate =  48; break;
3999 			case 0xb: tap->wr_rate =  72; break;
4000 			case 0x1: tap->wr_rate =  96; break;
4001 			case 0x3: tap->wr_rate = 108; break;
4002 			/* Unknown rate: should not happen. */
4003 			default:  tap->wr_rate =   0;
4004 			}
4005 		}
4006 
4007 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4008 		    m, BPF_DIRECTION_IN);
4009 	}
4010 #endif
4011 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4012 	/*
4013 	 * ieee80211_inputm() might have changed our BSS.
4014 	 * Restore ic_bss's channel if we are still in the same BSS.
4015 	 */
4016 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4017 		ni->ni_chan = bss_chan;
4018 	ieee80211_release_node(ic, ni);
4019 }
4020 
4021 /*
4022  * Drop duplicate 802.11 retransmissions
4023  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4024  * and handle pseudo-duplicate frames which result from deaggregation
4025  * of A-MSDU frames in hardware.
4026  */
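/*
 * For illustration: an A-MSDU deaggregated in hardware into three
 * subframes yields three MPDUs which share one sequence number and
 * carry subframe indices 0, 1, 2. A genuine retransmission instead
 * repeats an already-seen (sequence number, subframe index) pair.
 */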
4027 int
4028 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4029     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4030 {
4031 	struct ieee80211com *ic = &sc->sc_ic;
4032 	struct iwx_node *in = (void *)ic->ic_bss;
4033 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4034 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4035 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4036 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4037 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4038 	int hasqos = ieee80211_has_qos(wh);
4039 	uint16_t seq;
4040 
4041 	if (type == IEEE80211_FC0_TYPE_CTL ||
4042 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4043 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4044 		return 0;
4045 
4046 	if (hasqos) {
4047 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4048 		if (tid > IWX_MAX_TID_COUNT)
4049 			tid = IWX_MAX_TID_COUNT;
4050 	}
4051 
4052 	/* If this frame was not part of an A-MSDU, the subframe index will be 0. */
4053 	subframe_idx = desc->amsdu_info &
4054 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4055 
4056 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4057 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4058 	    dup_data->last_seq[tid] == seq &&
4059 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4060 		return 1;
4061 
4062 	/*
4063 	 * Allow the same frame sequence number for all A-MSDU subframes
4064 	 * following the first subframe.
4065 	 * Otherwise these subframes would be discarded as replays.
4066 	 */
4067 	if (dup_data->last_seq[tid] == seq &&
4068 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4069 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4070 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4071 	}
4072 
4073 	dup_data->last_seq[tid] = seq;
4074 	dup_data->last_sub_frame[tid] = subframe_idx;
4075 
4076 	return 0;
4077 }
4078 
4079 /*
4080  * Returns true if sn2 - buffer_size < sn1 < sn2.
4081  * To be used only in order to compare reorder buffer head with NSSN.
4082  * We fully trust NSSN unless it is behind us due to reorder timeout.
4083  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4084  */
4085 int
4086 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4087 {
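	/*
	 * For example, with buffer_size 64 and sn2 = 100 (arithmetic is
	 * modulo 4096): sn1 = 50 yields true, sn1 = 20 is too far behind
	 * and yields false, and sn1 = 100 yields false.
	 */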
4088 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4089 }
4090 
4091 void
4092 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4093     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4094     uint16_t nssn, struct mbuf_list *ml)
4095 {
4096 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4097 	uint16_t ssn = reorder_buf->head_sn;
4098 
4099 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4100 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4101 		goto set_timer;
4102 
4103 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4104 		int index = ssn % reorder_buf->buf_size;
4105 		struct mbuf *m;
4106 		int chanidx, is_shortpre;
4107 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4108 		struct ieee80211_rxinfo *rxi;
4109 
4110 		/* This data is the same for all A-MSDU subframes. */
4111 		chanidx = entries[index].chanidx;
4112 		rx_pkt_status = entries[index].rx_pkt_status;
4113 		is_shortpre = entries[index].is_shortpre;
4114 		rate_n_flags = entries[index].rate_n_flags;
4115 		device_timestamp = entries[index].device_timestamp;
4116 		rxi = &entries[index].rxi;
4117 
4118 		/*
4119 		 * Empty the list. It will contain more than one frame if an
4120 		 * A-MSDU was buffered here. An empty list is also valid, since
4121 		 * nssn indicates that frames were received.
4122 		 */
4123 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4124 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4125 			    rate_n_flags, device_timestamp, rxi, ml);
4126 			reorder_buf->num_stored--;
4127 
4128 			/*
4129 			 * Allow the same frame sequence number and CCMP PN for
4130 			 * all A-MSDU subframes following the first subframe.
4131 			 * Otherwise they would be discarded as replays.
4132 			 */
4133 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4134 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4135 		}
4136 
4137 		ssn = (ssn + 1) & 0xfff;
4138 	}
4139 	reorder_buf->head_sn = nssn;
4140 
4141 set_timer:
4142 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4143 		timeout_add_usec(&reorder_buf->reorder_timer,
4144 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4145 	} else
4146 		timeout_del(&reorder_buf->reorder_timer);
4147 }
4148 
4149 int
4150 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4151     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4152 {
4153 	struct ieee80211com *ic = &sc->sc_ic;
4154 
4155 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4156 		/* we have a new (A-)MPDU ... */
4157 
4158 		/*
4159 		 * reset counter to 0 if we didn't have any oldsn in
4160 		 * the last A-MPDU (as detected by GP2 being identical)
4161 		 */
4162 		if (!buffer->consec_oldsn_prev_drop)
4163 			buffer->consec_oldsn_drops = 0;
4164 
4165 		/* either way, update our tracking state */
4166 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4167 	} else if (buffer->consec_oldsn_prev_drop) {
4168 		/*
4169 		 * tracking state didn't change, and we had an old SN
4170 		 * indication before - do nothing in this case, we
4171 		 * already noted this one down and are waiting for the
4172 		 * next A-MPDU (by GP2)
4173 		 */
4174 		return 0;
4175 	}
4176 
4177 	/* return unless this MPDU has old SN */
4178 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4179 		return 0;
4180 
4181 	/* update state */
4182 	buffer->consec_oldsn_prev_drop = 1;
4183 	buffer->consec_oldsn_drops++;
4184 
4185 	/* if limit is reached, send del BA and reset state */
4186 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4187 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4188 		    0, tid);
4189 		buffer->consec_oldsn_prev_drop = 0;
4190 		buffer->consec_oldsn_drops = 0;
4191 		return 1;
4192 	}
4193 
4194 	return 0;
4195 }
4196 
4197 /*
4198  * Handle re-ordering of frames which were de-aggregated in hardware.
4199  * Returns 1 if the MPDU was consumed (buffered or dropped).
4200  * Returns 0 if the MPDU should be passed to upper layer.
4201  */
4202 int
4203 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4204     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4205     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4206     struct mbuf_list *ml)
4207 {
4208 	struct ieee80211com *ic = &sc->sc_ic;
4209 	struct ieee80211_frame *wh;
4210 	struct ieee80211_node *ni;
4211 	struct iwx_rxba_data *rxba;
4212 	struct iwx_reorder_buffer *buffer;
4213 	uint32_t reorder_data = le32toh(desc->reorder_data);
4214 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4215 	int last_subframe =
4216 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4217 	uint8_t tid;
4218 	uint8_t subframe_idx = (desc->amsdu_info &
4219 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4220 	struct iwx_reorder_buf_entry *entries;
4221 	int index;
4222 	uint16_t nssn, sn;
4223 	uint8_t baid, type, subtype;
4224 	int hasqos;
4225 
4226 	wh = mtod(m, struct ieee80211_frame *);
4227 	hasqos = ieee80211_has_qos(wh);
4228 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4229 
4230 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4231 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4232 
4233 	/*
4234 	 * We are only interested in Block Ack requests and unicast QoS data.
4235 	 */
4236 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4237 		return 0;
4238 	if (hasqos) {
4239 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4240 			return 0;
4241 	} else {
4242 		if (type != IEEE80211_FC0_TYPE_CTL ||
4243 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4244 			return 0;
4245 	}
4246 
4247 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4248 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4249 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4250 	    baid >= nitems(sc->sc_rxba_data))
4251 		return 0;
4252 
4253 	rxba = &sc->sc_rxba_data[baid];
4254 	if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4255 		return 0;
4256 
4257 	if (rxba->timeout != 0)
4258 		getmicrouptime(&rxba->last_rx);
4259 
4260 	/* Bypass A-MPDU re-ordering in net80211. */
4261 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4262 
4263 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
4264 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
4265 		IWX_RX_MPDU_REORDER_SN_SHIFT;
4266 
4267 	buffer = &rxba->reorder_buf;
4268 	entries = &rxba->entries[0];
4269 
4270 	if (!buffer->valid) {
4271 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
4272 			return 0;
4273 		buffer->valid = 1;
4274 	}
4275 
4276 	ni = ieee80211_find_rxnode(ic, wh);
4277 	if (type == IEEE80211_FC0_TYPE_CTL &&
4278 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4279 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4280 		goto drop;
4281 	}
4282 
4283 	/*
4284 	 * If there was a significant jump in the nssn - adjust.
4285 	 * If the SN is smaller than the NSSN it might need to first go into
4286 	 * the reorder buffer, in which case we just release up to it and the
4287 	 * rest of the function will take care of storing it and releasing up to
4288 	 * the nssn.
4289 	 */
4290 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4291 	    buffer->buf_size) ||
4292 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4293 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4294 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4295 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4296 	}
4297 
4298 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4299 	    device_timestamp)) {
4300 		 /* BA session will be torn down. */
4301 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4302 		goto drop;
4304 	}
4305 
4306 	/* drop any outdated packets */
4307 	if (SEQ_LT(sn, buffer->head_sn)) {
4308 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4309 		goto drop;
4310 	}
4311 
4312 	/* release immediately if allowed by nssn and no stored frames */
4313 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4314 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4315 		   (!is_amsdu || last_subframe))
4316 			buffer->head_sn = nssn;
4317 		ieee80211_release_node(ic, ni);
4318 		return 0;
4319 	}
4320 
4321 	/*
4322 	 * release immediately if there are no stored frames, and the sn is
4323 	 * equal to the head.
4324 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
4325 	 * When we released everything, and we got the next frame in the
4326 	 * sequence, according to the NSSN we can't release immediately,
4327 	 * while technically there is no hole and we can move forward.
4328 	 */
4329 	if (!buffer->num_stored && sn == buffer->head_sn) {
4330 		if (!is_amsdu || last_subframe)
4331 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4332 		ieee80211_release_node(ic, ni);
4333 		return 0;
4334 	}
4335 
4336 	index = sn % buffer->buf_size;
4337 
4338 	/*
4339 	 * Check if we have already stored this frame.
4340 	 * An A-MSDU is either received as a whole or not at all, so the
4341 	 * logic is simple: if frames are stored at this position and the
4342 	 * last stored frame came from an A-MSDU with a different SN, this
4343 	 * is a retransmission. With the same SN, it is the same A-MSDU
4344 	 * only if the subframe index increments; otherwise a retransmission.
4345 	 */
4346 	if (!ml_empty(&entries[index].frames)) {
4347 		if (!is_amsdu) {
4348 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4349 			goto drop;
4350 		} else if (sn != buffer->last_amsdu ||
4351 		    buffer->last_sub_index >= subframe_idx) {
4352 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4353 			goto drop;
4354 		}
4355 	} else {
4356 		/* This data is the same for all A-MSDU subframes. */
4357 		entries[index].chanidx = chanidx;
4358 		entries[index].is_shortpre = is_shortpre;
4359 		entries[index].rate_n_flags = rate_n_flags;
4360 		entries[index].device_timestamp = device_timestamp;
4361 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4362 	}
4363 
4364 	/* put in reorder buffer */
4365 	ml_enqueue(&entries[index].frames, m);
4366 	buffer->num_stored++;
4367 	getmicrouptime(&entries[index].reorder_time);
4368 
4369 	if (is_amsdu) {
4370 		buffer->last_amsdu = sn;
4371 		buffer->last_sub_index = subframe_idx;
4372 	}
4373 
4374 	/*
4375 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4376 	 * The reason is that NSSN advances on the first sub-frame, and may
4377 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4378 	 * Example: the reorder buffer contains SN 0 and 2, and we receive an
4379 	 * A-MSDU with SN 1. The NSSN for the first subframe will be 3, so the
4380 	 * driver releases SN 0, 1, 2. When subframe 1 arrives, the reorder
4381 	 * buffer is already ahead and the subframe is dropped.
4382 	 * If the last sub-frame is not on this queue - we will get frame
4383 	 * release notification with up to date NSSN.
4384 	 */
4385 	if (!is_amsdu || last_subframe)
4386 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4387 
4388 	ieee80211_release_node(ic, ni);
4389 	return 1;
4390 
4391 drop:
4392 	m_freem(m);
4393 	ieee80211_release_node(ic, ni);
4394 	return 1;
4395 }
4396 
4397 void
4398 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4399     size_t maxlen, struct mbuf_list *ml)
4400 {
4401 	struct ieee80211com *ic = &sc->sc_ic;
4402 	struct ieee80211_rxinfo rxi;
4403 	struct iwx_rx_mpdu_desc *desc;
4404 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4405 	int rssi;
4406 	uint8_t chanidx;
4407 	uint16_t phy_info;
4408 
4409 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
4410 
4411 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4412 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4413 		m_freem(m);
4414 		return; /* drop */
4415 	}
4416 
4417 	len = le16toh(desc->mpdu_len);
4418 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4419 		/* Allow control frames in monitor mode. */
4420 		if (len < sizeof(struct ieee80211_frame_cts)) {
4421 			ic->ic_stats.is_rx_tooshort++;
4422 			IC2IFP(ic)->if_ierrors++;
4423 			m_freem(m);
4424 			return;
4425 		}
4426 	} else if (len < sizeof(struct ieee80211_frame)) {
4427 		ic->ic_stats.is_rx_tooshort++;
4428 		IC2IFP(ic)->if_ierrors++;
4429 		m_freem(m);
4430 		return;
4431 	}
4432 	if (len > maxlen - sizeof(*desc)) {
4433 		IC2IFP(ic)->if_ierrors++;
4434 		m_freem(m);
4435 		return;
4436 	}
4437 
4438 	m->m_data = pktdata + sizeof(*desc);
4439 	m->m_pkthdr.len = m->m_len = len;
4440 
4441 	/* Account for padding following the frame header. */
4442 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4443 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4444 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4445 		if (type == IEEE80211_FC0_TYPE_CTL) {
4446 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4447 			case IEEE80211_FC0_SUBTYPE_CTS:
4448 				hdrlen = sizeof(struct ieee80211_frame_cts);
4449 				break;
4450 			case IEEE80211_FC0_SUBTYPE_ACK:
4451 				hdrlen = sizeof(struct ieee80211_frame_ack);
4452 				break;
4453 			default:
4454 				hdrlen = sizeof(struct ieee80211_frame_min);
4455 				break;
4456 			}
4457 		} else
4458 			hdrlen = ieee80211_get_hdrlen(wh);
4459 
4460 		if ((le16toh(desc->status) &
4461 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4462 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4463 			/* Padding is inserted after the IV. */
4464 			hdrlen += IEEE80211_CCMP_HDRLEN;
4465 		}
4466 
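		/*
		 * The 2 pad bytes sit between the frame header (including
		 * the CCMP IV, if present) and the frame body. Move the
		 * header forward over the pad bytes, then trim the two
		 * now-unused leading bytes.
		 */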
4467 		memmove(m->m_data + 2, m->m_data, hdrlen);
4468 		m_adj(m, 2);
4469 	}
4470 
4471 	memset(&rxi, 0, sizeof(rxi));
4472 
4473 	/*
4474 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4475 	 * in place for each subframe. But it leaves the 'A-MSDU present'
4476 	 * bit set in the frame header. We need to clear this bit ourselves.
4477 	 * (XXX This workaround is not required on AX200/AX201 devices that
4478 	 * have been tested by me, but it's unclear when this problem was
4479 	 * fixed in the hardware. It definitely affects the 9k generation.
4480 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
4481 	 * to exist that we may eventually add support for.)
4482 	 *
4483 	 * And we must allow the same CCMP PN for subframes following the
4484 	 * first subframe. Otherwise they would be discarded as replays.
4485 	 */
4486 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4487 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4488 		uint8_t subframe_idx = (desc->amsdu_info &
4489 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4490 		if (subframe_idx > 0)
4491 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4492 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4493 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4494 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4495 			    struct ieee80211_qosframe_addr4 *);
4496 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4497 		} else if (ieee80211_has_qos(wh) &&
4498 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4499 			struct ieee80211_qosframe *qwh = mtod(m,
4500 			    struct ieee80211_qosframe *);
4501 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4502 		}
4503 	}
4504 
4505 	/*
4506 	 * Verify decryption before duplicate detection. The latter uses
4507 	 * the TID supplied in QoS frame headers and this TID is implicitly
4508 	 * verified as part of the CCMP nonce.
4509 	 */
4510 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
4511 		m_freem(m);
4512 		return;
4513 	}
4514 
4515 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
4516 		m_freem(m);
4517 		return;
4518 	}
4519 
4520 	phy_info = le16toh(desc->phy_info);
4521 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4522 	chanidx = desc->v1.channel;
4523 	device_timestamp = desc->v1.gp2_on_air_rise;
4524 
4525 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
4526 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
4527 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4528 
4529 	rxi.rxi_rssi = rssi;
4530 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4531 
4532 	if (iwx_rx_reorder(sc, m, chanidx, desc,
4533 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4534 	    rate_n_flags, device_timestamp, &rxi, ml))
4535 		return;
4536 
4537 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4538 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4539 	    rate_n_flags, device_timestamp, &rxi, ml);
4540 }
4541 
4542 void
4543 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4544 {
4545 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
4546 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4547 	int i;
4548 
4549 	/* First TB is never cleared - it is bidirectional DMA data. */
4550 	for (i = 1; i < num_tbs; i++) {
4551 		struct iwx_tfh_tb *tb = &desc->tbs[i];
4552 		memset(tb, 0, sizeof(*tb));
4553 	}
4554 	desc->num_tbs = 0;
4555 
4556 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4557 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4558 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
4559 }
4560 
4561 void
4562 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4563 {
4564 	struct ieee80211com *ic = &sc->sc_ic;
4565 
4566 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4567 	    BUS_DMASYNC_POSTWRITE);
4568 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4569 	m_freem(txd->m);
4570 	txd->m = NULL;
4571 
4572 	KASSERT(txd->in);
4573 	ieee80211_release_node(ic, &txd->in->in_ni);
4574 	txd->in = NULL;
4575 }
4576 
4577 void
4578 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4579 {
4580 	struct iwx_tx_data *txd;
4581 
4582 	while (ring->tail != idx) {
4583 		txd = &ring->data[ring->tail];
4584 		if (txd->m != NULL) {
4585 			iwx_clear_tx_desc(sc, ring, ring->tail);
4586 			iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
4587 			iwx_txd_done(sc, txd);
4588 			ring->queued--;
4589 		}
4590 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4591 	}
4592 }
4593 
4594 void
4595 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4596     struct iwx_rx_data *data)
4597 {
4598 	struct ieee80211com *ic = &sc->sc_ic;
4599 	struct ifnet *ifp = IC2IFP(ic);
4600 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4601 	int qid = cmd_hdr->qid, status, txfail;
4602 	struct iwx_tx_ring *ring = &sc->txq[qid];
4603 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4604 	uint32_t ssn;
4605 	uint32_t len = iwx_rx_packet_len(pkt);
4606 
4607 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4608 	    BUS_DMASYNC_POSTREAD);
4609 
4610 	/* Sanity checks. */
4611 	if (sizeof(*tx_resp) > len)
4612 		return;
4613 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
4614 		return;
4615 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
4616 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
4617 		return;
4618 
4619 	sc->sc_tx_timer[qid] = 0;
4620 
4621 	if (tx_resp->frame_count > 1) /* A-MPDU */
4622 		return;
4623 
4624 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
4625 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
4626 	    status != IWX_TX_STATUS_DIRECT_DONE);
4627 
4628 	if (txfail)
4629 		ifp->if_oerrors++;
4630 
4631 	/*
4632 	 * On hardware supported by iwx(4) the SSN counter is only
4633 	 * 8 bit and corresponds to a Tx ring index rather than a
4634 	 * sequence number. Frames up to this index (non-inclusive)
4635 	 * can now be freed.
4636 	 */
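	/*
	 * For example, with ring->tail = 250 and ssn = 2, entries
	 * 250..255 and 0..1 (modulo IWX_TX_RING_COUNT) are reclaimed
	 * by iwx_txq_advance() below.
	 */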
4637 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
4638 	ssn = le32toh(ssn) & 0xff;
4639 	iwx_txq_advance(sc, ring, ssn);
4640 	iwx_clear_oactive(sc, ring);
4641 }
4642 
4643 void
4644 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4645 {
4646 	struct ieee80211com *ic = &sc->sc_ic;
4647 	struct ifnet *ifp = IC2IFP(ic);
4648 
4649 	if (ring->queued < IWX_TX_RING_LOMARK) {
4650 		sc->qfullmsk &= ~(1 << ring->qid);
4651 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4652 			ifq_clr_oactive(&ifp->if_snd);
4653 			/*
4654 			 * Well, we're in interrupt context, but then again
4655 			 * I guess net80211 does all sorts of stunts in
4656 			 * interrupt context, so maybe this is no biggie.
4657 			 */
4658 			(*ifp->if_start)(ifp);
4659 		}
4660 	}
4661 }
4662 
4663 void
4664 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4665     struct iwx_rx_data *data)
4666 {
4667 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4668 	struct ieee80211com *ic = &sc->sc_ic;
4669 	struct ieee80211_node *ni;
4670 	struct ieee80211_tx_ba *ba;
4671 	struct iwx_node *in;
4672 	struct iwx_tx_ring *ring;
4673 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4674 	int qid;
4675 
4676 	if (ic->ic_state != IEEE80211_S_RUN)
4677 		return;
4678 
4679 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4680 		return;
4681 
4682 	if (ba_res->sta_id != IWX_STATION_ID)
4683 		return;
4684 
4685 	ni = ic->ic_bss;
4686 	in = (void *)ni;
4687 
4688 	tfd_cnt = le16toh(ba_res->tfd_cnt);
4689 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4690 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4691 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4692 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
4693 		return;
4694 
4695 	for (i = 0; i < tfd_cnt; i++) {
4696 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4697 		uint8_t tid;
4698 
4699 		tid = ba_tfd->tid;
4700 		if (tid >= nitems(sc->aggqid))
4701 			continue;
4702 
4703 		qid = sc->aggqid[tid];
4704 		if (qid != le16toh(ba_tfd->q_num))
4705 			continue;
4706 
4707 		ring = &sc->txq[qid];
4708 
4709 		ba = &ni->ni_tx_ba[tid];
4710 		if (ba->ba_state != IEEE80211_BA_AGREED)
4711 			continue;
4712 
4713 		idx = le16toh(ba_tfd->tfd_index);
4714 		if (idx >= IWX_TX_RING_COUNT)
4715 			continue;
4716 		sc->sc_tx_timer[qid] = 0;
4717 		iwx_txq_advance(sc, ring, idx);
4718 		iwx_clear_oactive(sc, ring);
4719 	}
4720 }
4721 
4722 void
4723 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4724     struct iwx_rx_data *data)
4725 {
4726 	struct ieee80211com *ic = &sc->sc_ic;
4727 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4728 	uint32_t missed;
4729 
4730 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4731 	    (ic->ic_state != IEEE80211_S_RUN))
4732 		return;
4733 
4734 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4735 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4736 
4737 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4738 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4739 		if (ic->ic_if.if_flags & IFF_DEBUG)
4740 			printf("%s: receiving no beacons from %s; checking if "
4741 			    "this AP is still responding to probe requests\n",
4742 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4743 		/*
4744 		 * Rather than go directly to scan state, try to send a
4745 		 * directed probe request first. If that fails then the
4746 		 * state machine will drop us into scanning after timing
4747 		 * out waiting for a probe response.
4748 		 */
4749 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4750 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4751 	}
4753 }
4754 
4755 int
4756 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4757 {
4758 	struct iwx_binding_cmd cmd;
4759 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4760 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4761 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4762 	uint32_t status;
4763 
4764 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
4765 		panic("binding already added");
4766 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4767 		panic("binding already removed");
4768 
4769 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
4770 		return EINVAL;
4771 
4772 	memset(&cmd, 0, sizeof(cmd));
4773 
4774 	cmd.id_and_color
4775 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4776 	cmd.action = htole32(action);
4777 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4778 
4779 	cmd.macs[0] = htole32(mac_id);
4780 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4781 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4782 
4783 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4784 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4785 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4786 	else
4787 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4788 
4789 	status = 0;
4790 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4791 	    &cmd, &status);
4792 	if (err == 0 && status != 0)
4793 		err = EIO;
4794 
4795 	return err;
4796 }
4797 
4798 int
4799 iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4800     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4801 {
4802 	struct ieee80211com *ic = &sc->sc_ic;
4803 	struct iwx_phy_context_cmd_uhb cmd;
4804 	uint8_t active_cnt, idle_cnt;
4805 	struct ieee80211_channel *chan = ctxt->channel;
4806 
4807 	memset(&cmd, 0, sizeof(cmd));
4808 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4809 	    ctxt->color));
4810 	cmd.action = htole32(action);
4811 
4812 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4813 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4814 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4815 	else
4816 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4817 
4818 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4819 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4820 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
4821 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4822 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4823 			/* secondary chan above -> control chan below */
4824 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4825 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4826 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4827 			/* secondary chan below -> control chan above */
4828 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4829 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4830 		} else {
4831 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4832 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4833 		}
4834 	} else {
4835 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4836 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4837 	}
4838 
4839 	idle_cnt = chains_static;
4840 	active_cnt = chains_dynamic;
4841 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4842 	    IWX_PHY_RX_CHAIN_VALID_POS);
4843 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4844 	cmd.rxchain_info |= htole32(active_cnt <<
4845 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4846 
4847 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4848 }
4849 
4850 int
4851 iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4852     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4853 {
4854 	struct ieee80211com *ic = &sc->sc_ic;
4855 	struct iwx_phy_context_cmd cmd;
4856 	uint8_t active_cnt, idle_cnt;
4857 	struct ieee80211_channel *chan = ctxt->channel;
4858 
4859 	memset(&cmd, 0, sizeof(cmd));
4860 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4861 	    ctxt->color));
4862 	cmd.action = htole32(action);
4863 
4864 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4865 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4866 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4867 	else
4868 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4869 
4870 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4871 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
4872 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4873 	if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
4874 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
4875 			/* secondary chan above -> control chan below */
4876 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4877 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4878 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
4879 			/* secondary chan below -> control chan above */
4880 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
4881 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
4882 		} else {
4883 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4884 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4885 		}
4886 	} else {
4887 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
4888 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
4889 	}
4890 
4891 	idle_cnt = chains_static;
4892 	active_cnt = chains_dynamic;
4893 	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4894 	    IWX_PHY_RX_CHAIN_VALID_POS);
4895 	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4896 	cmd.rxchain_info |= htole32(active_cnt <<
4897 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4898 
4899 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
4900 }
4901 
4902 int
4903 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4904     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4905     uint32_t apply_time, uint8_t sco)
4906 {
4907 	int cmdver;
4908 
4909 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
4910 	if (cmdver != 3) {
4911 		printf("%s: firmware does not support phy-context-cmd v3\n",
4912 		    DEVNAME(sc));
4913 		return ENOTSUP;
4914 	}
4915 
4916 	/*
4917 	 * Intel increased the size of the fw_channel_info struct and neglected
4918 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
4919 	 * member in the middle.
4920 	 * To keep things simple we use a separate function to handle the larger
4921 	 * variant of the phy context command.
4922 	 */
4923 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
4924 		return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
4925 		    chains_dynamic, action, sco);
4926 	}
4927 
4928 	return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
4929 	    action, sco);
4930 }
4931 
4932 int
4933 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4934 {
4935 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
4936 	struct iwx_tfh_tfd *desc;
4937 	struct iwx_tx_data *txdata;
4938 	struct iwx_device_cmd *cmd;
4939 	struct mbuf *m;
4940 	bus_addr_t paddr;
4941 	uint64_t addr;
4942 	int err = 0, i, paylen, off, s;
4943 	int idx, code, async, group_id;
4944 	size_t hdrlen, datasz;
4945 	uint8_t *data;
4946 	int generation = sc->sc_generation;
4947 
4948 	code = hcmd->id;
4949 	async = hcmd->flags & IWX_CMD_ASYNC;
4950 	idx = ring->cur;
4951 
4952 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
4953 		paylen += hcmd->len[i];
4954 	}
4955 
4956 	/* If this command waits for a response, allocate response buffer. */
4957 	hcmd->resp_pkt = NULL;
4958 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
4959 		uint8_t *resp_buf;
4960 		KASSERT(!async);
4961 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
4962 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
4963 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
4964 			return ENOSPC;
4965 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
4966 		    M_NOWAIT | M_ZERO);
4967 		if (resp_buf == NULL)
4968 			return ENOMEM;
4969 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
4970 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4971 	} else {
4972 		sc->sc_cmd_resp_pkt[idx] = NULL;
4973 	}
4974 
4975 	s = splnet();
4976 
4977 	desc = &ring->desc[idx];
4978 	txdata = &ring->data[idx];
4979 
4980 	/*
4981 	 * XXX Intel inside (tm)
4982 	 * Firmware API versions >= 50 reject old-style commands in
4983 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
4984 	 * that such commands were in the LONG_GROUP instead in order
4985 	 * for firmware to accept them.
4986 	 */
4987 	if (iwx_cmd_groupid(code) == 0) {
4988 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
4989 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
4990 	} else
4991 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
4992 
4993 	group_id = iwx_cmd_groupid(code);
4994 
4995 	hdrlen = sizeof(cmd->hdr_wide);
4996 	datasz = sizeof(cmd->data_wide);
4997 
4998 	if (paylen > datasz) {
4999 		/* Command is too large to fit in pre-allocated space. */
5000 		size_t totlen = hdrlen + paylen;
5001 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5002 			printf("%s: firmware command too long (%zd bytes)\n",
5003 			    DEVNAME(sc), totlen);
5004 			err = EINVAL;
5005 			goto out;
5006 		}
5007 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5008 		if (m == NULL) {
5009 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5010 			    DEVNAME(sc), totlen);
5011 			err = ENOMEM;
5012 			goto out;
5013 		}
5014 		cmd = mtod(m, struct iwx_device_cmd *);
5015 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5016 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5017 		if (err) {
5018 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5019 			    DEVNAME(sc), totlen);
5020 			m_freem(m);
5021 			goto out;
5022 		}
5023 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5024 		paddr = txdata->map->dm_segs[0].ds_addr;
5025 	} else {
5026 		cmd = &ring->cmd[idx];
5027 		paddr = txdata->cmd_paddr;
5028 	}
5029 
5030 	memset(cmd, 0, sizeof(*cmd));
5031 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5032 	cmd->hdr_wide.group_id = group_id;
5033 	cmd->hdr_wide.qid = ring->qid;
5034 	cmd->hdr_wide.idx = idx;
5035 	cmd->hdr_wide.length = htole16(paylen);
5036 	cmd->hdr_wide.version = iwx_cmd_version(code);
5037 	data = cmd->data_wide;
5038 
5039 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5040 		if (hcmd->len[i] == 0)
5041 			continue;
5042 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5043 		off += hcmd->len[i];
5044 	}
5045 	KASSERT(off == paylen);
5046 
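	/*
	 * The command is mapped with up to two TBs: the first TB covers
	 * at most IWX_FIRST_TB_SIZE bytes and any remainder spills into
	 * a second TB at the corresponding offset.
	 */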
5047 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5048 	addr = htole64(paddr);
5049 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5050 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5051 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5052 		    IWX_FIRST_TB_SIZE);
5053 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5054 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5055 		desc->num_tbs = htole16(2);
5056 	} else
5057 		desc->num_tbs = htole16(1);
5058 
5059 	if (paylen > datasz) {
5060 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5061 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5062 	} else {
5063 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5064 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5065 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5066 	}
5067 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5068 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5069 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5070 	/* Kick command ring. */
5071 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5072 	ring->queued++;
5073 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5074 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5075 
5076 	if (!async) {
5077 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5078 		if (err == 0) {
5079 			/* if hardware is no longer up, return error */
5080 			if (generation != sc->sc_generation) {
5081 				err = ENXIO;
5082 				goto out;
5083 			}
5084 
5085 			/* Response buffer will be freed in iwx_free_resp(). */
5086 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5087 			sc->sc_cmd_resp_pkt[idx] = NULL;
5088 		} else if (generation == sc->sc_generation) {
5089 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5090 			    sc->sc_cmd_resp_len[idx]);
5091 			sc->sc_cmd_resp_pkt[idx] = NULL;
5092 		}
5093 	}
5094  out:
5095 	splx(s);
5096 
5097 	return err;
5098 }
5099 
5100 int
5101 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5102     uint16_t len, const void *data)
5103 {
5104 	struct iwx_host_cmd cmd = {
5105 		.id = id,
5106 		.len = { len, },
5107 		.data = { data, },
5108 		.flags = flags,
5109 	};
5110 
5111 	return iwx_send_cmd(sc, &cmd);
5112 }
5113 
5114 int
5115 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5116     uint32_t *status)
5117 {
5118 	struct iwx_rx_packet *pkt;
5119 	struct iwx_cmd_response *resp;
5120 	int err, resp_len;
5121 
5122 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5123 	cmd->flags |= IWX_CMD_WANT_RESP;
5124 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5125 
5126 	err = iwx_send_cmd(sc, cmd);
5127 	if (err)
5128 		return err;
5129 
5130 	pkt = cmd->resp_pkt;
5131 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5132 		return EIO;
5133 
5134 	resp_len = iwx_rx_packet_payload_len(pkt);
5135 	if (resp_len != sizeof(*resp)) {
5136 		iwx_free_resp(sc, cmd);
5137 		return EIO;
5138 	}
5139 
5140 	resp = (void *)pkt->data;
5141 	*status = le32toh(resp->status);
5142 	iwx_free_resp(sc, cmd);
5143 	return err;
5144 }
5145 
5146 int
5147 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5148     const void *data, uint32_t *status)
5149 {
5150 	struct iwx_host_cmd cmd = {
5151 		.id = id,
5152 		.len = { len, },
5153 		.data = { data, },
5154 	};
5155 
5156 	return iwx_send_cmd_status(sc, &cmd, status);
5157 }
5158 
5159 void
5160 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5161 {
5162 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5163 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5164 	hcmd->resp_pkt = NULL;
5165 }
5166 
5167 void
5168 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5169 {
5170 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5171 	struct iwx_tx_data *data;
5172 
5173 	if (qid != IWX_DQA_CMD_QUEUE) {
5174 		return;	/* Not a command ack. */
5175 	}
5176 
5177 	data = &ring->data[idx];
5178 
5179 	if (data->m != NULL) {
5180 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5181 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5182 		bus_dmamap_unload(sc->sc_dmat, data->map);
5183 		m_freem(data->m);
5184 		data->m = NULL;
5185 	}
5186 	wakeup(&ring->desc[idx]);
5187 
5188 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5189 	if (ring->queued == 0) {
5190 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5191 			DEVNAME(sc), code));
5192 	} else if (ring->queued > 0)
5193 		ring->queued--;
5194 }
5195 
5196 /*
5197  * Fill in various bits for management frames, and leave them
5198  * unfilled for data frames (firmware takes care of that).
5199  * Return the selected TX rate.
5200  */
5201 const struct iwx_rate *
5202 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5203     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
5204 {
5205 	struct ieee80211com *ic = &sc->sc_ic;
5206 	struct ieee80211_node *ni = &in->in_ni;
5207 	struct ieee80211_rateset *rs = &ni->ni_rates;
5208 	const struct iwx_rate *rinfo;
5209 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5210 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5211 	int ridx, rate_flags;
5212 	uint32_t flags = 0;
5213 
5214 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5215 	    type != IEEE80211_FC0_TYPE_DATA) {
5216 		/* for non-data, use the lowest supported rate */
5217 		ridx = min_ridx;
5218 		flags |= IWX_TX_FLAGS_CMD_RATE;
5219 	} else if (ic->ic_fixed_mcs != -1) {
5220 		ridx = sc->sc_fixed_ridx;
5221 		flags |= IWX_TX_FLAGS_CMD_RATE;
5222 	} else if (ic->ic_fixed_rate != -1) {
5223 		ridx = sc->sc_fixed_ridx;
5224 		flags |= IWX_TX_FLAGS_CMD_RATE;
5225 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5226 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
5227 	} else {
5228 		uint8_t rval;
5229 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
5230 		ridx = iwx_rval2ridx(rval);
5231 		if (ridx < min_ridx)
5232 			ridx = min_ridx;
5233 	}
5234 
5235 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
5236 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5237 		flags |= IWX_TX_FLAGS_HIGH_PRI;
5238 	tx->flags = htole32(flags);
5239 
5240 	rinfo = &iwx_rates[ridx];
5241 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5242 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
5243 	else
5244 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
5245 	if (IWX_RIDX_IS_CCK(ridx))
5246 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
5247 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    type == IEEE80211_FC0_TYPE_DATA &&
5249 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5250 		uint8_t sco;
5251 		if (ieee80211_node_supports_ht_chan40(ni))
5252 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
5253 		else
5254 			sco = IEEE80211_HTOP0_SCO_SCN;
5255 		rate_flags |= IWX_RATE_MCS_HT_MSK;
5256 		if ((sco == IEEE80211_HTOP0_SCO_SCA ||
5257 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
5258 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
5259 			rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40;
5260 			if (ieee80211_node_supports_ht_sgi40(ni))
5261 				rate_flags |= IWX_RATE_MCS_SGI_MSK;
5262 		} else if (ieee80211_node_supports_ht_sgi20(ni))
5263 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
5264 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
5265 	} else
5266 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
5267 
5268 	return rinfo;
5269 }
5270 
5271 void
5272 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
5273     uint16_t num_tbs)
5274 {
5275 	uint8_t filled_tfd_size, num_fetch_chunks;
5276 	uint16_t len = byte_cnt;
5277 	uint16_t bc_ent;
5278 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5279 
5280 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
5281 			  num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 gives the number of chunks to fetch into
	 * SRAM: 0 for one chunk, 1 for two, and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
5290 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
5291 
	/* Before AX210, the HW expects the byte count in dwords (DW). */
5293 	len = howmany(len, 4);
5294 	bc_ent = htole16(len | (num_fetch_chunks << 12));
5295 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
5296 }
5297 
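/*
 * Enqueue a frame for transmission: pick a Tx queue, build the TX
 * command and the TFD describing command and frame payload, update
 * the byte count table, and kick the queue's write pointer.
 */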
5298 int
5299 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5300 {
5301 	struct ieee80211com *ic = &sc->sc_ic;
5302 	struct iwx_node *in = (void *)ni;
5303 	struct iwx_tx_ring *ring;
5304 	struct iwx_tx_data *data;
5305 	struct iwx_tfh_tfd *desc;
5306 	struct iwx_device_cmd *cmd;
5307 	struct iwx_tx_cmd_gen2 *tx;
5308 	struct ieee80211_frame *wh;
5309 	struct ieee80211_key *k = NULL;
5310 	const struct iwx_rate *rinfo;
5311 	uint64_t paddr;
5312 	u_int hdrlen;
5313 	bus_dma_segment_t *seg;
5314 	uint16_t num_tbs;
5315 	uint8_t type, subtype;
5316 	int i, totlen, err, pad, qid;
5317 
5318 	wh = mtod(m, struct ieee80211_frame *);
5319 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5320 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5321 	if (type == IEEE80211_FC0_TYPE_CTL)
5322 		hdrlen = sizeof(struct ieee80211_frame_min);
5323 	else
5324 		hdrlen = ieee80211_get_hdrlen(wh);
5325 
5326 	qid = sc->first_data_qid;
5327 
5328 	/* Put QoS frames on the data queue which maps to their TID. */
5329 	if (ieee80211_has_qos(wh)) {
5330 		struct ieee80211_tx_ba *ba;
5331 		uint16_t qos = ieee80211_get_qos(wh);
5332 		uint8_t tid = qos & IEEE80211_QOS_TID;
5333 
5334 		ba = &ni->ni_tx_ba[tid];
5335 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5336 		    type == IEEE80211_FC0_TYPE_DATA &&
5337 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
5338 		    sc->aggqid[tid] != 0 &&
5339 		    ba->ba_state == IEEE80211_BA_AGREED) {
5340 			qid = sc->aggqid[tid];
5341 		}
5342 	}
5343 
5344 	ring = &sc->txq[qid];
5345 	desc = &ring->desc[ring->cur];
5346 	memset(desc, 0, sizeof(*desc));
5347 	data = &ring->data[ring->cur];
5348 
5349 	cmd = &ring->cmd[ring->cur];
5350 	cmd->hdr.code = IWX_TX_CMD;
5351 	cmd->hdr.flags = 0;
5352 	cmd->hdr.qid = ring->qid;
5353 	cmd->hdr.idx = ring->cur;
5354 
5355 	tx = (void *)cmd->data;
5356 	memset(tx, 0, sizeof(*tx));
5357 
5358 	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
5359 
5360 #if NBPFILTER > 0
5361 	if (sc->sc_drvbpf != NULL) {
5362 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5363 		uint16_t chan_flags;
5364 
5365 		tap->wt_flags = 0;
5366 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5367 		chan_flags = ni->ni_chan->ic_flags;
5368 		if (ic->ic_curmode != IEEE80211_MODE_11N)
5369 			chan_flags &= ~IEEE80211_CHAN_HT;
5370 		tap->wt_chan_flags = htole16(chan_flags);
5371 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5372 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5373 		    type == IEEE80211_FC0_TYPE_DATA &&
5374 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
5375 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
5376 		} else
5377 			tap->wt_rate = rinfo->rate;
5378 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
5379 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
5380 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5381 
5382 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5383 		    m, BPF_DIRECTION_OUT);
5384 	}
5385 #endif
5386 
5387 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k = ieee80211_get_txkey(ic, wh, ni);
5389 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5390 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
5391 				return ENOBUFS;
5392 			/* 802.11 header may have moved. */
5393 			wh = mtod(m, struct ieee80211_frame *);
5394 			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5395 		} else {
5396 			k->k_tsc++;
5397 			/* Hardware increments PN internally and adds IV. */
5398 		}
5399 	} else
5400 		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
5401 
5402 	totlen = m->m_pkthdr.len;
5403 
5404 	if (hdrlen & 3) {
5405 		/* First segment length must be a multiple of 4. */
5406 		pad = 4 - (hdrlen & 3);
5407 		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
5408 	} else
5409 		pad = 0;
5410 
5411 	tx->len = htole16(totlen);
5412 
5413 	/* Copy 802.11 header in TX command. */
5414 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5415 
5416 	/* Trim 802.11 header. */
5417 	m_adj(m, hdrlen);
5418 
5419 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5420 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5421 	if (err && err != EFBIG) {
5422 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5423 		m_freem(m);
5424 		return err;
5425 	}
5426 	if (err) {
5427 		/* Too many DMA segments, linearize mbuf. */
5428 		if (m_defrag(m, M_DONTWAIT)) {
5429 			m_freem(m);
5430 			return ENOBUFS;
5431 		}
5432 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5433 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5434 		if (err) {
5435 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5436 			    err);
5437 			m_freem(m);
5438 			return err;
5439 		}
5440 	}
5441 	data->m = m;
5442 	data->in = in;
5443 
5444 	/* Fill TX descriptor. */
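	/*
	 * TB0 maps the first IWX_FIRST_TB_SIZE bytes of the TX command,
	 * TB1 maps the rest of the command plus the 802.11 header and
	 * padding, and the remaining TBs map the mbuf data segments;
	 * hence num_tbs is 2 + dm_nsegs.
	 */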
5445 	num_tbs = 2 + data->map->dm_nsegs;
5446 	desc->num_tbs = htole16(num_tbs);
5447 
5448 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5449 	paddr = htole64(data->cmd_paddr);
5450 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
	if (data->cmd_paddr >> 32 !=
	    (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5453 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5454 	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
5455 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5456 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5457 
	if (data->cmd_paddr >> 32 !=
	    (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5460 
5461 	/* Other DMA segments are for data payload. */
5462 	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
		paddr = htole64(seg->ds_addr);
		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
		if (seg->ds_addr >> 32 !=
		    (seg->ds_addr + seg->ds_len) >> 32)
			DPRINTF(("%s: TB%d crosses 32bit boundary\n",
			    __func__, i + 2));
	}
5470 
5471 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5472 	    BUS_DMASYNC_PREWRITE);
5473 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5474 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5475 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5476 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5477 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5478 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5479 
5480 	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
5481 
5482 	/* Kick TX ring. */
5483 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5484 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5485 
5486 	/* Mark TX ring as full if we reach a certain threshold. */
5487 	if (++ring->queued > IWX_TX_RING_HIMARK) {
5488 		sc->qfullmsk |= 1 << ring->qid;
5489 	}
5490 
5491 	if (ic->ic_if.if_flags & IFF_UP)
5492 		sc->sc_tx_timer[ring->qid] = 15;
5493 
5494 	return 0;
5495 }
5496 
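/*
 * Flush frames still pending in firmware queues for the given TIDs of
 * a station and, if the firmware provides a response, advance our Tx
 * ring read pointers to match the firmware's post-flush state.
 */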
5497 int
5498 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
5499 {
5500 	struct iwx_rx_packet *pkt;
5501 	struct iwx_tx_path_flush_cmd_rsp *resp;
5502 	struct iwx_tx_path_flush_cmd flush_cmd = {
5503 		.sta_id = htole32(sta_id),
5504 		.tid_mask = htole16(tids),
5505 	};
5506 	struct iwx_host_cmd hcmd = {
5507 		.id = IWX_TXPATH_FLUSH,
5508 		.len = { sizeof(flush_cmd), },
5509 		.data = { &flush_cmd, },
5510 		.flags = IWX_CMD_WANT_RESP,
5511 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
5512 	};
5513 	int err, resp_len, i, num_flushed_queues;
5514 
5515 	err = iwx_send_cmd(sc, &hcmd);
5516 	if (err)
5517 		return err;
5518 
5519 	pkt = hcmd.resp_pkt;
5520 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
5521 		err = EIO;
5522 		goto out;
5523 	}
5524 
5525 	resp_len = iwx_rx_packet_payload_len(pkt);
5526 	/* Some firmware versions don't provide a response. */
5527 	if (resp_len == 0)
5528 		goto out;
5529 	else if (resp_len != sizeof(*resp)) {
5530 		err = EIO;
5531 		goto out;
5532 	}
5533 
5534 	resp = (void *)pkt->data;
5535 
5536 	if (le16toh(resp->sta_id) != sta_id) {
5537 		err = EIO;
5538 		goto out;
5539 	}
5540 
5541 	num_flushed_queues = le16toh(resp->num_flushed_queues);
5542 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5543 		err = EIO;
5544 		goto out;
5545 	}
5546 
5547 	for (i = 0; i < num_flushed_queues; i++) {
5548 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5549 		uint16_t tid = le16toh(queue_info->tid);
5550 		uint16_t read_after = le16toh(queue_info->read_after_flush);
5551 		uint16_t qid = le16toh(queue_info->queue_num);
5552 		struct iwx_tx_ring *txq;
5553 
5554 		if (qid >= nitems(sc->txq))
5555 			continue;
5556 
5557 		txq = &sc->txq[qid];
5558 		if (tid != txq->tid)
5559 			continue;
5560 
5561 		iwx_txq_advance(sc, txq, read_after);
5562 	}
5563 out:
5564 	iwx_free_resp(sc, &hcmd);
5565 	return err;
5566 }
5567 
5568 #define IWX_FLUSH_WAIT_MS	2000
5569 
5570 int
5571 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
5572 {
5573 	int i, err;
5574 
5575 	for (i = 0; i < nitems(sc->txq); i++) {
5576 		struct iwx_tx_ring *ring = &sc->txq[i];
5577 
5578 		if (i == IWX_DQA_CMD_QUEUE)
5579 			continue;
5580 
5581 		while (ring->queued > 0) {
5582 			err = tsleep_nsec(ring, 0, "iwxflush",
5583 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
5584 			if (err)
5585 				return err;
5586 		}
5587 	}
5588 
5589 	return 0;
5590 }
5591 
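/*
 * Enable or disable drain mode for the station. While draining, the
 * firmware is expected to stop scheduling new frames for this station
 * so that pending frames can be flushed; see iwx_flush_sta() below.
 */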
5592 int
5593 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5594 {
5595 	struct iwx_add_sta_cmd cmd;
5596 	int err;
5597 	uint32_t status;
5598 
5599 	memset(&cmd, 0, sizeof(cmd));
5600 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5601 	    in->in_color));
5602 	cmd.sta_id = IWX_STATION_ID;
5603 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5604 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5605 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5606 
5607 	status = IWX_ADD_STA_SUCCESS;
5608 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5609 	    sizeof(cmd), &cmd, &status);
5610 	if (err) {
5611 		printf("%s: could not update sta (error %d)\n",
5612 		    DEVNAME(sc), err);
5613 		return err;
5614 	}
5615 
5616 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5617 	case IWX_ADD_STA_SUCCESS:
5618 		break;
5619 	default:
5620 		err = EIO;
		printf("%s: could not %s draining for station\n",
		    DEVNAME(sc), drain ? "enable" : "disable");
5623 		break;
5624 	}
5625 
5626 	return err;
5627 }
5628 
5629 int
5630 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5631 {
5632 	int err;
5633 
5634 	splassert(IPL_NET);
5635 
5636 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5637 
5638 	err = iwx_drain_sta(sc, in, 1);
5639 	if (err)
5640 		goto done;
5641 
5642 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5643 	if (err) {
5644 		printf("%s: could not flush Tx path (error %d)\n",
5645 		    DEVNAME(sc), err);
5646 		goto done;
5647 	}
5648 
5649 	err = iwx_wait_tx_queues_empty(sc);
5650 	if (err) {
		printf("%s: could not empty Tx queues (error %d)\n",
5652 		    DEVNAME(sc), err);
5653 		goto done;
5654 	}
5655 
5656 	err = iwx_drain_sta(sc, in, 0);
5657 done:
5658 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5659 	return err;
5660 }
5661 
5662 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
5663 
5664 int
5665 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5666     struct iwx_beacon_filter_cmd *cmd)
5667 {
5668 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5669 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5670 }
5671 
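/*
 * Beacon abort is an optimization layered on top of beacon filtering;
 * it is therefore only toggled while beacon filtering itself is active.
 */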
5672 int
5673 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5674 {
5675 	struct iwx_beacon_filter_cmd cmd = {
5676 		IWX_BF_CMD_CONFIG_DEFAULTS,
5677 		.bf_enable_beacon_filter = htole32(1),
5678 		.ba_enable_beacon_abort = htole32(enable),
5679 	};
5680 
5681 	if (!sc->sc_bf.bf_enabled)
5682 		return 0;
5683 
5684 	sc->sc_bf.ba_enabled = enable;
5685 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5686 }
5687 
5688 void
5689 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5690     struct iwx_mac_power_cmd *cmd)
5691 {
5692 	struct ieee80211com *ic = &sc->sc_ic;
5693 	struct ieee80211_node *ni = &in->in_ni;
5694 	int dtim_period, dtim_msec, keep_alive;
5695 
5696 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5697 	    in->in_color));
5698 	if (ni->ni_dtimperiod)
5699 		dtim_period = ni->ni_dtimperiod;
5700 	else
5701 		dtim_period = 1;
5702 
	/*
	 * Regardless of power management state the driver must set the
	 * keep-alive period. The firmware will use it for sending keep
	 * alive NDPs immediately after association. Ensure that the
	 * keep-alive period is at least 3 * DTIM.
	 */
5709 	dtim_msec = dtim_period * ni->ni_intval;
5710 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
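	/* Convert from milliseconds to seconds, rounding up. */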
5711 	keep_alive = roundup(keep_alive, 1000) / 1000;
5712 	cmd->keep_alive_seconds = htole16(keep_alive);
5713 
5714 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5715 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5716 }
5717 
5718 int
5719 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5720 {
5721 	int err;
5722 	int ba_enable;
5723 	struct iwx_mac_power_cmd cmd;
5724 
5725 	memset(&cmd, 0, sizeof(cmd));
5726 
5727 	iwx_power_build_cmd(sc, in, &cmd);
5728 
5729 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5730 	    sizeof(cmd), &cmd);
5731 	if (err != 0)
5732 		return err;
5733 
5734 	ba_enable = !!(cmd.flags &
5735 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5736 	return iwx_update_beacon_abort(sc, in, ba_enable);
5737 }
5738 
5739 int
5740 iwx_power_update_device(struct iwx_softc *sc)
5741 {
5742 	struct iwx_device_power_cmd cmd = { };
5743 	struct ieee80211com *ic = &sc->sc_ic;
5744 
5745 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5746 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5747 
5748 	return iwx_send_cmd_pdu(sc,
5749 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5750 }
5751 
5752 int
5753 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5754 {
5755 	struct iwx_beacon_filter_cmd cmd = {
5756 		IWX_BF_CMD_CONFIG_DEFAULTS,
5757 		.bf_enable_beacon_filter = htole32(1),
5758 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5759 	};
5760 	int err;
5761 
5762 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5763 	if (err == 0)
5764 		sc->sc_bf.bf_enabled = 1;
5765 
5766 	return err;
5767 }
5768 
5769 int
5770 iwx_disable_beacon_filter(struct iwx_softc *sc)
5771 {
5772 	struct iwx_beacon_filter_cmd cmd;
5773 	int err;
5774 
5775 	memset(&cmd, 0, sizeof(cmd));
5776 
5777 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5778 	if (err == 0)
5779 		sc->sc_bf.bf_enabled = 0;
5780 
5781 	return err;
5782 }
5783 
5784 int
5785 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5786 {
5787 	struct iwx_add_sta_cmd add_sta_cmd;
5788 	int err;
5789 	uint32_t status;
5790 	struct ieee80211com *ic = &sc->sc_ic;
5791 
5792 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
5793 		panic("STA already added");
5794 
5795 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5796 
5797 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5798 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5799 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
5800 	} else {
5801 		add_sta_cmd.sta_id = IWX_STATION_ID;
5802 		add_sta_cmd.station_type = IWX_STA_LINK;
5803 	}
5804 	add_sta_cmd.mac_id_n_color
5805 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5806 	if (!update) {
5807 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5808 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5809 			    etheranyaddr);
5810 		else
5811 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5812 			    in->in_macaddr);
5813 	}
5814 	add_sta_cmd.add_modify = update ? 1 : 0;
5815 	add_sta_cmd.station_flags_msk
5816 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
5817 
5818 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5819 		add_sta_cmd.station_flags_msk
5820 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
5821 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
5822 
5823 		if (iwx_mimo_enabled(sc)) {
5824 			if (in->in_ni.ni_rxmcs[1] != 0) {
5825 				add_sta_cmd.station_flags |=
5826 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
5827 			}
5828 			if (in->in_ni.ni_rxmcs[2] != 0) {
5829 				add_sta_cmd.station_flags |=
5830 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
5831 			}
5832 		}
5833 
5834 		if (ieee80211_node_supports_ht_chan40(&in->in_ni)) {
5835 			add_sta_cmd.station_flags |= htole32(
5836 			    IWX_STA_FLG_FAT_EN_40MHZ);
5837 		}
5838 
5839 		add_sta_cmd.station_flags
5840 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
5841 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5842 		case IEEE80211_AMPDU_PARAM_SS_2:
5843 			add_sta_cmd.station_flags
5844 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
5845 			break;
5846 		case IEEE80211_AMPDU_PARAM_SS_4:
5847 			add_sta_cmd.station_flags
5848 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
5849 			break;
5850 		case IEEE80211_AMPDU_PARAM_SS_8:
5851 			add_sta_cmd.station_flags
5852 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
5853 			break;
5854 		case IEEE80211_AMPDU_PARAM_SS_16:
5855 			add_sta_cmd.station_flags
5856 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
5857 			break;
5858 		default:
5859 			break;
5860 		}
5861 	}
5862 
5863 	status = IWX_ADD_STA_SUCCESS;
5864 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
5865 	    &add_sta_cmd, &status);
5866 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
5867 		err = EIO;
5868 
5869 	return err;
5870 }
5871 
5872 int
5873 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
5874 {
5875 	struct ieee80211com *ic = &sc->sc_ic;
5876 	struct iwx_rm_sta_cmd rm_sta_cmd;
5877 	int err;
5878 
5879 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
5880 		panic("sta already removed");
5881 
5882 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5883 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5884 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5885 	else
5886 		rm_sta_cmd.sta_id = IWX_STATION_ID;
5887 
5888 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5889 	    &rm_sta_cmd);
5890 
5891 	return err;
5892 }
5893 
5894 int
5895 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
5896 {
5897 	struct ieee80211com *ic = &sc->sc_ic;
5898 	struct ieee80211_node *ni = &in->in_ni;
5899 	int err, i;
5900 
5901 	err = iwx_flush_sta(sc, in);
5902 	if (err) {
5903 		printf("%s: could not flush Tx path (error %d)\n",
5904 		    DEVNAME(sc), err);
5905 		return err;
5906 	}
5907 	err = iwx_rm_sta_cmd(sc, in);
5908 	if (err) {
5909 		printf("%s: could not remove STA (error %d)\n",
5910 		    DEVNAME(sc), err);
5911 		return err;
5912 	}
5913 
5914 	in->in_flags = 0;
5915 
5916 	sc->sc_rx_ba_sessions = 0;
5917 	sc->ba_rx.start_tidmask = 0;
5918 	sc->ba_rx.stop_tidmask = 0;
5919 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
5920 	sc->ba_tx.start_tidmask = 0;
5921 	sc->ba_tx.stop_tidmask = 0;
5922 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
5923 		sc->qenablemsk &= ~(1 << i);
5924 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
5925 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
5926 		if (ba->ba_state != IEEE80211_BA_AGREED)
5927 			continue;
5928 		ieee80211_delba_request(ic, ni, 0, 1, i);
5929 	}
5930 
5931 	return 0;
5932 }
5933 
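/*
 * Fill the scan command's channel list with all configured channels.
 * Note that ic_channels[0] is unused; enumeration starts at index 1.
 */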
5934 uint8_t
5935 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
5936     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
5937     int n_ssids, int bgscan)
5938 {
5939 	struct ieee80211com *ic = &sc->sc_ic;
5940 	struct ieee80211_channel *c;
5941 	uint8_t nchan;
5942 
5943 	for (nchan = 0, c = &ic->ic_channels[1];
5944 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5945 	    nchan < chan_nitems &&
5946 	    nchan < sc->sc_capa_n_scan_channels;
5947 	    c++) {
5948 		uint8_t channel_num;
5949 
5950 		if (c->ic_flags == 0)
5951 			continue;
5952 
5953 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5954 		if (isset(sc->sc_ucode_api,
5955 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5956 			chan->v2.channel_num = channel_num;
5957 			if (IEEE80211_IS_CHAN_2GHZ(c))
5958 				chan->v2.band = IWX_PHY_BAND_24;
5959 			else
5960 				chan->v2.band = IWX_PHY_BAND_5;
5961 			chan->v2.iter_count = 1;
5962 			chan->v2.iter_interval = 0;
5963 		} else {
5964 			chan->v1.channel_num = channel_num;
5965 			chan->v1.iter_count = 1;
5966 			chan->v1.iter_interval = htole16(0);
5967 		}
5968 		/*
5969 		 * Firmware may become unresponsive when asked to send
5970 		 * a directed probe request on a passive channel.
5971 		 */
5972 #if 0 /* Some people see "device timeout" after active scans. */
5973 		if (n_ssids != 0 && !bgscan &&
5974 		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
5975 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5976 #endif
5977 		chan++;
5978 		nchan++;
5979 	}
5980 
5981 	return nchan;
5982 }
5983 
5984 int
5985 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
5986 {
5987 	struct ieee80211com *ic = &sc->sc_ic;
5988 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5989 	struct ieee80211_rateset *rs;
5990 	size_t remain = sizeof(preq->buf);
5991 	uint8_t *frm, *pos;
5992 
5993 	memset(preq, 0, sizeof(*preq));
5994 
5995 	if (remain < sizeof(*wh) + 2)
5996 		return ENOBUFS;
5997 
5998 	/*
5999 	 * Build a probe request frame.  Most of the following code is a
6000 	 * copy & paste of what is done in net80211.
6001 	 */
6002 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6003 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6004 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6005 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6006 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6007 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6008 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6009 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6010 
6011 	frm = (uint8_t *)(wh + 1);
6012 	*frm++ = IEEE80211_ELEMID_SSID;
6013 	*frm++ = 0;
6014 	/* hardware inserts SSID */
6015 
6016 	/* Tell the firmware where the MAC header is. */
6017 	preq->mac_header.offset = 0;
6018 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6019 	remain -= frm - (uint8_t *)wh;
6020 
6021 	/* Fill in 2GHz IEs and tell firmware where they are. */
6022 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6023 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6024 		if (remain < 4 + rs->rs_nrates)
6025 			return ENOBUFS;
6026 	} else if (remain < 2 + rs->rs_nrates)
6027 		return ENOBUFS;
6028 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6029 	pos = frm;
6030 	frm = ieee80211_add_rates(frm, rs);
6031 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6032 		frm = ieee80211_add_xrates(frm, rs);
6033 	remain -= frm - pos;
6034 
6035 	if (isset(sc->sc_enabled_capa,
6036 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6037 		if (remain < 3)
6038 			return ENOBUFS;
6039 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6040 		*frm++ = 1;
6041 		*frm++ = 0;
6042 		remain -= 3;
6043 	}
6044 	preq->band_data[0].len = htole16(frm - pos);
6045 
6046 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6047 		/* Fill in 5GHz IEs. */
6048 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6049 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6050 			if (remain < 4 + rs->rs_nrates)
6051 				return ENOBUFS;
6052 		} else if (remain < 2 + rs->rs_nrates)
6053 			return ENOBUFS;
6054 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6055 		pos = frm;
6056 		frm = ieee80211_add_rates(frm, rs);
6057 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6058 			frm = ieee80211_add_xrates(frm, rs);
6059 		preq->band_data[1].len = htole16(frm - pos);
6060 		remain -= frm - pos;
6061 	}
6062 
6063 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6064 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6065 	pos = frm;
6066 	if (ic->ic_flags & IEEE80211_F_HTON) {
6067 		if (remain < 28)
6068 			return ENOBUFS;
6069 		frm = ieee80211_add_htcaps(frm, ic);
6070 		/* XXX add WME info? */
6071 	}
6072 	preq->common_data.len = htole16(frm - pos);
6073 
6074 	return 0;
6075 }
6076 
6077 int
6078 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6079 {
6080 	struct iwx_scan_config scan_cfg;
6081 	struct iwx_host_cmd hcmd = {
6082 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6083 		.len[0] = sizeof(scan_cfg),
6084 		.data[0] = &scan_cfg,
6085 		.flags = 0,
6086 	};
6087 	int cmdver;
6088 
6089 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6090 		printf("%s: firmware does not support reduced scan config\n",
6091 		    DEVNAME(sc));
6092 		return ENOTSUP;
6093 	}
6094 
6095 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6096 
6097 	/*
6098 	 * SCAN_CFG version >= 5 implies that the broadcast
6099 	 * STA ID field is deprecated.
6100 	 */
6101 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6102 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6103 		scan_cfg.bcast_sta_id = 0xff;
6104 
6105 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6106 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6107 
6108 	return iwx_send_cmd(sc, &hcmd);
6109 }
6110 
6111 uint16_t
6112 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6113 {
6114 	struct ieee80211com *ic = &sc->sc_ic;
6115 	uint16_t flags = 0;
6116 
6117 	if (ic->ic_des_esslen == 0)
6118 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6119 
6120 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6121 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6122 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6123 
6124 	return flags;
6125 }
6126 
6127 #define IWX_SCAN_DWELL_ACTIVE		10
6128 #define IWX_SCAN_DWELL_PASSIVE		110
6129 
6130 /* adaptive dwell max budget time [TU] for full scan */
6131 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6132 /* adaptive dwell max budget time [TU] for directed scan */
6133 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6134 /* adaptive dwell default high band APs number */
6135 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6136 /* adaptive dwell default low band APs number */
6137 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6138 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6139 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6140 /* adaptive dwell number of APs override for p2p friendly GO channels */
6141 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6142 /* adaptive dwell number of APs override for social channels */
6143 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6144 
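/*
 * With adaptive dwell the firmware adjusts how long it lingers on a
 * channel according to the number of APs it expects to find there;
 * we only provide default AP counts and an overall time budget.
 */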
6145 void
6146 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6147     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6148 {
6149 	uint32_t suspend_time, max_out_time;
6150 	uint8_t active_dwell, passive_dwell;
6151 
6152 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6153 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6154 
6155 	general_params->adwell_default_social_chn =
6156 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6157 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6158 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6159 
6160 	if (bgscan)
6161 		general_params->adwell_max_budget =
6162 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6163 	else
6164 		general_params->adwell_max_budget =
6165 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6166 
6167 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	if (bgscan) {
		max_out_time = 120;
		suspend_time = 120;
	} else {
		max_out_time = 0;
		suspend_time = 0;
	}
6175 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6176 		htole32(max_out_time);
6177 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6178 		htole32(suspend_time);
6179 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6180 		htole32(max_out_time);
6181 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6182 		htole32(suspend_time);
6183 
6184 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6185 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6186 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6187 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6188 }
6189 
6190 void
6191 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6192     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6193 {
6194 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6195 
6196 	gp->flags = htole16(gen_flags);
6197 
6198 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6199 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6200 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6201 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6202 
6203 	gp->scan_start_mac_id = 0;
6204 }
6205 
6206 void
6207 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6208     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6209     int n_ssid, int bgscan)
6210 {
6211 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6212 
6213 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6214 	    nitems(cp->channel_config), n_ssid, bgscan);
6215 
6216 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6217 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6218 }
6219 
6220 int
6221 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6222 {
6223 #if 0 /* Some people see "device timeout" after active scans. */
6224 	struct ieee80211com *ic = &sc->sc_ic;
6225 #endif
6226 	struct iwx_host_cmd hcmd = {
6227 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6228 		.len = { 0, },
6229 		.data = { NULL, },
6230 		.flags = 0,
6231 	};
6232 	struct iwx_scan_req_umac_v14 *cmd;
6233 	struct iwx_scan_req_params_v14 *scan_p;
6234 	int err, async = bgscan, n_ssid = 0;
6235 	uint16_t gen_flags;
6236 	uint32_t bitmap_ssid = 0;
6237 
6238 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
6239 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6240 	if (cmd == NULL)
6241 		return ENOMEM;
6242 
6243 	scan_p = &cmd->scan_params;
6244 
6245 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6246 	cmd->uid = htole32(0);
6247 
6248 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6249 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6250 	    gen_flags, bgscan);
6251 
6252 	scan_p->periodic_params.schedule[0].interval = htole16(0);
6253 	scan_p->periodic_params.schedule[0].iter_count = 1;
6254 
6255 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6256 	if (err) {
6257 		free(cmd, M_DEVBUF, sizeof(*cmd));
6258 		return err;
6259 	}
6260 
6261 #if 0 /* Some people see "device timeout" after active scans. */
6262 	if (ic->ic_des_esslen != 0) {
6263 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
6264 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
6265 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
6266 		    ic->ic_des_essid, ic->ic_des_esslen);
6267 		bitmap_ssid |= (1 << 0);
6268 		n_ssid = 1;
6269 	}
6270 #endif
6271 
6272 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6273 	    n_ssid, bgscan);
6274 
6275 	hcmd.len[0] = sizeof(*cmd);
6276 	hcmd.data[0] = (void *)cmd;
6277 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6278 
6279 	err = iwx_send_cmd(sc, &hcmd);
6280 	free(cmd, M_DEVBUF, sizeof(*cmd));
6281 	return err;
6282 }
6283 
6284 void
6285 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6286 {
6287 	struct ieee80211com *ic = &sc->sc_ic;
6288 	struct ifnet *ifp = IC2IFP(ic);
6289 	char alpha2[3];
6290 
6291 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6292 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6293 
6294 	if (ifp->if_flags & IFF_DEBUG) {
6295 		printf("%s: firmware has detected regulatory domain '%s' "
6296 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6297 	}
6298 
6299 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6300 }
6301 
6302 uint8_t
6303 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6304 {
6305 	int i;
6306 	uint8_t rval;
6307 
6308 	for (i = 0; i < rs->rs_nrates; i++) {
6309 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6310 		if (rval == iwx_rates[ridx].rate)
6311 			return rs->rs_rates[i];
6312 	}
6313 
6314 	return 0;
6315 }
6316 
6317 int
6318 iwx_rval2ridx(int rval)
6319 {
6320 	int ridx;
6321 
6322 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6323 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6324 			continue;
6325 		if (rval == iwx_rates[ridx].rate)
6326 			break;
6327 	}
6328 
	return ridx;
6330 }
6331 
6332 void
6333 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
6334     int *ofdm_rates)
6335 {
6336 	struct ieee80211_node *ni = &in->in_ni;
6337 	struct ieee80211_rateset *rs = &ni->ni_rates;
6338 	int lowest_present_ofdm = -1;
6339 	int lowest_present_cck = -1;
6340 	uint8_t cck = 0;
6341 	uint8_t ofdm = 0;
6342 	int i;
6343 
6344 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6345 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6346 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
6347 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6348 				continue;
6349 			cck |= (1 << i);
6350 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6351 				lowest_present_cck = i;
6352 		}
6353 	}
6354 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
6355 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6356 			continue;
6357 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
6358 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6359 			lowest_present_ofdm = i;
6360 	}
6361 
6362 	/*
6363 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6364 	 * variables. This isn't sufficient though, as there might not
6365 	 * be all the right rates in the bitmap. E.g. if the only basic
6366 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6367 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6368 	 *
6369 	 *    [...] a STA responding to a received frame shall transmit
6370 	 *    its Control Response frame [...] at the highest rate in the
6371 	 *    BSSBasicRateSet parameter that is less than or equal to the
6372 	 *    rate of the immediately previous frame in the frame exchange
6373 	 *    sequence ([...]) and that is of the same modulation class
6374 	 *    ([...]) as the received frame. If no rate contained in the
6375 	 *    BSSBasicRateSet parameter meets these conditions, then the
6376 	 *    control frame sent in response to a received frame shall be
6377 	 *    transmitted at the highest mandatory rate of the PHY that is
6378 	 *    less than or equal to the rate of the received frame, and
6379 	 *    that is of the same modulation class as the received frame.
6380 	 *
6381 	 * As a consequence, we need to add all mandatory rates that are
6382 	 * lower than all of the basic rates to these bitmaps.
6383 	 */
6384 
6385 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
6386 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
6387 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
6388 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
6389 	/* 6M already there or needed so always add */
6390 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
6391 
6392 	/*
6393 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6394 	 * Note, however:
6395 	 *  - if no CCK rates are basic, it must be ERP since there must
6396 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6397 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6398 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6399 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6400 	 *  - if 2M is basic, 1M is mandatory
6401 	 *  - if 1M is basic, that's the only valid ACK rate.
6402 	 * As a consequence, it's not as complicated as it sounds, just add
6403 	 * any lower rates to the ACK rate bitmap.
6404 	 */
6405 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
6406 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
6407 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
6408 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
6409 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
6410 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
6411 	/* 1M already there or needed so always add */
6412 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
6413 
6414 	*cck_rates = cck;
6415 	*ofdm_rates = ofdm;
6416 }
6417 
6418 void
6419 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6420     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6421 {
6422 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6423 	struct ieee80211com *ic = &sc->sc_ic;
6424 	struct ieee80211_node *ni = ic->ic_bss;
6425 	int cck_ack_rates, ofdm_ack_rates;
6426 	int i;
6427 
6428 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6429 	    in->in_color));
6430 	cmd->action = htole32(action);
6431 
6432 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6433 		return;
6434 
6435 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6436 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6437 	else if (ic->ic_opmode == IEEE80211_M_STA)
6438 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6439 	else
6440 		panic("unsupported operating mode %d", ic->ic_opmode);
6441 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6442 
6443 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6444 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6445 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6446 		return;
6447 	}
6448 
6449 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6450 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6451 	cmd->cck_rates = htole32(cck_ack_rates);
6452 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6453 
6454 	cmd->cck_short_preamble
6455 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6456 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6457 	cmd->short_slot
6458 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6459 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6460 
6461 	for (i = 0; i < EDCA_NUM_AC; i++) {
6462 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6463 		int txf = iwx_ac_to_tx_fifo[i];
6464 
6465 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
6466 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
6467 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6468 		cmd->ac[txf].fifos_mask = (1 << txf);
6469 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6470 	}
6471 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6472 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6473 
6474 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6475 		enum ieee80211_htprot htprot =
6476 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6477 		switch (htprot) {
6478 		case IEEE80211_HTPROT_NONE:
6479 			break;
6480 		case IEEE80211_HTPROT_NONMEMBER:
6481 		case IEEE80211_HTPROT_NONHT_MIXED:
6482 			cmd->protection_flags |=
6483 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6484 			    IWX_MAC_PROT_FLG_FAT_PROT);
6485 			break;
6486 		case IEEE80211_HTPROT_20MHZ:
6487 			if (in->in_phyctxt &&
6488 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6489 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
6490 				cmd->protection_flags |=
6491 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6492 				    IWX_MAC_PROT_FLG_FAT_PROT);
6493 			}
6494 			break;
6495 		default:
6496 			break;
6497 		}
6498 
6499 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6500 	}
6501 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6502 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6503 
6504 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6505 #undef IWX_EXP2
6506 }
6507 
6508 void
6509 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6510     struct iwx_mac_data_sta *sta, int assoc)
6511 {
6512 	struct ieee80211_node *ni = &in->in_ni;
6513 	uint32_t dtim_off;
6514 	uint64_t tsf;
6515 
6516 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6517 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6518 	tsf = letoh64(tsf);
6519 
6520 	sta->is_assoc = htole32(assoc);
6521 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6522 	sta->dtim_tsf = htole64(tsf + dtim_off);
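	/*
	 * The firmware wants the beacon and DTIM intervals both as plain
	 * values and as reciprocals, presumably to avoid division on the
	 * device.
	 */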
6523 	sta->bi = htole32(ni->ni_intval);
6524 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
6525 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6526 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
6527 	sta->listen_interval = htole32(10);
6528 	sta->assoc_id = htole32(ni->ni_associd);
6529 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6530 }
6531 
6532 int
6533 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6534     int assoc)
6535 {
6536 	struct ieee80211com *ic = &sc->sc_ic;
6537 	struct ieee80211_node *ni = &in->in_ni;
6538 	struct iwx_mac_ctx_cmd cmd;
6539 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6540 
6541 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6542 		panic("MAC already added");
6543 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6544 		panic("MAC already removed");
6545 
6546 	memset(&cmd, 0, sizeof(cmd));
6547 
6548 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6549 
6550 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6551 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6552 		    sizeof(cmd), &cmd);
6553 	}
6554 
6555 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6556 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6557 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6558 		    IWX_MAC_FILTER_ACCEPT_GRP |
6559 		    IWX_MAC_FILTER_IN_BEACON |
6560 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6561 		    IWX_MAC_FILTER_IN_CRC32);
	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
		/*
		 * Allow beacons to pass through as long as we are not
		 * associated or we do not have DTIM period information.
		 */
		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
	} else
		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6570 
6571 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6572 }
6573 
6574 int
6575 iwx_clear_statistics(struct iwx_softc *sc)
6576 {
6577 	struct iwx_statistics_cmd scmd = {
6578 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6579 	};
6580 	struct iwx_host_cmd cmd = {
6581 		.id = IWX_STATISTICS_CMD,
6582 		.len[0] = sizeof(scmd),
6583 		.data[0] = &scmd,
6584 		.flags = IWX_CMD_WANT_RESP,
6585 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6586 	};
6587 	int err;
6588 
6589 	err = iwx_send_cmd(sc, &cmd);
6590 	if (err)
6591 		return err;
6592 
6593 	iwx_free_resp(sc, &cmd);
6594 	return 0;
6595 }
6596 
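/*
 * Schedule a task while holding a reference which the task must drop
 * via refcnt_rele_wake() once it has run. No new tasks are admitted
 * while the driver is shutting down.
 */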
6597 void
6598 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6599 {
6600 	int s = splnet();
6601 
6602 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6603 		splx(s);
6604 		return;
6605 	}
6606 
6607 	refcnt_take(&sc->task_refs);
6608 	if (!task_add(taskq, task))
6609 		refcnt_rele_wake(&sc->task_refs);
6610 	splx(s);
6611 }
6612 
6613 void
6614 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6615 {
6616 	if (task_del(taskq, task))
6617 		refcnt_rele(&sc->task_refs);
6618 }
6619 
6620 int
6621 iwx_scan(struct iwx_softc *sc)
6622 {
6623 	struct ieee80211com *ic = &sc->sc_ic;
6624 	struct ifnet *ifp = IC2IFP(ic);
6625 	int err;
6626 
6627 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
6628 		err = iwx_scan_abort(sc);
6629 		if (err) {
6630 			printf("%s: could not abort background scan\n",
6631 			    DEVNAME(sc));
6632 			return err;
6633 		}
6634 	}
6635 
6636 	err = iwx_umac_scan_v14(sc, 0);
6637 	if (err) {
6638 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6639 		return err;
6640 	}
6641 
6642 	/*
6643 	 * The current mode might have been fixed during association.
6644 	 * Ensure all channels get scanned.
6645 	 */
6646 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6647 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6648 
6649 	sc->sc_flags |= IWX_FLAG_SCANNING;
6650 	if (ifp->if_flags & IFF_DEBUG)
6651 		printf("%s: %s -> %s\n", ifp->if_xname,
6652 		    ieee80211_state_name[ic->ic_state],
6653 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6654 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
6655 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6656 		ieee80211_node_cleanup(ic, ic->ic_bss);
6657 	}
6658 	ic->ic_state = IEEE80211_S_SCAN;
6659 	wakeup(&ic->ic_state); /* wake iwx_init() */
6660 
6661 	return 0;
6662 }
6663 
6664 int
6665 iwx_bgscan(struct ieee80211com *ic)
6666 {
6667 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
6668 	int err;
6669 
6670 	if (sc->sc_flags & IWX_FLAG_SCANNING)
6671 		return 0;
6672 
6673 	err = iwx_umac_scan_v14(sc, 1);
6674 	if (err) {
6675 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6676 		return err;
6677 	}
6678 
6679 	sc->sc_flags |= IWX_FLAG_BGSCAN;
6680 	return 0;
6681 }
6682 
6683 void
6684 iwx_bgscan_done(struct ieee80211com *ic,
6685     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
6686 {
6687 	struct iwx_softc *sc = ic->ic_softc;
6688 
6689 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6690 	sc->bgscan_unref_arg = arg;
6691 	sc->bgscan_unref_arg_size = arg_size;
6692 	iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
6693 }
6694 
6695 void
6696 iwx_bgscan_done_task(void *arg)
6697 {
6698 	struct iwx_softc *sc = arg;
6699 	struct ieee80211com *ic = &sc->sc_ic;
6700 	struct iwx_node *in = (void *)ic->ic_bss;
6701 	struct ieee80211_node *ni = &in->in_ni;
6702 	int tid, err = 0, s = splnet();
6703 
6704 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
6705 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
6706 	    ic->ic_state != IEEE80211_S_RUN) {
6707 		err = ENXIO;
6708 		goto done;
6709 	}
6710 
6711 	err = iwx_flush_sta(sc, in);
6712 	if (err)
6713 		goto done;
6714 
6715 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
6716 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
6717 
6718 		if (sc->aggqid[tid] == 0)
6719 			continue;
6720 
6721 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
6722 		if (err)
6723 			goto done;
6724 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
6725 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
6726 		    IEEE80211_ACTION_DELBA,
6727 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
6728 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
6729 #endif
6730 		ieee80211_node_tx_ba_clear(ni, tid);
6731 		sc->aggqid[tid] = 0;
6732 	}
6733 
6734 	/*
6735 	 * Tx queues have been flushed and Tx agg has been stopped.
6736 	 * Allow roaming to proceed.
6737 	 */
6738 	ni->ni_unref_arg = sc->bgscan_unref_arg;
6739 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
6740 	sc->bgscan_unref_arg = NULL;
6741 	sc->bgscan_unref_arg_size = 0;
6742 	ieee80211_node_tx_stopped(ic, &in->in_ni);
6743 done:
6744 	if (err) {
6745 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6746 		sc->bgscan_unref_arg = NULL;
6747 		sc->bgscan_unref_arg_size = 0;
6748 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
6749 			task_add(systq, &sc->init_task);
6750 	}
6751 	refcnt_rele_wake(&sc->task_refs);
6752 	splx(s);
6753 }
6754 
6755 int
6756 iwx_umac_scan_abort(struct iwx_softc *sc)
6757 {
6758 	struct iwx_umac_scan_abort cmd = { 0 };
6759 
6760 	return iwx_send_cmd_pdu(sc,
6761 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
6762 	    0, sizeof(cmd), &cmd);
6763 }
6764 
6765 int
6766 iwx_scan_abort(struct iwx_softc *sc)
6767 {
6768 	int err;
6769 
6770 	err = iwx_umac_scan_abort(sc);
6771 	if (err == 0)
6772 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6773 	return err;
6774 }
6775 
6776 int
6777 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6778 {
6779 	int err;
6780 
6781 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6782 
6783 	/*
6784 	 * Non-QoS frames use the "MGMT" TID and queue.
6785 	 * Other TIDs and data queues are reserved for QoS data frames.
6786 	 */
6787 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
6788 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
6789 	if (err) {
6790 		printf("%s: could not enable Tx queue %d (error %d)\n",
6791 		    DEVNAME(sc), sc->first_data_qid, err);
6792 		return err;
6793 	}
6794 
6795 	return 0;
6796 }
6797 
6798 int
6799 iwx_rs_rval2idx(uint8_t rval)
6800 {
6801 	/* Firmware expects indices which match our 11g rate set. */
6802 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
6803 	int i;
6804 
6805 	for (i = 0; i < rs->rs_nrates; i++) {
6806 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6807 			return i;
6808 	}
6809 
6810 	return -1;
6811 }
6812 
6813 uint16_t
6814 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
6815 {
6816 	struct ieee80211com *ic = &sc->sc_ic;
6817 	const struct ieee80211_ht_rateset *rs;
6818 	uint16_t htrates = 0;
6819 	int mcs;
6820 
6821 	rs = &ieee80211_std_ratesets_11n[rsidx];
6822 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
6823 		if (!isset(ni->ni_rxmcs, mcs) ||
6824 		    !isset(ic->ic_sup_mcs, mcs))
6825 			continue;
6826 		htrates |= (1 << (mcs - rs->min_mcs));
6827 	}
6828 
6829 	return htrates;
6830 }
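/*
 * Configure the firmware's rate-scaling algorithm (TLC) for this
 * station. The firmware then selects Tx rates on its own and notifies
 * us of its choices; see iwx_rs_update().
 */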
6831 
6832 int
6833 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
6834 {
6835 	struct ieee80211_node *ni = &in->in_ni;
6836 	struct ieee80211_rateset *rs = &ni->ni_rates;
6837 	struct iwx_tlc_config_cmd cfg_cmd;
6838 	uint32_t cmd_id;
6839 	int i;
6840 	size_t cmd_size = sizeof(cfg_cmd);
6841 
6842 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
6843 
6844 	for (i = 0; i < rs->rs_nrates; i++) {
6845 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6846 		int idx = iwx_rs_rval2idx(rval);
6847 		if (idx == -1)
6848 			return EINVAL;
6849 		cfg_cmd.non_ht_rates |= (1 << idx);
6850 	}
6851 
6852 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6853 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
6854 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
6855 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO);
6856 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
6857 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO2);
6858 	} else
6859 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
6860 
6861 	cfg_cmd.sta_id = IWX_STATION_ID;
6862 	if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6863 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
6864 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
6865 	else
6866 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
6867 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
6868 	cfg_cmd.max_mpdu_len = 3839;
	if (ieee80211_node_supports_ht_sgi20(ni))
		cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
	if (ieee80211_node_supports_ht_sgi40(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_40MHZ);
6873 
6874 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
6875 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
6876 }
6877 
6878 void
6879 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
6880 {
6881 	struct ieee80211com *ic = &sc->sc_ic;
6882 	struct ieee80211_node *ni = ic->ic_bss;
6883 	struct ieee80211_rateset *rs = &ni->ni_rates;
6884 	uint32_t rate_n_flags;
6885 	int i;
6886 
6887 	if (notif->sta_id != IWX_STATION_ID ||
6888 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
6889 		return;
6890 
6891 	rate_n_flags = le32toh(notif->rate);
6892 	if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
6893 		ni->ni_txmcs = (rate_n_flags &
6894 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
6895 		    IWX_RATE_HT_MCS_NSS_MSK));
6896 	} else {
6897 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
6898 		uint8_t rval = 0;
6899 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
6900 			if (iwx_rates[i].plcp == plcp) {
6901 				rval = iwx_rates[i].rate;
6902 				break;
6903 			}
6904 		}
6905 		if (rval) {
6906 			uint8_t rv;
6907 			for (i = 0; i < rs->rs_nrates; i++) {
6908 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
6909 				if (rv == rval) {
6910 					ni->ni_txrate = i;
6911 					break;
6912 				}
6913 			}
6914 		}
6915 	}
6916 }
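/*
 * Switch a PHY context to another channel. With CDB-capable firmware
 * a context apparently cannot be modified across a band change, so it
 * is removed and re-added on the new band instead.
 */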
6917 
6918 int
6919 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
6920     struct ieee80211_channel *chan, uint8_t chains_static,
6921     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco)
6922 {
6923 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
6924 	int err;
6925 
6926 	if (isset(sc->sc_enabled_capa,
6927 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
6928 	    (phyctxt->channel->ic_flags & band_flags) !=
6929 	    (chan->ic_flags & band_flags)) {
6930 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6931 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco);
6932 		if (err) {
6933 			printf("%s: could not remove PHY context "
6934 			    "(error %d)\n", DEVNAME(sc), err);
6935 			return err;
6936 		}
6937 		phyctxt->channel = chan;
6938 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6939 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco);
6940 		if (err) {
6941 			printf("%s: could not add PHY context "
6942 			    "(error %d)\n", DEVNAME(sc), err);
6943 			return err;
6944 		}
6945 	} else {
6946 		phyctxt->channel = chan;
6947 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6948 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco);
6949 		if (err) {
6950 			printf("%s: could not update PHY context (error %d)\n",
6951 			    DEVNAME(sc), err);
6952 			return err;
6953 		}
6954 	}
6955 
6956 	phyctxt->sco = sco;
6957 	return 0;
6958 }
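
/*
 * Callers update a PHY context along these lines (sketch based on
 * the calls in iwx_auth() and iwx_run() below):
 *
 *	err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
 *	    in->in_phyctxt->channel, chains, chains, 0, sco);
 *
 * Firmware with the BINDING_CDB_SUPPORT capability apparently cannot
 * switch a PHY context between the 2GHz and 5GHz bands via MODIFY,
 * hence the REMOVE+ADD sequence above when the band changes.
 */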
6959 
6960 int
6961 iwx_auth(struct iwx_softc *sc)
6962 {
6963 	struct ieee80211com *ic = &sc->sc_ic;
6964 	struct iwx_node *in = (void *)ic->ic_bss;
6965 	uint32_t duration;
6966 	int generation = sc->sc_generation, err;
6967 
6968 	splassert(IPL_NET);
6969 
6970 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6971 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6972 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
6973 		if (err)
6974 			return err;
6975 	} else {
6976 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6977 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
6978 		if (err)
6979 			return err;
6980 	}
6981 	in->in_phyctxt = &sc->sc_phyctxt[0];
6982 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
6983 
6984 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
6985 	if (err) {
6986 		printf("%s: could not add MAC context (error %d)\n",
6987 		    DEVNAME(sc), err);
6988 		return err;
6989 	}
6990 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
6991 
6992 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
6993 	if (err) {
6994 		printf("%s: could not add binding (error %d)\n",
6995 		    DEVNAME(sc), err);
6996 		goto rm_mac_ctxt;
6997 	}
6998 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
6999 
7000 	err = iwx_add_sta_cmd(sc, in, 0);
7001 	if (err) {
7002 		printf("%s: could not add sta (error %d)\n",
7003 		    DEVNAME(sc), err);
7004 		goto rm_binding;
7005 	}
7006 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
7007 
7008 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7009 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
7010 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
7011 		    IWX_TX_RING_COUNT);
7012 		if (err)
7013 			goto rm_sta;
7014 		return 0;
7015 	}
7016 
7017 	err = iwx_enable_mgmt_queue(sc);
7018 	if (err)
7019 		goto rm_sta;
7020 
7021 	err = iwx_clear_statistics(sc);
7022 	if (err)
7023 		goto rm_sta;
7024 
7025 	/*
7026 	 * Prevent the FW from wandering off channel during association
7027 	 * by "protecting" the session with a time event.
7028 	 */
7029 	if (in->in_ni.ni_intval)
7030 		duration = in->in_ni.ni_intval * 2;
7031 	else
7032 		duration = IEEE80211_DUR_TU;
7033 	return iwx_schedule_session_protection(sc, in, duration);
7034 rm_sta:
7035 	if (generation == sc->sc_generation) {
7036 		iwx_rm_sta_cmd(sc, in);
7037 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7038 	}
7039 rm_binding:
7040 	if (generation == sc->sc_generation) {
7041 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7042 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7043 	}
7044 rm_mac_ctxt:
7045 	if (generation == sc->sc_generation) {
7046 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7047 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7048 	}
7049 	return err;
7050 }
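
/*
 * Note on the unwind labels above: sc_generation is bumped by
 * iwx_init() and iwx_stop(). If either ran while iwx_auth() slept
 * waiting for a firmware response, the contexts created here are
 * already gone, so the rm_* labels roll back driver and firmware
 * state only when the generation count is unchanged.
 */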
7051 
7052 int
7053 iwx_deauth(struct iwx_softc *sc)
7054 {
7055 	struct ieee80211com *ic = &sc->sc_ic;
7056 	struct iwx_node *in = (void *)ic->ic_bss;
7057 	int err;
7058 
7059 	splassert(IPL_NET);
7060 
7061 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7062 		err = iwx_rm_sta(sc, in);
7063 		if (err)
7064 			return err;
7065 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7066 	}
7067 
7068 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7069 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7070 		if (err) {
7071 			printf("%s: could not remove binding (error %d)\n",
7072 			    DEVNAME(sc), err);
7073 			return err;
7074 		}
7075 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7076 	}
7077 
7078 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7079 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7080 		if (err) {
7081 			printf("%s: could not remove MAC context (error %d)\n",
7082 			    DEVNAME(sc), err);
7083 			return err;
7084 		}
7085 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7086 	}
7087 
7088 	/* Move unused PHY context to a default channel. */
7089 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7090 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
7091 	if (err)
7092 		return err;
7093 
7094 	return 0;
7095 }
7096 
7097 int
7098 iwx_run(struct iwx_softc *sc)
7099 {
7100 	struct ieee80211com *ic = &sc->sc_ic;
7101 	struct iwx_node *in = (void *)ic->ic_bss;
7102 	struct ieee80211_node *ni = &in->in_ni;
7103 	int err;
7104 
7105 	splassert(IPL_NET);
7106 
7107 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7108 		/* Add a MAC context and a sniffing STA. */
7109 		err = iwx_auth(sc);
7110 		if (err)
7111 			return err;
7112 	}
7113 
7114 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
7115 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7116 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7117 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7118 		    in->in_phyctxt->channel, chains, chains,
7119 		    0, IEEE80211_HTOP0_SCO_SCN);
7120 		if (err) {
7121 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7122 			return err;
7123 		}
7124 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7125 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7126 		uint8_t sco;
7127 		if (ieee80211_node_supports_ht_chan40(ni))
7128 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
7129 		else
7130 			sco = IEEE80211_HTOP0_SCO_SCN;
7131 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7132 		    in->in_phyctxt->channel, chains, chains,
7133 		    0, sco);
7134 		if (err) {
7135 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7136 			return err;
7137 		}
7138 	}
7139 
7140 	/* We have now been assigned an associd by the AP. */
7141 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7142 	if (err) {
7143 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7144 		return err;
7145 	}
7146 
7147 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7148 	if (err) {
7149 		printf("%s: could not set sf full on (error %d)\n",
7150 		    DEVNAME(sc), err);
7151 		return err;
7152 	}
7153 
7154 	err = iwx_allow_mcast(sc);
7155 	if (err) {
7156 		printf("%s: could not allow mcast (error %d)\n",
7157 		    DEVNAME(sc), err);
7158 		return err;
7159 	}
7160 
7161 	err = iwx_power_update_device(sc);
7162 	if (err) {
7163 		printf("%s: could not send power command (error %d)\n",
7164 		    DEVNAME(sc), err);
7165 		return err;
7166 	}
7167 #ifdef notyet
7168 	/*
7169 	 * Disabled for now. Default beacon filter settings
7170 	 * prevent net80211 from getting ERP and HT protection
7171 	 * updates from beacons.
7172 	 */
7173 	err = iwx_enable_beacon_filter(sc, in);
7174 	if (err) {
7175 		printf("%s: could not enable beacon filter\n",
7176 		    DEVNAME(sc));
7177 		return err;
7178 	}
7179 #endif
7180 	err = iwx_power_mac_update_mode(sc, in);
7181 	if (err) {
7182 		printf("%s: could not update MAC power (error %d)\n",
7183 		    DEVNAME(sc), err);
7184 		return err;
7185 	}
7186 
7187 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7188 		return 0;
7189 
7190 	/* Start at lowest available bit-rate. Firmware will raise. */
7191 	in->in_ni.ni_txrate = 0;
7192 	in->in_ni.ni_txmcs = 0;
7193 
7194 	err = iwx_rs_init(sc, in);
7195 	if (err) {
7196 		printf("%s: could not init rate scaling (error %d)\n",
7197 		    DEVNAME(sc), err);
7198 		return err;
7199 	}
7200 
7201 	return 0;
7202 }
7203 
7204 int
7205 iwx_run_stop(struct iwx_softc *sc)
7206 {
7207 	struct ieee80211com *ic = &sc->sc_ic;
7208 	struct iwx_node *in = (void *)ic->ic_bss;
7209 	struct ieee80211_node *ni = &in->in_ni;
7210 	int err, i;
7211 
7212 	splassert(IPL_NET);
7213 
7214 	err = iwx_flush_sta(sc, in);
7215 	if (err) {
7216 		printf("%s: could not flush Tx path (error %d)\n",
7217 		    DEVNAME(sc), err);
7218 		return err;
7219 	}
7220 
7221 	/*
7222 	 * Stop Rx BA sessions now. We cannot rely on the BA task
7223 	 * for this when moving out of RUN state since it runs in a
7224 	 * separate thread.
7225 	 * Note that in->in_ni (struct ieee80211_node) already represents
7226 	 * our new access point in case we are roaming between APs.
7227 	 * This means we cannot rely on struct ieee80211_node to tell
7228 	 * us which BA sessions exist.
7229 	 */
7230 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
7231 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
7232 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
7233 			continue;
7234 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
7235 	}
7236 
7237 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
7238 	if (err)
7239 		return err;
7240 
7241 	err = iwx_disable_beacon_filter(sc);
7242 	if (err) {
7243 		printf("%s: could not disable beacon filter (error %d)\n",
7244 		    DEVNAME(sc), err);
7245 		return err;
7246 	}
7247 
7248 	/* Mark station as disassociated. */
7249 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
7250 	if (err) {
7251 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7252 		return err;
7253 	}
7254 
7255 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
7256 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7257 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7258 		   in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
7259 		if (err) {
7260 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7261 			return err;
7262 		}
7263 	}
7264 
7265 	return 0;
7266 }
7267 
7268 struct ieee80211_node *
7269 iwx_node_alloc(struct ieee80211com *ic)
7270 {
7271 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
7272 }
7273 
7274 int
7275 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7276     struct ieee80211_key *k)
7277 {
7278 	struct iwx_softc *sc = ic->ic_softc;
7279 	struct iwx_node *in = (void *)ni;
7280 	struct iwx_setkey_task_arg *a;
7281 	int err;
7282 
7283 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7284 		/* Fall back to software crypto for other ciphers. */
7285 		err = ieee80211_set_key(ic, ni, k);
7286 		if (!err && (k->k_flags & IEEE80211_KEY_GROUP))
7287 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7288 		return err;
7289 	}
7290 
7291 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
7292 		return ENOSPC;
7293 
7294 	a = &sc->setkey_arg[sc->setkey_cur];
7295 	a->sta_id = IWX_STATION_ID;
7296 	a->ni = ni;
7297 	a->k = k;
7298 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
7299 	sc->setkey_nkeys++;
7300 	iwx_add_task(sc, systq, &sc->setkey_task);
7301 	return EBUSY;
7302 }
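
/*
 * setkey_arg is a small producer/consumer ring: iwx_set_key() above
 * enqueues at setkey_cur and iwx_setkey_task() below dequeues at
 * setkey_tail, with setkey_nkeys tracking occupancy. Returning EBUSY
 * signals net80211 that the driver will finish installing the key
 * asynchronously.
 */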
7303 
7304 int
7305 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
7306     struct ieee80211_key *k)
7307 {
7308 	struct ieee80211com *ic = &sc->sc_ic;
7309 	struct iwx_node *in = (void *)ni;
7310 	struct iwx_add_sta_key_cmd cmd;
7311 	uint32_t status;
7312 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
7313 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
7314 	int err;
7315 
7316 	/*
7317 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
7318 	 * Currently we only implement station mode where 'ni' is always
7319 	 * ic->ic_bss so there is no need to validate arguments beyond this:
7320 	 */
7321 	KASSERT(ni == ic->ic_bss);
7322 
7323 	memset(&cmd, 0, sizeof(cmd));
7324 
7325 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
7326 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
7327 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7328 	    IWX_STA_KEY_FLG_KEYID_MSK));
7329 	if (k->k_flags & IEEE80211_KEY_GROUP) {
7330 		cmd.common.key_offset = 1;
7331 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
7332 	} else
7333 		cmd.common.key_offset = 0;
7334 
7335 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7336 	cmd.common.sta_id = sta_id;
7337 
7338 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
7339 
7340 	status = IWX_ADD_STA_SUCCESS;
7341 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
7342 	    &status);
7343 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
7344 		return ECANCELED;
7345 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
7346 		err = EIO;
7347 	if (err) {
7348 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
7349 		    IEEE80211_REASON_AUTH_LEAVE);
7350 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
7351 		return err;
7352 	}
7353 
7354 	if (k->k_flags & IEEE80211_KEY_GROUP)
7355 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7356 	else
7357 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
7358 
7359 	if ((in->in_flags & want_keymask) == want_keymask) {
7360 		DPRINTF(("marking port %s valid\n",
7361 		    ether_sprintf(ni->ni_macaddr)));
7362 		ni->ni_port_valid = 1;
7363 		ieee80211_set_link_state(ic, LINK_STATE_UP);
7364 	}
7365 
7366 	return 0;
7367 }
7368 
7369 void
7370 iwx_setkey_task(void *arg)
7371 {
7372 	struct iwx_softc *sc = arg;
7373 	struct iwx_setkey_task_arg *a;
7374 	int err = 0, s = splnet();
7375 
7376 	while (sc->setkey_nkeys > 0) {
7377 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
7378 			break;
7379 		a = &sc->setkey_arg[sc->setkey_tail];
7380 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
7381 		a->sta_id = 0;
7382 		a->ni = NULL;
7383 		a->k = NULL;
7384 		sc->setkey_tail = (sc->setkey_tail + 1) %
7385 		    nitems(sc->setkey_arg);
7386 		sc->setkey_nkeys--;
7387 	}
7388 
7389 	refcnt_rele_wake(&sc->task_refs);
7390 	splx(s);
7391 }
7392 
7393 void
7394 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7395     struct ieee80211_key *k)
7396 {
7397 	struct iwx_softc *sc = ic->ic_softc;
7398 	struct iwx_add_sta_key_cmd cmd;
7399 
7400 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7401 		/* Fall back to software crypto for other ciphers. */
7402 		ieee80211_delete_key(ic, ni, k);
7403 		return;
7404 	}
7405 
7406 	memset(&cmd, 0, sizeof(cmd));
7407 
7408 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
7409 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
7410 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7411 	    IWX_STA_KEY_FLG_KEYID_MSK));
7412 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7413 	if (k->k_flags & IEEE80211_KEY_GROUP)
7414 		cmd.common.key_offset = 1;
7415 	else
7416 		cmd.common.key_offset = 0;
7417 	cmd.common.sta_id = IWX_STATION_ID;
7418 
7419 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
7420 }
7421 
7422 int
7423 iwx_media_change(struct ifnet *ifp)
7424 {
7425 	struct iwx_softc *sc = ifp->if_softc;
7426 	struct ieee80211com *ic = &sc->sc_ic;
7427 	uint8_t rate, ridx;
7428 	int err;
7429 
7430 	err = ieee80211_media_change(ifp);
7431 	if (err != ENETRESET)
7432 		return err;
7433 
7434 	if (ic->ic_fixed_mcs != -1)
7435 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
7436 	else if (ic->ic_fixed_rate != -1) {
7437 		rate = ic->ic_sup_rates[ic->ic_curmode].
7438 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7439 		/* Map 802.11 rate to HW rate index. */
7440 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
7441 			if (iwx_rates[ridx].rate == rate)
7442 				break;
7443 		sc->sc_fixed_ridx = ridx;
7444 	}
7445 
7446 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7447 	    (IFF_UP | IFF_RUNNING)) {
7448 		iwx_stop(ifp);
7449 		err = iwx_init(ifp);
7450 	}
7451 	return err;
7452 }
7453 
7454 void
7455 iwx_newstate_task(void *psc)
7456 {
7457 	struct iwx_softc *sc = (struct iwx_softc *)psc;
7458 	struct ieee80211com *ic = &sc->sc_ic;
7459 	enum ieee80211_state nstate = sc->ns_nstate;
7460 	enum ieee80211_state ostate = ic->ic_state;
7461 	int arg = sc->ns_arg;
7462 	int err = 0, s = splnet();
7463 
7464 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7465 		/* iwx_stop() is waiting for us. */
7466 		refcnt_rele_wake(&sc->task_refs);
7467 		splx(s);
7468 		return;
7469 	}
7470 
7471 	if (ostate == IEEE80211_S_SCAN) {
7472 		if (nstate == ostate) {
7473 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
7474 				refcnt_rele_wake(&sc->task_refs);
7475 				splx(s);
7476 				return;
7477 			}
7478 			/* Firmware is no longer scanning. Do another scan. */
7479 			goto next_scan;
7480 		}
7481 	}
7482 
7483 	if (nstate <= ostate) {
7484 		switch (ostate) {
7485 		case IEEE80211_S_RUN:
7486 			err = iwx_run_stop(sc);
7487 			if (err)
7488 				goto out;
7489 			/* FALLTHROUGH */
7490 		case IEEE80211_S_ASSOC:
7491 		case IEEE80211_S_AUTH:
7492 			if (nstate <= IEEE80211_S_AUTH) {
7493 				err = iwx_deauth(sc);
7494 				if (err)
7495 					goto out;
7496 			}
7497 			/* FALLTHROUGH */
7498 		case IEEE80211_S_SCAN:
7499 		case IEEE80211_S_INIT:
7500 			break;
7501 		}
7502 
7503 		/* Die now if iwx_stop() was called while we were sleeping. */
7504 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7505 			refcnt_rele_wake(&sc->task_refs);
7506 			splx(s);
7507 			return;
7508 		}
7509 	}
7510 
7511 	switch (nstate) {
7512 	case IEEE80211_S_INIT:
7513 		break;
7514 
7515 	case IEEE80211_S_SCAN:
7516 next_scan:
7517 		err = iwx_scan(sc);
7518 		if (err)
7519 			break;
7520 		refcnt_rele_wake(&sc->task_refs);
7521 		splx(s);
7522 		return;
7523 
7524 	case IEEE80211_S_AUTH:
7525 		err = iwx_auth(sc);
7526 		break;
7527 
7528 	case IEEE80211_S_ASSOC:
7529 		break;
7530 
7531 	case IEEE80211_S_RUN:
7532 		err = iwx_run(sc);
7533 		break;
7534 	}
7535 
7536 out:
7537 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7538 		if (err)
7539 			task_add(systq, &sc->init_task);
7540 		else
7541 			sc->sc_newstate(ic, nstate, arg);
7542 	}
7543 	refcnt_rele_wake(&sc->task_refs);
7544 	splx(s);
7545 }
7546 
7547 int
7548 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7549 {
7550 	struct ifnet *ifp = IC2IFP(ic);
7551 	struct iwx_softc *sc = ifp->if_softc;
7552 
7553 	/*
7554 	 * Prevent attempts to transition towards the same state, unless
7555 	 * we are scanning, in which case a SCAN -> SCAN transition
7556 	 * triggers another scan iteration. AUTH -> AUTH transitions are
7557 	 * also allowed in order to support band-steering.
7558 	 */
7559 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
7560 	    nstate != IEEE80211_S_AUTH)
7561 		return 0;
7562 
7563 	if (ic->ic_state == IEEE80211_S_RUN) {
7564 		iwx_del_task(sc, systq, &sc->ba_task);
7565 		iwx_del_task(sc, systq, &sc->setkey_task);
7566 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
7567 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
7568 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
7569 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
7570 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
7571 	}
7572 
7573 	sc->ns_nstate = nstate;
7574 	sc->ns_arg = arg;
7575 
7576 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7577 
7578 	return 0;
7579 }
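
/*
 * State transitions are deferred to newstate_task because they sleep
 * while waiting for firmware responses, which is not possible in the
 * context net80211 calls us from. The chained net80211 handler
 * sc->sc_newstate() only runs from the task, after the firmware side
 * of the transition has succeeded.
 */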
7580 
7581 void
7582 iwx_endscan(struct iwx_softc *sc)
7583 {
7584 	struct ieee80211com *ic = &sc->sc_ic;
7585 
7586 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
7587 		return;
7588 
7589 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7590 	ieee80211_end_scan(&ic->ic_if);
7591 }
7592 
7593 /*
7594  * Aging and idle timeouts for the different possible scenarios
7595  * in default configuration
7596  */
7597 static const uint32_t
7598 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7599 	{
7600 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7601 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7602 	},
7603 	{
7604 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
7605 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7606 	},
7607 	{
7608 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
7609 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
7610 	},
7611 	{
7612 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
7613 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
7614 	},
7615 	{
7616 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
7617 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
7618 	},
7619 };
7620 
7621 /*
7622  * Aging and idle timeouts for the different possible scenarios
7623  * in single BSS MAC configuration.
7624  */
7625 static const uint32_t
7626 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7627 	{
7628 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
7629 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
7630 	},
7631 	{
7632 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
7633 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
7634 	},
7635 	{
7636 		htole32(IWX_SF_MCAST_AGING_TIMER),
7637 		htole32(IWX_SF_MCAST_IDLE_TIMER)
7638 	},
7639 	{
7640 		htole32(IWX_SF_BA_AGING_TIMER),
7641 		htole32(IWX_SF_BA_IDLE_TIMER)
7642 	},
7643 	{
7644 		htole32(IWX_SF_TX_RE_AGING_TIMER),
7645 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
7646 	},
7647 };
7648 
7649 void
7650 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
7651     struct ieee80211_node *ni)
7652 {
7653 	int i, j, watermark;
7654 
7655 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
7656 
7657 	/*
7658 	 * If we are in association flow - check antenna configuration
7659 	 * capabilities of the AP station, and choose the watermark accordingly.
7660 	 */
7661 	if (ni) {
7662 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7663 			if (ni->ni_rxmcs[1] != 0)
7664 				watermark = IWX_SF_W_MARK_MIMO2;
7665 			else
7666 				watermark = IWX_SF_W_MARK_SISO;
7667 		} else {
7668 			watermark = IWX_SF_W_MARK_LEGACY;
7669 		}
7670 	/* default watermark value for unassociated mode. */
7671 	} else {
7672 		watermark = IWX_SF_W_MARK_MIMO2;
7673 	}
7674 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
7675 
7676 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
7677 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
7678 			sf_cmd->long_delay_timeouts[i][j] =
7679 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
7680 		}
7681 	}
7682 
7683 	if (ni) {
7684 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
7685 		       sizeof(iwx_sf_full_timeout));
7686 	} else {
7687 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
7688 		       sizeof(iwx_sf_full_timeout_def));
7689 	}
7690 
7691 }
7692 
7693 int
7694 iwx_sf_config(struct iwx_softc *sc, int new_state)
7695 {
7696 	struct ieee80211com *ic = &sc->sc_ic;
7697 	struct iwx_sf_cfg_cmd sf_cmd = {
7698 		.state = htole32(new_state),
7699 	};
7700 	int err = 0;
7701 
7702 	switch (new_state) {
7703 	case IWX_SF_UNINIT:
7704 	case IWX_SF_INIT_OFF:
7705 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
7706 		break;
7707 	case IWX_SF_FULL_ON:
7708 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7709 		break;
7710 	default:
7711 		return EINVAL;
7712 	}
7713 
7714 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
7715 				   sizeof(sf_cmd), &sf_cmd);
7716 	return err;
7717 }
7718 
7719 int
7720 iwx_send_bt_init_conf(struct iwx_softc *sc)
7721 {
7722 	struct iwx_bt_coex_cmd bt_cmd;
7723 
7724 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
7725 	bt_cmd.enabled_modules = 0;
7726 
7727 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
7728 	    &bt_cmd);
7729 }
7730 
7731 int
7732 iwx_send_soc_conf(struct iwx_softc *sc)
7733 {
7734 	struct iwx_soc_configuration_cmd cmd;
7735 	int err;
7736 	uint32_t cmd_id, flags = 0;
7737 
7738 	memset(&cmd, 0, sizeof(cmd));
7739 
7740 	/*
7741 	 * In VER_1 of this command, the discrete value is considered
7742 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
7743 	 * values in VER_1, this is backwards-compatible with VER_2,
7744 	 * as long as we don't set any other flag bits.
7745 	 */
7746 	if (!sc->sc_integrated) { /* VER_1 */
7747 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
7748 	} else { /* VER_2 */
7749 		uint8_t scan_cmd_ver;
7750 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
7751 			flags |= (sc->sc_ltr_delay &
7752 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
7753 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
7754 		    IWX_SCAN_REQ_UMAC);
7755 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
7756 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
7757 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
7758 	}
7759 	cmd.flags = htole32(flags);
7760 
7761 	cmd.latency = htole32(sc->sc_xtal_latency);
7762 
7763 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
7764 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7765 	if (err)
7766 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
7767 	return err;
7768 }
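
/*
 * A minimal sketch of the compatibility argument above, assuming
 * IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE is bit 0 (its actual value lives
 * in if_iwxreg.h): VER_1 firmware reads flags as the integer 0
 * (integrated) or 1 (discrete), while VER_2 firmware reads the same
 * values as a bitmask with at most bit 0 set, so the two encodings
 * coincide as long as no other flag bits accompany DISCRETE.
 */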
7769 
7770 int
7771 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
7772 {
7773 	struct iwx_mcc_update_cmd mcc_cmd;
7774 	struct iwx_host_cmd hcmd = {
7775 		.id = IWX_MCC_UPDATE_CMD,
7776 		.flags = IWX_CMD_WANT_RESP,
7777 		.data = { &mcc_cmd },
7778 	};
7779 	struct iwx_rx_packet *pkt;
7780 	struct iwx_mcc_update_resp *resp;
7781 	size_t resp_len;
7782 	int err;
7783 
7784 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7785 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7786 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7787 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7788 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
7789 	else
7790 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
7791 
7792 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
7793 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
7794 
7795 	err = iwx_send_cmd(sc, &hcmd);
7796 	if (err)
7797 		return err;
7798 
7799 	pkt = hcmd.resp_pkt;
7800 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
7801 		err = EIO;
7802 		goto out;
7803 	}
7804 
7805 	resp_len = iwx_rx_packet_payload_len(pkt);
7806 	if (resp_len < sizeof(*resp)) {
7807 		err = EIO;
7808 		goto out;
7809 	}
7810 
7811 	resp = (void *)pkt->data;
7812 	if (resp_len != sizeof(*resp) +
7813 	    resp->n_channels * sizeof(resp->channels[0])) {
7814 		err = EIO;
7815 		goto out;
7816 	}
7817 
7818 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%x n_channels=%u\n",
7819 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
7820 
7821 	/* Update channel map for net80211 and our scan configuration. */
7822 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
7823 
7824 out:
7825 	iwx_free_resp(sc, &hcmd);
7826 
7827 	return err;
7828 }
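
/*
 * The MCC update response is variable-length: a fixed header
 * followed by n_channels channel entries. The two-step length check
 * above first validates that the fixed header is present (so that
 * n_channels itself was read from valid memory) and only then checks
 * the exact total length, preventing an out-of-bounds read of the
 * response buffer.
 */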
7829 
7830 int
7831 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
7832 {
7833 	struct iwx_temp_report_ths_cmd cmd;
7834 	int err;
7835 
7836 	/*
7837 	 * In order to give responsibility for critical-temperature-kill
7838 	 * and TX backoff to FW we need to send an empty temperature
7839 	 * reporting command at init time.
7840 	 */
7841 	memset(&cmd, 0, sizeof(cmd));
7842 
7843 	err = iwx_send_cmd_pdu(sc,
7844 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
7845 	    0, sizeof(cmd), &cmd);
7846 	if (err)
7847 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
7848 		    DEVNAME(sc), err);
7849 
7850 	return err;
7851 }
7852 
7853 int
7854 iwx_init_hw(struct iwx_softc *sc)
7855 {
7856 	struct ieee80211com *ic = &sc->sc_ic;
7857 	int err, i;
7858 
7859 	err = iwx_run_init_mvm_ucode(sc, 0);
7860 	if (err)
7861 		return err;
7862 
7863 	if (!iwx_nic_lock(sc))
7864 		return EBUSY;
7865 
7866 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
7867 	if (err) {
7868 		printf("%s: could not init tx ant config (error %d)\n",
7869 		    DEVNAME(sc), err);
7870 		goto err;
7871 	}
7872 
7873 	if (sc->sc_tx_with_siso_diversity) {
7874 		err = iwx_send_phy_cfg_cmd(sc);
7875 		if (err) {
7876 			printf("%s: could not send phy config (error %d)\n",
7877 			    DEVNAME(sc), err);
7878 			goto err;
7879 		}
7880 	}
7881 
7882 	err = iwx_send_bt_init_conf(sc);
7883 	if (err) {
7884 		printf("%s: could not init bt coex (error %d)\n",
7885 		    DEVNAME(sc), err);
7886 		goto err;
7887 	}
7888 
7889 	err = iwx_send_soc_conf(sc);
7890 	if (err)
7891 		goto err;
7892 
7893 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7894 		err = iwx_send_dqa_cmd(sc);
7895 		if (err)
7896 			goto err;
7897 	}
7898 
7899 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
7900 		/*
7901 		 * The channel used here isn't relevant as it's
7902 		 * going to be overwritten in the other flows.
7903 		 * For now use the first channel we have.
7904 		 */
7905 		sc->sc_phyctxt[i].id = i;
7906 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7907 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7908 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN);
7909 		if (err) {
7910 			printf("%s: could not add phy context %d (error %d)\n",
7911 			    DEVNAME(sc), i, err);
7912 			goto err;
7913 		}
7914 	}
7915 
7916 	err = iwx_config_ltr(sc);
7917 	if (err) {
7918 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7919 		    DEVNAME(sc), err);
7920 	}
7921 
7922 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
7923 		err = iwx_send_temp_report_ths_cmd(sc);
7924 		if (err)
7925 			goto err;
7926 	}
7927 
7928 	err = iwx_power_update_device(sc);
7929 	if (err) {
7930 		printf("%s: could not send power command (error %d)\n",
7931 		    DEVNAME(sc), err);
7932 		goto err;
7933 	}
7934 
7935 	if (sc->sc_nvm.lar_enabled) {
7936 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
7937 		if (err) {
7938 			printf("%s: could not init LAR (error %d)\n",
7939 			    DEVNAME(sc), err);
7940 			goto err;
7941 		}
7942 	}
7943 
7944 	err = iwx_config_umac_scan_reduced(sc);
7945 	if (err) {
7946 		printf("%s: could not configure scan (error %d)\n",
7947 		    DEVNAME(sc), err);
7948 		goto err;
7949 	}
7950 
7951 	err = iwx_disable_beacon_filter(sc);
7952 	if (err) {
7953 		printf("%s: could not disable beacon filter (error %d)\n",
7954 		    DEVNAME(sc), err);
7955 		goto err;
7956 	}
7957 
7958 err:
7959 	iwx_nic_unlock(sc);
7960 	return err;
7961 }
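
/*
 * Note that once iwx_nic_lock() succeeds in iwx_init_hw(), every
 * failure path must leave through the err label above so that
 * iwx_nic_unlock() runs; returning directly would leak the NIC
 * access lock.
 */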
7962 
7963 /* Allow multicast from our BSSID. */
7964 int
7965 iwx_allow_mcast(struct iwx_softc *sc)
7966 {
7967 	struct ieee80211com *ic = &sc->sc_ic;
7968 	struct iwx_node *in = (void *)ic->ic_bss;
7969 	struct iwx_mcast_filter_cmd *cmd;
7970 	size_t size;
7971 	int err;
7972 
7973 	size = roundup(sizeof(*cmd), 4);
7974 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7975 	if (cmd == NULL)
7976 		return ENOMEM;
7977 	cmd->filter_own = 1;
7978 	cmd->port_id = 0;
7979 	cmd->count = 0;
7980 	cmd->pass_all = 1;
7981 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
7982 
7983 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
7984 	    0, size, cmd);
7985 	free(cmd, M_DEVBUF, size);
7986 	return err;
7987 }
7988 
7989 int
7990 iwx_init(struct ifnet *ifp)
7991 {
7992 	struct iwx_softc *sc = ifp->if_softc;
7993 	struct ieee80211com *ic = &sc->sc_ic;
7994 	int err, generation;
7995 
7996 	rw_assert_wrlock(&sc->ioctl_rwl);
7997 
7998 	generation = ++sc->sc_generation;
7999 
8000 	err = iwx_preinit(sc);
8001 	if (err)
8002 		return err;
8003 
8004 	err = iwx_start_hw(sc);
8005 	if (err) {
8006 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8007 		return err;
8008 	}
8009 
8010 	err = iwx_init_hw(sc);
8011 	if (err) {
8012 		if (generation == sc->sc_generation)
8013 			iwx_stop_device(sc);
8014 		return err;
8015 	}
8016 
8017 	if (sc->sc_nvm.sku_cap_11n_enable)
8018 		iwx_setup_ht_rates(sc);
8019 
8020 	KASSERT(sc->task_refs.refs == 0);
8021 	refcnt_init(&sc->task_refs);
8022 	ifq_clr_oactive(&ifp->if_snd);
8023 	ifp->if_flags |= IFF_RUNNING;
8024 
8025 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8026 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
8027 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
8028 		return 0;
8029 	}
8030 
8031 	ieee80211_begin_scan(ifp);
8032 
8033 	/*
8034 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
8035 	 * Wait until the transition to SCAN state has completed.
8036 	 */
8037 	do {
8038 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
8039 		    SEC_TO_NSEC(1));
8040 		if (generation != sc->sc_generation)
8041 			return ENXIO;
8042 		if (err) {
8043 			iwx_stop(ifp);
8044 			return err;
8045 		}
8046 	} while (ic->ic_state != IEEE80211_S_SCAN);
8047 
8048 	return 0;
8049 }
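
/*
 * iwx_init() deliberately returns only once the transition to SCAN
 * state has completed, so that a subsequent iwx_stop() cannot race
 * with a still-pending newstate_task. The generation check catches
 * the case where the interface was reset while we slept, in which
 * case this initialization attempt is obsolete.
 */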
8050 
8051 void
8052 iwx_start(struct ifnet *ifp)
8053 {
8054 	struct iwx_softc *sc = ifp->if_softc;
8055 	struct ieee80211com *ic = &sc->sc_ic;
8056 	struct ieee80211_node *ni;
8057 	struct ether_header *eh;
8058 	struct mbuf *m;
8059 
8060 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8061 		return;
8062 
8063 	for (;;) {
8064 		/* why isn't this done per-queue? */
8065 		if (sc->qfullmsk != 0) {
8066 			ifq_set_oactive(&ifp->if_snd);
8067 			break;
8068 		}
8069 
8070 		/* Don't queue additional frames while flushing Tx queues. */
8071 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
8072 			break;
8073 
8074 		/* need to send management frames even if we're not RUNning */
8075 		m = mq_dequeue(&ic->ic_mgtq);
8076 		if (m) {
8077 			ni = m->m_pkthdr.ph_cookie;
8078 			goto sendit;
8079 		}
8080 
8081 		if (ic->ic_state != IEEE80211_S_RUN ||
8082 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8083 			break;
8084 
8085 		m = ifq_dequeue(&ifp->if_snd);
8086 		if (!m)
8087 			break;
8088 		if (m->m_len < sizeof (*eh) &&
8089 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
8090 			ifp->if_oerrors++;
8091 			continue;
8092 		}
8093 #if NBPFILTER > 0
8094 		if (ifp->if_bpf != NULL)
8095 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8096 #endif
8097 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8098 			ifp->if_oerrors++;
8099 			continue;
8100 		}
8101 
8102  sendit:
8103 #if NBPFILTER > 0
8104 		if (ic->ic_rawbpf != NULL)
8105 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8106 #endif
8107 		if (iwx_tx(sc, m, ni) != 0) {
8108 			ieee80211_release_node(ic, ni);
8109 			ifp->if_oerrors++;
8110 			continue;
8111 		}
8112 
8113 		if (ifp->if_flags & IFF_UP)
8114 			ifp->if_timer = 1;
8115 	}
8116 
8117 	return;
8118 }
8119 
8120 void
8121 iwx_stop(struct ifnet *ifp)
8122 {
8123 	struct iwx_softc *sc = ifp->if_softc;
8124 	struct ieee80211com *ic = &sc->sc_ic;
8125 	struct iwx_node *in = (void *)ic->ic_bss;
8126 	int i, s = splnet();
8127 
8128 	rw_assert_wrlock(&sc->ioctl_rwl);
8129 
8130 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
8131 
8132 	/* Cancel scheduled tasks and let any stale tasks finish up. */
8133 	task_del(systq, &sc->init_task);
8134 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8135 	iwx_del_task(sc, systq, &sc->ba_task);
8136 	iwx_del_task(sc, systq, &sc->setkey_task);
8137 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8138 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8139 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8140 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8141 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
8142 	KASSERT(sc->task_refs.refs >= 1);
8143 	refcnt_finalize(&sc->task_refs, "iwxstop");
8144 
8145 	iwx_stop_device(sc);
8146 
8147 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8148 	sc->bgscan_unref_arg = NULL;
8149 	sc->bgscan_unref_arg_size = 0;
8150 
8151 	/* Reset soft state. */
8152 
8153 	sc->sc_generation++;
8154 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8155 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8156 		sc->sc_cmd_resp_pkt[i] = NULL;
8157 		sc->sc_cmd_resp_len[i] = 0;
8158 	}
8159 	ifp->if_flags &= ~IFF_RUNNING;
8160 	ifq_clr_oactive(&ifp->if_snd);
8161 
8162 	in->in_phyctxt = NULL;
8163 	in->in_flags = 0;
8164 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
8165 
8166 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8167 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8168 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8169 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8170 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8171 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8172 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8173 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8174 
8175 	sc->sc_rx_ba_sessions = 0;
8176 	sc->ba_rx.start_tidmask = 0;
8177 	sc->ba_rx.stop_tidmask = 0;
8178 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
8179 	sc->ba_tx.start_tidmask = 0;
8180 	sc->ba_tx.stop_tidmask = 0;
8181 
8182 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8183 	sc->ns_nstate = IEEE80211_S_INIT;
8184 
8185 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8186 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8187 		iwx_clear_reorder_buffer(sc, rxba);
8188 	}
8189 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
8190 	ifp->if_timer = 0;
8191 
8192 	splx(s);
8193 }
8194 
8195 void
8196 iwx_watchdog(struct ifnet *ifp)
8197 {
8198 	struct iwx_softc *sc = ifp->if_softc;
8199 	int i;
8200 
8201 	ifp->if_timer = 0;
8202 
8203 	/*
8204 	 * We maintain a separate timer for each Tx queue because
8205 	 * Tx aggregation queues can get "stuck" while other queues
8206 	 * keep working. The Linux driver uses a similar workaround.
8207 	 */
8208 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8209 		if (sc->sc_tx_timer[i] > 0) {
8210 			if (--sc->sc_tx_timer[i] == 0) {
8211 				printf("%s: device timeout\n", DEVNAME(sc));
8212 				if (ifp->if_flags & IFF_DEBUG) {
8213 					iwx_nic_error(sc);
8214 					iwx_dump_driver_status(sc);
8215 				}
8216 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8217 					task_add(systq, &sc->init_task);
8218 				ifp->if_oerrors++;
8219 				return;
8220 			}
8221 			ifp->if_timer = 1;
8222 		}
8223 	}
8224 
8225 	ieee80211_watchdog(ifp);
8226 }
8227 
8228 int
8229 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8230 {
8231 	struct iwx_softc *sc = ifp->if_softc;
8232 	int s, err = 0, generation = sc->sc_generation;
8233 
8234 	/*
8235 	 * Prevent processes from entering this function while another
8236 	 * process is tsleep'ing in it.
8237 	 */
8238 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8239 	if (err == 0 && generation != sc->sc_generation) {
8240 		rw_exit(&sc->ioctl_rwl);
8241 		return ENXIO;
8242 	}
8243 	if (err)
8244 		return err;
8245 	s = splnet();
8246 
8247 	switch (cmd) {
8248 	case SIOCSIFADDR:
8249 		ifp->if_flags |= IFF_UP;
8250 		/* FALLTHROUGH */
8251 	case SIOCSIFFLAGS:
8252 		if (ifp->if_flags & IFF_UP) {
8253 			if (!(ifp->if_flags & IFF_RUNNING)) {
8254 				/* Force reload of firmware image from disk. */
8255 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
8256 				err = iwx_init(ifp);
8257 			}
8258 		} else {
8259 			if (ifp->if_flags & IFF_RUNNING)
8260 				iwx_stop(ifp);
8261 		}
8262 		break;
8263 
8264 	default:
8265 		err = ieee80211_ioctl(ifp, cmd, data);
8266 	}
8267 
8268 	if (err == ENETRESET) {
8269 		err = 0;
8270 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8271 		    (IFF_UP | IFF_RUNNING)) {
8272 			iwx_stop(ifp);
8273 			err = iwx_init(ifp);
8274 		}
8275 	}
8276 
8277 	splx(s);
8278 	rw_exit(&sc->ioctl_rwl);
8279 
8280 	return err;
8281 }
8282 
8283 /*
8284  * Note: This structure is read from the device with IO accesses,
8285  * and the reading already does the endian conversion. As it is
8286  * read with uint32_t-sized accesses, any members with a different size
8287  * need to be ordered correctly though!
8288  */
8289 struct iwx_error_event_table {
8290 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8291 	uint32_t error_id;		/* type of error */
8292 	uint32_t trm_hw_status0;	/* TRM HW status */
8293 	uint32_t trm_hw_status1;	/* TRM HW status */
8294 	uint32_t blink2;		/* branch link */
8295 	uint32_t ilink1;		/* interrupt link */
8296 	uint32_t ilink2;		/* interrupt link */
8297 	uint32_t data1;		/* error-specific data */
8298 	uint32_t data2;		/* error-specific data */
8299 	uint32_t data3;		/* error-specific data */
8300 	uint32_t bcon_time;		/* beacon timer */
8301 	uint32_t tsf_low;		/* network timestamp function timer */
8302 	uint32_t tsf_hi;		/* network timestamp function timer */
8303 	uint32_t gp1;		/* GP1 timer register */
8304 	uint32_t gp2;		/* GP2 timer register */
8305 	uint32_t fw_rev_type;	/* firmware revision type */
8306 	uint32_t major;		/* uCode version major */
8307 	uint32_t minor;		/* uCode version minor */
8308 	uint32_t hw_ver;		/* HW Silicon version */
8309 	uint32_t brd_ver;		/* HW board version */
8310 	uint32_t log_pc;		/* log program counter */
8311 	uint32_t frame_ptr;		/* frame pointer */
8312 	uint32_t stack_ptr;		/* stack pointer */
8313 	uint32_t hcmd;		/* last host command header */
8314 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8315 				 * rxtx_flag */
8316 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8317 				 * host_flag */
8318 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8319 				 * enc_flag */
8320 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8321 				 * time_flag */
8322 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8323 				 * wico interrupt */
8324 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8325 	uint32_t wait_event;		/* wait event() caller address */
8326 	uint32_t l2p_control;	/* L2pControlField */
8327 	uint32_t l2p_duration;	/* L2pDurationField */
8328 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8329 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8330 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8331 				 * (LMPM_PMG_SEL) */
8332 	uint32_t u_timestamp;	/* date and time of the firmware
8333 				 * compilation */
8334 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8335 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8336 
8337 /*
8338  * UMAC error struct - relevant starting from family 8000 chip.
8339  * Note: This structure is read from the device with IO accesses,
8340  * and the reading already does the endian conversion. As it is
8341  * read with u32-sized accesses, any members with a different size
8342  * need to be ordered correctly though!
8343  */
8344 struct iwx_umac_error_event_table {
8345 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8346 	uint32_t error_id;	/* type of error */
8347 	uint32_t blink1;	/* branch link */
8348 	uint32_t blink2;	/* branch link */
8349 	uint32_t ilink1;	/* interrupt link */
8350 	uint32_t ilink2;	/* interrupt link */
8351 	uint32_t data1;		/* error-specific data */
8352 	uint32_t data2;		/* error-specific data */
8353 	uint32_t data3;		/* error-specific data */
8354 	uint32_t umac_major;
8355 	uint32_t umac_minor;
8356 	uint32_t frame_pointer;	/* core register 27*/
8357 	uint32_t stack_pointer;	/* core register 28 */
8358 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8359 	uint32_t nic_isr_pref;	/* ISR status register */
8360 } __packed;
8361 
8362 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8363 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8364 
8365 void
8366 iwx_nic_umac_error(struct iwx_softc *sc)
8367 {
8368 	struct iwx_umac_error_event_table table;
8369 	uint32_t base;
8370 
8371 	base = sc->sc_uc.uc_umac_error_event_table;
8372 
8373 	if (base < 0x800000) {
8374 		printf("%s: Invalid error log pointer 0x%08x\n",
8375 		    DEVNAME(sc), base);
8376 		return;
8377 	}
8378 
8379 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8380 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8381 		return;
8382 	}
8383 
8384 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8385 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8386 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8387 			sc->sc_flags, table.valid);
8388 	}
8389 
8390 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8391 		iwx_desc_lookup(table.error_id));
8392 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8393 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8394 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8395 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8396 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8397 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8398 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8399 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8400 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8401 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8402 	    table.frame_pointer);
8403 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8404 	    table.stack_pointer);
8405 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8406 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8407 	    table.nic_isr_pref);
8408 }
8409 
8410 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
8411 static struct {
8412 	const char *name;
8413 	uint8_t num;
8414 } advanced_lookup[] = {
8415 	{ "NMI_INTERRUPT_WDG", 0x34 },
8416 	{ "SYSASSERT", 0x35 },
8417 	{ "UCODE_VERSION_MISMATCH", 0x37 },
8418 	{ "BAD_COMMAND", 0x38 },
8419 	{ "BAD_COMMAND", 0x39 },
8420 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8421 	{ "FATAL_ERROR", 0x3D },
8422 	{ "NMI_TRM_HW_ERR", 0x46 },
8423 	{ "NMI_INTERRUPT_TRM", 0x4C },
8424 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8425 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8426 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8427 	{ "NMI_INTERRUPT_HOST", 0x66 },
8428 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8429 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8430 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8431 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
8432 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
8433 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8434 	{ "ADVANCED_SYSASSERT", 0 },
8435 };
8436 
8437 const char *
8438 iwx_desc_lookup(uint32_t num)
8439 {
8440 	int i;
8441 
8442 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8443 		if (advanced_lookup[i].num ==
8444 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8445 			return advanced_lookup[i].name;
8446 
8447 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8448 	return advanced_lookup[i].name;
8449 }
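
/*
 * Example: a firmware error id of 0x10000034 has its CPU bits
 * (IWX_FW_SYSASSERT_CPU_MASK) masked off to leave 0x34, which the
 * table above resolves to "NMI_INTERRUPT_WDG"; values with no table
 * entry fall through to the final "ADVANCED_SYSASSERT" entry.
 */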
8450 
8451 /*
8452  * Support for dumping the error log seemed like a good idea ...
8453  * but it's mostly hex junk and the only sensible thing is the
8454  * hw/ucode revision (which we know anyway).  Since it's here,
8455  * I'll just leave it in, just in case e.g. the Intel guys want to
8456  * help us decipher some "ADVANCED_SYSASSERT" later.
8457  */
8458 void
8459 iwx_nic_error(struct iwx_softc *sc)
8460 {
8461 	struct iwx_error_event_table table;
8462 	uint32_t base;
8463 
8464 	printf("%s: dumping device error log\n", DEVNAME(sc));
8465 	base = sc->sc_uc.uc_lmac_error_event_table[0];
8466 	if (base < 0x800000) {
8467 		printf("%s: Invalid error log pointer 0x%08x\n",
8468 		    DEVNAME(sc), base);
8469 		return;
8470 	}
8471 
8472 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8473 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8474 		return;
8475 	}
8476 
8477 	if (!table.valid) {
8478 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8479 		return;
8480 	}
8481 
8482 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8483 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8484 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8485 		    sc->sc_flags, table.valid);
8486 	}
8487 
8488 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8489 	    iwx_desc_lookup(table.error_id));
8490 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8491 	    table.trm_hw_status0);
8492 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8493 	    table.trm_hw_status1);
8494 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8495 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8496 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8497 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8498 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8499 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8500 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8501 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8502 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8503 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8504 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8505 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8506 	    table.fw_rev_type);
8507 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8508 	    table.major);
8509 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8510 	    table.minor);
8511 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8512 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8513 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8514 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8515 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8516 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8517 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8518 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8519 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8520 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8521 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8522 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8523 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8524 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8525 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8526 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8527 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8528 
8529 	if (sc->sc_uc.uc_umac_error_event_table)
8530 		iwx_nic_umac_error(sc);
8531 }
8532 
8533 void
8534 iwx_dump_driver_status(struct iwx_softc *sc)
8535 {
8536 	int i;
8537 
8538 	printf("driver status:\n");
8539 	for (i = 0; i < nitems(sc->txq); i++) {
8540 		struct iwx_tx_ring *ring = &sc->txq[i];
8541 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
8542 		    "queued=%-3d\n",
8543 		    i, ring->qid, ring->cur, ring->queued);
8544 	}
8545 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
8546 	printf("  802.11 state %s\n",
8547 	    ieee80211_state_name[sc->sc_ic.ic_state]);
8548 }
8549 
8550 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
8551 do {									\
8552 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8553 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
8554 	_var_ = (void *)((_pkt_)+1);					\
8555 } while (/*CONSTCOND*/0)
8556 
8557 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
8558 do {									\
8559 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8560 	    (_len_), BUS_DMASYNC_POSTREAD);				\
8561 	_ptr_ = (void *)((_pkt_)+1);					\
8562 } while (/*CONSTCOND*/0)
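
/*
 * These helpers sync the payload portion of a received packet
 * (everything past the struct iwx_rx_packet header) from DMA memory
 * and hand back a pointer just past that header. SYNC_RESP_STRUCT
 * derives the length from the response structure's type;
 * SYNC_RESP_PTR takes an explicit byte count.
 */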
8563 
8564 int
8565 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8566 {
8567 	int qid, idx, code;
8568 
8569 	qid = pkt->hdr.qid & ~0x80;
8570 	idx = pkt->hdr.idx;
8571 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8572 
8573 	return (!(qid == 0 && idx == 0 && code == 0) &&
8574 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8575 }
8576 
8577 void
8578 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
8579 {
8580 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8581 	struct iwx_rx_packet *pkt, *nextpkt;
8582 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8583 	struct mbuf *m0, *m;
8584 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8585 	int qid, idx, code, handled = 1;
8586 
8587 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
8588 	    BUS_DMASYNC_POSTREAD);
8589 
8590 	m0 = data->m;
8591 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
8592 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
8593 		qid = pkt->hdr.qid;
8594 		idx = pkt->hdr.idx;
8595 
8596 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8597 
8598 		if (!iwx_rx_pkt_valid(pkt))
8599 			break;
8600 
8601 		/*
8602 		 * XXX Intel inside (tm)
8603 		 * Any commands in the LONG_GROUP could actually be in the
8604 		 * LEGACY group. Firmware API versions >= 50 reject commands
8605 		 * in group 0, forcing us to use this hack.
8606 		 */
8607 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
8608 			struct iwx_tx_ring *ring = &sc->txq[qid];
8609 			struct iwx_tx_data *txdata = &ring->data[idx];
8610 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
8611 				code = iwx_cmd_opcode(code);
8612 		}
8613 
8614 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8615 		if (len < sizeof(pkt->hdr) ||
8616 		    len > (IWX_RBUF_SIZE - offset - minsz))
8617 			break;
8618 
8619 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8620 			/* Take mbuf m0 off the RX ring. */
8621 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
8622 				ifp->if_ierrors++;
8623 				break;
8624 			}
8625 			KASSERT(data->m != m0);
8626 		}
8627 
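		/*
		 * Once the first MPDU has been seen (nmpdu == 1 above),
		 * m0 is no longer on the RX ring and is owned by this
		 * function: the final MPDU in the buffer is passed to the
		 * stack in m0 itself, while earlier MPDUs are handed up
		 * as m_copym() references (see the IWX_REPLY_RX_MPDU_CMD
		 * case below).
		 */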
8628 		switch (code) {
8629 		case IWX_REPLY_RX_PHY_CMD:
8630 			iwx_rx_rx_phy_cmd(sc, pkt, data);
8631 			break;
8632 
8633 		case IWX_REPLY_RX_MPDU_CMD: {
8634 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
8635 			nextoff = offset +
8636 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8637 			nextpkt = (struct iwx_rx_packet *)
8638 			    (m0->m_data + nextoff);
8639 			if (nextoff + minsz >= IWX_RBUF_SIZE ||
8640 			    !iwx_rx_pkt_valid(nextpkt)) {
8641 				/* No need to copy last frame in buffer. */
8642 				if (offset > 0)
8643 					m_adj(m0, offset);
8644 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
8645 				m0 = NULL; /* stack owns m0 now; abort loop */
8646 			} else {
8647 				/*
8648 				 * Create an mbuf which points to the current
8649 				 * packet. Always copy from offset zero to
8650 				 * preserve m_pkthdr.
8651 				 */
8652 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
8653 				if (m == NULL) {
8654 					ifp->if_ierrors++;
8655 					m_freem(m0);
8656 					m0 = NULL;
8657 					break;
8658 				}
8659 				m_adj(m, offset);
8660 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
8661 			}
8662 			break;
8663 		}
8664 
8665 		case IWX_BAR_FRAME_RELEASE:
8666 			iwx_rx_bar_frame_release(sc, pkt, data, ml);
8667 			break;
8668 
8669 		case IWX_TX_CMD:
8670 			iwx_rx_tx_cmd(sc, pkt, data);
8671 			break;
8672 
8673 		case IWX_BA_NOTIF:
8674 			iwx_rx_compressed_ba(sc, pkt, data);
8675 			break;
8676 
8677 		case IWX_MISSED_BEACONS_NOTIFICATION:
8678 			iwx_rx_bmiss(sc, pkt, data);
8679 			break;
8680 
8681 		case IWX_MFUART_LOAD_NOTIFICATION:
8682 			break;
8683 
8684 		case IWX_ALIVE: {
8685 			struct iwx_alive_resp_v4 *resp4;
8686 			struct iwx_alive_resp_v5 *resp5;
8687 
8688 			DPRINTF(("%s: firmware alive\n", __func__));
8689 			sc->sc_uc.uc_ok = 0;
8690 
8691 			/*
8692 			 * For v5 and above we can check the version; for
8693 			 * older versions we need to check the size.
8694 			 */
8695 			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8696 			    IWX_ALIVE) == 5) {
8697 				SYNC_RESP_STRUCT(resp5, pkt);
8698 				if (iwx_rx_packet_payload_len(pkt) !=
8699 				    sizeof(*resp5)) {
8700 					sc->sc_uc.uc_intr = 1;
8701 					wakeup(&sc->sc_uc);
8702 					break;
8703 				}
8704 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8705 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8706 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8707 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8708 				sc->sc_uc.uc_log_event_table = le32toh(
8709 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8710 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8711 				    resp5->umac_data.dbg_ptrs.error_info_addr);
8712 				if (resp5->status == IWX_ALIVE_STATUS_OK)
8713 					sc->sc_uc.uc_ok = 1;
8714 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
8715 				SYNC_RESP_STRUCT(resp4, pkt);
8716 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8717 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8718 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8719 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8720 				sc->sc_uc.uc_log_event_table = le32toh(
8721 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8722 				sc->sc_uc.uc_umac_error_event_table = le32toh(
8723 				    resp4->umac_data.dbg_ptrs.error_info_addr);
8724 				if (resp4->status == IWX_ALIVE_STATUS_OK)
8725 					sc->sc_uc.uc_ok = 1;
8726 			}
8727 
8728 			sc->sc_uc.uc_intr = 1;
8729 			wakeup(&sc->sc_uc);
8730 			break;
8731 		}
8732 
8733 		case IWX_STATISTICS_NOTIFICATION: {
8734 			struct iwx_notif_statistics *stats;
8735 			SYNC_RESP_STRUCT(stats, pkt);
8736 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8737 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
8738 			break;
8739 		}
8740 
8741 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
8742 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8743 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
8744 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8745 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
8746 			break;
8747 
8748 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8749 		    IWX_CT_KILL_NOTIFICATION): {
8750 			struct iwx_ct_kill_notif *notif;
8751 			SYNC_RESP_STRUCT(notif, pkt);
8752 			printf("%s: device at critical temperature (%u degC), "
8753 			    "stopping device\n",
8754 			    DEVNAME(sc), le16toh(notif->temperature));
8755 			sc->sc_flags |= IWX_FLAG_HW_ERR;
8756 			task_add(systq, &sc->init_task);
8757 			break;
8758 		}
8759 
8760 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8761 		    IWX_SESSION_PROTECTION_CMD):
8762 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8763 		    IWX_NVM_GET_INFO):
8764 		case IWX_ADD_STA_KEY:
8765 		case IWX_PHY_CONFIGURATION_CMD:
8766 		case IWX_TX_ANT_CONFIGURATION_CMD:
8767 		case IWX_ADD_STA:
8768 		case IWX_MAC_CONTEXT_CMD:
8769 		case IWX_REPLY_SF_CFG_CMD:
8770 		case IWX_POWER_TABLE_CMD:
8771 		case IWX_LTR_CONFIG:
8772 		case IWX_PHY_CONTEXT_CMD:
8773 		case IWX_BINDING_CONTEXT_CMD:
8774 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
8775 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
8776 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
8777 		case IWX_REPLY_BEACON_FILTERING_CMD:
8778 		case IWX_MAC_PM_POWER_TABLE:
8779 		case IWX_TIME_QUOTA_CMD:
8780 		case IWX_REMOVE_STA:
8781 		case IWX_TXPATH_FLUSH:
8782 		case IWX_BT_CONFIG:
8783 		case IWX_MCC_UPDATE_CMD:
8784 		case IWX_TIME_EVENT_CMD:
8785 		case IWX_STATISTICS_CMD:
8786 		case IWX_SCD_QUEUE_CFG: {
8787 			size_t pkt_len;
8788 
8789 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
8790 				break;
8791 
8792 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8793 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8794 
8795 			pkt_len = sizeof(pkt->len_n_flags) +
8796 			    iwx_rx_packet_len(pkt);
8797 
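			/*
			 * Discard the response if the command failed or if
			 * the reported size is implausible.
			 */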
8798 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
8799 			    pkt_len < sizeof(*pkt) ||
8800 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
8801 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8802 				    sc->sc_cmd_resp_len[idx]);
8803 				sc->sc_cmd_resp_pkt[idx] = NULL;
8804 				break;
8805 			}
8806 
8807 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8808 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8809 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8810 			break;
8811 		}
8812 
8813 		case IWX_INIT_COMPLETE_NOTIF:
8814 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
8815 			wakeup(&sc->sc_init_complete);
8816 			break;
8817 
8818 		case IWX_SCAN_COMPLETE_UMAC: {
8819 			struct iwx_umac_scan_complete *notif;
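			/* Sync the notification from DMA; contents unused. */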
8820 			SYNC_RESP_STRUCT(notif, pkt);
8821 			iwx_endscan(sc);
8822 			break;
8823 		}
8824 
8825 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
8826 			struct iwx_umac_scan_iter_complete_notif *notif;
8827 			SYNC_RESP_STRUCT(notif, pkt);
8828 			iwx_endscan(sc);
8829 			break;
8830 		}
8831 
8832 		case IWX_MCC_CHUB_UPDATE_CMD: {
8833 			struct iwx_mcc_chub_notif *notif;
8834 			SYNC_RESP_STRUCT(notif, pkt);
8835 			iwx_mcc_update(sc, notif);
8836 			break;
8837 		}
8838 
8839 		case IWX_REPLY_ERROR: {
8840 			struct iwx_error_resp *resp;
8841 			SYNC_RESP_STRUCT(resp, pkt);
8842 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
8843 				DEVNAME(sc), le32toh(resp->error_type),
8844 				resp->cmd_id);
8845 			break;
8846 		}
8847 
8848 		case IWX_TIME_EVENT_NOTIFICATION: {
8849 			struct iwx_time_event_notif *notif;
8850 			uint32_t action;
8851 			SYNC_RESP_STRUCT(notif, pkt);
8852 
8853 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8854 				break;
8855 			action = le32toh(notif->action);
8856 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
8857 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8858 			break;
8859 		}
8860 
8861 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8862 		    IWX_SESSION_PROTECTION_NOTIF):
8863 			break;
8864 
8865 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
8866 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
8867 			break;
8868 
8869 		/*
8870 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8871 		 * messages. Just ignore them for now.
8872 		 */
8873 		case IWX_DEBUG_LOG_MSG:
8874 			break;
8875 
8876 		case IWX_MCAST_FILTER_CMD:
8877 			break;
8878 
8879 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
8880 			break;
8881 
8882 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
8883 			break;
8884 
8885 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
8886 			break;
8887 
8888 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8889 		    IWX_NVM_ACCESS_COMPLETE):
8890 			break;
8891 
8892 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
8893 			break; /* happens in monitor mode; ignore for now */
8894 
8895 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
8896 			break;
8897 
8898 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
8899 		    IWX_TLC_MNG_UPDATE_NOTIF): {
8900 			struct iwx_tlc_update_notif *notif;
8901 			SYNC_RESP_STRUCT(notif, pkt);
8902 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
8903 				iwx_rs_update(sc, notif);
8904 			break;
8905 		}
8906 
8907 		default:
8908 			handled = 0;
8909 			printf("%s: unhandled firmware response 0x%x/0x%x "
8910 			    "rx ring %d[%d]\n",
8911 			    DEVNAME(sc), code, pkt->len_n_flags,
8912 			    (qid & ~0x80), idx);
8913 			break;
8914 		}
8915 
8916 		/*
8917 		 * uCode sets bit 0x80 when it originates the notification,
8918 		 * i.e. when the notification is not a direct response to a
8919 		 * command sent by the driver.
8920 		 * For example, uCode issues IWX_REPLY_RX when it sends a
8921 		 * received frame to the driver.
8922 		 */
8923 		if (handled && !(qid & (1 << 7))) {
8924 			iwx_cmd_done(sc, qid, idx, code);
8925 		}
8926 
8927 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8928 	}
8929 
8930 	if (m0 && m0 != data->m)
8931 		m_freem(m0);
8932 }
8933 
8934 void
8935 iwx_notif_intr(struct iwx_softc *sc)
8936 {
8937 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8938 	uint16_t hw;
8939 
8940 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8941 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8942 
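	/*
	 * closed_rb_num is the hardware's RX write pointer; reduce it
	 * modulo the ring size to obtain our ring index.
	 */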
8943 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8944 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
8945 	while (sc->rxq.cur != hw) {
8946 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8947 		iwx_rx_pkt(sc, data, &ml);
8948 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
8949 	}
8950 	if_input(&sc->sc_ic.ic_if, &ml);
8951 
8952 	/*
8953 	 * Tell the firmware what we have processed.
8954 	 * The hardware appears to require this write index to be a multiple of 8.
8955 	 */
8956 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
8957 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
8958 }
8959 
8960 int
8961 iwx_intr(void *arg)
8962 {
8963 	struct iwx_softc *sc = arg;
8964 	struct ieee80211com *ic = &sc->sc_ic;
8965 	struct ifnet *ifp = IC2IFP(ic);
8966 	int handled = 0;
8967 	int r1, r2, rv = 0;
8968 
8969 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
8970 
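	/*
	 * In ICT mode the device writes its interrupt causes into a table
	 * in host memory, saving us a register read here.
	 */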
8971 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
8972 		uint32_t *ict = sc->ict_dma.vaddr;
8973 		int tmp;
8974 
8975 		tmp = htole32(ict[sc->ict_cur]);
8976 		if (!tmp)
8977 			goto out_ena;
8978 
8979 		/*
8980 		 * At least one cause is pending; drain all non-zero ICT entries.
8981 		 */
8982 		r1 = r2 = 0;
8983 		while (tmp) {
8984 			r1 |= tmp;
8985 			ict[sc->ict_cur] = 0;
8986 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
8987 			tmp = htole32(ict[sc->ict_cur]);
8988 		}
8989 
8990 		/* A read of 0xffffffff should never occur here; ignore it. */
8991 		if (r1 == 0xffffffff)
8992 			r1 = 0;
8993 
8994 		/* HW bug: coalescing may clear the Rx bit; bits 18-19 remain set. */
8995 		if (r1 & 0xc0000)
8996 			r1 |= 0x8000;
8997 		r1 = (0xff & r1) | ((0xff00 & r1) << 16); /* expand to CSR_INT layout */
8998 	} else {
8999 		r1 = IWX_READ(sc, IWX_CSR_INT);
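		/*
		 * A read of 0xffffffff, or the 0xa5a5a5a0 pattern, suggests
		 * the device has dropped off the bus.
		 */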
9000 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
9001 			goto out;
9002 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
9003 	}
9004 	if (r1 == 0 && r2 == 0) {
9005 		goto out_ena;
9006 	}
9007 
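	/* CSR_INT is write-1-to-clear; acknowledge the causes we just read. */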
9008 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
9009 
9010 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
9011 		int i;
9012 
9013 		/* Firmware has now configured the RFH. */
9014 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9015 			iwx_update_rx_desc(sc, &sc->rxq, i);
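		/* Seed the RX write index, aligned to 8 (see iwx_notif_intr). */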
9016 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9017 	}
9018 
9019 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
9020 
9021 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
9022 		handled |= IWX_CSR_INT_BIT_RF_KILL;
9023 		iwx_check_rfkill(sc);
9024 		task_add(systq, &sc->init_task);
9025 		rv = 1;
9026 		goto out_ena;
9027 	}
9028 
9029 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
9030 		if (ifp->if_flags & IFF_DEBUG) {
9031 			iwx_nic_error(sc);
9032 			iwx_dump_driver_status(sc);
9033 		}
9034 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9035 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9036 			task_add(systq, &sc->init_task);
9037 		rv = 1;
9038 		goto out;
9040 	}
9041 
9042 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
9043 		handled |= IWX_CSR_INT_BIT_HW_ERR;
9044 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9045 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9046 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9047 			task_add(systq, &sc->init_task);
9048 		}
9049 		rv = 1;
9050 		goto out;
9051 	}
9052 
9053 	/* firmware chunk loaded */
9054 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
9055 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
9056 		handled |= IWX_CSR_INT_BIT_FH_TX;
9057 
9058 		sc->sc_fw_chunk_done = 1;
9059 		wakeup(&sc->sc_fw);
9060 	}
9061 
9062 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
9063 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
9064 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
9065 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
9066 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
9067 		}
9068 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
9069 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
9070 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
9071 		}
9072 
9073 		/* Disable periodic interrupt; we use it as just a one-shot. */
9074 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
9075 
9076 		/*
9077 		 * Re-enable the periodic interrupt (8 msec) only if we received
9078 		 * a real RX interrupt (not just the periodic one), to catch
9079 		 * any dangling Rx interrupt.  If it was just the periodic
9080 		 * interrupt, there was no dangling Rx activity, and no need
9081 		 * to extend the periodic interrupt; one-shot is enough.
9082 		 */
9083 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
9084 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
9085 			    IWX_CSR_INT_PERIODIC_ENA);
9086 
9087 		iwx_notif_intr(sc);
9088 	}
9089 
9090 	rv = 1;
9091 
9092  out_ena:
9093 	iwx_restore_interrupts(sc);
9094  out:
9095 	return rv;
9096 }
9097 
9098 int
9099 iwx_intr_msix(void *arg)
9100 {
9101 	struct iwx_softc *sc = arg;
9102 	struct ieee80211com *ic = &sc->sc_ic;
9103 	struct ifnet *ifp = IC2IFP(ic);
9104 	uint32_t inta_fh, inta_hw;
9105 	int vector = 0;
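	/* This driver uses only MSI-X vector 0. */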
9106 
9107 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
9108 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
9109 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9110 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9111 	inta_fh &= sc->sc_fh_mask;
9112 	inta_hw &= sc->sc_hw_mask;
9113 
9114 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
9115 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
9116 		iwx_notif_intr(sc);
9117 	}
9118 
9119 	/* firmware chunk loaded */
9120 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9121 		sc->sc_fw_chunk_done = 1;
9122 		wakeup(&sc->sc_fw);
9123 	}
9124 
9125 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
9126 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9127 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9128 		if (ifp->if_flags & IFF_DEBUG) {
9129 			iwx_nic_error(sc);
9130 			iwx_dump_driver_status(sc);
9131 		}
9132 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9133 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9134 			task_add(systq, &sc->init_task);
9135 		return 1;
9136 	}
9137 
9138 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9139 		iwx_check_rfkill(sc);
9140 		task_add(systq, &sc->init_task);
9141 	}
9142 
9143 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9144 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9145 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9146 			sc->sc_flags |= IWX_FLAG_HW_ERR;
9147 			task_add(systq, &sc->init_task);
9148 		}
9149 		return 1;
9150 	}
9151 
9152 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
9153 		int i;
9154 
9155 		/* Firmware has now configured the RFH. */
9156 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9157 			iwx_update_rx_desc(sc, &sc->rxq, i);
9158 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9159 	}
9160 
9161 	/*
9162 	 * Before delivering an interrupt the hardware masks it to
9163 	 * prevent nested interrupts, by setting the corresponding bit
9164 	 * in the automask register. After handling the interrupt we
9165 	 * must re-enable delivery by clearing that bit. The register
9166 	 * is write-1-to-clear (W1C): writing 1 to a bit clears it,
9167 	 * which re-enables the corresponding interrupt vector.
9168 	 */
9169 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9170 	return 1;
9171 }
9172 
9173 typedef void *iwx_match_t;
9174 
9175 static const struct pci_matchid iwx_devices[] = {
9176 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
9177 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
9178 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
9179 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
9180 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
9181 };
9182 
9183 static const struct pci_matchid iwx_subsystem_id_ax201[] = {
9184 	{ PCI_VENDOR_INTEL,	0x0070 },
9185 	{ PCI_VENDOR_INTEL,	0x0074 },
9186 	{ PCI_VENDOR_INTEL,	0x0078 },
9187 	{ PCI_VENDOR_INTEL,	0x007c },
9188 	{ PCI_VENDOR_INTEL,	0x0310 },
9189 	{ PCI_VENDOR_INTEL,	0x2074 },
9190 	{ PCI_VENDOR_INTEL,	0x4070 },
9191 	/* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
9192 };
9193 
9194 int
9195 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
9196 {
9197 	struct pci_attach_args *pa = aux;
9198 	pcireg_t subid;
9199 	pci_vendor_id_t svid;
9200 	pci_product_id_t spid;
9201 	int i;
9202 
9203 	if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
9204 		return 0;
9205 
9206 	/*
9207 	 * Some PCI product IDs are shared among devices which use distinct
9208 	 * chips or firmware. We need to match the subsystem ID as well to
9209 	 * ensure that we have in fact found a supported device.
9210 	 */
9211 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
9212 	svid = PCI_VENDOR(subid);
9213 	spid = PCI_PRODUCT(subid);
9214 
9215 	switch (PCI_PRODUCT(pa->pa_id)) {
9216 	case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
9217 		return 1; /* match any device */
9218 	case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
9219 	case PCI_PRODUCT_INTEL_WL_22500_3: /* AX201 */
9220 	case PCI_PRODUCT_INTEL_WL_22500_4: /* AX201 */
9221 	case PCI_PRODUCT_INTEL_WL_22500_5: /* AX201 */
9222 		for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
9223 			if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
9224 			    spid == iwx_subsystem_id_ax201[i].pm_pid)
9225 				return 1;
9227 		}
9228 		break;
9229 	default:
9230 		break;
9231 	}
9232 
9233 	return 0;
9234 }
9235 
9236 int
9237 iwx_preinit(struct iwx_softc *sc)
9238 {
9239 	struct ieee80211com *ic = &sc->sc_ic;
9240 	struct ifnet *ifp = IC2IFP(ic);
9241 	int err;
9242 	static int attached;
9243 
9244 	err = iwx_prepare_card_hw(sc);
9245 	if (err) {
9246 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9247 		return err;
9248 	}
9249 
9250 	if (attached) {
9251 		/* Update MAC in case the upper layers changed it. */
9252 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9253 		    ((struct arpcom *)ifp)->ac_enaddr);
9254 		return 0;
9255 	}
9256 
9257 	err = iwx_start_hw(sc);
9258 	if (err) {
9259 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9260 		return err;
9261 	}
9262 
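	/*
	 * Run the init firmware once to read NVM data (such as the MAC
	 * address), then power the device back down until it is needed.
	 */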
9263 	err = iwx_run_init_mvm_ucode(sc, 1);
9264 	iwx_stop_device(sc);
9265 	if (err)
9266 		return err;
9267 
9268 	/* Print version info and MAC address on first successful fw load. */
9269 	attached = 1;
9270 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9271 	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
9272 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9273 
9274 	if (sc->sc_nvm.sku_cap_11n_enable)
9275 		iwx_setup_ht_rates(sc);
9276 
9277 	/* not all hardware can do 5GHz band */
9278 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9279 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9280 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9281 
9282 	/* Configure channel information obtained from firmware. */
9283 	ieee80211_channel_init(ifp);
9284 
9285 	/* Configure MAC address. */
9286 	err = if_setlladdr(ifp, ic->ic_myaddr);
9287 	if (err)
9288 		printf("%s: could not set MAC address (error %d)\n",
9289 		    DEVNAME(sc), err);
9290 
9291 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9292 
9293 	return 0;
9294 }
9295 
9296 void
9297 iwx_attach_hook(struct device *self)
9298 {
9299 	struct iwx_softc *sc = (void *)self;
9300 
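	/* Called via config_mountroot(9), after cold boot has finished. */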
9301 	KASSERT(!cold);
9302 
9303 	iwx_preinit(sc);
9304 }
9305 
9306 void
9307 iwx_attach(struct device *parent, struct device *self, void *aux)
9308 {
9309 	struct iwx_softc *sc = (void *)self;
9310 	struct pci_attach_args *pa = aux;
9311 	pci_intr_handle_t ih;
9312 	pcireg_t reg, memtype;
9313 	struct ieee80211com *ic = &sc->sc_ic;
9314 	struct ifnet *ifp = &ic->ic_if;
9315 	const char *intrstr;
9316 	int err;
9317 	int txq_i, i, j;
9318 
9319 	sc->sc_pct = pa->pa_pc;
9320 	sc->sc_pcitag = pa->pa_tag;
9321 	sc->sc_dmat = pa->pa_dmat;
9322 
9323 	rw_init(&sc->ioctl_rwl, "iwxioctl");
9324 
9325 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9326 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9327 	if (err == 0) {
9328 		printf("%s: PCIe capability structure not found!\n",
9329 		    DEVNAME(sc));
9330 		return;
9331 	}
9332 
9333 	/*
9334 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
9335 	 * PCI Tx retries from interfering with C3 CPU state.
9336 	 */
9337 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9338 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9339 
9340 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9341 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9342 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9343 	if (err) {
9344 		printf("%s: can't map mem space\n", DEVNAME(sc));
9345 		return;
9346 	}
9347 
9348 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9349 		sc->sc_msix = 1;
9350 	} else if (pci_intr_map_msi(pa, &ih)) {
9351 		if (pci_intr_map(pa, &ih)) {
9352 			printf("%s: can't map interrupt\n", DEVNAME(sc));
9353 			return;
9354 		}
9355 		/* Hardware bug workaround: make sure legacy INTx is not disabled. */
9356 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9357 		    PCI_COMMAND_STATUS_REG);
9358 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9359 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9360 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9361 		    PCI_COMMAND_STATUS_REG, reg);
9362 	}
9363 
9364 	intrstr = pci_intr_string(sc->sc_pct, ih);
9365 	if (sc->sc_msix)
9366 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9367 		    iwx_intr_msix, sc, DEVNAME(sc));
9368 	else
9369 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9370 		    iwx_intr, sc, DEVNAME(sc));
9371 
9372 	if (sc->sc_ih == NULL) {
9373 		printf("\n");
9374 		printf("%s: can't establish interrupt", DEVNAME(sc));
9375 		if (intrstr != NULL)
9376 			printf(" at %s", intrstr);
9377 		printf("\n");
9378 		return;
9379 	}
9380 	printf(", %s\n", intrstr);
9381 
9382 	/* Clear pending interrupts. */
9383 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9384 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
9385 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
9386 
9387 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
9388 
9389 	/*
9390 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
9391 	 * changed: the revision step now also includes bits 0-1 (there is
9392 	 * no more "dash" value). To keep hw_rev backwards compatible, we
9393 	 * store it in the old format.
9394 	 */
9395 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9396 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
9397 
9398 	switch (PCI_PRODUCT(pa->pa_id)) {
9399 	case PCI_PRODUCT_INTEL_WL_22500_1:
9400 		sc->sc_fwname = "iwx-cc-a0-67";
9401 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9402 		sc->sc_integrated = 0;
9403 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
9404 		sc->sc_low_latency_xtal = 0;
9405 		sc->sc_xtal_latency = 0;
9406 		sc->sc_tx_with_siso_diversity = 0;
9407 		sc->sc_uhb_supported = 0;
9408 		break;
9409 	case PCI_PRODUCT_INTEL_WL_22500_2:
9410 	case PCI_PRODUCT_INTEL_WL_22500_3:
9411 	case PCI_PRODUCT_INTEL_WL_22500_5:
9412 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
9413 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
9414 			return;
9415 		}
9416 
9417 		sc->sc_fwname = "iwx-QuZ-a0-hr-b0-67";
9418 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9419 		sc->sc_integrated = 1;
9420 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
9421 		sc->sc_low_latency_xtal = 0;
9422 		sc->sc_xtal_latency = 500;
9423 		sc->sc_tx_with_siso_diversity = 0;
9424 		sc->sc_uhb_supported = 0;
9425 		break;
9426 	case PCI_PRODUCT_INTEL_WL_22500_4:
9427 		sc->sc_fwname = "iwx-Qu-c0-hr-b0-63";
9428 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9429 		sc->sc_integrated = 1;
9430 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
9431 		sc->sc_low_latency_xtal = 0;
9432 		sc->sc_xtal_latency = 1820;
9433 		sc->sc_tx_with_siso_diversity = 0;
9434 		sc->sc_uhb_supported = 0;
9435 		break;
9436 	default:
9437 		printf("%s: unknown adapter type\n", DEVNAME(sc));
9438 		return;
9439 	}
9440 
9441 	/* Allocate DMA memory for loading firmware. */
9442 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
9443 	    sizeof(struct iwx_context_info), 0);
9444 	if (err) {
9445 		printf("%s: could not allocate memory for loading firmware\n",
9446 		    DEVNAME(sc));
9447 		return;
9448 	}
9449 
9450 	/* Allocate interrupt cause table (ICT). */
9451 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9452 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
9453 	if (err) {
9454 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9455 		goto fail1;
9456 	}
9457 
9458 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9459 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9460 		if (err) {
9461 			printf("%s: could not allocate TX ring %d\n",
9462 			    DEVNAME(sc), txq_i);
9463 			goto fail4;
9464 		}
9465 	}
9466 
9467 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
9468 	if (err) {
9469 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
9470 		goto fail4;
9471 	}
9472 
9473 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
9474 	if (sc->sc_nswq == NULL)
9475 		goto fail4;
9476 
9477 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only OFDM, but unused */
9478 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
9479 	ic->ic_state = IEEE80211_S_INIT;
9480 
9481 	/* Set device capabilities. */
9482 	ic->ic_caps =
9483 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
9484 	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
9485 	    IEEE80211_C_WEP |		/* WEP */
9486 	    IEEE80211_C_RSN |		/* WPA/RSN */
9487 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
9488 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
9489 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
9490 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
9491 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
9492 
9493 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
9494 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
9495 	ic->ic_htcaps |=
9496 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9497 	ic->ic_htxcaps = 0;
9498 	ic->ic_txbfcaps = 0;
9499 	ic->ic_aselcaps = 0;
9500 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
9501 
9502 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9503 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9504 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9505 
9506 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9507 		sc->sc_phyctxt[i].id = i;
9508 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
9509 	}
9510 
9511 	/* IBSS channel undefined for now. */
9512 	ic->ic_ibss_chan = &ic->ic_channels[1];
9513 
9514 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
9515 
9516 	ifp->if_softc = sc;
9517 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9518 	ifp->if_ioctl = iwx_ioctl;
9519 	ifp->if_start = iwx_start;
9520 	ifp->if_watchdog = iwx_watchdog;
9521 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9522 
9523 	if_attach(ifp);
9524 	ieee80211_ifattach(ifp);
9525 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9526 
9527 #if NBPFILTER > 0
9528 	iwx_radiotap_attach(sc);
9529 #endif
9530 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9531 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9532 		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
9533 		rxba->sc = sc;
9534 		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
9535 		    rxba);
9536 		timeout_set(&rxba->reorder_buf.reorder_timer,
9537 		    iwx_reorder_timer_expired, &rxba->reorder_buf);
9538 		for (j = 0; j < nitems(rxba->entries); j++)
9539 			ml_init(&rxba->entries[j].frames);
9540 	}
9541 	task_set(&sc->init_task, iwx_init_task, sc);
9542 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
9543 	task_set(&sc->ba_task, iwx_ba_task, sc);
9544 	task_set(&sc->setkey_task, iwx_setkey_task, sc);
9545 	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
9546 	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
9547 	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
9548 
9549 	ic->ic_node_alloc = iwx_node_alloc;
9550 	ic->ic_bgscan_start = iwx_bgscan;
9551 	ic->ic_bgscan_done = iwx_bgscan_done;
9552 	ic->ic_set_key = iwx_set_key;
9553 	ic->ic_delete_key = iwx_delete_key;
9554 
9555 	/* Override 802.11 state transition machine. */
9556 	sc->sc_newstate = ic->ic_newstate;
9557 	ic->ic_newstate = iwx_newstate;
9558 	ic->ic_updateprot = iwx_updateprot;
9559 	ic->ic_updateslot = iwx_updateslot;
9560 	ic->ic_updateedca = iwx_updateedca;
9561 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
9562 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
9563 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
9564 	ic->ic_ampdu_tx_stop = NULL;
9565 	/*
9566 	 * We cannot read the MAC address without loading the
9567 	 * firmware from disk. Postpone until mountroot is done.
9568 	 */
9569 	config_mountroot(self, iwx_attach_hook);
9570 
9571 	return;
9572 
9573 fail4:	while (--txq_i >= 0)
9574 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
9575 	iwx_free_rx_ring(sc, &sc->rxq);
9576 	if (sc->ict_dma.vaddr != NULL)
9577 		iwx_dma_contig_free(&sc->ict_dma);
9578 
9579 fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
9580 	return;
9581 }
9582 
9583 #if NBPFILTER > 0
9584 void
9585 iwx_radiotap_attach(struct iwx_softc *sc)
9586 {
9587 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9588 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9589 
9590 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9591 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9592 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
9593 
9594 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
9595 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9596 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
9597 }
9598 #endif
9599 
9600 void
9601 iwx_init_task(void *arg1)
9602 {
9603 	struct iwx_softc *sc = arg1;
9604 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9605 	int s = splnet();
9606 	int generation = sc->sc_generation;
9607 	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
9608 
9609 	rw_enter_write(&sc->ioctl_rwl);
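	/* Bail out if another init cycle started while we slept on the lock. */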
9610 	if (generation != sc->sc_generation) {
9611 		rw_exit(&sc->ioctl_rwl);
9612 		splx(s);
9613 		return;
9614 	}
9615 
9616 	if (ifp->if_flags & IFF_RUNNING)
9617 		iwx_stop(ifp);
9618 	else
9619 		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9620 
9621 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9622 		iwx_init(ifp);
9623 
9624 	rw_exit(&sc->ioctl_rwl);
9625 	splx(s);
9626 }
9627 
9628 void
9629 iwx_resume(struct iwx_softc *sc)
9630 {
9631 	pcireg_t reg;
9632 
9633 	/*
9634 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
9635 	 * PCI Tx retries from interfering with C3 CPU state.
9636 	 */
9637 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9638 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9639 
9640 	if (!sc->sc_msix) {
9641 		/* Hardware bug workaround: make sure legacy INTx is not disabled. */
9642 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9643 		    PCI_COMMAND_STATUS_REG);
9644 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9645 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9646 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9647 		    PCI_COMMAND_STATUS_REG, reg);
9648 	}
9649 
9650 	iwx_disable_interrupts(sc);
9651 }
9652 
9653 int
9654 iwx_wakeup(struct iwx_softc *sc)
9655 {
9656 	struct ieee80211com *ic = &sc->sc_ic;
9657 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9658 	int err;
9659 
9660 	err = iwx_start_hw(sc);
9661 	if (err)
9662 		return err;
9663 
9664 	err = iwx_init_hw(sc);
9665 	if (err)
9666 		return err;
9667 
9668 	refcnt_init(&sc->task_refs);
9669 	ifq_clr_oactive(&ifp->if_snd);
9670 	ifp->if_flags |= IFF_RUNNING;
9671 
9672 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9673 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9674 	else
9675 		ieee80211_begin_scan(ifp);
9676 
9677 	return 0;
9678 }
9679 
9680 int
9681 iwx_activate(struct device *self, int act)
9682 {
9683 	struct iwx_softc *sc = (struct iwx_softc *)self;
9684 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9685 	int err = 0;
9686 
9687 	switch (act) {
9688 	case DVACT_QUIESCE:
9689 		if (ifp->if_flags & IFF_RUNNING) {
9690 			rw_enter_write(&sc->ioctl_rwl);
9691 			iwx_stop(ifp);
9692 			rw_exit(&sc->ioctl_rwl);
9693 		}
9694 		break;
9695 	case DVACT_RESUME:
9696 		iwx_resume(sc);
9697 		break;
9698 	case DVACT_WAKEUP:
9699 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
9700 			err = iwx_wakeup(sc);
9701 			if (err)
9702 				printf("%s: could not initialize hardware\n",
9703 				    DEVNAME(sc));
9704 		}
9705 		break;
9706 	}
9707 
9708 	return 0;
9709 }
9710 
9711 struct cfdriver iwx_cd = {
9712 	NULL, "iwx", DV_IFNET
9713 };
9714 
9715 struct cfattach iwx_ca = {
9716 	sizeof(struct iwx_softc), iwx_match, iwx_attach,
9717 	NULL, iwx_activate
9718 };
9719