xref: /openbsd/sys/dev/pci/if_iwm.c (revision 097a140d)
1 /*	$OpenBSD: if_iwm.c,v 1.319 2021/04/25 15:32:21 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_ra.h>
146 #include <net80211/ieee80211_radiotap.h>
147 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
148 #undef DPRINTF /* defined in ieee80211_priv.h */
149 
150 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
151 
152 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
153 
154 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
155 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
156 
157 #ifdef IWM_DEBUG
158 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
159 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
160 int iwm_debug = 1;
161 #else
162 #define DPRINTF(x)	do { ; } while (0)
163 #define DPRINTFN(n, x)	do { ; } while (0)
164 #endif
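/*
 * Illustrative usage only: both macros take a parenthesized,
 * printf-style argument list, so calls compile away entirely when
 * IWM_DEBUG is not defined, e.g.
 *
 *	DPRINTF(("%s: paging enabled\n", DEVNAME(sc)));
 *	DPRINTFN(2, ("%s: verbose detail\n", DEVNAME(sc)));
 */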
165 
166 #include <dev/pci/if_iwmreg.h>
167 #include <dev/pci/if_iwmvar.h>
168 
169 const uint8_t iwm_nvm_channels[] = {
170 	/* 2.4 GHz */
171 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
172 	/* 5 GHz */
173 	36, 40, 44, 48, 52, 56, 60, 64,
174 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
175 	149, 153, 157, 161, 165
176 };
177 
178 const uint8_t iwm_nvm_channels_8000[] = {
179 	/* 2.4 GHz */
180 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
181 	/* 5 GHz */
182 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
183 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
184 	149, 153, 157, 161, 165, 169, 173, 177, 181
185 };
186 
187 #define IWM_NUM_2GHZ_CHANNELS	14
188 
189 const struct iwm_rate {
190 	uint16_t rate;
191 	uint8_t plcp;
192 	uint8_t ht_plcp;
193 } iwm_rates[] = {
194 		/* Legacy */		/* HT */
195 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
196 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
200 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
201 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
202 	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
203 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
204 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
205 	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
206 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
207 	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
208 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
209 	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
210 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
211 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
212 	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
213 	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
214 	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
215 	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
216 };
217 #define IWM_RIDX_CCK	0
218 #define IWM_RIDX_OFDM	4
219 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
220 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
221 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
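/*
 * Note on units: the 'rate' member of struct iwm_rate is in 500 kb/s
 * steps, e.g. 2 is 1 Mb/s CCK and 108 is 54 Mb/s OFDM. This is why
 * IWM_RVAL_IS_OFDM() treats 12 (6 Mb/s, the lowest OFDM rate) as its
 * threshold and excludes 22 (11 Mb/s CCK).
 */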
223 
224 /* Convert an MCS index into an iwm_rates[] index. */
225 const int iwm_mcs2ridx[] = {
226 	IWM_RATE_MCS_0_INDEX,
227 	IWM_RATE_MCS_1_INDEX,
228 	IWM_RATE_MCS_2_INDEX,
229 	IWM_RATE_MCS_3_INDEX,
230 	IWM_RATE_MCS_4_INDEX,
231 	IWM_RATE_MCS_5_INDEX,
232 	IWM_RATE_MCS_6_INDEX,
233 	IWM_RATE_MCS_7_INDEX,
234 	IWM_RATE_MCS_8_INDEX,
235 	IWM_RATE_MCS_9_INDEX,
236 	IWM_RATE_MCS_10_INDEX,
237 	IWM_RATE_MCS_11_INDEX,
238 	IWM_RATE_MCS_12_INDEX,
239 	IWM_RATE_MCS_13_INDEX,
240 	IWM_RATE_MCS_14_INDEX,
241 	IWM_RATE_MCS_15_INDEX,
242 };
243 
244 struct iwm_nvm_section {
245 	uint16_t length;
246 	uint8_t *data;
247 };
248 
249 int	iwm_is_mimo_ht_plcp(uint8_t);
250 int	iwm_is_mimo_mcs(int);
251 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
252 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
253 	    uint8_t *, size_t);
254 int	iwm_set_default_calib(struct iwm_softc *, const void *);
255 void	iwm_fw_info_free(struct iwm_fw_info *);
256 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
257 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
258 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_write_prph64(struct iwm_softc *, uint64_t, uint64_t);
259 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
260 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
261 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
262 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
263 int	iwm_nic_lock(struct iwm_softc *);
264 void	iwm_nic_assert_locked(struct iwm_softc *);
265 void	iwm_nic_unlock(struct iwm_softc *);
266 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
267 	    uint32_t);
268 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
269 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
270 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
271 	    bus_size_t);
272 void	iwm_dma_contig_free(struct iwm_dma_info *);
273 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 void	iwm_disable_rx_dma(struct iwm_softc *);
275 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
277 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
278 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 void	iwm_enable_rfkill_int(struct iwm_softc *);
281 int	iwm_check_rfkill(struct iwm_softc *);
282 void	iwm_enable_interrupts(struct iwm_softc *);
283 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
284 void	iwm_restore_interrupts(struct iwm_softc *);
285 void	iwm_disable_interrupts(struct iwm_softc *);
286 void	iwm_ict_reset(struct iwm_softc *);
287 int	iwm_set_hw_ready(struct iwm_softc *);
288 int	iwm_prepare_card_hw(struct iwm_softc *);
289 void	iwm_apm_config(struct iwm_softc *);
290 int	iwm_apm_init(struct iwm_softc *);
291 void	iwm_apm_stop(struct iwm_softc *);
292 int	iwm_allow_mcast(struct iwm_softc *);
293 void	iwm_init_msix_hw(struct iwm_softc *);
294 void	iwm_conf_msix_hw(struct iwm_softc *, int);
295 int	iwm_start_hw(struct iwm_softc *);
296 void	iwm_stop_device(struct iwm_softc *);
297 void	iwm_nic_config(struct iwm_softc *);
298 int	iwm_nic_rx_init(struct iwm_softc *);
299 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
300 int	iwm_nic_rx_mq_init(struct iwm_softc *);
301 int	iwm_nic_tx_init(struct iwm_softc *);
302 int	iwm_nic_init(struct iwm_softc *);
303 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
304 int	iwm_enable_txq(struct iwm_softc *, int, int, int);
305 int	iwm_post_alive(struct iwm_softc *);
306 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
307 	    uint16_t);
308 int	iwm_phy_db_set_section(struct iwm_softc *,
309 	    struct iwm_calib_res_notif_phy_db *);
310 int	iwm_is_valid_channel(uint16_t);
311 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
312 uint16_t iwm_channel_id_to_papd(uint16_t);
313 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
314 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
315 	    uint16_t *, uint16_t);
316 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
317 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
318 	    uint8_t);
319 int	iwm_send_phy_db_data(struct iwm_softc *);
320 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
321 	    uint32_t);
322 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
323 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
324 	    uint8_t *, uint16_t *);
325 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
326 	    uint16_t *, size_t);
327 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
328 	    const uint8_t *nvm_channels, int nchan);
329 int	iwm_mimo_enabled(struct iwm_softc *);
330 void	iwm_setup_ht_rates(struct iwm_softc *);
331 void	iwm_htprot_task(void *);
332 void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
333 void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
334 	    uint16_t);
335 void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
336 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
337 	    uint8_t);
338 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
339 	    uint8_t);
340 void	iwm_rx_ba_session_expired(void *);
341 void	iwm_reorder_timer_expired(void *);
342 void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
343 	    uint16_t, uint16_t, int, int);
344 #ifdef notyet
345 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
346 	    uint8_t);
347 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
348 	    uint8_t);
349 #endif
350 void	iwm_ba_task(void *);
351 
352 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
353 	    const uint16_t *, const uint16_t *,
354 	    const uint16_t *, const uint16_t *,
355 	    const uint16_t *, int);
356 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
357 	    const uint16_t *, const uint16_t *);
358 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
359 int	iwm_nvm_init(struct iwm_softc *);
360 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
361 	    uint32_t);
362 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
363 	    uint32_t);
364 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
365 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
366 	    int, int *);
367 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
368 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
369 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
370 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
371 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
372 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
373 int	iwm_send_dqa_cmd(struct iwm_softc *);
374 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
375 int	iwm_config_ltr(struct iwm_softc *);
376 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
377 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
378 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
379 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
380 	    struct iwm_rx_data *);
381 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
382 int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
383 	    struct ieee80211_rxinfo *);
384 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
385 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
386 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
387 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
388 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
389 	    struct iwm_node *, int, int);
390 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
391 	    struct iwm_rx_data *);
392 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
393 	    struct iwm_rx_data *);
394 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
395 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
396 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
397 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
398 	    struct ieee80211_channel *, uint8_t, uint8_t);
399 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
400 	    uint8_t, uint32_t, uint32_t);
401 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
402 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
403 	    const void *);
404 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
405 	    uint32_t *);
406 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
407 	    const void *, uint32_t *);
408 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
409 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
410 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
411 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
412 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
413 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
414 int	iwm_flush_tx_path(struct iwm_softc *, int);
415 void	iwm_led_enable(struct iwm_softc *);
416 void	iwm_led_disable(struct iwm_softc *);
417 int	iwm_led_is_enabled(struct iwm_softc *);
418 void	iwm_led_blink_timeout(void *);
419 void	iwm_led_blink_start(struct iwm_softc *);
420 void	iwm_led_blink_stop(struct iwm_softc *);
421 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
422 	    struct iwm_beacon_filter_cmd *);
423 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
424 	    struct iwm_beacon_filter_cmd *);
425 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
426 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
427 	    struct iwm_mac_power_cmd *);
428 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
429 int	iwm_power_update_device(struct iwm_softc *);
430 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
431 int	iwm_disable_beacon_filter(struct iwm_softc *);
432 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
433 int	iwm_add_aux_sta(struct iwm_softc *);
434 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
435 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
436 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
437 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
438 	    struct iwm_scan_channel_cfg_lmac *, int, int);
439 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
440 int	iwm_lmac_scan(struct iwm_softc *, int);
441 int	iwm_config_umac_scan(struct iwm_softc *);
442 int	iwm_umac_scan(struct iwm_softc *, int);
443 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
444 int	iwm_rval2ridx(int);
445 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
446 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
447 	    struct iwm_mac_ctx_cmd *, uint32_t);
448 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
449 	    struct iwm_mac_data_sta *, int);
450 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
451 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
452 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
453 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
454 int	iwm_scan(struct iwm_softc *);
455 int	iwm_bgscan(struct ieee80211com *);
456 int	iwm_umac_scan_abort(struct iwm_softc *);
457 int	iwm_lmac_scan_abort(struct iwm_softc *);
458 int	iwm_scan_abort(struct iwm_softc *);
459 int	iwm_auth(struct iwm_softc *);
460 int	iwm_deauth(struct iwm_softc *);
461 int	iwm_assoc(struct iwm_softc *);
462 int	iwm_disassoc(struct iwm_softc *);
463 int	iwm_run(struct iwm_softc *);
464 int	iwm_run_stop(struct iwm_softc *);
465 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
466 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
467 	    struct ieee80211_key *);
468 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
469 	    struct ieee80211_key *);
470 void	iwm_delete_key_v1(struct ieee80211com *,
471 	    struct ieee80211_node *, struct ieee80211_key *);
472 void	iwm_delete_key(struct ieee80211com *,
473 	    struct ieee80211_node *, struct ieee80211_key *);
474 void	iwm_calib_timeout(void *);
475 void	iwm_setrates(struct iwm_node *, int);
476 int	iwm_media_change(struct ifnet *);
477 void	iwm_newstate_task(void *);
478 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
479 void	iwm_endscan(struct iwm_softc *);
480 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
481 	    struct ieee80211_node *);
482 int	iwm_sf_config(struct iwm_softc *, int);
483 int	iwm_send_bt_init_conf(struct iwm_softc *);
484 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
485 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
486 void	iwm_free_fw_paging(struct iwm_softc *);
487 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
488 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
489 int	iwm_init_hw(struct iwm_softc *);
490 int	iwm_init(struct ifnet *);
491 void	iwm_start(struct ifnet *);
492 void	iwm_stop(struct ifnet *);
493 void	iwm_watchdog(struct ifnet *);
494 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
495 #ifdef IWM_DEBUG
496 const char *iwm_desc_lookup(uint32_t);
497 void	iwm_nic_error(struct iwm_softc *);
498 void	iwm_nic_umac_error(struct iwm_softc *);
499 #endif
500 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
501 	    struct mbuf_list *);
502 void	iwm_flip_address(uint8_t *);
503 int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
504 	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
505 int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
506 void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
507 	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
508 	    struct mbuf_list *);
509 int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
510 	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
511 int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
512 	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
513 	    struct ieee80211_rxinfo *, struct mbuf_list *);
514 void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
515 	    struct mbuf_list *);
516 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
517 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
518 	    struct mbuf_list *);
519 void	iwm_notif_intr(struct iwm_softc *);
520 int	iwm_intr(void *);
521 int	iwm_intr_msix(void *);
522 int	iwm_match(struct device *, void *, void *);
523 int	iwm_preinit(struct iwm_softc *);
524 void	iwm_attach_hook(struct device *);
525 void	iwm_attach(struct device *, struct device *, void *);
526 void	iwm_init_task(void *);
527 int	iwm_activate(struct device *, int);
528 int	iwm_resume(struct iwm_softc *);
529 
530 #if NBPFILTER > 0
531 void	iwm_radiotap_attach(struct iwm_softc *);
532 #endif
533 
534 int
535 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
536 {
537 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
538 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
539 }
540 
541 int
542 iwm_is_mimo_mcs(int mcs)
543 {
544 	int ridx = iwm_mcs2ridx[mcs];
545 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
547 }
548 
549 int
550 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
551 {
552 	struct iwm_fw_cscheme_list *l = (void *)data;
553 
554 	if (dlen < sizeof(*l) ||
555 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
556 		return EINVAL;
557 
558 	/* we don't actually store anything for now, always use s/w crypto */
559 
560 	return 0;
561 }
562 
563 int
564 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
565     uint8_t *data, size_t dlen)
566 {
567 	struct iwm_fw_sects *fws;
568 	struct iwm_fw_onesect *fwone;
569 
570 	if (type >= IWM_UCODE_TYPE_MAX)
571 		return EINVAL;
572 	if (dlen < sizeof(uint32_t))
573 		return EINVAL;
574 
575 	fws = &sc->sc_fw.fw_sects[type];
576 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
577 		return EINVAL;
578 
579 	fwone = &fws->fw_sect[fws->fw_count];
580 
581 	/* The first 32 bits are the device load offset. */
582 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
583 
584 	/* rest is data */
585 	fwone->fws_data = data + sizeof(uint32_t);
586 	fwone->fws_len = dlen - sizeof(uint32_t);
587 
588 	fws->fw_count++;
589 	fws->fw_totlen += fwone->fws_len;
590 
591 	return 0;
592 }
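/*
 * A rough sketch of the section layout parsed above; the firmware
 * image is little-endian, so the load offset is stored LE32:
 *
 *	0               4                                  dlen
 *	+---------------+----------------------------------+
 *	| fws_devoff    | fws_data: fws_len = dlen - 4     |
 *	| (device load  | bytes of section contents        |
 *	|  offset)      |                                  |
 *	+---------------+----------------------------------+
 */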
593 
594 #define IWM_DEFAULT_SCAN_CHANNELS	40
595 /* Newer firmware might support more channels. Raise this value if needed. */
596 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
597 
598 struct iwm_tlv_calib_data {
599 	uint32_t ucode_type;
600 	struct iwm_tlv_calib_ctrl calib;
601 } __packed;
602 
603 int
604 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
605 {
606 	const struct iwm_tlv_calib_data *def_calib = data;
607 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
608 
609 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
610 		return EINVAL;
611 
612 	sc->sc_default_calib[ucode_type].flow_trigger =
613 	    def_calib->calib.flow_trigger;
614 	sc->sc_default_calib[ucode_type].event_trigger =
615 	    def_calib->calib.event_trigger;
616 
617 	return 0;
618 }
619 
620 void
621 iwm_fw_info_free(struct iwm_fw_info *fw)
622 {
623 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
624 	fw->fw_rawdata = NULL;
625 	fw->fw_rawsize = 0;
626 	/* don't touch fw->fw_status */
627 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
628 }
629 
630 int
631 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
632 {
633 	struct iwm_fw_info *fw = &sc->sc_fw;
634 	struct iwm_tlv_ucode_header *uhdr;
635 	struct iwm_ucode_tlv tlv;
636 	uint32_t tlv_type;
637 	uint8_t *data;
638 	uint32_t usniffer_img;
639 	uint32_t paging_mem_size;
640 	int err;
641 	size_t len;
642 
643 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
644 	    ucode_type != IWM_UCODE_TYPE_INIT)
645 		return 0;
646 
647 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
648 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
649 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
650 
651 	if (fw->fw_rawdata != NULL)
652 		iwm_fw_info_free(fw);
653 
654 	err = loadfirmware(sc->sc_fwname,
655 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
656 	if (err) {
657 		printf("%s: could not read firmware %s (error %d)\n",
658 		    DEVNAME(sc), sc->sc_fwname, err);
659 		goto out;
660 	}
661 
662 	sc->sc_capaflags = 0;
663 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
664 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
665 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
666 
667 	uhdr = (void *)fw->fw_rawdata;
668 	if (*(uint32_t *)fw->fw_rawdata != 0 ||
669 	    le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
670 		printf("%s: invalid firmware %s\n",
671 		    DEVNAME(sc), sc->sc_fwname);
672 		err = EINVAL;
673 		goto out;
674 	}
675 
676 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
677 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
678 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
679 	    IWM_UCODE_API(le32toh(uhdr->ver)));
680 	data = uhdr->data;
681 	len = fw->fw_rawsize - sizeof(*uhdr);
682 
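	/*
	 * The remainder of the image is a stream of TLV records: a
	 * struct iwm_ucode_tlv header (32-bit type and length fields)
	 * followed by 'length' payload bytes, with each record padded
	 * to a 4-byte boundary, as the roundup() at the bottom of
	 * this loop assumes.
	 */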
683 	while (len >= sizeof(tlv)) {
684 		size_t tlv_len;
685 		void *tlv_data;
686 
687 		memcpy(&tlv, data, sizeof(tlv));
688 		tlv_len = le32toh(tlv.length);
689 		tlv_type = le32toh(tlv.type);
690 
691 		len -= sizeof(tlv);
692 		data += sizeof(tlv);
693 		tlv_data = data;
694 
695 		if (len < tlv_len) {
696 			printf("%s: firmware too short: %zu bytes\n",
697 			    DEVNAME(sc), len);
698 			err = EINVAL;
699 			goto parse_out;
700 		}
701 
702 		switch (tlv_type) {
703 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
704 			if (tlv_len < sizeof(uint32_t)) {
705 				err = EINVAL;
706 				goto parse_out;
707 			}
708 			sc->sc_capa_max_probe_len
709 			    = le32toh(*(uint32_t *)tlv_data);
710 			if (sc->sc_capa_max_probe_len >
711 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
712 				err = EINVAL;
713 				goto parse_out;
714 			}
715 			break;
716 		case IWM_UCODE_TLV_PAN:
717 			if (tlv_len) {
718 				err = EINVAL;
719 				goto parse_out;
720 			}
721 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
722 			break;
723 		case IWM_UCODE_TLV_FLAGS:
724 			if (tlv_len < sizeof(uint32_t)) {
725 				err = EINVAL;
726 				goto parse_out;
727 			}
728 			/*
729 			 * Apparently there can be many flags, but the Linux
730 			 * driver parses only the first one, and so do we.
731 			 *
732 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
733 			 * Intentional or a bug?  Observations from the
734 			 * current firmware file:
735 			 *  1) TLV_PAN is parsed first.
736 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN.
737 			 * ==> this resets TLV_PAN to itself.
738 			 */
739 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
740 			break;
741 		case IWM_UCODE_TLV_CSCHEME:
742 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
743 			if (err)
744 				goto parse_out;
745 			break;
746 		case IWM_UCODE_TLV_NUM_OF_CPU: {
747 			uint32_t num_cpu;
748 			if (tlv_len != sizeof(uint32_t)) {
749 				err = EINVAL;
750 				goto parse_out;
751 			}
752 			num_cpu = le32toh(*(uint32_t *)tlv_data);
753 			if (num_cpu < 1 || num_cpu > 2) {
754 				err = EINVAL;
755 				goto parse_out;
756 			}
757 			break;
758 		}
759 		case IWM_UCODE_TLV_SEC_RT:
760 			err = iwm_firmware_store_section(sc,
761 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
762 			if (err)
763 				goto parse_out;
764 			break;
765 		case IWM_UCODE_TLV_SEC_INIT:
766 			err = iwm_firmware_store_section(sc,
767 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
768 			if (err)
769 				goto parse_out;
770 			break;
771 		case IWM_UCODE_TLV_SEC_WOWLAN:
772 			err = iwm_firmware_store_section(sc,
773 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
774 			if (err)
775 				goto parse_out;
776 			break;
777 		case IWM_UCODE_TLV_DEF_CALIB:
778 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
779 				err = EINVAL;
780 				goto parse_out;
781 			}
782 			err = iwm_set_default_calib(sc, tlv_data);
783 			if (err)
784 				goto parse_out;
785 			break;
786 		case IWM_UCODE_TLV_PHY_SKU:
787 			if (tlv_len != sizeof(uint32_t)) {
788 				err = EINVAL;
789 				goto parse_out;
790 			}
791 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
792 			break;
793 
794 		case IWM_UCODE_TLV_API_CHANGES_SET: {
795 			struct iwm_ucode_api *api;
796 			int idx, i;
797 			if (tlv_len != sizeof(*api)) {
798 				err = EINVAL;
799 				goto parse_out;
800 			}
801 			api = (struct iwm_ucode_api *)tlv_data;
802 			idx = le32toh(api->api_index);
803 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
804 				err = EINVAL;
805 				goto parse_out;
806 			}
807 			for (i = 0; i < 32; i++) {
808 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
809 					continue;
810 				setbit(sc->sc_ucode_api, i + (32 * idx));
811 			}
812 			break;
813 		}
814 
815 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
816 			struct iwm_ucode_capa *capa;
817 			int idx, i;
818 			if (tlv_len != sizeof(*capa)) {
819 				err = EINVAL;
820 				goto parse_out;
821 			}
822 			capa = (struct iwm_ucode_capa *)tlv_data;
823 			idx = le32toh(capa->api_index);
824 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
825 				goto parse_out;
826 			}
827 			for (i = 0; i < 32; i++) {
828 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
829 					continue;
830 				setbit(sc->sc_enabled_capa, i + (32 * idx));
831 			}
832 			break;
833 		}
834 
835 		case 48: /* undocumented TLV */
836 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
837 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
838 			/* ignore, not used by current driver */
839 			break;
840 
841 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
842 			err = iwm_firmware_store_section(sc,
843 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
844 			    tlv_len);
845 			if (err)
846 				goto parse_out;
847 			break;
848 
849 		case IWM_UCODE_TLV_PAGING:
850 			if (tlv_len != sizeof(uint32_t)) {
851 				err = EINVAL;
852 				goto parse_out;
853 			}
854 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
855 
856 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
857 			    DEVNAME(sc), paging_mem_size));
858 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
859 				printf("%s: Driver only supports up to %u"
860 				    " bytes for paging image (%u requested)\n",
861 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
862 				    paging_mem_size);
863 				err = EINVAL;
864 				goto out;
865 			}
866 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
867 				printf("%s: Paging: image isn't a multiple of %u\n",
868 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
869 				err = EINVAL;
870 				goto out;
871 			}
872 
873 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
874 			    paging_mem_size;
875 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
876 			fw->fw_sects[usniffer_img].paging_mem_size =
877 			    paging_mem_size;
878 			break;
879 
880 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
881 			if (tlv_len != sizeof(uint32_t)) {
882 				err = EINVAL;
883 				goto parse_out;
884 			}
885 			sc->sc_capa_n_scan_channels =
886 			  le32toh(*(uint32_t *)tlv_data);
887 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
888 				err = ERANGE;
889 				goto parse_out;
890 			}
891 			break;
892 
893 		case IWM_UCODE_TLV_FW_VERSION:
894 			if (tlv_len != sizeof(uint32_t) * 3) {
895 				err = EINVAL;
896 				goto parse_out;
897 			}
898 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
899 			    "%u.%u.%u",
900 			    le32toh(((uint32_t *)tlv_data)[0]),
901 			    le32toh(((uint32_t *)tlv_data)[1]),
902 			    le32toh(((uint32_t *)tlv_data)[2]));
903 			break;
904 
905 		case IWM_UCODE_TLV_FW_DBG_DEST:
906 		case IWM_UCODE_TLV_FW_DBG_CONF:
907 			break;
908 
909 		case IWM_UCODE_TLV_FW_MEM_SEG:
910 			break;
911 
912 		default:
913 			err = EINVAL;
914 			goto parse_out;
915 		}
916 
917 		len -= roundup(tlv_len, 4);
918 		data += roundup(tlv_len, 4);
919 	}
920 
921 	KASSERT(err == 0);
922 
923  parse_out:
924 	if (err) {
925 		printf("%s: firmware parse error %d, "
926 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
927 	}
928 
929  out:
930 	if (err) {
931 		fw->fw_status = IWM_FW_STATUS_NONE;
932 		if (fw->fw_rawdata != NULL)
933 			iwm_fw_info_free(fw);
934 	} else
935 		fw->fw_status = IWM_FW_STATUS_DONE;
936 	wakeup(&sc->sc_fw);
937 
938 	return err;
939 }
940 
941 uint32_t
942 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
943 {
944 	iwm_nic_assert_locked(sc);
945 	IWM_WRITE(sc,
946 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
947 	IWM_BARRIER_READ_WRITE(sc);
948 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
949 }
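/*
 * Periphery (PRPH) registers are reached indirectly: the target
 * address goes into an HBUS address register (with control bits in
 * the top byte, hence the (3 << 24) above) and data is then moved
 * through the matching HBUS data register.
 */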
950 
951 void
952 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
953 {
954 	iwm_nic_assert_locked(sc);
955 	IWM_WRITE(sc,
956 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
957 	IWM_BARRIER_WRITE(sc);
958 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
959 }
960 
961 void
962 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
963 {
964 	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
965 	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
966 }
967 
968 int
969 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
970 {
971 	int offs, err = 0;
972 	uint32_t *vals = buf;
973 
974 	if (iwm_nic_lock(sc)) {
975 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
976 		for (offs = 0; offs < dwords; offs++)
977 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
978 		iwm_nic_unlock(sc);
979 	} else {
980 		err = EBUSY;
981 	}
982 	return err;
983 }
984 
985 int
986 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
987 {
988 	int offs;
989 	const uint32_t *vals = buf;
990 
991 	if (iwm_nic_lock(sc)) {
992 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
993 		/* WADDR auto-increments */
994 		for (offs = 0; offs < dwords; offs++) {
995 			uint32_t val = vals ? vals[offs] : 0;
996 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
997 		}
998 		iwm_nic_unlock(sc);
999 	} else {
1000 		return EBUSY;
1001 	}
1002 	return 0;
1003 }
1004 
1005 int
1006 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1007 {
1008 	return iwm_write_mem(sc, addr, &val, 1);
1009 }
1010 
1011 int
1012 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1013     int timo)
1014 {
1015 	for (;;) {
1016 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1017 			return 1;
1018 		}
1019 		if (timo < 10) {
1020 			return 0;
1021 		}
1022 		timo -= 10;
1023 		DELAY(10);
1024 	}
1025 }
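/*
 * Example (illustrative values): iwm_poll_bit(sc, reg, bits, mask,
 * 25000) polls in 10-usec steps for up to roughly 25 msec and
 * returns non-zero as soon as the masked register value matches
 * 'bits'.
 */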
1026 
1027 int
1028 iwm_nic_lock(struct iwm_softc *sc)
1029 {
1030 	if (sc->sc_nic_locks > 0) {
1031 		iwm_nic_assert_locked(sc);
1032 		sc->sc_nic_locks++;
1033 		return 1; /* already locked */
1034 	}
1035 
1036 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1037 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1038 
1039 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1040 		DELAY(2);
1041 
1042 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1043 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1044 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1045 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1046 		sc->sc_nic_locks++;
1047 		return 1;
1048 	}
1049 
1050 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1051 	return 0;
1052 }
1053 
1054 void
1055 iwm_nic_assert_locked(struct iwm_softc *sc)
1056 {
1057 	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1058 	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
1059 		panic("%s: mac clock not ready", DEVNAME(sc));
1060 	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
1061 		panic("%s: mac gone to sleep", DEVNAME(sc));
1062 	if (sc->sc_nic_locks <= 0)
1063 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1064 }
1065 
1066 void
1067 iwm_nic_unlock(struct iwm_softc *sc)
1068 {
1069 	if (sc->sc_nic_locks > 0) {
1070 		if (--sc->sc_nic_locks == 0)
1071 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1072 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1073 	} else
1074 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1075 }
1076 
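/*
 * A minimal sketch of the locking pattern used throughout this file;
 * since sc_nic_locks is a counter, iwm_nic_lock() calls may nest:
 *
 *	if (iwm_nic_lock(sc)) {
 *		val = iwm_read_prph(sc, reg);
 *		iwm_nic_unlock(sc);
 *	}
 */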
1077 void
1078 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1079     uint32_t mask)
1080 {
1081 	uint32_t val;
1082 
1083 	/* XXX: no error path? */
1084 	if (iwm_nic_lock(sc)) {
1085 		val = iwm_read_prph(sc, reg) & mask;
1086 		val |= bits;
1087 		iwm_write_prph(sc, reg, val);
1088 		iwm_nic_unlock(sc);
1089 	}
1090 }
1091 
1092 void
1093 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1094 {
1095 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1096 }
1097 
1098 void
1099 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1100 {
1101 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1102 }
1103 
1104 int
1105 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1106     bus_size_t size, bus_size_t alignment)
1107 {
1108 	int nsegs, err;
1109 	caddr_t va;
1110 
1111 	dma->tag = tag;
1112 	dma->size = size;
1113 
1114 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1115 	    &dma->map);
1116 	if (err)
1117 		goto fail;
1118 
1119 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1120 	    BUS_DMA_NOWAIT);
1121 	if (err)
1122 		goto fail;
1123 
1124 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1125 	    BUS_DMA_NOWAIT);
1126 	if (err)
1127 		goto fail;
1128 	dma->vaddr = va;
1129 
1130 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1131 	    BUS_DMA_NOWAIT);
1132 	if (err)
1133 		goto fail;
1134 
1135 	memset(dma->vaddr, 0, size);
1136 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1137 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1138 
1139 	return 0;
1140 
1141 fail:	iwm_dma_contig_free(dma);
1142 	return err;
1143 }
1144 
1145 void
1146 iwm_dma_contig_free(struct iwm_dma_info *dma)
1147 {
1148 	if (dma->map != NULL) {
1149 		if (dma->vaddr != NULL) {
1150 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1151 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1152 			bus_dmamap_unload(dma->tag, dma->map);
1153 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1154 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1155 			dma->vaddr = NULL;
1156 		}
1157 		bus_dmamap_destroy(dma->tag, dma->map);
1158 		dma->map = NULL;
1159 	}
1160 }
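/*
 * Minimal usage sketch (the size and alignment values here are
 * illustrative only):
 *
 *	struct iwm_dma_info dma;
 *
 *	if (iwm_dma_contig_alloc(sc->sc_dmat, &dma, 4096, 256) == 0) {
 *		... access the buffer via dma.vaddr, hand dma.paddr
 *		    to the device ...
 *		iwm_dma_contig_free(&dma);
 *	}
 */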
1161 
1162 int
1163 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1164 {
1165 	bus_size_t size;
1166 	size_t descsz;
1167 	int count, i, err;
1168 
1169 	ring->cur = 0;
1170 
1171 	if (sc->sc_mqrx_supported) {
1172 		count = IWM_RX_MQ_RING_COUNT;
1173 		descsz = sizeof(uint64_t);
1174 	} else {
1175 		count = IWM_RX_RING_COUNT;
1176 		descsz = sizeof(uint32_t);
1177 	}
1178 
1179 	/* Allocate RX descriptors (256-byte aligned). */
1180 	size = count * descsz;
1181 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1182 	if (err) {
1183 		printf("%s: could not allocate RX ring DMA memory\n",
1184 		    DEVNAME(sc));
1185 		goto fail;
1186 	}
1187 	ring->desc = ring->free_desc_dma.vaddr;
1188 
1189 	/* Allocate RX status area (16-byte aligned). */
1190 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1191 	    sizeof(*ring->stat), 16);
1192 	if (err) {
1193 		printf("%s: could not allocate RX status DMA memory\n",
1194 		    DEVNAME(sc));
1195 		goto fail;
1196 	}
1197 	ring->stat = ring->stat_dma.vaddr;
1198 
1199 	if (sc->sc_mqrx_supported) {
1200 		size = count * sizeof(uint32_t);
1201 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1202 		    size, 256);
1203 		if (err) {
1204 			printf("%s: could not allocate RX ring DMA memory\n",
1205 			    DEVNAME(sc));
1206 			goto fail;
1207 		}
1208 	}
1209 
1210 	for (i = 0; i < count; i++) {
1211 		struct iwm_rx_data *data = &ring->data[i];
1212 
1213 		memset(data, 0, sizeof(*data));
1214 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1215 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1216 		    &data->map);
1217 		if (err) {
1218 			printf("%s: could not create RX buf DMA map\n",
1219 			    DEVNAME(sc));
1220 			goto fail;
1221 		}
1222 
1223 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1224 		if (err)
1225 			goto fail;
1226 	}
1227 	return 0;
1228 
1229 fail:	iwm_free_rx_ring(sc, ring);
1230 	return err;
1231 }
1232 
1233 void
1234 iwm_disable_rx_dma(struct iwm_softc *sc)
1235 {
1236 	int ntries;
1237 
1238 	if (iwm_nic_lock(sc)) {
1239 		if (sc->sc_mqrx_supported) {
1240 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1241 			for (ntries = 0; ntries < 1000; ntries++) {
1242 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1243 				    IWM_RXF_DMA_IDLE)
1244 					break;
1245 				DELAY(10);
1246 			}
1247 		} else {
1248 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1249 			for (ntries = 0; ntries < 1000; ntries++) {
1250 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1251 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1252 					break;
1253 				DELAY(10);
1254 			}
1255 		}
1256 		iwm_nic_unlock(sc);
1257 	}
1258 }
1259 
1260 void
1261 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1262 {
1263 	ring->cur = 0;
1264 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1265 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1266 	memset(ring->stat, 0, sizeof(*ring->stat));
1267 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1268 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1270 }
1271 
1272 void
1273 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1274 {
1275 	int count, i;
1276 
1277 	iwm_dma_contig_free(&ring->free_desc_dma);
1278 	iwm_dma_contig_free(&ring->stat_dma);
1279 	iwm_dma_contig_free(&ring->used_desc_dma);
1280 
1281 	if (sc->sc_mqrx_supported)
1282 		count = IWM_RX_MQ_RING_COUNT;
1283 	else
1284 		count = IWM_RX_RING_COUNT;
1285 
1286 	for (i = 0; i < count; i++) {
1287 		struct iwm_rx_data *data = &ring->data[i];
1288 
1289 		if (data->m != NULL) {
1290 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1291 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1292 			bus_dmamap_unload(sc->sc_dmat, data->map);
1293 			m_freem(data->m);
1294 			data->m = NULL;
1295 		}
1296 		if (data->map != NULL)
1297 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1298 	}
1299 }
1300 
1301 int
1302 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1303 {
1304 	bus_addr_t paddr;
1305 	bus_size_t size;
1306 	int i, err;
1307 
1308 	ring->qid = qid;
1309 	ring->queued = 0;
1310 	ring->cur = 0;
1311 	ring->tail = 0;
1312 
1313 	/* Allocate TX descriptors (256-byte aligned). */
1314 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_tfd);
1315 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1316 	if (err) {
1317 		printf("%s: could not allocate TX ring DMA memory\n",
1318 		    DEVNAME(sc));
1319 		goto fail;
1320 	}
1321 	ring->desc = ring->desc_dma.vaddr;
1322 
1323 	/*
1324 	 * There is no need to allocate DMA buffers for unused rings.
1325 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1326 	 * than we currently need.
1327 	 *
1328 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1329 	 * The command queue is sc->txq[0], and the 4 mgmt/data frame queues
1330 	 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1331 	 * in order to provide one queue per EDCA category.
1332 	 *
1333 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd).
1334 	 *
1335 	 * Tx aggregation will require additional queues (one queue per TID
1336 	 * for which aggregation is enabled) but we do not implement this yet.
1337 	 *
1338 	 * Unfortunately, we cannot tell if DQA will be used until the
1339 	 * firmware gets loaded later, so just allocate sufficient rings
1340 	 * in order to satisfy both cases.
1341 	 */
1342 	if (qid > IWM_CMD_QUEUE)
1343 		return 0;
1344 
1345 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1346 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1347 	if (err) {
1348 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1349 		goto fail;
1350 	}
1351 	ring->cmd = ring->cmd_dma.vaddr;
1352 
1353 	paddr = ring->cmd_dma.paddr;
1354 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1355 		struct iwm_tx_data *data = &ring->data[i];
1356 		size_t mapsize;
1357 
1358 		data->cmd_paddr = paddr;
1359 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1360 		    + offsetof(struct iwm_tx_cmd, scratch);
1361 		paddr += sizeof(struct iwm_device_cmd);
1362 
1363 		/* FW commands may require more mapped space than packets. */
1364 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1365 			mapsize = (sizeof(struct iwm_cmd_header) +
1366 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1367 		else
1368 			mapsize = MCLBYTES;
1369 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1370 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1371 		    &data->map);
1372 		if (err) {
1373 			printf("%s: could not create TX buf DMA map\n",
1374 			    DEVNAME(sc));
1375 			goto fail;
1376 		}
1377 	}
1378 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1379 	return 0;
1380 
1381 fail:	iwm_free_tx_ring(sc, ring);
1382 	return err;
1383 }
1384 
1385 void
1386 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1387 {
1388 	int i;
1389 
1390 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1391 		struct iwm_tx_data *data = &ring->data[i];
1392 
1393 		if (data->m != NULL) {
1394 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1395 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1396 			bus_dmamap_unload(sc->sc_dmat, data->map);
1397 			m_freem(data->m);
1398 			data->m = NULL;
1399 		}
1400 	}
1401 	/* Clear TX descriptors. */
1402 	memset(ring->desc, 0, ring->desc_dma.size);
1403 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1404 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1405 	sc->qfullmsk &= ~(1 << ring->qid);
1406 	/* 7000 family NICs are locked while commands are in progress. */
1407 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1408 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1409 			iwm_nic_unlock(sc);
1410 	}
1411 	ring->queued = 0;
1412 	ring->cur = 0;
1413 	ring->tail = 0;
1414 }
1415 
1416 void
1417 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1418 {
1419 	int i;
1420 
1421 	iwm_dma_contig_free(&ring->desc_dma);
1422 	iwm_dma_contig_free(&ring->cmd_dma);
1423 
1424 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1425 		struct iwm_tx_data *data = &ring->data[i];
1426 
1427 		if (data->m != NULL) {
1428 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1429 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1430 			bus_dmamap_unload(sc->sc_dmat, data->map);
1431 			m_freem(data->m);
1432 			data->m = NULL;
1433 		}
1434 		if (data->map != NULL)
1435 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1436 	}
1437 }
1438 
1439 void
1440 iwm_enable_rfkill_int(struct iwm_softc *sc)
1441 {
1442 	if (!sc->sc_msix) {
1443 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1444 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1445 	} else {
1446 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1447 		    sc->sc_fh_init_mask);
1448 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1449 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1450 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1451 	}
1452 
1453 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1454 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1455 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1456 }
1457 
1458 int
1459 iwm_check_rfkill(struct iwm_softc *sc)
1460 {
1461 	uint32_t v;
1462 	int s;
1463 	int rv;
1464 
1465 	s = splnet();
1466 
1467 	/*
1468 	 * "documentation" is not really helpful here:
1469 	 *  27:	HW_RF_KILL_SW
1470 	 *	Indicates state of (platform's) hardware RF-Kill switch
1471 	 *
1472 	 * But the sense is inverted: a cleared bit means RF-kill is engaged.
1473 	 */
1474 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1475 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1476 	if (rv) {
1477 		sc->sc_flags |= IWM_FLAG_RFKILL;
1478 	} else {
1479 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1480 	}
1481 
1482 	splx(s);
1483 	return rv;
1484 }
1485 
1486 void
1487 iwm_enable_interrupts(struct iwm_softc *sc)
1488 {
1489 	if (!sc->sc_msix) {
1490 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1491 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1492 	} else {
1493 		/*
1494 		 * fh/hw_mask keeps all the unmasked causes.
1495 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
1496 		 */
1497 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1498 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1499 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1500 		    ~sc->sc_fh_mask);
1501 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1502 		    ~sc->sc_hw_mask);
1503 	}
1504 }
1505 
1506 void
1507 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1508 {
1509 	if (!sc->sc_msix) {
1510 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1511 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1512 	} else {
1513 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1514 		    sc->sc_hw_init_mask);
1515 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1516 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1517 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1518 	}
1519 }
1520 
1521 void
1522 iwm_restore_interrupts(struct iwm_softc *sc)
1523 {
1524 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1525 }
1526 
1527 void
1528 iwm_disable_interrupts(struct iwm_softc *sc)
1529 {
1530 	int s = splnet();
1531 
1532 	if (!sc->sc_msix) {
1533 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1534 
1535 		/* acknowledge all interrupts */
1536 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1537 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1538 	} else {
1539 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1540 		    sc->sc_fh_init_mask);
1541 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1542 		    sc->sc_hw_init_mask);
1543 	}
1544 
1545 	splx(s);
1546 }
1547 
1548 void
1549 iwm_ict_reset(struct iwm_softc *sc)
1550 {
1551 	iwm_disable_interrupts(sc);
1552 
1553 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1554 	sc->ict_cur = 0;
1555 
1556 	/* Set physical address of ICT (4KB aligned). */
1557 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1558 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1559 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1560 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1561 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1562 
1563 	/* Switch to ICT interrupt mode in driver. */
1564 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1565 
1566 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1567 	iwm_enable_interrupts(sc);
1568 }
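/*
 * Note: the ICT (interrupt cause table) must be 4KB aligned because
 * only the physical-address bits above IWM_ICT_PADDR_SHIFT fit into
 * the register written above; the low bits are assumed to be zero.
 */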
1569 
1570 #define IWM_HW_READY_TIMEOUT 50
1571 int
1572 iwm_set_hw_ready(struct iwm_softc *sc)
1573 {
1574 	int ready;
1575 
1576 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1577 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1578 
1579 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1580 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1581 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1582 	    IWM_HW_READY_TIMEOUT);
1583 	if (ready)
1584 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1585 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1586 
1587 	return ready;
1588 }
1589 #undef IWM_HW_READY_TIMEOUT
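/*
 * Note: the timeout used above is in the same microsecond units that
 * iwm_poll_bit() counts down in 10-usec steps, i.e. the NIC gets
 * roughly 50 usec to report readiness.
 */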
1590 
1591 int
1592 iwm_prepare_card_hw(struct iwm_softc *sc)
1593 {
1594 	int t = 0;
1595 
1596 	if (iwm_set_hw_ready(sc))
1597 		return 0;
1598 
1599 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1600 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1601 	DELAY(1000);
1602
1604 	/* If HW is not ready, prepare the conditions to check again */
1605 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1606 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1607 
1608 	do {
1609 		if (iwm_set_hw_ready(sc))
1610 			return 0;
1611 		DELAY(200);
1612 		t += 200;
1613 	} while (t < 150000);
1614 
1615 	return ETIMEDOUT;
1616 }
1617 
1618 void
1619 iwm_apm_config(struct iwm_softc *sc)
1620 {
1621 	pcireg_t lctl, cap;
1622 
1623 	/*
1624 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1625 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1626 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1627 	 *    costs negligible amount of power savings.
1628 	 * If not (unlikely), enable L0S, so there is at least some
1629 	 *    power savings, even without L1.
1630 	 */
1631 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1632 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1633 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1634 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1635 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1636 	} else {
1637 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1638 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1639 	}
1640 
1641 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1642 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1643 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1644 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1645 	    DEVNAME(sc),
1646 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1647 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1648 }
1649 
1650 /*
1651  * Start up the NIC's basic functionality after it has been reset,
1652  * e.g. after platform boot or shutdown.
1653  * NOTE: This does not load uCode nor start the embedded processor.
1654  */
1655 int
1656 iwm_apm_init(struct iwm_softc *sc)
1657 {
1658 	int err = 0;
1659 
1660 	/* Disable L0S exit timer (platform NMI workaround) */
1661 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1662 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1663 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1664 
1665 	/*
1666 	 * Disable L0s without affecting L1;
1667 	 *  don't wait for ICH L0s (ICH bug W/A)
1668 	 */
1669 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1670 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1671 
1672 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1673 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1674 
1675 	/*
1676 	 * Enable HAP INTA (interrupt from management bus) to
1677 	 * wake device's PCI Express link L1a -> L0s
1678 	 */
1679 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1680 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1681 
1682 	iwm_apm_config(sc);
1683 
1684 #if 0 /* not for 7k/8k */
1685 	/* Configure analog phase-lock-loop before activating to D0A */
1686 	if (trans->cfg->base_params->pll_cfg_val)
1687 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1688 		    trans->cfg->base_params->pll_cfg_val);
1689 #endif
1690 
1691 	/*
1692 	 * Set "initialization complete" bit to move adapter from
1693 	 * D0U* --> D0A* (powered-up active) state.
1694 	 */
1695 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1696 
1697 	/*
1698 	 * Wait for clock stabilization; once stabilized, access to
1699 	 * device-internal resources is supported, e.g. iwm_write_prph()
1700 	 * and accesses to uCode SRAM.
1701 	 */
1702 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1703 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1704 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1705 		printf("%s: timeout waiting for clock stabilization\n",
1706 		    DEVNAME(sc));
1707 		err = ETIMEDOUT;
1708 		goto out;
1709 	}
1710 
1711 	if (sc->host_interrupt_operation_mode) {
1712 		/*
1713 		 * This is a bit of an abuse: this workaround is needed for
1714 		 * 7260/3160 only, so we check host_interrupt_operation_mode
1715 		 * even though the workaround itself is unrelated to that flag.
1716 		 *
1717 		 * Enable the oscillator to count wake-up time for L1 exit. This
1718 		 * consumes slightly more power (100uA), but ensures that we
1719 		 * wake up from L1 on time.
1720 		 *
1721 		 * This looks weird: read twice the same register, discard the
1722 		 * value, set a bit, and yet again, read that same register
1723 		 * just to discard the value. But that's the way the hardware
1724 		 * seems to like it.
1725 		 */
1726 		if (iwm_nic_lock(sc)) {
1727 			iwm_read_prph(sc, IWM_OSC_CLK);
1728 			iwm_read_prph(sc, IWM_OSC_CLK);
1729 			iwm_nic_unlock(sc);
1730 		}
1731 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1732 		if (iwm_nic_lock(sc)) {
1733 			iwm_read_prph(sc, IWM_OSC_CLK);
1734 			iwm_read_prph(sc, IWM_OSC_CLK);
1735 			iwm_nic_unlock(sc);
1736 		}
1737 	}
1738 
1739 	/*
1740 	 * Enable DMA clock and wait for it to stabilize.
1741 	 *
1742 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1743 	 * do not disable clocks.  This preserves any hardware bits already
1744 	 * set by default in "CLK_CTRL_REG" after reset.
1745 	 */
1746 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1747 		if (iwm_nic_lock(sc)) {
1748 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1749 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1750 			iwm_nic_unlock(sc);
1751 		}
1752 		DELAY(20);
1753 
1754 		/* Disable L1-Active */
1755 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1756 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1757 
1758 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1759 		if (iwm_nic_lock(sc)) {
1760 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1761 			    IWM_APMG_RTC_INT_STT_RFKILL);
1762 			iwm_nic_unlock(sc);
1763 		}
1764 	}
1765  out:
1766 	if (err)
1767 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1768 	return err;
1769 }
1770 
1771 void
1772 iwm_apm_stop(struct iwm_softc *sc)
1773 {
1774 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1775 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1776 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1777 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1778 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1779 	DELAY(1000);
1780 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1781 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1782 	DELAY(5000);
1783 
1784 	/* stop device's busmaster DMA activity */
1785 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1786 
1787 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1788 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1789 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1790 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1791 
1792 	/*
1793 	 * Clear "initialization complete" bit to move adapter from
1794 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1795 	 */
1796 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1797 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1798 }
1799 
1800 void
1801 iwm_init_msix_hw(struct iwm_softc *sc)
1802 {
1803 	iwm_conf_msix_hw(sc, 0);
1804 
1805 	if (!sc->sc_msix)
1806 		return;
1807 
1808 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1809 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1810 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1811 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1812 }
1813 
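/*
 * Configure the MSI-X hardware. Without MSI-X, just select MSI mode.
 * With MSI-X, route all RX queue and non-RX interrupt causes to a
 * single vector (vector 0) and unmask their cause bits.
 */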
1814 void
1815 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1816 {
1817 	int vector = 0;
1818 
1819 	if (!sc->sc_msix) {
1820 		/* Newer chips default to MSI-X; tell them to use MSI instead. */
1821 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1822 			iwm_write_prph(sc, IWM_UREG_CHICK,
1823 			    IWM_UREG_CHICK_MSI_ENABLE);
1824 			iwm_nic_unlock(sc);
1825 		}
1826 		return;
1827 	}
1828 
1829 	if (!stopped && iwm_nic_lock(sc)) {
1830 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1831 		iwm_nic_unlock(sc);
1832 	}
1833 
1834 	/* Disable all interrupts */
1835 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1836 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1837 
1838 	/* Map fallback-queue (command/mgmt) to a single vector */
1839 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1840 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1841 	/* Map RSS queue (data) to the same vector */
1842 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1843 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1844 
1845 	/* Enable the interrupt causes for the RX queues. */
1846 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1847 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1848 
1849 	/* Map non-RX causes to the same vector */
1850 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1851 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1852 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1853 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1854 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1855 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1856 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1857 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1858 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1859 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1860 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1861 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1862 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1863 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1864 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1865 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1866 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1867 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1868 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1869 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1870 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1871 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1872 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1873 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1874 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1875 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1876 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1877 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1878 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1879 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1880 
1881 	/* Enable interrupts for the non-RX causes. */
1882 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1883 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1884 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1885 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1886 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1887 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1888 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1889 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1890 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1891 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1892 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1893 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1894 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1895 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1896 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
1897 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
1898 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
1899 }
1900 
1901 int
1902 iwm_start_hw(struct iwm_softc *sc)
1903 {
1904 	int err;
1905 
1906 	err = iwm_prepare_card_hw(sc);
1907 	if (err)
1908 		return err;
1909 
1910 	/* Reset the entire device */
1911 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1912 	DELAY(5000);
1913 
1914 	err = iwm_apm_init(sc);
1915 	if (err)
1916 		return err;
1917 
1918 	iwm_init_msix_hw(sc);
1919 
1920 	iwm_enable_rfkill_int(sc);
1921 	iwm_check_rfkill(sc);
1922 
1923 	return 0;
1924 }
1925 
1926 
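/*
 * Shut the device down: halt the DMA channels, reset the RX and TX rings,
 * stop the APM, and reset the on-board processor, while keeping the
 * RF-kill interrupt armed.
 */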
1927 void
1928 iwm_stop_device(struct iwm_softc *sc)
1929 {
1930 	int chnl, ntries;
1931 	int qid;
1932 
1933 	iwm_disable_interrupts(sc);
1934 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1935 
1936 	/* Stop all DMA channels. */
1937 	if (iwm_nic_lock(sc)) {
1938 		/* Deactivate TX scheduler. */
1939 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1940 
1941 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1942 			IWM_WRITE(sc,
1943 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1944 			for (ntries = 0; ntries < 200; ntries++) {
1945 				uint32_t r;
1946 
1947 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1948 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1949 				    chnl))
1950 					break;
1951 				DELAY(20);
1952 			}
1953 		}
1954 		iwm_nic_unlock(sc);
1955 	}
1956 	iwm_disable_rx_dma(sc);
1957 
1958 	iwm_reset_rx_ring(sc, &sc->rxq);
1959 
1960 	for (qid = 0; qid < nitems(sc->txq); qid++)
1961 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1962 
1963 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1964 		if (iwm_nic_lock(sc)) {
1965 			/* Power-down device's busmaster DMA clocks */
1966 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1967 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1968 			iwm_nic_unlock(sc);
1969 		}
1970 		DELAY(5);
1971 	}
1972 
1973 	/* Make sure we've released our request to stay awake (redundant). */
1974 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1975 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1976 	if (sc->sc_nic_locks > 0)
1977 		printf("%s: %d active NIC locks forcefully cleared\n",
1978 		    DEVNAME(sc), sc->sc_nic_locks);
1979 	sc->sc_nic_locks = 0;
1980 
1981 	/* Stop the device, and put it in low power state */
1982 	iwm_apm_stop(sc);
1983 
1984 	/* Reset the on-board processor. */
1985 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1986 	DELAY(5000);
1987 
1988 	/*
1989 	 * Upon stop, the IVAR table gets erased, so MSI-X won't
1990 	 * work. This causes a bug in RF-KILL flows, since the interrupt
1991 	 * that enables the radio won't fire on the correct IRQ, and the
1992 	 * driver won't be able to handle the interrupt.
1993 	 * Configure the IVAR table again after reset.
1994 	 */
1995 	iwm_conf_msix_hw(sc, 1);
1996 
1997 	/*
1998 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1999 	 * Clear the interrupt again.
2000 	 */
2001 	iwm_disable_interrupts(sc);
2002 
2003 	/* Even though we stop the HW we still want the RF kill interrupt. */
2004 	iwm_enable_rfkill_int(sc);
2005 	iwm_check_rfkill(sc);
2006 
2007 	iwm_prepare_card_hw(sc);
2008 }
2009 
2010 void
2011 iwm_nic_config(struct iwm_softc *sc)
2012 {
2013 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2014 	uint32_t mask, val, reg_val = 0;
2015 
2016 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2017 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2018 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2019 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2020 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2021 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2022 
2023 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2024 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2025 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2026 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2027 
2028 	/* radio configuration */
2029 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2030 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2031 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2032 
2033 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2034 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2035 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2036 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2037 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2038 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2039 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2040 
2041 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2042 	val &= ~mask;
2043 	val |= reg_val;
2044 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2045 
2046 	/*
2047 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2048 	 * (PCIe power is lost before PERST# is asserted), causing the ME
2049 	 * FW to lose ownership and be unable to obtain it back.
2050 	 */
2051 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2052 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2053 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2054 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2055 }
2056 
2057 int
2058 iwm_nic_rx_init(struct iwm_softc *sc)
2059 {
2060 	if (sc->sc_mqrx_supported)
2061 		return iwm_nic_rx_mq_init(sc);
2062 	else
2063 		return iwm_nic_rx_legacy_init(sc);
2064 }
2065 
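/*
 * Initialize the multi-queue RX hardware: program the free/used
 * descriptor rings and status page for queue 0, then enable RX DMA
 * with 4KB buffers and DMA snooping.
 */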
2066 int
2067 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2068 {
2069 	int enabled;
2070 
2071 	if (!iwm_nic_lock(sc))
2072 		return EBUSY;
2073 
2074 	/* Stop RX DMA. */
2075 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2076 	/* Disable RX used and free queue operation. */
2077 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2078 
2079 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2080 	    sc->rxq.free_desc_dma.paddr);
2081 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2082 	    sc->rxq.used_desc_dma.paddr);
2083 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2084 	    sc->rxq.stat_dma.paddr);
2085 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2086 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2087 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2088 
2089 	/* We configure only queue 0 for now. */
2090 	enabled = ((1 << 0) << 16) | (1 << 0);
2091 
2092 	/* Enable RX DMA, 4KB buffer size. */
2093 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2094 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2095 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2096 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2097 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2098 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2099 
2100 	/* Enable RX DMA snooping. */
2101 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2102 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2103 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2104 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2105 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2106 
2107 	/* Enable the configured queue(s). */
2108 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2109 
2110 	iwm_nic_unlock(sc);
2111 
2112 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2113 
2114 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2115 
2116 	return 0;
2117 }
2118 
2119 int
2120 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2121 {
2122 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2123 
2124 	iwm_disable_rx_dma(sc);
2125 
2126 	if (!iwm_nic_lock(sc))
2127 		return EBUSY;
2128 
2129 	/* reset and flush pointers */
2130 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2131 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2132 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2133 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2134 
2135 	/* Set physical address of RX ring (256-byte aligned). */
2136 	IWM_WRITE(sc,
2137 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2138 
2139 	/* Set physical address of RX status (16-byte aligned). */
2140 	IWM_WRITE(sc,
2141 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2142 
2143 	/* Enable RX. */
2144 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2145 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2146 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2147 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2148 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2149 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2150 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2151 
2152 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2153 
2154 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2155 	if (sc->host_interrupt_operation_mode)
2156 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2157 
2158 	iwm_nic_unlock(sc);
2159 
2160 	/*
2161 	 * This value should initially be 0 (before preparing any RBs),
2162 	 * and should be 8 after preparing the first 8 RBs (for example).
2163 	 */
2164 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2165 
2166 	return 0;
2167 }
2168 
2169 int
2170 iwm_nic_tx_init(struct iwm_softc *sc)
2171 {
2172 	int qid;
2173 
2174 	if (!iwm_nic_lock(sc))
2175 		return EBUSY;
2176 
2177 	/* Deactivate TX scheduler. */
2178 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2179 
2180 	/* Set physical address of "keep warm" page (16-byte aligned). */
2181 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2182 
2183 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2184 		struct iwm_tx_ring *txq = &sc->txq[qid];
2185 
2186 		/* Set physical address of TX ring (256-byte aligned). */
2187 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2188 		    txq->desc_dma.paddr >> 8);
2189 	}
2190 
2191 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2192 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2193 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2194 
2195 	iwm_nic_unlock(sc);
2196 
2197 	return 0;
2198 }
2199 
2200 int
2201 iwm_nic_init(struct iwm_softc *sc)
2202 {
2203 	int err;
2204 
2205 	iwm_apm_init(sc);
2206 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2207 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2208 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2209 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2210 
2211 	iwm_nic_config(sc);
2212 
2213 	err = iwm_nic_rx_init(sc);
2214 	if (err)
2215 		return err;
2216 
2217 	err = iwm_nic_tx_init(sc);
2218 	if (err)
2219 		return err;
2220 
2221 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2222 
2223 	return 0;
2224 }
2225 
2226 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2227 const uint8_t iwm_ac_to_tx_fifo[] = {
2228 	IWM_TX_FIFO_BE,
2229 	IWM_TX_FIFO_BK,
2230 	IWM_TX_FIFO_VI,
2231 	IWM_TX_FIFO_VO,
2232 };
2233 
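/*
 * Activate a TX queue and bind it to a TX FIFO by programming the
 * scheduler registers directly. This is used for queues which must work
 * before firmware-based queue configuration is available, such as the
 * command queue.
 */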
2234 int
2235 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2236 {
2237 	iwm_nic_assert_locked(sc);
2238 
2239 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2240 
2241 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2242 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2243 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2244 
2245 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2246 
2247 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2248 
2249 	iwm_write_mem32(sc,
2250 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2251 
2252 	/* Set scheduler window size and frame limit. */
2253 	iwm_write_mem32(sc,
2254 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2255 	    sizeof(uint32_t),
2256 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2257 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2258 	    ((IWM_FRAME_LIMIT
2259 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2260 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2261 
2262 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2263 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2264 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2265 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2266 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2267 
2268 	if (qid == sc->cmdqid)
2269 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2270 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2271 
2272 	return 0;
2273 }
2274 
2275 int
2276 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2277 {
2278 	struct iwm_scd_txq_cfg_cmd cmd;
2279 	int err;
2280 
2281 	iwm_nic_assert_locked(sc);
2282 
2283 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2284 
2285 	memset(&cmd, 0, sizeof(cmd));
2286 	cmd.scd_queue = qid;
2287 	cmd.enable = 1;
2288 	cmd.sta_id = sta_id;
2289 	cmd.tx_fifo = fifo;
2290 	cmd.aggregate = 0;
2291 	cmd.window = IWM_FRAME_LIMIT;
2292 
2293 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2294 	    sizeof(cmd), &cmd);
2295 	if (err)
2296 		return err;
2297 
2298 	return 0;
2299 }
2300 
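/*
 * Complete hardware setup after the firmware has sent its ALIVE
 * notification: reset the ICT, clear the scheduler's context area in
 * SRAM, enable the command queue, and start the TX DMA channels.
 */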
2301 int
2302 iwm_post_alive(struct iwm_softc *sc)
2303 {
2304 	int nwords;
2305 	int err, chnl;
2306 	uint32_t base;
2307 
2308 	if (!iwm_nic_lock(sc))
2309 		return EBUSY;
2310 
2311 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2312 
2313 	iwm_ict_reset(sc);
2314 
2315 	iwm_nic_unlock(sc);
2316 
2317 	/* Clear TX scheduler state in SRAM. */
2318 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2319 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2320 	    / sizeof(uint32_t);
2321 	err = iwm_write_mem(sc,
2322 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2323 	    NULL, nwords);
2324 	if (err)
2325 		return err;
2326 
2327 	if (!iwm_nic_lock(sc))
2328 		return EBUSY;
2329 
2330 	/* Set physical address of TX scheduler rings (1KB aligned). */
2331 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2332 
2333 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2334 
2335 	/* enable command channel */
2336 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2337 	if (err) {
2338 		iwm_nic_unlock(sc);
2339 		return err;
2340 	}
2341 
2342 	/* Activate TX scheduler. */
2343 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2344 
2345 	/* Enable DMA channels. */
2346 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2347 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2348 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2349 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2350 	}
2351 
2352 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2353 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2354 
2355 	iwm_nic_unlock(sc);
2356 
2357 	/* Enable L1-Active */
2358 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
2359 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2360 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2361 
2362 	return err;
2363 }
2364 
2365 struct iwm_phy_db_entry *
2366 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2367 {
2368 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2369 
2370 	if (type >= IWM_PHY_DB_MAX)
2371 		return NULL;
2372 
2373 	switch (type) {
2374 	case IWM_PHY_DB_CFG:
2375 		return &phy_db->cfg;
2376 	case IWM_PHY_DB_CALIB_NCH:
2377 		return &phy_db->calib_nch;
2378 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2379 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2380 			return NULL;
2381 		return &phy_db->calib_ch_group_papd[chg_id];
2382 	case IWM_PHY_DB_CALIB_CHG_TXP:
2383 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2384 			return NULL;
2385 		return &phy_db->calib_ch_group_txp[chg_id];
2386 	default:
2387 		return NULL;
2388 	}
2389 	return NULL;
2390 }
2391 
2392 int
2393 iwm_phy_db_set_section(struct iwm_softc *sc,
2394     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2395 {
2396 	uint16_t type = le16toh(phy_db_notif->type);
2397 	uint16_t size  = le16toh(phy_db_notif->length);
2398 	struct iwm_phy_db_entry *entry;
2399 	uint16_t chg_id = 0;
2400 
2401 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2402 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2403 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2404 
2405 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2406 	if (!entry)
2407 		return EINVAL;
2408 
2409 	if (entry->data)
2410 		free(entry->data, M_DEVBUF, entry->size);
2411 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2412 	if (!entry->data) {
2413 		entry->size = 0;
2414 		return ENOMEM;
2415 	}
2416 	memcpy(entry->data, phy_db_notif->data, size);
2417 	entry->size = size;
2418 
2419 	return 0;
2420 }
2421 
2422 int
2423 iwm_is_valid_channel(uint16_t ch_id)
2424 {
2425 	if (ch_id <= 14 ||
2426 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2427 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2428 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2429 		return 1;
2430 	return 0;
2431 }
2432 
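/*
 * Map a channel number to its index in the driver's channel tables:
 * channels 1-14 map to indices 0-13, and the 5GHz channels follow
 * contiguously, e.g. channel 36 -> (36 + 20) / 4 = 14 and
 * channel 100 -> (100 - 12) / 4 = 22.
 */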
2433 uint8_t
2434 iwm_ch_id_to_ch_index(uint16_t ch_id)
2435 {
2436 	if (!iwm_is_valid_channel(ch_id))
2437 		return 0xff;
2438 
2439 	if (ch_id <= 14)
2440 		return ch_id - 1;
2441 	if (ch_id <= 64)
2442 		return (ch_id + 20) / 4;
2443 	if (ch_id <= 140)
2444 		return (ch_id - 12) / 4;
2445 	return (ch_id - 13) / 4;
2446 }
2447 
2448 
2449 uint16_t
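/*
 * Map a channel to its PAPD calibration channel group: group 0 covers
 * the 2GHz channels 1-14, and groups 1-3 cover the 5GHz subbands.
 */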
2450 iwm_channel_id_to_papd(uint16_t ch_id)
2451 {
2452 	if (!iwm_is_valid_channel(ch_id))
2453 		return 0xff;
2454 
2455 	if (1 <= ch_id && ch_id <= 14)
2456 		return 0;
2457 	if (36 <= ch_id && ch_id <= 64)
2458 		return 1;
2459 	if (100 <= ch_id && ch_id <= 140)
2460 		return 2;
2461 	return 3;
2462 }
2463 
2464 uint16_t
2465 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2466 {
2467 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2468 	struct iwm_phy_db_chg_txp *txp_chg;
2469 	int i;
2470 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2471 
2472 	if (ch_index == 0xff)
2473 		return 0xff;
2474 
2475 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2476 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2477 		if (!txp_chg)
2478 			return 0xff;
2479 		/*
2480 		 * Look for the first channel group whose maximum channel
2481 		 * index is greater than or equal to the requested channel's.
2482 		 */
2483 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2484 			return i;
2485 	}
2486 	return 0xff;
2487 }
2488 
2489 int
2490 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2491     uint16_t *size, uint16_t ch_id)
2492 {
2493 	struct iwm_phy_db_entry *entry;
2494 	uint16_t ch_group_id = 0;
2495 
2496 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2497 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2498 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2499 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2500 
2501 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2502 	if (!entry)
2503 		return EINVAL;
2504 
2505 	*data = entry->data;
2506 	*size = entry->size;
2507 
2508 	return 0;
2509 }
2510 
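/*
 * Upload one phy DB section to the firmware. The section header goes in
 * the first command fragment and the payload in the second.
 */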
2511 int
2512 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2513     void *data)
2514 {
2515 	struct iwm_phy_db_cmd phy_db_cmd;
2516 	struct iwm_host_cmd cmd = {
2517 		.id = IWM_PHY_DB_CMD,
2518 		.flags = IWM_CMD_ASYNC,
2519 	};
2520 
2521 	phy_db_cmd.type = htole16(type);
2522 	phy_db_cmd.length = htole16(length);
2523 
2524 	cmd.data[0] = &phy_db_cmd;
2525 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2526 	cmd.data[1] = data;
2527 	cmd.len[1] = length;
2528 
2529 	return iwm_send_cmd(sc, &cmd);
2530 }
2531 
2532 int
2533 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2534     uint8_t max_ch_groups)
2535 {
2536 	uint16_t i;
2537 	int err;
2538 	struct iwm_phy_db_entry *entry;
2539 
2540 	for (i = 0; i < max_ch_groups; i++) {
2541 		entry = iwm_phy_db_get_section(sc, type, i);
2542 		if (!entry)
2543 			return EINVAL;
2544 
2545 		if (!entry->size)
2546 			continue;
2547 
2548 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2549 		if (err)
2550 			return err;
2551 
2552 		DELAY(1000);
2553 	}
2554 
2555 	return 0;
2556 }
2557 
2558 int
2559 iwm_send_phy_db_data(struct iwm_softc *sc)
2560 {
2561 	uint8_t *data = NULL;
2562 	uint16_t size = 0;
2563 	int err;
2564 
2565 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2566 	if (err)
2567 		return err;
2568 
2569 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2570 	if (err)
2571 		return err;
2572 
2573 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2574 	    &data, &size, 0);
2575 	if (err)
2576 		return err;
2577 
2578 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2579 	if (err)
2580 		return err;
2581 
2582 	err = iwm_phy_db_send_all_channel_groups(sc,
2583 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2584 	if (err)
2585 		return err;
2586 
2587 	err = iwm_phy_db_send_all_channel_groups(sc,
2588 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2589 	if (err)
2590 		return err;
2591 
2592 	return 0;
2593 }
2594 
2595 /*
2596  * For the high priority TE use a time event type that has similar priority to
2597  * the FW's action scan priority.
2598  */
2599 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2600 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2601 
2602 int
2603 iwm_send_time_event_cmd(struct iwm_softc *sc,
2604     const struct iwm_time_event_cmd *cmd)
2605 {
2606 	struct iwm_rx_packet *pkt;
2607 	struct iwm_time_event_resp *resp;
2608 	struct iwm_host_cmd hcmd = {
2609 		.id = IWM_TIME_EVENT_CMD,
2610 		.flags = IWM_CMD_WANT_RESP,
2611 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2612 	};
2613 	uint32_t resp_len;
2614 	int err;
2615 
2616 	hcmd.data[0] = cmd;
2617 	hcmd.len[0] = sizeof(*cmd);
2618 	err = iwm_send_cmd(sc, &hcmd);
2619 	if (err)
2620 		return err;
2621 
2622 	pkt = hcmd.resp_pkt;
2623 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2624 		err = EIO;
2625 		goto out;
2626 	}
2627 
2628 	resp_len = iwm_rx_packet_payload_len(pkt);
2629 	if (resp_len != sizeof(*resp)) {
2630 		err = EIO;
2631 		goto out;
2632 	}
2633 
2634 	resp = (void *)pkt->data;
2635 	if (le32toh(resp->status) == 0)
2636 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2637 	else
2638 		err = EIO;
2639 out:
2640 	iwm_free_resp(sc, &hcmd);
2641 	return err;
2642 }
2643 
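/*
 * Schedule a time event so the firmware protects the session on the
 * current channel, e.g. to give association a chance to complete.
 */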
2644 void
2645 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2646     uint32_t duration, uint32_t max_delay)
2647 {
2648 	struct iwm_time_event_cmd time_cmd;
2649 
2650 	/* Do nothing if a time event is already scheduled. */
2651 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2652 		return;
2653 
2654 	memset(&time_cmd, 0, sizeof(time_cmd));
2655 
2656 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2657 	time_cmd.id_and_color =
2658 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2659 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2660 
2661 	time_cmd.apply_time = htole32(0);
2662 
2663 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2664 	time_cmd.max_delay = htole32(max_delay);
2665 	/* TODO: why set interval to the beacon interval if not periodic? */
2666 	time_cmd.interval = htole32(1);
2667 	time_cmd.duration = htole32(duration);
2668 	time_cmd.repeat = 1;
2669 	time_cmd.policy
2670 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2671 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2672 		IWM_T2_V2_START_IMMEDIATELY);
2673 
2674 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2675 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2676 
2677 	DELAY(100);
2678 }
2679 
2680 void
2681 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2682 {
2683 	struct iwm_time_event_cmd time_cmd;
2684 
2685 	/* Do nothing if the time event has already ended. */
2686 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2687 		return;
2688 
2689 	memset(&time_cmd, 0, sizeof(time_cmd));
2690 
2691 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2692 	time_cmd.id_and_color =
2693 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2694 	time_cmd.id = htole32(sc->sc_time_event_uid);
2695 
2696 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2697 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2698 
2699 	DELAY(100);
2700 }
2701 
2702 /*
2703  * NVM read access and content parsing.  We do not support
2704  * external NVM or writing NVM.
2705  */
2706 
2707 /* list of NVM sections we are allowed/need to read */
2708 const int iwm_nvm_to_read[] = {
2709 	IWM_NVM_SECTION_TYPE_HW,
2710 	IWM_NVM_SECTION_TYPE_SW,
2711 	IWM_NVM_SECTION_TYPE_REGULATORY,
2712 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2713 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2714 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2715 	IWM_NVM_SECTION_TYPE_HW_8000,
2716 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2717 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2718 };
2719 
2720 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2721 
2722 #define IWM_NVM_WRITE_OPCODE 1
2723 #define IWM_NVM_READ_OPCODE 0
2724 
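/*
 * Read a chunk of an NVM section via the NVM_ACCESS firmware command
 * and sanity-check that the response matches the requested offset and
 * does not exceed the requested length.
 */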
2725 int
2726 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2727     uint16_t length, uint8_t *data, uint16_t *len)
2728 {
2730 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2731 		.offset = htole16(offset),
2732 		.length = htole16(length),
2733 		.type = htole16(section),
2734 		.op_code = IWM_NVM_READ_OPCODE,
2735 	};
2736 	struct iwm_nvm_access_resp *nvm_resp;
2737 	struct iwm_rx_packet *pkt;
2738 	struct iwm_host_cmd cmd = {
2739 		.id = IWM_NVM_ACCESS_CMD,
2740 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2741 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2742 		.data = { &nvm_access_cmd, },
2743 	};
2744 	int err, offset_read;
2745 	size_t bytes_read;
2746 	uint8_t *resp_data;
2747 
2748 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2749 
2750 	err = iwm_send_cmd(sc, &cmd);
2751 	if (err)
2752 		return err;
2753 
2754 	pkt = cmd.resp_pkt;
2755 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2756 		err = EIO;
2757 		goto exit;
2758 	}
2759 
2760 	/* Extract NVM response */
2761 	nvm_resp = (void *)pkt->data;
2762 	if (nvm_resp == NULL)
2763 		return EIO;
2764 
2765 	err = le16toh(nvm_resp->status);
2766 	bytes_read = le16toh(nvm_resp->length);
2767 	offset_read = le16toh(nvm_resp->offset);
2768 	resp_data = nvm_resp->data;
2769 	if (err) {
2770 		err = EINVAL;
2771 		goto exit;
2772 	}
2773 
2774 	if (offset_read != offset) {
2775 		err = EINVAL;
2776 		goto exit;
2777 	}
2778 
2779 	if (bytes_read > length) {
2780 		err = EINVAL;
2781 		goto exit;
2782 	}
2783 
2784 	memcpy(data + offset, resp_data, bytes_read);
2785 	*len = bytes_read;
2786 
2787  exit:
2788 	iwm_free_resp(sc, &cmd);
2789 	return err;
2790 }
2791 
2792 /*
2793  * Reads an NVM section completely.
2794  * NICs prior to the 7000 family don't have a real NVM and just read
2795  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2796  * by the uCode, we need to manually check in this case that we don't
2797  * overflow and try to read more than the EEPROM size.
2798  */
2799 int
2800 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2801     uint16_t *len, size_t max_len)
2802 {
2803 	uint16_t chunklen, seglen;
2804 	int err = 0;
2805 
2806 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2807 	*len = 0;
2808 
2809 	/* Read NVM chunks until exhausted (reading less than requested) */
2810 	while (seglen == chunklen && *len < max_len) {
2811 		err = iwm_nvm_read_chunk(sc,
2812 		    section, *len, chunklen, data, &seglen);
2813 		if (err)
2814 			return err;
2815 
2816 		*len += seglen;
2817 	}
2818 
2819 	return err;
2820 }
2821 
2822 uint8_t
2823 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2824 {
2825 	uint8_t tx_ant;
2826 
2827 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2828 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2829 
2830 	if (sc->sc_nvm.valid_tx_ant)
2831 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2832 
2833 	return tx_ant;
2834 }
2835 
2836 uint8_t
2837 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2838 {
2839 	uint8_t rx_ant;
2840 
2841 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2842 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2843 
2844 	if (sc->sc_nvm.valid_rx_ant)
2845 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2846 
2847 	return rx_ant;
2848 }
2849 
2850 void
2851 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2852     const uint8_t *nvm_channels, int nchan)
2853 {
2854 	struct ieee80211com *ic = &sc->sc_ic;
2855 	struct iwm_nvm_data *data = &sc->sc_nvm;
2856 	int ch_idx;
2857 	struct ieee80211_channel *channel;
2858 	uint16_t ch_flags;
2859 	int is_5ghz;
2860 	int flags, hw_value;
2861 
2862 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2863 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2864 
2865 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2866 		    !data->sku_cap_band_52GHz_enable)
2867 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2868 
2869 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
2870 			continue;
2871 
2872 		hw_value = nvm_channels[ch_idx];
2873 		channel = &ic->ic_channels[hw_value];
2874 
2875 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2876 		if (!is_5ghz) {
2877 			flags = IEEE80211_CHAN_2GHZ;
2878 			channel->ic_flags
2879 			    = IEEE80211_CHAN_CCK
2880 			    | IEEE80211_CHAN_OFDM
2881 			    | IEEE80211_CHAN_DYN
2882 			    | IEEE80211_CHAN_2GHZ;
2883 		} else {
2884 			flags = IEEE80211_CHAN_5GHZ;
2885 			channel->ic_flags =
2886 			    IEEE80211_CHAN_A;
2887 		}
2888 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2889 
2890 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2891 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2892 
2893 		if (data->sku_cap_11n_enable)
2894 			channel->ic_flags |= IEEE80211_CHAN_HT;
2895 	}
2896 }
2897 
2898 int
2899 iwm_mimo_enabled(struct iwm_softc *sc)
2900 {
2901 	struct ieee80211com *ic = &sc->sc_ic;
2902 
2903 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2904 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2905 }
2906 
2907 void
2908 iwm_setup_ht_rates(struct iwm_softc *sc)
2909 {
2910 	struct ieee80211com *ic = &sc->sc_ic;
2911 	uint8_t rx_ant;
2912 
2913 	/* TX is supported with the same MCS as RX. */
2914 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2915 
2916 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2917 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2918 
2919 	if (!iwm_mimo_enabled(sc))
2920 		return;
2921 
2922 	rx_ant = iwm_fw_valid_rx_ant(sc);
2923 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2924 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2925 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2926 }
2927 
2928 void
2929 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
2930     uint16_t ssn, uint16_t buf_size)
2931 {
2932 	reorder_buf->head_sn = ssn;
2933 	reorder_buf->num_stored = 0;
2934 	reorder_buf->buf_size = buf_size;
2935 	reorder_buf->last_amsdu = 0;
2936 	reorder_buf->last_sub_index = 0;
2937 	reorder_buf->removed = 0;
2938 	reorder_buf->valid = 0;
2939 	reorder_buf->consec_oldsn_drops = 0;
2940 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2941 	reorder_buf->consec_oldsn_prev_drop = 0;
2942 }
2943 
2944 void
2945 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
2946 {
2947 	int i;
2948 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2949 	struct iwm_reorder_buf_entry *entry;
2950 
2951 	for (i = 0; i < reorder_buf->buf_size; i++) {
2952 		entry = &rxba->entries[i];
2953 		ml_purge(&entry->frames);
2954 		timerclear(&entry->reorder_time);
2955 	}
2956 
2957 	reorder_buf->removed = 1;
2958 	timeout_del(&reorder_buf->reorder_timer);
2959 	timerclear(&rxba->last_rx);
2960 	timeout_del(&rxba->session_timer);
2961 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
2962 }
2963 
2964 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2965 
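/*
 * Session timer callback: if the RX BA session has been idle for longer
 * than its negotiated timeout, tear it down with a DELBA request;
 * otherwise re-arm the timer.
 */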
2966 void
2967 iwm_rx_ba_session_expired(void *arg)
2968 {
2969 	struct iwm_rxba_data *rxba = arg;
2970 	struct iwm_softc *sc = rxba->sc;
2971 	struct ieee80211com *ic = &sc->sc_ic;
2972 	struct ieee80211_node *ni = ic->ic_bss;
2973 	struct timeval now, timeout, expiry;
2974 	int s;
2975 
2976 	s = splnet();
2977 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
2978 	    ic->ic_state == IEEE80211_S_RUN &&
2979 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
2980 		getmicrouptime(&now);
2981 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2982 		timeradd(&rxba->last_rx, &timeout, &expiry);
2983 		if (timercmp(&now, &expiry, <)) {
2984 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
2985 		} else {
2986 			ic->ic_stats.is_ht_rx_ba_timeout++;
2987 			ieee80211_delba_request(ic, ni,
2988 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2989 		}
2990 	}
2991 	splx(s);
2992 }
2993 
2994 void
2995 iwm_reorder_timer_expired(void *arg)
2996 {
2997 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2998 	struct iwm_reorder_buffer *buf = arg;
2999 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3000 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3001 	struct iwm_softc *sc = rxba->sc;
3002 	struct ieee80211com *ic = &sc->sc_ic;
3003 	struct ieee80211_node *ni = ic->ic_bss;
3004 	int i, s;
3005 	uint16_t sn = 0, index = 0;
3006 	int expired = 0;
3007 	int cont = 0;
3008 	struct timeval now, timeout, expiry;
3009 
3010 	if (!buf->num_stored || buf->removed)
3011 		return;
3012 
3013 	s = splnet();
3014 	getmicrouptime(&now);
3015 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3016 
3017 	for (i = 0; i < buf->buf_size ; i++) {
3018 		index = (buf->head_sn + i) % buf->buf_size;
3019 
3020 		if (ml_empty(&entries[index].frames)) {
3021 			/*
3022 			 * If there is a hole and the next frame didn't expire
3023 			 * we want to break and not advance SN.
3024 			 */
3025 			cont = 0;
3026 			continue;
3027 		}
3028 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3029 		if (!cont && timercmp(&now, &expiry, <))
3030 			break;
3031 
3032 		expired = 1;
3033 		/* continue until next hole after this expired frame */
3034 		cont = 1;
3035 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3036 	}
3037 
3038 	if (expired) {
3039 		/* SN is set to the last expired frame + 1 */
3040 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3041 		if_input(&sc->sc_ic.ic_if, &ml);
3042 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3043 	} else {
3044 		/*
3045 		 * If no frame expired and there are stored frames, index now
3046 		 * points to the first unexpired frame; re-arm the reorder
3047 		 * timeout accordingly.
3048 		 */
3049 		timeout_add_usec(&buf->reorder_timer,
3050 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3051 	}
3052 
3053 	splx(s);
3054 }
3055 
3056 #define IWM_MAX_RX_BA_SESSIONS 16
3057 
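/*
 * Start or stop an RX block ack session by updating the firmware's
 * station table. On MQ-RX hardware this also sets up or tears down the
 * reorder buffer associated with the session's BAID.
 */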
3058 void
3059 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3060     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3061 {
3062 	struct ieee80211com *ic = &sc->sc_ic;
3063 	struct iwm_add_sta_cmd cmd;
3064 	struct iwm_node *in = (void *)ni;
3065 	int err, s;
3066 	uint32_t status;
3067 	size_t cmdsize;
3068 	struct iwm_rxba_data *rxba = NULL;
3069 	uint8_t baid = 0;
3070 
3071 	s = splnet();
3072 
3073 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3074 		ieee80211_addba_req_refuse(ic, ni, tid);
3075 		splx(s);
3076 		return;
3077 	}
3078 
3079 	memset(&cmd, 0, sizeof(cmd));
3080 
3081 	cmd.sta_id = IWM_STATION_ID;
3082 	cmd.mac_id_n_color
3083 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3084 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3085 
3086 	if (start) {
3087 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3088 		cmd.add_immediate_ba_ssn = ssn;
3089 		cmd.rx_ba_window = winsize;
3090 	} else {
3091 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3092 	}
3093 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3094 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3095 
3096 	status = IWM_ADD_STA_SUCCESS;
3097 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3098 		cmdsize = sizeof(cmd);
3099 	else
3100 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3101 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3102 	    &status);
3103 
3104 	if (err || (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS) {
3105 		if (start)
3106 			ieee80211_addba_req_refuse(ic, ni, tid);
3107 		splx(s);
3108 		return;
3109 	}
3110 
3111 	if (sc->sc_mqrx_supported) {
3112 		/* Deaggregation is done in hardware. */
3113 		if (start) {
3114 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3115 				ieee80211_addba_req_refuse(ic, ni, tid);
3116 				splx(s);
3117 				return;
3118 			}
3119 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3120 			    IWM_ADD_STA_BAID_SHIFT;
3121 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3122 			    baid >= nitems(sc->sc_rxba_data)) {
3123 				ieee80211_addba_req_refuse(ic, ni, tid);
3124 				splx(s);
3125 				return;
3126 			}
3127 			rxba = &sc->sc_rxba_data[baid];
3128 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3129 				ieee80211_addba_req_refuse(ic, ni, tid);
3130 				splx(s);
3131 				return;
3132 			}
3133 			rxba->sta_id = IWM_STATION_ID;
3134 			rxba->tid = tid;
3135 			rxba->baid = baid;
3136 			rxba->timeout = timeout_val;
3137 			getmicrouptime(&rxba->last_rx);
3138 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3139 			    winsize);
3140 			if (timeout_val != 0) {
3141 				struct ieee80211_rx_ba *ba;
3142 				timeout_add_usec(&rxba->session_timer,
3143 				    timeout_val);
3144 				/* XXX disable net80211's BA timeout handler */
3145 				ba = &ni->ni_rx_ba[tid];
3146 				ba->ba_timeout_val = 0;
3147 			}
3148 		} else {
3149 			int i;
3150 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3151 				rxba = &sc->sc_rxba_data[i];
3152 				if (rxba->baid ==
3153 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3154 					continue;
3155 				if (rxba->tid != tid)
3156 					continue;
3157 				iwm_clear_reorder_buffer(sc, rxba);
3158 				break;
3159 			}
3160 		}
3161 	}
3162 
3163 	if (start) {
3164 		sc->sc_rx_ba_sessions++;
3165 		ieee80211_addba_req_accept(ic, ni, tid);
3166 	} else if (sc->sc_rx_ba_sessions > 0)
3167 		sc->sc_rx_ba_sessions--;
3168 
3169 	splx(s);
3170 }
3171 
3172 void
3173 iwm_htprot_task(void *arg)
3174 {
3175 	struct iwm_softc *sc = arg;
3176 	struct ieee80211com *ic = &sc->sc_ic;
3177 	struct iwm_node *in = (void *)ic->ic_bss;
3178 	int err, s = splnet();
3179 
3180 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
3181 		refcnt_rele_wake(&sc->task_refs);
3182 		splx(s);
3183 		return;
3184 	}
3185 
3186 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
3187 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3188 	if (err)
3189 		printf("%s: could not change HT protection: error %d\n",
3190 		    DEVNAME(sc), err);
3191 
3192 	refcnt_rele_wake(&sc->task_refs);
3193 	splx(s);
3194 }
3195 
3196 /*
3197  * This function is called by upper layer when HT protection settings in
3198  * beacons have changed.
3199  */
3200 void
3201 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
3202 {
3203 	struct iwm_softc *sc = ic->ic_softc;
3204 
3205 	/* assumes that ni == ic->ic_bss */
3206 	iwm_add_task(sc, systq, &sc->htprot_task);
3207 }
3208 
3209 void
3210 iwm_ba_task(void *arg)
3211 {
3212 	struct iwm_softc *sc = arg;
3213 	struct ieee80211com *ic = &sc->sc_ic;
3214 	struct ieee80211_node *ni = ic->ic_bss;
3215 	int s = splnet();
3216 	int tid;
3217 
3218 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
3219 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3220 			break;
3221 		if (sc->ba_start_tidmask & (1 << tid)) {
3222 			iwm_sta_rx_agg(sc, ni, tid, sc->ba_ssn[tid],
3223 			    sc->ba_winsize[tid], sc->ba_timeout_val[tid], 1);
3224 			sc->ba_start_tidmask &= ~(1 << tid);
3225 		} else if (sc->ba_stop_tidmask & (1 << tid)) {
3226 			iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3227 			sc->ba_stop_tidmask &= ~(1 << tid);
3228 		}
3229 	}
3230 
3231 	refcnt_rele_wake(&sc->task_refs);
3232 	splx(s);
3233 }
3234 
3235 /*
3236  * This function is called by upper layer when an ADDBA request is received
3237  * from another STA and before the ADDBA response is sent.
3238  */
3239 int
3240 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3241     uint8_t tid)
3242 {
3243 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3244 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3245 
3246 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3247 	    tid > IWM_MAX_TID_COUNT || (sc->ba_start_tidmask & (1 << tid)))
3248 		return ENOSPC;
3249 
3250 	sc->ba_start_tidmask |= (1 << tid);
3251 	sc->ba_ssn[tid] = ba->ba_winstart;
3252 	sc->ba_winsize[tid] = ba->ba_winsize;
3253 	sc->ba_timeout_val[tid] = ba->ba_timeout_val;
3254 	iwm_add_task(sc, systq, &sc->ba_task);
3255 
3256 	return EBUSY;
3257 }
3258 
3259 /*
3260  * This function is called by upper layer on teardown of an HT-immediate
3261  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3262  */
3263 void
3264 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3265     uint8_t tid)
3266 {
3267 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3268 
3269 	if (tid > IWM_MAX_TID_COUNT || sc->ba_stop_tidmask & (1 << tid))
3270 		return;
3271 
3272 	sc->ba_stop_tidmask |= (1 << tid);
3273 	iwm_add_task(sc, systq, &sc->ba_task);
3274 }
3275 
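/*
 * Determine the MAC address on family 8000 devices: prefer the address
 * from the MAC-override NVM section unless it is the reserved address or
 * otherwise invalid, in which case fall back to the OTP address stored
 * in the WFMP registers.
 */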
3276 void
3277 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3278     const uint16_t *mac_override, const uint16_t *nvm_hw)
3279 {
3280 	const uint8_t *hw_addr;
3281 
3282 	if (mac_override) {
3283 		static const uint8_t reserved_mac[] = {
3284 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3285 		};
3286 
3287 		hw_addr = (const uint8_t *)(mac_override +
3288 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3289 
3290 		/*
3291 		 * Store the MAC address from the MAO section.
3292 		 * No byte swapping is required in the MAO section.
3293 		 */
3294 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3295 
3296 		/*
3297 		 * Force the use of the OTP MAC address in case of reserved MAC
3298 		 * address in the NVM, or if address is given but invalid.
3299 		 */
3300 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3301 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3302 		    sizeof(etherbroadcastaddr)) != 0) &&
3303 		    (memcmp(etheranyaddr, data->hw_addr,
3304 		    sizeof(etheranyaddr)) != 0) &&
3305 		    !ETHER_IS_MULTICAST(data->hw_addr))
3306 			return;
3307 	}
3308 
3309 	if (nvm_hw) {
3310 		/* Read the mac address from WFMP registers. */
3311 		uint32_t mac_addr0, mac_addr1;
3312 
3313 		if (!iwm_nic_lock(sc))
3314 			goto out;
3315 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3316 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3317 		iwm_nic_unlock(sc);
3318 
3319 		hw_addr = (const uint8_t *)&mac_addr0;
3320 		data->hw_addr[0] = hw_addr[3];
3321 		data->hw_addr[1] = hw_addr[2];
3322 		data->hw_addr[2] = hw_addr[1];
3323 		data->hw_addr[3] = hw_addr[0];
3324 
3325 		hw_addr = (const uint8_t *)&mac_addr1;
3326 		data->hw_addr[4] = hw_addr[1];
3327 		data->hw_addr[5] = hw_addr[0];
3328 
3329 		return;
3330 	}
3331 out:
3332 	printf("%s: mac address not found\n", DEVNAME(sc));
3333 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3334 }
3335 
3336 int
3337 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3338     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3339     const uint16_t *mac_override, const uint16_t *phy_sku,
3340     const uint16_t *regulatory, int n_regulatory)
3341 {
3342 	struct iwm_nvm_data *data = &sc->sc_nvm;
3343 	uint8_t hw_addr[ETHER_ADDR_LEN];
3344 	uint32_t sku;
3345 	uint16_t lar_config;
3346 
3347 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3348 
3349 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3350 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3351 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3352 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3353 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3354 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3355 
3356 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3357 	} else {
3358 		uint32_t radio_cfg =
3359 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3360 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3361 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3362 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3363 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3364 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3365 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3366 
3367 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3368 	}
3369 
3370 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3371 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3372 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3373 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3374 
3375 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3376 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3377 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3378 				       IWM_NVM_LAR_OFFSET_8000;
3379 
3380 		lar_config = le16_to_cpup(regulatory + lar_offset);
3381 		data->lar_enabled = !!(lar_config &
3382 				       IWM_NVM_LAR_ENABLED_8000);
3383 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3384 	} else
3385 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3386 
3387 
3388 	/* Stored as little-endian 16-bit words; bytes appear in 2-1-4-3-6-5 order. */
3389 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3390 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3391 		data->hw_addr[0] = hw_addr[1];
3392 		data->hw_addr[1] = hw_addr[0];
3393 		data->hw_addr[2] = hw_addr[3];
3394 		data->hw_addr[3] = hw_addr[2];
3395 		data->hw_addr[4] = hw_addr[5];
3396 		data->hw_addr[5] = hw_addr[4];
3397 	} else
3398 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3399 
3400 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3401 		if (sc->nvm_type == IWM_NVM_SDP) {
3402 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3403 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3404 		} else {
3405 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3406 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3407 		}
3408 	} else
3409 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3410 		    iwm_nvm_channels_8000,
3411 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3412 
3413 	data->calib_version = 255;   /* TODO:
3414 					this value prevents some checks from
3415 					failing; we need to determine whether
3416 					this field is still needed, and if so,
3417 					where it lives in the NVM */
3418 
3419 	return 0;
3420 }
3421 
3422 int
3423 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3424 {
3425 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3426 	const uint16_t *regulatory = NULL;
3427 	int n_regulatory = 0;
3428 
3429 	/* Checking for required sections */
3430 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3431 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3432 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3433 			return ENOENT;
3434 		}
3435 
3436 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3437 
3438 		if (sc->nvm_type == IWM_NVM_SDP) {
3439 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3440 				return ENOENT;
3441 			regulatory = (const uint16_t *)
3442 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3443 			n_regulatory =
3444 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3445 		}
3446 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3447 		/* SW and REGULATORY sections are mandatory */
3448 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3449 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3450 			return ENOENT;
3451 		}
3452 		/* MAC_OVERRIDE or at least HW section must exist */
3453 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3454 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3455 			return ENOENT;
3456 		}
3457 
3458 		/* PHY_SKU section is mandatory in B0 */
3459 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3460 			return ENOENT;
3461 		}
3462 
3463 		regulatory = (const uint16_t *)
3464 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3465 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3466 		hw = (const uint16_t *)
3467 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3468 		mac_override =
3469 			(const uint16_t *)
3470 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3471 		phy_sku = (const uint16_t *)
3472 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3473 	} else {
3474 		panic("unknown device family %d\n", sc->sc_device_family);
3475 	}
3476 
3477 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3478 	calib = (const uint16_t *)
3479 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3480 
3481 	/* XXX should pass in the length of every section */
3482 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3483 	    phy_sku, regulatory, n_regulatory);
3484 }
3485 
3486 int
3487 iwm_nvm_init(struct iwm_softc *sc)
3488 {
3489 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3490 	int i, section, err;
3491 	uint16_t len;
3492 	uint8_t *buf;
3493 	const size_t bufsz = sc->sc_nvm_max_section_size;
3494 
3495 	memset(nvm_sections, 0, sizeof(nvm_sections));
3496 
3497 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3498 	if (buf == NULL)
3499 		return ENOMEM;
3500 
3501 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3502 		section = iwm_nvm_to_read[i];
3503 		KASSERT(section < nitems(nvm_sections));
3504 
3505 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
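		/*
		 * Reads of optional sections may fail; ignore such errors
		 * and let iwm_parse_nvm_sections() decide which sections
		 * are mandatory.
		 */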
3506 		if (err) {
3507 			err = 0;
3508 			continue;
3509 		}
3510 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3511 		if (nvm_sections[section].data == NULL) {
3512 			err = ENOMEM;
3513 			break;
3514 		}
3515 		memcpy(nvm_sections[section].data, buf, len);
3516 		nvm_sections[section].length = len;
3517 	}
3518 	free(buf, M_DEVBUF, bufsz);
3519 	if (err == 0)
3520 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3521 
3522 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3523 		if (nvm_sections[i].data != NULL)
3524 			free(nvm_sections[i].data, M_DEVBUF,
3525 			    nvm_sections[i].length);
3526 	}
3527 
3528 	return err;
3529 }
3530 
3531 int
3532 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3533     const uint8_t *section, uint32_t byte_cnt)
3534 {
3535 	int err = EINVAL;
3536 	uint32_t chunk_sz, offset;
3537 
3538 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3539 
3540 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3541 		uint32_t addr, len;
3542 		const uint8_t *data;
3543 
3544 		addr = dst_addr + offset;
3545 		len = MIN(chunk_sz, byte_cnt - offset);
3546 		data = section + offset;
3547 
3548 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3549 		if (err)
3550 			break;
3551 	}
3552 
3553 	return err;
3554 }
3555 
3556 int
3557 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3558     const uint8_t *chunk, uint32_t byte_cnt)
3559 {
3560 	struct iwm_dma_info *dma = &sc->fw_dma;
3561 	int err;
3562 
3563 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3564 	memcpy(dma->vaddr, chunk, byte_cnt);
3565 	bus_dmamap_sync(sc->sc_dmat,
3566 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
3567 
3568 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3569 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
3570 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3571 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3572 
3573 	sc->sc_fw_chunk_done = 0;
3574 
3575 	if (!iwm_nic_lock(sc))
3576 		return EBUSY;
3577 
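	/*
	 * Program the FH service channel: pause DMA, set the SRAM
	 * destination and DRAM source addresses, mark a single TB of
	 * the chunk's length valid, then re-enable the channel to
	 * start the transfer.
	 */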
3578 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3579 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3580 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3581 	    dst_addr);
3582 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3583 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3584 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3585 	    (iwm_get_dma_hi_addr(dma->paddr)
3586 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3587 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3588 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3589 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3590 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3591 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3592 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3593 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3594 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3595 
3596 	iwm_nic_unlock(sc);
3597 
3598 	/* Wait for this segment to load. */
3599 	err = 0;
3600 	while (!sc->sc_fw_chunk_done) {
3601 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
3602 		if (err)
3603 			break;
3604 	}
3605 
3606 	if (!sc->sc_fw_chunk_done)
3607 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
3608 		    DEVNAME(sc), dst_addr, byte_cnt);
3609 
3610 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3611 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
3612 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3613 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3614 	}
3615 
3616 	return err;
3617 }
3618 
3619 int
3620 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3621 {
3622 	struct iwm_fw_sects *fws;
3623 	int err, i;
3624 	void *data;
3625 	uint32_t dlen;
3626 	uint32_t offset;
3627 
3628 	fws = &sc->sc_fw.fw_sects[ucode_type];
3629 	for (i = 0; i < fws->fw_count; i++) {
3630 		data = fws->fw_sect[i].fws_data;
3631 		dlen = fws->fw_sect[i].fws_len;
3632 		offset = fws->fw_sect[i].fws_devoff;
3633 		if (dlen > sc->sc_fwdmasegsz) {
3634 			err = EFBIG;
3635 		} else
3636 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3637 		if (err) {
3638 			printf("%s: could not load firmware chunk %u of %u\n",
3639 			    DEVNAME(sc), i, fws->fw_count);
3640 			return err;
3641 		}
3642 	}
3643 
3644 	iwm_enable_interrupts(sc);
3645 
3646 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3647 
3648 	return 0;
3649 }
3650 
3651 int
3652 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3653     int cpu, int *first_ucode_section)
3654 {
3655 	int shift_param;
3656 	int i, err = 0, sec_num = 0x1;
3657 	uint32_t val, last_read_idx = 0;
3658 	void *data;
3659 	uint32_t dlen;
3660 	uint32_t offset;
3661 
3662 	if (cpu == 1) {
3663 		shift_param = 0;
3664 		*first_ucode_section = 0;
3665 	} else {
3666 		shift_param = 16;
3667 		(*first_ucode_section)++;
3668 	}
3669 
3670 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3671 		last_read_idx = i;
3672 		data = fws->fw_sect[i].fws_data;
3673 		dlen = fws->fw_sect[i].fws_len;
3674 		offset = fws->fw_sect[i].fws_devoff;
3675 
3676 		/*
3677 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3678 		 * CPU1 sections from the CPU2 sections.
3679 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
3680 		 * CPU2 non-paged sections from the CPU2 paging sections.
3681 		 */
3682 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3683 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3684 			break;
3685 
3686 		if (dlen > sc->sc_fwdmasegsz) {
3687 			err = EFBIG;
3688 		} else
3689 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3690 		if (err) {
3691 			printf("%s: could not load firmware chunk %d "
3692 			    "(error %d)\n", DEVNAME(sc), i, err);
3693 			return err;
3694 		}
3695 
3696 		/* Notify the ucode of the loaded section number and status */
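		/*
		 * IWM_FH_UCODE_LOAD_STATUS accumulates a bitmask of loaded
		 * sections; the bits for CPU2 live in the upper 16 bits
		 * (shift_param).
		 */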
3697 		if (iwm_nic_lock(sc)) {
3698 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3699 			val = val | (sec_num << shift_param);
3700 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3701 			sec_num = (sec_num << 1) | 0x1;
3702 			iwm_nic_unlock(sc);
3703 		} else {
3704 			err = EBUSY;
3705 			printf("%s: could not load firmware chunk %d "
3706 			    "(error %d)\n", DEVNAME(sc), i, err);
3707 			return err;
3708 		}
3709 	}
3710 
3711 	*first_ucode_section = last_read_idx;
3712 
3713 	if (iwm_nic_lock(sc)) {
3714 		if (cpu == 1)
3715 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3716 		else
3717 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3718 		iwm_nic_unlock(sc);
3719 	} else {
3720 		err = EBUSY;
3721 		printf("%s: could not finalize firmware loading (error %d)\n",
3722 		    DEVNAME(sc), err);
3723 		return err;
3724 	}
3725 
3726 	return 0;
3727 }
3728 
3729 int
3730 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3731 {
3732 	struct iwm_fw_sects *fws;
3733 	int err = 0;
3734 	int first_ucode_section;
3735 
3736 	fws = &sc->sc_fw.fw_sects[ucode_type];
3737 
3738 	/* configure the ucode to be ready to get the secured image */
3739 	/* release CPU reset */
3740 	if (iwm_nic_lock(sc)) {
3741 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3742 		    IWM_RELEASE_CPU_RESET_BIT);
3743 		iwm_nic_unlock(sc);
3744 	}
3745 
3746 	/* load to FW the binary Secured sections of CPU1 */
3747 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3748 	if (err)
3749 		return err;
3750 
3751 	/* load to FW the binary sections of CPU2 */
3752 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3753 	if (err)
3754 		return err;
3755 
3756 	iwm_enable_interrupts(sc);
3757 	return 0;
3758 }
3759 
3760 int
3761 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3762 {
3763 	int err, w;
3764 
3765 	sc->sc_uc.uc_intr = 0;
3766 
3767 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
3768 		err = iwm_load_firmware_8000(sc, ucode_type);
3769 	else
3770 		err = iwm_load_firmware_7000(sc, ucode_type);
3771 
3772 	if (err)
3773 		return err;
3774 
3775 	/* wait for the firmware to load */
3776 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3777 		err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", MSEC_TO_NSEC(100));
3778 	}
3779 	if (err || !sc->sc_uc.uc_ok)
3780 		printf("%s: could not load firmware\n", DEVNAME(sc));
3781 
3782 	return err;
3783 }
3784 
3785 int
3786 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3787 {
3788 	int err;
3789 
3790 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3791 
3792 	err = iwm_nic_init(sc);
3793 	if (err) {
3794 		printf("%s: unable to init nic\n", DEVNAME(sc));
3795 		return err;
3796 	}
3797 
3798 	/* make sure rfkill handshake bits are cleared */
3799 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3800 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3801 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3802 
3803 	/* clear (again), then enable firmware load interrupt */
3804 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3805 	iwm_enable_fwload_interrupt(sc);
3806 
3807 	/* really make sure rfkill handshake bits are cleared */
3808 	/* maybe we should write a few times more?  just to make sure */
3809 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3810 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3811 
3812 	return iwm_load_firmware(sc, ucode_type);
3813 }
3814 
3815 int
3816 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3817 {
3818 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3819 		.valid = htole32(valid_tx_ant),
3820 	};
3821 
3822 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3823 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3824 }
3825 
3826 int
3827 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3828 {
3829 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3830 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3831 
3832 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3833 	phy_cfg_cmd.calib_control.event_trigger =
3834 	    sc->sc_default_calib[ucode_type].event_trigger;
3835 	phy_cfg_cmd.calib_control.flow_trigger =
3836 	    sc->sc_default_calib[ucode_type].flow_trigger;
3837 
3838 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3839 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3840 }
3841 
3842 int
3843 iwm_send_dqa_cmd(struct iwm_softc *sc)
3844 {
3845 	struct iwm_dqa_enable_cmd dqa_cmd = {
3846 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
3847 	};
3848 	uint32_t cmd_id;
3849 
3850 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
3851 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3852 }
3853 
3854 int
3855 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3856 	enum iwm_ucode_type ucode_type)
3857 {
3858 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3859 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
3860 	int err;
3861 
3862 	err = iwm_read_firmware(sc, ucode_type);
3863 	if (err)
3864 		return err;
3865 
3866 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3867 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
3868 	else
3869 		sc->cmdqid = IWM_CMD_QUEUE;
3870 
3871 	sc->sc_uc_current = ucode_type;
3872 	err = iwm_start_fw(sc, ucode_type);
3873 	if (err) {
3874 		sc->sc_uc_current = old_type;
3875 		return err;
3876 	}
3877 
3878 	err = iwm_post_alive(sc);
3879 	if (err)
3880 		return err;
3881 
3882 	/*
3883 	 * Configure and operate the firmware paging mechanism.
3884 	 * The driver configures the paging flow only once; the CPU2 paging
3885 	 * image is included in the IWM_UCODE_INIT image.
3886 	 */
3887 	if (fw->paging_mem_size) {
3888 		err = iwm_save_fw_paging(sc, fw);
3889 		if (err) {
3890 			printf("%s: failed to save the FW paging image\n",
3891 			    DEVNAME(sc));
3892 			return err;
3893 		}
3894 
3895 		err = iwm_send_paging_cmd(sc, fw);
3896 		if (err) {
3897 			printf("%s: failed to send the paging cmd\n",
3898 			    DEVNAME(sc));
3899 			iwm_free_fw_paging(sc);
3900 			return err;
3901 		}
3902 	}
3903 
3904 	return 0;
3905 }
3906 
3907 int
3908 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3909 {
3910 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
3911 	int err;
3912 
3913 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3914 		printf("%s: radio is disabled by hardware switch\n",
3915 		    DEVNAME(sc));
3916 		return EPERM;
3917 	}
3918 
3919 	sc->sc_init_complete = 0;
3920 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3921 	if (err) {
3922 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3923 		return err;
3924 	}
3925 
3926 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
3927 		err = iwm_send_bt_init_conf(sc);
3928 		if (err) {
3929 			printf("%s: could not init bt coex (error %d)\n",
3930 			    DEVNAME(sc), err);
3931 			return err;
3932 		}
3933 	}
3934 
3935 	if (justnvm) {
3936 		err = iwm_nvm_init(sc);
3937 		if (err) {
3938 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3939 			return err;
3940 		}
3941 
3942 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3943 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3944 			    sc->sc_nvm.hw_addr);
3945 
3946 		return 0;
3947 	}
3948 
3949 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3950 	if (err)
3951 		return err;
3952 
3953 	/* Send TX valid antennas before triggering calibrations */
3954 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3955 	if (err)
3956 		return err;
3957 
3958 	/*
3959 	 * Send the PHY configuration command to the init uCode
3960 	 * to start the 16.0 uCode init image's internal calibrations.
3961 	 */
3962 	err = iwm_send_phy_cfg_cmd(sc);
3963 	if (err)
3964 		return err;
3965 
3966 	/*
3967 	 * Nothing to do but wait for the init complete and phy DB
3968 	 * notifications from the firmware.
3969 	 */
3970 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3971 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
3972 		    SEC_TO_NSEC(2));
3973 		if (err)
3974 			break;
3975 	}
3976 
3977 	return err;
3978 }
3979 
3980 int
3981 iwm_config_ltr(struct iwm_softc *sc)
3982 {
3983 	struct iwm_ltr_config_cmd cmd = {
3984 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3985 	};
3986 
3987 	if (!sc->sc_ltr_enabled)
3988 		return 0;
3989 
3990 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3991 }
3992 
3993 int
3994 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3995 {
3996 	struct iwm_rx_ring *ring = &sc->rxq;
3997 	struct iwm_rx_data *data = &ring->data[idx];
3998 	struct mbuf *m;
3999 	int err;
4000 	int fatal = 0;
4001 
4002 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4003 	if (m == NULL)
4004 		return ENOBUFS;
4005 
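	/*
	 * Attach a cluster large enough for the requested RX buffer:
	 * a standard cluster if it fits, else one of IWM_RBUF_SIZE.
	 */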
4006 	if (size <= MCLBYTES) {
4007 		MCLGET(m, M_DONTWAIT);
4008 	} else {
4009 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4010 	}
4011 	if ((m->m_flags & M_EXT) == 0) {
4012 		m_freem(m);
4013 		return ENOBUFS;
4014 	}
4015 
4016 	if (data->m != NULL) {
4017 		bus_dmamap_unload(sc->sc_dmat, data->map);
4018 		fatal = 1;
4019 	}
4020 
4021 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4022 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4023 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4024 	if (err) {
4025 		/* The old mbuf was already unloaded above; the ring
		 * slot cannot be left without a loaded buffer. */
4026 		if (fatal)
4027 			panic("iwm: could not load RX mbuf");
4028 		m_freem(m);
4029 		return err;
4030 	}
4031 	data->m = m;
4032 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4033 
4034 	/* Update RX descriptor. */
4035 	if (sc->sc_mqrx_supported) {
4036 		((uint64_t *)ring->desc)[idx] =
4037 		    htole64(data->map->dm_segs[0].ds_addr);
4038 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4039 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4040 		    BUS_DMASYNC_PREWRITE);
4041 	} else {
4042 		((uint32_t *)ring->desc)[idx] =
4043 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4044 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4045 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4046 		    BUS_DMASYNC_PREWRITE);
4047 	}
4048 
4049 	return 0;
4050 }
4051 
4052 /*
4053  * RSSI values are reported by the FW as positive values; negate them
4054  * to obtain their dBm.  Account for missing antennas by replacing 0
4055  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
4056  */
4057 int
4058 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4059 {
4060 	int energy_a, energy_b, energy_c, max_energy;
4061 	uint32_t val;
4062 
4063 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4064 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4065 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4066 	energy_a = energy_a ? -energy_a : -256;
4067 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4068 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4069 	energy_b = energy_b ? -energy_b : -256;
4070 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4071 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4072 	energy_c = energy_c ? -energy_c : -256;
4073 	max_energy = MAX(energy_a, energy_b);
4074 	max_energy = MAX(max_energy, energy_c);
4075 
4076 	return max_energy;
4077 }
4078 
4079 int
4080 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4081     struct iwm_rx_mpdu_desc *desc)
4082 {
4083 	int energy_a, energy_b;
4084 
4085 	energy_a = desc->v1.energy_a;
4086 	energy_b = desc->v1.energy_b;
4087 	energy_a = energy_a ? -energy_a : -256;
4088 	energy_b = energy_b ? -energy_b : -256;
4089 	return MAX(energy_a, energy_b);
4090 }
4091 
4092 void
4093 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4094     struct iwm_rx_data *data)
4095 {
4096 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4097 
4098 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4099 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4100 
4101 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4102 }
4103 
4104 /*
4105  * Retrieve the average noise (in dBm) among receivers.
4106  */
4107 int
4108 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4109 {
4110 	int i, total, nbant, noise;
4111 
4112 	total = nbant = noise = 0;
4113 	for (i = 0; i < 3; i++) {
4114 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4115 		if (noise) {
4116 			total += noise;
4117 			nbant++;
4118 		}
4119 	}
4120 
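	/*
	 * The average is converted to dBm by subtracting a fixed offset
	 * of 107; this convention appears to be inherited from iwlwifi.
	 */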
4121 	/* There should be at least one antenna but check anyway. */
4122 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4123 }
4124 
4125 int
4126 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4127     struct ieee80211_rxinfo *rxi)
4128 {
4129 	struct ieee80211com *ic = &sc->sc_ic;
4130 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4131 	struct ieee80211_frame *wh;
4132 	uint64_t pn, *prsc;
4133 	uint8_t *ivp;
4134 	uint8_t tid;
4135 	int hdrlen, hasqos;
4136 
4137 	wh = mtod(m, struct ieee80211_frame *);
4138 	hdrlen = ieee80211_get_hdrlen(wh);
4139 	ivp = (uint8_t *)wh + hdrlen;
4140 
4141 	/* Check that ExtIV bit is set. */
4142 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4143 		return 1;
4144 
4145 	hasqos = ieee80211_has_qos(wh);
4146 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4147 	prsc = &k->k_rsc[tid];
4148 
4149 	/* Extract the 48-bit PN from the CCMP header. */
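	/*
	 * The CCMP header layout is PN0 PN1 <reserved> <key ID> PN2 PN3
	 * PN4 PN5, which is why bytes 2 and 3 are skipped below.
	 */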
4150 	pn = (uint64_t)ivp[0]       |
4151 	     (uint64_t)ivp[1] <<  8 |
4152 	     (uint64_t)ivp[4] << 16 |
4153 	     (uint64_t)ivp[5] << 24 |
4154 	     (uint64_t)ivp[6] << 32 |
4155 	     (uint64_t)ivp[7] << 40;
4156 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4157 		if (pn < *prsc) {
4158 			ic->ic_stats.is_ccmp_replays++;
4159 			return 1;
4160 		}
4161 	} else if (pn <= *prsc) {
4162 		ic->ic_stats.is_ccmp_replays++;
4163 		return 1;
4164 	}
4165 	/* Last seen packet number is updated in ieee80211_inputm(). */
4166 
4167 	/*
4168 	 * Some firmware versions strip the MIC, and some don't. It is not
4169 	 * clear which of the capability flags could tell us what to expect.
4170 	 * For now, keep things simple and just leave the MIC in place if
4171 	 * it is present.
4172 	 *
4173 	 * The IV will be stripped by ieee80211_inputm().
4174 	 */
4175 	return 0;
4176 }
4177 
4178 int
4179 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4180     struct ieee80211_rxinfo *rxi)
4181 {
4182 	struct ieee80211com *ic = &sc->sc_ic;
4183 	struct ifnet *ifp = IC2IFP(ic);
4184 	struct ieee80211_frame *wh;
4185 	struct ieee80211_node *ni;
4186 	int ret = 0;
4187 	uint8_t type, subtype;
4188 
4189 	wh = mtod(m, struct ieee80211_frame *);
4190 
4191 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4192 	if (type == IEEE80211_FC0_TYPE_CTL)
4193 		return 0;
4194 
4195 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4196 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4197 		return 0;
4198 
4199 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4200 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4201 		return 0;
4202 
4203 	ni = ieee80211_find_rxnode(ic, wh);
4204 	/* Handle hardware decryption. */
4205 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4206 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4207 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4208 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4209 			ic->ic_stats.is_ccmp_dec_errs++;
4210 			ret = 1;
4211 			goto out;
4212 		}
4213 		/* Check whether decryption was successful or not. */
4214 		if ((rx_pkt_status &
4215 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4216 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4217 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4218 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4219 			ic->ic_stats.is_ccmp_dec_errs++;
4220 			ret = 1;
4221 			goto out;
4222 		}
4223 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4224 	}
4225 out:
4226 	if (ret)
4227 		ifp->if_ierrors++;
4228 	ieee80211_release_node(ic, ni);
4229 	return ret;
4230 }
4231 
4232 void
4233 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4234     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4235     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4236     struct mbuf_list *ml)
4237 {
4238 	struct ieee80211com *ic = &sc->sc_ic;
4239 	struct ifnet *ifp = IC2IFP(ic);
4240 	struct ieee80211_frame *wh;
4241 	struct ieee80211_node *ni;
4242 	struct ieee80211_channel *bss_chan;
4243 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
4244 
4245 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4246 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4247 
4248 	wh = mtod(m, struct ieee80211_frame *);
4249 	ni = ieee80211_find_rxnode(ic, wh);
4250 	if (ni == ic->ic_bss) {
4251 		/*
4252 		 * We may switch ic_bss's channel during scans.
4253 		 * Record the current channel so we can restore it later.
4254 		 */
4255 		bss_chan = ni->ni_chan;
4256 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
4257 	}
4258 	ni->ni_chan = &ic->ic_channels[chanidx];
4259 
4260 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4261 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4262 		ifp->if_ierrors++;
4263 		m_freem(m);
4264 		ieee80211_release_node(ic, ni);
4265 		return;
4266 	}
4267 
4268 #if NBPFILTER > 0
4269 	if (sc->sc_drvbpf != NULL) {
4270 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4271 		uint16_t chan_flags;
4272 
4273 		tap->wr_flags = 0;
4274 		if (is_shortpre)
4275 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4276 		tap->wr_chan_freq =
4277 		    htole16(ic->ic_channels[chanidx].ic_freq);
4278 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4279 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4280 			chan_flags &= ~IEEE80211_CHAN_HT;
4281 		tap->wr_chan_flags = htole16(chan_flags);
4282 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4283 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4284 		tap->wr_tsft = device_timestamp;
4285 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4286 			uint8_t mcs = (rate_n_flags &
4287 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4288 			    IWM_RATE_HT_MCS_NSS_MSK));
4289 			tap->wr_rate = (0x80 | mcs);
4290 		} else {
4291 			uint8_t rate = (rate_n_flags &
4292 			    IWM_RATE_LEGACY_RATE_MSK);
4293 			switch (rate) {
4294 			/* CCK rates. */
4295 			case  10: tap->wr_rate =   2; break;
4296 			case  20: tap->wr_rate =   4; break;
4297 			case  55: tap->wr_rate =  11; break;
4298 			case 110: tap->wr_rate =  22; break;
4299 			/* OFDM rates. */
4300 			case 0xd: tap->wr_rate =  12; break;
4301 			case 0xf: tap->wr_rate =  18; break;
4302 			case 0x5: tap->wr_rate =  24; break;
4303 			case 0x7: tap->wr_rate =  36; break;
4304 			case 0x9: tap->wr_rate =  48; break;
4305 			case 0xb: tap->wr_rate =  72; break;
4306 			case 0x1: tap->wr_rate =  96; break;
4307 			case 0x3: tap->wr_rate = 108; break;
4308 			/* Unknown rate: should not happen. */
4309 			default:  tap->wr_rate =   0;
4310 			}
4311 		}
4312 
4313 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4314 		    m, BPF_DIRECTION_IN);
4315 	}
4316 #endif
4317 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4318 	/*
4319 	 * ieee80211_inputm() might have changed our BSS.
4320 	 * Restore ic_bss's channel if we are still in the same BSS.
4321 	 */
4322 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4323 		ni->ni_chan = bss_chan;
4324 	ieee80211_release_node(ic, ni);
4325 }
4326 
4327 void
4328 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4329     size_t maxlen, struct mbuf_list *ml)
4330 {
4331 	struct ieee80211com *ic = &sc->sc_ic;
4332 	struct ieee80211_rxinfo rxi;
4333 	struct iwm_rx_phy_info *phy_info;
4334 	struct iwm_rx_mpdu_res_start *rx_res;
4335 	int device_timestamp;
4336 	uint16_t phy_flags;
4337 	uint32_t len;
4338 	uint32_t rx_pkt_status;
4339 	int rssi, chanidx, rate_n_flags;
4340 
4341 	memset(&rxi, 0, sizeof(rxi));
4342 
4343 	phy_info = &sc->sc_last_phy_info;
4344 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4345 	len = le16toh(rx_res->byte_count);
4346 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4347 		/* Allow control frames in monitor mode. */
4348 		if (len < sizeof(struct ieee80211_frame_cts)) {
4349 			ic->ic_stats.is_rx_tooshort++;
4350 			IC2IFP(ic)->if_ierrors++;
4351 			m_freem(m);
4352 			return;
4353 		}
4354 	} else if (len < sizeof(struct ieee80211_frame)) {
4355 		ic->ic_stats.is_rx_tooshort++;
4356 		IC2IFP(ic)->if_ierrors++;
4357 		m_freem(m);
4358 		return;
4359 	}
4360 	if (len > maxlen - sizeof(*rx_res)) {
4361 		IC2IFP(ic)->if_ierrors++;
4362 		m_freem(m);
4363 		return;
4364 	}
4365 
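	/*
	 * Sanity check: a cfg_phy_cnt larger than 20 is assumed to be
	 * bogus (same limit as iwlwifi uses); drop such frames.
	 */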
4366 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4367 		m_freem(m);
4368 		return;
4369 	}
4370 
4371 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4372 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4373 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4374 		m_freem(m);
4375 		return; /* drop */
4376 	}
4377 
4378 	m->m_data = pktdata + sizeof(*rx_res);
4379 	m->m_pkthdr.len = m->m_len = len;
4380 
4381 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4382 		m_freem(m);
4383 		return;
4384 	}
4385 
4386 	chanidx = letoh32(phy_info->channel);
4387 	device_timestamp = le32toh(phy_info->system_timestamp);
4388 	phy_flags = letoh16(phy_info->phy_flags);
4389 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4390 
4391 	rssi = iwm_get_signal_strength(sc, phy_info);
4392 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4393 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4394 
4395 	rxi.rxi_rssi = rssi;
4396 	rxi.rxi_tstamp = device_timestamp;
4397 
4398 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4399 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4400 	    rate_n_flags, device_timestamp, &rxi, ml);
4401 }
4402 
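/*
 * Reverse the byte order of a MAC address in place. Used to undo the
 * address reversal performed by A-MSDU deaggregation hardware.
 */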
4403 void
4404 iwm_flip_address(uint8_t *addr)
4405 {
4406 	int i;
4407 	uint8_t mac_addr[ETHER_ADDR_LEN];
4408 
4409 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4410 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4411 	IEEE80211_ADDR_COPY(addr, mac_addr);
4412 }
4413 
4414 /*
4415  * Drop duplicate 802.11 retransmissions
4416  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4417  * and handle pseudo-duplicate frames which result from deaggregation
4418  * of A-MSDU frames in hardware.
4419  */
4420 int
4421 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4422     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4423 {
4424 	struct ieee80211com *ic = &sc->sc_ic;
4425 	struct iwm_node *in = (void *)ic->ic_bss;
4426 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4427 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4428 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4429 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4430 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4431 	int hasqos = ieee80211_has_qos(wh);
4432 	uint16_t seq;
4433 
4434 	if (type == IEEE80211_FC0_TYPE_CTL ||
4435 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4436 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4437 		return 0;
4438 
4439 	if (hasqos) {
4440 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4441 		if (tid > IWM_MAX_TID_COUNT)
4442 			tid = IWM_MAX_TID_COUNT;
4443 	}
4444 
4445 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4446 	subframe_idx = desc->amsdu_info &
4447 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4448 
4449 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4450 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4451 	    dup_data->last_seq[tid] == seq &&
4452 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4453 		return 1;
4454 
4455 	/*
4456 	 * Allow the same frame sequence number for all A-MSDU subframes
4457 	 * following the first subframe.
4458 	 * Otherwise these subframes would be discarded as replays.
4459 	 */
4460 	if (dup_data->last_seq[tid] == seq &&
4461 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4462 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
4463 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4464 	}
4465 
4466 	dup_data->last_seq[tid] = seq;
4467 	dup_data->last_sub_frame[tid] = subframe_idx;
4468 
4469 	return 0;
4470 }
4471 
4472 /*
4473  * Returns true if sn2 - buffer_size < sn1 < sn2.
4474  * To be used only in order to compare reorder buffer head with NSSN.
4475  * We fully trust NSSN unless it is behind us due to reorder timeout.
4476  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
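 * For example, with buffer_size 64 and sn2 == 100: sn1 == 80 counts as
 * less, while sn1 == 100 or sn1 == 20 does not (12-bit sequence space,
 * see SEQ_LT()).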
4477  */
4478 int
4479 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4480 {
4481 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4482 }
4483 
4484 void
4485 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
4486     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
4487     uint16_t nssn, struct mbuf_list *ml)
4488 {
4489 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
4490 	uint16_t ssn = reorder_buf->head_sn;
4491 
4492 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4493 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4494 		goto set_timer;
4495 
4496 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4497 		int index = ssn % reorder_buf->buf_size;
4498 		struct mbuf *m;
4499 		int chanidx, is_shortpre;
4500 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4501 		struct ieee80211_rxinfo *rxi;
4502 
4503 		/* This data is the same for all A-MSDU subframes. */
4504 		chanidx = entries[index].chanidx;
4505 		rx_pkt_status = entries[index].rx_pkt_status;
4506 		is_shortpre = entries[index].is_shortpre;
4507 		rate_n_flags = entries[index].rate_n_flags;
4508 		device_timestamp = entries[index].device_timestamp;
4509 		rxi = &entries[index].rxi;
4510 
4511 		/*
4512 		 * Empty the list. An A-MSDU will have more than one frame.
4513 		 * An empty list is valid as well, since the nssn indicates
4514 		 * that frames were received.
4515 		 */
4516 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4517 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4518 			    rate_n_flags, device_timestamp, rxi, ml);
4519 			reorder_buf->num_stored--;
4520 
4521 			/*
4522 			 * Allow the same frame sequence number and CCMP PN for
4523 			 * all A-MSDU subframes following the first subframe.
4524 			 * Otherwise they would be discarded as replays.
4525 			 */
4526 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4527 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4528 		}
4529 
4530 		ssn = (ssn + 1) & 0xfff;
4531 	}
4532 	reorder_buf->head_sn = nssn;
4533 
4534 set_timer:
4535 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4536 		timeout_add_usec(&reorder_buf->reorder_timer,
4537 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4538 	} else
4539 		timeout_del(&reorder_buf->reorder_timer);
4540 }
4541 
4542 int
4543 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
4544     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4545 {
4546 	struct ieee80211com *ic = &sc->sc_ic;
4547 
4548 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4549 		/* we have a new (A-)MPDU ... */
4550 
4551 		/*
4552 		 * reset counter to 0 if we didn't have any oldsn in
4553 		 * the last A-MPDU (as detected by GP2 being identical)
4554 		 */
4555 		if (!buffer->consec_oldsn_prev_drop)
4556 			buffer->consec_oldsn_drops = 0;
4557 
4558 		/* either way, update our tracking state */
4559 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4560 	} else if (buffer->consec_oldsn_prev_drop) {
4561 		/*
4562 		 * tracking state didn't change, and we had an old SN
4563 		 * indication before - do nothing in this case, we
4564 		 * already noted this one down and are waiting for the
4565 		 * next A-MPDU (by GP2)
4566 		 */
4567 		return 0;
4568 	}
4569 
4570 	/* return unless this MPDU has old SN */
4571 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
4572 		return 0;
4573 
4574 	/* update state */
4575 	buffer->consec_oldsn_prev_drop = 1;
4576 	buffer->consec_oldsn_drops++;
4577 
4578 	/* if limit is reached, send del BA and reset state */
4579 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
4580 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4581 		    0, tid);
4582 		buffer->consec_oldsn_prev_drop = 0;
4583 		buffer->consec_oldsn_drops = 0;
4584 		return 1;
4585 	}
4586 
4587 	return 0;
4588 }
4589 
4590 /*
4591  * Handle re-ordering of frames which were de-aggregated in hardware.
4592  * Returns 1 if the MPDU was consumed (buffered or dropped).
4593  * Returns 0 if the MPDU should be passed to upper layer.
4594  */
4595 int
4596 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4597     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4598     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4599     struct mbuf_list *ml)
4600 {
4601 	struct ieee80211com *ic = &sc->sc_ic;
4602 	struct ieee80211_frame *wh;
4603 	struct ieee80211_node *ni;
4604 	struct iwm_rxba_data *rxba;
4605 	struct iwm_reorder_buffer *buffer;
4606 	uint32_t reorder_data = le32toh(desc->reorder_data);
4607 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
4608 	int last_subframe =
4609 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
4610 	uint8_t tid;
4611 	uint8_t subframe_idx = (desc->amsdu_info &
4612 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4613 	struct iwm_reorder_buf_entry *entries;
4614 	int index;
4615 	uint16_t nssn, sn;
4616 	uint8_t baid, type, subtype;
4617 	int hasqos;
4618 
4619 	wh = mtod(m, struct ieee80211_frame *);
4620 	hasqos = ieee80211_has_qos(wh);
4621 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4622 
4623 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4624 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4625 	ni = ieee80211_find_rxnode(ic, wh);
4626 
4627 	/*
4628 	 * We are only interested in Block Ack requests and unicast QoS data.
4629 	 */
4630 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4631 		return 0;
4632 	if (hasqos) {
4633 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4634 			return 0;
4635 	} else {
4636 		if (type != IEEE80211_FC0_TYPE_CTL ||
4637 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4638 			return 0;
4639 	}
4640 
4641 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
4642 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
4643 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
4644 	    baid >= nitems(sc->sc_rxba_data))
4645 		return 0;
4646 
4647 	rxba = &sc->sc_rxba_data[baid];
4648 	if (rxba == NULL || tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
4649 		return 0;
4650 
4651 	/* Bypass A-MPDU re-ordering in net80211. */
4652 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
4653 
4654 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
4655 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
4656 		IWM_RX_MPDU_REORDER_SN_SHIFT;
4657 
4658 	buffer = &rxba->reorder_buf;
4659 	entries = &rxba->entries[0];
4660 
4661 	if (!buffer->valid) {
4662 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
4663 			return 0;
4664 		buffer->valid = 1;
4665 	}
4666 
4667 	if (type == IEEE80211_FC0_TYPE_CTL &&
4668 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
4669 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
4670 		goto drop;
4671 	}
4672 
4673 	/*
4674 	 * If there was a significant jump in the nssn - adjust.
4675 	 * If the SN is smaller than the NSSN it might need to first go into
4676 	 * the reorder buffer, in which case we just release up to it and the
4677 	 * rest of the function will take care of storing it and releasing up to
4678 	 * the nssn.
4679 	 */
4680 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4681 	    buffer->buf_size) ||
4682 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4683 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4684 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4685 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4686 	}
4687 
4688 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4689 	    device_timestamp)) {
4690 		/* BA session will be torn down. */
4691 		ic->ic_stats.is_ht_rx_ba_window_jump++;
4692 		goto drop;
4693 
4694 	}
4695 
4696 	/* drop any outdated packets */
4697 	if (SEQ_LT(sn, buffer->head_sn)) {
4698 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4699 		goto drop;
4700 	}
4701 
4702 	/* release immediately if allowed by nssn and no stored frames */
4703 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4704 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4705 		   (!is_amsdu || last_subframe))
4706 			buffer->head_sn = nssn;
4707 		return 0;
4708 	}
4709 
4710 	/*
4711 	 * release immediately if there are no stored frames, and the sn is
4712 	 * equal to the head.
4713 	 * This can happen due to the reorder timer, where NSSN is behind
4714 	 * head_sn. When we released everything and then got the next frame
4715 	 * in the sequence, the NSSN says we cannot release it immediately,
4716 	 * even though there is technically no hole and we can move forward.
4717 	 */
4718 	if (!buffer->num_stored && sn == buffer->head_sn) {
4719 		if (!is_amsdu || last_subframe)
4720 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4721 		return 0;
4722 	}
4723 
4724 	index = sn % buffer->buf_size;
4725 
4726 	/*
4727 	 * Check if we have already stored this frame.
4728 	 * An A-MSDU is either received in its entirety or not at all, so
4729 	 * the logic is simple: if we have frames at that position in the
4730 	 * buffer and the last frame originating from an A-MSDU had a
4731 	 * different SN, it is a retransmission. Same SN with an incrementing
4732 	 * subframe index means the same A-MSDU; otherwise a retransmission.
4733 	 */
4734 	if (!ml_empty(&entries[index].frames)) {
4735 		if (!is_amsdu) {
4736 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4737 			goto drop;
4738 		} else if (sn != buffer->last_amsdu ||
4739 		    buffer->last_sub_index >= subframe_idx) {
4740 			ic->ic_stats.is_ht_rx_ba_no_buf++;
4741 			goto drop;
4742 		}
4743 	} else {
4744 		/* This data is the same for all A-MSDU subframes. */
4745 		entries[index].chanidx = chanidx;
4746 		entries[index].is_shortpre = is_shortpre;
4747 		entries[index].rate_n_flags = rate_n_flags;
4748 		entries[index].device_timestamp = device_timestamp;
4749 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4750 	}
4751 
4752 	/* put in reorder buffer */
4753 	ml_enqueue(&entries[index].frames, m);
4754 	buffer->num_stored++;
4755 	getmicrouptime(&entries[index].reorder_time);
4756 
4757 	if (is_amsdu) {
4758 		buffer->last_amsdu = sn;
4759 		buffer->last_sub_index = subframe_idx;
4760 	}
4761 
4762 	/*
4763 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4764 	 * The reason is that NSSN advances on the first sub-frame, and may
4765 	 * cause the reorder buffer to advance before all the sub-frames arrive.
4766 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
4767 	 * SN 1. The NSSN for the first sub-frame will be 3, with the result
4768 	 * that the driver releases SN 0, 1, 2. When sub-frame 1 arrives, the
4769 	 * reorder buffer is already ahead of it and it will be dropped.
4770 	 * If the last sub-frame is not on this queue, we will get a frame
4771 	 * release notification with an up-to-date NSSN.
4772 	 */
4773 	if (!is_amsdu || last_subframe)
4774 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
4775 
4776 	return 1;
4777 
4778 drop:
4779 	m_freem(m);
4780 	return 1;
4781 }
4782 
4783 void
4784 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4785     size_t maxlen, struct mbuf_list *ml)
4786 {
4787 	struct ieee80211com *ic = &sc->sc_ic;
4788 	struct ieee80211_rxinfo rxi;
4789 	struct iwm_rx_mpdu_desc *desc;
4790 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4791 	int rssi;
4792 	uint8_t chanidx;
4793 	uint16_t phy_info;
4794 
4795 	memset(&rxi, 0, sizeof(rxi));
4796 
4797 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
4798 
4799 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
4800 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4801 		m_freem(m);
4802 		return; /* drop */
4803 	}
4804 
4805 	len = le16toh(desc->mpdu_len);
4806 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4807 		/* Allow control frames in monitor mode. */
4808 		if (len < sizeof(struct ieee80211_frame_cts)) {
4809 			ic->ic_stats.is_rx_tooshort++;
4810 			IC2IFP(ic)->if_ierrors++;
4811 			m_freem(m);
4812 			return;
4813 		}
4814 	} else if (len < sizeof(struct ieee80211_frame)) {
4815 		ic->ic_stats.is_rx_tooshort++;
4816 		IC2IFP(ic)->if_ierrors++;
4817 		m_freem(m);
4818 		return;
4819 	}
4820 	if (len > maxlen - sizeof(*desc)) {
4821 		IC2IFP(ic)->if_ierrors++;
4822 		m_freem(m);
4823 		return;
4824 	}
4825 
4826 	m->m_data = pktdata + sizeof(*desc);
4827 	m->m_pkthdr.len = m->m_len = len;
4828 
4829 	/* Account for padding following the frame header. */
4830 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
4831 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4832 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4833 		if (type == IEEE80211_FC0_TYPE_CTL) {
4834 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4835 			case IEEE80211_FC0_SUBTYPE_CTS:
4836 				hdrlen = sizeof(struct ieee80211_frame_cts);
4837 				break;
4838 			case IEEE80211_FC0_SUBTYPE_ACK:
4839 				hdrlen = sizeof(struct ieee80211_frame_ack);
4840 				break;
4841 			default:
4842 				hdrlen = sizeof(struct ieee80211_frame_min);
4843 				break;
4844 			}
4845 		} else
4846 			hdrlen = ieee80211_get_hdrlen(wh);
4847 
4848 		if ((le16toh(desc->status) &
4849 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4850 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4851 			/* Padding is inserted after the IV. */
4852 			hdrlen += IEEE80211_CCMP_HDRLEN;
4853 		}
4854 
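		/*
		 * The pad bytes sit between the frame header and the
		 * payload; slide the header forward over them and trim
		 * the two bytes off the front of the mbuf.
		 */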
4855 		memmove(m->m_data + 2, m->m_data, hdrlen);
4856 		m_adj(m, 2);
4857 	}
4858 
4859 	/*
4860 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4861 	 * in place for each subframe. But it leaves the 'A-MSDU present'
4862 	 * bit set in the frame header. We need to clear this bit ourselves.
4863 	 *
4864 	 * And we must allow the same CCMP PN for subframes following the
4865 	 * first subframe. Otherwise they would be discarded as replays.
4866 	 */
4867 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
4868 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4869 		uint8_t subframe_idx = (desc->amsdu_info &
4870 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4871 		if (subframe_idx > 0)
4872 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4873 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4874 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4875 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4876 			    struct ieee80211_qosframe_addr4 *);
4877 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4878 
4879 			/* HW reverses addr3 and addr4. */
4880 			iwm_flip_address(qwh4->i_addr3);
4881 			iwm_flip_address(qwh4->i_addr4);
4882 		} else if (ieee80211_has_qos(wh) &&
4883 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
4884 			struct ieee80211_qosframe *qwh = mtod(m,
4885 			    struct ieee80211_qosframe *);
4886 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
4887 
4888 			/* HW reverses addr3. */
4889 			iwm_flip_address(qwh->i_addr3);
4890 		}
4891 	}
4892 
4893 	/*
4894 	 * Verify decryption before duplicate detection. The latter uses
4895 	 * the TID supplied in QoS frame headers and this TID is implicitly
4896 	 * verified as part of the CCMP nonce.
4897 	 */
4898 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
4899 		m_freem(m);
4900 		return;
4901 	}
4902 
4903 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
4904 		m_freem(m);
4905 		return;
4906 	}
4907 
4908 	phy_info = le16toh(desc->phy_info);
4909 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4910 	chanidx = desc->v1.channel;
4911 	device_timestamp = desc->v1.gp2_on_air_rise;
4912 
4913 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
4914 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4915 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4916 
4917 	rxi.rxi_rssi = rssi;
4918 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4919 
4920 	if (iwm_rx_reorder(sc, m, chanidx, desc,
4921 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
4922 	    rate_n_flags, device_timestamp, &rxi, ml))
4923 		return;
4924 
4925 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
4926 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
4927 	    rate_n_flags, device_timestamp, &rxi, ml);
4928 }
4929 
4930 void
4931 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4932     struct iwm_node *in, int txmcs, int txrate)
4933 {
4934 	struct ieee80211com *ic = &sc->sc_ic;
4935 	struct ieee80211_node *ni = &in->in_ni;
4936 	struct ifnet *ifp = IC2IFP(ic);
4937 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4938 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4939 	int txfail;
4940 
4941 	KASSERT(tx_resp->frame_count == 1);
4942 
4943 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
4944 	    status != IWM_TX_STATUS_DIRECT_DONE);
4945 
4946 	/*
4947 	 * Update rate control statistics.
4948 	 * Only report frames which were actually queued with the currently
4949 	 * selected Tx rate. Because Tx queues are relatively long we may
4950 	 * encounter previously selected rates here during Tx bursts.
4951 	 * Providing feedback based on such frames can lead to suboptimal
4952 	 * Tx rate control decisions.
4953 	 */
4954 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
4955 		if (txrate != ni->ni_txrate) {
4956 			if (++in->lq_rate_mismatch > 15) {
4957 				/* Try to sync firmware with the driver... */
4958 				iwm_setrates(in, 1);
4959 				in->lq_rate_mismatch = 0;
4960 			}
4961 		} else {
4962 			in->lq_rate_mismatch = 0;
4963 
4964 			in->in_amn.amn_txcnt++;
4965 			if (txfail)
4966 				in->in_amn.amn_retrycnt++;
4967 			if (tx_resp->failure_frame > 0)
4968 				in->in_amn.amn_retrycnt++;
4969 		}
4970 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
4971 	    (le32toh(tx_resp->initial_rate) & IWM_RATE_MCS_HT_MSK)) {
4972 		uint32_t fw_txmcs = le32toh(tx_resp->initial_rate) &
4973 		   (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
4974 		/* Ignore Tx reports which don't match our last LQ command. */
4975 		if (fw_txmcs != ni->ni_txmcs) {
4976 			if (++in->lq_rate_mismatch > 15) {
4977 				/* Try to sync firmware with the driver... */
4978 				iwm_setrates(in, 1);
4979 				in->lq_rate_mismatch = 0;
4980 			}
4981 		} else {
4982 			int mcs = fw_txmcs;
4983 			const struct ieee80211_ht_rateset *rs =
4984 			    ieee80211_ra_get_ht_rateset(fw_txmcs,
4985 			    ieee80211_node_supports_ht_sgi20(ni));
4986 			unsigned int retries = 0, i;
4987 			int old_txmcs = ni->ni_txmcs;
4988 
4989 			in->lq_rate_mismatch = 0;
4990 
4991 			for (i = 0; i < tx_resp->failure_frame; i++) {
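			/*
			 * The firmware retries failed frames at successively
			 * lower rates from the LQ table. Attribute one failed
			 * attempt to each MCS stepped down through, and count
			 * the remainder as retries at the lowest MCS.
			 */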
4992 				if (mcs > rs->min_mcs) {
4993 					ieee80211_ra_add_stats_ht(&in->in_rn,
4994 					    ic, ni, mcs, 1, 1);
4995 					mcs--;
4996 				} else
4997 					retries++;
4998 			}
4999 
5000 			if (txfail && tx_resp->failure_frame == 0) {
5001 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5002 				    fw_txmcs, 1, 1);
5003 			} else {
5004 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5005 				    mcs, retries + 1, retries);
5006 			}
5007 
5008 			ieee80211_ra_choose(&in->in_rn, ic, ni);
5009 
5010 			/*
5011 			 * If RA has chosen a new TX rate we must update
5012 			 * the firmware's LQ rate table.
5013 			 * ni_txmcs may change again before the task runs so
5014 			 * cache the chosen rate in the iwm_node structure.
5015 			 */
5016 			if (ni->ni_txmcs != old_txmcs)
5017 				iwm_setrates(in, 1);
5018 		}
5019 	}
5020 
5021 	if (txfail)
5022 		ifp->if_oerrors++;
5023 }
5024 
5025 void
5026 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5027 {
5028 	struct ieee80211com *ic = &sc->sc_ic;
5029 
5030 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5031 	    BUS_DMASYNC_POSTWRITE);
5032 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5033 	m_freem(txd->m);
5034 	txd->m = NULL;
5035 
5036 	KASSERT(txd->in);
5037 	ieee80211_release_node(ic, &txd->in->in_ni);
5038 	txd->in = NULL;
5039 }
5040 
5041 void
5042 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5043     struct iwm_rx_data *data)
5044 {
5045 	struct ieee80211com *ic = &sc->sc_ic;
5046 	struct ifnet *ifp = IC2IFP(ic);
5047 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5048 	int idx = cmd_hdr->idx;
5049 	int qid = cmd_hdr->qid;
5050 	struct iwm_tx_ring *ring = &sc->txq[qid];
5051 	struct iwm_tx_data *txd;
5052 
5053 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5054 	    BUS_DMASYNC_POSTREAD);
5055 
5056 	sc->sc_tx_timer = 0;
5057 
5058 	txd = &ring->data[idx];
5059 	if (txd->m == NULL)
5060 		return;
5061 
5062 	iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5063 	iwm_txd_done(sc, txd);
5064 
5065 	/*
5066 	 * XXX Sometimes we miss Tx completion interrupts.
5067 	 * We cannot check Tx success/failure for affected frames; just free
5068 	 * the associated mbuf and release the associated node reference.
5069 	 */
5070 	while (ring->tail != idx) {
5071 		txd = &ring->data[ring->tail];
5072 		if (txd->m != NULL) {
5073 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
5074 			    __func__, ring->tail, idx));
5075 			iwm_txd_done(sc, txd);
5076 			ring->queued--;
5077 		}
5078 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5079 	}
5080 
5081 	if (--ring->queued < IWM_TX_RING_LOMARK) {
5082 		sc->qfullmsk &= ~(1 << ring->qid);
5083 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5084 			ifq_clr_oactive(&ifp->if_snd);
5085 			/*
5086 			 * Well, we're in interrupt context, but then again
5087 			 * I guess net80211 does all sorts of stunts in
5088 			 * interrupt context, so maybe this is no biggie.
5089 			 */
5090 			(*ifp->if_start)(ifp);
5091 		}
5092 	}
5093 }
5094 
5095 void
5096 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5097     struct iwm_rx_data *data)
5098 {
5099 	struct ieee80211com *ic = &sc->sc_ic;
5100 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
5101 	uint32_t missed;
5102 
5103 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5104 	    (ic->ic_state != IEEE80211_S_RUN))
5105 		return;
5106 
5107 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5108 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5109 
5110 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5111 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5112 		if (ic->ic_if.if_flags & IFF_DEBUG)
5113 			printf("%s: receiving no beacons from %s; checking if "
5114 			    "this AP is still responding to probe requests\n",
5115 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5116 		/*
5117 		 * Rather than go directly to scan state, try to send a
5118 		 * directed probe request first. If that fails then the
5119 		 * state machine will drop us into scanning after timing
5120 		 * out waiting for a probe response.
5121 		 */
5122 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5123 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5124 	}
5125 
5126 }
5127 
5128 int
5129 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
5130 {
5131 	struct iwm_binding_cmd cmd;
5132 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
5133 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5134 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
5135 	uint32_t status;
5136 
5137 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
5138 		panic("binding already added");
5139 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5140 		panic("binding already removed");
5141 
5142 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
5143 		return EINVAL;
5144 
5145 	memset(&cmd, 0, sizeof(cmd));
5146 
5147 	cmd.id_and_color
5148 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5149 	cmd.action = htole32(action);
5150 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5151 
5152 	cmd.macs[0] = htole32(mac_id);
5153 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
5154 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
5155 
5156 	status = 0;
5157 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
5158 	    sizeof(cmd), &cmd, &status);
5159 	if (err == 0 && status != 0)
5160 		err = EIO;
5161 
5162 	return err;
5163 }
5164 
5165 void
5166 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5167     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
5168 {
5169 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
5170 
5171 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
5172 	    ctxt->color));
5173 	cmd->action = htole32(action);
5174 	cmd->apply_time = htole32(apply_time);
5175 }
5176 
5177 void
5178 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
5179     struct ieee80211_channel *chan, uint8_t chains_static,
5180     uint8_t chains_dynamic)
5181 {
5182 	struct ieee80211com *ic = &sc->sc_ic;
5183 	uint8_t active_cnt, idle_cnt;
5184 
5185 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5186 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
5187 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
5188 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
5189 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
5190 
	/* Set the Rx chains. */
5192 	idle_cnt = chains_static;
5193 	active_cnt = chains_dynamic;
5194 
5195 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
5196 					IWM_PHY_RX_CHAIN_VALID_POS);
5197 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
5198 	cmd->rxchain_info |= htole32(active_cnt <<
5199 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
5200 
5201 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
5202 }
5203 
5204 int
5205 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
5206     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5207     uint32_t apply_time)
5208 {
5209 	struct iwm_phy_context_cmd cmd;
5210 
5211 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
5212 
5213 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
5214 	    chains_static, chains_dynamic);
5215 
5216 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
5217 	    sizeof(struct iwm_phy_context_cmd), &cmd);
5218 }
5219 
5220 int
5221 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
5222 {
5223 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
5224 	struct iwm_tfd *desc;
5225 	struct iwm_tx_data *txdata;
5226 	struct iwm_device_cmd *cmd;
5227 	struct mbuf *m;
5228 	bus_addr_t paddr;
5229 	uint32_t addr_lo;
5230 	int err = 0, i, paylen, off, s;
5231 	int idx, code, async, group_id;
5232 	size_t hdrlen, datasz;
5233 	uint8_t *data;
5234 	int generation = sc->sc_generation;
5235 
5236 	code = hcmd->id;
5237 	async = hcmd->flags & IWM_CMD_ASYNC;
5238 	idx = ring->cur;
5239 
5240 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5241 		paylen += hcmd->len[i];
5242 	}
5243 
5244 	/* If this command waits for a response, allocate response buffer. */
5245 	hcmd->resp_pkt = NULL;
5246 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
5247 		uint8_t *resp_buf;
5248 		KASSERT(!async);
5249 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
5250 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
5251 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5252 			return ENOSPC;
5253 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5254 		    M_NOWAIT | M_ZERO);
5255 		if (resp_buf == NULL)
5256 			return ENOMEM;
5257 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5258 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5259 	} else {
5260 		sc->sc_cmd_resp_pkt[idx] = NULL;
5261 	}
5262 
5263 	s = splnet();
5264 
5265 	desc = &ring->desc[idx];
5266 	txdata = &ring->data[idx];
5267 
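	/*
	 * Commands in group 0 use the legacy (short) command header;
	 * commands in other groups use the wide header, which also
	 * carries the group id and command version.
	 */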
5268 	group_id = iwm_cmd_groupid(code);
5269 	if (group_id != 0) {
5270 		hdrlen = sizeof(cmd->hdr_wide);
5271 		datasz = sizeof(cmd->data_wide);
5272 	} else {
5273 		hdrlen = sizeof(cmd->hdr);
5274 		datasz = sizeof(cmd->data);
5275 	}
5276 
5277 	if (paylen > datasz) {
5278 		/* Command is too large to fit in pre-allocated space. */
5279 		size_t totlen = hdrlen + paylen;
5280 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zu bytes)\n",
5282 			    DEVNAME(sc), totlen);
5283 			err = EINVAL;
5284 			goto out;
5285 		}
5286 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5287 		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zu bytes)\n",
5289 			    DEVNAME(sc), totlen);
5290 			err = ENOMEM;
5291 			goto out;
5292 		}
5293 		cmd = mtod(m, struct iwm_device_cmd *);
5294 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5295 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5296 		if (err) {
			printf("%s: could not load fw cmd mbuf (%zu bytes)\n",
5298 			    DEVNAME(sc), totlen);
5299 			m_freem(m);
5300 			goto out;
5301 		}
5302 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
5303 		paddr = txdata->map->dm_segs[0].ds_addr;
5304 	} else {
5305 		cmd = &ring->cmd[idx];
5306 		paddr = txdata->cmd_paddr;
5307 	}
5308 
5309 	if (group_id != 0) {
5310 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
5311 		cmd->hdr_wide.group_id = group_id;
5312 		cmd->hdr_wide.qid = ring->qid;
5313 		cmd->hdr_wide.idx = idx;
5314 		cmd->hdr_wide.length = htole16(paylen);
5315 		cmd->hdr_wide.version = iwm_cmd_version(code);
5316 		data = cmd->data_wide;
5317 	} else {
5318 		cmd->hdr.code = code;
5319 		cmd->hdr.flags = 0;
5320 		cmd->hdr.qid = ring->qid;
5321 		cmd->hdr.idx = idx;
5322 		data = cmd->data;
5323 	}
5324 
5325 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5326 		if (hcmd->len[i] == 0)
5327 			continue;
5328 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5329 		off += hcmd->len[i];
5330 	}
5331 	KASSERT(off == paylen);
5332 
	/* The 'lo' address field is not properly aligned; copy it via memcpy(). */
5334 	addr_lo = htole32((uint32_t)paddr);
5335 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
5336 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
5337 	    | ((hdrlen + paylen) << 4));
5338 	desc->num_tbs = 1;
5339 
5340 	if (paylen > datasz) {
5341 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5342 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5343 	} else {
5344 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5345 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5346 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5347 	}
5348 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5349 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5350 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5351 
5352 	/*
5353 	 * Wake up the NIC to make sure that the firmware will see the host
5354 	 * command - we will let the NIC sleep once all the host commands
	 * have returned. This needs to be done only on 7000 family NICs.
5356 	 */
5357 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
5358 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
5359 			err = EBUSY;
5360 			goto out;
5361 		}
5362 	}
5363 
5364 #if 0
5365 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
5366 #endif
5367 	/* Kick command ring. */
5368 	ring->queued++;
5369 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5370 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5371 
5372 	if (!async) {
5373 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
5374 		if (err == 0) {
5375 			/* if hardware is no longer up, return error */
5376 			if (generation != sc->sc_generation) {
5377 				err = ENXIO;
5378 				goto out;
5379 			}
5380 
5381 			/* Response buffer will be freed in iwm_free_resp(). */
5382 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5383 			sc->sc_cmd_resp_pkt[idx] = NULL;
5384 		} else if (generation == sc->sc_generation) {
5385 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5386 			    sc->sc_cmd_resp_len[idx]);
5387 			sc->sc_cmd_resp_pkt[idx] = NULL;
5388 		}
5389 	}
5390  out:
5391 	splx(s);
5392 
5393 	return err;
5394 }
5395 
5396 int
5397 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
5398     uint16_t len, const void *data)
5399 {
5400 	struct iwm_host_cmd cmd = {
5401 		.id = id,
5402 		.len = { len, },
5403 		.data = { data, },
5404 		.flags = flags,
5405 	};
5406 
5407 	return iwm_send_cmd(sc, &cmd);
5408 }
5409 
5410 int
5411 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
5412     uint32_t *status)
5413 {
5414 	struct iwm_rx_packet *pkt;
5415 	struct iwm_cmd_response *resp;
5416 	int err, resp_len;
5417 
5418 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
5419 	cmd->flags |= IWM_CMD_WANT_RESP;
5420 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5421 
5422 	err = iwm_send_cmd(sc, cmd);
5423 	if (err)
5424 		return err;
5425 
5426 	pkt = cmd->resp_pkt;
5427 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
5428 		return EIO;
5429 
5430 	resp_len = iwm_rx_packet_payload_len(pkt);
5431 	if (resp_len != sizeof(*resp)) {
5432 		iwm_free_resp(sc, cmd);
5433 		return EIO;
5434 	}
5435 
5436 	resp = (void *)pkt->data;
5437 	*status = le32toh(resp->status);
5438 	iwm_free_resp(sc, cmd);
5439 	return err;
5440 }
5441 
5442 int
5443 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
5444     const void *data, uint32_t *status)
5445 {
5446 	struct iwm_host_cmd cmd = {
5447 		.id = id,
5448 		.len = { len, },
5449 		.data = { data, },
5450 	};
5451 
5452 	return iwm_send_cmd_status(sc, &cmd, status);
5453 }
5454 
5455 void
5456 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
5457 {
5458 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
5459 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5460 	hcmd->resp_pkt = NULL;
5461 }
5462 
5463 void
5464 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
5465 {
5466 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
5467 	struct iwm_tx_data *data;
5468 
5469 	if (qid != sc->cmdqid) {
5470 		return;	/* Not a command ack. */
5471 	}
5472 
5473 	data = &ring->data[idx];
5474 
5475 	if (data->m != NULL) {
5476 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5477 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5478 		bus_dmamap_unload(sc->sc_dmat, data->map);
5479 		m_freem(data->m);
5480 		data->m = NULL;
5481 	}
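	/* Wake up iwm_send_cmd(); it sleeps on the descriptor's address. */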
5482 	wakeup(&ring->desc[idx]);
5483 
5484 	if (ring->queued == 0) {
5485 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5486 		    DEVNAME(sc), code));
5487 	} else if (--ring->queued == 0) {
5488 		/*
5489 		 * 7000 family NICs are locked while commands are in progress.
5490 		 * All commands are now done so we may unlock the NIC again.
5491 		 */
5492 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
5493 			iwm_nic_unlock(sc);
5494 	}
5495 }
5496 
5497 #if 0
5498 /*
5499  * necessary only for block ack mode
5500  */
5501 void
5502 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
5503     uint16_t len)
5504 {
5505 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
5506 	uint16_t w_val;
5507 
5508 	scd_bc_tbl = sc->sched_dma.vaddr;
5509 
	len += 8; /* magic byte-count overhead; the origin of this constant is unclear */
5511 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
5512 		len = roundup(len, 4) / 4;
5513 
5514 	w_val = htole16(sta_id << 12 | len);
5515 
5516 	/* Update TX scheduler. */
5517 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
5521 
	/*
	 * The firmware reads the byte-count table beyond the ring's
	 * wrap-around point, so mirror the first entries at the end.
	 */
5523 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
5524 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[
		    IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
5529 	}
5530 }
5531 #endif
5532 
5533 /*
 * Fill in various bits for management frames, and leave them
 * unfilled for data frames (the firmware takes care of those).
 * Return the selected Tx rate.
5537  */
5538 const struct iwm_rate *
5539 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
5540     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
5541 {
5542 	struct ieee80211com *ic = &sc->sc_ic;
5543 	struct ieee80211_node *ni = &in->in_ni;
5544 	const struct iwm_rate *rinfo;
5545 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5546 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
5547 	int ridx, rate_flags;
5548 
5549 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
5550 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
5551 
5552 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
5553 	    type != IEEE80211_FC0_TYPE_DATA) {
5554 		/* for non-data, use the lowest supported rate */
5555 		ridx = min_ridx;
5556 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
5557 	} else if (ic->ic_fixed_mcs != -1) {
5558 		ridx = sc->sc_fixed_ridx;
5559 	} else if (ic->ic_fixed_rate != -1) {
5560 		ridx = sc->sc_fixed_ridx;
	} else {
5562 		int i;
5563 		/* Use firmware rateset retry table. */
5564 		tx->initial_rate_index = 0;
5565 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
5566 		if (ni->ni_flags & IEEE80211_NODE_HT) {
5567 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
5568 			return &iwm_rates[ridx];
5569 		}
5570 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
5571 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
5572 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
5573 			if (iwm_rates[i].rate == (ni->ni_txrate &
5574 			    IEEE80211_RATE_VAL)) {
5575 				ridx = i;
5576 				break;
5577 			}
5578 		}
5579 		return &iwm_rates[ridx];
5580 	}
5581 
5582 	rinfo = &iwm_rates[ridx];
5583 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
5584 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
5585 	else
5586 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
5587 	if (IWM_RIDX_IS_CCK(ridx))
5588 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
5589 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5590 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
5591 		rate_flags |= IWM_RATE_MCS_HT_MSK;
5592 		if (ieee80211_node_supports_ht_sgi20(ni))
5593 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
5594 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
5595 	} else
5596 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
5597 
5598 	return rinfo;
5599 }
5600 
5601 #define TB0_SIZE 16
5602 int
5603 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
5604 {
5605 	struct ieee80211com *ic = &sc->sc_ic;
5606 	struct iwm_node *in = (void *)ni;
5607 	struct iwm_tx_ring *ring;
5608 	struct iwm_tx_data *data;
5609 	struct iwm_tfd *desc;
5610 	struct iwm_device_cmd *cmd;
5611 	struct iwm_tx_cmd *tx;
5612 	struct ieee80211_frame *wh;
5613 	struct ieee80211_key *k = NULL;
5614 	const struct iwm_rate *rinfo;
5615 	uint8_t *ivp;
5616 	uint32_t flags;
5617 	u_int hdrlen;
5618 	bus_dma_segment_t *seg;
5619 	uint8_t tid, type;
5620 	int i, totlen, err, pad;
5622 
5623 	wh = mtod(m, struct ieee80211_frame *);
5624 	hdrlen = ieee80211_get_hdrlen(wh);
5625 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5626 
5631 	tid = 0;
5632 
5633 	/*
5634 	 * Map EDCA categories to Tx data queues.
5635 	 *
5636 	 * We use static data queue assignments even in DQA mode. We do not
5637 	 * need to share Tx queues between stations because we only implement
5638 	 * client mode; the firmware's station table contains only one entry
5639 	 * which represents our access point.
5640 	 *
5641 	 * Tx aggregation will require additional queues (one queue per TID
5642 	 * for which aggregation is enabled) but we do not implement this yet.
5643 	 */
5644 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
5645 		ring = &sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac];
5646 	else
5647 		ring = &sc->txq[ac];
5648 	desc = &ring->desc[ring->cur];
5649 	memset(desc, 0, sizeof(*desc));
5650 	data = &ring->data[ring->cur];
5651 
5652 	cmd = &ring->cmd[ring->cur];
5653 	cmd->hdr.code = IWM_TX_CMD;
5654 	cmd->hdr.flags = 0;
5655 	cmd->hdr.qid = ring->qid;
5656 	cmd->hdr.idx = ring->cur;
5657 
5658 	tx = (void *)cmd->data;
5659 	memset(tx, 0, sizeof(*tx));
5660 
5661 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
5662 
5663 #if NBPFILTER > 0
5664 	if (sc->sc_drvbpf != NULL) {
5665 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
5666 		uint16_t chan_flags;
5667 
5668 		tap->wt_flags = 0;
5669 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5670 		chan_flags = ni->ni_chan->ic_flags;
5671 		if (ic->ic_curmode != IEEE80211_MODE_11N)
5672 			chan_flags &= ~IEEE80211_CHAN_HT;
5673 		tap->wt_chan_flags = htole16(chan_flags);
5674 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5675 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5676 		    type == IEEE80211_FC0_TYPE_DATA &&
5677 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
5678 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
5679 		} else
5680 			tap->wt_rate = rinfo->rate;
5681 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
5682 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
5683 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5684 
5685 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5686 		    m, BPF_DIRECTION_OUT);
5687 	}
5688 #endif
5689 	totlen = m->m_pkthdr.len;
5690 
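	/*
	 * Only pairwise CCMP keys use hardware crypto; group keys and
	 * other ciphers are encrypted in software before transmission.
	 */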
5691 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5692 		k = ieee80211_get_txkey(ic, wh, ni);
5693 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5694 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
5695 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
5696 				return ENOBUFS;
5697 			/* 802.11 header may have moved. */
5698 			wh = mtod(m, struct ieee80211_frame *);
5699 			totlen = m->m_pkthdr.len;
5700 			k = NULL; /* skip hardware crypto below */
5701 		} else {
5702 			/* HW appends CCMP MIC */
5703 			totlen += IEEE80211_CCMP_HDRLEN;
5704 		}
5705 	}
5706 
5707 	flags = 0;
5708 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
5709 		flags |= IWM_TX_CMD_FLG_ACK;
5710 	}
5711 
5712 	if (type == IEEE80211_FC0_TYPE_DATA &&
5713 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5714 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
5715 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
5716 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
5717 
5718 	tx->sta_id = IWM_STATION_ID;
5719 
5720 	if (type == IEEE80211_FC0_TYPE_MGT) {
5721 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5722 
5723 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
5724 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
5725 			tx->pm_frame_timeout = htole16(3);
5726 		else
5727 			tx->pm_frame_timeout = htole16(2);
5728 	} else {
5729 		tx->pm_frame_timeout = htole16(0);
5730 	}
5731 
5732 	if (hdrlen & 3) {
		/*
		 * First segment length must be a multiple of 4; e.g. a
		 * 26-byte QoS data header needs 2 bytes of padding.
		 */
5734 		flags |= IWM_TX_CMD_FLG_MH_PAD;
5735 		pad = 4 - (hdrlen & 3);
5736 	} else
5737 		pad = 0;
5738 
5739 	tx->driver_txop = 0;
5740 	tx->next_frame_len = 0;
5741 
5742 	tx->len = htole16(totlen);
5743 	tx->tid_tspec = tid;
5744 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
5745 
5746 	/* Set physical address of "scratch area". */
5747 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
5748 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
5749 
5750 	/* Copy 802.11 header in TX command. */
5751 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5752 
5753 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
5754 		/* Trim 802.11 header and prepend CCMP IV. */
5755 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
5756 		ivp = mtod(m, u_int8_t *);
5757 		k->k_tsc++;	/* increment the 48-bit PN */
5758 		ivp[0] = k->k_tsc; /* PN0 */
5759 		ivp[1] = k->k_tsc >> 8; /* PN1 */
5760 		ivp[2] = 0;        /* Rsvd */
5761 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
5762 		ivp[4] = k->k_tsc >> 16; /* PN2 */
5763 		ivp[5] = k->k_tsc >> 24; /* PN3 */
5764 		ivp[6] = k->k_tsc >> 32; /* PN4 */
5765 		ivp[7] = k->k_tsc >> 40; /* PN5 */
5766 
5767 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
5768 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
5769 	} else {
5770 		/* Trim 802.11 header. */
5771 		m_adj(m, hdrlen);
5772 		tx->sec_ctl = 0;
5773 	}
5774 
5775 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
5776 
5777 	tx->tx_flags |= htole32(flags);
5778 
5779 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5780 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5781 	if (err && err != EFBIG) {
5782 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5783 		m_freem(m);
5784 		return err;
5785 	}
5786 	if (err) {
5787 		/* Too many DMA segments, linearize mbuf. */
5788 		if (m_defrag(m, M_DONTWAIT)) {
5789 			m_freem(m);
5790 			return ENOBUFS;
5791 		}
5792 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5793 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5794 		if (err) {
5795 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5796 			    err);
5797 			m_freem(m);
5798 			return err;
5799 		}
5800 	}
5801 	data->m = m;
5802 	data->in = in;
5803 	data->txmcs = ni->ni_txmcs;
5804 	data->txrate = ni->ni_txrate;
5805 
5806 	/* Fill TX descriptor. */
5807 	desc->num_tbs = 2 + data->map->dm_nsegs;
5808 
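	/*
	 * The first transfer buffer holds the initial TB0_SIZE bytes of
	 * the Tx command; the second holds the remainder of the command
	 * plus the (padded) 802.11 header. The mbuf's DMA segments follow.
	 */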
5809 	desc->tbs[0].lo = htole32(data->cmd_paddr);
5810 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5811 	    (TB0_SIZE << 4));
5812 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
5813 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5814 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
5815 	      + hdrlen + pad - TB0_SIZE) << 4));
5816 
5817 	/* Other DMA segments are for data payload. */
5818 	seg = data->map->dm_segs;
5819 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5820 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
		    (seg->ds_len << 4));
5824 	}
5825 
5826 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5827 	    BUS_DMASYNC_PREWRITE);
5828 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5829 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5830 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5831 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5832 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5833 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5834 
5835 #if 0
5836 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
5837 #endif
5838 
5839 	/* Kick TX ring. */
5840 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5841 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5842 
	/*
	 * Mark the Tx ring as full once the high watermark is reached.
	 * The Tx completion path clears the bit again when the ring
	 * drains below IWM_TX_RING_LOMARK.
	 */
5844 	if (++ring->queued > IWM_TX_RING_HIMARK) {
5845 		sc->qfullmsk |= 1 << ring->qid;
5846 	}
5847 
5848 	return 0;
5849 }
5850 
5851 int
5852 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
5853 {
5854 	struct iwm_tx_path_flush_cmd flush_cmd = {
5855 		.queues_ctl = htole32(tfd_queue_msk),
5856 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
5857 	};
5858 	int err;
5859 
5860 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
5861 	    sizeof(flush_cmd), &flush_cmd);
5862 	if (err)
		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
5864 	return err;
5865 }
5866 
5867 void
5868 iwm_led_enable(struct iwm_softc *sc)
5869 {
5870 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
5871 }
5872 
5873 void
5874 iwm_led_disable(struct iwm_softc *sc)
5875 {
5876 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
5877 }
5878 
5879 int
5880 iwm_led_is_enabled(struct iwm_softc *sc)
5881 {
5882 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
5883 }
5884 
5885 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
5886 
5887 void
5888 iwm_led_blink_timeout(void *arg)
5889 {
5890 	struct iwm_softc *sc = arg;
5891 
5892 	if (iwm_led_is_enabled(sc))
5893 		iwm_led_disable(sc);
5894 	else
5895 		iwm_led_enable(sc);
5896 
5897 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5898 }
5899 
5900 void
5901 iwm_led_blink_start(struct iwm_softc *sc)
5902 {
5903 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5904 	iwm_led_enable(sc);
5905 }
5906 
5907 void
5908 iwm_led_blink_stop(struct iwm_softc *sc)
5909 {
5910 	timeout_del(&sc->sc_led_blink_to);
5911 	iwm_led_disable(sc);
5912 }
5913 
5914 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
5915 
5916 int
5917 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
5918     struct iwm_beacon_filter_cmd *cmd)
5919 {
5920 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
5921 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
5922 }
5923 
5924 void
5925 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
5926     struct iwm_beacon_filter_cmd *cmd)
5927 {
5928 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
5929 }
5930 
5931 int
5932 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
5933 {
5934 	struct iwm_beacon_filter_cmd cmd = {
5935 		IWM_BF_CMD_CONFIG_DEFAULTS,
5936 		.bf_enable_beacon_filter = htole32(1),
5937 		.ba_enable_beacon_abort = htole32(enable),
5938 	};
5939 
5940 	if (!sc->sc_bf.bf_enabled)
5941 		return 0;
5942 
5943 	sc->sc_bf.ba_enabled = enable;
5944 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5945 	return iwm_beacon_filter_send_cmd(sc, &cmd);
5946 }
5947 
5948 void
5949 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
5950     struct iwm_mac_power_cmd *cmd)
5951 {
5952 	struct ieee80211com *ic = &sc->sc_ic;
5953 	struct ieee80211_node *ni = &in->in_ni;
5954 	int dtim_period, dtim_msec, keep_alive;
5955 
5956 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5957 	    in->in_color));
5958 	if (ni->ni_dtimperiod)
5959 		dtim_period = ni->ni_dtimperiod;
5960 	else
5961 		dtim_period = 1;
5962 
5963 	/*
	 * Regardless of power management state the driver must set the
	 * keep-alive period. The firmware uses it for sending keep-alive
	 * NDPs immediately after association. Ensure that the keep-alive
	 * period is at least 3 DTIM intervals long.
5968 	 */
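	/*
	 * Example: with a DTIM period of 1 and a beacon interval of 100 TU,
	 * 3 * dtim_msec = 300, so keep_alive below becomes
	 * max(300, 25000) ms, rounded up to 25 seconds.
	 */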
5969 	dtim_msec = dtim_period * ni->ni_intval;
5970 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
5971 	keep_alive = roundup(keep_alive, 1000) / 1000;
5972 	cmd->keep_alive_seconds = htole16(keep_alive);
5973 
5974 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5975 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5976 }
5977 
5978 int
5979 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5980 {
5981 	int err;
5982 	int ba_enable;
5983 	struct iwm_mac_power_cmd cmd;
5984 
5985 	memset(&cmd, 0, sizeof(cmd));
5986 
5987 	iwm_power_build_cmd(sc, in, &cmd);
5988 
5989 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5990 	    sizeof(cmd), &cmd);
5991 	if (err != 0)
5992 		return err;
5993 
5994 	ba_enable = !!(cmd.flags &
5995 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5996 	return iwm_update_beacon_abort(sc, in, ba_enable);
5997 }
5998 
5999 int
6000 iwm_power_update_device(struct iwm_softc *sc)
6001 {
6002 	struct iwm_device_power_cmd cmd = { };
6003 	struct ieee80211com *ic = &sc->sc_ic;
6004 
6005 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6006 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6007 
6008 	return iwm_send_cmd_pdu(sc,
6009 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6010 }
6011 
6012 int
6013 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
6014 {
6015 	struct iwm_beacon_filter_cmd cmd = {
6016 		IWM_BF_CMD_CONFIG_DEFAULTS,
6017 		.bf_enable_beacon_filter = htole32(1),
6018 	};
6019 	int err;
6020 
6021 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
6022 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6023 
6024 	if (err == 0)
6025 		sc->sc_bf.bf_enabled = 1;
6026 
6027 	return err;
6028 }
6029 
6030 int
6031 iwm_disable_beacon_filter(struct iwm_softc *sc)
6032 {
6033 	struct iwm_beacon_filter_cmd cmd;
6034 	int err;
6035 
6036 	memset(&cmd, 0, sizeof(cmd));
6037 
6038 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
6039 	if (err == 0)
6040 		sc->sc_bf.bf_enabled = 0;
6041 
6042 	return err;
6043 }
6044 
6045 int
6046 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
6047 {
6048 	struct iwm_add_sta_cmd add_sta_cmd;
6049 	int err;
6050 	uint32_t status;
6051 	size_t cmdsize;
6052 	struct ieee80211com *ic = &sc->sc_ic;
6053 
6054 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
6055 		panic("STA already added");
6056 
6057 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6058 
6059 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6060 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6061 	else
6062 		add_sta_cmd.sta_id = IWM_STATION_ID;
6063 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
6064 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6065 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
6066 		else
6067 			add_sta_cmd.station_type = IWM_STA_LINK;
6068 	}
6069 	add_sta_cmd.mac_id_n_color
6070 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6071 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6072 		int qid;
6073 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
6074 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6075 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6076 		else
6077 			qid = IWM_AUX_QUEUE;
6078 		add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
6079 	} else if (!update) {
6080 		int ac;
6081 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6082 			int qid = ac;
6083 			if (isset(sc->sc_enabled_capa,
6084 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6085 				qid += IWM_DQA_MIN_MGMT_QUEUE;
6086 			add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
6087 		}
6088 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
6089 	}
6090 	add_sta_cmd.add_modify = update ? 1 : 0;
6091 	add_sta_cmd.station_flags_msk
6092 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
6093 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
6094 	if (update)
6095 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
6096 
6097 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6098 		add_sta_cmd.station_flags_msk
6099 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
6100 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
6101 
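		/*
		 * A peer advertising Rx MCS 8-15 supports 2 spatial
		 * streams; MCS 16-23 indicates support for 3 streams.
		 */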
6102 		if (iwm_mimo_enabled(sc)) {
6103 			if (in->in_ni.ni_rxmcs[1] != 0) {
6104 				add_sta_cmd.station_flags |=
6105 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
6106 			}
6107 			if (in->in_ni.ni_rxmcs[2] != 0) {
6108 				add_sta_cmd.station_flags |=
6109 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
6110 			}
6111 		}
6112 
6113 		add_sta_cmd.station_flags
6114 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
6115 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
6116 		case IEEE80211_AMPDU_PARAM_SS_2:
6117 			add_sta_cmd.station_flags
6118 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
6119 			break;
6120 		case IEEE80211_AMPDU_PARAM_SS_4:
6121 			add_sta_cmd.station_flags
6122 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
6123 			break;
6124 		case IEEE80211_AMPDU_PARAM_SS_8:
6125 			add_sta_cmd.station_flags
6126 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
6127 			break;
6128 		case IEEE80211_AMPDU_PARAM_SS_16:
6129 			add_sta_cmd.station_flags
6130 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
6131 			break;
6132 		default:
6133 			break;
6134 		}
6135 	}
6136 
6137 	status = IWM_ADD_STA_SUCCESS;
6138 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6139 		cmdsize = sizeof(add_sta_cmd);
6140 	else
6141 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
6142 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
6143 	    &add_sta_cmd, &status);
6144 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
6145 		err = EIO;
6146 
6147 	return err;
6148 }
6149 
6150 int
6151 iwm_add_aux_sta(struct iwm_softc *sc)
6152 {
6153 	struct iwm_add_sta_cmd cmd;
6154 	int err, qid;
6155 	uint32_t status;
6156 	size_t cmdsize;
6157 
6158 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
6159 		qid = IWM_DQA_AUX_QUEUE;
6160 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
6161 		    IWM_TX_FIFO_MCAST);
6162 	} else {
6163 		qid = IWM_AUX_QUEUE;
6164 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
6165 	}
6166 	if (err)
6167 		return err;
6168 
6169 	memset(&cmd, 0, sizeof(cmd));
6170 	cmd.sta_id = IWM_AUX_STA_ID;
6171 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6172 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
6173 	cmd.mac_id_n_color =
6174 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
6175 	cmd.tfd_queue_msk = htole32(1 << qid);
6176 	cmd.tid_disable_tx = htole16(0xffff);
6177 
6178 	status = IWM_ADD_STA_SUCCESS;
6179 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
6180 		cmdsize = sizeof(cmd);
6181 	else
6182 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
6183 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
6184 	    &status);
6185 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
6186 		err = EIO;
6187 
6188 	return err;
6189 }
6190 
6191 int
6192 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
6193 {
6194 	struct ieee80211com *ic = &sc->sc_ic;
6195 	struct iwm_rm_sta_cmd rm_sta_cmd;
6196 	int err;
6197 
6198 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
6199 		panic("sta already removed");
6200 
6201 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6202 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6203 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
6204 	else
6205 		rm_sta_cmd.sta_id = IWM_STATION_ID;
6206 
6207 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6208 	    &rm_sta_cmd);
6209 
6210 	return err;
6211 }
6212 
6213 uint16_t
6214 iwm_scan_rx_chain(struct iwm_softc *sc)
6215 {
6216 	uint16_t rx_chain;
6217 	uint8_t rx_ant;
6218 
6219 	rx_ant = iwm_fw_valid_rx_ant(sc);
6220 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
6221 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
6222 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
6223 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
6224 	return htole16(rx_chain);
6225 }
6226 
6227 uint32_t
6228 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
6229 {
6230 	uint32_t tx_ant;
6231 	int i, ind;
6232 
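	/*
	 * Rotate through the valid Tx antennas round-robin, starting
	 * after the antenna used for the previous scan.
	 */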
6233 	for (i = 0, ind = sc->sc_scan_last_antenna;
6234 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
6235 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
6236 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
6237 			sc->sc_scan_last_antenna = ind;
6238 			break;
6239 		}
6240 	}
6241 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
6242 
6243 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
6246 	else
6247 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
6248 }
6249 
6250 uint8_t
6251 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
6252     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
6253 {
6254 	struct ieee80211com *ic = &sc->sc_ic;
6255 	struct ieee80211_channel *c;
6256 	uint8_t nchan;
6257 
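	/*
	 * net80211 does not use channel 0, so start at index 1;
	 * channels with no flags set are not configured and are skipped.
	 */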
6258 	for (nchan = 0, c = &ic->ic_channels[1];
6259 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6260 	    nchan < sc->sc_capa_n_scan_channels;
6261 	    c++) {
6262 		if (c->ic_flags == 0)
6263 			continue;
6264 
6265 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
6266 		chan->iter_count = htole16(1);
6267 		chan->iter_interval = 0;
6268 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
6269 		if (n_ssids != 0 && !bgscan)
6270 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
6271 		chan++;
6272 		nchan++;
6273 	}
6274 
6275 	return nchan;
6276 }
6277 
6278 uint8_t
6279 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
6280     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
6281 {
6282 	struct ieee80211com *ic = &sc->sc_ic;
6283 	struct ieee80211_channel *c;
6284 	uint8_t nchan;
6285 
6286 	for (nchan = 0, c = &ic->ic_channels[1];
6287 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6288 	    nchan < sc->sc_capa_n_scan_channels;
6289 	    c++) {
6290 		if (c->ic_flags == 0)
6291 			continue;
6292 
6293 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6294 		chan->iter_count = 1;
6295 		chan->iter_interval = htole16(0);
6296 		if (n_ssids != 0 && !bgscan)
6297 			chan->flags = htole32(1 << 0); /* select SSID 0 */
6298 		chan++;
6299 		nchan++;
6300 	}
6301 
6302 	return nchan;
6303 }
6304 
6305 int
6306 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
6307 {
6308 	struct iwm_scan_probe_req preq2;
6309 	int err, i;
6310 
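	/*
	 * Build a probe request in the current layout, then copy its
	 * fields into the older v1 layout.
	 */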
6311 	err = iwm_fill_probe_req(sc, &preq2);
6312 	if (err)
6313 		return err;
6314 
6315 	preq1->mac_header = preq2.mac_header;
6316 	for (i = 0; i < nitems(preq1->band_data); i++)
6317 		preq1->band_data[i] = preq2.band_data[i];
6318 	preq1->common_data = preq2.common_data;
6319 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
6320 	return 0;
6321 }
6322 
6323 int
6324 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
6325 {
6326 	struct ieee80211com *ic = &sc->sc_ic;
6327 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6328 	struct ieee80211_rateset *rs;
6329 	size_t remain = sizeof(preq->buf);
6330 	uint8_t *frm, *pos;
6331 
6332 	memset(preq, 0, sizeof(*preq));
6333 
6334 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
6335 		return ENOBUFS;
6336 
6337 	/*
6338 	 * Build a probe request frame.  Most of the following code is a
6339 	 * copy & paste of what is done in net80211.
6340 	 */
6341 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6342 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6343 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6344 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6345 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6346 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6347 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6348 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6349 
6350 	frm = (uint8_t *)(wh + 1);
6351 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
6352 
6353 	/* Tell the firmware where the MAC header is. */
6354 	preq->mac_header.offset = 0;
6355 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6356 	remain -= frm - (uint8_t *)wh;
6357 
6358 	/* Fill in 2GHz IEs and tell firmware where they are. */
6359 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6360 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6361 		if (remain < 4 + rs->rs_nrates)
6362 			return ENOBUFS;
6363 	} else if (remain < 2 + rs->rs_nrates)
6364 		return ENOBUFS;
6365 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6366 	pos = frm;
6367 	frm = ieee80211_add_rates(frm, rs);
6368 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6369 		frm = ieee80211_add_xrates(frm, rs);
6370 	preq->band_data[0].len = htole16(frm - pos);
6371 	remain -= frm - pos;
6372 
6373 	if (isset(sc->sc_enabled_capa,
6374 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6375 		if (remain < 3)
6376 			return ENOBUFS;
6377 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6378 		*frm++ = 1;
6379 		*frm++ = 0;
6380 		remain -= 3;
6381 	}
6382 
6383 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6384 		/* Fill in 5GHz IEs. */
6385 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6386 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6387 			if (remain < 4 + rs->rs_nrates)
6388 				return ENOBUFS;
6389 		} else if (remain < 2 + rs->rs_nrates)
6390 			return ENOBUFS;
6391 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6392 		pos = frm;
6393 		frm = ieee80211_add_rates(frm, rs);
6394 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6395 			frm = ieee80211_add_xrates(frm, rs);
6396 		preq->band_data[1].len = htole16(frm - pos);
6397 		remain -= frm - pos;
6398 	}
6399 
6400 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6401 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6402 	pos = frm;
6403 	if (ic->ic_flags & IEEE80211_F_HTON) {
6404 		if (remain < 28)
6405 			return ENOBUFS;
6406 		frm = ieee80211_add_htcaps(frm, ic);
6407 		/* XXX add WME info? */
6408 	}
6409 	preq->common_data.len = htole16(frm - pos);
6410 
6411 	return 0;
6412 }
6413 
6414 int
6415 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
6416 {
6417 	struct ieee80211com *ic = &sc->sc_ic;
6418 	struct iwm_host_cmd hcmd = {
6419 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
6420 		.len = { 0, },
6421 		.data = { NULL, },
6422 		.flags = 0,
6423 	};
6424 	struct iwm_scan_req_lmac *req;
6425 	struct iwm_scan_probe_req_v1 *preq;
6426 	size_t req_len;
6427 	int err, async = bgscan;
6428 
6429 	req_len = sizeof(struct iwm_scan_req_lmac) +
6430 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
6431 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
6432 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
6433 		return ENOMEM;
6434 	req = malloc(req_len, M_DEVBUF,
6435 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6436 	if (req == NULL)
6437 		return ENOMEM;
6438 
6439 	hcmd.len[0] = (uint16_t)req_len;
6440 	hcmd.data[0] = (void *)req;
6441 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
6442 
6443 	/* These timings correspond to iwlwifi's UNASSOC scan. */
6444 	req->active_dwell = 10;
6445 	req->passive_dwell = 110;
6446 	req->fragmented_dwell = 44;
6447 	req->extended_dwell = 90;
6448 	if (bgscan) {
6449 		req->max_out_time = htole32(120);
6450 		req->suspend_time = htole32(120);
6451 	} else {
6452 		req->max_out_time = htole32(0);
6453 		req->suspend_time = htole32(0);
6454 	}
6455 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
6456 	req->rx_chain_select = iwm_scan_rx_chain(sc);
6457 	req->iter_num = htole32(1);
6458 	req->delay = 0;
6459 
6460 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
6461 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
6462 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
6463 	if (ic->ic_des_esslen == 0)
6464 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
6465 	else
6466 		req->scan_flags |=
6467 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
6468 	if (isset(sc->sc_enabled_capa,
6469 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
6470 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
6471 
6472 	req->flags = htole32(IWM_PHY_BAND_24);
6473 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
6474 		req->flags |= htole32(IWM_PHY_BAND_5);
6475 	req->filter_flags =
6476 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
6477 
6478 	/* Tx flags 2 GHz. */
6479 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
6480 	    IWM_TX_CMD_FLG_BT_DIS);
6481 	req->tx_cmd[0].rate_n_flags =
6482 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
6483 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
6484 
6485 	/* Tx flags 5 GHz. */
6486 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
6487 	    IWM_TX_CMD_FLG_BT_DIS);
6488 	req->tx_cmd[1].rate_n_flags =
6489 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
6490 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
6491 
6492 	/* Check if we're doing an active directed scan. */
6493 	if (ic->ic_des_esslen != 0) {
6494 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
6495 		req->direct_scan[0].len = ic->ic_des_esslen;
6496 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
6497 		    ic->ic_des_esslen);
6498 	}
6499 
6500 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
6501 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
6502 	    ic->ic_des_esslen != 0, bgscan);
6503 
6504 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
6505 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
6506 	    sc->sc_capa_n_scan_channels));
6507 	err = iwm_fill_probe_req_v1(sc, preq);
6508 	if (err) {
6509 		free(req, M_DEVBUF, req_len);
6510 		return err;
6511 	}
6512 
6513 	/* Specify the scan plan: We'll do one iteration. */
6514 	req->schedule[0].iterations = 1;
6515 	req->schedule[0].full_scan_mul = 1;
6516 
6517 	/* Disable EBS. */
6518 	req->channel_opt[0].non_ebs_ratio = 1;
6519 	req->channel_opt[1].non_ebs_ratio = 1;
6520 
6521 	err = iwm_send_cmd(sc, &hcmd);
6522 	free(req, M_DEVBUF, req_len);
6523 	return err;
6524 }
6525 
6526 int
6527 iwm_config_umac_scan(struct iwm_softc *sc)
6528 {
6529 	struct ieee80211com *ic = &sc->sc_ic;
6530 	struct iwm_scan_config *scan_config;
6531 	int err, nchan;
6532 	size_t cmd_size;
6533 	struct ieee80211_channel *c;
6534 	struct iwm_host_cmd hcmd = {
6535 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
6536 		.flags = 0,
6537 	};
6538 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
6539 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
6540 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
6541 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
6542 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
6543 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
6544 	    IWM_SCAN_CONFIG_RATE_54M);
6545 
6546 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
6547 
6548 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
6549 	if (scan_config == NULL)
6550 		return ENOMEM;
6551 
6552 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
6553 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
6554 	scan_config->legacy_rates = htole32(rates |
6555 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
6556 
6557 	/* These timings correspond to iwlwifi's UNASSOC scan. */
6558 	scan_config->dwell_active = 10;
6559 	scan_config->dwell_passive = 110;
6560 	scan_config->dwell_fragmented = 44;
6561 	scan_config->dwell_extended = 90;
6562 	scan_config->out_of_channel_time = htole32(0);
6563 	scan_config->suspend_time = htole32(0);
6564 
6565 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
6566 
6567 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
6568 	scan_config->channel_flags = 0;
6569 
6570 	for (c = &ic->ic_channels[1], nchan = 0;
6571 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6572 	    nchan < sc->sc_capa_n_scan_channels; c++) {
6573 		if (c->ic_flags == 0)
6574 			continue;
6575 		scan_config->channel_array[nchan++] =
6576 		    ieee80211_mhz2ieee(c->ic_freq, 0);
6577 	}
6578 
6579 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
6580 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
6581 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
6582 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
6583 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
6584 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
6585 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
6586 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
6588 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
6589 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
6590 
6591 	hcmd.data[0] = scan_config;
6592 	hcmd.len[0] = cmd_size;
6593 
6594 	err = iwm_send_cmd(sc, &hcmd);
6595 	free(scan_config, M_DEVBUF, cmd_size);
6596 	return err;
6597 }
6598 
6599 int
6600 iwm_umac_scan_size(struct iwm_softc *sc)
6601 {
6602 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
6603 	int tail_size;
6604 
6605 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
6606 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
6607 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
6608 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
6609 #ifdef notyet
6610 	else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
6611 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
6612 #endif
6613 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
6614 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
6615 	else
6616 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
6617 
6618 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
6619 	    sc->sc_capa_n_scan_channels + tail_size;
6620 }
6621 
6622 struct iwm_scan_umac_chan_param *
6623 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
6624     struct iwm_scan_req_umac *req)
6625 {
6626 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
6627 		return &req->v8.channel;
6628 
6629 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
6630 		return &req->v7.channel;
6631 #ifdef notyet
6632 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
6633 		return &req->v6.channel;
6634 #endif
6635 	return &req->v1.channel;
6636 }
6637 
6638 void *
6639 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
6640 {
6641 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
6642 		return (void *)&req->v8.data;
6643 
6644 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
6645 		return (void *)&req->v7.data;
6646 #ifdef notyet
6647 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
6648 		return (void *)&req->v6.data;
6649 #endif
	return (void *)&req->v1.data;
}
6653 
6654 /* adaptive dwell max budget time [TU] for full scan */
6655 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6656 /* adaptive dwell max budget time [TU] for directed scan */
6657 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6658 /* adaptive dwell default high band APs number */
6659 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6660 /* adaptive dwell default low band APs number */
6661 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6662 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6663 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6664 
6665 int
6666 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
6667 {
6668 	struct ieee80211com *ic = &sc->sc_ic;
6669 	struct iwm_host_cmd hcmd = {
6670 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
6671 		.len = { 0, },
6672 		.data = { NULL, },
6673 		.flags = 0,
6674 	};
6675 	struct iwm_scan_req_umac *req;
6676 	void *cmd_data, *tail_data;
6677 	struct iwm_scan_req_umac_tail_v2 *tail;
6678 	struct iwm_scan_req_umac_tail_v1 *tailv1;
6679 	struct iwm_scan_umac_chan_param *chanparam;
6680 	size_t req_len;
6681 	int err, async = bgscan;
6682 
6683 	req_len = iwm_umac_scan_size(sc);
6684 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
6685 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
6686 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
6687 		return ERANGE;
6688 	req = malloc(req_len, M_DEVBUF,
6689 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6690 	if (req == NULL)
6691 		return ENOMEM;
6692 
6693 	hcmd.len[0] = (uint16_t)req_len;
6694 	hcmd.data[0] = (void *)req;
6695 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
6696 
6697 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
6698 		req->v7.adwell_default_n_aps_social =
6699 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6700 		req->v7.adwell_default_n_aps =
6701 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
6702 
6703 		if (ic->ic_des_esslen != 0)
6704 			req->v7.adwell_max_budget =
6705 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6706 		else
6707 			req->v7.adwell_max_budget =
6708 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6709 
6710 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
6711 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
6712 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
6713 
6714 		if (isset(sc->sc_ucode_api,
6715 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
6716 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
6717 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
6718 		} else {
6719 			req->v7.active_dwell = 10;
6720 			req->v7.passive_dwell = 110;
6721 			req->v7.fragmented_dwell = 44;
6722 		}
6723 	} else {
6724 		/* These timings correspond to iwlwifi's UNASSOC scan. */
6725 		req->v1.active_dwell = 10;
6726 		req->v1.passive_dwell = 110;
6727 		req->v1.fragmented_dwell = 44;
6728 		req->v1.extended_dwell = 90;
6729 	}
6730 
6731 	if (bgscan) {
6732 		const uint32_t timeout = htole32(120);
6733 		if (isset(sc->sc_ucode_api,
6734 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
6735 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6736 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6737 		} else if (isset(sc->sc_ucode_api,
6738 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
6739 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6740 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6741 		} else {
6742 			req->v1.max_out_time = timeout;
6743 			req->v1.suspend_time = timeout;
6744 		}
6745 	}
6746 
6747 	req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
6748 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
6749 
6750 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
6751 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
6752 	chanparam->count = iwm_umac_scan_fill_channels(sc,
6753 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
6754 	    ic->ic_des_esslen != 0, bgscan);
6755 	chanparam->flags = 0;
6756 
6757 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
6758 	    sc->sc_capa_n_scan_channels;
6759 	tail = tail_data;
6760 	/* tail v1 layout differs in preq and direct_scan member fields. */
6761 	tailv1 = tail_data;
6762 
6763 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
6764 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
6765 
6766 	/* Check if we're doing an active directed scan. */
6767 	if (ic->ic_des_esslen != 0) {
		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6769 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
6770 			tail->direct_scan[0].len = ic->ic_des_esslen;
6771 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
6772 			    ic->ic_des_esslen);
6773 		} else {
6774 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
6775 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
6776 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
6777 			    ic->ic_des_esslen);
6778 		}
6779 		req->general_flags |=
6780 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
6781 	} else
6782 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
6783 
6784 	if (isset(sc->sc_enabled_capa,
6785 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
6786 		req->general_flags |=
6787 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
6788 
6789 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
6790 		req->general_flags |=
6791 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
6792 	} else {
6793 		req->general_flags |=
6794 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
6795 	}
6796 
6797 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
6798 		err = iwm_fill_probe_req(sc, &tail->preq);
6799 	else
6800 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
6801 	if (err) {
6802 		free(req, M_DEVBUF, req_len);
6803 		return err;
6804 	}
6805 
6806 	/* Specify the scan plan: We'll do one iteration. */
6807 	tail->schedule[0].interval = 0;
6808 	tail->schedule[0].iter_count = 1;
6809 
6810 	err = iwm_send_cmd(sc, &hcmd);
6811 	free(req, M_DEVBUF, req_len);
6812 	return err;
6813 }
6814 
6815 uint8_t
6816 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6817 {
6818 	int i;
6819 	uint8_t rval;
6820 
6821 	for (i = 0; i < rs->rs_nrates; i++) {
6822 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6823 		if (rval == iwm_rates[ridx].rate)
6824 			return rs->rs_rates[i];
6825 	}
6826 
6827 	return 0;
6828 }
6829 
6830 int
6831 iwm_rval2ridx(int rval)
6832 {
6833 	int ridx;
6834 
6835 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
6836 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
6837 			continue;
6838 		if (rval == iwm_rates[ridx].rate)
6839 			break;
6840 	}
6841 
	return ridx;
6843 }
6844 
6845 void
6846 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
6847     int *ofdm_rates)
6848 {
6849 	struct ieee80211_node *ni = &in->in_ni;
6850 	struct ieee80211_rateset *rs = &ni->ni_rates;
6851 	int lowest_present_ofdm = -1;
6852 	int lowest_present_cck = -1;
6853 	uint8_t cck = 0;
6854 	uint8_t ofdm = 0;
6855 	int i;
6856 
6857 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6858 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6859 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
6860 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6861 				continue;
6862 			cck |= (1 << i);
6863 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6864 				lowest_present_cck = i;
6865 		}
6866 	}
6867 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
6868 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6869 			continue;
6870 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
6871 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6872 			lowest_present_ofdm = i;
6873 	}
6874 
6875 	/*
6876 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6877 	 * variables. This isn't sufficient though, as there might not
6878 	 * be all the right rates in the bitmap. E.g. if the only basic
6879 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6880 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6881 	 *
6882 	 *    [...] a STA responding to a received frame shall transmit
6883 	 *    its Control Response frame [...] at the highest rate in the
6884 	 *    BSSBasicRateSet parameter that is less than or equal to the
6885 	 *    rate of the immediately previous frame in the frame exchange
6886 	 *    sequence ([...]) and that is of the same modulation class
6887 	 *    ([...]) as the received frame. If no rate contained in the
6888 	 *    BSSBasicRateSet parameter meets these conditions, then the
6889 	 *    control frame sent in response to a received frame shall be
6890 	 *    transmitted at the highest mandatory rate of the PHY that is
6891 	 *    less than or equal to the rate of the received frame, and
6892 	 *    that is of the same modulation class as the received frame.
6893 	 *
6894 	 * As a consequence, we need to add all mandatory rates that are
6895 	 * lower than all of the basic rates to these bitmaps.
6896 	 */
6897 
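	/*
	 * Worked example: with a basic rate set of {5.5, 11} Mbps the
	 * checks below add the mandatory 2 Mbps bit (2 < 5.5) and the
	 * always-added 1 Mbps bit; an OFDM basic set of {24} Mbps gains
	 * the 12 and 6 Mbps bits the same way.
	 */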
6898 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
6899 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
6900 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
6901 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
6902 	/* 6M already there or needed so always add */
6903 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
6904 
6905 	/*
6906 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6907 	 * Note, however:
6908 	 *  - if no CCK rates are basic, it must be ERP since there must
6909 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6910 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6911 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6912 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6913 	 *  - if 2M is basic, 1M is mandatory
6914 	 *  - if 1M is basic, that's the only valid ACK rate.
6915 	 * As a consequence, it's not as complicated as it sounds, just add
6916 	 * any lower rates to the ACK rate bitmap.
6917 	 */
6918 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
6919 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
6920 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
6921 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
6922 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
6923 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
6924 	/* 1M already there or needed so always add */
6925 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
6926 
6927 	*cck_rates = cck;
6928 	*ofdm_rates = ofdm;
6929 }
6930 
6931 void
6932 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
6933     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
6934 {
6935 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6936 	struct ieee80211com *ic = &sc->sc_ic;
6937 	struct ieee80211_node *ni = ic->ic_bss;
6938 	int cck_ack_rates, ofdm_ack_rates;
6939 	int i;
6940 
6941 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6942 	    in->in_color));
6943 	cmd->action = htole32(action);
6944 
6945 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6946 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
6947 	else if (ic->ic_opmode == IEEE80211_M_STA)
6948 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
6949 	else
6950 		panic("unsupported operating mode %d\n", ic->ic_opmode);
6951 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
6952 
6953 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6954 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6955 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6956 		return;
6957 	}
6958 
6959 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
6960 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6961 	cmd->cck_rates = htole32(cck_ack_rates);
6962 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6963 
6964 	cmd->cck_short_preamble
6965 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6966 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
6967 	cmd->short_slot
6968 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6969 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
6970 
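	/*
	 * Convert net80211's EDCA parameters into the firmware's AC
	 * layout: CW values are expanded from exponent form (e.g.
	 * ECWmin 4 yields CWmin 15), and the TXOP limit, which
	 * net80211 keeps in 802.11's units of 32 microseconds, is
	 * multiplied by 32, presumably because the firmware expects
	 * microseconds.
	 */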
6971 	for (i = 0; i < EDCA_NUM_AC; i++) {
6972 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6973 		int txf = iwm_ac_to_tx_fifo[i];
6974 
6975 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
6976 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
6977 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6978 		cmd->ac[txf].fifos_mask = (1 << txf);
6979 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6980 	}
6981 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6982 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
6983 
6984 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6985 		enum ieee80211_htprot htprot =
6986 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6987 		switch (htprot) {
6988 		case IEEE80211_HTPROT_NONE:
6989 			break;
6990 		case IEEE80211_HTPROT_NONMEMBER:
6991 		case IEEE80211_HTPROT_NONHT_MIXED:
6992 			cmd->protection_flags |=
6993 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
6994 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
6995 				cmd->protection_flags |=
6996 				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
6997 			break;
6998 		case IEEE80211_HTPROT_20MHZ:
6999 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
7000 				/* XXX ... and if our channel is 40 MHz ... */
7001 				cmd->protection_flags |=
7002 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
7003 				    IWM_MAC_PROT_FLG_FAT_PROT);
7004 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
7005 					cmd->protection_flags |= htole32(
7006 					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
7007 			}
7008 			break;
7009 		default:
7010 			break;
7011 		}
7012 
7013 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
7014 	}
7015 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7016 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
7017 
7018 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
7019 #undef IWM_EXP2
7020 }
7021 
7022 void
7023 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
7024     struct iwm_mac_data_sta *sta, int assoc)
7025 {
7026 	struct ieee80211_node *ni = &in->in_ni;
7027 	uint32_t dtim_off;
7028 	uint64_t tsf;
7029 
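	/*
	 * Compute the offset from the last received beacon to the
	 * next DTIM, apparently ni_dtimcount beacon intervals away,
	 * converted from TU to microseconds (1 TU = 1024 usec,
	 * IEEE80211_DUR_TU).
	 */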
7030 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7031 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7032 	tsf = letoh64(tsf);
7033 
7034 	sta->is_assoc = htole32(assoc);
7035 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7036 	sta->dtim_tsf = htole64(tsf + dtim_off);
7037 	sta->bi = htole32(ni->ni_intval);
7038 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
7039 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7040 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
7041 	sta->listen_interval = htole32(10);
7042 	sta->assoc_id = htole32(ni->ni_associd);
7043 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7044 }
7045 
7046 int
7047 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
7048     int assoc)
7049 {
7050 	struct ieee80211com *ic = &sc->sc_ic;
7051 	struct ieee80211_node *ni = &in->in_ni;
7052 	struct iwm_mac_ctx_cmd cmd;
7053 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
7054 
7055 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
7056 		panic("MAC already added");
7057 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
7058 		panic("MAC already removed");
7059 
7060 	memset(&cmd, 0, sizeof(cmd));
7061 
7062 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
7063 
7064 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7065 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
7066 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
7067 		    IWM_MAC_FILTER_ACCEPT_GRP |
7068 		    IWM_MAC_FILTER_IN_BEACON |
7069 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
7070 		    IWM_MAC_FILTER_IN_CRC32);
7071 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
7072 		/*
7073 		 * Allow beacons to pass through as long as we are not
7074 		 * associated or we do not have dtim period information.
7075 		 */
7076 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
7077 	else
7078 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7079 
7080 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7081 }
7082 
7083 int
7084 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
7085 {
7086 	struct iwm_time_quota_cmd cmd;
7087 	int i, idx, num_active_macs, quota, quota_rem;
7088 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
7089 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
7090 	uint16_t id;
7091 
7092 	memset(&cmd, 0, sizeof(cmd));
7093 
7094 	/* currently, PHY ID == binding ID */
7095 	if (in && in->in_phyctxt) {
7096 		id = in->in_phyctxt->id;
7097 		KASSERT(id < IWM_MAX_BINDINGS);
7098 		colors[id] = in->in_phyctxt->color;
7099 		if (running)
7100 			n_ifs[id] = 1;
7101 	}
7102 
7103 	/*
7104 	 * The FW's scheduling session consists of IWM_MAX_QUOTA
7105 	 * fragments. Divide these fragments equally between all
7106 	 * the bindings that require a quota.
7107 	 */
7108 	num_active_macs = 0;
7109 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
7110 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
7111 		num_active_macs += n_ifs[i];
7112 	}
7113 
7114 	quota = 0;
7115 	quota_rem = 0;
7116 	if (num_active_macs) {
7117 		quota = IWM_MAX_QUOTA / num_active_macs;
7118 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
7119 	}
7120 
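	/*
	 * Example: with a single active MAC that binding receives all
	 * IWM_MAX_QUOTA fragments and quota_rem is zero; any remainder
	 * from an uneven division is handed to the first binding below.
	 */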
7121 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
7122 		if (colors[i] < 0)
7123 			continue;
7124 
7125 		cmd.quotas[idx].id_and_color =
7126 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
7127 
7128 		if (n_ifs[i] <= 0) {
7129 			cmd.quotas[idx].quota = htole32(0);
7130 			cmd.quotas[idx].max_duration = htole32(0);
7131 		} else {
7132 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
7133 			cmd.quotas[idx].max_duration = htole32(0);
7134 		}
7135 		idx++;
7136 	}
7137 
7138 	/* Give the remainder of the session to the first binding */
7139 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
7140 
7141 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
7142 	    sizeof(cmd), &cmd);
7143 }
7144 
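/*
 * Schedule a task on the given taskq while holding a reference in
 * sc->task_refs. If the driver is shutting down, or if the task was
 * already queued, the reference is dropped again; iwm_stop() waits
 * for all such references to drain before tearing down driver state.
 */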
7145 void
7146 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
7147 {
7148 	int s = splnet();
7149 
7150 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7151 		splx(s);
7152 		return;
7153 	}
7154 
7155 	refcnt_take(&sc->task_refs);
7156 	if (!task_add(taskq, task))
7157 		refcnt_rele_wake(&sc->task_refs);
7158 	splx(s);
7159 }
7160 
7161 void
7162 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
7163 {
7164 	if (task_del(taskq, task))
7165 		refcnt_rele(&sc->task_refs);
7166 }
7167 
7168 int
7169 iwm_scan(struct iwm_softc *sc)
7170 {
7171 	struct ieee80211com *ic = &sc->sc_ic;
7172 	struct ifnet *ifp = IC2IFP(ic);
7173 	int err;
7174 
7175 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
7176 		err = iwm_scan_abort(sc);
7177 		if (err) {
7178 			printf("%s: could not abort background scan\n",
7179 			    DEVNAME(sc));
7180 			return err;
7181 		}
7182 	}
7183 
7184 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7185 		err = iwm_umac_scan(sc, 0);
7186 	else
7187 		err = iwm_lmac_scan(sc, 0);
7188 	if (err) {
7189 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7190 		return err;
7191 	}
7192 
7193 	/*
7194 	 * The current mode might have been fixed during association.
7195 	 * Ensure all channels get scanned.
7196 	 */
7197 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7198 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7199 
7200 	sc->sc_flags |= IWM_FLAG_SCANNING;
7201 	if (ifp->if_flags & IFF_DEBUG)
7202 		printf("%s: %s -> %s\n", ifp->if_xname,
7203 		    ieee80211_state_name[ic->ic_state],
7204 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7205 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
7206 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7207 		ieee80211_node_cleanup(ic, ic->ic_bss);
7208 	}
7209 	ic->ic_state = IEEE80211_S_SCAN;
7210 	iwm_led_blink_start(sc);
7211 	wakeup(&ic->ic_state); /* wake iwm_init() */
7212 
7213 	return 0;
7214 }
7215 
7216 int
7217 iwm_bgscan(struct ieee80211com *ic)
7218 {
7219 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
7220 	int err;
7221 
7222 	if (sc->sc_flags & IWM_FLAG_SCANNING)
7223 		return 0;
7224 
7225 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7226 		err = iwm_umac_scan(sc, 1);
7227 	else
7228 		err = iwm_lmac_scan(sc, 1);
7229 	if (err) {
7230 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7231 		return err;
7232 	}
7233 
7234 	sc->sc_flags |= IWM_FLAG_BGSCAN;
7235 	return 0;
7236 }
7237 
7238 int
7239 iwm_umac_scan_abort(struct iwm_softc *sc)
7240 {
7241 	struct iwm_umac_scan_abort cmd = { 0 };
7242 
7243 	return iwm_send_cmd_pdu(sc,
7244 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
7245 	    0, sizeof(cmd), &cmd);
7246 }
7247 
7248 int
7249 iwm_lmac_scan_abort(struct iwm_softc *sc)
7250 {
7251 	struct iwm_host_cmd cmd = {
7252 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
7253 	};
7254 	int err, status;
7255 
7256 	err = iwm_send_cmd_status(sc, &cmd, &status);
7257 	if (err)
7258 		return err;
7259 
7260 	if (status != IWM_CAN_ABORT_STATUS) {
7261 		/*
7262 		 * The scan abort will return 1 for success or
7263 		 * 2 for "failure".  A failure condition can be
7264 		 * due to simply not being in an active scan, which
7265 		 * can occur if we send the scan abort before the
7266 		 * microcode has notified us that a scan has completed.
7267 		 */
7268 		return EBUSY;
7269 	}
7270 
7271 	return 0;
7272 }
7273 
7274 int
7275 iwm_scan_abort(struct iwm_softc *sc)
7276 {
7277 	int err;
7278 
7279 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
7280 		err = iwm_umac_scan_abort(sc);
7281 	else
7282 		err = iwm_lmac_scan_abort(sc);
7283 
7284 	if (err == 0)
7285 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7286 	return err;
7287 }
7288 
7289 int
7290 iwm_auth(struct iwm_softc *sc)
7291 {
7292 	struct ieee80211com *ic = &sc->sc_ic;
7293 	struct iwm_node *in = (void *)ic->ic_bss;
7294 	uint32_t duration;
7295 	int generation = sc->sc_generation, err;
7296 
7297 	splassert(IPL_NET);
7298 
7299 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7300 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
7301 	else
7302 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
7303 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
7304 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
7305 	if (err) {
7306 		printf("%s: could not update PHY context (error %d)\n",
7307 		    DEVNAME(sc), err);
7308 		return err;
7309 	}
7310 	in->in_phyctxt = &sc->sc_phyctxt[0];
7311 
7312 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
7313 	if (err) {
7314 		printf("%s: could not add MAC context (error %d)\n",
7315 		    DEVNAME(sc), err);
7316 		return err;
7317 	}
7318 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
7319 
7320 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
7321 	if (err) {
7322 		printf("%s: could not add binding (error %d)\n",
7323 		    DEVNAME(sc), err);
7324 		goto rm_mac_ctxt;
7325 	}
7326 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
7327 
7328 	err = iwm_add_sta_cmd(sc, in, 0);
7329 	if (err) {
7330 		printf("%s: could not add sta (error %d)\n",
7331 		    DEVNAME(sc), err);
7332 		goto rm_binding;
7333 	}
7334 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
7335 
7336 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7337 		return 0;
7338 
7339 	/*
7340 	 * Prevent the FW from wandering off channel during association
7341 	 * by "protecting" the session with a time event.
7342 	 */
7343 	if (in->in_ni.ni_intval)
7344 		duration = in->in_ni.ni_intval * 2;
7345 	else
7346 		duration = IEEE80211_DUR_TU;
7347 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
7348 
7349 	return 0;
7350 
7351 rm_binding:
7352 	if (generation == sc->sc_generation) {
7353 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
7354 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
7355 	}
7356 rm_mac_ctxt:
7357 	if (generation == sc->sc_generation) {
7358 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
7359 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
7360 	}
7361 	return err;
7362 }
7363 
7364 int
7365 iwm_deauth(struct iwm_softc *sc)
7366 {
7367 	struct ieee80211com *ic = &sc->sc_ic;
7368 	struct iwm_node *in = (void *)ic->ic_bss;
7369 	int ac, tfd_queue_msk, err;
7370 
7371 	splassert(IPL_NET);
7372 
7373 	iwm_unprotect_session(sc, in);
7374 
7375 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
7376 		err = iwm_rm_sta_cmd(sc, in);
7377 		if (err) {
7378 			printf("%s: could not remove STA (error %d)\n",
7379 			    DEVNAME(sc), err);
7380 			return err;
7381 		}
7382 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
7383 		sc->sc_rx_ba_sessions = 0;
7384 		sc->ba_start_tidmask = 0;
7385 		sc->ba_stop_tidmask = 0;
7386 	}
7387 
7388 	tfd_queue_msk = 0;
7389 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7390 		int qid = ac;
7391 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7392 			qid += IWM_DQA_MIN_MGMT_QUEUE;
7393 		tfd_queue_msk |= htole32(1 << qid);
7394 	}
7395 
7396 	err = iwm_flush_tx_path(sc, tfd_queue_msk);
7397 	if (err) {
7398 		printf("%s: could not flush Tx path (error %d)\n",
7399 		    DEVNAME(sc), err);
7400 		return err;
7401 	}
7402 
7403 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
7404 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
7405 		if (err) {
7406 			printf("%s: could not remove binding (error %d)\n",
7407 			    DEVNAME(sc), err);
7408 			return err;
7409 		}
7410 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
7411 	}
7412 
7413 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
7414 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
7415 		if (err) {
7416 			printf("%s: could not remove MAC context (error %d)\n",
7417 			    DEVNAME(sc), err);
7418 			return err;
7419 		}
7420 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
7421 	}
7422 
7423 	return 0;
7424 }
7425 
7426 int
7427 iwm_assoc(struct iwm_softc *sc)
7428 {
7429 	struct ieee80211com *ic = &sc->sc_ic;
7430 	struct iwm_node *in = (void *)ic->ic_bss;
7431 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
7432 	int err;
7433 
7434 	splassert(IPL_NET);
7435 
7436 	err = iwm_add_sta_cmd(sc, in, update_sta);
7437 	if (err) {
7438 		printf("%s: could not %s STA (error %d)\n",
7439 		    DEVNAME(sc), update_sta ? "update" : "add", err);
7440 		return err;
7441 	}
7442 
7443 	return 0;
7444 }
7445 
7446 int
7447 iwm_disassoc(struct iwm_softc *sc)
7448 {
7449 	struct ieee80211com *ic = &sc->sc_ic;
7450 	struct iwm_node *in = (void *)ic->ic_bss;
7451 	int err;
7452 
7453 	splassert(IPL_NET);
7454 
7455 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
7456 		err = iwm_rm_sta_cmd(sc, in);
7457 		if (err) {
7458 			printf("%s: could not remove STA (error %d)\n",
7459 			    DEVNAME(sc), err);
7460 			return err;
7461 		}
7462 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
7463 		sc->sc_rx_ba_sessions = 0;
7464 		sc->ba_start_tidmask = 0;
7465 		sc->ba_stop_tidmask = 0;
7466 	}
7467 
7468 	return 0;
7469 }
7470 
7471 int
7472 iwm_run(struct iwm_softc *sc)
7473 {
7474 	struct ieee80211com *ic = &sc->sc_ic;
7475 	struct iwm_node *in = (void *)ic->ic_bss;
7476 	int err;
7477 
7478 	splassert(IPL_NET);
7479 
7480 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7481 		/* Add a MAC context and a sniffing STA. */
7482 		err = iwm_auth(sc);
7483 		if (err)
7484 			return err;
7485 	}
7486 
7487 	/* Configure Rx chains for MIMO. */
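	/*
	 * The two '2' arguments below apparently select two receive
	 * chains (static and dynamic); compare the 1, 1 values used to
	 * fall back to a single chain in iwm_auth() and iwm_run_stop().
	 */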
7488 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
7489 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
7490 	    iwm_mimo_enabled(sc)) {
7491 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
7492 		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
7493 		if (err) {
7494 			printf("%s: failed to update PHY\n",
7495 			    DEVNAME(sc));
7496 			return err;
7497 		}
7498 	}
7499 
7500 	/* Update STA again, for HT-related settings such as MIMO. */
7501 	err = iwm_add_sta_cmd(sc, in, 1);
7502 	if (err) {
7503 		printf("%s: could not update STA (error %d)\n",
7504 		    DEVNAME(sc), err);
7505 		return err;
7506 	}
7507 
7508 	/* We have now been assigned an associd by the AP. */
7509 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
7510 	if (err) {
7511 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7512 		return err;
7513 	}
7514 
7515 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
7516 	if (err) {
7517 		printf("%s: could not set sf full on (error %d)\n",
7518 		    DEVNAME(sc), err);
7519 		return err;
7520 	}
7521 
7522 	err = iwm_allow_mcast(sc);
7523 	if (err) {
7524 		printf("%s: could not allow mcast (error %d)\n",
7525 		    DEVNAME(sc), err);
7526 		return err;
7527 	}
7528 
7529 	err = iwm_power_update_device(sc);
7530 	if (err) {
7531 		printf("%s: could not send power command (error %d)\n",
7532 		    DEVNAME(sc), err);
7533 		return err;
7534 	}
7535 #ifdef notyet
7536 	/*
7537 	 * Disabled for now. Default beacon filter settings
7538 	 * prevent net80211 from getting ERP and HT protection
7539 	 * updates from beacons.
7540 	 */
7541 	err = iwm_enable_beacon_filter(sc, in);
7542 	if (err) {
7543 		printf("%s: could not enable beacon filter\n",
7544 		    DEVNAME(sc));
7545 		return err;
7546 	}
7547 #endif
7548 	err = iwm_power_mac_update_mode(sc, in);
7549 	if (err) {
7550 		printf("%s: could not update MAC power (error %d)\n",
7551 		    DEVNAME(sc), err);
7552 		return err;
7553 	}
7554 
7555 	err = iwm_update_quotas(sc, in, 1);
7556 	if (err) {
7557 		printf("%s: could not update quotas (error %d)\n",
7558 		    DEVNAME(sc), err);
7559 		return err;
7560 	}
7561 
7562 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
7563 	ieee80211_ra_node_init(&in->in_rn);
7564 
7565 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7566 		iwm_led_blink_start(sc);
7567 		return 0;
7568 	}
7569 
7570 	/* Start at lowest available bit-rate; AMRR will raise it. */
7571 	in->in_ni.ni_txrate = 0;
7572 	in->in_ni.ni_txmcs = 0;
7573 	iwm_setrates(in, 0);
7574 
7575 	timeout_add_msec(&sc->sc_calib_to, 500);
7576 	iwm_led_enable(sc);
7577 
7578 	return 0;
7579 }
7580 
7581 int
7582 iwm_run_stop(struct iwm_softc *sc)
7583 {
7584 	struct ieee80211com *ic = &sc->sc_ic;
7585 	struct iwm_node *in = (void *)ic->ic_bss;
7586 	int err;
7587 
7588 	splassert(IPL_NET);
7589 
7590 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7591 		iwm_led_blink_stop(sc);
7592 
7593 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
7594 	if (err)
7595 		return err;
7596 
7597 	iwm_disable_beacon_filter(sc);
7598 
7599 	err = iwm_update_quotas(sc, in, 0);
7600 	if (err) {
7601 		printf("%s: could not update quotas (error %d)\n",
7602 		    DEVNAME(sc), err);
7603 		return err;
7604 	}
7605 
7606 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
7607 	if (err) {
7608 		printf("%s: failed to update MAC\n", DEVNAME(sc));
7609 		return err;
7610 	}
7611 
7612 	/* Reset Tx chains in case MIMO was enabled. */
7613 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
7614 	    iwm_mimo_enabled(sc)) {
7615 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
7616 		    IWM_FW_CTXT_ACTION_MODIFY, 0);
7617 		if (err) {
7618 			printf("%s: failed to update PHY\n", DEVNAME(sc));
7619 			return err;
7620 		}
7621 	}
7622 
7623 	return 0;
7624 }
7625 
7626 struct ieee80211_node *
7627 iwm_node_alloc(struct ieee80211com *ic)
7628 {
7629 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
7630 }
7631 
7632 int
7633 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
7634     struct ieee80211_key *k)
7635 {
7636 	struct iwm_softc *sc = ic->ic_softc;
7637 	struct iwm_add_sta_key_cmd_v1 cmd;
7638 
7639 	memset(&cmd, 0, sizeof(cmd));
7640 
7641 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
7642 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
7643 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7644 	    IWM_STA_KEY_FLG_KEYID_MSK));
7645 	if (k->k_flags & IEEE80211_KEY_GROUP)
7646 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
7647 
7648 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7649 	cmd.common.key_offset = 0;
7650 	cmd.common.sta_id = IWM_STATION_ID;
7651 
7652 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
7653 	    sizeof(cmd), &cmd);
7654 }
7655 
7656 int
7657 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7658     struct ieee80211_key *k)
7659 {
7660 	struct iwm_softc *sc = ic->ic_softc;
7661 	struct iwm_add_sta_key_cmd cmd;
7662 
7663 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
7664 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
7665 		/* Fall back to software crypto for other ciphers. */
7666 		return (ieee80211_set_key(ic, ni, k));
7667 	}
7668 
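	/*
	 * Firmware advertising the TKIP_MIC_KEYS API takes the extended
	 * ADD_STA_KEY layout, which carries the transmit sequence
	 * counter; older firmware takes the v1 layout without it.
	 */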
7669 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
7670 		return iwm_set_key_v1(ic, ni, k);
7671 
7672 	memset(&cmd, 0, sizeof(cmd));
7673 
7674 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
7675 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
7676 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7677 	    IWM_STA_KEY_FLG_KEYID_MSK));
7678 	if (k->k_flags & IEEE80211_KEY_GROUP)
7679 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
7680 
7681 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7682 	cmd.common.key_offset = 0;
7683 	cmd.common.sta_id = IWM_STATION_ID;
7684 
7685 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
7686 
7687 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
7688 	    sizeof(cmd), &cmd);
7689 }
7690 
7691 void
7692 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
7693     struct ieee80211_key *k)
7694 {
7695 	struct iwm_softc *sc = ic->ic_softc;
7696 	struct iwm_add_sta_key_cmd_v1 cmd;
7697 
7698 	memset(&cmd, 0, sizeof(cmd));
7699 
7700 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
7701 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
7702 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7703 	    IWM_STA_KEY_FLG_KEYID_MSK));
7704 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7705 	cmd.common.key_offset = 0;
7706 	cmd.common.sta_id = IWM_STATION_ID;
7707 
7708 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
7709 }
7710 
7711 void
7712 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7713     struct ieee80211_key *k)
7714 {
7715 	struct iwm_softc *sc = ic->ic_softc;
7716 	struct iwm_add_sta_key_cmd cmd;
7717 
7718 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
7719 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
7720 		/* Fall back to software crypto for other ciphers. */
7721 		ieee80211_delete_key(ic, ni, k);
7722 		return;
7723 	}
7724 
7725 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
7726 		return iwm_delete_key_v1(ic, ni, k);
7727 
7728 	memset(&cmd, 0, sizeof(cmd));
7729 
7730 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
7731 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
7732 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7733 	    IWM_STA_KEY_FLG_KEYID_MSK));
7734 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7735 	cmd.common.key_offset = 0;
7736 	cmd.common.sta_id = IWM_STATION_ID;
7737 
7738 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
7739 }
7740 
7741 void
7742 iwm_calib_timeout(void *arg)
7743 {
7744 	struct iwm_softc *sc = arg;
7745 	struct ieee80211com *ic = &sc->sc_ic;
7746 	struct iwm_node *in = (void *)ic->ic_bss;
7747 	struct ieee80211_node *ni = &in->in_ni;
7748 	int s;
7749 
7750 	s = splnet();
7751 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
7752 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
7753 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
7754 		int old_txrate = ni->ni_txrate;
7755 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
7756 		/*
7757 		 * If AMRR has chosen a new TX rate we must update
7758 		 * the firmware's LQ rate table.
7759 		 * ni_txrate may change again before the task runs so
7760 		 * cache the chosen rate in the iwm_node structure.
7761 		 */
7762 		if (ni->ni_txrate != old_txrate)
7763 			iwm_setrates(in, 1);
7764 	}
7765 
7766 	splx(s);
7767 
7768 	timeout_add_msec(&sc->sc_calib_to, 500);
7769 }
7770 
7771 void
7772 iwm_setrates(struct iwm_node *in, int async)
7773 {
7774 	struct ieee80211_node *ni = &in->in_ni;
7775 	struct ieee80211com *ic = ni->ni_ic;
7776 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
7777 	struct iwm_lq_cmd lqcmd;
7778 	struct ieee80211_rateset *rs = &ni->ni_rates;
7779 	int i, ridx, ridx_min, ridx_max, j, sgi_ok = 0, mimo, tab = 0;
7780 	struct iwm_host_cmd cmd = {
7781 		.id = IWM_LQ_CMD,
7782 		.len = { sizeof(lqcmd), },
7783 	};
7784 
7785 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
7786 
7787 	memset(&lqcmd, 0, sizeof(lqcmd));
7788 	lqcmd.sta_id = IWM_STATION_ID;
7789 
7790 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7791 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
7792 
7793 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
7794 	    ieee80211_node_supports_ht_sgi20(ni)) {
7795 		ni->ni_flags |= IEEE80211_NODE_HT_SGI20;
7796 		sgi_ok = 1;
7797 	}
7798 
7799 	/*
7800 	 * Fill the LQ rate selection table with legacy and/or HT rates
7801 	 * in descending order, i.e. with the node's current TX rate first.
7802 	 * In cases where throughput of an HT rate corresponds to a legacy
7803 	 * rate it makes no sense to add both. We rely on the fact that
7804 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
7805 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
7806 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
7807 	 */
7808 	j = 0;
7809 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
7810 	mimo = iwm_is_mimo_mcs(ni->ni_txmcs);
7811 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
7812 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
7813 		uint8_t plcp = iwm_rates[ridx].plcp;
7814 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
7815 
7816 		if (j >= nitems(lqcmd.rs_table))
7817 			break;
7818 		tab = 0;
7819 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7820 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
7821 				continue;
7822 			/* Do not mix SISO and MIMO HT rates. */
7823 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
7824 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
7825 				continue;
7826 			for (i = ni->ni_txmcs; i >= 0; i--) {
7827 				if (isclr(ni->ni_rxmcs, i))
7828 					continue;
7829 				if (ridx == iwm_mcs2ridx[i]) {
7830 					tab = ht_plcp;
7831 					tab |= IWM_RATE_MCS_HT_MSK;
7832 					if (sgi_ok)
7833 						tab |= IWM_RATE_MCS_SGI_MSK;
7834 					break;
7835 				}
7836 			}
7837 		} else if (plcp != IWM_RATE_INVM_PLCP) {
7838 			for (i = ni->ni_txrate; i >= 0; i--) {
7839 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
7840 				    IEEE80211_RATE_VAL)) {
7841 					tab = plcp;
7842 					break;
7843 				}
7844 			}
7845 		}
7846 
7847 		if (tab == 0)
7848 			continue;
7849 
7850 		if (iwm_is_mimo_ht_plcp(ht_plcp))
7851 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
7852 		else
7853 			tab |= IWM_RATE_MCS_ANT_A_MSK;
7854 
7855 		if (IWM_RIDX_IS_CCK(ridx))
7856 			tab |= IWM_RATE_MCS_CCK_MSK;
7857 		lqcmd.rs_table[j++] = htole32(tab);
7858 	}
7859 
7860 	lqcmd.mimo_delim = (mimo ? j : 0);
7861 
7862 	/* Fill the rest with the lowest possible rate */
7863 	while (j < nitems(lqcmd.rs_table)) {
7864 		tab = iwm_rates[ridx_min].plcp;
7865 		if (IWM_RIDX_IS_CCK(ridx_min))
7866 			tab |= IWM_RATE_MCS_CCK_MSK;
7867 		tab |= IWM_RATE_MCS_ANT_A_MSK;
7868 		lqcmd.rs_table[j++] = htole32(tab);
7869 	}
7870 
7871 	lqcmd.single_stream_ant_msk = IWM_ANT_A;
7872 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
7873 
7874 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
7875 	lqcmd.agg_disable_start_th = 3;
7876 #ifdef notyet
7877 	lqcmd.agg_frame_cnt_limit = 0x3f;
7878 #else
7879 	lqcmd.agg_frame_cnt_limit = 1; /* tx agg disabled */
7880 #endif
7881 
7882 	cmd.data[0] = &lqcmd;
7883 	iwm_send_cmd(sc, &cmd);
7884 }
7885 
7886 int
7887 iwm_media_change(struct ifnet *ifp)
7888 {
7889 	struct iwm_softc *sc = ifp->if_softc;
7890 	struct ieee80211com *ic = &sc->sc_ic;
7891 	uint8_t rate, ridx;
7892 	int err;
7893 
7894 	err = ieee80211_media_change(ifp);
7895 	if (err != ENETRESET)
7896 		return err;
7897 
7898 	if (ic->ic_fixed_mcs != -1)
7899 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
7900 	else if (ic->ic_fixed_rate != -1) {
7901 		rate = ic->ic_sup_rates[ic->ic_curmode].
7902 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7903 		/* Map 802.11 rate to HW rate index. */
7904 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
7905 			if (iwm_rates[ridx].rate == rate)
7906 				break;
7907 		sc->sc_fixed_ridx = ridx;
7908 	}
7909 
7910 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7911 	    (IFF_UP | IFF_RUNNING)) {
7912 		iwm_stop(ifp);
7913 		err = iwm_init(ifp);
7914 	}
7915 	return err;
7916 }
7917 
7918 void
7919 iwm_newstate_task(void *psc)
7920 {
7921 	struct iwm_softc *sc = (struct iwm_softc *)psc;
7922 	struct ieee80211com *ic = &sc->sc_ic;
7923 	enum ieee80211_state nstate = sc->ns_nstate;
7924 	enum ieee80211_state ostate = ic->ic_state;
7925 	int arg = sc->ns_arg;
7926 	int err = 0, s = splnet();
7927 
7928 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7929 		/* iwm_stop() is waiting for us. */
7930 		refcnt_rele_wake(&sc->task_refs);
7931 		splx(s);
7932 		return;
7933 	}
7934 
7935 	if (ostate == IEEE80211_S_SCAN) {
7936 		if (nstate == ostate) {
7937 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
7938 				refcnt_rele_wake(&sc->task_refs);
7939 				splx(s);
7940 				return;
7941 			}
7942 			/* Firmware is no longer scanning. Do another scan. */
7943 			goto next_scan;
7944 		} else
7945 			iwm_led_blink_stop(sc);
7946 	}
7947 
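	/*
	 * When moving to a state at or below the current one, tear
	 * down state in reverse order of establishment: leave RUN,
	 * then disassociate, then deauthenticate. The switch below
	 * falls through on purpose.
	 */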
7948 	if (nstate <= ostate) {
7949 		switch (ostate) {
7950 		case IEEE80211_S_RUN:
7951 			err = iwm_run_stop(sc);
7952 			if (err)
7953 				goto out;
7954 			/* FALLTHROUGH */
7955 		case IEEE80211_S_ASSOC:
7956 			if (nstate <= IEEE80211_S_ASSOC) {
7957 				err = iwm_disassoc(sc);
7958 				if (err)
7959 					goto out;
7960 			}
7961 			/* FALLTHROUGH */
7962 		case IEEE80211_S_AUTH:
7963 			if (nstate <= IEEE80211_S_AUTH) {
7964 				err = iwm_deauth(sc);
7965 				if (err)
7966 					goto out;
7967 			}
7968 			/* FALLTHROUGH */
7969 		case IEEE80211_S_SCAN:
7970 		case IEEE80211_S_INIT:
7971 			break;
7972 		}
7973 
7974 		/* Die now if iwm_stop() was called while we were sleeping. */
7975 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7976 			refcnt_rele_wake(&sc->task_refs);
7977 			splx(s);
7978 			return;
7979 		}
7980 	}
7981 
7982 	switch (nstate) {
7983 	case IEEE80211_S_INIT:
7984 		break;
7985 
7986 	case IEEE80211_S_SCAN:
7987 next_scan:
7988 		err = iwm_scan(sc);
7989 		if (err)
7990 			break;
7991 		refcnt_rele_wake(&sc->task_refs);
7992 		splx(s);
7993 		return;
7994 
7995 	case IEEE80211_S_AUTH:
7996 		err = iwm_auth(sc);
7997 		break;
7998 
7999 	case IEEE80211_S_ASSOC:
8000 		err = iwm_assoc(sc);
8001 		break;
8002 
8003 	case IEEE80211_S_RUN:
8004 		err = iwm_run(sc);
8005 		break;
8006 	}
8007 
8008 out:
8009 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8010 		if (err)
8011 			task_add(systq, &sc->init_task);
8012 		else
8013 			sc->sc_newstate(ic, nstate, arg);
8014 	}
8015 	refcnt_rele_wake(&sc->task_refs);
8016 	splx(s);
8017 }
8018 
8019 int
8020 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8021 {
8022 	struct ifnet *ifp = IC2IFP(ic);
8023 	struct iwm_softc *sc = ifp->if_softc;
8024 	int i;
8025 
8026 	if (ic->ic_state == IEEE80211_S_RUN) {
8027 		timeout_del(&sc->sc_calib_to);
8028 		iwm_del_task(sc, systq, &sc->ba_task);
8029 		iwm_del_task(sc, systq, &sc->htprot_task);
8030 		for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8031 			struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
8032 			iwm_clear_reorder_buffer(sc, rxba);
8033 		}
8034 	}
8035 
8036 	sc->ns_nstate = nstate;
8037 	sc->ns_arg = arg;
8038 
8039 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8040 
8041 	return 0;
8042 }
8043 
8044 void
8045 iwm_endscan(struct iwm_softc *sc)
8046 {
8047 	struct ieee80211com *ic = &sc->sc_ic;
8048 
8049 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
8050 		return;
8051 
8052 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8053 	ieee80211_end_scan(&ic->ic_if);
8054 }
8055 
8056 /*
8057  * Aging and idle timeouts for the different possible scenarios
8058  * in default configuration
8059  */
8060 static const uint32_t
8061 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
8062 	{
8063 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8064 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8065 	},
8066 	{
8067 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
8068 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8069 	},
8070 	{
8071 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
8072 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
8073 	},
8074 	{
8075 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
8076 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
8077 	},
8078 	{
8079 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
8080 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
8081 	},
8082 };
8083 
8084 /*
8085  * Aging and idle timeouts for the different possible scenarios
8086  * in single BSS MAC configuration.
8087  */
8088 static const uint32_t
8089 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
8090 	{
8091 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
8092 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
8093 	},
8094 	{
8095 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
8096 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
8097 	},
8098 	{
8099 		htole32(IWM_SF_MCAST_AGING_TIMER),
8100 		htole32(IWM_SF_MCAST_IDLE_TIMER)
8101 	},
8102 	{
8103 		htole32(IWM_SF_BA_AGING_TIMER),
8104 		htole32(IWM_SF_BA_IDLE_TIMER)
8105 	},
8106 	{
8107 		htole32(IWM_SF_TX_RE_AGING_TIMER),
8108 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
8109 	},
8110 };
8111 
8112 void
8113 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
8114     struct ieee80211_node *ni)
8115 {
8116 	int i, j, watermark;
8117 
8118 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
8119 
8120 	/*
8121 	 * If we are in the association flow, check the antenna configuration
8122 	 * capabilities of the AP station and choose the watermark accordingly.
8123 	 */
8124 	if (ni) {
8125 		if (ni->ni_flags & IEEE80211_NODE_HT) {
8126 			if (ni->ni_rxmcs[1] != 0)
8127 				watermark = IWM_SF_W_MARK_MIMO2;
8128 			else
8129 				watermark = IWM_SF_W_MARK_SISO;
8130 		} else {
8131 			watermark = IWM_SF_W_MARK_LEGACY;
8132 		}
8133 	/* default watermark value for unassociated mode. */
8134 	} else {
8135 		watermark = IWM_SF_W_MARK_MIMO2;
8136 	}
8137 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
8138 
8139 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
8140 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
8141 			sf_cmd->long_delay_timeouts[i][j] =
8142 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
8143 		}
8144 	}
8145 
8146 	if (ni) {
8147 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
8148 		       sizeof(iwm_sf_full_timeout));
8149 	} else {
8150 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
8151 		       sizeof(iwm_sf_full_timeout_def));
8152 	}
8153 
8154 }
8155 
8156 int
8157 iwm_sf_config(struct iwm_softc *sc, int new_state)
8158 {
8159 	struct ieee80211com *ic = &sc->sc_ic;
8160 	struct iwm_sf_cfg_cmd sf_cmd = {
8161 		.state = htole32(new_state),
8162 	};
8163 	int err = 0;
8164 
8165 #if 0	/* only used for models with sdio interface, in iwlwifi */
8166 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
8167 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
8168 #endif
8169 
8170 	switch (new_state) {
8171 	case IWM_SF_UNINIT:
8172 	case IWM_SF_INIT_OFF:
8173 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
8174 		break;
8175 	case IWM_SF_FULL_ON:
8176 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8177 		break;
8178 	default:
8179 		return EINVAL;
8180 	}
8181 
8182 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
8183 				   sizeof(sf_cmd), &sf_cmd);
8184 	return err;
8185 }
8186 
8187 int
8188 iwm_send_bt_init_conf(struct iwm_softc *sc)
8189 {
8190 	struct iwm_bt_coex_cmd bt_cmd;
8191 
8192 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
8193 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
8194 
8195 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
8196 	    &bt_cmd);
8197 }
8198 
8199 int
8200 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
8201 {
8202 	struct iwm_mcc_update_cmd mcc_cmd;
8203 	struct iwm_host_cmd hcmd = {
8204 		.id = IWM_MCC_UPDATE_CMD,
8205 		.flags = IWM_CMD_WANT_RESP,
8206 		.data = { &mcc_cmd },
8207 	};
8208 	int err;
8209 	int resp_v2 = isset(sc->sc_enabled_capa,
8210 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
8211 
8212 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
8213 	    !sc->sc_nvm.lar_enabled) {
8214 		return 0;
8215 	}
8216 
8217 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
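	/*
	 * The MCC is the two-letter ISO alpha2 country code packed
	 * into 16 bits with the first letter in the high byte; e.g.
	 * the "ZZ" world-wide default passed by iwm_init_hw() becomes
	 * 0x5a5a.
	 */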
8218 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8219 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8220 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8221 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
8222 	else
8223 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
8224 
8225 	if (resp_v2) {
8226 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
8227 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
8228 		    sizeof(struct iwm_mcc_update_resp);
8229 	} else {
8230 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
8231 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
8232 		    sizeof(struct iwm_mcc_update_resp_v1);
8233 	}
8234 
8235 	err = iwm_send_cmd(sc, &hcmd);
8236 	if (err)
8237 		return err;
8238 
8239 	iwm_free_resp(sc, &hcmd);
8240 
8241 	return 0;
8242 }
8243 
8244 void
8245 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
8246 {
8247 	struct iwm_host_cmd cmd = {
8248 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
8249 		.len = { sizeof(uint32_t), },
8250 		.data = { &backoff, },
8251 	};
8252 
8253 	iwm_send_cmd(sc, &cmd);
8254 }
8255 
8256 void
8257 iwm_free_fw_paging(struct iwm_softc *sc)
8258 {
8259 	int i;
8260 
8261 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
8262 		return;
8263 
8264 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
8265 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
8266 	}
8267 
8268 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
8269 }
8270 
8271 int
8272 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
8273 {
8274 	int sec_idx, idx;
8275 	uint32_t offset = 0;
8276 
8277 	/*
8278 	 * Find where the paging image starts. If a CPU2 image exists and
8279 	 * is in paging format, the image looks like this:
8280 	 * CPU1 sections (2 or more)
8281 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
8282 	 * CPU2 sections (not paged)
8283 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
8284 	 * CPU2 sections from the CPU2 paging sections
8285 	 * CPU2 paging CSS
8286 	 * CPU2 paging image (including instructions and data)
8287 	 */
8288 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
8289 		if (image->fw_sect[sec_idx].fws_devoff ==
8290 		    IWM_PAGING_SEPARATOR_SECTION) {
8291 			sec_idx++;
8292 			break;
8293 		}
8294 	}
8295 
8296 	/*
8297 	 * If paging is enabled there should be at least 2 more sections left
8298 	 * (one for CSS and one for Paging data)
8299 	 */
8300 	if (sec_idx >= nitems(image->fw_sect) - 1) {
8301 		printf("%s: Paging: Missing CSS and/or paging sections\n",
8302 		    DEVNAME(sc));
8303 		iwm_free_fw_paging(sc);
8304 		return EINVAL;
8305 	}
8306 
8307 	/* Copy the CSS block to DRAM. */
8308 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
8309 	    DEVNAME(sc), sec_idx));
8310 
8311 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
8312 	    image->fw_sect[sec_idx].fws_data,
8313 	    sc->fw_paging_db[0].fw_paging_size);
8314 
8315 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
8316 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
8317 
8318 	sec_idx++;
8319 
8320 	/*
8321 	 * Copy the paging blocks to DRAM. The loop index starts from 1
8322 	 * because the CSS block (index 0) has already been copied above.
8323 	 * The loop stops before num_of_paging_blk because the last block
8324 	 * may not be full and is copied separately below.
8325 	 */
8326 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
8327 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
8328 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
8329 		    sc->fw_paging_db[idx].fw_paging_size);
8330 
8331 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
8332 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
8333 
8334 		offset += sc->fw_paging_db[idx].fw_paging_size;
8335 	}
8336 
8337 	/* copy the last paging block */
8338 	if (sc->num_of_pages_in_last_blk > 0) {
8339 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
8340 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
8341 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
8342 
8343 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
8344 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
8345 	}
8346 
8347 	return 0;
8348 }
8349 
8350 int
8351 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
8352 {
8353 	int blk_idx = 0;
8354 	int error, num_of_pages;
8355 
8356 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
8357 		int i;
8358 		/* Device got reset, and we set up firmware paging again. */
8359 		bus_dmamap_sync(sc->sc_dmat,
8360 		    sc->fw_paging_db[0].fw_paging_block.map,
8361 		    0, IWM_FW_PAGING_SIZE,
8362 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
8363 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
8364 			bus_dmamap_sync(sc->sc_dmat,
8365 			    sc->fw_paging_db[i].fw_paging_block.map,
8366 			    0, IWM_PAGING_BLOCK_SIZE,
8367 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
8368 		}
8369 		return 0;
8370 	}
8371 
8372 	/* Ensure IWM_BLOCK_2_EXP_SIZE is the base-2 log of IWM_PAGING_BLOCK_SIZE. */
8373 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
8374 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 log of IWM_PAGING_BLOCK_SIZE
8375 #endif
8376 
8377 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
8378 	sc->num_of_paging_blk =
8379 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
8380 
8381 	sc->num_of_pages_in_last_blk =
8382 		num_of_pages -
8383 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
8384 
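	/*
	 * Worked example (hypothetical size): a paging_mem_size of
	 * 340 KB yields 85 4 KB pages; at 8 pages per block that is
	 * 11 blocks, with the last block holding 85 - 10 * 8 = 5 pages.
	 */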
8385 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
8386 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
8387 	    sc->num_of_paging_blk,
8388 	    sc->num_of_pages_in_last_blk));
8389 
8390 	/* allocate block of 4Kbytes for paging CSS */
8391 	error = iwm_dma_contig_alloc(sc->sc_dmat,
8392 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
8393 	    4096);
8394 	if (error) {
8395 		/* free all the previous pages since we failed */
8396 		iwm_free_fw_paging(sc);
8397 		return ENOMEM;
8398 	}
8399 
8400 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
8401 
8402 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
8403 	    DEVNAME(sc)));
8404 
8405 	/*
8406 	 * Allocate blocks in DRAM. Since the CSS occupies
8407 	 * fw_paging_db[0], the loop starts from index 1.
8408 	 */
8409 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
8410 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
8412 		error = iwm_dma_contig_alloc(sc->sc_dmat,
8413 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
8414 		    IWM_PAGING_BLOCK_SIZE, 4096);
8415 		if (error) {
8416 			/* free all the previous pages since we failed */
8417 			iwm_free_fw_paging(sc);
8418 			return ENOMEM;
8419 		}
8420 
8421 		sc->fw_paging_db[blk_idx].fw_paging_size =
8422 		    IWM_PAGING_BLOCK_SIZE;
8423 
8424 		DPRINTF((
8425 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
8426 		    DEVNAME(sc)));
8427 	}
8428 
8429 	return 0;
8430 }
8431 
8432 int
8433 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
8434 {
8435 	int ret;
8436 
8437 	ret = iwm_alloc_fw_paging_mem(sc, fw);
8438 	if (ret)
8439 		return ret;
8440 
8441 	return iwm_fill_paging_mem(sc, fw);
8442 }
8443 
8444 /* Send the paging cmd to the FW in case CPU2 has a paging image. */
8445 int
8446 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
8447 {
8448 	int blk_idx;
8449 	uint32_t dev_phy_addr;
8450 	struct iwm_fw_paging_cmd fw_paging_cmd = {
8451 		.flags =
8452 			htole32(IWM_PAGING_CMD_IS_SECURED |
8453 				IWM_PAGING_CMD_IS_ENABLED |
8454 				(sc->num_of_pages_in_last_blk <<
8455 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
8456 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
8457 		.block_num = htole32(sc->num_of_paging_blk),
8458 	};
8459 
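	/*
	 * The firmware takes each block's physical address shifted
	 * right by IWM_PAGE_2_EXP_SIZE, i.e. expressed in units of
	 * firmware pages.
	 */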
8460 	/* Loop over all paging blocks, plus the CSS block. */
8461 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
8462 		dev_phy_addr = htole32(
8463 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
8464 		    IWM_PAGE_2_EXP_SIZE);
8465 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
8466 		bus_dmamap_sync(sc->sc_dmat,
8467 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
8468 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
8469 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
8470 	}
8471 
8472 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
8473 					       IWM_LONG_GROUP, 0),
8474 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
8475 }
8476 
8477 int
8478 iwm_init_hw(struct iwm_softc *sc)
8479 {
8480 	struct ieee80211com *ic = &sc->sc_ic;
8481 	int err, i, ac, qid;
8482 
8483 	err = iwm_preinit(sc);
8484 	if (err)
8485 		return err;
8486 
8487 	err = iwm_start_hw(sc);
8488 	if (err) {
8489 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8490 		return err;
8491 	}
8492 
8493 	err = iwm_run_init_mvm_ucode(sc, 0);
8494 	if (err)
8495 		return err;
8496 
8497 	/* Should stop and start HW since INIT image just loaded. */
8498 	iwm_stop_device(sc);
8499 	err = iwm_start_hw(sc);
8500 	if (err) {
8501 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
8502 		return err;
8503 	}
8504 
8505 	/* Restart, this time with the regular firmware */
8506 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
8507 	if (err) {
8508 		printf("%s: could not load firmware\n", DEVNAME(sc));
8509 		goto err;
8510 	}
8511 
8512 	if (!iwm_nic_lock(sc))
8513 		return EBUSY;
8514 
8515 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
8516 	if (err) {
8517 		printf("%s: could not init tx ant config (error %d)\n",
8518 		    DEVNAME(sc), err);
8519 		goto err;
8520 	}
8521 
8522 	err = iwm_send_phy_db_data(sc);
8523 	if (err) {
8524 		printf("%s: could not init phy db (error %d)\n",
8525 		    DEVNAME(sc), err);
8526 		goto err;
8527 	}
8528 
8529 	err = iwm_send_phy_cfg_cmd(sc);
8530 	if (err) {
8531 		printf("%s: could not send phy config (error %d)\n",
8532 		    DEVNAME(sc), err);
8533 		goto err;
8534 	}
8535 
8536 	err = iwm_send_bt_init_conf(sc);
8537 	if (err) {
8538 		printf("%s: could not init bt coex (error %d)\n",
8539 		    DEVNAME(sc), err);
8540 		goto err;
8541 	}
8542 
8543 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8544 		err = iwm_send_dqa_cmd(sc);
8545 		if (err)
8546 			goto err;
8547 	}
8548 
8549 	/* Add auxiliary station for scanning */
8550 	err = iwm_add_aux_sta(sc);
8551 	if (err) {
8552 		printf("%s: could not add aux station (error %d)\n",
8553 		    DEVNAME(sc), err);
8554 		goto err;
8555 	}
8556 
8557 	for (i = 0; i < 1; i++) {
8558 		/*
8559 		 * The channel used here isn't relevant as it's
8560 		 * going to be overwritten in the other flows.
8561 		 * For now use the first channel we have.
8562 		 */
8563 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8564 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8565 		    IWM_FW_CTXT_ACTION_ADD, 0);
8566 		if (err) {
8567 			printf("%s: could not add phy context %d (error %d)\n",
8568 			    DEVNAME(sc), i, err);
8569 			goto err;
8570 		}
8571 	}
8572 
8573 	/* Initialize tx backoffs to the minimum. */
8574 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
8575 		iwm_tt_tx_backoff(sc, 0);
8576 
8578 	err = iwm_config_ltr(sc);
8579 	if (err) {
8580 		printf("%s: PCIe LTR configuration failed (error %d)\n",
8581 		    DEVNAME(sc), err);
8582 	}
8583 
8584 	err = iwm_power_update_device(sc);
8585 	if (err) {
8586 		printf("%s: could not send power command (error %d)\n",
8587 		    DEVNAME(sc), err);
8588 		goto err;
8589 	}
8590 
8591 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
8592 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
8593 		if (err) {
8594 			printf("%s: could not init LAR (error %d)\n",
8595 			    DEVNAME(sc), err);
8596 			goto err;
8597 		}
8598 	}
8599 
8600 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
8601 		err = iwm_config_umac_scan(sc);
8602 		if (err) {
8603 			printf("%s: could not configure scan (error %d)\n",
8604 			    DEVNAME(sc), err);
8605 			goto err;
8606 		}
8607 	}
8608 
8609 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8610 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
8611 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
8612 		else
8613 			qid = IWM_AUX_QUEUE;
8614 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
8615 		    iwm_ac_to_tx_fifo[EDCA_AC_BE]);
8616 		if (err) {
8617 			printf("%s: could not enable monitor inject Tx queue "
8618 			    "(error %d)\n", DEVNAME(sc), err);
8619 			goto err;
8620 		}
8621 	} else {
8622 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
8623 			if (isset(sc->sc_enabled_capa,
8624 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
8625 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
8626 			else
8627 				qid = ac;
8628 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
8629 			    iwm_ac_to_tx_fifo[ac]);
8630 			if (err) {
8631 				printf("%s: could not enable Tx queue %d "
8632 				    "(error %d)\n", DEVNAME(sc), ac, err);
8633 				goto err;
8634 			}
8635 		}
8636 	}
8637 
8638 	err = iwm_disable_beacon_filter(sc);
8639 	if (err) {
8640 		printf("%s: could not disable beacon filter (error %d)\n",
8641 		    DEVNAME(sc), err);
8642 		goto err;
8643 	}
8644 
8645 err:
8646 	iwm_nic_unlock(sc);
8647 	return err;
8648 }
8649 
8650 /* Allow multicast from our BSSID. */
8651 int
8652 iwm_allow_mcast(struct iwm_softc *sc)
8653 {
8654 	struct ieee80211com *ic = &sc->sc_ic;
8655 	struct ieee80211_node *ni = ic->ic_bss;
8656 	struct iwm_mcast_filter_cmd *cmd;
8657 	size_t size;
8658 	int err;
8659 
8660 	size = roundup(sizeof(*cmd), 4);
8661 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
8662 	if (cmd == NULL)
8663 		return ENOMEM;
8664 	cmd->filter_own = 1;
8665 	cmd->port_id = 0;
8666 	cmd->count = 0;
8667 	cmd->pass_all = 1;
8668 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
8669 
8670 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
8671 	    0, size, cmd);
8672 	free(cmd, M_DEVBUF, size);
8673 	return err;
8674 }
8675 
8676 int
8677 iwm_init(struct ifnet *ifp)
8678 {
8679 	struct iwm_softc *sc = ifp->if_softc;
8680 	struct ieee80211com *ic = &sc->sc_ic;
8681 	int err, generation;
8682 
8683 	rw_assert_wrlock(&sc->ioctl_rwl);
8684 
8685 	generation = ++sc->sc_generation;
8686 
8687 	KASSERT(sc->task_refs.refs == 0);
8688 	refcnt_init(&sc->task_refs);
8689 
8690 	err = iwm_init_hw(sc);
8691 	if (err) {
8692 		if (generation == sc->sc_generation)
8693 			iwm_stop(ifp);
8694 		return err;
8695 	}
8696 
8697 	if (sc->sc_nvm.sku_cap_11n_enable)
8698 		iwm_setup_ht_rates(sc);
8699 
8700 	ifq_clr_oactive(&ifp->if_snd);
8701 	ifp->if_flags |= IFF_RUNNING;
8702 
8703 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8704 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
8705 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
8706 		return 0;
8707 	}
8708 
8709 	ieee80211_begin_scan(ifp);
8710 
8711 	/*
8712 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
8713 	 * Wait until the transition to SCAN state has completed.
8714 	 */
8715 	do {
8716 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
8717 		    SEC_TO_NSEC(1));
8718 		if (generation != sc->sc_generation)
8719 			return ENXIO;
8720 		if (err)
8721 			return err;
8722 	} while (ic->ic_state != IEEE80211_S_SCAN);
8723 
8724 	return 0;
8725 }
8726 
8727 void
8728 iwm_start(struct ifnet *ifp)
8729 {
8730 	struct iwm_softc *sc = ifp->if_softc;
8731 	struct ieee80211com *ic = &sc->sc_ic;
8732 	struct ieee80211_node *ni;
8733 	struct ether_header *eh;
8734 	struct mbuf *m;
8735 	int ac = EDCA_AC_BE; /* XXX */
8736 
8737 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8738 		return;
8739 
8740 	for (;;) {
8741 		/* why isn't this done per-queue? */
8742 		if (sc->qfullmsk != 0) {
8743 			ifq_set_oactive(&ifp->if_snd);
8744 			break;
8745 		}
8746 
8747 		/* need to send management frames even if we're not RUNning */
8748 		m = mq_dequeue(&ic->ic_mgtq);
8749 		if (m) {
8750 			ni = m->m_pkthdr.ph_cookie;
8751 			goto sendit;
8752 		}
8753 
8754 		if (ic->ic_state != IEEE80211_S_RUN ||
8755 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8756 			break;
8757 
8758 		m = ifq_dequeue(&ifp->if_snd);
8759 		if (!m)
8760 			break;
8761 		if (m->m_len < sizeof (*eh) &&
8762 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
8763 			ifp->if_oerrors++;
8764 			continue;
8765 		}
8766 #if NBPFILTER > 0
8767 		if (ifp->if_bpf != NULL)
8768 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8769 #endif
8770 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8771 			ifp->if_oerrors++;
8772 			continue;
8773 		}
8774 
8775  sendit:
8776 #if NBPFILTER > 0
8777 		if (ic->ic_rawbpf != NULL)
8778 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8779 #endif
8780 		if (iwm_tx(sc, m, ni, ac) != 0) {
8781 			ieee80211_release_node(ic, ni);
8782 			ifp->if_oerrors++;
8783 			continue;
8784 		}
8785 
8786 		if (ifp->if_flags & IFF_UP) {
8787 			sc->sc_tx_timer = 15;
8788 			ifp->if_timer = 1;
8789 		}
8790 	}
8791 
8792 	return;
8793 }
8794 
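/*
 * Stop the interface: cancel pending tasks, shut the device down
 * and reset driver soft state.  Called with the ioctl lock held.
 */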
8795 void
8796 iwm_stop(struct ifnet *ifp)
8797 {
8798 	struct iwm_softc *sc = ifp->if_softc;
8799 	struct ieee80211com *ic = &sc->sc_ic;
8800 	struct iwm_node *in = (void *)ic->ic_bss;
8801 	int i, s = splnet();
8802 
8803 	rw_assert_wrlock(&sc->ioctl_rwl);
8804 
8805 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
8806 
8807 	/* Cancel scheduled tasks and let any stale tasks finish up. */
8808 	task_del(systq, &sc->init_task);
8809 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8810 	iwm_del_task(sc, systq, &sc->ba_task);
8811 	iwm_del_task(sc, systq, &sc->htprot_task);
8812 	KASSERT(sc->task_refs.refs >= 1);
8813 	refcnt_finalize(&sc->task_refs, "iwmstop");
8814 
8815 	iwm_stop_device(sc);
8816 
8817 	/* Reset soft state. */
8818 
8819 	sc->sc_generation++;
8820 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8821 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8822 		sc->sc_cmd_resp_pkt[i] = NULL;
8823 		sc->sc_cmd_resp_len[i] = 0;
8824 	}
8825 	ifp->if_flags &= ~IFF_RUNNING;
8826 	ifq_clr_oactive(&ifp->if_snd);
8827 
8828 	in->in_phyctxt = NULL;
8829 
8830 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8831 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8832 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8833 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8834 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
8835 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
8836 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
8837 
8838 	sc->sc_rx_ba_sessions = 0;
8839 	sc->ba_start_tidmask = 0;
8840 	sc->ba_stop_tidmask = 0;
8841 	memset(sc->ba_ssn, 0, sizeof(sc->ba_ssn));
8842 	memset(sc->ba_winsize, 0, sizeof(sc->ba_winsize));
8843 	memset(sc->ba_timeout_val, 0, sizeof(sc->ba_timeout_val));
8844 
8845 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8846 
8847 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
8848 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8849 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
8850 		iwm_clear_reorder_buffer(sc, rxba);
8851 	}
8852 	iwm_led_blink_stop(sc);
8853 	ifp->if_timer = sc->sc_tx_timer = 0;
8854 
8855 	splx(s);
8856 }
8857 
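/*
 * Watchdog timer; schedules a device reset via init_task if a Tx
 * command has been pending for too long.
 */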
8858 void
8859 iwm_watchdog(struct ifnet *ifp)
8860 {
8861 	struct iwm_softc *sc = ifp->if_softc;
8862 
8863 	ifp->if_timer = 0;
8864 	if (sc->sc_tx_timer > 0) {
8865 		if (--sc->sc_tx_timer == 0) {
8866 			printf("%s: device timeout\n", DEVNAME(sc));
8867 #ifdef IWM_DEBUG
8868 			iwm_nic_error(sc);
8869 #endif
8870 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8871 				task_add(systq, &sc->init_task);
8872 			ifp->if_oerrors++;
8873 			return;
8874 		}
8875 		ifp->if_timer = 1;
8876 	}
8877 
8878 	ieee80211_watchdog(ifp);
8879 }
8880 
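/*
 * Ioctl handler, serialized via the ioctl lock; returns ENXIO if
 * the device was reinitialized while we slept on the lock.
 */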
8881 int
8882 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8883 {
8884 	struct iwm_softc *sc = ifp->if_softc;
8885 	int s, err = 0, generation = sc->sc_generation;
8886 
8887 	/*
8888 	 * Prevent processes from entering this function while another
8889 	 * process is tsleep'ing in it.
8890 	 */
8891 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8892 	if (err == 0 && generation != sc->sc_generation) {
8893 		rw_exit(&sc->ioctl_rwl);
8894 		return ENXIO;
8895 	}
8896 	if (err)
8897 		return err;
8898 	s = splnet();
8899 
8900 	switch (cmd) {
8901 	case SIOCSIFADDR:
8902 		ifp->if_flags |= IFF_UP;
8903 		/* FALLTHROUGH */
8904 	case SIOCSIFFLAGS:
8905 		if (ifp->if_flags & IFF_UP) {
8906 			if (!(ifp->if_flags & IFF_RUNNING)) {
8907 				err = iwm_init(ifp);
8908 			}
8909 		} else {
8910 			if (ifp->if_flags & IFF_RUNNING)
8911 				iwm_stop(ifp);
8912 		}
8913 		break;
8914 
8915 	default:
8916 		err = ieee80211_ioctl(ifp, cmd, data);
8917 	}
8918 
8919 	if (err == ENETRESET) {
8920 		err = 0;
8921 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8922 		    (IFF_UP | IFF_RUNNING)) {
8923 			iwm_stop(ifp);
8924 			err = iwm_init(ifp);
8925 		}
8926 	}
8927 
8928 	splx(s);
8929 	rw_exit(&sc->ioctl_rwl);
8930 
8931 	return err;
8932 }
8933 
8934 #ifdef IWM_DEBUG
8935 /*
8936  * Note: This structure is read from the device with IO accesses,
8937  * and the reading already does the endian conversion. As it is
8938  * read with uint32_t-sized accesses, any members with a different size
8939  * need to be ordered correctly though!
8940  */
8941 struct iwm_error_event_table {
8942 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8943 	uint32_t error_id;		/* type of error */
8944 	uint32_t trm_hw_status0;	/* TRM HW status */
8945 	uint32_t trm_hw_status1;	/* TRM HW status */
8946 	uint32_t blink2;		/* branch link */
8947 	uint32_t ilink1;		/* interrupt link */
8948 	uint32_t ilink2;		/* interrupt link */
8949 	uint32_t data1;		/* error-specific data */
8950 	uint32_t data2;		/* error-specific data */
8951 	uint32_t data3;		/* error-specific data */
8952 	uint32_t bcon_time;		/* beacon timer */
8953 	uint32_t tsf_low;		/* network timestamp function timer */
8954 	uint32_t tsf_hi;		/* network timestamp function timer */
8955 	uint32_t gp1;		/* GP1 timer register */
8956 	uint32_t gp2;		/* GP2 timer register */
8957 	uint32_t fw_rev_type;	/* firmware revision type */
8958 	uint32_t major;		/* uCode version major */
8959 	uint32_t minor;		/* uCode version minor */
8960 	uint32_t hw_ver;		/* HW Silicon version */
8961 	uint32_t brd_ver;		/* HW board version */
8962 	uint32_t log_pc;		/* log program counter */
8963 	uint32_t frame_ptr;		/* frame pointer */
8964 	uint32_t stack_ptr;		/* stack pointer */
8965 	uint32_t hcmd;		/* last host command header */
8966 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8967 				 * rxtx_flag */
8968 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8969 				 * host_flag */
8970 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8971 				 * enc_flag */
8972 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8973 				 * time_flag */
8974 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8975 				 * wico interrupt */
8976 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8977 	uint32_t wait_event;		/* wait event() caller address */
8978 	uint32_t l2p_control;	/* L2pControlField */
8979 	uint32_t l2p_duration;	/* L2pDurationField */
8980 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8981 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8982 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8983 				 * (LMPM_PMG_SEL) */
8984 	uint32_t u_timestamp;	/* date and time of the firmware
8985 				 * compilation */
8986 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8987 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8988 
8989 /*
8990  * UMAC error struct - relevant starting from family 8000 chip.
8991  * Note: This structure is read from the device with IO accesses,
8992  * and the reading already does the endian conversion. As it is
8993  * read with u32-sized accesses, any members with a different size
8994  * need to be ordered correctly though!
8995  */
8996 struct iwm_umac_error_event_table {
8997 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8998 	uint32_t error_id;	/* type of error */
8999 	uint32_t blink1;	/* branch link */
9000 	uint32_t blink2;	/* branch link */
9001 	uint32_t ilink1;	/* interrupt link */
9002 	uint32_t ilink2;	/* interrupt link */
9003 	uint32_t data1;		/* error-specific data */
9004 	uint32_t data2;		/* error-specific data */
9005 	uint32_t data3;		/* error-specific data */
9006 	uint32_t umac_major;
9007 	uint32_t umac_minor;
9008 	uint32_t frame_pointer;	/* core register 27*/
9009 	uint32_t stack_pointer;	/* core register 28 */
9010 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9011 	uint32_t nic_isr_pref;	/* ISR status register */
9012 } __packed;
9013 
9014 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9015 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9016 
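/* Dump the UMAC error log (family 8000 and later). */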
9017 void
9018 iwm_nic_umac_error(struct iwm_softc *sc)
9019 {
9020 	struct iwm_umac_error_event_table table;
9021 	uint32_t base;
9022 
9023 	base = sc->sc_uc.uc_umac_error_event_table;
9024 
9025 	if (base < 0x800000) {
9026 		printf("%s: Invalid error log pointer 0x%08x\n",
9027 		    DEVNAME(sc), base);
9028 		return;
9029 	}
9030 
9031 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9032 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9033 		return;
9034 	}
9035 
9036 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9037 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9038 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9039 			sc->sc_flags, table.valid);
9040 	}
9041 
9042 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9043 		iwm_desc_lookup(table.error_id));
9044 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9045 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9046 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9047 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9048 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9049 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9050 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9051 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9052 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9053 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9054 	    table.frame_pointer);
9055 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9056 	    table.stack_pointer);
9057 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9058 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9059 	    table.nic_isr_pref);
9060 }
9061 
9062 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
9063 static struct {
9064 	const char *name;
9065 	uint8_t num;
9066 } advanced_lookup[] = {
9067 	{ "NMI_INTERRUPT_WDG", 0x34 },
9068 	{ "SYSASSERT", 0x35 },
9069 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9070 	{ "BAD_COMMAND", 0x38 },
9071 	{ "BAD_COMMAND", 0x39 },
9072 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9073 	{ "FATAL_ERROR", 0x3D },
9074 	{ "NMI_TRM_HW_ERR", 0x46 },
9075 	{ "NMI_INTERRUPT_TRM", 0x4C },
9076 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9077 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9078 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9079 	{ "NMI_INTERRUPT_HOST", 0x66 },
9080 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9081 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9082 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9083 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9084 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9085 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9086 	{ "ADVANCED_SYSASSERT", 0 },
9087 };
9088 
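/* Map a firmware error id to a human-readable name. */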
9089 const char *
9090 iwm_desc_lookup(uint32_t num)
9091 {
9092 	int i;
9093 
9094 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9095 		if (advanced_lookup[i].num ==
9096 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
9097 			return advanced_lookup[i].name;
9098 
9099 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9100 	return advanced_lookup[i].name;
9101 }
9102 
9103 /*
9104  * Support for dumping the error log seemed like a good idea ...
9105  * but it's mostly hex junk and the only sensible thing is the
9106  * hw/ucode revision (which we know anyway).  Since it's here,
9107  * I'll just leave it in, just in case e.g. the Intel guys want to
9108  * help us decipher some "ADVANCED_SYSASSERT" later.
9109  */
9110 void
9111 iwm_nic_error(struct iwm_softc *sc)
9112 {
9113 	struct iwm_error_event_table table;
9114 	uint32_t base;
9115 
9116 	printf("%s: dumping device error log\n", DEVNAME(sc));
9117 	base = sc->sc_uc.uc_error_event_table;
9118 	if (base < 0x800000) {
9119 		printf("%s: Invalid error log pointer 0x%08x\n",
9120 		    DEVNAME(sc), base);
9121 		return;
9122 	}
9123 
9124 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9125 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9126 		return;
9127 	}
9128 
9129 	if (!table.valid) {
9130 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9131 		return;
9132 	}
9133 
9134 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9135 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9136 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9137 		    sc->sc_flags, table.valid);
9138 	}
9139 
9140 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9141 	    iwm_desc_lookup(table.error_id));
9142 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9143 	    table.trm_hw_status0);
9144 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9145 	    table.trm_hw_status1);
9146 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9147 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9148 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9149 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9150 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9151 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9152 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9153 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9154 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9155 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9156 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9157 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9158 	    table.fw_rev_type);
9159 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9160 	    table.major);
9161 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9162 	    table.minor);
9163 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9164 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9165 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9166 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9167 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9168 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9169 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9170 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9171 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9172 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9173 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9174 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9175 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9176 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9177 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9178 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9179 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9180 
9181 	if (sc->sc_uc.uc_umac_error_event_table)
9182 		iwm_nic_umac_error(sc);
9183 }
9184 #endif
9185 
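/*
 * Helpers which sync an RX buffer's DMA map and yield a pointer to
 * the response payload following the packet header.
 */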
9186 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
9187 do {									\
9188 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9189 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9190 	_var_ = (void *)((_pkt_)+1);					\
9191 } while (/*CONSTCOND*/0)
9192 
9193 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9194 do {									\
9195 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9196 	    (_len_), BUS_DMASYNC_POSTREAD);				\
9197 	_ptr_ = (void *)((_pkt_)+1);					\
9198 } while (/*CONSTCOND*/0)
9199 
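/* Advance to the next RX ring slot; 'count' must be in scope at the call site. */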
9200 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
9201 
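/*
 * Check whether an RX packet header looks sane before trusting its
 * length and command fields.
 */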
9202 int
9203 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
9204 {
9205 	int qid, idx, code;
9206 
9207 	qid = pkt->hdr.qid & ~0x80;
9208 	idx = pkt->hdr.idx;
9209 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9210 
9211 	return (!(qid == 0 && idx == 0 && code == 0) &&
9212 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
9213 }
9214 
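/*
 * Process one RX buffer, which may contain several firmware
 * notifications and/or received frames packed back to back.
 */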
9215 void
9216 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
9217 {
9218 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9219 	struct iwm_rx_packet *pkt, *nextpkt;
9220 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9221 	struct mbuf *m0, *m;
9222 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9223 	int qid, idx, code, handled = 1;
9224 
9225 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
9226 	    BUS_DMASYNC_POSTREAD);
9227 
9228 	m0 = data->m;
9229 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
9230 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
9231 		qid = pkt->hdr.qid;
9232 		idx = pkt->hdr.idx;
9233 
9234 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9235 
9236 		if (!iwm_rx_pkt_valid(pkt))
9237 			break;
9238 
9239 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
9240 		if (len < sizeof(pkt->hdr) ||
9241 		    len > (IWM_RBUF_SIZE - offset - minsz))
9242 			break;
9243 
9244 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9245 			/* Take mbuf m0 off the RX ring. */
9246 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
9247 				ifp->if_ierrors++;
9248 				break;
9249 			}
9250 			KASSERT(data->m != m0);
9251 		}
9252 
9253 		switch (code) {
9254 		case IWM_REPLY_RX_PHY_CMD:
9255 			iwm_rx_rx_phy_cmd(sc, pkt, data);
9256 			break;
9257 
9258 		case IWM_REPLY_RX_MPDU_CMD: {
9259 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
9260 			nextoff = offset +
9261 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
9262 			nextpkt = (struct iwm_rx_packet *)
9263 			    (m0->m_data + nextoff);
9264 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
9265 			    !iwm_rx_pkt_valid(nextpkt)) {
9266 				/* No need to copy last frame in buffer. */
9267 				if (offset > 0)
9268 					m_adj(m0, offset);
9269 				if (sc->sc_mqrx_supported)
9270 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
9271 					    maxlen, ml);
9272 				else
9273 					iwm_rx_mpdu(sc, m0, pkt->data,
9274 					    maxlen, ml);
9275 				m0 = NULL; /* stack owns m0 now; abort loop */
9276 			} else {
9277 				/*
9278 				 * Create an mbuf which points to the current
9279 				 * packet. Always copy from offset zero to
9280 				 * preserve m_pkthdr.
9281 				 */
9282 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9283 				if (m == NULL) {
9284 					ifp->if_ierrors++;
9285 					m_freem(m0);
9286 					m0 = NULL;
9287 					break;
9288 				}
9289 				m_adj(m, offset);
9290 				if (sc->sc_mqrx_supported)
9291 					iwm_rx_mpdu_mq(sc, m, pkt->data,
9292 					    maxlen, ml);
9293 				else
9294 					iwm_rx_mpdu(sc, m, pkt->data,
9295 					    maxlen, ml);
9296 			}
9297 			break;
9298 		}
9299 
9300 		case IWM_TX_CMD:
9301 			iwm_rx_tx_cmd(sc, pkt, data);
9302 			break;
9303 
9304 		case IWM_MISSED_BEACONS_NOTIFICATION:
9305 			iwm_rx_bmiss(sc, pkt, data);
9306 			break;
9307 
9308 		case IWM_MFUART_LOAD_NOTIFICATION:
9309 			break;
9310 
9311 		case IWM_ALIVE: {
9312 			struct iwm_alive_resp_v1 *resp1;
9313 			struct iwm_alive_resp_v2 *resp2;
9314 			struct iwm_alive_resp_v3 *resp3;
9315 
9316 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
9317 				SYNC_RESP_STRUCT(resp1, pkt);
9318 				sc->sc_uc.uc_error_event_table
9319 				    = le32toh(resp1->error_event_table_ptr);
9320 				sc->sc_uc.uc_log_event_table
9321 				    = le32toh(resp1->log_event_table_ptr);
9322 				sc->sched_base = le32toh(resp1->scd_base_ptr);
9323 				if (resp1->status == IWM_ALIVE_STATUS_OK)
9324 					sc->sc_uc.uc_ok = 1;
9325 				else
9326 					sc->sc_uc.uc_ok = 0;
9327 			}
9328 
9329 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
9330 				SYNC_RESP_STRUCT(resp2, pkt);
9331 				sc->sc_uc.uc_error_event_table
9332 				    = le32toh(resp2->error_event_table_ptr);
9333 				sc->sc_uc.uc_log_event_table
9334 				    = le32toh(resp2->log_event_table_ptr);
9335 				sc->sched_base = le32toh(resp2->scd_base_ptr);
9336 				sc->sc_uc.uc_umac_error_event_table
9337 				    = le32toh(resp2->error_info_addr);
9338 				if (resp2->status == IWM_ALIVE_STATUS_OK)
9339 					sc->sc_uc.uc_ok = 1;
9340 				else
9341 					sc->sc_uc.uc_ok = 0;
9342 			}
9343 
9344 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
9345 				SYNC_RESP_STRUCT(resp3, pkt);
9346 				sc->sc_uc.uc_error_event_table
9347 				    = le32toh(resp3->error_event_table_ptr);
9348 				sc->sc_uc.uc_log_event_table
9349 				    = le32toh(resp3->log_event_table_ptr);
9350 				sc->sched_base = le32toh(resp3->scd_base_ptr);
9351 				sc->sc_uc.uc_umac_error_event_table
9352 				    = le32toh(resp3->error_info_addr);
9353 				if (resp3->status == IWM_ALIVE_STATUS_OK)
9354 					sc->sc_uc.uc_ok = 1;
9355 				else
9356 					sc->sc_uc.uc_ok = 0;
9357 			}
9358 
9359 			sc->sc_uc.uc_intr = 1;
9360 			wakeup(&sc->sc_uc);
9361 			break;
9362 		}
9363 
9364 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
9365 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
9366 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
9367 			iwm_phy_db_set_section(sc, phy_db_notif);
9368 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
9369 			wakeup(&sc->sc_init_complete);
9370 			break;
9371 		}
9372 
9373 		case IWM_STATISTICS_NOTIFICATION: {
9374 			struct iwm_notif_statistics *stats;
9375 			SYNC_RESP_STRUCT(stats, pkt);
9376 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9377 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
9378 			break;
9379 		}
9380 
9381 		case IWM_MCC_CHUB_UPDATE_CMD: {
9382 			struct iwm_mcc_chub_notif *notif;
9383 			SYNC_RESP_STRUCT(notif, pkt);
9384 
9385 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
9386 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
9387 			sc->sc_fw_mcc[2] = '\0';
			break;
9388 		}
9389 
9390 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
9391 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
9392 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
9393 			break;
9394 
9395 		case IWM_ADD_STA_KEY:
9396 		case IWM_PHY_CONFIGURATION_CMD:
9397 		case IWM_TX_ANT_CONFIGURATION_CMD:
9398 		case IWM_ADD_STA:
9399 		case IWM_MAC_CONTEXT_CMD:
9400 		case IWM_REPLY_SF_CFG_CMD:
9401 		case IWM_POWER_TABLE_CMD:
9402 		case IWM_LTR_CONFIG:
9403 		case IWM_PHY_CONTEXT_CMD:
9404 		case IWM_BINDING_CONTEXT_CMD:
9405 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
9406 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
9407 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
9408 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
9409 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
9410 		case IWM_REPLY_BEACON_FILTERING_CMD:
9411 		case IWM_MAC_PM_POWER_TABLE:
9412 		case IWM_TIME_QUOTA_CMD:
9413 		case IWM_REMOVE_STA:
9414 		case IWM_TXPATH_FLUSH:
9415 		case IWM_LQ_CMD:
9416 		case IWM_WIDE_ID(IWM_LONG_GROUP,
9417 				 IWM_FW_PAGING_BLOCK_CMD):
9418 		case IWM_BT_CONFIG:
9419 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
9420 		case IWM_NVM_ACCESS_CMD:
9421 		case IWM_MCC_UPDATE_CMD:
9422 		case IWM_TIME_EVENT_CMD: {
9423 			size_t pkt_len;
9424 
9425 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9426 				break;
9427 
9428 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
9429 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9430 
9431 			pkt_len = sizeof(pkt->len_n_flags) +
9432 			    iwm_rx_packet_len(pkt);
9433 
9434 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
9435 			    pkt_len < sizeof(*pkt) ||
9436 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9437 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
9438 				    sc->sc_cmd_resp_len[idx]);
9439 				sc->sc_cmd_resp_pkt[idx] = NULL;
9440 				break;
9441 			}
9442 
9443 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
9444 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9445 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
9446 			break;
9447 		}
9448 
9449 		/* ignore */
9450 		case IWM_PHY_DB_CMD:
9451 			break;
9452 
9453 		case IWM_INIT_COMPLETE_NOTIF:
9454 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
9455 			wakeup(&sc->sc_init_complete);
9456 			break;
9457 
9458 		case IWM_SCAN_OFFLOAD_COMPLETE: {
9459 			struct iwm_periodic_scan_complete *notif;
9460 			SYNC_RESP_STRUCT(notif, pkt);
9461 			break;
9462 		}
9463 
9464 		case IWM_SCAN_ITERATION_COMPLETE: {
9465 			struct iwm_lmac_scan_complete_notif *notif;
9466 			SYNC_RESP_STRUCT(notif, pkt);
9467 			iwm_endscan(sc);
9468 			break;
9469 		}
9470 
9471 		case IWM_SCAN_COMPLETE_UMAC: {
9472 			struct iwm_umac_scan_complete *notif;
9473 			SYNC_RESP_STRUCT(notif, pkt);
9474 			iwm_endscan(sc);
9475 			break;
9476 		}
9477 
9478 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
9479 			struct iwm_umac_scan_iter_complete_notif *notif;
9480 			SYNC_RESP_STRUCT(notif, pkt);
9481 			iwm_endscan(sc);
9482 			break;
9483 		}
9484 
9485 		case IWM_REPLY_ERROR: {
9486 			struct iwm_error_resp *resp;
9487 			SYNC_RESP_STRUCT(resp, pkt);
9488 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
9489 				DEVNAME(sc), le32toh(resp->error_type),
9490 				resp->cmd_id);
9491 			break;
9492 		}
9493 
9494 		case IWM_TIME_EVENT_NOTIFICATION: {
9495 			struct iwm_time_event_notif *notif;
9496 			uint32_t action;
9497 			SYNC_RESP_STRUCT(notif, pkt);
9498 
9499 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
9500 				break;
9501 			action = le32toh(notif->action);
9502 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
9503 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
9504 			break;
9505 		}
9506 
9507 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
9508 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
9509 		    break;
9510 
9511 		/*
9512 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
9513 		 * messages. Just ignore them for now.
9514 		 */
9515 		case IWM_DEBUG_LOG_MSG:
9516 			break;
9517 
9518 		case IWM_MCAST_FILTER_CMD:
9519 			break;
9520 
9521 		case IWM_SCD_QUEUE_CFG: {
9522 			struct iwm_scd_txq_cfg_rsp *rsp;
9523 			SYNC_RESP_STRUCT(rsp, pkt);
9524 
9525 			break;
9526 		}
9527 
9528 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
9529 			break;
9530 
9531 		default:
9532 			handled = 0;
9533 			printf("%s: unhandled firmware response 0x%x/0x%x "
9534 			    "rx ring %d[%d]\n",
9535 			    DEVNAME(sc), code, pkt->len_n_flags,
9536 			    (qid & ~0x80), idx);
9537 			break;
9538 		}
9539 
9540 		/*
9541 		 * uCode sets bit 0x80 when it originates the notification,
9542 		 * i.e. when the notification is not a direct response to a
9543 		 * command sent by the driver.
9544 		 * For example, uCode issues IWM_REPLY_RX when it sends a
9545 		 * received frame to the driver.
9546 		 */
9547 		if (handled && !(qid & (1 << 7))) {
9548 			iwm_cmd_done(sc, qid, idx, code);
9549 		}
9550 
9551 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
9552 	}
9553 
9554 	if (m0 && m0 != data->m)
9555 		m_freem(m0);
9556 }
9557 
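/*
 * Drain the RX ring up to the index last written by the firmware
 * and pass any received frames to the net80211 stack.
 */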
9558 void
9559 iwm_notif_intr(struct iwm_softc *sc)
9560 {
9561 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
9562 	uint32_t wreg;
9563 	uint16_t hw;
9564 	int count;
9565 
9566 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
9567 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
9568 
9569 	if (sc->sc_mqrx_supported) {
9570 		count = IWM_RX_MQ_RING_COUNT;
9571 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
9572 	} else {
9573 		count = IWM_RX_RING_COUNT;
9574 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
9575 	}
9576 
9577 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
9578 	hw &= (count - 1);
9579 	while (sc->rxq.cur != hw) {
9580 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
9581 		iwm_rx_pkt(sc, data, &ml);
9582 		ADVANCE_RXQ(sc);
9583 	}
9584 	if_input(&sc->sc_ic.ic_if, &ml);
9585 
9586 	/*
9587 	 * Tell the firmware what we have processed.
9588 	 * Seems like the hardware gets upset unless we align the write by 8??
9589 	 */
9590 	hw = (hw == 0) ? count - 1 : hw - 1;
9591 	IWM_WRITE(sc, wreg, hw & ~7);
9592 }
9593 
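/* Legacy (INTx/MSI) interrupt handler. */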
9594 int
9595 iwm_intr(void *arg)
9596 {
9597 	struct iwm_softc *sc = arg;
9598 	int handled = 0;
9599 	int rv = 0;
9600 	uint32_t r1, r2;
9601 
9602 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
9603 
9604 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
9605 		uint32_t *ict = sc->ict_dma.vaddr;
9606 		int tmp;
9607 
9608 		tmp = htole32(ict[sc->ict_cur]);
9609 		if (!tmp)
9610 			goto out_ena;
9611 
9612 		/*
9613 		 * ok, there was something.  keep plowing until we have all.
9614 		 */
9615 		r1 = r2 = 0;
9616 		while (tmp) {
9617 			r1 |= tmp;
9618 			ict[sc->ict_cur] = 0;
9619 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
9620 			tmp = htole32(ict[sc->ict_cur]);
9621 		}
9622 
9623 		/* this is where the fun begins.  don't ask */
9624 		if (r1 == 0xffffffff)
9625 			r1 = 0;
9626 
9627 		/*
9628 		 * Workaround for hardware bug where bits are falsely cleared
9629 		 * when using interrupt coalescing.  Bit 15 should be set if
9630 		 * bits 18 and 19 are set.
9631 		 */
9632 		if (r1 & 0xc0000)
9633 			r1 |= 0x8000;
9634 
9635 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
9636 	} else {
9637 		r1 = IWM_READ(sc, IWM_CSR_INT);
9638 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
9639 	}
9640 	if (r1 == 0 && r2 == 0) {
9641 		goto out_ena;
9642 	}
9643 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
9644 		goto out;
9645 
9646 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
9647 
9648 	/* ignored */
9649 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
9650 
9651 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
9652 		handled |= IWM_CSR_INT_BIT_RF_KILL;
9653 		iwm_check_rfkill(sc);
9654 		task_add(systq, &sc->init_task);
9655 		rv = 1;
9656 		goto out_ena;
9657 	}
9658 
9659 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
9660 #ifdef IWM_DEBUG
9661 		int i;
9662 
9663 		iwm_nic_error(sc);
9664 
9665 		/* Dump driver status (TX and RX rings) while we're here. */
9666 		DPRINTF(("driver status:\n"));
9667 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
9668 			struct iwm_tx_ring *ring = &sc->txq[i];
9669 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
9670 			    "queued=%-3d\n",
9671 			    i, ring->qid, ring->cur, ring->queued));
9672 		}
9673 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
9674 		DPRINTF(("  802.11 state %s\n",
9675 		    ieee80211_state_name[sc->sc_ic.ic_state]));
9676 #endif
9677 
9678 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9679 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
9680 			task_add(systq, &sc->init_task);
9681 		rv = 1;
9682 		goto out;
9683 
9684 	}
9685 
9686 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
9687 		handled |= IWM_CSR_INT_BIT_HW_ERR;
9688 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9689 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9690 			sc->sc_flags |= IWM_FLAG_HW_ERR;
9691 			task_add(systq, &sc->init_task);
9692 		}
9693 		rv = 1;
9694 		goto out;
9695 	}
9696 
9697 	/* firmware chunk loaded */
9698 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
9699 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
9700 		handled |= IWM_CSR_INT_BIT_FH_TX;
9701 
9702 		sc->sc_fw_chunk_done = 1;
9703 		wakeup(&sc->sc_fw);
9704 	}
9705 
9706 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
9707 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
9708 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
9709 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
9710 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
9711 		}
9712 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
9713 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
9714 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
9715 		}
9716 
9717 		/* Disable periodic interrupt; we use it as just a one-shot. */
9718 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
9719 
9720 		/*
9721 		 * Enable periodic interrupt in 8 msec only if we received
9722 		 * real RX interrupt (instead of just periodic int), to catch
9723 		 * any dangling Rx interrupt.  If it was just the periodic
9724 		 * interrupt, there was no dangling Rx activity, and no need
9725 		 * to extend the periodic interrupt; one-shot is enough.
9726 		 */
9727 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
9728 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
9729 			    IWM_CSR_INT_PERIODIC_ENA);
9730 
9731 		iwm_notif_intr(sc);
9732 	}
9733 
9734 	rv = 1;
9735 
9736  out_ena:
9737 	iwm_restore_interrupts(sc);
9738  out:
9739 	return rv;
9740 }
9741 
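/* MSI-X interrupt handler; the driver uses a single vector (0). */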
9742 int
9743 iwm_intr_msix(void *arg)
9744 {
9745 	struct iwm_softc *sc = arg;
9746 	uint32_t inta_fh, inta_hw;
9747 	int vector = 0;
9748 
9749 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
9750 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
9751 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9752 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9753 	inta_fh &= sc->sc_fh_mask;
9754 	inta_hw &= sc->sc_hw_mask;
9755 
9756 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
9757 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
9758 		iwm_notif_intr(sc);
9759 	}
9760 
9761 	/* firmware chunk loaded */
9762 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9763 		sc->sc_fw_chunk_done = 1;
9764 		wakeup(&sc->sc_fw);
9765 	}
9766 
9767 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
9768 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9769 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9770 #ifdef IWM_DEBUG
9771 		int i;
9772 
9773 		iwm_nic_error(sc);
9774 
9775 		/* Dump driver status (TX and RX rings) while we're here. */
9776 		DPRINTF(("driver status:\n"));
9777 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
9778 			struct iwm_tx_ring *ring = &sc->txq[i];
9779 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
9780 			    "queued=%-3d\n",
9781 			    i, ring->qid, ring->cur, ring->queued));
9782 		}
9783 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
9784 		DPRINTF(("  802.11 state %s\n",
9785 		    ieee80211_state_name[sc->sc_ic.ic_state]));
9786 #endif
9787 
9788 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9789 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
9790 			task_add(systq, &sc->init_task);
9791 		return 1;
9792 	}
9793 
9794 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9795 		iwm_check_rfkill(sc);
9796 		task_add(systq, &sc->init_task);
9797 	}
9798 
9799 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9800 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9801 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9802 			sc->sc_flags |= IWM_FLAG_HW_ERR;
9803 			task_add(systq, &sc->init_task);
9804 		}
9805 		return 1;
9806 	}
9807 
9808 	/*
9809 	 * Before sending the interrupt the HW disables it to prevent
9810 	 * a nested interrupt. This is done by writing 1 to the corresponding
9811 	 * bit in the mask register. After handling the interrupt, it should be
9812 	 * re-enabled by clearing this bit. This register is defined as
9813 	 * write 1 clear (W1C) register, meaning that it's being clear
9814 	 * a write-1-clear (W1C) register, meaning that the bit is
9815 	 * cleared by writing 1 to it.
9816 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9817 	return 1;
9818 }
9819 
9820 typedef void *iwm_match_t;
9821 
9822 static const struct pci_matchid iwm_devices[] = {
9823 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
9824 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
9825 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
9826 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
9827 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
9828 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
9829 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
9830 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
9831 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
9832 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
9833 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
9834 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
9835 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
9836 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
9837 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
9838 };
9839 
9840 int
9841 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
9842 {
9843 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
9844 	    nitems(iwm_devices));
9845 }
9846 
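/*
 * Setup which requires firmware to be loaded from disk: run the init
 * ucode once, print version info and finish 802.11 attachment.  On
 * later calls only the MAC address is refreshed.
 */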
9847 int
9848 iwm_preinit(struct iwm_softc *sc)
9849 {
9850 	struct ieee80211com *ic = &sc->sc_ic;
9851 	struct ifnet *ifp = IC2IFP(ic);
9852 	int err;
9853 	static int attached;
9854 
9855 	err = iwm_prepare_card_hw(sc);
9856 	if (err) {
9857 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9858 		return err;
9859 	}
9860 
9861 	if (attached) {
9862 		/* Update MAC in case the upper layers changed it. */
9863 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9864 		    ((struct arpcom *)ifp)->ac_enaddr);
9865 		return 0;
9866 	}
9867 
9868 	err = iwm_start_hw(sc);
9869 	if (err) {
9870 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9871 		return err;
9872 	}
9873 
9874 	err = iwm_run_init_mvm_ucode(sc, 1);
9875 	iwm_stop_device(sc);
9876 	if (err)
9877 		return err;
9878 
9879 	/* Print version info and MAC address on first successful fw load. */
9880 	attached = 1;
9881 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9882 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
9883 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9884 
9885 	if (sc->sc_nvm.sku_cap_11n_enable)
9886 		iwm_setup_ht_rates(sc);
9887 
9888 	/* not all hardware can do 5GHz band */
9889 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9890 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9891 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9892 
9893 	/* Configure channel information obtained from firmware. */
9894 	ieee80211_channel_init(ifp);
9895 
9896 	/* Configure MAC address. */
9897 	err = if_setlladdr(ifp, ic->ic_myaddr);
9898 	if (err)
9899 		printf("%s: could not set MAC address (error %d)\n",
9900 		    DEVNAME(sc), err);
9901 
9902 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
9903 
9904 	return 0;
9905 }
9906 
9907 void
9908 iwm_attach_hook(struct device *self)
9909 {
9910 	struct iwm_softc *sc = (void *)self;
9911 
9912 	KASSERT(!cold);
9913 
9914 	iwm_preinit(sc);
9915 }
9916 
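/*
 * Autoconf attach: map registers, establish the interrupt, allocate
 * DMA memory and rings, and register with net80211.  Loading the
 * firmware is postponed until mountroot (see iwm_attach_hook).
 */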
9917 void
9918 iwm_attach(struct device *parent, struct device *self, void *aux)
9919 {
9920 	struct iwm_softc *sc = (void *)self;
9921 	struct pci_attach_args *pa = aux;
9922 	pci_intr_handle_t ih;
9923 	pcireg_t reg, memtype;
9924 	struct ieee80211com *ic = &sc->sc_ic;
9925 	struct ifnet *ifp = &ic->ic_if;
9926 	const char *intrstr;
9927 	int err;
9928 	int txq_i, i, j;
9929 
9930 	sc->sc_pct = pa->pa_pc;
9931 	sc->sc_pcitag = pa->pa_tag;
9932 	sc->sc_dmat = pa->pa_dmat;
9933 
9934 	rw_init(&sc->ioctl_rwl, "iwmioctl");
9935 
9936 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9937 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9938 	if (err == 0) {
9939 		printf("%s: PCIe capability structure not found!\n",
9940 		    DEVNAME(sc));
9941 		return;
9942 	}
9943 
9944 	/* Clear device-specific "PCI retry timeout" register (41h). */
9945 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9946 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9947 
9948 	/* Enable bus-mastering and hardware bug workaround. */
9949 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
9950 	reg |= PCI_COMMAND_MASTER_ENABLE;
9951 	/* Make sure legacy INTx is not disabled (only relevant without MSI). */
9952 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
9953 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9954 	}
9955 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
9956 
9957 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9958 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9959 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9960 	if (err) {
9961 		printf("%s: can't map mem space\n", DEVNAME(sc));
9962 		return;
9963 	}
9964 
9965 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9966 		sc->sc_msix = 1;
9967 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
9968 		printf("%s: can't map interrupt\n", DEVNAME(sc));
9969 		return;
9970 	}
9971 
9972 	intrstr = pci_intr_string(sc->sc_pct, ih);
9973 	if (sc->sc_msix)
9974 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9975 		    iwm_intr_msix, sc, DEVNAME(sc));
9976 	else
9977 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9978 		    iwm_intr, sc, DEVNAME(sc));
9979 
9980 	if (sc->sc_ih == NULL) {
9981 		printf("\n");
9982 		printf("%s: can't establish interrupt", DEVNAME(sc));
9983 		if (intrstr != NULL)
9984 			printf(" at %s", intrstr);
9985 		printf("\n");
9986 		return;
9987 	}
9988 	printf(", %s\n", intrstr);
9989 
9990 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
9991 	switch (PCI_PRODUCT(pa->pa_id)) {
9992 	case PCI_PRODUCT_INTEL_WL_3160_1:
9993 	case PCI_PRODUCT_INTEL_WL_3160_2:
9994 		sc->sc_fwname = "iwm-3160-17";
9995 		sc->host_interrupt_operation_mode = 1;
9996 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9997 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9998 		sc->sc_nvm_max_section_size = 16384;
9999 		sc->nvm_type = IWM_NVM;
10000 		break;
10001 	case PCI_PRODUCT_INTEL_WL_3165_1:
10002 	case PCI_PRODUCT_INTEL_WL_3165_2:
10003 		sc->sc_fwname = "iwm-7265-17";
10004 		sc->host_interrupt_operation_mode = 0;
10005 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10006 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10007 		sc->sc_nvm_max_section_size = 16384;
10008 		sc->nvm_type = IWM_NVM;
10009 		break;
10010 	case PCI_PRODUCT_INTEL_WL_3168_1:
10011 		sc->sc_fwname = "iwm-3168-29";
10012 		sc->host_interrupt_operation_mode = 0;
10013 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10014 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10015 		sc->sc_nvm_max_section_size = 16384;
10016 		sc->nvm_type = IWM_NVM_SDP;
10017 		break;
10018 	case PCI_PRODUCT_INTEL_WL_7260_1:
10019 	case PCI_PRODUCT_INTEL_WL_7260_2:
10020 		sc->sc_fwname = "iwm-7260-17";
10021 		sc->host_interrupt_operation_mode = 1;
10022 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10023 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10024 		sc->sc_nvm_max_section_size = 16384;
10025 		sc->nvm_type = IWM_NVM;
10026 		break;
10027 	case PCI_PRODUCT_INTEL_WL_7265_1:
10028 	case PCI_PRODUCT_INTEL_WL_7265_2:
10029 		sc->sc_fwname = "iwm-7265-17";
10030 		sc->host_interrupt_operation_mode = 0;
10031 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
10032 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
10033 		sc->sc_nvm_max_section_size = 16384;
10034 		sc->nvm_type = IWM_NVM;
10035 		break;
10036 	case PCI_PRODUCT_INTEL_WL_8260_1:
10037 	case PCI_PRODUCT_INTEL_WL_8260_2:
10038 		sc->sc_fwname = "iwm-8000C-34";
10039 		sc->host_interrupt_operation_mode = 0;
10040 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
10041 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10042 		sc->sc_nvm_max_section_size = 32768;
10043 		sc->nvm_type = IWM_NVM_EXT;
10044 		break;
10045 	case PCI_PRODUCT_INTEL_WL_8265_1:
10046 		sc->sc_fwname = "iwm-8265-34";
10047 		sc->host_interrupt_operation_mode = 0;
10048 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
10049 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10050 		sc->sc_nvm_max_section_size = 32768;
10051 		sc->nvm_type = IWM_NVM_EXT;
10052 		break;
10053 	case PCI_PRODUCT_INTEL_WL_9260_1:
10054 		sc->sc_fwname = "iwm-9260-34";
10055 		sc->host_interrupt_operation_mode = 0;
10056 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
10057 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10058 		sc->sc_nvm_max_section_size = 32768;
10059 		sc->sc_mqrx_supported = 1;
10060 		break;
10061 	case PCI_PRODUCT_INTEL_WL_9560_1:
10062 	case PCI_PRODUCT_INTEL_WL_9560_2:
10063 		sc->sc_fwname = "iwm-9000-34";
10064 		sc->host_interrupt_operation_mode = 0;
10065 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
10066 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
10067 		sc->sc_nvm_max_section_size = 32768;
10068 		sc->sc_mqrx_supported = 1;
10069 		sc->sc_integrated = 1;
10070 		break;
10071 	default:
10072 		printf("%s: unknown adapter type\n", DEVNAME(sc));
10073 		return;
10074 	}
10075 
10076 	/*
10077 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
10078 	 * changed: the revision step now also includes bits 0-1 (no more
10079 	 * "dash" value). To keep hw_rev backwards compatible, we store it
10080 	 * in the old format.
10081 	 */
10082 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
10083 		uint32_t hw_step;
10084 
10085 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10086 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10087 
10088 		if (iwm_prepare_card_hw(sc) != 0) {
10089 			printf("%s: could not initialize hardware\n",
10090 			    DEVNAME(sc));
10091 			return;
10092 		}
10093 
10094 		/*
10095 		 * In order to recognize C step the driver should read the
10096 		 * chip version id located at the AUX bus MISC address.
10097 		 */
10098 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
10099 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
10100 		DELAY(2);
10101 
10102 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
10103 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
10104 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
10105 				   25000);
10106 		if (!err) {
10107 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
10108 			return;
10109 		}
10110 
10111 		if (iwm_nic_lock(sc)) {
10112 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
10113 			hw_step |= IWM_ENABLE_WFPM;
10114 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
10115 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
10116 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
10117 			if (hw_step == 0x3)
10118 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
10119 						(IWM_SILICON_C_STEP << 2);
10120 			iwm_nic_unlock(sc);
10121 		} else {
10122 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
10123 			return;
10124 		}
10125 	}
10126 
10127 	/*
10128 	 * Allocate DMA memory for firmware transfers.
10129 	 * Must be aligned on a 16-byte boundary.
10130 	 */
10131 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
10132 	    sc->sc_fwdmasegsz, 16);
10133 	if (err) {
10134 		printf("%s: could not allocate memory for firmware\n",
10135 		    DEVNAME(sc));
10136 		return;
10137 	}
10138 
10139 	/* Allocate "Keep Warm" page, used internally by the card. */
10140 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
10141 	if (err) {
10142 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
10143 		goto fail1;
10144 	}
10145 
10146 	/* Allocate interrupt cause table (ICT).*/
10147 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10148 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
10149 	if (err) {
10150 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
10151 		goto fail2;
10152 	}
10153 
10154 	/* TX scheduler rings must be aligned on a 1KB boundary. */
10155 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
10156 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
10157 	if (err) {
10158 		printf("%s: could not allocate TX scheduler rings\n",
10159 		    DEVNAME(sc));
10160 		goto fail3;
10161 	}
10162 
10163 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10164 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10165 		if (err) {
10166 			printf("%s: could not allocate TX ring %d\n",
10167 			    DEVNAME(sc), txq_i);
10168 			goto fail4;
10169 		}
10170 	}
10171 
10172 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
10173 	if (err) {
10174 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
10175 		goto fail4;
10176 	}
10177 
10178 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
10179 	if (sc->sc_nswq == NULL)
10180 		goto fail4;
10181 
10182 	/* Clear pending interrupts. */
10183 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
10184 
10185 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
10186 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
10187 	ic->ic_state = IEEE80211_S_INIT;
10188 
10189 	/* Set device capabilities. */
10190 	ic->ic_caps =
10191 	    IEEE80211_C_WEP |		/* WEP */
10192 	    IEEE80211_C_RSN |		/* WPA/RSN */
10193 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
10194 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
10195 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
10196 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
10197 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
10198 
10199 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
10200 	ic->ic_htcaps |=
10201 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
10202 	ic->ic_htxcaps = 0;
10203 	ic->ic_txbfcaps = 0;
10204 	ic->ic_aselcaps = 0;
10205 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
10206 
10207 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
10208 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
10209 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
10210 
10211 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
10212 		sc->sc_phyctxt[i].id = i;
10213 	}
10214 
10215 	sc->sc_amrr.amrr_min_success_threshold =  1;
10216 	sc->sc_amrr.amrr_max_success_threshold = 15;
10217 
10218 	/* IBSS channel undefined for now. */
10219 	ic->ic_ibss_chan = &ic->ic_channels[1];
10220 
10221 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
10222 
10223 	ifp->if_softc = sc;
10224 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
10225 	ifp->if_ioctl = iwm_ioctl;
10226 	ifp->if_start = iwm_start;
10227 	ifp->if_watchdog = iwm_watchdog;
10228 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
10229 
10230 	if_attach(ifp);
10231 	ieee80211_ifattach(ifp);
10232 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
10233 
10234 #if NBPFILTER > 0
10235 	iwm_radiotap_attach(sc);
10236 #endif
10237 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
10238 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
10239 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10240 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10241 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
10242 		rxba->sc = sc;
10243 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
10244 		    rxba);
10245 		timeout_set(&rxba->reorder_buf.reorder_timer,
10246 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
10247 		for (j = 0; j < nitems(rxba->entries); j++)
10248 			ml_init(&rxba->entries[j].frames);
10249 	}
10250 	task_set(&sc->init_task, iwm_init_task, sc);
10251 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
10252 	task_set(&sc->ba_task, iwm_ba_task, sc);
10253 	task_set(&sc->htprot_task, iwm_htprot_task, sc);
10254 
10255 	ic->ic_node_alloc = iwm_node_alloc;
10256 	ic->ic_bgscan_start = iwm_bgscan;
10257 	ic->ic_set_key = iwm_set_key;
10258 	ic->ic_delete_key = iwm_delete_key;
10259 
10260 	/* Override 802.11 state transition machine. */
10261 	sc->sc_newstate = ic->ic_newstate;
10262 	ic->ic_newstate = iwm_newstate;
10263 	ic->ic_update_htprot = iwm_update_htprot;
10264 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
10265 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
10266 #ifdef notyet
10267 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
10268 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
10269 #endif
10270 	/*
10271 	 * We cannot read the MAC address without loading the
10272 	 * firmware from disk. Postpone until mountroot is done.
10273 	 */
10274 	config_mountroot(self, iwm_attach_hook);
10275 
10276 	return;
10277 
10278 fail4:	while (--txq_i >= 0)
10279 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
10280 	iwm_free_rx_ring(sc, &sc->rxq);
10281 	iwm_dma_contig_free(&sc->sched_dma);
10282 fail3:	if (sc->ict_dma.vaddr != NULL)
10283 		iwm_dma_contig_free(&sc->ict_dma);
10284 
10285 fail2:	iwm_dma_contig_free(&sc->kw_dma);
10286 fail1:	iwm_dma_contig_free(&sc->fw_dma);
10287 	return;
10288 }
10289 
10290 #if NBPFILTER > 0
10291 void
10292 iwm_radiotap_attach(struct iwm_softc *sc)
10293 {
10294 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
10295 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
10296 
10297 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
10298 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
10299 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
10300 
10301 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
10302 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
10303 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
10304 }
10305 #endif
10306 
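/*
 * Task which (re-)initializes the device, e.g. after a fatal
 * firmware error or an rfkill toggle.
 */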
10307 void
10308 iwm_init_task(void *arg1)
10309 {
10310 	struct iwm_softc *sc = arg1;
10311 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10312 	int s = splnet();
10313 	int generation = sc->sc_generation;
10314 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
10315 
10316 	rw_enter_write(&sc->ioctl_rwl);
10317 	if (generation != sc->sc_generation) {
10318 		rw_exit(&sc->ioctl_rwl);
10319 		splx(s);
10320 		return;
10321 	}
10322 
10323 	if (ifp->if_flags & IFF_RUNNING)
10324 		iwm_stop(ifp);
10325 	else
10326 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10327 
10328 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
10329 		iwm_init(ifp);
10330 
10331 	rw_exit(&sc->ioctl_rwl);
10332 	splx(s);
10333 }
10334 
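/* Bring the hardware back up after suspend. */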
10335 int
10336 iwm_resume(struct iwm_softc *sc)
10337 {
10338 	pcireg_t reg;
10339 
10340 	/* Clear device-specific "PCI retry timeout" register (41h). */
10341 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
10342 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
10343 
10344 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
10345 	iwm_conf_msix_hw(sc, 0);
10346 
10347 	iwm_enable_rfkill_int(sc);
10348 	iwm_check_rfkill(sc);
10349 
10350 	return iwm_prepare_card_hw(sc);
10351 }
10352 
10353 int
10354 iwm_activate(struct device *self, int act)
10355 {
10356 	struct iwm_softc *sc = (struct iwm_softc *)self;
10357 	struct ifnet *ifp = &sc->sc_ic.ic_if;
10358 	int err = 0;
10359 
10360 	switch (act) {
10361 	case DVACT_QUIESCE:
10362 		if (ifp->if_flags & IFF_RUNNING) {
10363 			rw_enter_write(&sc->ioctl_rwl);
10364 			iwm_stop(ifp);
10365 			rw_exit(&sc->ioctl_rwl);
10366 		}
10367 		break;
10368 	case DVACT_RESUME:
10369 		err = iwm_resume(sc);
10370 		if (err)
10371 			printf("%s: could not initialize hardware\n",
10372 			    DEVNAME(sc));
10373 		break;
10374 	case DVACT_WAKEUP:
10375 		/* Hardware should be up at this point. */
10376 		if (iwm_set_hw_ready(sc))
10377 			task_add(systq, &sc->init_task);
10378 		break;
10379 	}
10380 
10381 	return 0;
10382 }
10383 
10384 struct cfdriver iwm_cd = {
10385 	NULL, "iwm", DV_IFNET
10386 };
10387 
10388 struct cfattach iwm_ca = {
10389 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
10390 	NULL, iwm_activate
10391 };
10392